text stringlengths 957 885k |
|---|
from .dataset import *
import torch
class WicExample(ParaphraseExample):
    """A single Word-in-Context (WiC) example: one target word observed in
    two different sentence contexts."""

    def __init__(self, lemma, pos, idxs, sent1, sent2, **kwargs):
        """
        Args:
            lemma: lemma of the target word shared by the two contexts
            pos: part of speech of the target word (Verb: V, Noun: N)
            idxs: (index in sent1, index in sent2) of the target word
            sent1: first sentence of the example
            sent2: second sentence of the example
        """
        super().__init__(sent1, sent2, **kwargs)
        self.lemma = lemma
        self.pos = pos
        self.idxs = idxs

    @property
    def get_lemma(self):
        """Lemma of the target word."""
        return self.lemma

    @property
    def get_pos(self):
        """Part-of-speech tag of the target word."""
        return self.pos

    @property
    def get_idxs(self):
        """Pair of word-level indexes (sent1 position, sent2 position)."""
        return self.idxs
class WicProcessor(ParaphraseProcessor):
    """Reads WiC data/gold files and builds example and label collections."""

    def __init__(self):
        super().__init__()

    def get_examples(self, path):
        """Return the complete list of WicExample objects from a TSV file.

        Each line holds: lemma, pos, "i-j" word indexes, sentence1, sentence2.
        """
        data_entries = []
        # BUG FIX: the original iterated over a bare open(path) and never
        # closed the handle; 'with' guarantees it is released.
        with open(path) as f:
            for line in f:
                lemma, pos, idxs, sent1, sent2 = line.strip().split('\t')
                idx1, idx2 = map(int, idxs.split('-'))
                data_entries.append(
                    WicExample(lemma, pos.lower(), (idx1, idx2), sent1, sent2))
        return data_entries

    def get_labels(self, path):
        """Return the gold labels for the WiC examples.

        'T' maps to 1, 'F' to 0; any other line is silently skipped
        (preserves the original behavior).
        """
        gold_entries = []
        # BUG FIX: close the file deterministically (was leaked before).
        with open(path) as f:
            for line in f:
                gold = line.strip()
                if gold == 'T':
                    gold_entries.append(1)
                elif gold == 'F':
                    gold_entries.append(0)
        return gold_entries

    def get_dataset_json(self, path):
        """Return a ParaphraseDataset built from a JSON-lines file
        (one object per line with word/sentence/label/char-span fields)."""
        examples = []
        labels = []
        objects = []
        with open(path, "r") as f:
            for line in f:
                objects.append(json.loads(line))
        for obj in objects:
            lemma = str(obj["word"])
            sent1 = str(obj["sentence1"])
            sent2 = str(obj["sentence2"])
            label = bool(obj["label"])
            start1 = int(obj["start1"])
            start2 = int(obj["start2"])
            end1 = int(obj["end1"])
            end2 = int(obj["end2"])
            example = WicJsonExample(lemma, start1, start2, end1, end2, sent1, sent2)
            examples.append(example)
            labels.append(label)
        return ParaphraseDataset(examples, labels)

    @staticmethod
    def get_dev_examples(path):
        """ returns a complete list of dev examples """
        raise NotImplementedError()

    @staticmethod
    def get_test_examples(path):
        """ returns a complete list of test examples """
        raise NotImplementedError()
class WiCDataLoader(DataLoader):
    """DataLoader that turns a WiC dataset into batches of tokenized sentence
    pairs plus the token positions of the target word in each sentence."""

    def __init__(self, batch_size, batches):
        super().__init__(batch_size, batches)

    @classmethod
    def build_batches(cls, dataset, batch_size, evaluation=False):
        """Build a WiCDataLoader whose batches hold WordClassifierFeatures.

        Args:
            dataset: ParaphraseDataset exposing get_examples / get_labels
            batch_size: number of examples per batch
            evaluation: unused here; kept for interface compatibility
        """
        batches = []
        for i in range(0, len(dataset), batch_size):
            # examples and labels of the current batch
            batch_examples = dataset.get_examples[i:i+batch_size]
            labels = dataset.get_labels[i:i+batch_size]
            batch_labels = torch.LongTensor(labels)
            lemmas_list = []
            sentence_pairs = []
            w1_tokens_positions = []
            w2_tokens_positions = []
            for ex in batch_examples:
                lemma = ex.get_lemma
                lemmas_list.append(lemma)
                sent1 = ex.get_sent1
                sent2 = ex.get_sent2
                sentence_pairs.append([sent1, sent2])
                # word-level indexes of the target word in each sentence
                w1_idx = ex.get_idxs[0]
                w2_idx = ex.get_idxs[1]
                w1 = sent1.strip().split(" ")[w1_idx]
                w2 = sent2.strip().split(" ")[w2_idx]
                # sent2 drops its leading special token so the two encodings
                # line up as one concatenated pair; the word encodings drop
                # both special tokens
                sent1_tokenized = config.CONFIG.tokenizer.encode(sent1)
                sent2_tokenized = config.CONFIG.tokenizer.encode(sent2)[1:]
                tokenized_w1 = config.CONFIG.tokenizer.encode(w1)[1:-1]
                tokenized_w2 = config.CONFIG.tokenizer.encode(w2)[1:-1]
                w1_token_positions = DataLoader.find_word_in_tokenized_sentence(tokenized_w1, sent1_tokenized)
                w2_token_positions = DataLoader.find_word_in_tokenized_sentence(tokenized_w2, sent2_tokenized)
                # BUG FIX: validate the lookups *before* indexing into them.
                # The original computed the adjusted positions first, so a
                # missing word raised TypeError ('NoneType' is not
                # subscriptable) instead of the intended error below.
                if w1_token_positions is None or w2_token_positions is None:
                    raise Exception("Something went wrong, words not found in tokenized sequence!")
                # shift the sent2 positions past the sent1 token block
                w2_idx1_adjusted = w2_token_positions[0] + len(sent1_tokenized)
                w2_idx2_adjusted = w2_token_positions[1] + len(sent1_tokenized)
                range_list_1 = list(range(w1_token_positions[0], w1_token_positions[1]+1))
                range_list_2 = list(range(w2_idx1_adjusted, w2_idx2_adjusted+1))
                w1_tokens_positions.append(torch.LongTensor(range_list_1).to(config.CONFIG.device))
                w2_tokens_positions.append(torch.LongTensor(range_list_2).to(config.CONFIG.device))
            # tokenize the whole batch at once, padded to the longest pair
            encoded = config.CONFIG.tokenizer(
                text=sentence_pairs,
                add_special_tokens=True,
                padding='longest',
                truncation=True,
                max_length=config.CONFIG.sequence_max_len,
                return_attention_mask=True,
                return_token_type_ids=True,
                return_tensors='pt'
            )
            embed_features_1 = WordFeatures.from_dict(encoded, indexes = w1_tokens_positions, words=lemmas_list)
            embed_features_2 = WordFeatures.from_dict(encoded, indexes = w2_tokens_positions, words=lemmas_list)
            d = WordClassifierFeatures(
                w1_features = embed_features_1,
                w2_features = embed_features_2,
                labels = batch_labels
            )
            d.to_device(config.CONFIG.device)
            batches.append(d)
        return cls(batch_size, batches)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Cisco Systems, Inc.
from copy import deepcopy
import inspect
import logging
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials_v2 as cred
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco import l2network_plugin_configuration as conf
from quantum import quantum_plugin_base_v2
LOG = logging.getLogger(__name__)
class NetworkMultiBladeV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
    """
    This implementation works with UCS and Nexus plugin for the
    following topology:
    One or more UCSM (each with one or more chasses connected),
    All FICs connected to a single Nexus Switch.
    """
    # NOTE: class-level dicts are shared across all instances of this class.
    # plugin key -> device plugin instance
    _plugins = {}
    # plugin key -> device inventory instance
    _inventory = {}
    def __init__(self):
        """
        Initialize the segmentation manager, check which device plugins are
        configured, and load the inventories those device plugins for which the
        inventory is configured
        """
        cdb.initialize()
        cred.Store.initialize()
        # VLAN/segmentation manager class path comes from configuration
        self._vlan_mgr = importutils.import_object(conf.MANAGER_CLASS)
        for key in conf.PLUGINS[const.PLUGINS].keys():
            plugin_obj = conf.PLUGINS[const.PLUGINS][key]
            self._plugins[key] = importutils.import_object(plugin_obj)
            LOG.debug("Loaded device plugin %s\n" %
                      conf.PLUGINS[const.PLUGINS][key])
            # an inventory is optional per device plugin
            if key in conf.PLUGINS[const.INVENTORY].keys():
                inventory_obj = conf.PLUGINS[const.INVENTORY][key]
                self._inventory[key] = importutils.import_object(inventory_obj)
                LOG.debug("Loaded device inventory %s\n" %
                          conf.PLUGINS[const.INVENTORY][key])
        LOG.debug("%s.%s init done" % (__name__, self.__class__.__name__))
    def _func_name(self, offset=0):
        """Get the name of the calling function"""
        # offset lets a caller ask for names further up the call stack
        return inspect.stack()[1 + offset][3]
    def _invoke_plugin_per_device(self, plugin_key, function_name, args):
        """
        Invokes a device plugin's relevant functions (on the it's
        inventory and plugin implementation) for completing this operation.
        Returns a list of per-device results, or None when the plugin is
        not loaded.
        """
        if not plugin_key in self._plugins.keys():
            LOG.info("No %s Plugin loaded" % plugin_key)
            LOG.info("%s: %s with args %s ignored" %
                     (plugin_key, function_name, args))
            return
        # ask the inventory which device IPs this operation applies to
        device_params = self._invoke_inventory(plugin_key, function_name,
                                               args)
        device_ips = device_params[const.DEVICE_IP]
        if not device_ips:
            # no specific devices: invoke the plugin once
            return [self._invoke_plugin(plugin_key, function_name, args,
                                        device_params)]
        else:
            # invoke the plugin once per device, each with its own IP
            output = []
            for device_ip in device_ips:
                new_device_params = deepcopy(device_params)
                new_device_params[const.DEVICE_IP] = device_ip
                output.append(self._invoke_plugin(plugin_key, function_name,
                                                  args, new_device_params))
            return output
    def _invoke_inventory(self, plugin_key, function_name, args):
        """
        Invokes the relevant function on a device plugin's
        inventory for completing this operation.
        """
        if not plugin_key in self._inventory.keys():
            LOG.info("No %s inventory loaded" % plugin_key)
            LOG.info("%s: %s with args %s ignored" %
                     (plugin_key, function_name, args))
            # empty device list keeps _invoke_plugin_per_device's contract
            return {const.DEVICE_IP: []}
        else:
            return getattr(self._inventory[plugin_key], function_name)(args)
    def _invoke_plugin(self, plugin_key, function_name, args, kwargs):
        """
        Invokes the relevant function on a device plugin's
        implementation for completing this operation.
        """
        # NOTE(review): iteritems/getargspec are Python 2 APIs; this module
        # targets the Py2-era Quantum codebase.
        func = getattr(self._plugins[plugin_key], function_name)
        # number of positional parameters the target accepts (minus self)
        func_args_len = int(inspect.getargspec(func).args.__len__()) - 1
        if args.__len__() > func_args_len:
            # surplus args are expected to be dicts; fold them into kwargs
            func_args = args[:func_args_len]
            extra_args = args[func_args_len:]
            for dict_arg in extra_args:
                for k, v in dict_arg.iteritems():
                    kwargs[k] = v
            return func(*func_args, **kwargs)
        else:
            return func(*args, **kwargs)
    def create_network(self, context, network):
        """
        Perform this operation in the context of the configured device
        plugins.
        """
        n = network
        try:
            # reserve a VLAN id/name for this network first
            vlan_id = self._vlan_mgr.reserve_segmentation_id(n['tenant_id'],
                                                             n['name'])
            vlan_name = self._vlan_mgr.get_vlan_name(n['id'], str(vlan_id))
            args = [n['tenant_id'], n['name'], n['id'], vlan_name, vlan_id]
            output = []
            ucs_output = self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                                        self._func_name(),
                                                        args)
            nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                                          self._func_name(),
                                                          args)
            output.extend(ucs_output or [])
            output.extend(nexus_output or [])
            # persist the VLAN binding only after the device calls succeed
            cdb.add_vlan_binding(vlan_id, vlan_name, n['id'])
            return output
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def get_network(self, context, id, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    def get_networks(self, context, filters=None, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    def update_network(self, context, id, network):
        """
        Perform this operation in the context of the configured device
        plugins.
        """
        n = network
        vlan = cdb.get_vlan_binding(id)
        args = [n['tenant_id'], id, {'vlan_id': vlan.vlan_id},
                {'net_admin_state': n['admin_state_up']},
                {'vlan_ids': ''}]
        nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                                      self._func_name(),
                                                      args)
        return nexus_output
    def delete_network(self, context, id, kwargs):
        """
        Perform this operation in the context of the configured device
        plugins.
        """
        # NOTE(review): 'kwargs' here is a plain positional dict argument,
        # not **kwargs -- callers must pass the dict explicitly.
        try:
            base_plugin_ref = kwargs[const.BASE_PLUGIN_REF]
            n = kwargs[const.NETWORK]
            tenant_id = n['tenant_id']
            args = [tenant_id, id, {const.CONTEXT:context},
                    {const.BASE_PLUGIN_REF:base_plugin_ref}]
            # TODO (Sumit): Might first need to check here if there are active
            # ports
            output = []
            ucs_output = self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                                        self._func_name(),
                                                        args)
            nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                                          self._func_name(),
                                                          args)
            output.extend(ucs_output or [])
            output.extend(nexus_output or [])
            # free the segmentation id and drop the stored binding
            self._vlan_mgr.release_segmentation_id(tenant_id, id)
            cdb.remove_vlan_binding(id)
            return output
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def create_port(self, context, port):
        """
        Perform this operation in the context of the configured device
        plugins.
        """
        try:
            tenant_id = port['tenant_id']
            net_id = port['network_id']
            port_state = port['admin_state_up']
            port_id_string = port['id']
            args = [tenant_id, net_id, port_state, port_id_string]
            ret_val = self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                                     self._func_name(), args)
            # immediately plug the interface for the newly created port
            new_args = [tenant_id, net_id, port['id'], port['id']]
            self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                           "plug_interface", new_args)
            return ret_val
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def get_port(self, context, id, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    def get_ports(self, context, filters=None, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    def update_port(self, context, id, port):
        """Currently there is no processing required for the device plugins"""
        pass
    def delete_port(self, context, id, kwargs):
        """
        Perform this operation in the context of the configured device
        plugins.
        """
        try:
            p = kwargs['port']
            args = [p['tenant_id'], p['network_id'], p['id']]
            return self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                                  self._func_name(), args)
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def create_subnet(self, context, subnet):
        """Currently there is no processing required for the device plugins"""
        pass
    def update_subnet(self, context, id, subnet):
        """Currently there is no processing required for the device plugins"""
        pass
    def get_subnet(self, context, id, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    def delete_subnet(self, context, id, kwargs):
        """Currently there is no processing required for the device plugins"""
        pass
    def get_subnets(self, context, filters=None, fields=None):
        """Currently there is no processing required for the device plugins"""
        pass
    """
    Extensions' implementation in device plugins
    """
    def schedule_host(self, args):
        """Provides the hostname on which a dynamic vnic is reserved"""
        try:
            return self._invoke_inventory(const.UCS_PLUGIN, self._func_name(),
                                          args)
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def associate_port(self, args):
        """Get the portprofile name and the device name for the dynamic vnic"""
        try:
            return self._invoke_inventory(const.UCS_PLUGIN, self._func_name(),
                                          args)
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def detach_port(self, args):
        """Remove the association of the VIF with the dynamic vnic """
        try:
            return self._invoke_plugin_per_device(const.UCS_PLUGIN,
                                                  self._func_name(), args)
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
    def create_multiport(self, args):
        """
        Makes a call to the UCS device plugin to create ports on the same
        host.
        """
        try:
            self._invoke_plugin_per_device(const.UCS_PLUGIN, self._func_name(),
                                           args)
        except:
            # TODO (Sumit): Check if we need to perform any rollback here
            raise
|
<reponame>mikespub-org/bjodah-pyemf
from .constants import *
from .field import *
from .record import _EMR_UNKNOWN
_type_map = {}
def register(klass):
"""Register META with id."""
_type_map[klass.emr_id] = klass
return klass
class META_UNKNOWN(_EMR_UNKNOWN):
    """Base class for WMF records; adapts the EMF record machinery to the
    6-byte WMF record header (4-byte size in words + 2-byte function id)."""
    # 0x7FFF is a sentinel id for records not otherwise recognized
    emr_id = 0x7FFF
    def __init__(self):
        _EMR_UNKNOWN.__init__(self)
    def readHdr(self, already_read):
        # header layout: uint32 record size in 16-bit words, uint16 function
        (count, func) = struct.unpack("<IH", already_read)
        # convert the word count to a byte count for the caller
        return func, count * 2
    def writeHdr(self, fh):
        # nSize is in bytes; the WMF header stores it in 16-bit words
        fh.write(struct.pack("<IH", self.nSize // 2, self.iType))
    def hdrLen(self):
        # 4 bytes size + 2 bytes function id
        return 6
    def verifySize(self, before, calcSize):
        # WMF records skip the size verification done for EMF records
        return
class META_COLOR(META_UNKNOWN):
    """Base for records whose only payload is a single COLORREF value."""
    typedef = [("I", "crColor", 0)]
    def __init__(self, color=0):
        META_UNKNOWN.__init__(self)
        self.crColor = color
class META_HAS_HANDLE(META_UNKNOWN):
    """Base for records that reference a GDI object handle (handle is not
    part of the serialized payload -- no typedef entry)."""
    def __init__(self, dc=None, handle=0):
        META_UNKNOWN.__init__(self)
        self.handle = handle
    def hasHandle(self):
        return True
class META_HANDLE(META_UNKNOWN):
    """Base for records that serialize a 16-bit object handle as payload."""
    typedef = [("H", "handle")]
    def __init__(self, dc=None, handle=0):
        META_UNKNOWN.__init__(self)
        self.handle = handle
class META_SETMODE(META_UNKNOWN):
    """Base for the Set*Mode records; validates the mode against an
    inclusive [first, last] range."""
    typedef = [("H", "iMode", 0), ("H", "iReserved", 0)]
    def __init__(self, mode, first, last):
        META_UNKNOWN.__init__(self)
        if mode < first or mode > last:
            # NOTE(review): on out-of-range input only the error flag is set;
            # iMode keeps its typedef default of 0 -- confirm intended.
            self.error = 1
        else:
            self.iMode = mode
class META_CREATEOBJECT(META_HAS_HANDLE):
    """Base for records that create a GDI object and receive a handle."""
    def __init__(self, dc=None, handle=0):
        META_HAS_HANDLE.__init__(self, dc, handle)
    def isCreateObject(self):
        return True
    def setHandle(self, handle):
        self.handle = handle
class META_PLACEABLE(META_UNKNOWN):
    """The META_PLACEABLE record is the first record in a placeable
    WMF metafile, which is an extension to the WMF metafile format."""
    typedef = [
        ("I", "nKey", 0x9AC6CDD7),
        ("H", "hWmf", 0x0000),
        (Points(num=2, fmt="H"), "rclBounds"),
        ("H", "sInch", 0),
        ("I", "nReserved", 0),
        ("H", "sChecksum", 0),
    ]
    def __init__(self):
        META_UNKNOWN.__init__(self)
        # device/frame sizes kept as [x, y] pairs for bound computation
        self.szlDevice = [0, 0]
        self.szlMicrometers = [0, 0]
        self.szlMillimeters = [0, 0]
    def setBounds(self, dc, scaleheader=False):
        """Copy the device context's bounds/frame rectangles into this record.

        Args:
            dc: device context carrying bounds_* and frame_* coordinates
            scaleheader: accepted for interface compatibility; unused here
        """
        self.rclBounds = [
            [dc.bounds_left, dc.bounds_top],
            [dc.bounds_right, dc.bounds_bottom],
        ]
        # NOTE(review): rclFrame is stored but has no typedef entry, so it
        # is never serialized -- confirm whether that is intended.
        self.rclFrame = [
            [dc.frame_left, dc.frame_top],
            [dc.frame_right, dc.frame_bottom],
        ]
        # BUG FIX: removed a leftover debug print(self) that dumped the
        # record to stdout on every setBounds call.
    def writeHdr(self, fh):
        # the placeable header has no standard WMF record header
        return
    def hdrLen(self):
        return 0
class META_HEADER(META_UNKNOWN):
    """The META_HEADER record is the first record in a standard (nonplaceable)
    WMF metafile."""
    typedef = [
        ("H", "sType", 0),
        ("H", "sHeaderSize", 9),
        ("H", "sVersion", 0),
        ("H", "sSizeLow", 0),
        ("H", "sSizeHigh", 0),
        ("H", "sNumberOfObjects", 0),
        ("I", "nMaxRecord", 0),
        ("H", "sNumberOfMembers", 0),
    ]
    def __init__(self):
        META_UNKNOWN.__init__(self)
    def writeHdr(self, fh):
        # the metafile header itself carries no per-record WMF header
        return
    def hdrLen(self):
        return 0
@register
class META_EOF(META_UNKNOWN):
    """End-of-file record: terminates the WMF record stream."""
    emr_id = 0x0000
    def isEOF(self):
        return True
# MetafileType
MEMORYMETAFILE = 0x0001
DISKMETAFILE = 0x0002
# --- Registered WMF record stubs: each class only binds its record id; the
# --- payload (if any) is handled via typedef or the META_UNKNOWN fallback.
@register
class META_REALIZEPALETTE(META_UNKNOWN):
    emr_id = 0x0035
@register
class META_SETPALENTRIES(META_UNKNOWN):
    emr_id = 0x0037
@register
class META_SETBKMODE(META_SETMODE):
    emr_id = 0x0102
    def __init__(self, mode=OPAQUE):
        META_SETMODE.__init__(self, mode, TRANSPARENT, BKMODE_LAST)
@register
class META_SETMAPMODE(META_SETMODE):
    typedef = [("H", "iMapMode", 0)]
    emr_id = 0x0103
    def __init__(self, mode=MM_ANISOTROPIC):
        META_SETMODE.__init__(self, mode, MM_TEXT, MM_MAX)
@register
class META_SETROP2(META_UNKNOWN):
    emr_id = 0x0104
@register
class META_SETRELABS(META_UNKNOWN):
    emr_id = 0x0105
@register
class META_SETPOLYFILLMODE(META_SETMODE):
    emr_id = 0x0106
    def __init__(self, mode=ALTERNATE):
        META_SETMODE.__init__(self, mode, ALTERNATE, POLYFILL_LAST)
@register
class META_SETSTRETCHBLTMODE(META_UNKNOWN):
    emr_id = 0x0107
@register
class META_SETTEXTCHAREXTRA(META_UNKNOWN):
    emr_id = 0x0108
@register
class META_RESTOREDC(META_UNKNOWN):
    emr_id = 0x0127
@register
class META_RESIZEPALETTE(META_UNKNOWN):
    emr_id = 0x0139
@register
class META_DIBCREATEPATTERNBRUSH(META_UNKNOWN):
    emr_id = 0x0142
@register
class META_SETLAYOUT(META_UNKNOWN):
    emr_id = 0x0149
@register
class META_SETBKCOLOR(META_COLOR):
    emr_id = 0x0201
@register
class META_SETTEXTCOLOR(META_UNKNOWN):
    # NOTE(review): unlike META_SETBKCOLOR this derives from META_UNKNOWN,
    # not META_COLOR -- confirm whether the payload should be a COLORREF.
    emr_id = 0x0209
@register
class META_OFFSETVIEWPORTORG(META_UNKNOWN):
    emr_id = 0x0211
@register
class META_LINETO(META_UNKNOWN):
    emr_id = 0x0213
@register
class META_MOVETO(META_UNKNOWN):
    emr_id = 0x0214
@register
class META_OFFSETCLIPRGN(META_UNKNOWN):
    emr_id = 0x0220
@register
class META_FILLREGION(META_UNKNOWN):
    emr_id = 0x0228
@register
class META_SETMAPPERFLAGS(META_UNKNOWN):
    emr_id = 0x0231
@register
class META_SELECTPALETTE(META_HANDLE):
    emr_id = 0x0234
@register
class META_POLYGON(META_UNKNOWN):
    emr_id = 0x0324
@register
class META_POLYLINE(META_UNKNOWN):
    emr_id = 0x0325
    # payload: signed point count followed by that many 16-bit points
    typedef = [
        ("h", "sNumberOfPoints", 0),
        (Points(num="sNumberOfPoints", fmt="h"), "aPoints"),
    ]
@register
class META_SETTEXTJUSTIFICATION(META_UNKNOWN):
    emr_id = 0x020A
@register
class META_SETWINDOWORG(META_UNKNOWN):
    emr_id = 0x020B
    # NOTE(review): fields use unsigned "H"; negative origins would not
    # round-trip -- confirm against the WMF spec (spec uses signed 16-bit).
    # y precedes x in the serialized layout.
    typedef = [
        ("H", "ptlOrigin_y"),
        ("H", "ptlOrigin_x"),
    ]
    def __init__(self, x=0, y=0):
        META_UNKNOWN.__init__(self)
        self.ptlOrigin_x = x
        self.ptlOrigin_y = y
@register
class META_SETWINDOWEXT(META_UNKNOWN):
    emr_id = 0x020C
    # cy precedes cx in the serialized layout
    typedef = [
        ("H", "szlExtent_cy"),
        ("H", "szlExtent_cx"),
    ]
    def __init__(self, cx=0, cy=0):
        META_UNKNOWN.__init__(self)
        self.szlExtent_cx = cx
        self.szlExtent_cy = cy
# --- More registered record stubs: id binding only, payload unhandled.
@register
class META_SETVIEWPORTORG(META_UNKNOWN):
    emr_id = 0x020D
@register
class META_SETVIEWPORTEXT(META_UNKNOWN):
    emr_id = 0x020E
@register
class META_OFFSETWINDOWORG(META_UNKNOWN):
    emr_id = 0x020F
@register
class META_SCALEWINDOWEXT(META_UNKNOWN):
    emr_id = 0x0410
@register
class META_SCALEVIEWPORTEXT(META_UNKNOWN):
    emr_id = 0x0412
@register
class META_EXCLUDECLIPRECT(META_UNKNOWN):
    emr_id = 0x0415
@register
class META_INTERSECTCLIPRECT(META_UNKNOWN):
    emr_id = 0x0416
@register
class META_ELLIPSE(META_UNKNOWN):
    emr_id = 0x0418
@register
class META_FLOODFILL(META_UNKNOWN):
    emr_id = 0x0419
@register
class META_FRAMEREGION(META_UNKNOWN):
    emr_id = 0x0429
@register
class META_ANIMATEPALETTE(META_UNKNOWN):
    emr_id = 0x0436
@register
class META_TEXTOUT(META_UNKNOWN):
    emr_id = 0x0521
@register
class META_POLYPOLYGON(META_UNKNOWN):
    emr_id = 0x0538
@register
class META_EXTFLOODFILL(META_UNKNOWN):
    emr_id = 0x0548
@register
class META_RECTANGLE(META_UNKNOWN):
    emr_id = 0x041B
@register
class META_SETPIXEL(META_UNKNOWN):
    emr_id = 0x041F
@register
class META_ROUNDRECT(META_UNKNOWN):
    emr_id = 0x061C
@register
class META_PATBLT(META_UNKNOWN):
    emr_id = 0x061D
@register
class META_SAVEDC(META_UNKNOWN):
    emr_id = 0x001E
@register
class META_PIE(META_UNKNOWN):
    emr_id = 0x081A
@register
class META_STRETCHBLT(META_UNKNOWN):
    emr_id = 0x0B23
@register
class META_ESCAPE(META_UNKNOWN):
    emr_id = 0x0626
@register
class META_INVERTREGION(META_UNKNOWN):
    emr_id = 0x012A
@register
class META_PAINTREGION(META_UNKNOWN):
    emr_id = 0x012B
@register
class META_SELECTCLIPREGION(META_UNKNOWN):
    emr_id = 0x012C
@register
class META_SELECTOBJECT(META_HANDLE):
    emr_id = 0x012D
@register
class META_SETTEXTALIGN(META_UNKNOWN):
    emr_id = 0x012E
@register
class META_ARC(META_UNKNOWN):
    emr_id = 0x0817
@register
class META_CHORD(META_UNKNOWN):
    emr_id = 0x0830
@register
class META_BITBLT(META_UNKNOWN):
    emr_id = 0x0922
@register
class META_EXTTEXTOUT(META_UNKNOWN):
    """ExtTextOut record: draws a string at a reference point, with optional
    clip/opaque rectangle and per-character spacing (dx) array. The variable
    trailing fields are (de)serialized manually below because their presence
    depends on fwOpts and nChars."""
    emr_id = 0x0A32
    typedef = [
        ("h", "ptlReference_y", 0),
        ("h", "ptlReference_x", 0),
        ("h", "nChars", 0),
        ("H", "fwOpts", 0),
    ]
    # type descriptors of variable fields
    _rclBounds = Points(num=2, fmt="h")
    _string = EMFString(num="nChars", size=1, pad=2)
    _dx = List(num="nChars", fmt="h")
    def __init__(self, x=0, y=0, txt=""):
        META_UNKNOWN.__init__(self)
        self.ptlReference_x = x
        self.ptlReference_y = y
        self.string = txt
        self.nChars = len(txt) if txt is not None else 0
        self.charsize = 1
        # degenerate bounds rectangle by default (no clipping/opaquing)
        self.rclBounds = [[0, 0], [-1, -1]]
        self.dx = []
    def _write(self, fh, fmt, name, value):
        # serialize one variable field using its type descriptor
        output = fmt.pack(self, name, value)
        fh.write(output)
    def sizeExtra(self):
        # Serialize the variable tail into unhandleddata so the base class
        # can account for it in the record size.
        fh = BytesIO()
        if self.fwOpts & (ETO_OPAQUE | ETO_CLIPPED):
            fmt = self.__class__._rclBounds
            self._write(fh, fmt, "rclBounds", self.rclBounds)
        if self.nChars > 0:
            fmt = self.__class__._string
            self._write(fh, fmt, "string", self.string)
        if self.fwOpts & (ETO_GLYPH_INDEX | ETO_PDY):
            fmt = self.__class__._dx
            # dx may have a different length than nChars; temporarily swap
            # the count so the List descriptor writes the right number
            old_nChars = self.nChars
            try:
                self.nChars = len(self.dx)
                self._write(fh, fmt, "dx", self.dx)
            finally:
                self.nChars = old_nChars
        self.unhandleddata = fh.getvalue()
        return super().sizeExtra()
    def unserializeExtra(self, data):
        # Parse the variable tail in the same order it was written.
        super().unserializeExtra(data)
        ptr = 0
        if self.fwOpts & (ETO_OPAQUE | ETO_CLIPPED):
            fmt = self.__class__._rclBounds
            obj = self
            name = "rclBounds"
            (value, size) = fmt.unpack(obj, name, data, ptr)
            self.rclBounds = value
            ptr += size
        if self.nChars > 0:
            fmt = self.__class__._string
            (value, size) = fmt.unpack(self, "string", data, ptr)
            self.string = value
            ptr += size
        if self.fwOpts & (ETO_GLYPH_INDEX | ETO_PDY):
            fmt = self.__class__._dx
            # don't rely on nChars but compute it using the actual dx byte count
            old_nChars = self.nChars
            try:
                self.nChars = (len(data) - ptr) // 2
                (value, size) = fmt.unpack(self, "dx", data, ptr)
            finally:
                self.nChars = old_nChars
            self.dx = value
            ptr += size
# --- DIB/bitmap transfer record stubs (payload unhandled).
@register
class META_SETDIBTODEV(META_UNKNOWN):
    emr_id = 0x0D33
@register
class META_DIBBITBLT(META_UNKNOWN):
    emr_id = 0x0940
@register
class META_DIBSTRETCHBLT(META_UNKNOWN):
    emr_id = 0x0B41
@register
class META_STRETCHDIB(META_UNKNOWN):
    """StretchDIB record: copies a device-independent bitmap to a destination
    rectangle, stretching as needed. The DIB bytes follow the fixed fields
    and are kept in unhandleddata by the base class."""
    emr_id = 0x0F43
    # fields appear in reverse order in the serialized layout
    typedef = [
        ("I", "dwRop"),
        ("H", "iUsageSrc"),
        ("H", "cySrc"),
        ("H", "cxSrc"),
        ("H", "ySrc"),
        ("H", "xSrc"),
        ("H", "cyDest"),
        ("H", "cxDest"),
        ("H", "yDest"),
        ("H", "xDest"),
    ]
    def __init__(self):
        META_UNKNOWN.__init__(self)
    def unserializeExtra(self, data):
        # self.write_bitmap("test.bmp", data)
        super().unserializeExtra(data)
    def write_bitmap(self, file_name, data):
        """Debug helper: wrap the raw DIB bytes in a minimal BMP file header
        and write them to `file_name`.

        Args:
            file_name: destination path
            data: DIB bytes (starts with the 4-byte DIB header length)
        """
        bmp_header_len = 14
        dib_header_len = struct.unpack("<I", data[:4])
        with open(file_name, "wb") as f:
            # BUG FIX: the file is opened in binary mode, so every write must
            # be bytes -- the original wrote str literals, which raises
            # TypeError on Python 3.
            f.write(b"BM" + struct.pack("<I", bmp_header_len + len(data)))
            f.write(b"\0\0\0\0")
            # pixel data offset = BMP header + DIB header
            f.write(struct.pack("<I", bmp_header_len + dib_header_len[0]))
            f.write(data)
@register
class META_DELETEOBJECT(META_HANDLE):
    """Deletes the GDI object referenced by the serialized handle."""
    emr_id = 0x01F0
    def isDeleteObject(self):
        return True
@register
class META_CREATEPALETTE(META_CREATEOBJECT):
    emr_id = 0x00F7
@register
class META_CREATEPATTERNBRUSH(META_UNKNOWN):
    emr_id = 0x01F9
@register
class META_CREATEPENINDIRECT(META_CREATEOBJECT):
    """CreatePenIndirect record: defines a logical pen (style, width, color)."""
    emr_id = 0x02FA
    typedef = [
        ("H", "lopn_style"),
        (Points(num=1, fmt="H"), "lopn_width"),
        ("I", "lopn_color"),
    ]
    def __init__(self, style=PS_SOLID, width=1, color=0):
        META_CREATEOBJECT.__init__(self)
        self.lopn_style = style
        # NOTE(review): width is stored as a scalar, but the typedef declares
        # a one-point (x, y) field -- confirm the Points descriptor accepts it.
        self.lopn_width = width
        self.lopn_color = color
@register
class META_CREATEFONTINDIRECT(META_CREATEOBJECT):
    """CreateFontIndirect record: defines a logical font (LOGFONT)."""
    emr_id = 0x02FB
    typedef = [
        ("h", "lfHeight"),
        ("h", "lfWidth"),
        ("h", "lfEscapement"),
        ("h", "lfOrientation"),
        ("h", "lfWeight"),
        ("B", "lfItalic"),
        ("B", "lfUnderline"),
        ("B", "lfStrikeOut"),
        ("B", "lfCharSet"),
        ("B", "lfOutPrecision"),
        ("B", "lfClipPrecision"),
        ("B", "lfQuality"),
        ("B", "lfPitchAndFamily"),
        (CString(num=32), "lfFaceName"),
    ]
    def __init__(
        self,
        height=0,
        width=0,
        escapement=0,
        orientation=0,
        weight=FW_NORMAL,
        italic=0,
        underline=0,
        strike_out=0,
        charset=ANSI_CHARSET,
        out_precision=OUT_DEFAULT_PRECIS,
        clip_precision=CLIP_DEFAULT_PRECIS,
        quality=DEFAULT_QUALITY,
        pitch_family=DEFAULT_PITCH | FF_DONTCARE,
        name="<NAME>",
    ):
        META_CREATEOBJECT.__init__(self)
        self.lfHeight = height
        self.lfWidth = width
        self.lfEscapement = escapement
        self.lfOrientation = orientation
        self.lfWeight = weight
        self.lfItalic = italic
        self.lfUnderline = underline
        self.lfStrikeOut = strike_out
        self.lfCharSet = charset
        self.lfOutPrecision = out_precision
        self.lfClipPrecision = clip_precision
        self.lfQuality = quality
        self.lfPitchAndFamily = pitch_family
        # Cut at the first NUL, truncate to at most 31 characters, then pad
        # with NULs to exactly 32 (31 chars max + terminator).
        # BUG FIX: the original padded using the pre-truncation length, so a
        # name longer than 31 chars ended up only 31 chars long with no
        # terminating NUL instead of the required 32.
        name = name.split("\0")[0][:31]
        name += "\0" * (32 - len(name))
        self.lfFaceName = name
        # print("lfFaceName=%r" % self.lfFaceName)
    def hasHandle(self):
        return True
@register
class META_CREATEBRUSHINDIRECT(META_CREATEOBJECT):
    """CreateBrushIndirect record: defines a logical brush (LOGBRUSH)."""
    emr_id = 0x02FC
    typedef = [
        ("H", "lbStyle"),
        ("I", "lbColor"),
        ("H", "lbHatch"),
    ]
@register
class META_CREATEREGION(META_UNKNOWN):
    emr_id = 0x06FF
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from os.path import join, abspath, dirname
from datetime import datetime, timedelta
import os.path
from alsaaudio import Mixer
from adapt.intent import IntentBuilder
from mycroft import MycroftSkill, intent_handler, intent_file_handler
from mycroft.audio import wait_while_speaking
from mycroft.configuration.config import LocalConf, USER_CONFIG
from mycroft.messagebus.message import Message
from mycroft.util import play_wav, play_mp3
from mycroft.util.format import nice_date_time, nice_time
from mycroft.util.log import LOG
from mycroft.util.parse import fuzzy_match, extract_datetime, extract_number
from dateutil.parser import parse
from dateutil.rrule import rrulestr
from mycroft.util.time import (
to_utc, default_timezone, to_local, now_local, now_utc)
try:
    from mycroft.util.time import to_system
# BUG FIX: narrowed a bare `except:` (which also swallowed KeyboardInterrupt
# and unrelated errors inside the module) to the import failure it guards.
except ImportError:
    # Until to_system is included in 18.08.3, define it here too
    from dateutil.tz import gettz, tzlocal

    def to_system(dt):
        """ Convert a datetime to the system's local timezone
        Args:
            dt (datetime): A datetime (if no timezone, assumed to be UTC)
        Returns:
            (datetime): time converted to the local timezone
        """
        tz = tzlocal()
        if dt.tzinfo:
            return dt.astimezone(tz)
        else:
            return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
# WORKING:
# Set an alarm
# for 9
# no for 9 am
# Set an alarm for tomorrow evening at 8:20
# Set an alarm for monday morning at 8
# create an alarm for monday morning at 8
# snooze
# stop
# turn off the alarm
# create a repeating alarm for tuesdays at 7 am
# Set a recurring alarm
# Set a recurring alarm for weekdays at 7
# snooze for 15 minutes
# set an alarm for 20 seconds from now
# Set an alarm every monday at 7
# #
# TODO:
# "Set a recurring alarm for mondays and wednesdays at 7"
# "Set an alarm for 10 am every weekday - Adapt is missing "every""
# TODO: Context - save the alarm found in queries as context
# When is the next alarm
# > 7pm tomorrow
# Cancel it
# TODO:
# Interact with date/time to show a dot in the upper-right corner when alarm
# is set. Define a custom messagebus message
class AlarmSkill(MycroftSkill):
    def __init__(self):
        """Set up skill state: beep bookkeeping, sound options, the ALSA
        mixer (with one retry), and the persisted alarm list."""
        super(AlarmSkill, self).__init__()
        self.beep_process = None
        self.settings['max_alarm_secs'] = 10*60 # max time to beep: 10 min
        self.beep_start_time = None
        self.flash_state = 0
        # Seconds of gap between sound repeats.
        # The value name must match an option from the 'sound' value of the
        # settingmeta.json, which also corresponds to the name of an mp3
        # file in the skill's sounds/ folder. E.g. <skill>/sounds/bell.mp3
        #
        self.sound_interval = {
            "bell": 9.07,
            "escalate": 40.0,
            "constant_beep": 10.0,
            "beep4": 7.0,
            "chimes": 30.0
        }
        # default sound is 'constant_beep'
        # NOTE(review): this overwrites any previously saved 'sound' and
        # 'start_quiet' settings on every skill load -- confirm intended.
        self.settings['sound'] = 'constant_beep'
        self.settings['start_quiet'] = True
        try:
            self.mixer = Mixer()
        except Exception:
            # Retry instantiating the mixer once before giving up
            try:
                self.mixer = Mixer()
            except Exception as e:
                self.log.error('Couldn\'t allocate mixer, {}'.format(repr(e)))
                self.mixer = None
        self.saved_volume = None
        # Alarm list format [(timestamp, repeat_rule[, timestamp2]), ...]
        # where:
        #  timestamp is a POSIX timestamp float assumed to
        #   be in the utc timezone.
        #
        #  repeat_rule is for generating the next in a series.  Valid
        #   repeat_rules include None for a one-shot alarm or any other
        #   iCalendar rule from RFC <https://tools.ietf.org/html/rfc5545>.
        #
        #  timestamp2 is optional, for a recently passed alarm at boot or a
        #   running or snoozed alarm.  This is the time that is used for
        #   the repeat.  E.g. your daily alarm is a 24 hours increment
        #   from when it was set, not 24 hours from when it shut off
        #   or was snoozed.
        # NOTE: Using list instead of tuple because of serialization
        self.settings["alarm"] = []
    def dump_alarms(self, tag=""):
        """Log every stored alarm plus the current time, in both UTC and
        local form.  Debug aid only; no state is changed.

        Args:
            tag (str): label appended to the header line
        """
        # Useful when debugging
        self.log.info("********** ALARMS "+tag+"*************")
        self.log.info(self.settings["alarm"])
        idx = 1
        for alarm in self.settings["alarm"]:
            dt = self.get_alarm_local(alarm)
            self.log.info(str(idx) + " - " + str(alarm) +
                          " U" + str(dt) + " L" + str(to_local(dt)))
            idx += 1
        now_ts = to_utc(now_utc()).timestamp()
        dt = datetime.fromtimestamp(now_ts)
        self.log.info("-"*40)
        self.log.info("NOW: " + str(now_ts) +
                      " U" + str(to_utc(dt)) + " L" + str(to_local(dt)))
        self.log.info("*"*60)
    def initialize(self):
        """Skill post-load hook: load vocab, reschedule persisted alarms,
        and register listening-state event handlers."""
        self.register_entity_file('daytype.entity')  # TODO: Keep?
        self.recurrence_dict = self.translate_namedvalues('recurring')
        # Time is the first value, so this will sort alarms by time
        self.settings["alarm"].sort()
        # This will reschedule alarms which have expired within the last
        # 5 minutes, and cull anything older.
        self._curate_alarms(5*60)
        self._schedule()
        # TODO: Move is_listening into MycroftSkill and reimplement (signal?)
        self.is_currently_listening = False
        self.add_event('recognizer_loop:record_begin', self.on_listen_started)
        self.add_event('recognizer_loop:record_end', self.on_listen_ended)
def on_listen_started(self, message):
self.log.info("on started...")
self.is_currently_listening = True
def on_listen_ended(self, message):
self.log.info("on ended...")
self.is_currently_listening = False
def is_listening(self):
return self.is_currently_listening
def get_alarm_local(self, alarm=None, timestamp=None):
    """Return the trigger moment as an aware datetime in the device's
    default timezone.

    Args:
        alarm: alarm entry [timestamp, repeat_rule, ...]; used when no
            explicit timestamp is supplied
        timestamp: POSIX UTC timestamp overriding the alarm entry
    """
    ts = timestamp if timestamp else alarm[0]
    return datetime.fromtimestamp(ts, default_timezone())
def set_alarm(self, when, repeat=None):
    """Store an alarm for `when` (optionally repeating) and reschedule.

    Returns the stored entry [utc_timestamp, repeat_rule].
    """
    if repeat:
        new_alarm = self._create_recurring_alarm(when, repeat)
    else:
        new_alarm = [to_utc(when).timestamp(), ""]
    self.settings["alarm"].append(new_alarm)
    self._schedule()
    return new_alarm
def _schedule(self):
    """(Re)arm the single 'NextAlarm' timed event for the soonest alarm."""
    # cancel any existing timed event
    self.cancel_scheduled_event('NextAlarm')
    self._curate_alarms()
    # set timed event for next alarm (if it exists); the list is kept
    # sorted, so index 0 is always the soonest
    if self.settings["alarm"]:
        dt = self.get_alarm_local(self.settings["alarm"][0])
        self.schedule_event(self._alarm_expired,
                            to_system(dt),
                            name='NextAlarm')
def _curate_alarms(self, curation_limit=1):
    """Drop stale alarms and requeue ones that should still fire.

    Long-expired repeating alarms advance to their next occurrence;
    recently-expired ones (within `curation_limit`) are requeued to
    trigger immediately, carrying the original base time as a third
    entry for repeat calculations.

    Args:
        curation_limit (int, optional): Seconds past expired at which to
            remove the alarm
    """
    alarms = []
    now_ts = to_utc(now_utc()).timestamp()
    for alarm in self.settings["alarm"]:
        # Alarm format == [timestamp, repeat_rule[, orig_alarm_timestamp]]
        if alarm[0] < now_ts:
            if alarm[0] < (now_ts - curation_limit):
                # skip playing an old alarm
                if alarm[1]:
                    # resched in future if repeat rule exists
                    alarms.append(self._next_repeat(alarm))
            else:
                # schedule for right now, with the
                # third entry as the original base time
                base = alarm[2] if len(alarm) == 3 else alarm[0]
                alarms.append([now_ts+1, alarm[1], base])
        else:
            alarms.append(alarm)
    alarms.sort()
    self.settings["alarm"] = alarms
def _next_repeat(self, alarm):
    """Return a fresh [timestamp, repeat_rule] for the next occurrence.

    Evaluates the alarm's iCal RRULE against the original base time
    (the third entry when present, i.e. a snoozed/requeued alarm).
    """
    # evaluate recurrence to the next instance
    r = rrulestr("RRULE:" + alarm[1])
    if len(alarm) == 3:
        # repeat from the time it was originally set, not when it rang
        ref = datetime.fromtimestamp(alarm[2])
    else:
        ref = datetime.fromtimestamp(alarm[0])
    # rrule stepping needs a naive local datetime
    local_ref_notz = to_local(ref).replace(tzinfo=None)
    dt_next_local = r.after(local_ref_notz)
    return [to_utc(dt_next_local).timestamp(), alarm[1]]
def _create_recurring_alarm(self, when, repeat):
    """Build an alarm entry [utc_timestamp, rrule] for a recurrence.

    `repeat` is a key of self.recurrence_dict whose value is a string
    of day numbers (0=Sunday ... 6=Saturday).
    """
    # TODO: Support more complex alarms, e.g. first monday, monthly, etc
    abbr = ["SU", "MO", "TU", "WE", "TH", "FR", "SA"]
    days = [abbr[int(day)] for day in self.recurrence_dict[repeat].split()]
    rule = "FREQ=WEEKLY;INTERVAL=1;BYDAY=" + ",".join(days) if days else ""
    return [to_utc(when).timestamp(), rule]
def has_expired_alarm(self):
    """True if an alarm should be 'going off' right now.

    Snoozed alarms don't count until they are triggered again.
    """
    if not self.settings["alarm"]:
        return False
    now_ts = to_utc(now_utc()).timestamp()
    return any(alarm[0] <= now_ts for alarm in self.settings["alarm"])
# Wake me on ... (hard to match with Adapt entities)
@intent_handler(IntentBuilder("").require("WakeMe").
                optionally("Recurring").optionally("Recurrence"))
def handle_wake_me(self, message):
    """'Wake me ...' phrasing; same flow as setting an alarm."""
    self.handle_set_alarm(message)
# Set an alarm for ...
@intent_handler(IntentBuilder("").require("Set").require("Alarm").
                optionally("Recurring").optionally("Recurrence"))
def handle_set_alarm(self, message):
    """Create an alarm from an utterance, prompting for anything missing.

    Asks follow-up questions for an unrecognized recurrence or a
    missing time, then schedules the alarm and confirms verbally with
    an on-screen animation.
    """
    utt = message.data.get('utterance').lower()
    recurrence = None
    if message.data.get('Recurring'):
        recurrence = message.data.get('Recurrence')
        if not recurrence:
            # a bug in Adapt is missing the recurrence.voc. Look ourselves
            for r in self.recurrence_dict:
                if r in utt:
                    recurrence = r
        # keep asking until a known recurrence phrase (or silence)
        while recurrence not in self.recurrence_dict:
            r = self.get_response('query.recurrence', num_retries=1)
            if not r:
                return
            recurrence = r
    # Get the time; extract_datetime("now") is used as the "no time
    # found" sentinel throughout
    when = extract_datetime(utt)
    now = extract_datetime("now")
    while not when or when[0] == now[0]:
        # No time given, ask for one
        r = self.get_response('query.for.when', num_retries=1)
        if not r:
            return
        when = extract_datetime(r)
    # Verify time
    alarm_time = when[0]
    confirmed_time = False
    # NOTE(review): by this point the loop above guarantees `when` holds
    # a non-"now" time, so this condition is already False and the
    # confirmation dialog below appears unreachable — confirm intent.
    while not when or when[0] == now[0]:
        if recurrence:
            t = nice_time(alarm_time, use_ampm=True)
            conf = self.ask_yesno('confirm.recurring.alarm',
                                  data={'time': t,
                                        'recurrence': recurrence})
        else:
            t = nice_date_time(alarm_time, now=now[0], use_ampm=True)
            conf = self.ask_yesno('confirm.alarm', data={'time': t})
        if not conf:
            return
        if conf == 'yes':
            when = [alarm_time]
            confirmed_time = True
        else:
            # check if a new (corrected) time was given
            when = extract_datetime(conf)
            if not when or when[0] == now[0]:
                # Not a confirmation and no date/time in statement, quit
                return
            alarm_time = when[0]
            when = None  # reverify
    if not recurrence:
        alarm = self.set_alarm(alarm_time)
    else:
        alarm = self.set_alarm(alarm_time, repeat=recurrence)
    # Don't want to hide the animation
    self.enclosure.deactivate_mouth_events()
    if confirmed_time:
        self.speak_dialog("alarm.scheduled")
    else:
        t = self._describe(alarm)
        reltime = nice_relative_time(self.get_alarm_local(alarm))
        if recurrence:
            self.speak_dialog("recurring.alarm.scheduled.for.time",
                              data={"time": t, "rel": reltime})
        else:
            self.speak_dialog("alarm.scheduled.for.time",
                              data={"time": t, "rel": reltime})
    self._show_alarm_anim(alarm_time)
    self.enclosure.activate_mouth_events()
@property
def use_24hour(self):
    """Whether the device is configured for 24-hour time display."""
    fmt = self.config_core.get('time_format')
    return fmt == 'full'
def _flash_alarm(self, message):
    """Repeating-event callback: blink the time while an alarm sounds.

    Shows the time for three ticks, blanks for one, and triggers the
    mic so a "stop" can be heard without a wake word.
    """
    # draw on the display
    if self.flash_state < 3:
        if self.flash_state == 0:
            alarm_timestamp = message.data["alarm_time"]
            dt = self.get_alarm_local(timestamp=alarm_timestamp)
            self._render_time(dt)
        self.flash_state += 1
    else:
        # fourth tick: blank the display and restart the cycle
        self.enclosure.mouth_reset()
        self.flash_state = 0
    # Listen for cries of "Stop!!!"
    if not self.is_listening():
        self.log.info("Auto listen...")
        self.bus.emit(Message('mycroft.mic.listen'))
def _show_alarm_anim(self, dt):
    """Animated confirmation of the alarm.

    Shows the time briefly, then plays a 15-frame ringing animation
    from the bundled anim/ directory.
    """
    self.enclosure.mouth_reset()
    self._render_time(dt)
    time.sleep(2)
    self.enclosure.mouth_reset()
    # Show an animation
    # TODO: mouth_display_png() is choking images > 8x8
    # (likely on the enclosure side)
    for i in range(1, 16):
        # each frame is split into four 8px-wide strips (see TODO above)
        png = join(abspath(dirname(__file__)),
                   "anim",
                   "Alarm-"+str(int(i))+"-1.png")
        # self.enclosure.mouth_display_png(png, x=0, y=0, refresh=False,
        #                                  invert=True)
        png = join(abspath(dirname(__file__)),
                   "anim",
                   "Alarm-"+str(int(i))+"-2.png")
        if i < 8:
            self.enclosure.mouth_display_png(png, x=8, y=0, refresh=False,
                                             invert=True)
        png = join(abspath(dirname(__file__)),
                   "anim",
                   "Alarm-"+str(int(i))+"-3.png")
        self.enclosure.mouth_display_png(png, x=16, y=0, refresh=False,
                                         invert=True)
        png = join(abspath(dirname(__file__)),
                   "anim",
                   "Alarm-"+str(int(i))+"-4.png")
        self.enclosure.mouth_display_png(png, x=24, y=0, refresh=False,
                                         invert=True)
        if i == 4:
            # hold this frame a little longer
            time.sleep(1)
        else:
            time.sleep(0.15)
    self.enclosure.mouth_reset()
def _render_time(self, when):
    """Draw a time like "8:00 AM" on the Mark-1 mouth display.

    Args:
        when (datetime): moment to render, formatted per the device's
            12/24-hour setting.

    Note: the parameter was renamed from `datetime`, which shadowed the
    imported datetime class inside this method.
    """
    # Show the time in numbers "8:00 AM"
    timestr = nice_time(when, speech=False, use_ampm=True,
                        use_24hour=self.use_24hour)
    x = 16 - ((len(timestr)*4) // 2)  # centers on display
    if not self.use_24hour:
        x += 1  # account for wider letters P and M, offset by the colon
    # draw on the display, glyph by glyph; widths differ per character
    for ch in timestr:
        if ch == ":":
            png = "colon.png"
            w = 2
        elif ch == " ":
            png = "blank.png"
            w = 2
        elif ch in ('A', 'P', 'M'):
            png = ch + ".png"
            w = 5
        else:
            png = ch + ".png"
            w = 4
        png = join(abspath(dirname(__file__)), "anim", png)
        self.enclosure.mouth_display_png(png, x=x, y=2, refresh=False)
        x += w
def _describe(self, alarm):
    """Return a spoken-friendly description of an alarm entry.

    Repeating alarms are described by a recurrence name when one
    matches exactly, otherwise by an assembled list of day names;
    one-shot alarms get a date+time phrase.
    """
    if alarm[1]:
        # Describe repeating alarms
        if alarm[1].startswith("FREQ=WEEKLY;INTERVAL=1;BYDAY="):
            days = alarm[1][29:]  # e.g. "SU,WE"
            days = (days.replace("SU", "0").replace("MO", "1").
                    replace("TU", "2").replace("WE", "3").
                    replace("TH", "4").replace("FR", "5").
                    replace("SA", "6").replace(",", " "))  # now "0 3"
            desc = None
            for r in self.recurrence_dict:
                if self.recurrence_dict[r] == days:
                    desc = r
                    break  # accept the first match
            # Assemble a long desc, e.g. "Monday and wednesday"
            if not desc:
                day_names = []
                for day in days.split(" "):
                    for r in self.recurrence_dict:
                        # BUGFIX: compare by value; the original used
                        # `is`, which almost never matches two distinct
                        # string objects, so day names were never found
                        if self.recurrence_dict[r] == day:
                            day_names.append(r)
                            break
                # TODO: Make translatable. mycroft.util.format.join("and")?
                desc = ", ".join(day_names[:-1]) + " and " + day_names[-1]
        else:
            desc = "repeats"
        dt = self.get_alarm_local(alarm)
        return self.translate('recurring.alarm',
                              data={'time': nice_time(dt, use_ampm=True),
                                    'recurrence': desc})
    else:
        dt = self.get_alarm_local(alarm)
        return nice_date_time(dt, now=now_local(), use_ampm=True)
@intent_file_handler('query.next.alarm.intent')
def handle_query_next(self, message):
    """Speak the soonest alarm and how far away it is."""
    alarms = self.settings["alarm"]
    if not alarms:
        self.speak_dialog("alarms.list.empty")
        return
    soonest = alarms[0]
    reltime = nice_relative_time(self.get_alarm_local(soonest))
    self.speak_dialog("next.alarm",
                      data={"when": self._describe(soonest),
                            "duration": reltime})
@intent_file_handler('alarm.status.intent')
def handle_status(self, message):
    """List the active alarms (describes at most four of them)."""
    total = len(self.settings["alarm"])
    if not total:
        self.speak_dialog("alarms.list.empty")
        return
    descriptions = []
    for alarm in self.settings["alarm"]:
        descriptions.append(self._describe(alarm))
        if len(descriptions) > 3:
            break
    if total == 1:
        self.speak_dialog("alarms.list.single",
                          data={'item': descriptions[0]})
    else:
        self.speak_dialog("alarms.list.multi",
                          data={'count': total,
                                'item': ", ".join(descriptions[:-1]),
                                'itemAnd': descriptions[-1]})
@intent_file_handler('delete.all.intent')
def handle_delete_all(self, message):
    """Cancel every alarm after a verbal confirmation."""
    total = len(self.settings["alarm"])
    if not total:
        self.speak_dialog("alarms.list.empty")
        return
    # Confirm cancel alarms...
    if total == 1:
        prompt = 'ask.cancel.alarm'
    else:
        prompt = 'ask.cancel.alarm.plural'
    if self.ask_yesno(prompt, data={"count": total}) == 'yes':
        self.settings["alarm"] = []
        self._schedule()
        self.speak_dialog('alarms.cancelled')
@intent_file_handler('delete.intent')
def handle_delete(self, message):
    """Cancel a single alarm named by time, index, or "all".

    First tries a date/time spoken in the request itself; otherwise
    lists the alarms and asks which one, accepting a time, an ordinal
    ("the second one"), or an "all"/"both"-style answer.
    """
    total = len(self.settings["alarm"])
    if not total:
        self.speak_dialog("alarms.list.empty")
        return
    utt = message.data.get('utterance') or ""
    # NOTE(review): `time` is never used below and shadows the imported
    # time module inside this method — candidate for removal.
    time = message.data.get('time') or ""
    # First see if the user spoke a date/time in the delete request
    when = extract_datetime(utt)
    now = extract_datetime("now")
    if when and when[0] != now[0]:
        # Look for a match...
        search = when[0]
        for alarm in self.settings["alarm"]:
            # TODO: Handle repeating desc
            dt = self.get_alarm_local(alarm)
            delta = search - dt
            delta2 = dt - search
            if (abs(delta.total_seconds()) < 60 or
                    abs(delta2.total_seconds()) < 60):
                # Really close match, just delete it
                desc = self._describe(alarm)
                self.settings["alarm"].remove(alarm)
                self._schedule()
                self.speak_dialog("alarm.cancelled.desc",
                                  data={'desc': desc})
                return
            if (abs(delta.total_seconds()) < 60*60*2 or
                    abs(delta2.total_seconds()) < 60*60*2):
                # Not super close, get confirmation
                desc = self._describe(alarm)
                if self.ask_yesno('ask.cancel.desc.alarm',
                                  data={'desc': desc}) == 'yes':
                    self.settings["alarm"].remove(alarm)
                    self._schedule()
                    self.speak_dialog("alarm.cancelled")
                    return
    if total == 1:
        # only one candidate: just confirm it
        desc = self._describe(self.settings["alarm"][0])
        if self.ask_yesno('ask.cancel.desc.alarm',
                          data={'desc': desc}) == 'yes':
            self.settings["alarm"] = []
            self._schedule()
            self.speak_dialog("alarm.cancelled")
        return
    else:
        # list the alarms
        self.handle_status(message)
        resp = self.get_response('ask.which.alarm.delete')
        if not resp:
            return
        when = extract_datetime(resp)
        if when and when[0] != now[0]:
            # Attempting to delete by spoken data
            search = when[0]
            for alarm in self.settings["alarm"]:
                # TODO: Handle repeating desc
                dt = self.get_alarm_local(alarm)
                delta = search - dt
                delta2 = dt - search
                if (abs(delta.total_seconds()) < 60 or
                        abs(delta2.total_seconds()) < 60):
                    # Really close match, just delete it
                    desc = self._describe(alarm)
                    self.settings["alarm"].remove(alarm)
                    self._schedule()
                    self.speak_dialog("alarm.cancelled.desc",
                                      data={'desc': desc})
                    return
                if (abs(delta.total_seconds()) < 60*60*2 or
                        abs(delta2.total_seconds()) < 60*60*2):
                    # Not super close, get confirmation
                    desc = self._describe(alarm)
                    if self.ask_yesno('ask.cancel.desc.alarm',
                                      data={'desc': desc}) == 'yes':
                        self.settings["alarm"].remove(alarm)
                        self._schedule()
                        self.speak_dialog("alarm.cancelled")
                        return
        # Attempt to delete by spoken index
        idx = extract_number(resp, ordinals=True)
        if idx and idx > 0 and idx <= total:
            idx = int(idx)
            desc = self._describe(self.settings["alarm"][idx-1])
            del self.settings["alarm"][idx-1]
            self._schedule()
            self.speak_dialog("alarm.cancelled", data={'desc': desc})
            return
        # Attempt to match by words, e.g. "all", "both"
        if self.voc_match(resp, 'All'):
            self.settings["alarm"] = []
            self._schedule()
            self.speak_dialog('alarms.cancelled')
            return
    # Failed to delete
    self.speak_dialog("alarm.not.found")
def _alarm_expired(self):
    """'NextAlarm' event handler: start sounding the current alarm.

    Picks the configured sound (falling back to constant_beep), starts
    muted for a fade-in when start_quiet is set, disables the listen
    beep, and kicks off the beep and display-flash loops.
    """
    # Find user-selected alarm sound
    alarm_file = join(abspath(dirname(__file__)),
                      'sounds', self.settings["sound"] + ".mp3")
    if os.path.isfile(alarm_file):
        self.sound_file = alarm_file
        self.sound_repeat = self.sound_interval[self.settings["sound"]]
    else:
        # unknown sound name: fall back to the bundled constant beep
        self.sound_file = join(abspath(dirname(__file__)),
                               'sounds', "constant_beep.mp3")
        self.sound_repeat = self.sound_interval["constant_beep"]
    if self.settings['start_quiet'] and self.mixer:
        if not self.saved_volume:  # don't overwrite if already saved!
            self.saved_volume = self.mixer.getvolume()
            self.volume = 0  # increase by 10% each pass
    else:
        self.saved_volume = None
    self._disable_listen_beep()
    self._play_beep()
    # flash the alarm time on the display once a second
    self.flash_state = 0
    self.enclosure.deactivate_mouth_events()
    alarm = self.settings["alarm"][0]
    self.schedule_repeating_event(self._flash_alarm, 0, 1,
                                  name='Flash',
                                  data={"alarm_time": alarm[0]})
def __end_beep(self):
    """Stop the repeating beep and restore audio/config state."""
    self.cancel_scheduled_event('Beep')
    self.beep_start_time = None
    proc = self.beep_process
    if proc:
        proc.kill()
        self.beep_process = None
    self._restore_volume()
    self._restore_listen_beep()
def __end_flash(self):
    """Stop the time-flashing loop and hand the display back."""
    self.cancel_scheduled_event('Flash')
    self.enclosure.mouth_reset()
    self.enclosure.activate_mouth_events()
def _stop_expired_alarm(self):
    """Silence a currently-sounding alarm.

    Returns True when an alarm was actually stopped.
    """
    if not self.has_expired_alarm():
        return False
    self.__end_beep()
    self.__end_flash()
    self.cancel_scheduled_event('NextAlarm')
    self._curate_alarms(0)  # drop the alarm that just fired
    self._schedule()
    return True
def _restore_volume(self):
    """Undo the fade-in volume override, if one was saved."""
    saved = self.saved_volume
    if saved:
        self.mixer.setvolume(saved[0])
        self.saved_volume = None
def _disable_listen_beep(self):
    """Turn off the wake-listening beep while an alarm sounds.

    Saves the user's current confirm_listening value (once) so that
    _restore_listen_beep() can put it back.
    """
    user_config = LocalConf(USER_CONFIG)
    if 'user_beep_setting' not in self.settings:
        # Save any current local config setting
        self.settings['user_beep_setting'] = user_config.get("confirm_listening", None)
    # Disable in local config
    user_config.merge({"confirm_listening": False})
    user_config.store()
    # Notify all processes to update their loaded configs
    self.bus.emit(Message('configuration.updated'))
def _restore_listen_beep(self):
    """Restore the confirm_listening value saved by _disable_listen_beep().

    No-op when nothing was saved. (Also removes an unused local,
    `new_conf_values`, left in the original.)
    """
    if 'user_beep_setting' in self.settings:
        user_config = LocalConf(USER_CONFIG)
        if self.settings["user_beep_setting"] is None:
            # no explicit user value existed before; wipe our override
            del user_config["confirm_listening"]
        else:
            user_config.merge({"confirm_listening":
                               self.settings["user_beep_setting"]})
        user_config.store()
        # Notify all processes to update their loaded configs
        self.bus.emit(Message('configuration.updated'))
        del self.settings["user_beep_setting"]
@intent_file_handler('snooze.intent')
def snooze_alarm(self, message):
    """Silence the ringing alarm and requeue it a few minutes out.

    Defaults to 9 minutes when no duration is spoken. The snoozed
    entry keeps the original trigger time as a third element so
    repeating alarms still advance from their base time.
    """
    if not self.has_expired_alarm():
        return
    self.__end_beep()
    self.__end_flash()
    utt = message.data.get('utterance') or ""
    snooze_for = extract_number(utt)
    if not snooze_for or snooze_for < 1:
        snooze_for = 9  # default to 9 minutes
    # Snooze always applies to the first alarm in the sorted array
    alarm = self.settings["alarm"][0]
    dt = self.get_alarm_local(alarm)
    snooze = to_utc(dt) + timedelta(minutes=snooze_for)
    # BUGFIX: entries are [ts, rule] or [ts, rule, orig]; the original
    # `len(alarm) < 2` test sent every normal 2-element alarm to the
    # else branch and crashed indexing alarm[2].
    if len(alarm) < 3:
        original_time = alarm[0]
    else:
        original_time = alarm[2]  # already snoozed
    # Fill schedule with a snoozed entry -- 3 items:
    # snooze_expire_timestamp, repeat_rule, original_timestamp
    self.settings["alarm"][0] = [snooze.timestamp(),
                                 alarm[1],
                                 original_time]
    self._schedule()
def _play_beep(self, message=None):
    """Play alarm sound file, rescheduling itself until stopped.

    Each pass restarts playback, schedules the next pass sound_repeat
    seconds out, and (when fading in) raises the volume by 10%. Gives
    up after settings["max_alarm_secs"] of continuous ringing.
    """
    now = now_local()
    if not self.beep_start_time:
        self.beep_start_time = now
    elif (now - self.beep_start_time).total_seconds() > self.settings["max_alarm_secs"]:
        # alarm has been running long enough, auto-quiet it
        # NOTE(review): log text says "10 minutes" but the real limit is
        # whatever max_alarm_secs holds — confirm/update the message.
        self.log.info("Automatically quieted alarm after 10 minutes")
        self._stop_expired_alarm()
        return
    # schedule the next pass before starting playback
    next_beep = now + timedelta(seconds=(self.sound_repeat))
    self.cancel_scheduled_event('Beep')
    self.schedule_event(self._play_beep, to_system(next_beep), name='Beep')
    # kill any previous playback before restarting the sound
    if self.beep_process:
        self.beep_process.kill()
    # Increase volume each pass until fully on
    if self.saved_volume:
        if self.volume < 90:
            self.volume += 10
        self.mixer.setvolume(self.volume)
    self.beep_process = play_mp3(self.sound_file)
@intent_file_handler('stop.intent')
def handle_alternative_stop(self, message):
    """Catch spoken 'stop' phrasings and route them to stop()."""
    self.stop()
def stop(self):
    """Mycroft stop hook: silence a ringing alarm.

    Returns True if an alarm was stopped (i.e. the stop was consumed).
    """
    return self._stop_expired_alarm()
def create_skill():
    """Entry point used by the Mycroft skill loader."""
    return AlarmSkill()
##########################################################################
# TODO: Move to mycroft.util.format and support translation
def nice_relative_time(when, lang="en-us"):
    """Create a relative phrase to roughly describe a datetime
    Examples are "25 seconds", "one minute", "7 days".
    Args:
        when (datetime): Local timezone
        lang (str, optional): Defaults to "en-us". (Currently unused.)
    Returns:
        str: description of the given time
    """
    now = now_local()
    delta = to_local(when) - now
    total_secs = delta.total_seconds()
    if total_secs < 1:
        return "now"
    if total_secs < 90:
        # BUGFIX: compare whole seconds; the original tested the raw
        # float == 1, which essentially never holds, making the
        # "one second" phrase unreachable
        if int(total_secs) == 1:
            return "one second"
        return "{} seconds".format(int(total_secs))
    minutes = int((total_secs + 30) // 60)  # +30 to round minutes
    if minutes < 90:
        return "one minute" if minutes == 1 else "{} minutes".format(minutes)
    hours = int((minutes + 30) // 60)  # +30 to round hours
    if hours < 36:
        return "one hour" if hours == 1 else "{} hours".format(hours)
    # TODO: "2 weeks", "3 months", "4 years", etc
    days = int((hours + 12) // 24)  # +12 to round days
    return "1 day" if days == 1 else "{} days".format(days)
|
<reponame>dcrmg/Efficient-Segmentation-Networks
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def fixed_padding(inputs, kernel_size, dilation):
    """Pad `inputs` symmetrically so a stride-1 conv with the given
    kernel size and dilation preserves spatial size ('same' padding).
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
    total_pad = effective_kernel - 1
    pad_front = total_pad // 2
    pad_back = total_pad - pad_front
    return F.pad(inputs, (pad_front, pad_back, pad_front, pad_back))
class SeparableConv2d(nn.Module):
    """Depthwise-separable conv: depthwise conv (+BN) then 1x1 pointwise.

    Input is padded via fixed_padding, so spatial size is preserved for
    stride 1 (the conv itself uses padding=0).
    """
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=inplanes -> one filter per input channel (depthwise)
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
                               groups=inplanes, bias=bias)
        self.bn = nn.BatchNorm2d(inplanes)
        # 1x1 pointwise conv mixes channels up/down to `planes`
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
        x = self.conv1(x)
        x = self.bn(x)
        x = self.pointwise(x)
        return x
# encoder block
class Block(nn.Module):
    """Residual encoder block of three separable convs (bottleneck planes//4).

    A 1x1 projection shortcut is used when channel count or stride
    changes, identity otherwise. `start_with_relu=False` drops the
    leading ReLU (used for the first block after a plain conv).
    """
    def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True):
        super(Block, self).__init__()
        if planes != inplanes or stride != 1:
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = nn.BatchNorm2d(planes)
        else:
            self.skip = None
        # Deep SeparableConv1. The original duplicated this list for
        # both start_with_relu cases; only the optional leading ReLU
        # differs, so build it once (module order/indices unchanged).
        first_conv = []
        if start_with_relu:
            first_conv.append(nn.ReLU())
        first_conv.append(SeparableConv2d(inplanes, planes // 4, 3, 1, dilation))
        first_conv.append(nn.BatchNorm2d(planes // 4))
        first_conv.append(nn.ReLU())
        # second separable conv
        rep = [SeparableConv2d(planes // 4, planes // 4, 3, 1, dilation),
               nn.BatchNorm2d(planes // 4),
               nn.ReLU()]
        # third separable conv: downsampling variant when stride != 1
        conv_stride = 2 if stride != 1 else 1
        rep.append(SeparableConv2d(planes // 4, planes, 3, conv_stride))
        rep.append(nn.BatchNorm2d(planes))
        self.first_conv = nn.Sequential(*first_conv)
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.first_conv(inp)
        x = self.rep(x)
        if self.skip is not None:
            skip = self.skipbn(self.skip(inp))
        else:
            skip = inp
        return x + skip
class enc(nn.Module):
    """Encoder stage: one stride-2 Block followed by stride-1 Blocks.

    stage: which encoder this is (2, 3 or 4) — stages 2 and 4 use 4
    blocks, stage 3 uses 6.
    """
    def __init__(self, in_channels, out_channels, stage):
        super(enc, self).__init__()
        if stage in (2, 4):
            rep_nums = 4
        elif stage == 3:
            rep_nums = 6
        else:
            # the original left rep_nums undefined for other values and
            # crashed later with a NameError; fail fast instead
            raise ValueError("stage must be 2, 3 or 4, got {}".format(stage))
        rep = [Block(in_channels, out_channels, stride=2, start_with_relu=False)]
        for _ in range(rep_nums - 1):
            rep.append(Block(out_channels, out_channels, stride=1, start_with_relu=True))
        self.reps = nn.Sequential(*rep)

    def forward(self, lp):
        return self.reps(lp)
class fcattention(nn.Module):
    """Channel attention head: global pool -> FC(1000) -> 1x1 conv gate.

    The gate is broadcast over the spatial dims and multiplied into
    the input feature map.
    """
    def __init__(self, in_channels, out_channels):
        super(fcattention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, 1000, bias=False),
        )
        self.conv = nn.Sequential(
            nn.Conv2d(1000, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        pooled = self.avg_pool(x).view(batch, channels)
        gate = self.fc(pooled).view(batch, 1000, 1, 1)
        gate = self.conv(gate)
        return x * gate.expand_as(x)
class xceptionAx3(nn.Module):
    """DFANet-style backbone: three cascaded encoder passes (a/b/c) with
    feature aggregation and a lightweight decoder.

    Each pass runs enc2/enc3/enc4 plus an fcattention head; the
    attention output is upsampled and concatenated into the next pass.
    The decoder fuses the three enc2 taps and the three attention maps
    at the highest tapped resolution, then predicts per-pixel class
    scores upsampled to the input size.
    """
    def __init__(self, num_classes):
        super(xceptionAx3, self).__init__()
        # stem: 3 -> 8 channels at half resolution
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(num_features=8),
            nn.ReLU())
        # encoder stages; the b/c variants take concatenated inputs
        # (previous stage output + upsampled attention), hence the
        # larger in_channels
        self.enc2a = enc(in_channels=8, out_channels=48, stage=2)
        self.enc2b = enc(in_channels=240, out_channels=48, stage=2)
        self.enc2c = enc(in_channels=240, out_channels=48, stage=2)
        self.enc3a = enc(in_channels=48, out_channels=96, stage=3)
        self.enc3b = enc(in_channels=144, out_channels=96, stage=3)
        self.enc3c = enc(in_channels=144, out_channels=96, stage=3)
        self.enc4a = enc(in_channels=96, out_channels=192, stage=4)
        self.enc4b = enc(in_channels=288, out_channels=192, stage=4)
        self.enc4c = enc(in_channels=288, out_channels=192, stage=4)
        # per-pass channel attention heads
        self.fca1 = fcattention(192, 192)
        self.fca2 = fcattention(192, 192)
        self.fca3 = fcattention(192, 192)
        # decoder: reduce every tapped feature map to 32 channels
        self.enc2a_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(48, 32, kernel_size=1, stride=1, bias=False),
                                                            nn.BatchNorm2d(32),
                                                            nn.ReLU())
        self.enc2b_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(48, 32, kernel_size=1, stride=1, bias=False),
                                                            nn.BatchNorm2d(32),
                                                            nn.ReLU())
        self.enc2c_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(48, 32, kernel_size=1, stride=1, bias=False),
                                                            nn.BatchNorm2d(32),
                                                            nn.ReLU())
        self.fca1_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(192, 32, kernel_size=1, stride=1, bias=False),
                                                           nn.BatchNorm2d(32),
                                                           nn.ReLU())
        self.fca2_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(192, 32, kernel_size=1, stride=1, bias=False),
                                                           nn.BatchNorm2d(32),
                                                           nn.ReLU())
        self.fca3_to_decoder_dim_reduction = nn.Sequential(nn.Conv2d(192, 32, kernel_size=1, stride=1, bias=False),
                                                           nn.BatchNorm2d(32),
                                                           nn.ReLU())
        self.merge_conv = nn.Sequential(nn.Conv2d(32, 32, kernel_size=1, stride=1, bias=False),
                                        nn.BatchNorm2d(32),
                                        nn.ReLU())
        # final per-pixel classifier
        self.last_conv = nn.Sequential(nn.Conv2d(32, num_classes, kernel_size=1, stride=1, bias=False))

    def forward(self, x):
        # backbone stage a
        stage1 = self.conv1(x)
        stage_enc2a = self.enc2a(stage1)
        stage_enc3a = self.enc3a(stage_enc2a)
        stage_enc4a = self.enc4a(stage_enc3a)
        stage_fca1 = self.fca1(stage_enc4a)
        # upsample attention output to enc2 resolution for the next pass
        up_fca1 = F.interpolate(stage_fca1,
                                stage_enc2a.size()[2:],
                                mode='bilinear',
                                align_corners=False)
        # stage b
        stage_enc2b = self.enc2b(torch.cat((up_fca1, stage_enc2a), 1))
        stage_enc3b = self.enc3b(torch.cat((stage_enc2b, stage_enc3a), 1))
        stage_enc4b = self.enc4b(torch.cat((stage_enc3b, stage_enc4a), 1))
        stage_fca2 = self.fca2(stage_enc4b)
        up_fca2 = F.interpolate(stage_fca2,
                                stage_enc2b.size()[2:],
                                mode='bilinear',
                                align_corners=False)
        # stage c
        stage_enc2c = self.enc2c(torch.cat((up_fca2, stage_enc2b), 1))
        stage_enc3c = self.enc3c(torch.cat((stage_enc2c, stage_enc3b), 1))
        stage_enc4c = self.enc4c(torch.cat((stage_enc3c, stage_enc4b), 1))
        stage_fca3 = self.fca3(stage_enc4c)
        # decoder: sum the three enc2 taps at enc2a resolution
        x1 = self.enc2a_to_decoder_dim_reduction(stage_enc2a)
        x2 = self.enc2b_to_decoder_dim_reduction(stage_enc2b)
        x2_up = F.interpolate(x2,
                              x1.size()[2:],
                              mode='bilinear',
                              align_corners=False)
        x3 = self.enc2c_to_decoder_dim_reduction(stage_enc2c)
        x3_up = F.interpolate(x3,
                              x1.size()[2:],
                              mode='bilinear',
                              align_corners=False)
        x_up = x1 + x2_up + x3_up
        x_merge = self.merge_conv(x_up)
        # add the three attention maps, upsampled to the same size
        x_fca1 = self.fca1_to_decoder_dim_reduction(stage_fca1)
        x_fca1_up = F.interpolate(x_fca1,
                                  x1.size()[2:],
                                  mode='bilinear',
                                  align_corners=False)
        x_fca2 = self.fca2_to_decoder_dim_reduction(stage_fca2)
        x_fca2_up = F.interpolate(x_fca2,
                                  x1.size()[2:],
                                  mode='bilinear',
                                  align_corners=False)
        x_fca3 = self.fca3_to_decoder_dim_reduction(stage_fca3)
        x_fca3_up = F.interpolate(x_fca3,
                                  x1.size()[2:],
                                  mode='bilinear',
                                  align_corners=False)
        x_fca_up = x_merge + x_fca1_up + x_fca2_up + x_fca3_up
        # per-pixel logits, upsampled back to the input resolution
        result = self.last_conv(x_fca_up)
        result = F.interpolate(result, x.size()[2:], mode='bilinear', align_corners=False)
        return result
def dfanet(classes=19, **kwargs):
    """Build a DFANet-style segmentation model.

    Args:
        classes (int): number of output segmentation classes
    Returns:
        xceptionAx3: the constructed network (no pretrained weights
        are loaded here)
    """
    return xceptionAx3(classes, **kwargs)
if __name__ == "__main__":
    # Smoke test: forward a random 1024x1024 batch and print the output
    # size (expected: [4, 20, 1024, 1024])
    from torch.nn import CrossEntropyLoss
    criterion = CrossEntropyLoss()  # NOTE(review): unused below
    net = xceptionAx3(num_classes=20)
    # net=enc(in_channels=8,out_channels=48,stage=2)
    input = torch.randn(4, 3, 1024, 1024)
    outputs = net(input)
    # torch.save(net.state_dict(), "model.pth")
    print(outputs.size())
""" Module for storing interaction profiles of Systems and SystemTypes. """
import collections as col
import itertools as it
class ProfileError(Exception):
    """Base exception for this module."""
    pass
def get_inx_class_features(inx_class, system):
    """Collect the concrete features of `system` that `inx_class` tests.

    For each feature type of the interaction class, look up the member
    (via the association's member index) and pick the member's feature
    at the same position the feature type occupies in the member type.
    """
    features = []
    for i, feature_type in enumerate(inx_class.feature_types):
        # find the index of the feature in the member, order of features determines
        member_idx = inx_class.association_type.member_idxs[i]
        feat_idx = list(inx_class.association_type.member_types[i].feature_types.values())\
            .index(feature_type)
        # (the original fetched this twice; once is enough)
        feature = list(system.members[member_idx].features.values())[feat_idx]
        features.append(feature)
    return features
def profile_inx_class(inx_class, system):
    """Test a single interaction class against a system.

    Returns the constructed Interaction when the class's constraints
    pass, otherwise None.
    """
    # get the features for this inx class
    features = get_inx_class_features(inx_class, system)
    # calculate inter-feature parameters and check against
    # constraints
    okay, param_values = inx_class.check(*features)
    if not okay:
        return None
    # if it passes we want to actually construct the interaction object
    # associate the parameter values with the names for them
    param_values = {param_name : param_val for param_name,
                    param_val in zip(inx_class.interaction_param_keys, param_values)}
    # construct the interaction (check=False: constraints already passed)
    inx = inx_class.interaction_constructor(*features,
                                            interaction_class=inx_class,
                                            check=False,
                                            **param_values)
    return inx
def profile_inx_classes(inx_classes, system):
    """Profile `system` against every interaction class.

    Returns (hit_idxs, inxs): indices of classes that matched, and a
    list parallel to `inx_classes` holding the Interaction for each hit
    and None where the test failed (position is preserved).
    """
    inxs = []
    hit_idxs = []
    # gather the substantiated features and check it
    for i, inx_class in enumerate(inx_classes):
        inx = profile_inx_class(inx_class, system)
        if inx is not None:
            hit_idxs.append(i)
            # DEBUG
            assert inx.interaction_class.name == inx_class.name, \
                "The interaction_class from the inx_class {0} does not match"\
                " the created Interaction {1} in hit {2}".format(inx_class.name,
                                                                 inx.interaction_class.name,
                                                                 i)
        # will add a None where the test fails to preserve position
        inxs.append(inx)
    return hit_idxs, inxs
def profiles_df(profiles, profile_ids=None):
    """Concatenate hit DataFrames from (profile_idx, profile) pairs.

    Args:
        profiles: iterable of (profile_idx, profile) pairs, where each
            profile exposes hit_inx_df()
        profile_ids: optional mapping from profile_idx to the value used
            in the 'profile_id' column (defaults to the index itself)
    Returns:
        pandas.DataFrame with one row per hit, tagged by profile_id;
        empty when no profiles are given.
    """
    import pandas as pd
    hits_dfs = []
    for prof_idx, profile in profiles:
        hits_df = profile.hit_inx_df()
        if profile_ids is not None:
            prof_ids_col = [profile_ids[prof_idx]] * hits_df.shape[0]
        else:
            prof_ids_col = [prof_idx] * hits_df.shape[0]
        hits_df['profile_id'] = prof_ids_col
        hits_dfs.append(hits_df)
    if not hits_dfs:
        # pd.concat raises on an empty list; return an empty frame instead
        return pd.DataFrame()
    return pd.concat(hits_dfs)
class InxSpaceProfiler(object):
    """Factory that profiles Systems against a fixed interaction space."""

    def __init__(self, interaction_space):
        self._inx_space = interaction_space

    def profile(self, system):
        """Profile `system`, returning an InxSpaceProfile."""
        return InxSpaceProfile(self._inx_space, system)
class InxSpaceProfile(object):
    """Result of profiling one System against an interaction space.

    Holds, per interaction class (in space order), either the matched
    Interaction or None.
    """
    def __init__(self, inx_space, system):
        self._system = system
        self._inx_space = inx_space
        # profile by interaction class order
        self._hit_idxs, self._inxs = profile_inx_classes(self._inx_space, self._system)

    @property
    def n_inx_classes(self):
        # the space is index-parallel to self._inxs
        return len(self._inx_space)

    @property
    def inxs(self):
        # NOTE(review): shadowed by the `inxs = interactions` alias at
        # the bottom of the class (same behavior)
        return self._inxs

    @property
    def hit_idxs(self):
        # indices of the interaction classes that matched
        return self._hit_idxs
        #return [i for i, inx in enumerate(self._inxs) if inx is not None]

    @property
    def hit_inxs(self):
        # matched interactions only, in space order
        return [inx for inx in self._inxs if inx is not None]

    @property
    def vector(self):
        # binary hit vector over the whole interaction space
        return [0 if inx is None else 1 for inx in self._inxs]

    def hit_inx_records(self):
        """Records of the matched interactions."""
        return [inx.record for inx in self.hit_inxs]

    def hit_inx_dict(self):
        """Column-oriented dict of the hit records (field -> values)."""
        profile_dict = col.defaultdict(list)
        for inx in self.hit_inxs:
            for field, value in inx.record_dict.items():
                profile_dict[field].append(value)
        return profile_dict

    def hit_inx_df(self):
        """Hits as a pandas DataFrame with a 'hit_idx' column."""
        import pandas as pd
        hit_df = pd.DataFrame(self.hit_inx_dict())
        hit_df['hit_idx'] = self.hit_idxs
        return hit_df

    @property
    def system(self):
        return self._system

    @property
    def interaction_space(self):
        return self._inx_space

    # short alias
    inx_space = interaction_space

    @property
    def interactions(self):
        return self._inxs

    # short alias (rebinds the `inxs` property defined above)
    inxs = interactions

    @property
    def subspace_map(self):
        # delegates to a private attribute of the interaction space
        return self._inx_space._subspace_map
# @property
# def subspace_vector(self, association_type, interaction_type):
# key = (association_type, interaction_type)
# idxs = self._subspace_map[key]
# sel_inxs = [inx for i, inx in enumerate(self._inxs) if i in idxs]
# return [0 if inx is None else 1 for inx in sel_inxs]
# TODO have a problem where a hit idx is assigned two inxs if they
# are the opposite of each other in the association
# def inx_type_hit_records(self, interaction_type):
# hit_records = []
# hit_idx_key = 'hit_idx'
# # we will want to make a new record for hits, so we get an
# # example record from the interaction
# inx_idxs = self.hits_by_inx_type(interaction_type)
# inx_record = self.inxs[inx_idxs[0]].record
# # add new hit_idx field
# record_fields = list(inx_record._fields)
# record_fields.append(hit_idx_key)
# # modify the name
# hit_record_name = "Hit" + type(inx_record).__name__
# # make the new namedtuple
# hit_record_type = col.namedtuple(hit_record_name, record_fields)
# # get the hits for this interaction type
# for hit_idx in inx_idxs:
# inx = self.inxs[hit_idx]
# # convert to a dictionary
# inx_dict_rec = inx.record._asdict()
# # add the hit index
# inx_dict_rec[hit_idx_key] = hit_idx
# # make the new record
# hit_record = hit_record_type(**inx_dict_rec)
# hit_records.append(hit_record)
# return hit_records
# def association_hit_records(self, association_type):
# """Returns a dictionary of the hit records for AssociationType."""
# hit_records = []
# hit_idx_key = 'hit_idx'
# # we will want to make a new record for hits, so we get an
# # example record from the interaction
# inx_idxs = self.hits_by_association(association_type)
# inx_record = self.inxs[inx_idxs[0]].record
# # add new hit_idx field
# record_fields = list(inx_record._fields)
# record_fields.append(hit_idx_key)
# # modify the name
# hit_record_name = "Hit" + type(inx_record).__name__
# # make the new namedtuple
# hit_record_type = col.namedtuple(hit_record_name, record_fields)
# # get the hits for this interaction type
# for hit_idx in inx_idxs:
# inx = self.inxs[hit_idx]
# # convert to a dictionary
# inx_dict_rec = inx.record._asdict()
# # add the hit index
# inx_dict_rec[hit_idx_key] = hit_idx
# # make the new record
# hit_record = hit_record_type(**inx_dict_rec)
# hit_records.append(hit_record)
# return hit_records
# def inx_type_hits_df(self, interaction_type):
# import pandas as pd
# return pd.DataFrame(self.inx_type_hit_records(interaction_type))
# def hits_by_inx_type(self, interaction_type):
# """Returns the indices of interactions matching the interaction_type"""
# return_idxs = []
# # for each subspace
# for assoc_inxtype_tup, idxs in self._subspace_map.items():
# # if the subspace involves the interaction type
# if interaction_type == assoc_inxtype_tup[self._interaction_type_idx]:
# # then we get the hit_idxs that match the ones in this subspace
# subspace_hit_idxs = [idx for idx in idxs if idx in self.hit_idxs]
# return_idxs.extend(subspace_hit_idxs)
# return return_idxs
# def hits_by_association(self, association_type):
# """Returns the indices of interactions matching the association_type"""
# return_idxs = []
# for assoc_inxtype_tup, idxs in self._subspace_map.items():
# if association_type == assoc_inxtype_tup[self._association_idx]:
# hit_idxs = [idx for idx in idxs if idx in self.hit_idxs]
# return_idxs.extend(hit_idxs)
# return return_idxs
|
"""Data used by this integration."""
from __future__ import annotations
import asyncio
from collections import defaultdict
from typing import NamedTuple, cast
from async_upnp_client import UpnpEventHandler, UpnpFactory, UpnpRequester
from async_upnp_client.aiohttp import AiohttpNotifyServer, AiohttpSessionRequester
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import CALLBACK_TYPE, Event, HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN, LOGGER
class EventListenAddr(NamedTuple):
    """Unique identifier for an event listener."""

    host: str | None  # Specific local IP(v6) address for listening on
    port: int  # Listening port, 0 means use an ephemeral port
    callback_url: str | None  # Explicit callback URL handed to AiohttpNotifyServer, if any
class DlnaDmrData:
    """Storage class for domain global data.

    Holds the shared UPnP requester/factory and a reference-counted pool of
    event notify servers, keyed by their listen address.
    """

    lock: asyncio.Lock
    requester: UpnpRequester
    upnp_factory: UpnpFactory
    event_notifiers: dict[EventListenAddr, AiohttpNotifyServer]
    event_notifier_refs: defaultdict[EventListenAddr, int]
    stop_listener_remove: CALLBACK_TYPE | None = None

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize global data."""
        self.lock = asyncio.Lock()
        session = aiohttp_client.async_get_clientsession(hass, verify_ssl=False)
        self.requester = AiohttpSessionRequester(session, with_sleep=True)
        self.upnp_factory = UpnpFactory(self.requester, non_strict=True)
        self.event_notifiers = {}
        self.event_notifier_refs = defaultdict(int)

    async def async_cleanup_event_notifiers(self, event: Event) -> None:
        """Clean up resources when Home Assistant is stopped."""
        LOGGER.debug("Cleaning resources in DlnaDmrData")
        async with self.lock:
            tasks = (server.stop_server() for server in self.event_notifiers.values())
            # BUGFIX: gather() was previously called without await, so the
            # coroutine was discarded and the servers were never actually
            # stopped.
            await asyncio.gather(*tasks)
            self.event_notifiers = {}
            self.event_notifier_refs = defaultdict(int)

    async def async_get_event_notifier(
        self, listen_addr: EventListenAddr, hass: HomeAssistant
    ) -> UpnpEventHandler:
        """Return existing event notifier for the listen_addr, or create one.

        Only one event notify server is kept for each listen_addr. Must call
        async_release_event_notifier when done to cleanup resources.
        """
        LOGGER.debug("Getting event handler for %s", listen_addr)
        async with self.lock:
            # Stop all servers when HA shuts down, to release resources on devices
            if not self.stop_listener_remove:
                self.stop_listener_remove = hass.bus.async_listen_once(
                    EVENT_HOMEASSISTANT_STOP, self.async_cleanup_event_notifiers
                )
            # Always increment the reference counter, for existing or new event handlers
            self.event_notifier_refs[listen_addr] += 1
            # Return an existing event handler if we can
            if listen_addr in self.event_notifiers:
                return self.event_notifiers[listen_addr].event_handler
            # Start event handler
            server = AiohttpNotifyServer(
                requester=self.requester,
                listen_port=listen_addr.port,
                listen_host=listen_addr.host,
                callback_url=listen_addr.callback_url,
                loop=hass.loop,
            )
            await server.start_server()
            LOGGER.debug("Started event handler at %s", server.callback_url)
            self.event_notifiers[listen_addr] = server
            return server.event_handler

    async def async_release_event_notifier(self, listen_addr: EventListenAddr) -> None:
        """Indicate that the event notifier for listen_addr is not used anymore.

        This is called once by each caller of async_get_event_notifier, and will
        stop the listening server when all users are done.
        """
        async with self.lock:
            assert self.event_notifier_refs[listen_addr] > 0
            self.event_notifier_refs[listen_addr] -= 1
            # Shutdown the server when it has no more users
            if self.event_notifier_refs[listen_addr] == 0:
                server = self.event_notifiers.pop(listen_addr)
                await server.stop_server()
            # Remove the cleanup listener when there's nothing left to cleanup
            if not self.event_notifiers:
                assert self.stop_listener_remove is not None
                self.stop_listener_remove()
                self.stop_listener_remove = None
def get_domain_data(hass: HomeAssistant) -> DlnaDmrData:
    """Obtain this integration's domain data, creating it if needed."""
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = DlnaDmrData(hass)
    return cast(DlnaDmrData, hass.data[DOMAIN])
|
<gh_stars>0
from torch.nn import CrossEntropyLoss
import torch.optim as optim
import torch
import argparse
import sys
sys.path.append("..")
from model_pytorch import LeNet5
from dataloader import get_mnist
# check if GPU is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_model(pool, activation):
    """Build a LeNet5 with the requested pooling/activation and move it to ``device``."""
    model = LeNet5(pool=pool, activation=activation)
    # .to(device) is a no-op on CPU-only machines; moves parameters when CUDA is present
    model.to(device)
    return model
def load_optimizer(parameters, lr=2e-3):
    """Create the loss criterion and the Adam optimizer for training.

    Args:
        parameters: iterable of model parameters to optimize.
        lr: Adam learning rate.  Defaults to 2e-3 (the previously hard-coded
            value) so existing callers are unchanged; exposed so the CLI's
            --learningrate flag can be wired in.

    Returns:
        (criterion, optimizer) tuple.
    """
    criterion = CrossEntropyLoss()
    optimizer = optim.Adam(parameters, lr=lr)
    return criterion, optimizer
def train(net, train_loader, epochs, optimizer, criterion):
    """Train ``net`` for ``epochs`` passes over ``train_loader``.

    Prints the average loss of every 100-mini-batch window.
    """
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data[0].to(device), data[1].to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 100 == 99:
                # BUGFIX: the loss accumulates over 100 batches but was
                # divided by 2000, under-reporting the average by 20x.
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
    print(">>> Finished Training")
def save_model(net, save_path):
    """Serialize the model's parameters (its state_dict) to ``save_path``."""
    state = net.state_dict()
    torch.save(state, save_path)
def evaluate(net, test_loader):
    """Print the model's top-1 accuracy over ``test_loader``."""
    total = 0
    correct = 0
    with torch.no_grad():  # inference only -- no gradient bookkeeping needed
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = net(images)
            predicted = outputs.data.max(1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %.2f %%' % (
        100 * correct / total))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train LeNet5 architecture in PyTorch.")
    parser.add_argument("-p", "--pool", default="max", help="avg pool or max pool?")
    parser.add_argument("-a", "--activation", default="relu", help="choose from relu, tanh, and leaky")
    parser.add_argument("-b", "--batchsize", type=int, default=256, help="Batch size for training/testing data.")
    parser.add_argument("-d", "--data", default="./data/mnist", help="Path to data.")
    parser.add_argument("-s", "--shuffle", default="True", help="Shuffle data or not?")
    parser.add_argument("-n", "--numworker", type=int, default=8,
                        help="Number of workers to divide the batching process.")
    parser.add_argument("-m", "--modelsavepath", default="./lenet5.pth", help="Model save path.")
    parser.add_argument("-l", "--learningrate", type=float, default=2e-3, help="Set learning rate.")
    parser.add_argument("-e", "--epochs", type=int, default=10, help="Set number of epochs.")
    args = vars(parser.parse_args())
    model = load_model(args["pool"], args["activation"])
    # TODO: wire args["learningrate"] into load_optimizer -- it is parsed but unused.
    criterion, optimizer = load_optimizer(model.parameters())
    # BUGFIX: --shuffle arrives as a string, and any non-empty string (even
    # "False") is truthy; convert it to a real boolean before use.
    shuffle = str(args["shuffle"]).strip().lower() in ("true", "1", "yes")
    train_loader, test_loader = get_mnist(path=args["data"], batch_size=args["batchsize"],
                                          shuffle=shuffle, num_workers=args["numworker"])
    train(model, train_loader, args["epochs"], optimizer, criterion)
    save_model(model, args["modelsavepath"])
    evaluate(model, test_loader)
|
<reponame>elifesciences-publications/genomic-features-survival
#!/usr/bin/env python
# encoding: utf-8
'''
variant_allele_freq.py
Created by <NAME>
on 2017-09-02.
Given the set of mutation files and the variant allele frequency key, calculate variante allel frequency distributions.
Copyright (c) 2018. All rights reserved.
'''
import pandas as pd
import numpy as np
import argparse
import sys
import os
import glob
import itertools
import pdb
from multiprocessing import Pool
from matplotlib import pyplot
sys.path.append('../common/')
import utilities as util
import analysis
# Minimum fraction of sequenced barcodes that must have a VAF for a gene
# before that gene is kept (see prep_data's minimum_vaf_count filter).
MUTATION_PERCENT = .02
# Genes of interest.  Each name carries a leading apostrophe because
# prep_data prepends "'" to every Hugo symbol before matching this list.
COMMONLY_MUTATED = ['\'TP53', '\'KRAS', '\'PIK3CA', '\'APC', '\'KMT2D', '\'ARID1A', '\'PTEN', '\'BRAF', '\'ATM',
                    '\'EGFR', '\'NF1', '\'RB1', '\'BRCA2', '\'ATRX', '\'NOTCH1', '\'CDKN2A', '\'SETD2', '\'CREBBP',
                    '\'SMAD4', '\'FBXW7', '\'ARID1B', '\'SMARCA4', '\'KMT2A', '\'EP300', '\'ERBB4', '\'IDH1',
                    '\'ARID2', '\'NRAS', '\'ROS1', '\'CTNNB1']
def get_options():
  """Parse CLI flags: mutation dir (-i), key file (-k), output dir (-o, default '.')."""
  parser = argparse.ArgumentParser(description='Get mutation and clinical dir')
  parser.add_argument('-i', action='store', dest='mutation_directory')
  parser.add_argument('-k', action='store', dest='key_file')
  parser.add_argument('-o', action='store', dest='output_directory', default='.')
  opts = parser.parse_args()
  return opts.mutation_directory, opts.key_file, opts.output_directory
def prep_data(mutation, key):
  """Load one cancer type's mutation sheet and return a patient x gene table
  of variant allele frequencies (VAFs), restricted to COMMONLY_MUTATED genes
  carried by at least MUTATION_PERCENT of the sequenced barcodes."""
  df = pd.read_csv(mutation, sep='\t', low_memory=False, dtype=str)
  cancer_type = util.get_cancer_type(mutation)
  # remove column headers from combined mutation sheet
  df = df[~df[u'Hugo_Symbol'].str.contains('Hugo_Symbol')]
  # prefix every gene symbol with an apostrophe to match COMMONLY_MUTATED
  df['Hugo_Symbol'] = '\'' + df['Hugo_Symbol'].astype(str)
  df = df[df[u'Hugo_Symbol'].isin(COMMONLY_MUTATED)]
  df[u'Tumor_Sample_Barcode'] = df[u'Tumor_Sample_Barcode'].str.strip()
  # NOTE: counted after the COMMONLY_MUTATED filter, so this is the number of
  # distinct barcodes among rows that survived the gene filter
  number_barcodes_in_mutation_data = df[u'Tumor_Sample_Barcode'].unique().size
  print 'Number of total sequenced barcodes: ', number_barcodes_in_mutation_data
  df = util.maybe_clear_non_01s(df, u'Tumor_Sample_Barcode', cancer_type)
  df = util.add_identifier_column(df, u'Tumor_Sample_Barcode')
  # include only nonsilent mutations
  non_silent = df.where(df[u'Variant_Classification'] != 'Silent')
  df = non_silent.dropna(subset=[u'Variant_Classification'])
  df = df.reset_index()
  df['VAF'] = calculate_vaf(df, key.loc[cancer_type])
  # use the largest VAF per (gene, patient) pair
  df = df.groupby(['Hugo_Symbol', 'identifier']).max()
  df = df.reset_index()
  pivoted = df.pivot(index='identifier', columns='Hugo_Symbol', values='VAF')
  # drop genes observed in fewer than MUTATION_PERCENT of the barcodes
  minimum_vaf_count = MUTATION_PERCENT * number_barcodes_in_mutation_data
  enough_patients = pivoted.count() >= minimum_vaf_count
  too_few_patients = enough_patients[~enough_patients].index.values
  print 'Genes with too few patients:', too_few_patients
  pivoted = pivoted.drop(too_few_patients, axis=1)
  return pivoted
def calculate_vaf(df, key):
  """Compute the variant allele frequency for each row of ``df``.

  ``key`` is this cancer type's row from the key file and names the columns
  holding VAF data: either a precomputed 'VAF?' column, or allele counts in
  'Alt?' plus either 'Total?' or 'Ref?'.
  """
  if not pd.isnull(key['VAF?']):
    # precomputed VAF: strip the trailing character (presumably a percent
    # sign -- TODO confirm against the sheets) and scale to a 0-1 fraction
    vaf = df[key['VAF?']].str[:-1]
    vaf = vaf.replace(r'\s+', np.nan, regex=True)
    vaf = vaf.replace(r'', np.nan, regex=True)
    return vaf.astype(float) / 100
  alt = df[key['Alt?']]
  if not pd.isnull(key['Total?']):
    total = df[key['Total?']].astype(float)
    print total
  else:
    # no explicit total column: total reads = alt reads + ref reads
    total = df[key['Alt?']].astype(float) + df[key['Ref?']].astype(float)
  alt = alt.rename('alt')
  total = total.rename('total')
  var_allele_freq = pd.DataFrame([alt, total], dtype=float).transpose()
  return var_allele_freq['alt'] / var_allele_freq['total']
def calculate_variant_allele_distribution(cancer_type, mutation, key, outdir):
  """Plot a per-gene VAF boxplot for one cancer type and dump the VAF table.

  NOTE(review): ``outdir`` is accepted but never used -- both the .png and
  the .csv are written to the current working directory.
  """
  df = prep_data(mutation, key)
  boxplot_data = []
  boxplot_labels = []
  # iterating a DataFrame yields its column names (one gene per column here)
  for gene in df:
    print gene
    boxplot_data.append(df[gene].dropna())
    boxplot_labels.append(gene)
  fig, ax = pyplot.subplots()
  pyplot.title(cancer_type)
  pyplot.boxplot(boxplot_data, labels=boxplot_labels)
  pyplot.setp(ax.get_xticklabels(), rotation=90, horizontalalignment='center')
  pyplot.savefig(cancer_type + '.png', pad_inches=1, bbox_inches='tight')
  df.to_csv(cancer_type + '_vaf_distribition.csv')
def main(argv=None):
  """Run the VAF distribution analysis for every cancer type in the key file."""
  mutation_dir, key_file, outdir = get_options()
  mutation_files = glob.glob(mutation_dir + '*txt')
  key = pd.read_csv(key_file, na_values=['-'], index_col=0)
  key = key.dropna(how='all')
  print key
  # NOTE(review): the Pool and ``args`` list are created but never used; the
  # loop below runs serially.
  p = Pool(1)
  args = []
  pancan = {}
  for mutation in mutation_files:
    cancer_type = util.get_cancer_type(mutation)
    if cancer_type in key.index:
      print cancer_type
      # NOTE(review): calculate_variant_allele_distribution has no return
      # statement, so every value stored in ``pancan`` is None.
      pancan[cancer_type] = calculate_variant_allele_distribution(cancer_type, mutation, key, outdir)
# Script entry point.
if __name__ == "__main__":
  main()
|
import numpy as np
def scatter_matrix(data):
    """Placeholder -- not implemented; accepts a DataFrame and does nothing."""
    pass
def _gca():
    """Return matplotlib's current axes (matplotlib imported lazily)."""
    from matplotlib import pyplot
    return pyplot.gca()
def _gcf():
    """Return matplotlib's current figure (matplotlib imported lazily)."""
    from matplotlib import pyplot
    return pyplot.gcf()
def hist(data, column, by=None, ax=None, fontsize=None):
    """Draw a boxplot of ``data[column]`` grouped by ``by``; returns the axes.

    NOTE(review): despite its name this draws a *boxplot* (``ax.boxplot``),
    not a histogram -- presumably a copy/paste slip.  Renaming would break
    callers, so it is only flagged here.
    """
    keys, values = zip(*data.groupby(by)[column])
    if ax is None:
        ax = _gca()
    ax.boxplot(values)
    ax.set_xticklabels(keys, rotation=0, fontsize=fontsize)
    return ax
def grouped_hist(data, column, by=None, ax=None, bins=50, log=False,
                 figsize=None):
    """
    Draw one histogram of ``data[column]`` per group of ``by``.

    NOTE(review): ``ax`` and ``log`` are accepted but unused (kept for
    interface compatibility).

    Returns
    -------
    fig : matplotlib.Figure
    """
    def plot_group(group, ax):
        ax.hist(group[column].dropna(), bins=bins)

    # BUGFIX: _grouped_plot returns (fig, axes); the original bound the whole
    # tuple to ``fig`` and then crashed on fig.subplots_adjust.
    fig, _axes = _grouped_plot(plot_group, data, by=by, sharex=False,
                               sharey=False, figsize=figsize)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
                        hspace=0.3, wspace=0.2)
    return fig
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None):
    """
    Make a box plot from DataFrame column optionally grouped by some columns or
    other inputs

    Parameters
    ----------
    data : DataFrame
    column : column name or list of names, or vector
        Can be any valid input to groupby
    by : string or sequence
        Column in the DataFrame to group by
    ax : matplotlib axes, optional
        Axes to draw on (only used when ``by`` is None)
    fontsize : int or string
        Tick-label font size
    rot : int
        Tick-label rotation, in degrees
    grid : bool
        Whether to draw the axes grid
    figsize : tuple, optional
        Figure size, forwarded to the grouped plot

    Returns
    -------
    ax : matplotlib.axes.AxesSubplot
    """
    def plot_group(grouped, ax):
        # one box per (key, values) pair of the grouped data
        keys, values = zip(*grouped)
        keys = [_stringify(x) for x in keys]
        ax.boxplot(values)
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)

    # Normalize ``column`` to a list of columns (or None meaning "all numeric").
    # BUGFIX: use ``is None`` -- ``column == None`` breaks for array-like
    # columns, whose __eq__ returns an elementwise result.
    if column is None:
        columns = None
    elif isinstance(column, (list, tuple)):
        columns = column
    else:
        columns = [column]

    if by is not None:
        if not isinstance(by, (list, tuple)):
            by = [by]
        fig, axes = _grouped_plot_by_column(plot_group, data, columns=columns,
                                            by=by, grid=grid, figsize=figsize)
        ax = axes
    else:
        if ax is None:
            ax = _gca()
        fig = ax.get_figure()
        data = data._get_numeric_data()
        cols = columns if columns else data.columns
        keys = [_stringify(x) for x in cols]
        ax.boxplot(list(data[cols].values.T))
        ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
        ax.grid(grid)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
    return ax
def _stringify(x):
if isinstance(x, tuple):
return '|'.join(str(y) for y in x)
else:
return str(x)
def format_date_labels(ax):
    """Right-align and rotate the x tick labels (mini version of autofmt_xdate)."""
    try:
        for label in ax.get_xticklabels():
            label.set_ha('right')
            label.set_rotation(30)
        ax.get_figure().subplots_adjust(bottom=0.2)
    except Exception:  # pragma: no cover
        # best-effort: silently skip axes that don't support these operations
        pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None):
    """
    Scatter plot of data[y] against data[x], one subplot per group of ``by``
    when given.

    Returns
    -------
    fig : matplotlib.Figure
    """
    import matplotlib.pyplot as plt

    def plot_group(group, ax):
        xvals = group[x].values
        yvals = group[y].values
        ax.scatter(xvals, yvals)

    if by is not None:
        # BUGFIX: _grouped_plot returns (fig, axes); the original bound the
        # whole tuple to ``fig`` and returned it, contradicting the docstring.
        fig, _axes = _grouped_plot(plot_group, data, by=by, figsize=figsize)
    else:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_group(data, ax)
        ax.set_ylabel(str(y))
        ax.set_xlabel(str(x))
    return fig
def _grouped_plot(plotf, data, by=None, numeric_only=True, figsize=None,
                  sharex=True, sharey=True):
    """Apply ``plotf`` on one subplot per group of ``data.groupby(by)``.

    figsize semantics: None -> our (10, 5) default; the string 'default' ->
    defer to matplotlib's default; any other value is passed through.

    Returns (fig, axes).
    """
    import matplotlib.pyplot as plt
    # BUGFIX: the original overwrote *any* caller-supplied figsize with
    # (10, 5), and let the literal string 'default' reach matplotlib.
    if isinstance(figsize, str) and figsize == 'default':
        figsize = None  # let matplotlib pick its own default
    elif figsize is None:
        # our favorite default beating matplotlib's idea of the default size
        figsize = (10, 5)
    grouped = data.groupby(by)
    ngroups = len(grouped)
    nrows, ncols = _get_layout(ngroups)
    fig, axes = subplots(nrows=nrows, ncols=ncols, figsize=figsize,
                         sharex=sharex, sharey=sharey)
    # Flatten scalar / 1-d / 2-d axes results uniformly.  BUGFIX: mirrors
    # _grouped_plot_by_column -- the original iterated rows unconditionally
    # and broke when subplots() squeezed the result to a single Axes or a
    # 1-d array of Axes.
    if isinstance(axes, plt.Axes):
        ravel_axes = [axes]
    else:
        ravel_axes = []
        for row in axes:
            if isinstance(row, plt.Axes):
                ravel_axes.append(row)
            else:
                ravel_axes.extend(row)
    for i, (key, group) in enumerate(grouped):
        ax = ravel_axes[i]
        if numeric_only:
            group = group._get_numeric_data()
        plotf(group, ax)
        ax.set_title(str(key))
    return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
                            numeric_only=True, grid=False,
                            figsize=None):
    """Apply ``plotf`` to each of ``columns`` of ``data.groupby(by)``, one
    subplot per column.  Returns (fig, axes)."""
    import matplotlib.pyplot as plt
    grouped = data.groupby(by)
    if columns is None:
        # NOTE(review): Index.__sub__ set-difference -- removed in modern
        # pandas (use .difference there); kept for the pandas version this
        # module targets.
        columns = data._get_numeric_data().columns - by
    ngroups = len(columns)
    nrows, ncols = _get_layout(ngroups)
    fig, axes = subplots(nrows=nrows, ncols=ncols,
                         sharex=True, sharey=True,
                         figsize=figsize)
    # flatten whatever subplots() returned (scalar, 1-d or 2-d) into a list
    if isinstance(axes, plt.Axes):
        ravel_axes = [axes]
    else:
        ravel_axes = []
        for row in axes:
            if isinstance(row, plt.Axes):
                ravel_axes.append(row)
            else:
                ravel_axes.extend(row)
    for i, col in enumerate(columns):
        ax = ravel_axes[i]
        gp_col = grouped[col]
        plotf(gp_col, ax)
        ax.set_title(col)
        ax.set_xlabel(str(by))
        ax.grid(grid)
    byline = by[0] if len(by) == 1 else by
    fig.suptitle('Boxplot grouped by %s' % byline)
    return fig, axes
def _get_layout(nplots):
if nplots == 1:
return (1, 1)
elif nplots == 2:
return (1, 2)
elif nplots < 4:
return (2, 2)
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
             subplot_kw=None, **fig_kw):
    """Create a figure with a set of subplots already made.

    This utility wrapper makes it convenient to create common layouts of
    subplots, including the enclosing figure object, in a single call.

    Keyword arguments:

    nrows : int
      Number of rows of the subplot grid.  Defaults to 1.

    ncols : int
      Number of columns of the subplot grid.  Defaults to 1.

    sharex : bool
      If True, the X axis will be shared amongst all subplots.

    sharey : bool
      If True, the Y axis will be shared amongst all subplots.

    squeeze : bool
      If True, extra dimensions are squeezed out from the returned axis object:
        - if only one subplot is constructed (nrows=ncols=1), the resulting
        single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
        array of Axis objects.
        - NxM subplots with N>1 and M>1 are returned as a 2-d array.
      If False, no squeezing at all is done: the returned axis object is always
      a 2-d array containing Axis instances, even if it ends up being 1x1.

    subplot_kw : dict
      Dict with keywords passed to the add_subplot() call used to create each
      subplot.

    fig_kw : dict
      Dict with keywords passed to the figure() call.  Note that all keywords
      not recognized above will be automatically included here.

    Returns:

    fig, ax : tuple
      - fig is the Matplotlib Figure object
      - ax can be either a single axis object or an array of axis objects if
      more than one subplot was created.  The dimensions of the resulting array
      can be controlled with the squeeze keyword, see above.

    **Examples:**

    x = np.linspace(0, 2*np.pi, 400)
    y = np.sin(x**2)

    # Just a figure and one subplot
    f, ax = plt.subplots()
    ax.plot(x, y)
    ax.set_title('Simple plot')

    # Two subplots, unpack the output array immediately
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    ax1.plot(x, y)
    ax1.set_title('Sharing Y axis')
    ax2.scatter(x, y)

    # Four polar axes
    plt.subplots(2, 2, subplot_kw=dict(polar=True))
    """
    import matplotlib.pyplot as plt
    if subplot_kw is None:
        subplot_kw = {}
    fig = plt.figure(**fig_kw)
    # Create empty object array to hold all axes.  It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape at the end.
    nplots = nrows*ncols
    axarr = np.empty(nplots, dtype=object)
    # Create first subplot separately, so we can share it if requested
    ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
    if sharex:
        subplot_kw['sharex'] = ax0
    if sharey:
        subplot_kw['sharey'] = ax0
    axarr[0] = ax0
    # Note off-by-one counting because add_subplot uses the MATLAB 1-based
    # convention.
    for i in range(1, nplots):
        axarr[i] = fig.add_subplot(nrows, ncols, i+1, **subplot_kw)
    if squeeze:
        # Reshape the array to have the final desired dimension (nrow,ncol),
        # though discarding unneeded dimensions that equal 1.  If we only have
        # one subplot, just return it instead of a 1-element array.
        if nplots==1:
            return fig, axarr[0]
        else:
            return fig, axarr.reshape(nrows, ncols).squeeze()
    else:
        # returned axis array will be always 2-d, even if nrows=ncols=1
        return fig, axarr.reshape(nrows, ncols)
if __name__ == '__main__':
    # Ad-hoc demo: needs the legacy pandas.rpy R bridge and R's 'nutshell'
    # data package (pandas.rpy was removed from later pandas releases).
    import pandas.rpy.common as com
    sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
    top10 = sales['zip'].value_counts()[:10].index
    sales2 = sales[sales.zip.isin(top10)]
    fig = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
    # plt.show()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import math
# In[45]:
class Attention(nn.Module):
    """
    Returns:
        Not the raw attention weights, but the input with each position
        scaled by its normalized weight and added back onto itself
        (``x * a + x`` -- see ``forward``).
    Input:
        (batch_size, step_dim, dims_to_weight, features_dim)
    """
    def __init__(self, dims_to_weight, features_dim, bias=True):
        super(Attention, self).__init__()
        self.dims_to_weight = dims_to_weight
        self.features_dim = features_dim
        self.bias = bias
        self.latent_dim = 64  # hidden size of the two-layer scoring MLP
        self.eps = 1e-5  # guards against division by zero in the normalization
        self.weight1 = nn.Parameter(torch.Tensor(self.features_dim, self.latent_dim))
        self.weight2 = nn.Parameter(torch.Tensor(self.latent_dim, 1))
        if self.bias:
            self.b1 = nn.Parameter(torch.Tensor(self.latent_dim))
            self.b2 = nn.Parameter(torch.Tensor(1))
        else:
            # NOTE(review): this replaces self.bias with a None parameter, so
            # the ``if self.bias`` guards below become falsy; b1/b2 simply
            # never exist in that case.
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # uniform init scaled by fan-in, as in nn.Linear
        stdv = 1. / math.sqrt(self.weight1.size(1))
        self.weight1.data.uniform_(-stdv, stdv)
        # NOTE(review): weight2.size(1) == 1, so this stdv is always 1.0;
        # size(0) (the fan-in) may have been intended -- confirm.
        stdv = 1. / math.sqrt(self.weight2.size(1))
        self.weight2.data.uniform_(-stdv, stdv)
        if self.bias:
            nn.init.zeros_(self.b1)
            nn.init.zeros_(self.b2)
    def forward(self, x):
        eij = F.relu(torch.matmul(x, self.weight1))  # (batch_size, step_dim, dims_to_weight, latent_dim)
        if self.bias:
            eij = torch.add(eij, self.b1)
        eij = torch.matmul(eij, self.weight2)  # (batch_size, step_dim, dims_to_weight, 1)
        if self.bias:
            eij = torch.add(eij, self.b2)
        # RNNs typically default to tanh; for attention the choice of
        # activation matters little since a softmax-style step follows
        eij = torch.tanh(eij)
        a = torch.exp(eij)  # (batch_size, step_dim, dims_to_weight, 1)
        # normalize over dims_to_weight (manual softmax with eps for
        # stability); the Keras original needed an explicit dtype cast here
        a = torch.div(a, (torch.sum(a, dim=2, keepdim=True) + self.eps))  # (batch_size, step_dim, dims_to_weight, 1)
        # a: (batch_size, step_dim, dims_to_weight, 1)
        # x: (batch_size, step_dim, dims_to_weight, features_dim)
        # residual-style: the weighted input is added back onto the original
        weighted_input = torch.add(torch.mul(x, a), x)
        return weighted_input
    def extra_repr(self):
        return 'dims_to_weight={}, features_dim={}, bias={}'.format(
            self.dims_to_weight, self.features_dim, self.bias
        )
# In[46]:
class AutoEncoder(nn.Module):
    """
    Tied-weight linear autoencoder: the decoder reuses the transposed
    encoder weight.

    Per the original note the intended shapes are input (3, 224, 50) and
    decoded output (3, 224, 224); in general the encoder maps
    (..., in_features) -> (..., latent_features) and the decoder back.
    """
    def __init__(self, in_features, latent_features, bias=True):
        super(AutoEncoder, self).__init__()
        self.in_features = in_features
        self.latent_features = latent_features
        self.weight = nn.Parameter(torch.Tensor(latent_features, in_features))
        if bias:
            self.encode_bias = nn.Parameter(torch.Tensor(latent_features))
            self.decode_bias = nn.Parameter(torch.Tensor(in_features))
        else:
            # BUGFIX: the original registered a parameter named 'bias' here,
            # leaving encode_bias/decode_bias undefined, so bias=False crashed
            # with AttributeError in reset_parameters and forward.
            self.register_parameter('encode_bias', None)
            self.register_parameter('decode_bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform fan-in init for the weight; zero init for biases, if any."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.encode_bias is not None:
            nn.init.zeros_(self.encode_bias)
            nn.init.zeros_(self.decode_bias)
    def forward(self, input):
        # F.linear accepts bias=None, so both bias modes share this path
        encoded = F.relu(F.linear(input, self.weight, self.encode_bias))
        decoded = F.linear(encoded, torch.transpose(self.weight, 0, 1), self.decode_bias)
        return encoded, decoded
    def extra_repr(self):
        return 'in_features={}, latent_features={}, bias={}'.format(
            self.in_features, self.latent_features, self.encode_bias is not None and self.decode_bias is not None
        )
# In[47]:
class AttentionAE(nn.Module):
    """Attention layer followed by a tied-weight linear autoencoder."""
    def __init__(self, dims_to_weight, features_dim, latent_features):
        super(AttentionAE, self).__init__()
        self.att = Attention(dims_to_weight=dims_to_weight, features_dim=features_dim)
        # self.bn_att = nn.BatchNorm2d(features_dim)  (disabled in original)
        self.autoencoder = AutoEncoder(in_features=dims_to_weight, latent_features=latent_features)
    def forward(self, x):
        # move to channels-last for the attention step:
        # (batch, C, H, W) -> (batch, H, W, C)
        attended = self.att(x.permute(0, 2, 3, 1))
        # restore channels-first before the autoencoder:
        # (batch, H, W, C) -> (batch, C, H, W)
        restored = attended.permute(0, 3, 1, 2)
        encoded, decoded = self.autoencoder(restored)
        return encoded, decoded
class AttentionFCResNet(nn.Module):
    """Attention -> linear encoder -> ResNet-50 classifier.

    Optionally seeds the attention/encoder weights from a pretrained
    AttentionAE checkpoint (``ae_path``) and the classifier from a ResNet
    checkpoint (``resnet_path``).
    """
    def __init__(self, num_classes, ae_path=None, resnet_path=None):
        super(AttentionFCResNet, self).__init__()
        self.num_classes = num_classes
        self.ae_path = ae_path
        self.resnet_path = resnet_path
        # dims match the AttentionAE(224, 3, 224) loaded in init_weights
        self.att = Attention(224, 3)
        # self.bn_att = nn.BatchNorm2d(9)
        self.encode = nn.Linear(224, 224)
        self.encode_relu = nn.ReLU()
        self.bn = nn.BatchNorm2d(3)
        # NOTE: downloads ImageNet weights on first use
        self.classifier = models.resnet50(pretrained=True)
        num_ftrs = self.classifier.fc.in_features
        self.classifier.fc = nn.Linear(num_ftrs, self.num_classes)
        # self.classifier = ResNetWithDropout()
        if self.ae_path is not None or self.resnet_path is not None:
            self.init_weights()
    def init_weights(self):
        """Copy weights from the checkpoint(s) given at construction time."""
        print('initialize weight')
        if self.ae_path is not None:
            autoencoder = AttentionAE(224, 3, 224)
            autoencoder.load_state_dict(torch.load(self.ae_path)['state_dict'], strict=False)
            # the numpy round-trip produces an explicit detached copy
            self.att.weight1.data = torch.from_numpy(autoencoder.att.weight1.detach().numpy())
            self.att.weight2.data = torch.from_numpy(autoencoder.att.weight2.detach().numpy())
            self.att.b1.data = torch.from_numpy(autoencoder.att.b1.detach().numpy())
            self.att.b2.data = torch.from_numpy(autoencoder.att.b2.detach().numpy())
            # self.bn_att.weight.data = torch.from_numpy(autoencoder.bn_att.weight.detach().numpy())
            # self.bn_att.bias.data = torch.from_numpy(autoencoder.bn_att.bias.detach().numpy())
            self.encode.weight.data = torch.from_numpy(autoencoder.autoencoder.weight.detach().numpy())
            self.encode.bias.data = torch.from_numpy(autoencoder.autoencoder.encode_bias.detach().numpy())
        if self.resnet_path is not None:
            self.classifier.load_state_dict(torch.load(self.resnet_path)['state_dict'], strict=False)
    def forward(self, x):
        # (batch_size, 3, 224, 50) per the original note -- but
        # NOTE(review): nn.Linear(224, 224) below requires the trailing dim
        # to be 224; confirm the actual input layout with the caller.
        x = x.permute(0, 2, 3, 1)
        # channels-last for the attention step
        x = self.att(x)
        x = x.permute(0, 3, 1, 2)
        # back to channels-first
        # x = self.bn_att(x)
        x = self.encode(x)
        # (batch_size, 3, 224, 224) per the original note
        x = self.encode_relu(x)
        x = self.bn(x)
        x = self.classifier(x)
        return x
# In[5]:
class Regularization(torch.nn.Module):
    """Computes an Lp penalty over a model's weight parameters."""
    def __init__(self,model,weight_decay,p=2):
        """
        :param model: the model whose weights are penalized
        :param weight_decay: regularization coefficient
        :param p: order of the norm; p=2 gives L2 regularization and p=1
            gives L1 (the original comment's "p=0 is L2" was a typo)
        """
        super(Regularization, self).__init__()
        if weight_decay <= 0:
            # NOTE(review): exits the whole process instead of raising
            print("param weight_decay can not <=0")
            exit(0)
        self.model=model
        self.weight_decay=weight_decay
        self.p=p
        self.weight_list=self.get_weight(model)
        self.weight_info(self.weight_list)
    def to(self,device):
        """
        Select the device to run on.
        :param device: cuda or cpu
        :return: self
        """
        self.device=device
        super().to(device)
        return self
    def forward(self, model):
        self.weight_list=self.get_weight(model)  # refresh to the latest weights
        reg_loss = self.regularization_loss(self.weight_list, self.weight_decay, p=self.p)
        return reg_loss
    def get_weight(self,model):
        """
        Collect the model's (name, parameter) pairs whose name contains 'weight'.
        :param model:
        :return: list of (name, parameter) tuples
        """
        weight_list = []
        for name, param in model.named_parameters():
            if 'weight' in name:
                weight = (name, param)
                weight_list.append(weight)
        return weight_list
    def regularization_loss(self,weight_list, weight_decay, p=2):
        """
        Sum the p-norms of the collected weights, scaled by weight_decay.
        :param weight_list:
        :param p: order of the norm (default 2)
        :param weight_decay:
        :return: scalar regularization loss
        """
        reg_loss=0
        for name, w in weight_list:
            l2_reg = torch.norm(w, p=p)
            reg_loss = reg_loss + l2_reg
        reg_loss=weight_decay*reg_loss
        return reg_loss
    def weight_info(self,weight_list):
        """
        Print the names of the weights being regularized.
        :param weight_list:
        :return:
        """
        print("---------------regularization weight---------------")
        for name ,w in weight_list:
            print(name)
        print("---------------------------------------------------")
# In[6]:
class ResNetWithDropout(nn.Module):
    """ResNet-50 whose final fc is replaced by dropout plus a fresh linear head."""
    def __init__(self, num_classes):
        super(ResNetWithDropout, self).__init__()
        self.num_classes = num_classes
        backbone = models.resnet50(pretrained=True)
        in_dim = backbone.fc.in_features
        # dropout only inside the backbone; the real classification layer
        # is self.fc1, applied afterwards in forward()
        backbone.fc = nn.Dropout(0.5)
        self.resnet = backbone
        self.fc1 = nn.Linear(in_dim, self.num_classes)
    def forward(self, x):
        features = self.resnet(x)
        return self.fc1(features)
# In[7]:
class FCResNet(nn.Module):
    """Linear encoder followed by a ResNet-50 classifier.

    If an ``autoencoder`` exposing .weight / .encode_bias (e.g. AutoEncoder)
    is supplied, its encoder weights seed self.encode; the reference is then
    dropped so the autoencoder is not retained.
    """
    def __init__(self, num_classes, autoencoder=None):
        super(FCResNet, self).__init__()
        self.num_classes = num_classes
        self.autoencoder = autoencoder
        self.encode = nn.Linear(224, 224)
        self.encode_relu = nn.ReLU()
        # NOTE: downloads ImageNet weights on first use
        self.classifier = models.resnet50(pretrained=True)
        num_ftrs = self.classifier.fc.in_features
        self.classifier.fc = nn.Linear(num_ftrs, self.num_classes)
        # self.classifier = ResNetWithDropout()
        if self.autoencoder is not None:
            self.init_weights()
            self.autoencoder = None  # weights copied; release the reference
    def init_weights(self):
        """Copy the encoder weight/bias from the supplied autoencoder."""
        print('initialize weight')
        # the numpy round-trip produces an explicit detached copy
        self.encode.weight.data = torch.from_numpy(self.autoencoder.weight.detach().numpy())
        self.encode.bias.data = torch.from_numpy(self.autoencoder.encode_bias.detach().numpy())
    def forward(self, x):
        # (batch_size, 3, 224, 50) per the original note -- NOTE(review):
        # nn.Linear(224, 224) requires the trailing dim to be 224; confirm
        # the actual input layout with the caller.
        x = self.encode(x)
        # (batch_size, 3, 224, 224) per the original note
        x = self.encode_relu(x)
        x = self.classifier(x)
        return x
# In[8]:
class AttentionResNet(nn.Module):
    """Attention + BatchNorm feeding a ResNet-50 with a 9-channel stem."""
    def __init__(self, num_classes):
        super(AttentionResNet, self).__init__()
        self.num_classes = num_classes
        self.att = Attention(224, 9)
        self.bn = nn.BatchNorm2d(9)
        # NOTE: downloads ImageNet weights on first use
        self.classifier = models.resnet50(pretrained=True)
        # widen the stem to 9 input channels (the pretrained conv1 weights
        # are therefore discarded)
        self.classifier.conv1 = nn.Conv2d(9, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        num_ftrs = self.classifier.fc.in_features
        self.classifier.fc = nn.Linear(num_ftrs, self.num_classes)
    def forward(self, x):
        # NOTE(review): the original shape comments said 3 channels, but
        # Attention(224, 9), BatchNorm2d(9) and the 9-channel conv1 imply a
        # 9-channel input -- confirm with the caller.
        x = x.permute(0, 2, 3, 1)
        # channels-last for the attention step
        x = self.att(x)
        x = x.permute(0, 3, 1, 2)
        # back to channels-first
        x = self.bn(x)
        x = self.classifier(x)
        return x
# In[9]:
# In[10]:
# In[11]:
# In[12]:
# In[53]:
# In[43]:
# In[55]:
# In[ ]:
|
<reponame>codejamninja/nb2plots
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Test running scripts
"""
from __future__ import division, print_function, absolute_import
from os.path import (join as pjoin, exists)
from glob import glob
import re
from scripttester import ScriptTester
from nb2plots.testing import DATA_PATH
from nb2plots.testing.convutils import fcontents, unsmart, unsmart_nb
from nb2plots.testing.nbtesters import assert_nb_equiv
# Runner for the installed nb2plots console scripts; on Windows the script
# entry points carry a .bat extension.
runner = ScriptTester('nb2plots', win_bin_ext='.bat')
run_command = runner.run_command
def script_test(func):
    """Decorator marking ``func`` as a script test (sets ``script_test=True``)."""
    func.script_test = True
    return func
@script_test
def test_rst2md():
    # Run rst2md on every .rst fixture and compare against its .md sibling.
    for rst_fname in glob(pjoin(DATA_PATH, '*.rst')):
        md_fname = rst_fname[:-3] + 'md'
        with open(md_fname, 'rb') as fobj:
            expected_md = fobj.read()
        # Fixtures whose .md contains just "skip" hold ReST that plain
        # docutils cannot process (Sphinx-only directives and roles).
        if expected_md.strip() == b'skip':
            continue
        code, stdout, stderr = run_command(['rst2md', rst_fname])
        assert stdout == expected_md
@script_test
def test_sphinx2md():
    # Run sphinx2md on every .rst fixture; compare against the .smd fixture
    # when one exists, otherwise the ordinary .md one.
    for rst_fname in glob(pjoin(DATA_PATH, '*.rst')):
        base = rst_fname[:-3]
        md_fname = base + 'smd' if exists(base + 'smd') else base + 'md'
        expected = fcontents(md_fname)
        code, stdout, stderr = run_command(['sphinx2md', rst_fname])
        assert unsmart(stdout.decode('utf-8')) == expected.decode('utf-8')
@script_test
def test_sphinx2nb():
    # Run sphinx2nb on every .rst fixture and compare against its .ipynb sibling.
    for rst_fname in glob(pjoin(DATA_PATH, '*.rst')):
        expected = fcontents(rst_fname[:-3] + 'ipynb', 't')
        code, stdout, stderr = run_command(['sphinx2nb', rst_fname])
        assert_nb_equiv(unsmart_nb(stdout.decode('utf-8')), expected)
@script_test
def test_sphinx2py():
    # Run sphinx2py on every .rst fixture and compare against its .py sibling.
    for rst_fname in glob(pjoin(DATA_PATH, '*.rst')):
        expected = fcontents(rst_fname[:-3] + 'py', 'b')
        code, stdout, stderr = run_command(['sphinx2py', rst_fname])
        assert unsmart(stdout.decode('utf-8')) == expected.decode('utf-8')
@script_test
def test_sphinx2pxml():
    # Smoke test: the pseudo-XML emitted by sphinx2pxml should open with the
    # expected document/section structure.
    src_path = pjoin(DATA_PATH, 'sect_text.rst')
    code, stdout, stderr = run_command(['sphinx2pxml', src_path])
    expected_re = r"""<document source=".*?">
    <section ids="a-section" names="a\\ section">
        <title>
            A section
        <paragraph>
            Some
            <emphasis>
                text
            ."""
    text = stdout.decode('utf-8')
    assert re.match(expected_re, text)
|
<reponame>chachabooboo/king-phisher<gh_stars>1000+
"""Schema v3
Revision ID: 24a4a626ff7c
Revises: None
Create Date: 2015-07-17
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'  # NOTE(review): redacted placeholder — the docstring above says the revision id is 24a4a626ff7c; confirm before running.
down_revision = None
import os
import sys
# Make the king_phisher package importable when Alembic executes this file
# directly (the migration lives five directories below the project root).
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), *['..'] * 5)))
from alembic import op
from king_phisher.server.database import manager as db_manager
import sqlalchemy
def upgrade():
	"""Upgrade the database schema from v2 to v3."""
	# New lookup tables are created first because the columns added below
	# reference them through foreign keys.
	op.create_table(
		'campaign_types',
		sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
		sqlalchemy.Column('name', sqlalchemy.String, nullable=False),
		sqlalchemy.Column('description', sqlalchemy.String)
	)
	op.create_table(
		'company_departments',
		sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
		sqlalchemy.Column('name', sqlalchemy.String, nullable=False),
		sqlalchemy.Column('description', sqlalchemy.String)
	)
	op.create_table(
		'industries',
		sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
		sqlalchemy.Column('name', sqlalchemy.String, nullable=False),
		sqlalchemy.Column('description', sqlalchemy.String)
	)
	# companies references industries, so it must be created after it.
	op.create_table(
		'companies',
		sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
		sqlalchemy.Column('name', sqlalchemy.String, unique=True, nullable=False),
		sqlalchemy.Column('description', sqlalchemy.String),
		sqlalchemy.Column('industry_id', sqlalchemy.Integer, sqlalchemy.ForeignKey('industries.id')),
		sqlalchemy.Column('url_main', sqlalchemy.String),
		sqlalchemy.Column('url_email', sqlalchemy.String),
		sqlalchemy.Column('url_remote_access', sqlalchemy.String)
	)
	# The enum type must exist in the database before a column can use it.
	alert_subscriptions_type = sqlalchemy.Enum('email', 'sms', name='alert_subscription_type')
	alert_subscriptions_type.create(op.get_bind(), checkfirst=True)
	op.add_column('alert_subscriptions', sqlalchemy.Column('type', alert_subscriptions_type, default='sms', server_default='sms', nullable=False))
	op.add_column('alert_subscriptions', sqlalchemy.Column('mute_timestamp', sqlalchemy.DateTime))
	op.add_column('campaigns', sqlalchemy.Column('campaign_type_id', sqlalchemy.Integer, sqlalchemy.ForeignKey('campaign_types.id')))
	op.add_column('campaigns', sqlalchemy.Column('company_id', sqlalchemy.Integer, sqlalchemy.ForeignKey('companies.id')))
	op.add_column('campaigns', sqlalchemy.Column('expiration', sqlalchemy.DateTime))
	op.add_column('messages', sqlalchemy.Column('company_department_id', sqlalchemy.Integer, sqlalchemy.ForeignKey('company_departments.id')))
	op.add_column('users', sqlalchemy.Column('email_address', sqlalchemy.String))
	op.add_column('users', sqlalchemy.Column('otp_secret', sqlalchemy.String(16)))
	# Rebind the ORM session to the migration connection and record the new
	# schema version in the metadata storage.
	db_manager.Session.remove()
	db_manager.Session.configure(bind=op.get_bind())
	session = db_manager.Session()
	db_manager.set_meta_data('schema_version', 3, session=session)
	session.commit()
def downgrade():
	"""Revert the database schema from v3 back to v2."""
	# Drop the columns first, then the tables they referenced (companies
	# before industries because of the industry_id foreign key).
	op.drop_column('alert_subscriptions', 'type')
	op.drop_column('alert_subscriptions', 'mute_timestamp')
	op.drop_column('campaigns', 'campaign_type_id')
	op.drop_column('campaigns', 'company_id')
	op.drop_column('campaigns', 'expiration')
	op.drop_column('messages', 'company_department_id')
	op.drop_column('users', 'email_address')
	op.drop_column('users', 'otp_secret')
	op.drop_table('campaign_types')
	op.drop_table('company_departments')
	op.drop_table('companies')
	op.drop_table('industries')
	# Record the rolled-back schema version in the metadata storage.
	db_manager.Session.remove()
	db_manager.Session.configure(bind=op.get_bind())
	session = db_manager.Session()
	db_manager.set_meta_data('schema_version', 2, session=session)
	session.commit()
|
<reponame>leilakhalili87/gbmc_v0<gh_stars>0
import util_funcs as uf
import pad_dump_file as pdf
import vv_props as vvp
import lammps_dump_writer as ldw
import lammps_script_writer as lsw
import ovito.data as ovd
from ovito.pipeline import StaticSource, Pipeline
import ovito.modifiers as ovm
from shutil import copyfile
import numpy as np
# --------------------------------------------------------------------------
# Define the input
# --------------------------------------------------------------------------
lat_par = 4.05                    # lattice parameter (presumably fcc Al, Angstrom)
rCut = 2*lat_par                  # neighbor cutoff radius
CohEng = -3.35999998818377        # cohesive energy, calculated from in.cohesive
Tm = 933.5                        # melting temperature (K)
weight_1 = .5                     # weighting used by uf.cal_GB_E
# BUGFIX: tol_fix_reg was commented out but is used below (run_lammps_min)
# and throughout the Monte-Carlo loop, which raised a NameError at runtime.
tol_fix_reg = 5 * lat_par         # the width of rigid translation region
SC_tol = 5 * lat_par              # single-crystal region tolerance
str_alg = "ptm"                   # structure-identification algorithm ("ptm" or csym-based)
csc_tol = .1                      # centro-symmetry tolerance when str_alg != "ptm"
# --------------------------------------------------------------------------
# Define the path to dump files
# --------------------------------------------------------------------------
lammps_exe_path = '/home/leila/Downloads/mylammps/src/lmp_mpi'
pot_path = './lammps_dump/'       # the path for the potential
dump_path = './lammps_dump/test/'
pkl_file = './tests/data/gb_attr.pkl'
initial_dump = 'tests/data/dump.3'  # the name of the dump file that
# --------------------------------------------------------------------------
# Create lammps dump file for pkl file
# --------------------------------------------------------------------------
# box_bound, dump_lamp, box_type = ldw.lammps_box(lat_par, pkl_file)  # lammps creates from the pkl file
# ldw.write_lammps_dump(initial_dump, box_bound, dump_lamp, box_type)  # writing the dump file
# --------------------------------------------------------------------------
# Define the path to dump files
# --------------------------------------------------------------------------
filename_0 = dump_path + 'dump.0'  # the output of previous step
# Initial minimization: write the in.min script, run it, create dump_minimized.
fil_name = 'in.min'
lsw.run_lammps_min(initial_dump, fil_name, pot_path, lat_par, tol_fix_reg, lammps_exe_path,\
                   filename_0, step=1, Etol=1e-9, Ftol=1e-9, MaxIter=5000, MaxEval=10000)
# Grand-canonical Monte-Carlo loop over atom removal/insertion at the GB.
iter = 2  # NOTE(review): shadows the builtin `iter`; exclusive upper bound on MC steps
ff = open('output', 'w')  # log of attempted moves
for i in range(1, iter, 1):
    print(i)
    # read the data
    data_0 = uf.compute_ovito_data(filename_0)
    non_p = uf.identify_pbc(data_0)
    # find the gb atoms
    GbRegion, GbIndex, GbWidth, w_bottom_SC, w_top_SC = pdf.GB_finder(data_0, lat_par, non_p, str_alg, csc_tol)
    # decide between remove and insertion
    choice = uf.choos_rem_ins()
    # --------------------------------------------------------------------------
    # If the choice is removal
    # --------------------------------------------------------------------------
    if choice == "removal":
        # Pick an atom to delete, weighted by its removal probability.
        p_rm = uf.RemProb(data_0, CohEng, GbIndex)
        ID2change = uf.RemIns_decision(p_rm)
        ff.write(filename_0 + '\n')
        ff.write(str(i) + ' ' + choice + ' ' + str(GbIndex[ID2change]) )
        ff.write('\n')
        print(GbIndex[ID2change])
        var2change = data_0.particles['Particle Identifier'][GbIndex[ID2change]]
        uf.atom_removal(filename_0, dump_path , GbIndex[ID2change], var2change)
        fil_name = 'in.min' # the initila minimization lammps script write the in.min script and run it and create dump_minimized
        filename_rem = dump_path + 'rem_dump'
        copyfile(filename_rem, dump_path + 'rem/rem_dump_' + str(i))
        # NOTE(review): tol_fix_reg is referenced here but its definition above is commented out — confirm it is defined.
        lsw.run_lammps_min(filename_rem, fil_name, pot_path, lat_par, tol_fix_reg, lammps_exe_path, dump_path + 'dump.' + str(i), Etol=1e-9, Ftol=1e-9, MaxIter=5000, MaxEval=10000)
        filename_1 = dump_path + 'dump.' + str(i)
        data_1 = uf.compute_ovito_data(filename_1)
        SC_boolean = uf.check_SC_reg(data_1, lat_par, rCut, non_p, tol_fix_reg, SC_tol, str_alg, csc_tol)
        # Sanity checks: the removed atom must have been a GB (non-crystalline)
        # atom and the single-crystal regions must remain intact.
        if str_alg == "ptm":
            assert data_0.particles['Structure Type'][GbIndex[ID2change]] !=1
        else:
            assert data_0.particles['c_csym'][GbIndex[ID2change]] > .1
        assert SC_boolean == [True, True]
        # Metropolis acceptance on the GB energy change.
        E_1 = uf.cal_GB_E(data_1, weight_1, non_p, lat_par, CohEng, str_alg, csc_tol)  # after removal
        E_0 = uf.cal_GB_E(data_0, weight_1, non_p, lat_par, CohEng, str_alg, csc_tol)
        dE = E_1 - E_0
        if dE < 0:
            decision = "accept"
            print("finally accepted in removal")
        else:
            area = uf.cal_area(data_1, non_p)
            p_boltz = uf.p_boltz_func(dE, area, Tm)
            decision = uf.decide(p_boltz)
            if decision == "accept":
                print("accepted in botlzman removal")
                print(GbIndex[ID2change])
        copyfile(filename_1, dump_path + 'accepted/dump.' + str(i))
        filename_0 = filename_1
    # --------------------------------------------------------------------------
    # If the choice is insertion
    # --------------------------------------------------------------------------
    else:
        ff.write(filename_0 + '\n' )
        # Voronoi-vertex candidates for the insertion site, weighted by radius.
        pts_w_imgs, gb1_inds, inds_arr = pdf.pad_dump_file(data_0, lat_par, rCut, non_p, str_alg, csc_tol)
        tri_vertices, gb_tri_inds = vvp.triang_inds(pts_w_imgs, gb1_inds, inds_arr)
        cc_coors, cc_rad = vvp.vv_props(pts_w_imgs, tri_vertices, gb_tri_inds, lat_par)
        cc_coors1 = vvp.wrap_cc(data_0.cell, cc_coors)
        Prob = uf.radi_normaliz(cc_rad)
        ID2change = uf.RemIns_decision(Prob)
        pos_add_atom = cc_coors[ID2change]
        atom_id = np.max(data_0.particles['Particle Identifier']+1)
        uf.atom_insertion(filename_0, dump_path, pos_add_atom, atom_id)
        ff.write(str(i) + ' ' + choice + ' ' + str(pos_add_atom) )
        ff.write('\n')
        fil_name = 'in.min' # the initila minimization lammps script write the in.min script and run it and create dump_minimized
        filename_ins = dump_path + 'ins_dump'
        copyfile(filename_ins, dump_path + 'ins/ins_dump_' + str(i))
        lsw.run_lammps_min(filename_ins, fil_name, pot_path, lat_par, tol_fix_reg, lammps_exe_path, dump_path + 'dump.' + str(i), Etol=1e-9, Ftol=1e-9, MaxIter=5000, MaxEval=10000)
        filename_1 = dump_path + 'dump.' + str(i)
        data_1 = uf.compute_ovito_data(filename_1)
        SC_boolean = uf.check_SC_reg(data_1, lat_par, rCut, non_p, tol_fix_reg, SC_tol, str_alg, csc_tol)
        assert SC_boolean == [True, True]
        # Metropolis acceptance on the GB energy change.
        E_1 = uf.cal_GB_E(data_1, weight_1, non_p, lat_par, CohEng, str_alg, csc_tol)  # after removal
        E_0 = uf.cal_GB_E(data_0, weight_1, non_p, lat_par, CohEng, str_alg, csc_tol)
        dE = E_1 - E_0
        if dE < 0:
            decision = "accept"
        else:
            area = uf.cal_area(data_1, non_p)
            p_boltz = uf.p_boltz_func(dE, area, Tm)
            decision = uf.decide(p_boltz)
            if decision == "accept":
                copyfile(filename_1, dump_path + 'accepted/dump.' + str(i))
        filename_0 = filename_1
ff.close()
|
<gh_stars>0
import pyautogui
from time import sleep
import pyperclip
from datetime import datetime
# Global 1-second pause between every pyautogui call (in addition to the
# explicit sleep() calls below).
pyautogui.PAUSE = 1
def tres_esq():
    """Press the left-arrow key three times, pausing 0.6 s after each press."""
    for _ in range(3):
        pyautogui.press('left')
        sleep(0.6)
def tres_dir():
    """Press the right-arrow key three times, pausing 0.5 s after each press."""
    for _ in range(3):
        pyautogui.press('right')
        sleep(0.5)
def dir_dois():
    """Press the right-arrow key twice, pausing 0.5 s after each press."""
    for _ in range(2):
        pyautogui.press('right')
        sleep(0.5)
def dois_b():
    """Press the down-arrow key twice, pausing 0.5 s after each press."""
    for _ in range(2):
        pyautogui.press('down')
        sleep(0.5)
def quatro_b():
    """Press the down-arrow key four times, pausing 0.5 s after each press."""
    for _ in range(4):
        pyautogui.press('down')
        sleep(0.5)
def esq_dois():
    """Press the left-arrow key twice, pausing 0.5 s after each press."""
    for _ in range(2):
        pyautogui.press('left')
        sleep(0.5)
def exl_or():
    """Delete five characters (0.2 s pauses, 0.1 s after the last) then jump to end of line."""
    for _ in range(4):
        pyautogui.press('delete')
        sleep(0.2)
    pyautogui.press('delete')
    sleep(0.1)
    pyautogui.press('end')
# --- Session state and month-folder paths for the service-order workflow ---
nordem = int(input('Em qual número a última Ordem parou: '))  # last used order number
ordem = ''
local = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021'
nome_ordem = 'NOVA ORDEM DE SERVIÇO'  # template file searched for in Explorer
local_excel = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\NOVEMBRO\ORDEM SERVIÇO ORIGINAL.xlsx'
nordito = 0
nordito_final = ''
dia = str(datetime.today().strftime(r'%d-%m-%Y'))  # today's date, dd-mm-yyyy
nordito = nordem + 1
nordito_final = str(nordito)
cliente = ''
window = 1
janela = 0  # 0 = Explorer not yet opened this session
telefone = ''
equip = ''
model = ''
snid = ''
acessorio = ''
nordem_str = ''
mes = str(datetime.today().strftime(r'%m'))  # current month number, '01'..'12'
jan = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\JANEIRO'
# BUGFIX: fev previously pointed at the JANEIRO folder (copy-paste error).
fev = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\FEVEREIRO'
mar = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\MARÇO'
apr = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\ABRIL'
mai = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\MAIO'
jun = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\JUNHO'
jul = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\JULHO'
ago = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\AGOSTO'
sep = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\SETEMBRO'
out = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\OUTUBRO'
nov = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\NOVEMBRO'
dez = r'C:\Users\Escritório\Documents\ORDENS DE SERVIÇO\2021\DEZEMBRO'
def _abrir_pasta_mes(espera_colar, espera_abrir, excecoes=None):
    """Open the current month's folder in Explorer by pasting its path.

    espera_colar: delay (s) between copying the path and pasting it.
    espera_abrir: default delay (s) after pressing Enter.
    excecoes: optional {mes: delay} overrides, preserving the original
    per-month timings of the caller.
    """
    pastas = {'01': jan, '02': fev, '03': mar, '04': apr, '05': mai,
              '06': jun, '07': jul, '08': ago, '09': sep, '10': out,
              '11': nov, '12': dez}
    pasta = pastas.get(mes)
    if pasta is None:
        return
    pyperclip.copy(pasta)
    sleep(espera_colar)
    pyautogui.hotkey('ctrl', 'v')
    pyautogui.press('Enter')
    sleep((excecoes or {}).get(mes, espera_abrir))

# NOTE(review): an empty reply also satisfies `ordem in 'SIMSsimsyyesYES'`
# ('' is a substring of any string); kept as-is to preserve behavior.
while ordem != 'nao':
    nordito = nordem + 1
    nordem = nordito
    nordem_str = str(nordem)
    ordem = str(input('Quer lançar uma ordem de serviço: '))
    if ordem in 'SIMSsimsyyesYES':
        if janela < 1:
            # First order of the session: open Explorer and locate the
            # template file via Ctrl+F.
            pyautogui.press('win')
            _abrir_pasta_mes(1.2, 1.7)
            pyautogui.hotkey('ctrl', 'f')
            sleep(0.9)
            pyperclip.copy(nome_ordem)
            sleep(0.9)
            pyautogui.hotkey('ctrl', 'v')
            sleep(1.2)
            pyautogui.press('enter')
            sleep(2.1)
            pyautogui.press('tab')
            sleep(1.2)
            pyautogui.press('down')
            pyautogui.press('enter')
            pyautogui.alert('Clique em OK quando aberto')
            sleep(0.9)
            nordito_final = str(nordito)
            pyautogui.write(nordito_final)
            sleep(1.2)
            tres_esq()
            sleep(1)
            pyautogui.write(dia)
            sleep(1.2)
            dir_dois()
            cliente = pyautogui.prompt('Qual o nome do cliente. Não use acentos!: ')
            sleep(1)
            pyautogui.write(cliente)
            sleep(1)
            dois_b()
            telefone = str(pyautogui.prompt('Caso não tenha o telefone, digite 0'))
            sleep(1.1)
            # BUGFIX: telefone is a str; the original compared it to the int 0,
            # which is never equal, so the "no phone" branch never ran.
            if telefone != '0':
                pyautogui.write(telefone)
                sleep(0.8)
                dois_b()
            else:
                quatro_b()
            sleep(1)
            equip = str(pyautogui.prompt('Equipamento: '))
            sleep(1)
            pyautogui.write(equip)
            sleep(1)
            pyautogui.press('right')
            model = pyautogui.prompt('Modelo: ')
            sleep(1)
            pyautogui.write(model)
            sleep(1)
            pyautogui.press('right')
            snid = pyautogui.prompt('SNID | SN')
            sleep(1)
            pyautogui.write(snid)
            sleep(0.8)
            pyautogui.press('down')
            sleep(1)
            esq_dois()
            acessorio = pyautogui.prompt('Algum acessorio: ')
            sleep(1)
            pyautogui.write(acessorio)
            sleep(1)
            # Print the order (Alt+A menu), then save a copy named after the
            # client and the order number.
            pyautogui.hotkey('alt', 'a')
            sleep(0.9)
            pyautogui.press('p')
            pyautogui.alert('quando impresso clique em OK!')
            sleep(3.5)
            pyautogui.hotkey('alt', 'a')
            sleep(1.2)
            pyautogui.press('a')
            sleep(1)
            pyautogui.press('y')
            sleep(1)
            pyautogui.press('2')
            sleep(2)
            pyautogui.press('left')
            sleep(0.9)
            exl_or()
            sleep(0.9)
            pyautogui.press('space')
            sleep(0.7)
            pyautogui.write(cliente)
            sleep(1)
            pyautogui.press('space')
            pyautogui.write(nordem_str, interval=0.15)
            sleep(1)
            pyautogui.press('enter')
            janela = window + 1
        elif janela > 1:
            # Subsequent orders: same sequence with the original (longer)
            # timings of this branch.
            pyautogui.press('win')
            sleep(1.4)
            _abrir_pasta_mes(1.5, 2, {'01': 2.5, '02': 2.5, '04': 1.5})
            # NOTE(review): unlike the first branch there is no Ctrl+F /
            # template-name copy here — whatever is still on the clipboard is
            # pasted again; confirm this is intended.
            pyautogui.hotkey('ctrl', 'v')
            sleep(1.5)
            pyautogui.press('enter')
            pyautogui.alert('Clique em OK quando aberto')
            sleep(0.9)
            nordito_final = str(nordito)
            pyautogui.write(nordito_final)
            sleep(1.5)
            tres_esq()
            sleep(1.3)
            pyautogui.write(dia)
            sleep(1.5)
            dir_dois()
            cliente = pyautogui.prompt('Qual o nome do cliente: ')
            sleep(1)
            pyautogui.write(cliente)
            sleep(1.5)
            dois_b()
            telefone = str(pyautogui.prompt('Caso não tenha o telefone, digite 0'))
            # BUGFIX: same str-vs-int comparison as in the first branch.
            if telefone != '0':
                pyautogui.write(telefone)
                sleep(0.9)
                dois_b()
            else:
                quatro_b()
            sleep(1.3)
            equip = str(pyautogui.prompt('Equipamento: '))
            sleep(1)
            pyautogui.write(equip)
            sleep(1.3)
            pyautogui.press('right')
            model = pyautogui.prompt('Modelo: ')
            sleep(1.3)
            pyautogui.write(model)
            sleep(1.3)
            pyautogui.press('right')
            snid = pyautogui.prompt('SNID | SN')
            sleep(1.3)
            pyautogui.write(snid)
            sleep(1)
            pyautogui.press('down')
            sleep(1.3)
            pyautogui.press('left')
            acessorio = pyautogui.prompt('Algum acessorio: ')
            sleep(1.3)
            pyautogui.write(acessorio)
            sleep(1.3)
            pyautogui.hotkey('alt', 'a')
            sleep(1.2)
            pyautogui.press('p')
            pyautogui.alert('Quando Impresso Clique em OK!')
            sleep(3.7)
            pyautogui.hotkey('alt', 'a')
            sleep(1.5)
            pyautogui.press('a')
            sleep(1.3)
            pyautogui.press('y')
            sleep(1.3)
            pyautogui.press('2')
            sleep(2.5)
            pyautogui.press('left')
            sleep(1)
            exl_or()
            sleep(0.7)
            pyautogui.press('space')
            sleep(0.7)
            pyautogui.write(cliente)
            sleep(1)
            pyautogui.press('space')
            pyautogui.write(nordem_str, interval=0.15)
            sleep(1)
            pyautogui.press('enter')
            janela = window + 1
pyautogui.alert('A ultima ordem foi: {}'.format(nordem - 1))
<filename>LightFields/xmlFiles/generateXMLFiles.py
import xml.etree.ElementTree as etree
import xml.dom.minidom
import subprocess
import os
import imageio
import h5py
import numpy as np
def createXMLstring(filename, scaleVal, cameraPosX, cameraPosY):
    """Build a pretty-printed Mitsuba scene XML string.

    The perspective camera sits at x=5 with the given y/z position strings,
    looking at the origin; *filename*.obj is loaded, scaled by *scaleVal* and
    rotated 60 degrees about the y axis.
    """
    root = etree.Element("scene", version="0.5.0")
    cam = etree.SubElement(root, "sensor", type="perspective")
    cam_to_world = etree.SubElement(cam, "transform", name="toWorld")
    origin = str(5) + "," + cameraPosX + "," + cameraPosY
    etree.SubElement(cam_to_world, "lookat", origin=origin, target="0,0,0", up="0,1,0")
    sampler = etree.SubElement(cam, "sampler", type="ldsampler")
    etree.SubElement(sampler, "integer", name="sampleCount", value="128")
    film = etree.SubElement(cam, "film", type="ldrfilm")
    etree.SubElement(film, "boolean", name="banner", value="false")
    etree.SubElement(film, "integer", name="width", value="400")
    etree.SubElement(film, "integer", name="height", value="400")
    mesh = etree.SubElement(root, "shape", type="obj")
    etree.SubElement(mesh, "string", name="filename", value=filename + ".obj")
    mesh_to_world = etree.SubElement(mesh, "transform", name="toWorld")
    etree.SubElement(mesh_to_world, "scale", value=scaleVal)
    etree.SubElement(mesh_to_world, "rotate", angle="60", y="1")
    raw = etree.tostring(root, "utf-8")
    return xml.dom.minidom.parseString(raw).toprettyxml(indent=" " * 4)
def create_h5(data, label, path, file_name):
    """Write *data* and *label* as "data"/"label" datasets in an HDF5 file at path/file_name."""
    target = os.path.join(path, file_name)
    with h5py.File(target, 'w') as h5:
        h5.create_dataset("data", data=data)
        h5.create_dataset("label", data=label)
# Mesh names and their per-mesh render scales (parallel lists).
filenames = ["airboat","al","alfa147","cessna","cube","diamond","dodecahedron","gourd","humanoid_quad","humanoid_tri","icosahedron","lamp","magnolia","minicooper","octahedron","power_lines","roi","sandal","shuttle","skyscraper","slot_machine","teapot","tetrahedron","violin_case"]
scaleVal = [0.5,0.5,0.01,0.08,0.5,0.01,0.5,0.5,0.1,0.1,0.5,0.2,0.025,0.01,0.5,0.07,0.02,0.2,0.1,0.03,0.1,0.01,0.5,0.5]
index = 0
# Base camera position; a 5x5 grid of views is generated by offsetting the
# last two coordinates in steps of deltaCam.
cameraPosOrigin = [5,1,-3]
deltaCam = 0.1
hr_image = []  # all 25 views per mesh (labels)
lr_image = []  # only the central (0,0) view per mesh (inputs)
destination_path = "/home/sudarshan/git/OptimizationDeepLearningImageProcessing/LightFields/h5Files/"
dataset_name = "generatedLightFields"
for filename in filenames:
    HRindex = 0
    # Render each view with Mitsuba, collect frames into an animated GIF.
    with imageio.get_writer(filename+"/"+filename+".gif", mode='I') as writer:
        for indx in range(-2,3):
            for indy in range(-2,3):
                cwd = os.getcwd()
                directory = cwd+"/"+filename+"/"
                if not os.path.exists(directory):
                    os.makedirs(directory)
                cameraPos = [5, cameraPosOrigin[1]+indx*deltaCam,cameraPosOrigin[2]+indy*deltaCam]
                XMLstring = createXMLstring(filename,str(scaleVal[index]),str(cameraPos[1]),str(cameraPos[2]))
                with open(directory+filename+str(indx)+str(indy)+".xml", "w") as cube_xml:
                    cube_xml.write(XMLstring)
                # NOTE(review): assumes the `mitsuba` renderer is on PATH and
                # writes a .png next to the .xml — confirm for the local setup.
                cmd = ["mitsuba", filename+"/"+filename+str(indx)+str(indy)+".xml"]
                cmd_out = subprocess.check_output(cmd)
                image = imageio.imread(filename+"/"+filename+str(indx)+str(indy)+".png")
                hr_image.append(np.asarray(image))
                HRindex = HRindex+1
                if indx == 0 and indy == 0:
                    lr_image.append(np.asarray(image))
                writer.append_data(image)
    print(["Completed index: "+str(index)])
    index = index+1
create_h5(data = lr_image, label = hr_image, path = destination_path, file_name = dataset_name+"training.h5")
print("data of length ", len(lr_image), "and label of length ", len(hr_image))
<gh_stars>0
import pickle
import numpy as np
from pprint import pprint
import cv2
import matplotlib.pyplot as plt
from itertools import combinations
from slam.utils import visualize2d, to_gridmap
from collections import defaultdict
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
class Plane:
    """A planar patch extracted from a point cloud.

    Holds the plane normal ``e`` and offset ``d``, the 2-D convex-hull margin
    points ``margin2d``, plus the ``scaler`` and PCA components used to map
    between the 2-D and 3-D frames.
    """
    def __init__(self, e, d, margin2d, scaler, pca_comps):
        self.e, self.d = e, d
        self.margin2d = margin2d
        self.scaler, self.pca_comps = scaler, pca_comps
# Load the serialized SLAM session (keyframes, map points, plane fits, depth
# clouds and plane segmentations).
with open("/home/slam_data/data_sets1/temp.pickle", 'rb') as conn:
    kf_ids_from_mps, kf_ids, mp_3dpts, kf_3dpts, kf_ids_from_planes, plane_params, frame_ids, clouds, plane_segs = pickle.load(conn)
kf_ids = kf_ids.flatten()
frame_ids = frame_ids.flatten()
kf_ids_from_mps = kf_ids_from_mps.flatten()
kf_ids_from_planes = kf_ids_from_planes.flatten()
plane_segs = plane_segs.reshape((-1, 480, 640))  # one 480x640 label image per keyframe
clouds = clouds.reshape((-1, len(kf_ids), 4)).transpose([1, 0, 2])
clouds = clouds[:, :, :3]  # drop the homogeneous coordinate
plane_params = plane_params.reshape((-1, 4))
dict_kfs = {i: pt for i, pt in zip(kf_ids, kf_3dpts)}
# Group plane parameters and map points by their owning keyframe id.
dict_params = defaultdict(list)
for id_kf, param in zip(kf_ids_from_planes, plane_params):
    dict_params[id_kf].append(param)
dict_mps = defaultdict(list)
for id_kf, mp in zip(kf_ids_from_mps, mp_3dpts):
    dict_mps[id_kf].append(mp)
pca = PCA(n_components=2)
scaler = StandardScaler()
for i, (id_kf, cloud) in enumerate(zip(kf_ids, clouds)):
    print(11111111)  # debug marker
    mps = np.concatenate(dict_mps[id_kf])
    # Compare the spread of the map points vs the depth cloud along the line
    # connecting their centroids.
    scaler.fit_transform(mps)
    m0, s0 = scaler.mean_, scaler.scale_
    scaler.fit_transform(cloud)
    m1, s1 = scaler.mean_, scaler.scale_
    d = m1 - m0
    nd = np.linalg.norm(d)
    de = d / nd
    d1 = np.abs(np.dot(s0, de))
    d2 = np.abs(np.dot(s1, de))
    print(d1 < nd, d2 < nd, d1 , nd, d2)
    continue
    # NOTE(review): everything below in this loop is unreachable because of
    # the `continue` above (debugging leftover?) — confirm before removing.
    params = dict_params[id_kf]
    seg = plane_segs[i, :, :].flatten()
    masks = [seg == j + 1 for j in range(len(params))]
    planes = []
    for mask in masks:
        # Fit a plane to each segmented region: PCA gives the in-plane axes,
        # their cross product the normal; the hull bounds the patch in 2-D.
        scaler = StandardScaler()
        cloud1 = cloud[mask]
        cloud1 = scaler.fit_transform(cloud1)
        pca.fit(cloud1)
        cloud1 = pca.transform(cloud1)
        hull = ConvexHull(cloud1)
        e = np.cross(pca.components_[0, :], pca.components_[1, :])
        d = -np.dot(e, scaler.mean_)
        inds = set()
        for simplex in hull.simplices:
            inds.update(simplex)
        inds = list(inds)
        margin2d = cloud1[inds,:]
        margin3d = np.matmul(margin2d, pca.components_)
        # BUGFIX: the axis loop previously reused `i`, clobbering the outer
        # enumerate index that is read below (kf_3dpts[i]).
        for ax in range(3):
            margin3d[:, ax] *= scaler.scale_[ax]
            margin3d[:, ax] += scaler.mean_[ax]
        planes.append(Plane(e, d, margin2d, scaler, pca.components_))
    mps = np.concatenate(dict_mps[id_kf])
    kf = kf_3dpts[i, :][0, :]
    for plane in planes:
        # Clip map-point rays from the keyframe against each plane patch.
        eb = np.dot(kf, plane.e)
        d = plane.d
        ea = np.matmul(mps, plane.e)
        lambdas = (d - eb) / (ea - eb)
        mask = np.logical_and(0 <= lambdas, lambdas < 1)
        if np.sum(mask) > 0:
            print(np.average(mask))
            # TODO izmanto convex hull
            kf1 = np.stack([kf] * np.sum(mask))
            mps[mask, :] = kf1 * lambdas[mask] + mps[mask, :] * (1 - lambdas[mask])
    dict_mps[id_kf] = mps
# Flatten everything back out and render the occupancy grid map.
kfs, mps = [], []
for id_kf, mps1 in dict_mps.items():
    for mp in mps1:
        mps.append(mp)
        kfs.append(kf_3dpts[id_kf])
mps = np.stack(mps)
kfs = np.concatenate(kfs)
print(mps.shape, kfs.shape)
img = to_gridmap(kfs, mps)
plt.imshow(img)
plt.show()
from functionality.shared_functions import create_event_tree, create_type_tree, add_event_to_file, turn_types_to_string
from types import TracebackType
from Event import Event
from parse.match import parse_period
from functionality.create_event_type import create_event_type
from functionality.distance import get_distance
from datetime import datetime, timedelta
from parse.match import parse_period24
def check_complete(start, start_date, end, end_date, array):
    """Append both dates to *array* and return True when both parse flags are set.

    Args:
        start: truthy when the start date was successfully parsed
        start_date: the parsed start date
        end: truthy when the end date was successfully parsed
        end_date: the parsed end date
        array: list that receives [start_date, end_date] on success (mutated)
    Returns:
        True when both flags are set (dates appended), otherwise False.
    """
    if not (start and end):
        return False
    print("Both date objects created")
    array.extend((start_date, end_date))
    return True
async def add_event(ctx, client):
    """
    Function:
        add_event
    Description:
        Walks a user through the event creation process over DM: name, start
        and end dates (12- or 24-hour), priority, type, location (with an
        optional travel-time block) and description, then writes the event to
        the user's calendar file.
    Input:
        ctx - Discord context window
        client - Discord bot user
    Output:
        - A new event added to the user's calendar file
        - A message sent to the context saying an event was successfully created
    """
    channel = await ctx.author.create_dm()

    def check(m):
        # Only accept replies from the original author inside this DM channel.
        return m.content is not None and m.channel == channel and m.author == ctx.author

    event_array = []
    await channel.send("Lets add an event!\n" + "First give me the name of your event:")
    event_msg = await client.wait_for("message", check=check)  # Waits for user input
    event_msg = event_msg.content  # Strips message to just the text the user entered
    event_array.append(event_msg)
    await channel.send(
        "Now give me the start & end dates for you event. "
        + "You can use 12-hour formatting or 24-hour formatting\n\n"
        + "Here is the format you should follow (Start is first, end is second):\n"
        + "mm/dd/yy hh:mm am/pm mm/dd/yy hh:mm am/pm (12-hour formatting)\n"
        + "Or mm/dd/yy hh:mm mm/dd/yy hh:mm (24-hour formatting)"
    )
    event_dates = False
    # BUGFIX: the retry counter was previously re-initialized inside the loop,
    # so the "give up after 3 bad attempts" branch could never trigger.
    flag = 0
    # A loop that keeps running until a user enters correct start and end dates for their event following the required format
    # Adds start and end dates to the array if both are valid
    while not event_dates:
        msg_content = ""
        start_complete = False
        end_complete = True
        if ctx.message.author != client.user:
            # Waits for user input
            event_msg = await client.wait_for("message", check=check)
            # Strips message to just the text the user entered
            msg_content = event_msg.content
            # 12-hour format when an am/pm marker is present.
            if msg_content.__contains__("am") or msg_content.__contains__("pm") or msg_content.__contains__("AM") or msg_content.__contains__("PM"):
                try:
                    parse_result = parse_period(msg_content)
                except Exception as e:
                    await channel.send(
                        "Looks like "
                        + str(e)
                        + ". Please re-enter your dates.\n"
                        + "Here is the format you should follow (Start is first, end is second):\n"
                        + "mm/dd/yy hh:mm am/pm mm/dd/yy hh:mm am/pm"
                    )
                    start_complete = False
                    continue
                start_complete = True
                start_date = parse_result[0]
                end_date = parse_result[1]
                # If both datetime objects were successfully created, they get appended to the list and exits the while loop
                if not (event_dates := check_complete(start_complete, start_date, end_complete, end_date, event_array)):
                    # If both objects were unsuccessfully created, the bot notifies the user and the loop starts again
                    await channel.send(
                        "Make sure you follow this format(Start is first, end is second): mm/dd/yy hh:mm am/pm mm/dd/yy hh:mm am/pm"
                    )
                    msg_content = ""
            # 24hr format
            else:
                try:
                    parse_result = parse_period24(msg_content)
                except Exception as e:
                    await channel.send(
                        "Looks like "
                        + str(e)
                        + ". Please re-enter your dates.\n"
                        + "Here is the format you should follow (Start is first, end is second):\n"
                        + "mm/dd/yy hh:mm mm/dd/yy hh:mm "
                    )
                    start_complete = False
                    continue
                start_complete = True
                start_date = parse_result[0]
                end_date = parse_result[1]
                # If both datetime objects were successfully created, they get appended to the list and exits the while loop
                if not (event_dates := check_complete(start_complete, start_date, end_complete, end_date, event_array)):
                    # If both objects were unsuccessfully created, the bot notifies the user and the loop starts again
                    flag += 1
                    if flag > 3:
                        await channel.send(
                            "unable to create event due to incorrect time format"
                        )
                        return
                    await channel.send(
                        "Make sure you follow this format(Start is first, end is second): mm/dd/yy hh:mm mm/dd/yy hh:mm"
                    )
                    msg_content = ""
    # A loop to error check when user enters priority value
    event_priority_set = False
    while not event_priority_set:
        await channel.send(
            "How important is this event? Enter a number between 1-5.\n\n" +
            "5 - Highest priority.\n" +
            "4 - High priority.\n" +
            "3 - Medium priority.\n" +
            "2 - Low priority.\n" +
            "1 - Lowest priority.\n"
        )
        event_msg = await client.wait_for("message", check=check)  # Waits for user input
        event_msg = event_msg.content  # Strips message to just the text the user entered
        try:
            if 1 <= int(event_msg) <= 5:
                event_array.append(event_msg)
                event_priority_set = True  # if entered value is in the range, loop exits
            else:
                await channel.send(
                    "Please enter a number between 1-5\n")
        except:
            await channel.send(
                "Please enter a number between 1-5\n")  # Handles when user enters non numeric entries
            continue
    create_type_tree(str(ctx.author.id))
    output = turn_types_to_string(str(ctx.author.id))
    await channel.send(
        "Tell me what type of event this is. Here are a list of event types I currently know:\n" + output
    )
    event_msg = await client.wait_for("message", check=check)  # Waits for user input
    event_msg = event_msg.content  # Strips message to just the text the user entered
    await create_event_type(ctx, client, event_msg)  # Running event_type creation subroutine
    event_array.append(event_msg)
    await channel.send(
        "What is the location of the event?(Type None for no location/online)"
    )
    event_msg = await client.wait_for("message", check=check)  # Waits for user input
    event_msg = event_msg.content  # Strips message to just the text the user entered
    event_array.append(event_msg)
    dest = event_msg
    print(dest)
    if event_msg != 'None':
        # Optionally block out travel time ending at the event's start.
        await channel.send(
            "Do you want to block travel time for this event?(Yes/No)"
        )
        event_msg = await client.wait_for("message", check=check)  # Waits for user input
        travel_flag = event_msg.content
        if travel_flag == 'Yes':
            await channel.send(
                "Enter exact string out of following modes:[DRIVING, WALKING, BICYCLING, TRANSIT])"
            )
            event_msg = await client.wait_for("message", check=check)  # Waits for user input
            mode = event_msg.content
            await channel.send(
                "Enter source address"
            )
            event_msg = await client.wait_for("message", check=check)  # Waits for user input
            src = event_msg.content
            travel_time = get_distance(dest, src, mode)  # seconds of travel, per get_distance
            end = event_array[1]  # the event's start date; travel ends when the event begins
            strt = (end - timedelta(seconds=travel_time))
            current = Event("Travel", strt, end, "1", "", "", "")
            await channel.send("Your Travel event was successfully created!")
            create_event_tree(str(ctx.author.id))
            add_event_to_file(str(ctx.author.id), current)
    await channel.send("Any additional description you want me to add about the event? If not, enter 'done'")
    event_msg = await client.wait_for("message", check=check)  # Waits for user input
    event_msg = event_msg.content  # Strips message to just the text the user entered
    if event_msg.lower() == "done":
        event_array.append("")
    else:
        event_array.append(event_msg)
    # Tries to create an Event object from the user input
    try:
        # NOTE(review): indices 5 (location) and 6 (description) are passed in
        # swapped positions — confirm against Event's constructor signature.
        current = Event(event_array[0], event_array[1], event_array[2], event_array[3], event_array[4], event_array[6], event_array[5])
        await channel.send("Your event was successfully created!")
        create_event_tree(str(ctx.author.id))
        add_event_to_file(str(ctx.author.id), current)
    except Exception as e:
        # Outputs an error message if the event could not be created
        print(e)
        # BUGFIX: types.TracebackType has no print_exc(); use the traceback module.
        import traceback
        traceback.print_exc()
        await channel.send(
            "There was an error creating your event. Make sure your formatting is correct and try creating the event again."
        )
|
<reponame>mheidir/BlueCatSG-SplunkApp-UnOfficial<gh_stars>1-10
import os
import subprocess
import tempfile
import warnings

from api_exception import api_exception
"""
Various functions for peforming dynamic DNS operations via nsupdate. There are Python modules to do this directly
but it's not clear how well debugged these are hence sticking to running nsupdate.
"""
# Ignore warnings about tmpnam being potentially insecure, this is reasonable given the environment this code will run in.
warnings.filterwarnings('ignore', 'tmpnam')
"""Run an nsupdate command file optionally using a TSIG key.
:param command_file: The name of a file containing some nsupdate commands.
:param tsig_key_file: The name of TSIG key file (can be None).
"""
def run_nsupdate(command_file, tsig_key_file=None):
try:
if tsig_key_file is not None:
subprocess.check_output(['nsupdate', '-k', tsig_key_file, '-v', command_file], stderr=subprocess.STDOUT,
shell=False)
else:
subprocess.check_output(['nsupdate', '-v', command_file], stderr=subprocess.STDOUT, shell=False)
os.unlink(command_file)
except subprocess.CalledProcessError as e:
os.unlink(command_file)
raise api_exception('nsupdate failed:' + e.output.strip())
"""Dynamically create a host record.
:param type: the type of host record ('a' or 'aaaa')
:param server_ip: the IP address of the DNS server on which to create the record.
:param name: the name of the new record to create.
:param address: the address of the new record to create.
:param ttl: the TTL of the new record to create.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def create_host_record(type, server_ip, name, addr, ttl, tsig_key_file=None):
fn = os.tmpnam()
f = open(fn, 'w')
f.write('server %s\n' % server_ip)
f.write('update add %s %s %s %s\n' % (name, ttl, type, addr))
f.write('send\n')
f.close()
run_nsupdate(fn, tsig_key_file)
"""Dynamically update a host record.
:param type: the type of host record ('a' or 'aaaa')
:param server_ip: the IP address of the DNS server on which to update the record.
:param name: the name of the record to update.
:param address: the new address of the record.
:param ttl: the new TTL of the record to update.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def update_host_record(type, server_ip, name, addr, ttl, tsig_key_file=None):
fn = os.tmpnam()
f = open(fn, 'w')
f.write('server %s\n' % server_ip)
# delete then add rather than just update to cope with records that don't already exist
f.write('update delete %s a\n' % name)
f.write('update add %s %s %s %s\n' % (name, ttl, type, addr))
f.write('send\n')
f.close()
run_nsupdate(fn, tsig_key_file)
"""Dynamically delete a host record.
:param type: the type of host record ('a' or 'aaaa')
:param server_ip: the IP address of the DNS server on which to delete the record.
:param name: the name of the record to delete.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def delete_host_record(type, server_ip, name, tsig_key_file):
fn = os.tmpnam()
f = open(fn, 'w')
f.write('server %s\n' % server_ip)
f.write('update delete %s %s\n' % (name, type))
f.write('send\n')
f.close()
run_nsupdate(fn, tsig_key_file)
"""Dynamically update an A record.
:param server_ip: the IP address of the DNS server on which to update the record.
:param name: the name of the record to update.
:param address: the new address of the record to update.
:param ttl: the new TTL of the record to update.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def update_a(server_ip, name, addr, ttl, tsig_key_file=None):
update_host_record('a', server_ip, name, addr, ttl, tsig_key_file)
"""Dynamically update an AAAA record.
:param server_ip: the IP address of the DNS server on which to update the record.
:param name: the name of the record to update.
:param address: the new address of the record to update.
:param ttl: the new TTL of the record to update.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def update_aaaa(server_ip, name, tsig_key_file=None):
update_host_record('aaaa', server_ip, name, addr, ttl, tsig_key_file)
"""Dynamically create an A record.
:param server_ip: the IP address of the DNS server on which to create the record.
:param name: the name of the new record to create.
:param address: the address of the new record to create.
:param ttl: the TTL of the new record to create.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def create_a(server_ip, name, addr, ttl, tsig_key_file=None):
create_host_record('a', server_ip, name, addr, ttl, tsig_key_file)
"""Dynamically create an AAAA record.
:param server_ip: the IP address of the DNS server on which to create the record.
:param name: the name of the new record to create.
:param address: the address of the new record to create.
:param ttl: the TTL of the new record to create.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def create_aaaa(server_ip, name, tsig_key_file=None):
create_host_record('aaaa', server_ip, name, addr, ttl, tsig_key_file)
"""Dynamically delete an A record.
:param server_ip: the IP address of the DNS server on which to delete the record.
:param name: the name of the record to delete.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def delete_a(server_ip, name, tsig_key_file=None):
delete_host_record('a', server_ip, name, tsig_key_file)
"""Dynamically delete an AAAA record.
:param server_ip: the IP address of the DNS server on which to delete the record.
:param name: the name of the record to delete.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def delete_aaaa(server_ip, name, tsig_key_file=None):
delete_host_record('aaaa', server_ip, name, tsig_key_file)
"""Dynamically create a PTR record.
:param server_ip: the IP address of the DNS server on which to create the record.
:param name: the name of the new record to create.
:param reverse_name: the reverse space name of the new record to create.
:param ttl: the TTL of the new record to create.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def create_ptr(server_ip, name, reverse_name, ttl, tsig_key_file=None):
fn = os.tmpnam()
f = open(fn, 'w')
f.write('server %s\n' % server_ip)
f.write('update add %s %s ptr %s\n' % (reverse_name, ttl, name))
f.write('send\n')
f.close()
run_nsupdate(fn, tsig_key_file)
"""Dynamically delete a PTR record.
:param server_ip: the IP address of the DNS server on which to delete the record.
:param reverse_name: the reverse space name of the record to delete.
:param tsig_key_file: the name of the optional TSIG key file to use.
"""
def delete_ptr(server_ip, reverse_name, tsig_key_file=None):
fn = os.tmpnam()
f = open(fn, 'w')
f.write('server %s\n' % server_ip)
f.write('update delete %s ptr\n' % reverse_name)
f.write('send\n')
f.close()
run_nsupdate(fn, tsig_key_file)
|
<gh_stars>0
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.python.summary.summary_iterator import summary_iterator
from tensorflow.python.framework import tensor_util
def getEventFileData(path):
    """Collect scalar series from a TensorFlow event file, keyed by tag.

    Returns a dict mapping each summary tag to a list of ``[step, value]``
    pairs in file order.
    """
    data = {}
    for event in summary_iterator(path):
        for value in event.summary.value:
            # A zero simple_value is treated as "payload lives in the tensor
            # proto instead". NOTE(review): assumes no genuine 0.0 scalars.
            if value.simple_value == 0.0:
                scalar = tensor_util.MakeNdarray(value.tensor)
            else:
                scalar = np.array([value.simple_value])
            data.setdefault(value.tag, []).append([event.step, scalar.item()])
    return data
def generalization_plts(plot='train'):
    """Plot iGibson loss curves for different numbers of training apartments.

    Generalized: the previously hard-coded mode switch is now a parameter.

    :param plot: 'train' to plot training loss (the previous behaviour),
        anything else to plot evaluation loss.
    NOTE(review): all event-file paths are hard-coded to a local machine.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    if plot == 'train':
        # igibson apartments generalize
        batches = 8000
        generalize_aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/Aprts_rgbd/train/events.out.tfevents.1630605625.rlgpu2.17887.0.v2"
        generalize_aprts_loss = np.array(getEventFileData(generalize_aprts_path)["loss"])
        # x axis is steps * batches-per-step, i.e. total training batches
        generalize_aprts = ax.plot(generalize_aprts_loss[:, 0]*batches, generalize_aprts_loss[:, 1])
        # igibson apartments
        # batches = 8000
        # aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/Aprts_overfit_rgbd/train/events.out.tfevents.1630920914.rlgpu2.12306.0.v2"
        # aprts_loss = np.array(getEventFileData(aprts_path)["loss"])
        # aprts = ax.plot(aprts_loss[:, 0]*batches, aprts_loss[:, 1])
        # igibson 10 apartments
        batches = 1250
        ten_aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/10_Aprts_rgbd/train/events.out.tfevents.1630920486.rlgpu2.39725.0.v2"
        ten_aprts_loss = np.array(getEventFileData(ten_aprts_path)["loss"])
        ten_aprts = ax.plot(ten_aprts_loss[:, 0]*batches, ten_aprts_loss[:, 1])
        # igibson 1 apartments
        batches = 330
        one_aprts_path = "/media/neo/robotics/final_report/Rs_400_8.0/Rs_rgb_depth/train/events.out.tfevents.1631779563.rlgpu2.40822.0.v2"
        one_aprts_loss = np.array(getEventFileData(one_aprts_path)["loss"])
        one_aprts = ax.plot(one_aprts_loss[:, 0]*batches, one_aprts_loss[:, 1])
        ax.set_title('Training loss for iGibson environment', fontsize=22, weight='bold')
        ax.set_xlabel("number of training batches", fontsize=18)
        ax.set_ylabel("mean square error (cm)", fontsize=18)
        ax.legend([
            "115 Apartments",
            # "115 Floors",
            "17 Apartments",
            "1 Apartment"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("igibson_train_loss.png")
    else:
        # igibson apartments generalize
        batches = 1000
        generalize_aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/Aprts_rgbd/eval/events.out.tfevents.1630605625.rlgpu2.17887.1.v2"
        generalize_aprts_loss = np.array(getEventFileData(generalize_aprts_path)["loss"])
        generalize_aprts = ax.plot(generalize_aprts_loss[:, 0]*batches, generalize_aprts_loss[:, 1])
        # igibson apartments
        # aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/Aprts_overfit_rgbd/eval/events.out.tfevents.1630920914.rlgpu2.12306.1.v2"
        # aprts_loss = np.array(getEventFileData(aprts_path)["loss"])
        # aprts = ax.plot(aprts_loss[:, 0], aprts_loss[:, 1])
        # igibson 10 apartments
        batches = 300
        ten_aprts_path = "/media/neo/robotics/final_report/Aprts_400_8.0/10_Aprts_rgbd/eval/events.out.tfevents.1630920486.rlgpu2.39725.1.v2"
        ten_aprts_loss = np.array(getEventFileData(ten_aprts_path)["loss"])
        ten_aprts = ax.plot(ten_aprts_loss[:, 0]*batches, ten_aprts_loss[:, 1])
        # igibson 1 apartments
        batches = 40
        one_aprts_path = "/media/neo/robotics/final_report/Rs_400_8.0/Rs_rgb_depth/eval/events.out.tfevents.1631779563.rlgpu2.40822.1.v2"
        one_aprts_loss = np.array(getEventFileData(one_aprts_path)["loss"])
        one_aprts = ax.plot(one_aprts_loss[:, 0]*batches, one_aprts_loss[:, 1])
        ax.set_title('Evaluation loss for iGibson environment', fontsize=22, weight='bold')
        ax.set_xlabel("number of evaluation batches", fontsize=18)
        ax.set_ylabel("mean square error (cm)", fontsize=18)
        ax.legend([
            "15 Apartments (unseen)",
            # "15 Floors",
            "4 Apartments",
            "1 Apartment"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("igibson_eval_loss.png")
def house3d_plts():
    """Plot House3D train/eval loss for PFNet vs DPF on a single axis.

    NOTE(review): all event-file paths are hard-coded to a local machine.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    # pfnet house3d apartment
    pfnet_train_path = "/media/neo/robotics/final_report/House3D_4000_8.0/house3d_rgb_depth/train/events.out.tfevents.1631127344.rlgpu2.10177.0.v2"
    pfnet_train_loss = np.array(getEventFileData(pfnet_train_path)["loss"])
    pfnet_eval_path = "/media/neo/robotics/final_report/House3D_4000_8.0/house3d_rgb_depth/eval/events.out.tfevents.1631127344.rlgpu2.10177.1.v2"
    pfnet_eval_loss = np.array(getEventFileData(pfnet_eval_path)["loss"])
    pfnet_train = ax.plot(pfnet_train_loss[:, 0], pfnet_train_loss[:, 1])
    pfnet_eval = ax.plot(pfnet_eval_loss[:, 0], pfnet_eval_loss[:, 1])
    # dpf house3d apartment
    dpf_train_path = "/media/neo/robotics/deep-activate-localization/bckp/jan/jan_22/runs/Jan23_00-10-06_pearl8/train_stats_mean_total_loss/events.out.tfevents.1611357667.pearl8.6887.3"
    dpf_train_loss = np.array(getEventFileData(dpf_train_path)["train_stats"])
    dpf_eval_path = "/media/neo/robotics/deep-activate-localization/bckp/jan/jan_27_1/runs/Jan27_10-55-58_pearl8/eval_stats_mean_loss/events.out.tfevents.1611741820.pearl8.17432.3"
    dpf_eval_loss = np.array(getEventFileData(dpf_eval_path)["eval_stats"])
    dpf_train = ax.plot(dpf_train_loss[:, 0], dpf_train_loss[:, 1])
    # NOTE(review): the *3 step rescale presumably aligns DPF eval (logged
    # every 3rd epoch?) with the train axis — confirm against the logger.
    dpf_eval = ax.plot(dpf_eval_loss[:, 0]*3, dpf_eval_loss[:, 1])
    ax.set_title('Training/Evaluation loss for House3D environment', fontsize=18, weight='bold')
    ax.set_xlabel("number of train epochs", fontsize=16)
    ax.set_ylabel("pose error (meters)", fontsize=16)
    ax.legend([
        "PFNet Train",
        "PFNet Eval",
        "DPF Train",
        "DPF Eval"
    ], loc='upper right', fontsize=14)
    plt.show()
    fig.savefig("house3d_loss.png")
def igibson_plts():
    """Scatter plot of PFNet global-localization success vs particles/steps.

    Each bubble is one (particles N, episode steps T) configuration, annotated
    with its success rate (%); bubble area comes from the ``area`` array.
    NOTE(review): bubble colors are random on every call (not seeded).
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    N = np.array([500, 500, 1000])
    T = np.array([50, 100, 100])
    accuracy = np.array([90.0, 90.625, 91.875])
    area = np.array([100, 200, 300])
    # One random color per point. The original wrote len(N + 1), which built a
    # throwaway array just to measure it; len(N) is the same value, directly.
    colors = np.random.rand(len(N))
    ax.scatter(x=N, y=T, s=area, c=colors, alpha=0.5)
    for i, txt in enumerate(accuracy):
        ax.annotate(f" {txt}", (N[i], T[i]), fontsize=16)
    ax.set_xticks(np.array([0, 250, 500, 1000]))
    ax.set_yticks(np.array([0, 10, 50, 100]))
    ax.set_title('iGibson PFNet global localization RGB-D success (%) ', fontsize=18, weight='bold')
    ax.set_xlabel("number of particles (N)", fontsize=16)
    ax.set_ylabel("episode steps (t)", fontsize=16)
    plt.show()
    fig.savefig("igibson_rgbd_accuracy.png")
def belief_plts():
    """Plot SAC train/eval episode return for two particle representations.

    Compares a KMeans (k=10) cluster representation against a belief-map
    representation. NOTE(review): despite the local variable names
    (pfnet_*/dpf_*), both path pairs point at RL runs — the names look
    copy-pasted from house3d_plts; verify before relying on them.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    # kmeans representation
    pfnet_train_path = "/media/neo/robotics/August/20-07-2021/train_rl_uniform_kmeans/train/events.out.tfevents.1629406907.pearl9.4239.0.v2"
    pfnet_train_return = np.array(getEventFileData(pfnet_train_path)["Metrics/AverageReturn"])
    pfnet_eval_path = "/media/neo/robotics/August/20-07-2021/train_rl_uniform_kmeans/eval/events.out.tfevents.1629406907.pearl9.4239.1.v2"
    pfnet_eval_return = np.array(getEventFileData(pfnet_eval_path)["Metrics/AverageReturn"])
    pfnet_train = ax.plot(pfnet_train_return[:, 0], pfnet_train_return[:, 1])
    pfnet_eval = ax.plot(pfnet_eval_return[:, 0], pfnet_eval_return[:, 1])
    # belief map representation
    dpf_train_path = "/media/neo/robotics/August/20-07-2021/train_rl_uniform_likelihood/train/events.out.tfevents.1629406377.pearl8.20947.0.v2"
    dpf_train_return = np.array(getEventFileData(dpf_train_path)["Metrics/AverageReturn"])
    dpf_eval_path = "/media/neo/robotics/August/20-07-2021/train_rl_uniform_likelihood/eval/events.out.tfevents.1629406377.pearl8.20947.1.v2"
    dpf_eval_return = np.array(getEventFileData(dpf_eval_path)["Metrics/AverageReturn"])
    dpf_train = ax.plot(dpf_train_return[:, 0], dpf_train_return[:, 1])
    dpf_eval = ax.plot(dpf_eval_return[:, 0], dpf_eval_return[:, 1])
    ax.set_title('Training/Evaluation episode return for SAC agent', fontsize=22, weight='bold')
    ax.set_xlabel("number of train epochs", fontsize=18)
    ax.set_ylabel("average episode return", fontsize=18)
    ax.legend([
        "KMeans (k=10) Train",
        "KMeans (k=10) Eval",
        "Belief Map Train",
        "Belief Map Eval"
    ], loc='upper left', fontsize=16)
    plt.show()
    fig.savefig("particle_rep_sac_return.png")
def rl_train_eval_plts(plot='train'):
    """Plot SAC episode return for several start-pose sampling-box sizes.

    Generalized: the previously hard-coded mode switch is now a parameter.

    :param plot: 'train' to plot training returns (the previous behaviour),
        anything else to plot evaluation returns.
    NOTE(review): all event-file paths are hard-coded to a local machine;
    legend labels (1.0/2.0/4.0/5.0) do not match the path suffixes
    (0.5/1.0/2.0/2.5) — presumably a unit conversion; confirm.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    if plot == 'train':
        # 1.0 box + 25 steps rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/train/events.out.tfevents.1631716010.pearl8.18818.0.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageReturn"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 2.0 box + 25 steps rl agent
        box_path_2_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/train/events.out.tfevents.1631867346.pearl8.18370.0.v2"
        box_return_2_0 = np.array(getEventFileData(box_path_2_0)["Metrics/AverageReturn"])
        box_2_0 = ax.plot(box_return_2_0[:, 0], box_return_2_0[:, 1])
        # 4.0 box + 50 steps rl agent
        box_path_4_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/train/events.out.tfevents.1632144090.pearl2.5531.0.v2"
        box_return_4_0 = np.array(getEventFileData(box_path_4_0)["Metrics/AverageReturn"])
        box_4_0 = ax.plot(box_return_4_0[:, 0], box_return_4_0[:, 1])
        # 5.0 box + 50 steps rl agent
        box_path_5_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.5_box_50/train/events.out.tfevents.1632349779.pearl8.24775.0.v2"
        box_return_5_0 = np.array(getEventFileData(box_path_5_0)["Metrics/AverageReturn"])
        box_5_0 = ax.plot(box_return_5_0[:, 0], box_return_5_0[:, 1])
        ax.set_title('Training episode return for SAC agent with Belief Map', fontsize=22, weight='bold')
        ax.set_xlabel("number of train epochs", fontsize=18)
        ax.set_ylabel("average episode return", fontsize=18)
        ax.legend([
            "1.0 sampling box",
            "2.0 sampling box",
            "4.0 sampling box",
            "5.0 sampling box"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("rl_belief_train_returns.png")
    else:
        # 1.0 box + 25 steps rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageReturn"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 2.0 box + 25 steps rl agent
        box_path_2_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/eval/events.out.tfevents.1631867347.pearl8.18370.1.v2"
        box_return_2_0 = np.array(getEventFileData(box_path_2_0)["Metrics/AverageReturn"])
        box_2_0 = ax.plot(box_return_2_0[:, 0], box_return_2_0[:, 1])
        # 4.0 box + 50 steps rl agent
        box_path_4_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/eval/events.out.tfevents.1632144090.pearl2.5531.1.v2"
        box_return_4_0 = np.array(getEventFileData(box_path_4_0)["Metrics/AverageReturn"])
        box_4_0 = ax.plot(box_return_4_0[:, 0], box_return_4_0[:, 1])
        # 5.0 box + 50 steps rl agent
        box_path_5_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.5_box_50/eval/events.out.tfevents.1632349779.pearl8.24775.1.v2"
        box_return_5_0 = np.array(getEventFileData(box_path_5_0)["Metrics/AverageReturn"])
        box_5_0 = ax.plot(box_return_5_0[:, 0], box_return_5_0[:, 1])
        ax.set_title('Evaluation episode return for SAC agent with belief map', fontsize=22, weight='bold')
        ax.set_xlabel("number of train epochs", fontsize=18)
        ax.set_ylabel("average episode return", fontsize=18)
        ax.legend([
            "1.0 sampling box",
            "2.0 sampling box",
            "4.0 sampling box",
            "5.0 sampling box"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("rl_belief_eval_returns.png")
def rl_test_plts(plot='collision_penalty'):
    """Box-plot per-episode test metrics for three agent behaviours.

    Compares an obstacle-avoidance agent, a random agent and the trained SAC
    agent on one metric selected by ``plot``. Generalized: the previously
    hard-coded mode switch is now a parameter.

    :param plot: one of 'collision_penalty' (default, the previous behaviour),
        'orientation_error', 'position_error'; anything else plots the
        episode-end reward.
    NOTE(review): all event-file paths are hard-coded to a local machine.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    if plot == 'collision_penalty':
        # obstacle avoid agent
        avoid_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/avoid_agent/events.out.tfevents.1632427985.pearl2.18690.0.v2"
        avoid_eps_mcp = np.array(getEventFileData(avoid_path)["per_eps_mcp"])
        # random agent
        rnd_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/rnd_agent/events.out.tfevents.1632428087.pearl2.18689.0.v2"
        rnd_eps_mcp = np.array(getEventFileData(rnd_path)["per_eps_mcp"])
        # trained sac agent
        sac_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/sac_agent/events.out.tfevents.1632427849.pearl5.11213.0.v2"
        sac_eps_mcp = np.array(getEventFileData(sac_path)["per_eps_mcp"])
        # negate: penalties are logged as negative rewards
        data = np.concatenate([
            -avoid_eps_mcp[:, 1:2],
            -rnd_eps_mcp[:, 1:2],
            -sac_eps_mcp[:, 1:2]
        ], axis=1)
        ax.boxplot(data)
        ax.set_title('Episode mean collision penalty for 4.0 sampling box', fontsize=22, weight='bold')
        # ax.set_xlabel("agent behavior", fontsize=16)
        ax.set_ylabel("mean collision penalty (%)", fontsize=18)
        # ax.set_ylim(-1.05, 0.05)
        ax.set_xticklabels([
            "Obstacle Avoidance Agent",
            "Random Action Agent",
            "Trained SAC Agent"
        ], fontsize=18)
        plt.show()
        fig.savefig("rl_belief_test_mcp.png")
    elif plot == 'orientation_error':
        # obstacle avoid agent
        avoid_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/avoid_agent/events.out.tfevents.1632427985.pearl2.18690.0.v2"
        avoid_eps_mso = np.array(getEventFileData(avoid_path)["per_eps_mso"])
        # random agent
        rnd_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/rnd_agent/events.out.tfevents.1632428087.pearl2.18689.0.v2"
        rnd_eps_mso = np.array(getEventFileData(rnd_path)["per_eps_mso"])
        # trained sac agent
        sac_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/sac_agent/events.out.tfevents.1632427849.pearl5.11213.0.v2"
        sac_eps_mso = np.array(getEventFileData(sac_path)["per_eps_mso"])
        data = np.concatenate([
            avoid_eps_mso[:, 1:2],
            rnd_eps_mso[:, 1:2],
            sac_eps_mso[:, 1:2]
        ], axis=1)
        ax.boxplot(data)
        ax.set_title('Episode mean orientation error for 4.0 sampling box', fontsize=22, weight='bold')
        # ax.set_xlabel("agent behavior", fontsize=16)
        ax.set_ylabel("mean orientation error (radians)", fontsize=18)
        ax.set_ylim(-0.05, 0.15)
        ax.set_xticklabels([
            "Obstacle Avoidance Agent",
            "Random Action Agent",
            "Trained SAC Agent"
        ], fontsize=18)
        plt.show()
        fig.savefig("rl_belief_test_mso.png")
    elif plot == 'position_error':
        # obstacle avoid agent
        avoid_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/avoid_agent/events.out.tfevents.1632427985.pearl2.18690.0.v2"
        avoid_eps_msp = np.array(getEventFileData(avoid_path)["per_eps_msp"])
        # random agent
        rnd_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/rnd_agent/events.out.tfevents.1632428087.pearl2.18689.0.v2"
        rnd_eps_msp = np.array(getEventFileData(rnd_path)["per_eps_msp"])
        # trained sac agent
        sac_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/sac_agent/events.out.tfevents.1632427849.pearl5.11213.0.v2"
        sac_eps_msp = np.array(getEventFileData(sac_path)["per_eps_msp"])
        data = np.concatenate([
            avoid_eps_msp[:, 1:2],
            rnd_eps_msp[:, 1:2],
            sac_eps_msp[:, 1:2]
        ], axis=1)
        ax.boxplot(data)
        ax.set_title('Episode mean position error for 4.0 sampling box', fontsize=22, weight='bold')
        # ax.set_xlabel("agent behavior", fontsize=16)
        ax.set_ylabel("mean position error (meters)", fontsize=18)
        ax.set_ylim(-0.05, 2.0)
        ax.set_xticklabels([
            "Obstacle Avoidance Agent",
            "Random Action Agent",
            "Trained SAC Agent"
        ], fontsize=18)
        plt.show()
        fig.savefig("rl_belief_test_msp.png")
    else:
        # obstacle avoid agent
        avoid_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/avoid_agent/events.out.tfevents.1632427985.pearl2.18690.0.v2"
        avoid_eps_end_mse = np.array(getEventFileData(avoid_path)["per_eps_end_reward"])
        # random agent
        rnd_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/rnd_agent/events.out.tfevents.1632428087.pearl2.18689.0.v2"
        rnd_eps_end_mse = np.array(getEventFileData(rnd_path)["per_eps_end_reward"])
        # trained sac agent
        sac_path = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/sac_agent/events.out.tfevents.1632427849.pearl5.11213.0.v2"
        sac_eps_end_mse = np.array(getEventFileData(sac_path)["per_eps_end_reward"])
        # negate: end rewards are logged as negative errors
        data = np.concatenate([
            -avoid_eps_end_mse[:, 1:2],
            -rnd_eps_end_mse[:, 1:2],
            -sac_eps_end_mse[:, 1:2]
        ], axis=1)
        ax.boxplot(data)
        ax.set_title('Episode end pose error for 4.0 sampling box', fontsize=22, weight='bold')
        # ax.set_xlabel("agent behavior", fontsize=16)
        ax.set_ylabel("mean squared error (meters)", fontsize=18)
        ax.set_ylim(-0.05, 0.3)
        ax.set_xticklabels([
            "Obstacle Avoidance Agent",
            "Random Action Agent",
            "Trained SAC Agent"
        ], fontsize=18)
        plt.show()
        fig.savefig("rl_belief_test_end_mse.png")
def diff_steps_plts(plot='average_return'):
    """Plot eval metrics for 25 vs 50 particle-filter steps (2.0 sampling box).

    Generalized: the previously hard-coded mode switch is now a parameter.

    :param plot: one of 'average_return' (default, the previous behaviour),
        'collision_penalty', 'orientation_error'; anything else plots the
        average step position error.
    NOTE(review): all event-file paths are hard-coded to a local machine.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    # # 1.0 box + 25 steps rl agent
    # box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/train/events.out.tfevents.1631867346.pearl8.18370.0.v2"
    # box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageReturn"])
    # box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
    #
    # # 1.0 box + 50 steps rl agent
    # box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/train/events.out.tfevents.1631961881.pearl2.22000.0.v2"
    # box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageReturn"])
    # box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
    if plot == 'average_return':
        # 1.0 box + 25 steps rl agent
        box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/eval/events.out.tfevents.1631867347.pearl8.18370.1.v2"
        box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageReturn"])
        box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
        # 1.0 box + 50 steps rl agent
        box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/eval/events.out.tfevents.1631961881.pearl2.22000.1.v2"
        box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageReturn"])
        box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
        ax.set_title('Average episode return for SAC agent with belief map (2.0 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average episode return", fontsize=18)
        ax.legend([
            "25 particle filter steps",
            "50 particle filter steps"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_steps_eval_avg_eps_return.png")
    elif plot == 'collision_penalty':
        # 1.0 box + 25 steps rl agent
        box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/eval/events.out.tfevents.1631867347.pearl8.18370.1.v2"
        box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageStepCollisionPenality"])
        box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
        # 1.0 box + 50 steps rl agent
        box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/eval/events.out.tfevents.1631961881.pearl2.22000.1.v2"
        box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageStepCollisionPenality"])
        box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
        ax.set_title('Average step collision penalty for SAC agent with belief map (2.0 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average collision penalty (%)", fontsize=18)
        ax.legend([
            "25 particle filter steps",
            "50 particle filter steps"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_steps_eval_avg_step_collision.png")
    elif plot == 'orientation_error':
        # 1.0 box + 25 steps rl agent
        box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/eval/events.out.tfevents.1631867347.pearl8.18370.1.v2"
        box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageStepOrientationError"])
        box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
        # box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageStepPositionError"])
        # 1.0 box + 50 steps rl agent
        box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/eval/events.out.tfevents.1631961881.pearl2.22000.1.v2"
        box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageStepOrientationError"])
        box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
        # box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageStepPositionError"])
        ax.set_title('Average step orientation error for SAC agent with belief map (2.0 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average orientation error (radians)", fontsize=18)
        ax.legend([
            "25 particle filter steps",
            "50 particle filter steps"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_steps_eval_avg_step_orientation.png")
    else:
        # 1.0 box + 25 steps rl agent
        box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/eval/events.out.tfevents.1631867347.pearl8.18370.1.v2"
        box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageStepPositionError"])
        box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
        # 1.0 box + 50 steps rl agent
        box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/eval/events.out.tfevents.1631961881.pearl2.22000.1.v2"
        box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageStepPositionError"])
        box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
        ax.set_title('Average step position error for SAC agent with belief map (2.0 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average position error (meters)", fontsize=18)
        ax.legend([
            "25 particle filter steps",
            "50 particle filter steps"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_steps_eval_avg_step_position.png")
def diff_resample_plts(plot='average_return'):
    """Plot eval metrics for 1.0 vs 0.5 soft resample rate (0.5 sampling box).

    Generalized: the previously hard-coded mode switch is now a parameter.

    :param plot: one of 'average_return' (default, the previous behaviour),
        'collision_penalty', 'orientation_error'; anything else plots the
        average step position error.
    NOTE(review): all event-file paths are hard-coded to a local machine.
    """
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    # # 1.0 box + 25 steps rl agent
    # box_path_1_0_25 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_25/train/events.out.tfevents.1631867346.pearl8.18370.0.v2"
    # box_return_1_0_25 = np.array(getEventFileData(box_path_1_0_25)["Metrics/AverageReturn"])
    # box_1_0_25 = ax.plot(box_return_1_0_25[:, 0], box_return_1_0_25[:, 1])
    #
    # # 1.0 box + 50 steps rl agent
    # box_path_1_0_50 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/train/events.out.tfevents.1631961881.pearl2.22000.0.v2"
    # box_return_1_0_50 = np.array(getEventFileData(box_path_1_0_50)["Metrics/AverageReturn"])
    # box_1_0_50 = ax.plot(box_return_1_0_50[:, 0], box_return_1_0_50[:, 1])
    if plot == 'average_return':
        # 0.5 box + 25 steps + 1.0 resample rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageReturn"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 0.5 box + 25 steps + 0.5 resample rl agent
        box_path_0_5 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_0.5/eval/events.out.tfevents.1633342690.pearl8.24069.1.v2"
        box_return_0_5 = np.array(getEventFileData(box_path_0_5)["Metrics/AverageReturn"])
        box_0_5 = ax.plot(box_return_0_5[:, 0], box_return_0_5[:, 1])
        ax.set_title('Average episode return for SAC agent with belief map (0.5 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average episode return", fontsize=18)
        ax.legend([
            "1.0 soft resample rate",
            "0.5 soft resample rate"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_resample_eval_avg_eps_return.png")
    elif plot == 'collision_penalty':
        # 0.5 box + 25 steps + 1.0 resample rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageStepCollisionPenality"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 0.5 box + 25 steps + 0.5 resample rl agent
        box_path_0_5 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_0.5/eval/events.out.tfevents.1633342690.pearl8.24069.1.v2"
        box_return_0_5 = np.array(getEventFileData(box_path_0_5)["Metrics/AverageStepCollisionPenality"])
        box_0_5 = ax.plot(box_return_0_5[:, 0], box_return_0_5[:, 1])
        ax.set_title('Average step collision penalty for SAC agent with belief map (0.5 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average collision penalty (%)", fontsize=18)
        ax.legend([
            "1.0 soft resample rate",
            "0.5 soft resample rate"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_resample_eval_avg_step_collision.png")
    elif plot == 'orientation_error':
        # 0.5 box + 25 steps + 1.0 resample rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageStepOrientationError"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 0.5 box + 25 steps + 0.5 resample rl agent
        box_path_0_5 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_0.5/eval/events.out.tfevents.1633342690.pearl8.24069.1.v2"
        box_return_0_5 = np.array(getEventFileData(box_path_0_5)["Metrics/AverageStepOrientationError"])
        box_0_5 = ax.plot(box_return_0_5[:, 0], box_return_0_5[:, 1])
        ax.set_title('Average step orientation error for SAC agent with belief map (0.5 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average orientation error (radians)", fontsize=18)
        ax.legend([
            "1.0 soft resample rate",
            "0.5 soft resample rate"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_resample_eval_avg_step_orientation.png")
    else:
        # 0.5 box + 25 steps + 1.0 resample rl agent
        box_path_1_0 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2"
        box_return_1_0 = np.array(getEventFileData(box_path_1_0)["Metrics/AverageStepPositionError"])
        box_1_0 = ax.plot(box_return_1_0[:, 0], box_return_1_0[:, 1])
        # 0.5 box + 25 steps + 0.5 resample rl agent
        box_path_0_5 = "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_0.5/eval/events.out.tfevents.1633342690.pearl8.24069.1.v2"
        box_return_0_5 = np.array(getEventFileData(box_path_0_5)["Metrics/AverageStepPositionError"])
        box_0_5 = ax.plot(box_return_0_5[:, 0], box_return_0_5[:, 1])
        ax.set_title('Average step position error for SAC agent with belief map (0.5 sampling box)', fontsize=22, weight='bold')
        ax.set_xlabel("number of eval epochs", fontsize=18)
        ax.set_ylabel("average position error (meters)", fontsize=18)
        ax.legend([
            "1.0 soft resample rate",
            "0.5 soft resample rate"
        ], loc='upper right', fontsize=16)
        plt.show()
        fig.savefig("diff_resample_eval_avg_step_position.png")
def all_rl_eval_plts():
    """Box-plot one evaluation metric across the five start-pose sampling setups.

    Set ``plot`` below to one of 'collision_penalty', 'orientation_error' or
    'position_error' (any other value plots the average return).  The first
    ``limit`` eval points of each run are compared side by side and the figure
    is both shown and saved to a metric-specific PNG.
    """
    # One TensorBoard event file per sampling-area configuration, in x-axis order.
    event_files = [
        # 0.5 box + 25 steps rl agent
        "/media/neo/robotics/August/17-09-2021/train_rl_uniform_0.5_box_25/eval/events.out.tfevents.1631716010.pearl8.18818.1.v2",
        # 1.0 box + 50 steps rl agent
        "/media/neo/robotics/August/17-09-2021/train_rl_uniform_1.0_box_50/eval/events.out.tfevents.1631961881.pearl2.22000.1.v2",
        # 2.0 box + 50 steps rl agent
        "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.0_box_50/eval/events.out.tfevents.1632144090.pearl2.5531.1.v2",
        # 2.5 box + 50 steps rl agent
        "/media/neo/robotics/August/17-09-2021/train_rl_uniform_2.5_box_50/eval/events.out.tfevents.1632349779.pearl8.24775.1.v2",
        # full apartment + 50 steps rl agent
        "/media/neo/robotics/August/17-09-2021/train_rl_uniform_likelihood_rnd_50_new/eval/events.out.tfevents.1633162880.pearl8.3776.1.v2",
    ]
    # (event tag, plot title, y label, output file) per metric choice.
    metric_cfg = {
        'collision_penalty': (
            "Metrics/AverageStepCollisionPenality",  # sic: tag spelling as logged
            'Episode mean collision penalty for different start pose sampling area',
            "mean collision penalty (%)",
            "rl_belief_eval_mcp.png"),
        'orientation_error': (
            "Metrics/AverageStepOrientationError",
            'Episode mean orientation error for different start pose sampling area',
            "mean orientation error (radians)",
            "rl_belief_eval_mso.png"),
        'position_error': (
            "Metrics/AverageStepPositionError",
            'Episode mean position error for different start pose sampling area',
            "mean position error (meters)",
            "rl_belief_eval_msp.png"),
    }
    # Fallback: any other value of `plot` graphs the average return.
    default_cfg = (
        "Metrics/AverageReturn",
        'Episode mean return for different start pose sampling area',
        "return",
        "rl_belief_eval_mer.png")
    plot = 'collision_penalty'
    limit = 15  # number of leading eval points per run
    tag, title, ylabel, fname = metric_cfg.get(plot, default_cfg)
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111)
    # One column per run: metric values (column 1; column 0 is the step) of
    # the first `limit` eval summaries.
    data = np.concatenate(
        [np.array(getEventFileData(path)[tag])[:limit, 1:2]
         for path in event_files],
        axis=1)
    ax.boxplot(data)
    ax.set_title(title, fontsize=22, weight='bold')
    # ax.set_xlabel("agent behavior", fontsize=16)
    ax.set_ylabel(ylabel, fontsize=18)
    ax.set_xticklabels([
        "1.0 Sampling Box",
        "2.0 Sampling Box",
        "4.0 Sampling Box",
        "5.0 Sampling Box",
        "Full Apartment",
    ], fontsize=18)
    plt.show()
    fig.savefig(fname)
if __name__ == '__main__':
    # Exactly one plotting routine is enabled at a time; uncomment the one
    # whose figures you want to regenerate.
    # generalization_plts()
    # house3d_plts()
    # igibson_plts()
    # belief_plts()
    # rl_train_eval_plts()
    # rl_test_plts()
    # diff_steps_plts()
    diff_resample_plts()
    # all_rl_eval_plts()
|
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.routing import Map, Rule
from werkzeug.wrappers import Response
from lymph.testing import WebServiceTestCase
from lymph.web.interfaces import WebServiceInterface
from lymph.web.handlers import RequestHandler
from lymph.web.routing import HandledRule
class RuleHandler(RequestHandler):
    """Handler used by plain ``Rule`` entries with a callable endpoint."""

    def get(self):
        body = "Rule Handler"
        return Response(body)
class HandledRuleHandler(RequestHandler):
    """Handler attached to a ``HandledRule`` via its ``handler`` argument."""

    def get(self):
        body = "Handled Rule Handler"
        return Response(body)
class CustomNotFound(NotFound):
    """404 whose body is a fixed marker string.

    Tests assert on the marker to prove the dispatcher used this class
    instead of werkzeug's stock ``NotFound``.
    """

    def __init__(self):
        marker = Response("never-gonna-match-you-down", status=404)
        super(CustomNotFound, self).__init__(response=marker)
class CustomMethodNotAllowed(MethodNotAllowed):
    """405 whose body is a fixed marker string.

    Tests assert on the marker to prove the dispatcher used this class
    instead of werkzeug's stock ``MethodNotAllowed``.
    """

    def get_body(self, *args, **kwargs):
        marker = "never-gonna-run-around-or-post-you"
        return marker
class Web(WebServiceInterface):
    """Minimal web interface exercising every routing flavour under test.

    The url map deliberately contains two broken rules: "/fail/" names an
    endpoint with no matching method, and "/fail-wrong-endpoint/" maps to a
    non-string, non-callable endpoint (42) — both are expected to yield 500.
    """

    url_map = Map([
        Rule("/test/", endpoint="test"),
        Rule("/foo/", endpoint=RuleHandler, methods=['get']),
        Rule("/baz/", endpoint=RuleHandler),
        HandledRule("/bar/", endpoint="bar", handler=HandledRuleHandler),
        Rule("/fail/", endpoint="fail"),
        Rule("/fail-wrong-endpoint/", endpoint=42),
    ])

    def test(self, request):
        # Target of the string endpoint "test" above.
        return Response("method test")
class CustomErrorHandlingWeb(Web):
    """``Web`` variant overriding the error classes the dispatcher raises."""

    NotFound = CustomNotFound
    MethodNotAllowed = CustomMethodNotAllowed
class WebIntegrationTest(WebServiceTestCase):
    """End-to-end dispatch tests for ``Web`` via the test client."""

    service_class = Web

    def test_dispatch_rule_with_string_endpoint(self):
        # String endpoint "test" resolves to the Web.test() method by name.
        response = self.client.get("/test/")
        self.assertEqual(response.data.decode("utf8"), "method test")
        self.assertEqual(response.status_code, 200)

    def test_dispatch_rule_with_no_trailing_slash(self):
        # Missing trailing slash redirects to the canonical URL.
        response = self.client.get("/test", follow_redirects=True)
        self.assertEqual(response.data.decode("utf8"), "method test")
        self.assertEqual(response.status_code, 200)

    def test_dispatch_rule_with_callable_endpoint(self):
        # Callable endpoint dispatches to the RequestHandler subclass.
        response = self.client.get("/foo/")
        self.assertEqual(response.data.decode("utf8"), "Rule Handler")
        self.assertEqual(response.status_code, 200)

    def test_dispatch_handled_rule(self):
        # HandledRule dispatches to its `handler`, not its endpoint name.
        response = self.client.get("/bar/")
        self.assertEqual(response.data.decode("utf8"), "Handled Rule Handler")
        self.assertEqual(response.status_code, 200)

    def test_dispatch_failing_rule_to_500(self):
        # Endpoint name "fail" has no matching method on Web.
        response = self.client.get("/fail/")
        self.assertEqual(response.data.decode("utf8"), "")
        self.assertEqual(response.status_code, 500)

    def test_dispatch_failing_endpoint_to_500(self):
        # Endpoint 42 is neither a string nor a callable.
        response = self.client.get("/fail-wrong-endpoint/")
        self.assertEqual(response.data.decode("utf8"), "")
        self.assertEqual(response.status_code, 500)

    def test_dispatch_not_found(self):
        response = self.client.get("/never-gonna-match-me-up/")
        self.assertEqual(response.status_code, 404)

    # NOTE(review): "methond" is a long-standing typo, but the name is
    # overridden by CustomErrorHandlingWebIntegrationTest, so renaming it
    # here alone would make the subclass run both variants.
    def test_dispatch_methond_not_allowed(self):
        # Only GET is routed for these URLs; POST must 405.
        response = self.client.post("/bar/")
        self.assertEqual(response.status_code, 405)
        response = self.client.post("/foo/")
        self.assertEqual(response.status_code, 405)
        response = self.client.post("/baz/")
        self.assertEqual(response.status_code, 405)
class CustomErrorHandlingWebIntegrationTest(WebIntegrationTest):
    """Re-runs the suite against ``CustomErrorHandlingWeb``.

    Overrides the 404/405 tests to additionally assert that the custom error
    classes' marker bodies are returned.
    """

    service_class = CustomErrorHandlingWeb

    def test_dispatch_not_found(self):
        response = self.client.get("/never-gonna-match-me-up/")
        self.assertEqual(response.data.decode("utf8"), "never-gonna-match-you-down")
        self.assertEqual(response.status_code, 404)

    def test_dispatch_methond_not_allowed(self):
        response = self.client.post("/bar/")
        self.assertEqual(response.data.decode("utf8"), "never-gonna-run-around-or-post-you")
        self.assertEqual(response.status_code, 405)
        response = self.client.post("/foo/")
        self.assertEqual(response.data.decode("utf8"), "never-gonna-run-around-or-post-you")
        self.assertEqual(response.status_code, 405)
        response = self.client.post("/baz/")
        self.assertEqual(response.data.decode("utf8"), "never-gonna-run-around-or-post-you")
        self.assertEqual(response.status_code, 405)
|
<filename>ros_ws/src/bluetooth_bridge/src/bluetooth_bridge_server_node.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## @package docstring
# This package provides the bridge between Bluetooth and ROS, both ways.
# Initially it receives "String" messages and sends "String" messages
#
import rospy
import math
import sys
import signal
import bluetooth
import select
import time
from std_msgs.msg import String
from std_msgs.msg import Int32
import serial
import struct
import threading
import struct
#--------------------------------- Constants ----------------------------------#
TAG = "Bluetooth Bridge Node:"  ## Prefix for all console output from this node
node_name = "bluetooth_bridge"  ## ROS node name
version_num = "2020/5/29/11/26"  ## Build date stamp (year/month/day/hour/minute)
#------------------------------------------------------------------------------#
# Disabled: direct serial pass-through to a USB device.
#serial_node_name='serial_port'
#serialPort = "/dev/ttyUSB0"
#baudRate = 1000000;
#ser = serial.Serial(serialPort, baudRate, timeout=0.5)
print("This software version is %s" % (version_num))
# Last valid frame received over Bluetooth (shared with Application via `global`);
# seeded with a placeholder before any frame has arrived.
get_str = "0123456789"
## Application class
#
class Application:
    """Bluetooth <-> ROS bridge.

    Runs an RFCOMM server socket, publishes each valid 8-byte frame received
    over Bluetooth to a set of ROS topics, and sends any String published on
    the input topic back over the Bluetooth link.
    """

    ## "Is application running" flag
    is_running = True
    ## "Is connection established" flag
    is_connected = False
    ## True while frames are actively arriving; cleared every watchdog tick so
    ## a stalled link can be detected (see timer_callback).
    is_normal_sending = False
    ## Input topics
    input_topic = "/bluetooth/send"    # Send a string message to this topic
                                       # to send it via Bluetooth.
    sound_topic = "/soundRequest"      # Sound requests (Int32) are published here.
    ## Output topics — received Bluetooth data is fanned out to these.
    output_topic = "/bluetooth/received/data"          # raw frame as received
    output_topic_direction = "/bluetooth/received/direction"
    output_topic_speed = "/bluetooth/received/speed"
    output_topic_gear = "/bluetooth/received/gear"
    output_topic_manual = "/bluetooth/received/manual"
    output_topic_beep = "/bluetooth/received/beep"
    status_topic = "/bluetooth/status"                 # connection state / diagnostics
    direction = 0
    count = 0
    ## Bluetooth channel
    bt_channel = 22  # IMPORTANT! Make sure this is THE SAME
                     # as was used during the
                     # sdptool add --channel=<number> SP command.
                     # Also, use this command before launching
                     # this node if you have rebooted your robot.

    ## Init function: sets up ROS pub/sub, then runs the RFCOMM accept/receive
    ## loop until is_running is cleared (blocks — rospy.spin runs in a thread).
    def __init__(self):
        # Assigning the SIGINT handler
        signal.signal(signal.SIGINT, self.sigint_handler)
        #print math.sin(10)
        # Starting the node
        rospy.init_node(node_name, anonymous=False)
        # Getting parameters (disabled: topics/channel use the class defaults above)
        #self.input_topic = rospy.get_param("~send_topic", self.input_topic)
        #self.output_topic_direction = rospy.get_param("~recv_topic", self.output_topic_direction)
        #self.output_topic_speed = rospy.get#self.direction#self.direction_param("~recv_topic", self.output_topic_speed)
        #self.output_topic_gear = rospy.get_param("~recv_topic", self.output_topic_gear)
        #self.output_topic_manual = rospy.get_param("~recv_topic", self.output_topic_manual)
        #self.output_topic_beep = rospy.get_param("~recv_topic", self.output_topic_beep)
        #self.status_topic = rospy.get_param("~status_topic", self.status_topic)
        #self.bt_channel = rospy.get_param("~rfcomm_channel", self.bt_channel)
        #print TAG, "param: input_topic =", self.input_topic
        #print TAG, "param: output_topic_direction =", self.output_topic_direction
        #print TAG, "param: output_topic_speed =", self.output_topic_speed
        #print TAG, "param: output_topic_gear =", self.output_topic_gear
        #print TAG, "param: output_topic_manual =", self.output_topic_manual
        #print TAG, "param: output_topic_beep =", self.output_topic_beep
        #print TAG, "param: status_topic =", self.status_topic
        #print TAG, "param: bt_channel =", self.bt_channel
        # Subscribers
        self.sub = rospy.Subscriber(self.input_topic, String, self.send_callback)
        # Publishers
        self.pub = rospy.Publisher(self.output_topic, String, queue_size=10)
        self.sound_pub = rospy.Publisher(self.sound_topic, Int32, queue_size=10)
        self.direction_pub = rospy.Publisher(self.output_topic_direction, Int32, queue_size=10)
        self.speed_pub = rospy.Publisher(self.output_topic_speed, Int32, queue_size=10)
        self.gear_pub = rospy.Publisher(self.output_topic_gear, Int32, queue_size=10)
        self.manual_pub = rospy.Publisher(self.output_topic_manual, Int32, queue_size=10)
        self.beep_pub = rospy.Publisher(self.output_topic_beep, Int32, queue_size=10)
        self.status_pub = rospy.Publisher(self.status_topic, String, queue_size=10)
        # Give the publishers a moment to register before the first publish.
        time.sleep(0.5)
        self.status_pub.publish("INIT")
        # Watchdog: reports NODATA if no frame arrived within the period.
        rospy.Timer(rospy.Duration(0.5), self.timer_callback)
        self.sound_pub.publish(2)
        # rospy.spin() runs in its own thread so the server loop below may block.
        self.add_thread = threading.Thread(target=self.thread_job)
        self.add_thread.start()
        while self.is_running:
            try:
                # Starting the bluetooth server
                self.server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
                # Listening for incoming connections
                self.server_sock.bind(("", self.bt_channel))
                #self.server_sock.bind( ("", self.bt_channel) )
                print TAG, "Waiting for incoming connections on port %d ..." % self.bt_channel
                self.status_pub.publish("LISTENING")
                self.server_sock.listen(1)
                # Accepting incoming connection (blocks until a client connects)
                self.client_sock, self.address = self.server_sock.accept()
                #print TAG, "Accepted connection from ", self.address
                self.status_pub.publish("CONNECTED: " + str(self.address))
                # [IMPORTANT] THIS IS HOW TO RECEIVE MESSAGE FROM BLUETOOTH
                # AND PUBLISH IT TO ROS
                # Running the loop to receive messages
                self.is_connected = True
                while self.is_running:
                    # 2-second timeout so the loop can notice is_running changes.
                    ready = select.select([self.client_sock], [], [], 2)
                    # print ready
                    if ready[0]:
                        data = self.client_sock.recv(1024)
                        self.is_normal_sending = True
                        #self.direction=math.cos(ord(data[1])//math.pi)*ord(data[6])
                        # print TAG, "Received: ", data.encode('hex'),type(data.encode('hex'))
                        #print TAG, "Received: ", ord(data[0]),ord(data[1]),ord(data[2]),ord(data[3]),ord(data[4]),ord(data[5]),ord(data[6]))
                        print TAG, "Received: header=%d,direction=%d,speed=%d,gear==%d,manual=%d,beep=%d,crc=%d" % (ord(data[0]), ord(data[1]), ord(data[2]), ord(data[3]), ord(data[4]), ord(data[5]), ord(data[6]))
                        #if ((data[1]>>8)&1) :
                        #    print "is 1"
                        #else:
                        #    print "is 0"
                        # Frame layout: [0]=0xaa header, [1]=direction, [2]=speed,
                        # [3]=gear, [4]=manual, [5]=beep, [6]=reserved,
                        # [7]=XOR checksum over bytes 0..6.
                        if (ord(data[0]) == 0xaa) and (ord(data[7]) == (ord(data[0]) ^ ord(data[1]) ^ ord(data[2]) ^ ord(data[3]) ^ ord(data[4]) ^ ord(data[5]) ^ ord(data[6]))):
                            self.pub.publish(data)
                            self.direction_pub.publish(ord(data[1]))  #/bluetooth/received/direction
                            #self.direction_pub.publish(self.direction)
                            self.speed_pub.publish(ord(data[2]))  #/bluetooth/received/speed
                            self.gear_pub.publish(ord(data[3]))  #/bluetooth/received/gear
                            self.manual_pub.publish(ord(data[4]))  #/bluetooth/received/manual
                            self.beep_pub.publish(ord(data[5]))  #/bluetooth/received/beep
                            if (ord(data[5])):
                                self.sound_pub.publish(1)
                            # self.client_sock.send(data[0:6])
                            # Remember the last valid frame for send_callback.
                            global get_str
                            get_str = data
                            # buffer=struct.pack("s",data)
                            #ser.write(data[0:8])
                            #ser.flush()
                        else:
                            print "CRC not pass"
            except Exception, e:
                # Any socket error tears the connection down; the outer while
                # loop then restarts the RFCOMM server.
                self.is_connected = False
                self.server_sock.close()
                print TAG, "EXCEPTION:", str(e)
                self.status_pub.publish("EXCEPTION: " + str(e))
                #self.pub.publish(data)
                # Publish zeroed commands so the robot stops when the link drops.
                self.direction_pub.publish(0)  #/bluetooth/received/direction
                self.speed_pub.publish(0)  #/bluetooth/received/speed
                self.gear_pub.publish(0)  #/bluetooth/received/gear
                self.manual_pub.publish(0)  #/bluetooth/received/manual
                self.beep_pub.publish(0)  #/bluetooth/received/beep
                print TAG, "RESTARTING SERVER"
                time.sleep(0.1)

    ## SIGINT Signal handler, you need this one to interrupt your node
    def sigint_handler(self, signal, frame):
        print ""
        print TAG, "Interrupt!"
        self.status_pub.publish("SIGINT")
        self.is_running = False
        print TAG, "Terminated"
        sys.exit(0)

    ## Runs the ROS callback loop in a background thread.
    def thread_job(self):
        rospy.spin()

    ## [IMPORTANT] THIS IS HOW TO SEND MESSAGES VIA BLUETOOTH
    ## Handler for the messages to be sent via bluetooth.
    # Expects message.data to be a 34-byte packed payload: byte 0 header,
    # bytes 1-2 little-endian speed (short), bytes 3-30 "2b13h" status block,
    # bytes 31/32 voltage/temperature (assumed from the prints — TODO confirm).
    def send_callback(self, message):
        if self.is_connected:
            print TAG, "Sending:", message.data
            s = struct.Struct('<34b')
            s3 = struct.Struct('h')
            s1 = struct.Struct('2b13h')
            unpack_data = s.unpack(message.data)
            unpack_data3 = s3.unpack(message.data[1:3])
            unpack_data1 = s1.unpack(message.data[3:31])
            #print("speed:",unpack_data3[0],"gear:",unpack_data1[0])
            #unpack_data=str(unpack_data3[0])+","+str(unpack_data1[0])
            print("head is", unpack_data[0])
            print("Vol:", unpack_data[31], "temp:", unpack_data[32])
            print(unpack_data3[0], unpack_data1[0])
            speed = unpack_data3[0]
            if speed < 0:
                speed = -(speed)
            print(speed, unpack_data1[0])
            global get_str
            # get_str=get_str[0:7]+chr(speed/256)+chr(speed%256)+chr(unpack_data1[0])
            print("speed is %d,%d.gear is %d" % (unpack_data3[0] / 256, unpack_data3[0] % 256, unpack_data1[0]))
            #self.client_sock.send(get_str[0:10])
            # Forward the raw payload unchanged over the Bluetooth link.
            self.client_sock.send(message.data)
            print(type(message.data))

    ## Watchdog (0.5 s period): reports NODATA when connected but no frame
    ## arrived since the last tick, otherwise SEND/RECEIVE; zeroes the command
    ## topics so the robot stops when the stream stalls.
    def timer_callback(self, event):
        if (self.is_connected == True) and (self.is_normal_sending == False):
            self.status_pub.publish("NODATA")
            self.direction_pub.publish(0)  #/bluetooth/received/direction
            self.speed_pub.publish(0)  #/bluetooth/received/speed
            self.gear_pub.publish(0)  #/bluetooth/received/gear
            self.manual_pub.publish(0)  #/bluetooth/received/manual
            self.beep_pub.publish(0)  #/bluetooth/received/beep
        else:
            self.status_pub.publish("SEND/RECEIVE")
        # Re-armed by the receive loop on every incoming frame.
        self.is_normal_sending = False
#------------------------------------- Main -------------------------------------#
if __name__ == '__main__':
    # Application.__init__ blocks in the server loop until SIGINT.
    print TAG, "Started"
    app = Application()
    print TAG, "Terminated"
|
<filename>apps/organizations/models.py<gh_stars>0
"""
This module provides the different ``models`` pertaining to the ``organizations`` app.
"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
import apps.organizations.constants as constants
class Organization(models.Model):
    """
    ``Organization`` is the model representing an organization in need of funds.

    Attributes:
        name: A ``models.CharField()`` representing the name of the organization.
        description: A ``models.TextField()`` representing a description of the organization.
        owner: A ``models.ForeignKey()`` field representing the owner of the organization.
        email: A ``models.EmailField()`` representing the email of the organization.
        amount_to_be_raised: A ``models.PositiveIntegerField()`` representing the amount of
            funds the organization aims to raise.
        address: A ``models.CharField()`` representing the address of the organization.
        latitude: A ``models.FloatField()`` representing the geographical latitude on which
            the organization is located.
        longitude: A ``models.FloatField()`` representing the geographical longitude on which
            the organization is located.
        created_at: A ``models.DateTimeField()`` representing the date and time when the instance was created.
        updated_at: A ``models.DateTimeField()`` representing the date and time when the instance was updated.
    """
    name = models.CharField(verbose_name=_('Name'), max_length=256)
    description = models.TextField(verbose_name=_('Description'))
    owner = models.ForeignKey(
        get_user_model(), related_name='organizations',
        on_delete=models.CASCADE, verbose_name=_('Owner')
    )
    email = models.EmailField(verbose_name=_('Email'))
    amount_to_be_raised = models.PositiveIntegerField(
        verbose_name=_('Amount to be raised?')
    )
    address = models.CharField(verbose_name=_('Address'), max_length=1024)
    latitude = models.FloatField(verbose_name=_('Latitude'))
    longitude = models.FloatField(verbose_name=_('Longitude'))
    created_at = models.DateTimeField(verbose_name=_('Created at'), auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name=_('Updated at'), auto_now=True)
class Review(models.Model):
    """
    ``Review`` is the model representing a review for an organization.

    Attributes:
        rating: A ``models.SmallIntegerField()`` representing the rating given to an organization (out of 5).
        comment: A ``models.TextField()`` representing the comment/review given to an organization.
        user: A ``models.ForeignKey()`` field representing the user who gave the rating and/or the comment.
        organization: A ``models.ForeignKey()`` field representing the organization to which the user
            has given the rating and/or comment.
        created_at: A ``models.DateTimeField()`` representing the date and time when the instance was created.
        updated_at: A ``models.DateTimeField()`` representing the date and time when the instance was updated.
    """
    rating = models.SmallIntegerField(
        verbose_name=_('Rating'), default=constants.REVIEW_MAX_RATING,
        validators=[
            MaxValueValidator(constants.REVIEW_MAX_RATING),
            MinValueValidator(constants.REVIEW_MIN_RATING)
        ]
    )
    comment = models.TextField(verbose_name=_('Comment'), null=True, blank=True)
    user = models.ForeignKey(
        get_user_model(), related_name='reviews',
        on_delete=models.CASCADE, verbose_name=_('User')
    )
    organization = models.ForeignKey(
        Organization, related_name='reviews',
        on_delete=models.CASCADE, verbose_name=_('Organization')
    )
    created_at = models.DateTimeField(verbose_name=_('Created at'), auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name=_('Updated at'), auto_now=True)
class Coupon(models.Model):
    """
    ``Coupon`` is the model representing a coupon associated with an organization.

    Attributes:
        title: A ``models.CharField`` representing the title of the coupon.
        description: A ``models.TextField`` representing the description of the coupon.
        organization: A ``models.ForeignKey`` representing the organization associated with the coupon.
        minimum_fund: A ``models.PositiveIntegerField`` representing the minimum fund on which this coupon
            is to be issued.
        maximum_fund: A ``models.PositiveIntegerField`` representing the maximum fund on which this coupon
            is to be issued.
        validity_start_date: A ``models.DateTimeField`` representing the date and time from which this coupon
            is valid.
        validity_end_date: A ``models.DateTimeField`` representing the date and time until which this coupon
            is valid.
        created_at: A ``models.DateTimeField`` representing the date and time when the instance was created.
        updated_at: A ``models.DateTimeField`` representing the date and time when the instance was updated.
    """
    title = models.CharField(max_length=256, verbose_name=_('Title'))
    description = models.TextField(verbose_name=_('Description'))
    organization = models.ForeignKey(
        Organization, related_name='coupons',
        on_delete=models.CASCADE, verbose_name=_('Organization')
    )
    minimum_fund = models.PositiveIntegerField(verbose_name=_('Minimum fund'))
    maximum_fund = models.PositiveIntegerField(verbose_name=_('Maximum fund'))
    validity_start_date = models.DateTimeField(verbose_name=_('Validity start date'))
    validity_end_date = models.DateTimeField(verbose_name=_('Validity end date'))
    created_at = models.DateTimeField(verbose_name=_('Created at'), auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name=_('Updated at'), auto_now=True)
|
<reponame>rubenvillegas/icml2017hierchvid<gh_stars>10-100
import os
import cv2
import sys
import time
import socket
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import scipy.misc as sm
import numpy as np
import scipy.io as sio
from os import listdir, makedirs, system
from argparse import ArgumentParser
from utils import *
from det_lstm import DET_LSTM
def merge(images, size):
    """Tile a batch of single-channel images into one grid image.

    Args:
        images: array of shape (n, h, w) — the images to tile.
        size: (rows, cols) grid layout; rows * cols should cover n.

    Returns:
        A (h * rows, w * cols) array with image ``idx`` placed at grid row
        ``idx // cols``, column ``idx % cols``.
    """
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        i = idx % size[1]
        # Floor division: `/` relied on Python-2 integer division and would
        # produce a float row index (breaking the slice) under Python 3.
        j = idx // size[1]
        img[j * h:j * h + h, i * w:i * w + w] = image
    return img
def transform(input_):
    """Map values (scalar or array) from [0, 1] into [-1, 1]."""
    return input_ * 2. - 1.
def inverse_transform(input_):
    """Map values (scalar or array) from [-1, 1] back into [0, 1]."""
    return (input_ + 1.) * 0.5
def imsave(images, size, path):
    """Tile *images* into a (rows, cols) grid (see ``merge``) and save to *path*."""
    # NOTE(review): relies on scipy.misc.imsave, which was removed in
    # SciPy >= 1.2 — running this requires an old SciPy (imageio.imwrite is
    # the usual replacement).
    return sm.imsave(path, merge(images, size))
def visualize_lm(posex, posey, image_size):
    """Render 32 landmark coordinates as a single (image_size, image_size) heat map.

    posex/posey hold 32 normalized coordinates in [-1, 1]; each landmark is
    drawn as a normalized Gaussian blob and the blobs are combined with a
    pixel-wise max.
    """
    # Undo the [-1, 1] normalization to get pixel coordinates.
    posey = inverse_transform(posey) * image_size
    posex = inverse_transform(posex) * image_size
    cpose = np.zeros((image_size, image_size, 32))
    for j in xrange(32):
        # gauss2D_mask comes from utils; presumably centered at (y, x) with
        # the given sigma in pixels — TODO confirm.
        gmask = gauss2D_mask(
            (posey[j], posex[j]), (image_size, image_size), sigma=8.)
        # Normalize each blob to peak 1 before taking the channel-wise max.
        cpose[:, :, j] = gmask / gmask.max()
    return np.amax(cpose, axis=2)
def main(gpu, image_size, batch_size, num_layer, lstm_units, seen_step,
         fut_step, mem_frac, keep_prob, learning_rate):
    """Train the DET_LSTM pose-sequence predictor on Human3.6M landmarks.

    Loads every clip listed in train_list_pose.txt, normalizes the landmark
    coordinates by the person bounding box, then runs the training loop,
    periodically saving checkpoints and sample visualizations.
    """
    lm_size = 32  # number of pose landmarks per frame
    input_size = lm_size * 2  # x and y coordinate per landmark
    fskip = 4  # temporal subsampling: use every 4th frame
    # Experiment name encodes every hyperparameter except prefix/mem_frac/gpu;
    # sample, model and log directories are derived from it.
    prefix = 'HUMAN3.6M_DET_LSTM'
    for kk, vv in locals().iteritems():
        if kk != 'prefix' and kk != 'mem_frac' and kk != 'gpu':
            prefix += '_' + kk + '=' + str(vv)
    # num_layer stacked LSTM layers of lstm_units units each.
    layers = []
    for i in range(num_layer):
        layers.append(lstm_units)
    # Human3.6M action classes used for the one-hot action label.
    class_dict = {
        'walkdog': 0,
        'purchases': 1,
        'waiting': 2,
        'eating': 3,
        'sitting': 4,
        'photo': 5,
        'discussion': 6,
        'greeting': 7,
        'walking': 8,
        'phoning': 9,
        'posing': 10,
        'walktogether': 11,
        'directions': 12,
        'smoking': 13,
        'sittingdown': 14
    }
    num_class = len(class_dict.keys())
    samples_dir = './samples/' + prefix
    models_dir = './models/' + prefix
    logs_dir = './logs/' + prefix
    data_path = './datasets/Human3.6M/'
    trainfiles = open(data_path + 'train_list_pose.txt', 'r').readlines()
    # Load every training clip; coordinates are divided by the bounding-box
    # extent (box presumably holds [x0, y0, x1, y1] — TODO confirm).
    alldata = []
    for i in xrange(len(trainfiles)):
        vid_path = trainfiles[i].split('\n')[0]
        data = {}
        tdata = np.load(vid_path)
        for kk, vv in tdata.iteritems():
            data[kk] = vv
        data['all_posex'] = data['all_posex'] / (
            1.0 * data['box'][2] - data['box'][0])
        data['all_posey'] = data['all_posey'] / (
            1.0 * data['box'][3] - data['box'][1])
        # The action name is encoded in the file name; normalize aliases.
        class_name = vid_path.split('/')[-1].split()[0].split('.')[0].lower()
        if class_name == 'walkingdog':
            class_name = 'walkdog'
        if class_name == 'takingphoto':
            class_name = 'photo'
        data['action'] = class_name
        # Clips with unrecognized action names are dropped.
        if class_name in class_dict.keys():
            alldata.append(data)
    with tf.device('/gpu:%d' % gpu):
        lstm = DET_LSTM(batch_size, input_size, layers, seen_step, fut_step,
                        keep_prob, logs_dir, learning_rate)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_frac)
    with tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                log_device_placement=False,
                gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        # Resume from the latest checkpoint when one exists; the step counter
        # is recovered from the checkpoint file name.
        loaded, model_name = lstm.load(sess, models_dir)
        if loaded:
            print("[*] Load SUCCESS")
            step = int(model_name.split("-")[-1])
        else:
            print("[!] Load failed...")
            step = 0
        total_steps = round(600000 * 16 / batch_size)
        del_list = None
        while step < total_steps:
            mini_batches, del_list = get_minibatches_idx(
                len(alldata),
                batch_size,
                shuffle=True,
                min_frame=None,
                trainfiles=trainfiles,
                del_list=del_list)
            for _, batchidx in mini_batches:
                start_time = time.time()
                # Incomplete trailing minibatches are skipped.
                if len(batchidx) == batch_size:
                    # Build one (batch, seen+fut, 2*lm_size) pose batch by
                    # sampling a random fskip-strided window from each clip.
                    pose_batch = np.zeros(
                        (batch_size, seen_step + fut_step, input_size), dtype='float32')
                    mask_batch = np.zeros(
                        (batch_size, seen_step + fut_step, lm_size), dtype='float32')
                    act_batch = np.zeros((batch_size, num_class), dtype='int32')
                    for i in xrange(batch_size):
                        ff = alldata[batchidx[i]]
                        nframes = ff['all_posex'].shape[0]
                        # Latest valid window start so the strided slice fits.
                        high = nframes - fskip * (fut_step + seen_step) + 1
                        stidx = np.random.randint(low=0, high=high)
                        posey = transform(
                            ff['all_posey'][stidx:stidx +
                                            fskip * (seen_step + fut_step):fskip, :])
                        posex = transform(
                            ff['all_posex'][stidx:stidx +
                                            fskip * (seen_step + fut_step):fskip, :])
                        pose_batch[i] = np.concatenate((posex, posey), axis=1)
                        mask_batch[i] = np.ones(mask_batch[i].shape)
                        act_batch[i, class_dict[str(ff['action'])]] = 1
                    mid_time = time.time()
                    err = lstm.train(
                        sess, pose_batch, mask_batch, step, save_logs=True)
                    # Every 100 steps dump a visualization of the first
                    # predicted sequence as one row of landmark heat maps.
                    if step % 100 == 0:
                        output = lstm.predict(sess, pose_batch, mask_batch)
                        samples = None
                        for idx in range(1):
                            for stp in range(seen_step + fut_step):
                                pre = output[idx, stp, :2 * lm_size]
                                posex, posey = (pre[:lm_size], pre[lm_size:])
                                # Recover the action name from the one-hot label.
                                act = class_dict.keys()[class_dict.values().index(
                                    act_batch[idx].argmax())]
                                sample = visualize_lm(posex, posey, image_size)
                                sample = sample.reshape((1, image_size, image_size))
                                samples = sample if samples is None else np.concatenate(
                                    [samples, sample], axis=0)
                        if not os.path.exists(samples_dir):
                            os.makedirs(samples_dir)
                        img_save_path = samples_dir + '/{0:07d}'.format(
                            step) + '_' + act + '.png'
                        imsave(samples, [1, seen_step + fut_step], img_save_path)
                    print('step=%d/%d, loss=%.12f, time=%.2f+%.2f' % (
                        step, total_steps, err, mid_time - start_time,
                        time.time() - mid_time))
                    # Checkpoint every 10k steps (after warm-up).
                    if step >= 10000 and step % 10000 == 0:
                        lstm.save(sess, models_dir, lstm.global_step)
                    step = step + 1
        # Final checkpoint once the step budget is exhausted.
        lstm.save(sess, models_dir, lstm.global_step)
if __name__ == "__main__":
    # Command-line entry point: every flag maps 1:1 onto a main() parameter.
    parser = ArgumentParser()
    parser.add_argument(
        "--gpu", type=int, dest="gpu", required=True, help="GPU device id")
    parser.add_argument(
        "--image_size",
        type=int,
        default=128,
        dest="image_size",
        help="Spatial size of image")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=256,
        dest="batch_size",
        help="Batch size for training")
    parser.add_argument(
        "--num_layer",
        type=int,
        default=1,
        dest="num_layer",
        help="Number of hidden layers for LSTM")
    parser.add_argument(
        "--lstm_units",
        type=int,
        default=1024,
        dest="lstm_units",
        help="Number of hidden units for LSTM")
    parser.add_argument(
        "--seen_step",
        type=int,
        default=10,
        dest="seen_step",
        help="Number of seen steps")
    parser.add_argument(
        "--fut_step",
        type=int,
        default=32,
        dest="fut_step",
        help="Number of steps into future")
    parser.add_argument(
        "--mem_frac",
        type=float,
        default=0.4,
        dest="mem_frac",
        help="GPU memory fraction to take up")
    parser.add_argument(
        "--keep_prob",
        type=float,
        default=1.0,
        dest="keep_prob",
        help="Keep probability for dropout")
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        dest="learning_rate",
        # Fixed: help text was copy-pasted from --keep_prob.
        help="Learning rate for the optimizer")
    args = parser.parse_args()
    main(**vars(args))
|
<gh_stars>0
__author__ = 'Hao'
import openpyxl
import json
from string import Template
# Load the master workbook; data_only=True reads cached cell values instead of formulas.
wb = openpyxl.load_workbook("../data/Inkjet Printing Process File Repository/Droplet Ejection/Trigger Waveform Graph Master Sheet.xlsx", data_only=True)
# One record per worksheet; the sheet name doubles as both id and label.
waves = {key: {"id": key, "label": key} for key in wb.sheetnames}
for wave in waves:
    # print(wave)
    try:
        sheet = wb[wave]
        # The last two populated cells of column A name the line type and the
        # waveform type for this sheet.
        waves[wave]["line_type"] = sheet["A" + str(len(sheet["A"]) - 1)].value.strip()
        waves[wave]["waveform_type"] = sheet["A" + str(len(sheet["A"]))].value.strip()
        # print(waves[wave]["line_type"])
        # print(waves[wave]["waveform_type"])
    except Exception:
        # Best-effort fallback for malformed sheets: blank both type fields so
        # the dispatch below takes the catch-all branch (which never touches
        # `sheet`). Was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        waves[wave]["line_type"] = ""
        waves[wave]["waveform_type"] = ""
    if waves[wave]["line_type"] == 'Bipolar Lines':
        # Segment timings are differences of consecutive time stamps in column B.
        waves[wave]["rise_time"] = sheet["B3"].value - sheet["B2"].value
        waves[wave]["dwell_time"] = sheet["B4"].value - sheet["B3"].value
        waves[wave]["fall_time"] = sheet["B5"].value - sheet["B4"].value
        waves[wave]["echo_time"] = sheet["B6"].value - sheet["B5"].value
        waves[wave]["final_rise_time"] = sheet["B7"].value - sheet["B6"].value
        waves[wave]["max_volt"] = max([x[0].value for x in sheet["C2":"C7"]])
        waves[wave]["min_volt"] = min([x[0].value for x in sheet["C2":"C7"]])
        waves[wave]["idle_volt"] = ""
        waves[wave]["amplitude"] = ""
        waves[wave]["period"] = ""
    elif waves[wave]["waveform_type"] == 'Sine Waveform':
        waves[wave]["rise_time"] = ""
        waves[wave]["dwell_time"] = ""
        waves[wave]["fall_time"] = ""
        waves[wave]["echo_time"] = ""
        waves[wave]["final_rise_time"] = ""
        waves[wave]["max_volt"] = max([x[0].value for x in sheet["C2":"C14"]])
        waves[wave]["min_volt"] = min([x[0].value for x in sheet["C2":"C14"]])
        waves[wave]["idle_volt"] = sheet["G1"].value
        waves[wave]["amplitude"] = sheet["G2"].value
        waves[wave]["period"] = sheet["G4"].value
    elif waves[wave]["waveform_type"] == 'Unipolar Waveform':
        waves[wave]["rise_time"] = sheet["B3"].value - sheet["B2"].value
        waves[wave]["dwell_time"] = sheet["B4"].value - sheet["B3"].value
        waves[wave]["fall_time"] = sheet["B5"].value - sheet["B4"].value
        waves[wave]["echo_time"] = ""
        waves[wave]["final_rise_time"] = ""
        waves[wave]["max_volt"] = max([x[0].value for x in sheet["C2":"C5"]])
        waves[wave]["min_volt"] = 0
        waves[wave]["idle_volt"] = ""
        waves[wave]["amplitude"] = ""
        waves[wave]["period"] = ""
    else:
        # Unknown sheet layout: emit empty strings for every numeric field.
        waves[wave]["rise_time"] = ""
        waves[wave]["dwell_time"] = ""
        waves[wave]["fall_time"] = ""
        waves[wave]["echo_time"] = ""
        waves[wave]["final_rise_time"] = ""
        waves[wave]["max_volt"] = ""
        waves[wave]["min_volt"] = ""
        waves[wave]["idle_volt"] = ""
        waves[wave]["amplitude"] = ""
        waves[wave]["period"] = ""
#print(json.dumps(waves, sort_keys=True, indent=4))
# Keys to use in constructing inks dictionary and template matching dictionary
d_keys = ["id", "label", "line_type", "waveform_type",
          "rise_time", "dwell_time", "fall_time", "echo_time", "final_rise_time",
          "max_volt", "min_volt", "idle_volt", "amplitude", "period"]
# NOTE(review): prefix_ampo is not referenced anywhere in this script —
# confirm whether it is dead code or consumed elsewhere.
prefix_ampo = "https://tw.rpi.edu/web/project/ampo-ink#"
# Turtle template for one DropletActuation individual; the ${...} placeholders
# are filled per-waveform via string.Template.substitute using d_keys above.
tt = """### https://tw.rpi.edu/web/project/ampo-ink#DropletActuation_${id}
:DropletActuation_${id} rdf:type :Actuator_DropletActuation, owl:NamedIndividual ;
rdfs:label "${label}"^^xsd:string ;
ampo:hasAttribute [ rdf:type :DropletActuation_DwellTime ;
rdfs:label "Dwell Time (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${dwell_time}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_FallTime ;
rdfs:label "Fall Time (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${fall_time}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_RiseTime ;
rdfs:label "Rise Time (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${rise_time}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_Amplitude ;
rdfs:label "Amplitude (V)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${amplitude}"^^xsd:double ;
qudt:unit qudt-unit:Volt
]
] ,
[ rdf:type :DropletActuation_WaveformType ;
rdfs:label "Waveform Type"^^xsd:string ;
ampo:descriptiveValue "${waveform_type}"^^xsd:string
] ,
[ rdf:type :DropletActuation_FinalRiseTime ;
rdfs:label "Final Rise Time (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${final_rise_time}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_LineType ;
rdfs:label "Line Type"^^xsd:string ;
ampo:descriptiveValue "${line_type}"^^xsd:string
] ,
[ rdf:type :DropletActuation_EchoTime ;
rdfs:label "Echo Time (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${echo_time}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_IdleVoltage ;
rdfs:label "Idle Voltage (V)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${idle_volt}"^^xsd:double ;
qudt:unit qudt-unit:Volt
]
] ,
[ rdf:type :DropletActuation_MaxVoltageAmplitude ;
rdfs:label "Maximum Voltage Amplitude (V)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${max_volt}"^^xsd:double ;
qudt:unit qudt-unit:Volt
]
] ,
[ rdf:type :DropletActuation_Period ;
rdfs:label "Period (μs)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${period}"^^xsd:double ;
qudt:unit qudt-unit:MicroSecond
]
] ,
[ rdf:type :DropletActuation_MinVoltageAmplitude ;
rdfs:label "Minimum Voltage Amplitude (V)"^^xsd:string ;
qudt:quantityValue [ qudt:numericValue "${min_volt}"^^xsd:double ;
qudt:unit qudt-unit:Volt
]
] .
"""
t = Template(tt)
# Use a context manager so the output file is flushed and closed even if a
# substitution raises partway through (the original leaked the handle on error).
with open("../output/waveforms.ttl", "w+") as f:
    # Turtle prefix header, written once at the top of the output file.
    f.write("""@prefix : <https://tw.rpi.edu/web/project/ampo-ink#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix sio: <http://semanticscience.org/resource/> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ampo: <https://tw.rpi.edu/web/project/ampo#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix qudt: <http://data.nasa.gov/qudt/owl/qudt#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix qudt-unit: <http://qudt.org/1.1/vocab/unit#> .
@base <https://tw.rpi.edu/web/project/ampo-ink> .
""")
    # for wave in waves:
    # Sheets are named TR-1 .. TR-118; emit one templated block per waveform.
    for i in range(1, 119):
        wave = "TR-" + str(i)
        print(wave)
        d = {key: waves[wave][key] for key in d_keys}
        # print(d)
        # print(t.substitute(d))
        f.write(t.substitute(d) + "\n\n\n")
print("All waveforms read.")
|
import uuid
import datetime as dt
import decimal
import sqlalchemy as sa
import pytest
from sqlalchemy.dialects import postgresql, mysql
from sqlalchemy.orm import column_property
from marshmallow import Schema, fields, validate
from marshmallow_sqlalchemy import (
fields_for_model,
ModelConverter,
property2field,
column2field,
field_for,
ModelConversionError,
)
from marshmallow_sqlalchemy.fields import Related, RelatedList
def contains_validator(field, v_type):
    """Return the first validator on *field* that is an instance of *v_type*.

    Falls back to ``False`` when no validator matches, so the result can be
    used both as a truthy check and to inspect the matched validator.
    """
    matches = (v for v in field.validators if isinstance(v, v_type))
    return next(matches, False)
class TestModelFieldConversion:
    """Tests for ``fields_for_model``: SQLAlchemy model -> marshmallow fields.

    The ``models``, ``session`` and ``Base`` arguments are pytest fixtures
    supplying the sample SQLAlchemy models (defined in conftest — not visible
    in this file).
    """

    def test_fields_for_model_types(self, models):
        fields_ = fields_for_model(models.Student, include_fk=True)
        assert type(fields_["id"]) is fields.Int
        assert type(fields_["full_name"]) is fields.Str
        assert type(fields_["dob"]) is fields.Date
        assert type(fields_["current_school_id"]) is fields.Int
        assert type(fields_["date_created"]) is fields.DateTime

    def test_fields_for_model_handles_exclude(self, models):
        fields_ = fields_for_model(models.Student, exclude=("dob",))
        assert type(fields_["id"]) is fields.Int
        assert type(fields_["full_name"]) is fields.Str
        # Excluded columns map to None rather than being omitted.
        assert fields_["dob"] is None

    def test_fields_for_model_handles_custom_types(self, models):
        fields_ = fields_for_model(models.Course, include_fk=True)
        assert type(fields_["grade"]) is fields.Int
        assert type(fields_["transcription"]) is fields.Str

    def test_fields_for_model_saves_doc(self, models):
        # Column info["description"] must surface as field metadata.
        fields_ = fields_for_model(models.Student, include_fk=True)
        assert (
            fields_["date_created"].metadata["description"]
            == "date the student was created"
        )

    def test_length_validator_set(self, models):
        # A String(255) column should yield a Length(max=255) validator.
        fields_ = fields_for_model(models.Student)
        validator = contains_validator(fields_["full_name"], validate.Length)
        assert validator
        assert validator.max == 255

    def test_none_length_validator_not_set(self, models):
        # Unbounded text columns must not get a Length validator.
        fields_ = fields_for_model(models.Course)
        assert not contains_validator(fields_["transcription"], validate.Length)

    def test_sets_allow_none_for_nullable_fields(self, models):
        fields_ = fields_for_model(models.Student)
        assert fields_["dob"].allow_none is True

    def test_sets_enum_choices(self, models):
        # Enum columns should translate to a OneOf validator.
        fields_ = fields_for_model(models.Course)
        validator = contains_validator(fields_["level"], validate.OneOf)
        assert validator
        assert list(validator.choices) == ["Primary", "Secondary"]

    def test_many_to_many_relationship(self, models):
        student_fields = fields_for_model(models.Student, include_relationships=True)
        assert type(student_fields["courses"]) is RelatedList
        course_fields = fields_for_model(models.Course, include_relationships=True)
        assert type(course_fields["students"]) is RelatedList

    def test_many_to_one_relationship(self, models):
        student_fields = fields_for_model(models.Student, include_relationships=True)
        assert type(student_fields["current_school"]) is Related
        school_fields = fields_for_model(models.School, include_relationships=True)
        assert type(school_fields["students"]) is RelatedList

    def test_include_fk(self, models):
        # Foreign-key columns are only emitted when include_fk=True.
        student_fields = fields_for_model(models.Student, include_fk=False)
        assert "current_school_id" not in student_fields
        student_fields2 = fields_for_model(models.Student, include_fk=True)
        assert "current_school_id" in student_fields2

    def test_overridden_with_fk(self, models):
        # A PK that is also an FK must still be emitted even with include_fk=False.
        graded_paper_fields = fields_for_model(models.GradedPaper, include_fk=False)
        assert "id" in graded_paper_fields

    def test_info_overrides(self, models):
        # Column info["marshmallow"] overrides the generated field kwargs.
        class TestModel(models.Course):
            test = sa.Column(
                sa.Text,
                nullable=True,
                info=dict(
                    marshmallow=dict(
                        validate=[validate.Length(max=1000)], required=True
                    )
                ),
            )

        fields_ = fields_for_model(TestModel)
        field = fields_["test"]
        validator = contains_validator(field, validate.Length)
        assert validator.max == 1000
        assert field.required

    def test_rename_key(self, models):
        # Subclasses can remap generated field names via _get_field_name.
        class RenameConverter(ModelConverter):
            def _get_field_name(self, prop):
                if prop.key == "name":
                    return "title"
                return prop.key

        converter = RenameConverter()
        fields = converter.fields_for_model(models.Paper)
        assert "title" in fields
        assert "name" not in fields

    def test_subquery_proxies(self, session, Base, models):
        # Model from a subquery, columns are proxied.
        # https://github.com/marshmallow-code/marshmallow-sqlalchemy/issues/383
        first_graders = session.query(models.Student).filter(
            models.Student.courses.any(models.Course.grade == 1)
        )

        class FirstGradeStudent(Base):
            __table__ = first_graders.subquery("first_graders")

        fields_ = fields_for_model(FirstGradeStudent)
        assert fields_["dob"].allow_none is True
def make_property(*column_args, **column_kwargs):
    """Build a ``column_property`` around an ad-hoc ``sa.Column``."""
    column = sa.Column(*column_args, **column_kwargs)
    return column_property(column)
class TestPropertyFieldConversion:
    """Tests for ``ModelConverter.property2field`` type mapping."""

    @pytest.fixture()
    def converter(self):
        return ModelConverter()

    def test_convert_custom_type_mapping_on_schema(self):
        # A schema-level TYPE_MAPPING override must be honored by the converter.
        class MyDateTimeField(fields.DateTime):
            pass

        class MySchema(Schema):
            TYPE_MAPPING = Schema.TYPE_MAPPING.copy()
            TYPE_MAPPING.update({dt.datetime: MyDateTimeField})

        converter = ModelConverter(schema_cls=MySchema)
        prop = make_property(sa.DateTime())
        field = converter.property2field(prop)
        assert type(field) == MyDateTimeField

    @pytest.mark.parametrize(
        ("sa_type", "field_type"),
        (
            (sa.String, fields.Str),
            (sa.Unicode, fields.Str),
            (sa.LargeBinary, fields.Str),
            (sa.Text, fields.Str),
            (sa.Date, fields.Date),
            (sa.DateTime, fields.DateTime),
            (sa.Boolean, fields.Bool),
            # NOTE(review): duplicated parametrize row (sa.Boolean appears
            # twice) — harmless but probably unintended.
            (sa.Boolean, fields.Bool),
            (sa.Float, fields.Float),
            (sa.SmallInteger, fields.Int),
            (sa.Interval, fields.TimeDelta),
            (postgresql.UUID, fields.UUID),
            (postgresql.MACADDR, fields.Str),
            (postgresql.INET, fields.Str),
            (postgresql.BIT, fields.Integer),
            (postgresql.OID, fields.Integer),
            (postgresql.CIDR, fields.String),
            (postgresql.DATE, fields.Date),
            (postgresql.TIME, fields.Time),
            (mysql.INTEGER, fields.Integer),
            (mysql.DATETIME, fields.DateTime),
        ),
    )
    def test_convert_types(self, converter, sa_type, field_type):
        prop = make_property(sa_type())
        field = converter.property2field(prop)
        assert type(field) == field_type

    def test_convert_Numeric(self, converter):
        prop = make_property(sa.Numeric(scale=2))
        field = converter.property2field(prop)
        assert type(field) == fields.Decimal
        # scale=2 => quantization exponent of -2 (i.e. 0.01 places).
        assert field.places == decimal.Decimal((0, (1,), -2))

    def test_convert_ARRAY_String(self, converter):
        prop = make_property(postgresql.ARRAY(sa.String()))
        field = converter.property2field(prop)
        assert type(field) == fields.List
        # marshmallow 3 names the element field "inner"; 2.x used "container".
        inner_field = getattr(field, "inner", getattr(field, "container", None))
        assert type(inner_field) == fields.Str

    def test_convert_ARRAY_Integer(self, converter):
        prop = make_property(postgresql.ARRAY(sa.Integer))
        field = converter.property2field(prop)
        assert type(field) == fields.List
        inner_field = getattr(field, "inner", getattr(field, "container", None))
        assert type(inner_field) == fields.Int

    def test_convert_TSVECTOR(self, converter):
        # TSVECTOR has no marshmallow equivalent and must raise.
        prop = make_property(postgresql.TSVECTOR)
        with pytest.raises(ModelConversionError):
            converter.property2field(prop)

    def test_convert_default(self, converter):
        # Columns with a client-side default must not be required.
        prop = make_property(sa.String, default="ack")
        field = converter.property2field(prop)
        assert field.required is False

    def test_convert_server_default(self, converter):
        # Columns with a server-side default must not be required.
        prop = make_property(sa.String, server_default=sa.text("sysdate"))
        field = converter.property2field(prop)
        assert field.required is False

    def test_convert_autoincrement(self, models, converter):
        # Autoincrementing primary keys must not be required.
        prop = models.Course.__mapper__.get_property("id")
        field = converter.property2field(prop)
        assert field.required is False
class TestPropToFieldClass:
    """Tests for the module-level ``property2field`` helper."""

    def test_property2field(self):
        prop = make_property(sa.Integer())
        # instance=True returns a field instance; instance=False, the class.
        field = property2field(prop, instance=True)
        assert type(field) == fields.Int
        field_cls = property2field(prop, instance=False)
        assert field_cls == fields.Int

    def test_can_pass_extra_kwargs(self):
        prop = make_property(sa.String())
        field = property2field(prop, instance=True, description="just a string")
        assert field.metadata["description"] == "just a string"
class TestColumnToFieldClass:
    """Tests for the module-level ``column2field`` helper."""

    def test_column2field(self):
        column = sa.Column(sa.String(255))
        # instance=True returns a field instance; instance=False, the class.
        field = column2field(column, instance=True)
        assert type(field) == fields.String
        field_cls = column2field(column, instance=False)
        assert field_cls == fields.String

    def test_can_pass_extra_kwargs(self):
        column = sa.Column(sa.String(255))
        field = column2field(column, instance=True, description="just a string")
        assert field.metadata["description"] == "just a string"

    def test_uuid_column2field(self):
        # A TypeDecorator with python_type uuid.UUID should deserialize to UUID.
        class UUIDType(sa.types.TypeDecorator):
            python_type = uuid.UUID
            impl = sa.BINARY(16)

        column = sa.Column(UUIDType)
        assert issubclass(column.type.python_type, uuid.UUID)  # Test against test check
        assert hasattr(column.type, "length")  # Test against test check
        assert column.type.length == 16  # Test against test
        field = column2field(column, instance=True)
        uuid_val = uuid.uuid4()
        assert field.deserialize(str(uuid_val)) == uuid_val
class TestFieldFor:
    """Tests for ``field_for``, which builds a field for one model attribute."""

    def test_field_for(self, models, session):
        field = field_for(models.Student, "full_name")
        assert type(field) == fields.Str
        field = field_for(models.Student, "current_school", session=session)
        assert type(field) == Related
        # An explicit field_class overrides the inferred type.
        field = field_for(models.Student, "full_name", field_class=fields.Date)
        assert type(field) == fields.Date

    def test_field_for_can_override_validators(self, models, session):
        # Passing validate= replaces the auto-generated validators entirely.
        field = field_for(
            models.Student, "full_name", validate=[validate.Length(max=20)]
        )
        assert len(field.validators) == 1
        assert field.validators[0].max == 20
        field = field_for(models.Student, "full_name", validate=[])
        assert field.validators == []

    def tests_postgresql_array_with_args(self, Base):
        # regression test for #392
        from sqlalchemy import Column, Integer, String
        from sqlalchemy.dialects.postgresql import ARRAY

        class ModelWithArray(Base):
            __tablename__ = "model_with_array"
            id = Column(Integer, primary_key=True)
            bar = Column(ARRAY(String))

        field = field_for(ModelWithArray, "bar", dump_only=True)
        assert type(field) == fields.List
        assert field.dump_only is True
def _repr_validator_list(validators):
return sorted(repr(validator) for validator in validators)
@pytest.mark.parametrize(
    "defaults,new,expected",
    [
        # New validators of the same class replace the defaults; validators of
        # other classes are kept alongside.
        ([validate.Length()], [], [validate.Length()]),
        (
            [validate.Range(max=100), validate.Length(min=3)],
            [validate.Range(max=1000)],
            [validate.Range(max=1000), validate.Length(min=3)],
        ),
        (
            [validate.Range(max=1000)],
            [validate.Length(min=3)],
            [validate.Range(max=1000), validate.Length(min=3)],
        ),
        ([], [validate.Length(min=3)], [validate.Length(min=3)]),
    ],
)
def test_merge_validators(defaults, new, expected):
    """``_merge_validators`` overrides same-class defaults and keeps the rest."""
    converter = ModelConverter()
    validators = converter._merge_validators(defaults, new)
    # Compare by sorted repr because validator instances lack __eq__.
    assert _repr_validator_list(validators) == _repr_validator_list(expected)
|
<gh_stars>0
from app import app
from flask import render_template, redirect, url_for
from .forms import SearchForm, AddForm
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% HOME %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*%
@app.route('/')
def home():
    # Landing page; static template, no context needed.
    return render_template('home.html')
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% END HOME %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*%
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% STATUS %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*%
from app.Backend.Status import refresh
i = 0  # module-level round-robin cursor over the three environments


@app.route('/Status')
def homeStatus():
    """Serve the status page, rotating through environments on each request.

    Deduplicated from three near-identical if-branches; the visible behavior
    (env order production -> sales -> internal -> production ...) is unchanged.
    """
    global i
    envs = ('production', 'sales', 'internal')
    env = envs[i]
    # jinw = jinja workaround; count = number of boxes that need populating
    data, count, jinw = refresh(env)
    i = (i + 1) % len(envs)
    return render_template('status.html', Data=data, env=env, count=count, jinw=jinw)
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% END STATUS %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*%
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% BLACKLIST %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*%
from app.Backend.Blacklist import dataSearch, addtoDB, deleter
tempdata = []  # cache of the most recent blacklist search results


@app.route('/Blacklist', methods=["GET", "POST"])
def homeBL():
    """Blacklist search page; re-renders the cached results on plain GETs."""
    global tempdata
    form = SearchForm()
    if not form.validate_on_submit():
        data = tempdata
    else:
        print(form.search.data)
        tempdata = data = dataSearch(form.search.data)
    return render_template('blacklist.html', form=form, data=data)
@app.route('/delete/<name>')
def delete(name):
    """Remove *name* from the blacklist DB and from the cached search results."""
    global tempdata
    print(name)
    deleter(name)
    print(tempdata)
    tempdata.remove(name)
    return redirect(url_for('homeBL'))
tempAddData = []  # entries staged for addition, not yet committed to the DB


@app.route('/add', methods=["GET", "POST"])
def append():  # named append so that it doesnt conflict with a python function
    """Staging page: collect new blacklist entries before committing."""
    global tempAddData
    form = AddForm()
    if form.validate_on_submit():
        tempAddData.append(form.add.data)
    return render_template('add.html', form=form, data=tempAddData)
@app.route('/deleteadd/<name>')
def deleteAdd(name):
    """Drop *name* from the staged (not yet committed) additions."""
    global tempAddData
    print(name)  # vrp
    tempAddData.remove(name)
    return redirect(url_for('append'))
@app.route('/commit')
def commit():
    """Flush every staged addition to the DB, then clear the staging list."""
    global tempAddData  # original note suggested de-duplicating ("make set")
    for entry in tempAddData:
        addtoDB(entry)
        print(entry)
    print(tempAddData)
    tempAddData = []
    return redirect(url_for('homeBL'))
# %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% END BLACKLIST %*%*%*%*%*%*%*%*%*%*%*%*%*%*%*% |
import discord
from discord.ext import commands
import datetime
from random import choice
class Decisions(commands.Cog):
    "Polls and decision making commands"

    def __init__(self, bot):
        self.bot = bot

    @property
    def reactions(self):
        """Keycap emoji used to number poll choices 1..10."""
        return {
            1: "1️⃣",
            2: "2️⃣",
            3: "3️⃣",
            4: "4️⃣",
            5: "5️⃣",
            6: "6️⃣",
            7: "7️⃣",
            8: "8️⃣",
            9: "9️⃣",
            10: "🔟",
        }

    @commands.command(help="Creates a simple poll with only 👍/👎 as an option.")
    async def ask(self, ctx, *, question: str) -> None:
        """
        creates simple poll with only 👍/👎 as an option

        :param ctx: discord context manager
        :type ctx: discord.ContextManager
        :param question: question to poll on
        :type question: str
        """
        await ctx.message.delete()
        embed = discord.Embed(description=question)
        embed.set_author(
            name=f"Poll by {ctx.author.display_name}", icon_url=ctx.author.avatar_url
        )
        msg = await ctx.send(embed=embed)
        await msg.add_reaction("👍")
        await msg.add_reaction("👎")

    @commands.Cog.listener()
    async def on_reaction(self, payload) -> None:
        """
        Reaction listener that enforces one vote per user on 👍/👎 polls:
        a user who already voted has their second reaction removed.

        :param payload: raw reaction payload
        """
        # NOTE(review): discord.py dispatches raw reaction payloads via the
        # `on_raw_reaction_add` event; confirm that a listener named
        # "on_reaction" actually fires with this payload shape.
        user = payload.member
        if user.bot:
            return
        msg = (
            await self.bot.get_guild(payload.guild_id)
            .get_channel(payload.channel_id)
            .fetch_message(payload.message_id)
        )
        emoji = payload.emoji
        users = []
        # BUG FIX: the original condition `("👍" and "👎") in [...]` evaluates
        # to `"👎" in [...]` because `and` returns its right operand, so any
        # bot message with just a 👎 reaction was treated as a poll. A poll
        # message must carry BOTH thumbs.
        reaction_strs = [str(r) for r in msg.reactions]
        if msg.author.bot and "👍" in reaction_strs and "👎" in reaction_strs:
            for react in msg.reactions:
                if str(react) == "👍":
                    async for reactor in react.users():
                        if reactor.bot:
                            continue
                        if reactor in users:
                            await msg.remove_reaction(emoji, user)
                            return
                        users.append(reactor)
                elif str(react) == "👎":
                    async for reactor in react.users():
                        if reactor.bot:
                            continue
                        # Already voted 👍 — strip the duplicate vote.
                        if reactor in users:
                            await msg.remove_reaction(emoji, user)
                            return
            return

    @commands.command(help="Creates a poll with up to 10 choices.")
    async def poll(self, ctx, desc, *choices) -> None:
        """
        create a poll with up to 10 choices

        :param ctx: discord context manager
        :type ctx: discord.ContextManager
        :param desc: question/decision to conduct poll
        :type desc: str
        :param choices: available choices for the poll
        :type choices: list[str]
        """
        await ctx.message.delete()
        if len(choices) < 2:
            ctx.command.reset_cooldown(ctx)
            if len(choices) == 1:
                return await ctx.send("Can't make a poll with only one choice")
            return await ctx.send(
                "You have to enter two or more choices to make a poll"
            )
        if len(choices) > 10:
            ctx.command.reset_cooldown(ctx)
            return await ctx.send("You can't make a poll with more than 10 choices")
        # `option` (not `choice`) so the loop variable does not shadow
        # random.choice imported at module level.
        embed = discord.Embed(
            description=f"**{desc}**\n\n"
            + "\n\n".join(
                f"{str(self.reactions[i])} {option}"
                for i, option in enumerate(choices, 1)
            ),
            timestamp=datetime.datetime.utcnow(),
            color=discord.colour.Color.red(),
        )
        embed.set_footer(text=f"Poll by {str(ctx.author)}")
        msg = await ctx.send(embed=embed)
        for i in range(1, len(choices) + 1):
            await msg.add_reaction(self.reactions[i])

    @commands.command(help="toss a coin")
    async def toss(self, ctx) -> None:
        """
        toss a coin

        :param ctx: discord context manager
        :type ctx: discord.ContextManager
        """
        await ctx.send(f"Coin is tossed, and.... it's {choice(['HEADS','TAILS'])}")

    @commands.command(help="takes a decision from available choices")
    async def choose(self, ctx, *args) -> None:
        """
        choose one the given option

        :param ctx: discord context manager
        :type ctx: discord.ContextManager
        """
        # (typo fix: local was named `respose`)
        response = choice(
            ["choose", "prefer", "think you should go with", "would choose"]
        )
        await ctx.send(f"Well! , I {response} {choice(args)}")
def setup(bot):
    """Extension entry point: register the Decisions cog with the bot."""
    bot.add_cog(Decisions(bot))
|
<reponame>codehag/jsparagus<gh_stars>0
"""Parse a grammar written in ECMArkup."""
import os
from jsparagus import parse_pgen, gen, grammar, types
from jsparagus.lexer import LexicalGrammar
from jsparagus.ordered import OrderedFrozenSet
ESGrammarLexer = LexicalGrammar(
    # the operators and keywords:
    "[ ] { } , ~ + ? <! == != => ( ) @ < > "
    "but empty here lookahead no not of one or returns through Some None",

    NL="\n",

    # any number of colons together
    EQ=r':+',

    # terminals of the ES grammar, quoted with backticks
    T=r'`[^` \n]+`|```',

    # also terminals, denoting control characters
    # (fixed: the class was `[0-9A-f]`, which in ASCII also matches G-Z and
    # the punctuation between 'Z' and 'a', so non-hex like "U+GHIJ" lexed as
    # CHR only to crash later in int(..., base=16))
    CHR=r'<[A-Z ]+>|U\+[0-9A-Fa-f]{4}',

    # nonterminals/types that will be followed by parameters
    NTCALL=r'[A-Za-z]\w*(?=[\[<])',

    # nonterminals (also, boolean parameters and type names)
    NT=r'[A-Za-z]\w*',

    # nonterminals wrapped in vertical bars for no apparent reason
    NTALT=r'\|[A-Z]\w+\|',

    # the spec also gives a few productions names
    PRODID=r'#[A-Za-z]\w*',

    # prose not wrapped in square brackets
    # To avoid conflict with the `>` token, this is recognized only after a space.
    PROSE=r'(?<= )>[^\n]*',

    # prose wrapped in square brackets
    WPROSE=r'\[>[^]]*\]',

    # expression denoting a matched terminal or nonterminal
    MATCH_REF=r'\$(?:0|[1-9][0-9]*)',
)
ESGrammarParser = gen.compile(
    parse_pgen.load_grammar(
        os.path.join(os.path.dirname(__file__), "esgrammar.pgen")))

SIGIL_FALSE = '~'
SIGIL_TRUE = '+'

# Abbreviations for single-character terminals, used in the lexical grammar.
ECMASCRIPT_CODE_POINTS = {
    # From <https://tc39.es/ecma262/#table-31>
    '<ZWNJ>': grammar.Literal('\u200c'),
    '<ZWJ>': grammar.Literal('\u200d'),
    '<ZWNBSP>': grammar.Literal('\ufeff'),

    # From <https://tc39.es/ecma262/#table-32>
    '<TAB>': grammar.Literal('\t'),
    '<VT>': grammar.Literal('\u000b'),
    '<FF>': grammar.Literal('\u000c'),
    '<SP>': grammar.Literal(' '),
    '<NBSP>': grammar.Literal('\u00a0'),
    # <ZWNBSP> already defined above
    '<USP>': grammar.UnicodeCategory('Zs'),

    # From <https://tc39.es/ecma262/#table-33>
    '<LF>': grammar.Literal('\u000a'),
    '<CR>': grammar.Literal('\u000d'),
    '<LS>': grammar.Literal('\u2028'),
    # Fixed: <PS> was '\u2028' (LINE SEPARATOR, same as <LS>). PARAGRAPH
    # SEPARATOR is U+2029 per ECMA-262 Table 33.
    '<PS>': grammar.Literal('\u2029'),
}
class ESGrammarBuilder:
    """Reduce-action methods for the esgrammar parser.

    The generated parser calls one method here per matched production,
    turning esgrammar source into `jsparagus.grammar` objects.
    """

    def __init__(self, terminal_names):
        # Names of terminals that are written as nonterminals in the grammar.
        # For example, "BooleanLiteral" is a terminal name when parsing the
        # syntactic grammar.
        if terminal_names is None:
            terminal_names = frozenset()
        self.terminal_names = terminal_names

    # --- Generic list-building reducers -----------------------------------
    def single(self, x):
        return [x]

    def append(self, x, y):
        return x + [y]

    def concat(self, x, y):
        return x + y

    def blank_line(self):
        return []

    def nt_def_to_list(self, nt_def):
        return [nt_def]

    def to_production(self, lhs, i, rhs, is_sole_production):
        """Wrap a list of grammar symbols `rhs` in a Production object."""
        body, reducer, condition = rhs
        if reducer is None:
            reducer = self.default_reducer(lhs, i, body, is_sole_production)
        return grammar.Production(body, reducer, condition=condition)

    def default_reducer(self, lhs, i, body, is_sole_production):
        """Synthesize a CallMethod reducer named after the nonterminal."""
        assert isinstance(lhs, grammar.Nt)
        nt_name = lhs.name
        # One argument per concrete (non-lookahead) element of the body.
        nargs = sum(1 for e in body if grammar.is_concrete_element(e))
        if is_sole_production:
            method_name = nt_name
        else:
            method_name = '{} {}'.format(nt_name, i)
        return grammar.CallMethod(method_name, tuple(range(nargs)))

    def needs_asi(self, lhs, p):
        """True if p is a production in which ASI can happen."""
        # The purpose of the fake ForLexicalDeclaration production is to have a
        # copy of LexicalDeclaration that does not trigger ASI.
        #
        # Two productions have body == [";"] -- one for EmptyStatement and one
        # for ClassMember. Neither should trigger ASI.
        #
        # The only other semicolons that should not trigger ASI are the ones in
        # `for` statement productions, which happen to be exactly those
        # semicolons that are not at the end of a production.
        return (not (isinstance(lhs, grammar.Nt)
                     and lhs.name == 'ForLexicalDeclaration')
                and len(p.body) > 1
                and p.body[-1] == ';')

    def apply_asi(self, p, reducer_was_autogenerated):
        """Return two rules based on p, so that ASI can be applied."""
        assert isinstance(p.reducer, grammar.CallMethod)
        if reducer_was_autogenerated:
            # Don't pass the semicolon to the method.
            reducer = grammar.CallMethod(p.reducer.method,
                                         p.reducer.args[:-1])
        else:
            reducer = p.reducer
        # Except for do-while loops, check at runtime that ASI occurs only at
        # the end of a line.
        if (len(p.body) == 7
                and p.body[0] == 'do'
                and p.body[2] == 'while'
                and p.body[3] == '('
                and p.body[5] == ')'
                and p.body[6] == ';'):
            code = "do_while_asi"
        else:
            code = "asi"
        return [
            # The preferred production, with the semicolon in.
            p.copy_with(body=p.body[:],
                        reducer=reducer),
            # The fallback production, performing ASI.
            p.copy_with(body=p.body[:-1] + [grammar.ErrorSymbol(code)],
                        reducer=reducer),
        ]

    def expand_lexical_rhs(self, rhs):
        """Split multi-character terminal strings into per-character Literals."""
        body, reducer, condition = rhs
        out = []
        for e in body:
            if isinstance(e, str):
                # The terminal symbols of the lexical grammar are characters, so
                # add each character of this string as a separate element.
                out += [grammar.Literal(ch) for ch in e]
            else:
                out.append(e)
        return [out, reducer, condition]

    def nt_def(self, nt_type, lhs, eq, rhs_list):
        """Build one nonterminal definition.

        `eq` selects the grammar kind: ':' = syntactic, '::' = lexical.
        """
        has_sole_production = (len(rhs_list) == 1)
        production_list = []
        for i, rhs in enumerate(rhs_list):
            if eq == ':':
                # Syntactic grammar. A hack is needed for ASI.
                reducer_was_autogenerated = rhs[1] is None
                p = self.to_production(lhs, i, rhs, has_sole_production)
                if self.needs_asi(lhs, p):
                    production_list += self.apply_asi(p, reducer_was_autogenerated)
                else:
                    production_list.append(p)
            elif eq == '::':
                # Lexical grammar. A hack is needed to replace multicharacter
                # terminals like `!==` into sequences of character terminals.
                rhs = self.expand_lexical_rhs(rhs)
                p = self.to_production(lhs, i, rhs, has_sole_production)
                production_list.append(p)
        return (lhs.name, eq, grammar.NtDef(lhs.args, production_list, nt_type))

    def nt_def_one_of(self, nt_type, nt_lhs, eq, terminals):
        # "one of" shorthand: each listed terminal becomes its own production.
        return self.nt_def(nt_type, nt_lhs, eq, [([t], None, None) for t in terminals])

    def nt_lhs_no_params(self, name):
        return grammar.Nt(name, ())

    def nt_lhs_with_params(self, name, params):
        return grammar.Nt(name, tuple(params))

    def simple_type(self, name):
        return types.Type(name)

    def parameterized_type(self, name, args):
        return types.Type(name, args)

    def t_list_line(self, terminals):
        return terminals

    def terminal(self, t):
        # Strip the surrounding backticks from a quoted terminal.
        assert t[0] == "`"
        assert t[-1] == "`"
        return t[1:-1]

    def terminal_chr(self, chr):
        raise ValueError("FAILED: %r" % chr)

    def rhs_line(self, ifdef, rhs, reducer, _prodid):
        return (rhs, reducer, ifdef)

    def rhs_line_prose(self, prose):
        return ([prose], None, None)

    def empty_rhs(self):
        return []

    def expr_match_ref(self, token):
        # `$0`, `$1`, ... refer to matched elements by position.
        assert token.startswith('$')
        return int(token[1:])

    def expr_call(self, method, args):
        return grammar.CallMethod(method, args or ())

    def expr_some(self, expr):
        return grammar.Some(expr)

    def expr_none(self):
        return None

    def ifdef(self, value, nt):
        return nt, value

    def optional(self, nt):
        return grammar.Optional(nt)

    def but_not(self, nt, exclusion):
        _, exclusion = exclusion
        return grammar.Exclude(nt, [exclusion])
        # return ('-', nt, exclusion)

    def but_not_one_of(self, nt, exclusion_list):
        exclusion_list = [exclusion for _, exclusion in exclusion_list]
        return grammar.Exclude(nt, exclusion_list)
        # return ('-', nt, exclusion_list)

    def no_line_terminator_here(self, lt):
        if lt not in ('LineTerminator', '|LineTerminator|'):
            raise ValueError("unrecognized directive " + repr("[no " + lt + " here]"))
        return grammar.NoLineTerminatorHere

    def nonterminal(self, name):
        # Names registered as terminals pass through as plain strings.
        if name in self.terminal_names:
            return name
        return grammar.Nt(name, ())

    def nonterminal_apply(self, name, args):
        if name in self.terminal_names:
            raise ValueError("parameters applied to terminal {!r}".format(name))
        if len(set(k for k, expr in args)) != len(args):
            raise ValueError("parameter passed multiple times")
        return grammar.Nt(name, tuple(args))

    def arg_expr(self, sigil, argname):
        if sigil == '?':
            # `?Param` forwards the enclosing production's parameter value.
            return (argname, grammar.Var(argname))
        else:
            return (argname, sigil)

    def sigil_false(self):
        return False

    def sigil_true(self):
        return True

    # --- Exclusion ("but not") operands, tagged by kind -------------------
    def exclusion_terminal(self, t):
        return ("t", t)

    def exclusion_nonterminal(self, nt):
        return ("nt", nt)

    def exclusion_chr_range(self, c1, c2):
        return ("range", c1, c2)

    # --- Lookahead restrictions -------------------------------------------
    def la_eq(self, t):
        return grammar.LookaheadRule(OrderedFrozenSet([t]), True)

    def la_ne(self, t):
        return grammar.LookaheadRule(OrderedFrozenSet([t]), False)

    def la_not_in_nonterminal(self, nt):
        return grammar.LookaheadRule(OrderedFrozenSet([nt]), False)

    def la_not_in_set(self, lookahead_exclusions):
        if all(len(excl) == 1 for excl in lookahead_exclusions):
            return grammar.LookaheadRule(
                OrderedFrozenSet(excl[0] for excl in lookahead_exclusions),
                False)
        raise ValueError("unsupported: lookahead > 1 token, {!r}"
                         .format(lookahead_exclusions))

    def chr(self, t):
        """Translate `<ABBREV>` or `U+XXXX` notation into a Literal."""
        assert t[0] == "<" or t[0] == 'U'
        if t[0] == "<":
            assert t[-1] == ">"
            if t not in ECMASCRIPT_CODE_POINTS:
                raise ValueError("unrecognized character abbreviation {!r}".format(t))
            return ECMASCRIPT_CODE_POINTS[t]
        else:
            assert t[1] == "+"
            return grammar.Literal(chr(int(t[2:], base=16)))
def finish_grammar(nt_defs, goals, variable_terminals, synthetic_terminals, single_grammar=True):
    """Assemble parsed nonterminal definitions into a grammar.Grammar.

    Args:
        nt_defs: iterable of (name, eq, rhs) triples; `eq` is ":" for
            syntactic productions and "::" for lexical ones.
        goals: goal nonterminal names; must be non-empty.
        variable_terminals, synthetic_terminals: forwarded to Grammar.
        single_grammar: if True, keep only definitions belonging to the
            same grammar (syntactic or lexical) as the goal symbols.

    Raises:
        ValueError: duplicate definitions, no goals, goals spanning both
            grammars, or malformed terminal spellings.
    """
    # nt_defs is iterated twice below; materialize it so a generator works.
    nt_defs = list(nt_defs)
    nt_grammars = {}
    for nt_name, eq, _ in nt_defs:
        if nt_name in nt_grammars:
            raise ValueError(
                "duplicate definitions for nonterminal {!r}"
                .format(nt_name))
        nt_grammars[nt_name] = eq
    # Figure out which grammar we were trying to get (":" for syntactic,
    # "::" for lexical) based on the goal symbols.
    goals = list(goals)
    if len(goals) == 0:
        raise ValueError("no goal nonterminals specified")
    if single_grammar:
        selected_grammars = set(nt_grammars[goal] for goal in goals)
        assert len(selected_grammars) != 0
        if len(selected_grammars) > 1:
            raise ValueError(
                "all goal nonterminals must be part of the same grammar; "
                "got {!r} (matching these grammars: {!r})"
                .format(set(goals), set(selected_grammars)))
        [selected_grammar] = selected_grammars
    terminal_set = set()
    def hack_production(p):
        # Strip backticks from terminals written as `token` and record them.
        for i, e in enumerate(p.body):
            if isinstance(e, str) and e[:1] == "`":
                if len(e) < 3 or e[-1:] != "`":
                    raise ValueError(
                        "Unrecognized grammar symbol: {!r} (in {!r})"
                        .format(e, p))
                # BUG FIX: assign into the production's body (the sequence
                # being iterated), not into the Production object itself.
                p.body[i] = token = e[1:-1]
                terminal_set.add(token)
    nonterminals = {}
    for nt_name, eq, rhs_list_or_lambda in nt_defs:
        if single_grammar and eq != selected_grammar:
            continue
        if isinstance(rhs_list_or_lambda, grammar.NtDef):
            nonterminals[nt_name] = rhs_list_or_lambda
        else:
            rhs_list = rhs_list_or_lambda
            for p in rhs_list:
                if not isinstance(p, grammar.Production):
                    raise ValueError(
                        "invalid grammar: ifdef in non-function-call context")
                hack_production(p)
            if nt_name in nonterminals:
                raise ValueError(
                    "unsupported: multiple definitions for nt " + nt_name)
            nonterminals[nt_name] = rhs_list
    for t in terminal_set:
        if t in nonterminals:
            raise ValueError(
                "grammar contains both a terminal `{}` and nonterminal {}"
                .format(t, t))
    return grammar.Grammar(
        nonterminals,
        goal_nts=goals,
        variable_terminals=variable_terminals,
        synthetic_terminals=synthetic_terminals)
def parse_esgrammar(
        text,
        *,
        filename=None,
        goals=None,
        terminal_names=None,
        synthetic_terminals=None,
        single_grammar=True):
    """Parse ESGrammar source `text` and return a grammar.Grammar."""
    builder = ESGrammarBuilder(terminal_names)
    lexer = ESGrammarLexer(ESGrammarParser(builder=builder), filename=filename)
    lexer.write(text)
    nt_defs = lexer.close()
    # Synthetic terminals are not lexer variables; everything else is.
    variable_terminals = set(terminal_names) - set(synthetic_terminals)
    return finish_grammar(
        nt_defs,
        goals=goals,
        variable_terminals=variable_terminals,
        synthetic_terminals=synthetic_terminals,
        single_grammar=single_grammar)
|
<gh_stars>0
"""
Some utilities for working with spiders
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from itertools import izip_longest
from scrapy.crawler import Crawler
from scrapy.utils.project import get_project_settings
import magic
import subprocess
#import pydocx
from pydocx import PyDocX
import os
import lxml.etree
import lxml.html
from lxml.html import HTMLParser
from lxml.html.clean import clean_html,Cleaner
from logging import raiseExceptions
# Numeric codes for the file types check_file_type() can recognize.
HTML = 1
DOC = 2
DOCX = 3
PDF = 4
#ZIP = 5
def list_spiders():
    """Return the names of all spiders registered in the Scrapy project."""
    project_settings = get_project_settings()
    return Crawler(project_settings).spiders.list()
def check_file_type(filepath, as_string=False):
    """Detect the type of a downloaded file via libmagic.

    Returns one of the module constants (or the constant's name when
    `as_string` is true), or None when the type cannot be determined
    or is not one we handle.
    """
    filetype = magic.from_file(filepath)
    # Empty / undeterminable outputs from libmagic.
    if not filetype or filetype in ('empty', 'very short file (no magic)'):
        return None
    if "Microsoft Office Word" in filetype:
        return 'DOC' if as_string else DOC
    if filetype[0:4] == 'HTML':
        return 'HTML' if as_string else HTML
    if filetype == 'Microsoft Word 2007+':
        return 'DOCX' if as_string else DOCX
    if 'PDF' in filetype:
        return 'PDF' if as_string else PDF
    if filetype[0:3] == 'Zip':
        # A lot of hansards are found to be in ZIP format, but can be
        # opened with python-docx, so treat them as DOCX.
        return 'DOCX' if as_string else DOCX
    # Some other filetype that we don't account for.
    return None
def doc_to_html(filepath, overwrite=False):
    """
    Converts a doc file to an in-memory html string, caching the result
    next to the source file as '<filepath>.html'.
    :param filepath: full filepath to the file to convert
    :param overwrite: regenerate the cached html even if it exists
    :return: unicode string, or None when conversion fails
    """
    html_file = '{}.html'.format(filepath)
    if not os.path.exists(html_file) or overwrite:
        cmd = ['abiword', '--to=html', '--to-name=fd://1', filepath]
        try:
            res = subprocess.check_output(cmd)
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only catch a missing binary or a failed conversion.
        except (OSError, subprocess.CalledProcessError):
            return None
        with open(html_file, 'wb') as tmp:
            tmp.write(res)
    else:
        with open(html_file, 'rb') as tmp:
            res = tmp.read()
    return res.decode('utf-8')
def docx_to_html(filepath, overwrite=False):
    """
    Converts a docx file to an in-memory html string, caching the result
    next to the source file as '<filepath>.html'.
    :param filepath: full path to the file to convert
    :param overwrite: regenerate the cached html even if it exists
    :return: unicode string
    """
    html_file = '{}.html'.format(filepath)
    if os.path.exists(html_file) and not overwrite:
        with open(html_file, 'rb') as cached:
            return cached.read().decode('utf-8')
    res = PyDocX.to_html(filepath)
    with open(html_file, 'wb') as cached:
        cached.write(res.encode('utf-8'))
    return res
def get_file_path(rel_path):
    """
    Given a relative path for a file downloaded by scrapy, return its
    absolute path under the configured SCRAPY_FILES_PATH directory.
    """
    files_folder = getattr(settings, 'SCRAPY_FILES_PATH', None)
    if files_folder is None:
        raise ImproperlyConfigured("No SCRAPY_FILES_PATH defined")
    file_path = os.path.join(files_folder, rel_path)
    if os.path.exists(file_path):
        return file_path
    raise RuntimeError("Could not find file at {}".format(file_path))
def to_string(obj, encoding='utf-8'):
    """
    Encode unicode objects to byte strings; byte strings and non-string
    values are returned unchanged. (Python 2 only.)
    """
    if isinstance(obj, unicode):
        return obj.encode(encoding)
    return obj
def to_unicode(obj, encoding='utf-8'):
    """
    Decode byte strings to unicode; unicode and non-string values are
    returned unchanged. (Python 2 only.)
    """
    if isinstance(obj, basestring) and not isinstance(obj, unicode):
        return unicode(obj, encoding)
    return obj
def grouper(iterable, n, fillvalue=None):
    """
    Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the same iterator: zipping them yields n-sized chunks.
    iterators = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *iterators)
def utf2html(ustring):
    """Convert plain-text whitespace conventions into simple HTML markup."""
    # Replace Windows line endings first so '\r\n' is not expanded twice.
    ustring = ustring.replace('\r\n','<br><br />')
    ustring = ustring.replace('\n','<br><br />')
    ustring = ustring.replace('\t','    ')
    return ustring
def merge_docx(docx_list=None, out_htmlpath=None):
    """
    docx_list is a list of strings which contains the (absolute) path of
    DOC/DOCX files to be merged; MERGE_DOCX() follows the index order of
    docx_list for appending.
    Returns the merged HTML as a string, or None when nothing could be
    converted. If OUT_HTMLPATH is given, write the HTML file out as well.
    """
    if docx_list is None:
        return None
    cleaner = Cleaner()
    parser = HTMLParser(encoding='utf-8')
    html_list = []
    for path in docx_list:
        try:
            tmp_html = PyDocX.to_html(path)
            html_list.append(cleaner.clean_html(lxml.html.fromstring(tmp_html, parser=parser)))
        # BUG FIX: the bare `except:` also caught KeyboardInterrupt/SystemExit.
        except Exception:
            # PyDocX failed ('MalformedDocxException'); fall back to a
            # cached '<path>.html' and pretend it is html.
            try:
                html_file = '{}.html'.format(path)
                with open(html_file, 'rb') as tmp:
                    tmp_html = tmp.read()
                tmp_html = tmp_html.decode('utf-8')
                html_list.append(cleaner.clean_html(lxml.html.fromstring(tmp_html, parser=parser)))
            except Exception:
                # Cannot convert this document at all; skip it.
                continue
    # BUG FIX: the old empty case indexed html_list[0] just to catch the
    # IndexError; test for emptiness directly.
    if not html_list:
        # No convertible content. Most likely just an image/appendix.
        return None
    # Append every later document's body elements at the end of the first body.
    main_body = html_list[0].xpath('./body')[0]
    for tree in html_list[1:]:
        for elem in tree.xpath('./body/*'):
            main_body.append(elem)
    # Convert the ElementTree back to a string. We lose the 'style' info
    # from html_list[0]'s header; downstream parsing has not needed it.
    html_str = lxml.etree.tostring(main_body)
    if out_htmlpath is not None:
        with open(out_htmlpath, 'wb') as tmp:
            tmp.write(html_str.encode('utf-8'))
    return html_str
|
"""
Admin configurations for django-invite project
"""
from django.conf import settings
from django.contrib import admin, messages
from django.forms import BooleanField, ModelForm
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import gettext as _
from invite.join_and import join_and
from .models import Family, Guest, Accompany, Event, MailTemplate
from .send_mass_html_mail import send_mass_html_mail
class InviteInline(admin.TabularInline):
    """
    Family guest admin view.
    A family requires at least one guest.
    """
    model = Guest
    extra = 2    # blank guest rows shown by default
    min_num = 1  # every family must have at least one guest
class AccompanyInline(admin.TabularInline):
    """Family accompanies admin view"""
    model = Accompany
    extra = 1    # one blank accompany row shown by default
    min_num = 0  # accompanies are optional
class FamilyInvitationForm(ModelForm):
    """Form to permit Family Invitation to be sent"""
    # Unchecked by default; the admin reads it in save_formset to decide
    # whether to send the invitation email.
    send_mail = BooleanField(label=_('Send the mail'), required=False)
class FamilyInvitationInline(admin.TabularInline):
    """Invitation families admin view"""
    autocomplete_fields = ("family", "event")
    model = Event.families.through
    readonly_fields = ('show_mail',)
    form = FamilyInvitationForm
    extra = 1
    min_num = 0
    @staticmethod
    def show_mail(instance):
        """Extra field adding a link to preview the email"""
        if not instance.pk:
            return ""
        if not instance.event.has_mailtemplate:
            return _("The event has no email template set")
        url = reverse('show_mail', kwargs={"event_id": instance.event_id,
                                           "family_id": instance.family_id})
        # BUG FIX: let format_html() interpolate so the url is escaped;
        # pre-formatting with str.format() marked unescaped text as safe.
        return format_html(u'<a href="{}">{}</a>', url, _("Preview the mail"))
class MailTemplateInline(admin.StackedInline):
    """
    MailTemplate admin view
    An event can only have one mail template (for now ?)
    """
    model = MailTemplate
class FamilyInvitationModelAdminMixin(admin.ModelAdmin):
    """
    Mixin model admin for family invitation management
    Inlines is preset to add FamilyInvitation Inline and saving it will send the email from the
    formset
    """
    inlines = [FamilyInvitationInline]
    def save_formset(self, request, form, formset, change):
        """Send FamilyInvitation mail after saving the formset"""
        super().save_formset(request, form, formset, change)
        # Only the FamilyInvitation formset has this field combination;
        # other inline formsets are saved without sending anything.
        if 'send_mail' in formset.form.declared_fields and \
                'event' in formset.form.base_fields and \
                'family' in formset.form.base_fields:
            self._send_mail(request, formset)
    @staticmethod
    def _send_mail(request, formset):
        """Send the emails for a formset from a FamilyInvitationForm"""
        # Deduplicate (family, event) pairs whose "send_mail" box is checked;
        # deleted/empty forms yield falsy cleaned_data and are skipped.
        family_invitations = {(data['family'], data['event'])
                              for data in formset.cleaned_data
                              if data and data["send_mail"]}
        if family_invitations:
            to_send = (
                event.gen_mass_email(family)
                for family, event in family_invitations
            )
            # Reply-To lists every configured host as "Name <email>".
            send_result = send_mass_html_mail(
                to_send,
                reply_to=["{host} <{email}>".format(host=host, email=settings.INVITE_HOSTS[host])
                          for host in settings.INVITE_HOSTS]
            )
            messages.add_message(request, messages.INFO,
                                 _("%(result)d messages send") % {"result": send_result})
@admin.register(Family, site=admin.site)
class FamilyAdmin(FamilyInvitationModelAdminMixin):
    """
    Family admin view
    This view uses FamilyInvitationInline to send an invitation to a selection of guests
    """
    inlines = [InviteInline, AccompanyInline] + FamilyInvitationModelAdminMixin.inlines
    search_fields = ("guests__name", "accompanies__name")
@admin.register(Event, site=admin.site)
class EventAdmin(FamilyInvitationModelAdminMixin):
    """
    Event admin view
    This view uses FamilyInvitationInline to send an invitation to a selection of guests
    """
    exclude = ('families', )
    actions = ["send_mail"]
    search_fields = ("name", "date")
    inlines = [MailTemplateInline] + FamilyInvitationModelAdminMixin.inlines
    def send_mail(self, request, events):
        """
        Email action: send the invitation email to every family of each
        selected event.
        :param request: the admin request
        :param events: the queryset of selected events
        :return: None (reports success/failure through the admin messages)
        """
        # Refuse to send anything if any selected event lacks a template.
        events_without_mail = [str(event) for event in events if not event.has_mailtemplate]
        if events_without_mail:
            self.message_user(request, _("The %(events)s has no email template set") %
                              {"events": join_and(events_without_mail)},
                              messages.ERROR)
            return
        to_send = (
            invitation.gen_mass_email(family, request=request)
            for invitation in events
            for family in invitation.families.all()
        )
        result = send_mass_html_mail(
            to_send,
            reply_to=["{host} <{email}>".format(host=host, email=settings.INVITE_HOSTS[host])
                      for host in settings.INVITE_HOSTS]
        )
        self.message_user(request, _("%(result)d messages send") % {"result": result})
    send_mail.short_description = _("Send the email")
|
<filename>celseq2/diagnose.py
#!/usr/bin/env python3
import argparse
from .helper import print_logger
from .helper import filehandle_fastq_gz
from collections import Counter
def get_dict_bc_has_reads(r1, bc_index, bc_seq_col):
    """Count R1 reads per known cell barcode.

    Args:
        r1: path to the R1 FASTQ file (optionally gzipped).
        bc_index: path to a whitespace-delimited cell barcode index file.
        bc_seq_col: 0-based column of the barcode sequence in bc_index.

    Returns:
        Counter mapping each known barcode to its read count, plus an
        'unknown' key for reads whose prefix matches no known barcode.
    """
    # NOTE(review): looks like a leftover debug print — confirm intended.
    print(r1)
    with open(bc_index, 'rt') as fin:
        # next(fin)
        rows = map(lambda row: row.strip().split(), fin)
        known_bc = set([row[bc_seq_col] for row in rows])
    print_logger('There are {} different cell barcodes.'.format(len(known_bc)))
    res = Counter({bc: 0 for bc in known_bc})
    # Assumes all barcodes share one length (taken from an arbitrary one) —
    # TODO confirm against the index format.
    bc_len = len(next(iter(res)))
    fh_r1 = filehandle_fastq_gz(r1) if r1.endswith('.gz') else open(r1, 'r')
    i = 0
    while True:
        if i % 1000000 == 0:
            print_logger('Processing {:,} reads...'.format(i))
        try:
            # FASTQ records are 4 lines: header, sequence, '+', quality.
            _ = next(fh_r1).rstrip()
            r1_seq = next(fh_r1).rstrip()
            _ = next(fh_r1).rstrip()
            _ = next(fh_r1).rstrip()
            i += 1
            # The barcode is the leading bc_len bases of the read.
            r1_bc = r1_seq[:bc_len]
            if not r1_bc:
                continue
            if r1_bc in known_bc:
                res[r1_bc] += 1
            else:
                res['unknown'] += 1
        except StopIteration:
            break
    print_logger('Processed total {:,} reads...'.format(i))
    fh_r1.close()
    return res
def main():
    """CLI entry point: count reads per cell barcode and write a summary.

    The output is a tab-separated table of barcode, read count, and
    percentage of total reads, followed by max/min/avg/total rows.
    """
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument(
        '--bc-index', metavar='FILENAME', type=str,
        help=('File path to cell barcode index.'))
    parser.add_argument(
        '--bc-seq-col', metavar='N', default=1, type=int,
        # BUG FIX: a stray comma made this help text a tuple, which
        # argparse rendered as a Python tuple repr.
        help=('Column index of cell barcode index file to find the sequence'
              ' of cell barcodes. Default: 1 (2nd column).'))
    parser.add_argument(
        '--r1', metavar='FILENAME', type=str,
        help=('File path to R1.'))
    parser.add_argument(
        '-o', '--output',
        metavar='FILENAME', type=str,
        required=True,
        help=('File path save output log.'))
    args = parser.parse_args()
    if args.r1 and args.bc_index:
        counter_bc_size = get_dict_bc_has_reads(args.r1,
                                                args.bc_index,
                                                args.bc_seq_col)
        fhout = open(args.output, 'w')
        tot = sum([counter_bc_size[x] for x in counter_bc_size])
        bc_size_max, bc_size_min = float('-inf'), float('inf')
        for bc in counter_bc_size:
            # Track extremes over real barcodes only ('unknown' excluded).
            if bc != 'unknown' and counter_bc_size[bc] > bc_size_max:
                bc_size_max = counter_bc_size[bc]
            if bc != 'unknown' and counter_bc_size[bc] < bc_size_min:
                bc_size_min = counter_bc_size[bc]
            fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
                bc, 20,
                counter_bc_size[bc], counter_bc_size[bc] * 100 / tot))
        valid_bc_size_val = [counter_bc_size[x]
                             for x in counter_bc_size if x != 'unknown']
        bc_size_avg = sum([x / len(valid_bc_size_val)
                           for x in valid_bc_size_val])
        fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
            'bc_size_max', 20,
            bc_size_max, bc_size_max * 100 / tot))
        fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
            'bc_size_min', 20,
            bc_size_min, bc_size_min * 100 / tot))
        fhout.write('{:>{}}\t{:06.2f}\t{:06.2f}\n'.format(
            'bc_size_avg', 20,
            bc_size_avg, bc_size_avg * 100 / tot))
        fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
            'total', 20,
            tot, tot * 100 / tot))
        fhout.close()
|
import os
from math import ceil, floor
from .errors import InvalidCaptionsError
from .webvtt import WebVTT
from .structures import Caption
MPEGTS = 900000  # default MPEGTS offset written to X-TIMESTAMP-MAP
SECONDS = 10  # default number of seconds per segment
__all__ = ['WebVTTSegmenter']
class WebVTTSegmenter(object):
    """
    Provides segmentation of WebVTT captions for HTTP Live Streaming (HLS).
    """
    def __init__(self):
        # All real state is set by segment(); these are placeholders.
        self._total_segments = 0
        self._output_folder = ''
        self._seconds = 0
        self._mpegts = 0
        self._segments = []
    def _validate_webvtt(self, webvtt):
        # Validates that the captions is a list and all the captions are instances of Caption.
        if not isinstance(webvtt, WebVTT):
            return False
        for c in webvtt.captions:
            if not isinstance(c, Caption):
                return False
        return True
    def _slice_segments(self, captions):
        """Distribute captions into per-segment buckets by start/end time."""
        self._segments = [[] for _ in range(self.total_segments)]
        for c in captions:
            segment_index_start = floor(c.start_in_seconds / self.seconds)
            self.segments[segment_index_start].append(c)
            # Also include a caption in other segments based on the end time.
            segment_index_end = floor(c.end_in_seconds / self.seconds)
            if segment_index_end > segment_index_start:
                for i in range(segment_index_start + 1, segment_index_end + 1):
                    self.segments[i].append(c)
    def _write_segments(self):
        """Write one '<name>-<index>.webvtt' file per segment."""
        for index in range(self.total_segments):
            segment_file = os.path.join(self._output_folder, '{}-{}.webvtt'.format(self._webvttname, index))
            with open(segment_file, 'w', encoding='utf-8') as f:
                f.write('WEBVTT\n')
                # MPEGTS offset lets players map cue times to stream time.
                f.write('X-TIMESTAMP-MAP=MPEGTS:{},LOCAL:00:00:00.000\n'.format(self._mpegts))
                for caption in self.segments[index]:
                    f.write('\n{} --> {}\n'.format(caption.start, caption.end))
                    f.writelines(['{}\n'.format(l) for l in caption.lines])
    def _write_manifest(self):
        """Write the HLS '<name>.m3u8' playlist referencing every segment."""
        manifest_file = os.path.join(self._output_folder, '{}.m3u8'.format(self._webvttname))
        with open(manifest_file, 'w', encoding='utf-8') as f:
            f.write('#EXTM3U\n')
            f.write('#EXT-X-VERSION:3\n')
            f.write('#EXT-X-MEDIA-SEQUENCE:0\n')
            f.write('#EXT-X-TARGETDURATION:{}\n'.format(self.seconds))
            f.write('#EXT-X-PLAYLIST-TYPE:VOD\n')
            for i in range(self.total_segments):
                f.write('#EXTINF:{}.00000\n'.format(self.seconds))
                f.write('{}-{}.webvtt\n'.format(os.path.basename(self._webvttname), i))
            f.write('#EXT-X-ENDLIST\n')
    def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):
        """Segments the captions based on a number of seconds."""
        if isinstance(webvtt, str):
            # if a string is supplied we parse the file
            captions = WebVTT().read(webvtt).captions
        elif not self._validate_webvtt(webvtt):
            raise InvalidCaptionsError('The captions provided are invalid')
        else:
            # we expect to have a webvtt object
            captions = webvtt.captions
        self._total_segments = 0 if not captions else int(ceil(captions[-1].end_in_seconds / seconds))
        self._output_folder = output
        self._seconds = seconds
        self._mpegts = mpegts
        # NOTE(review): splitext assumes `webvtt` is a path string; when a
        # WebVTT object is passed (handled above) this raises — confirm
        # whether object input is actually supported end to end.
        webvtt_name = os.path.splitext(webvtt)[0]
        self._webvttname = webvtt_name
        output_folder = os.path.join(os.getcwd(), output)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        self._slice_segments(captions)
        self._write_segments()
        self._write_manifest()
    @property
    def seconds(self):
        """Returns the number of seconds used for segmenting captions."""
        return self._seconds
    @property
    def total_segments(self):
        """Returns the total of segments."""
        return self._total_segments
    @property
    def segments(self):
        """Return the list of segments."""
        return self._segments
|
"""The tests for the Modbus cover component."""
from pymodbus.exceptions import ModbusException
import pytest
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.components.modbus.const import (
CALL_TYPE_COIL,
CALL_TYPE_REGISTER_HOLDING,
CONF_INPUT_TYPE,
CONF_LAZY_ERROR,
CONF_STATE_CLOSED,
CONF_STATE_CLOSING,
CONF_STATE_OPEN,
CONF_STATE_OPENING,
CONF_STATUS_REGISTER,
CONF_STATUS_REGISTER_TYPE,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_COVERS,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNAVAILABLE,
)
from homeassistant.core import State
from .conftest import TEST_ENTITY_NAME, ReadResult
# Entity id of the cover under test, derived from the shared test name.
ENTITY_ID = f"{COVER_DOMAIN}.{TEST_ENTITY_NAME}"
# Two minimal configs: a coil-backed cover, and a holding-register cover
# with explicit slave/scan/lazy-error settings.
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_ADDRESS: 1234,
                    CONF_INPUT_TYPE: CALL_TYPE_COIL,
                }
            ]
        },
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_ADDRESS: 1234,
                    CONF_INPUT_TYPE: CALL_TYPE_REGISTER_HOLDING,
                    CONF_SLAVE: 10,
                    CONF_SCAN_INTERVAL: 20,
                    CONF_LAZY_ERROR: 10,
                }
            ]
        },
    ],
)
async def test_config_cover(hass, mock_modbus):
    """Run configuration test for cover."""
    # The mock_modbus fixture sets up the integration from do_config.
    assert COVER_DOMAIN in hass.config.components
# A coil-backed cover: only the value 0x01 (bit set after masking) counts
# as open; everything else reads as closed.
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_INPUT_TYPE: CALL_TYPE_COIL,
                    CONF_ADDRESS: 1234,
                    CONF_SLAVE: 1,
                },
            ],
        },
    ],
)
@pytest.mark.parametrize(
    "register_words,expected",
    [
        (
            [0x00],
            STATE_CLOSED,
        ),
        (
            [0x80],
            STATE_CLOSED,
        ),
        (
            [0xFE],
            STATE_CLOSED,
        ),
        (
            [0xFF],
            STATE_OPEN,
        ),
        (
            [0x01],
            STATE_OPEN,
        ),
    ],
)
async def test_coil_cover(hass, expected, mock_do_cycle):
    """Run test for given config."""
    assert hass.states.get(ENTITY_ID).state == expected
# A register-backed cover (default input type): any nonzero register value
# reads as open, zero as closed.
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_ADDRESS: 1234,
                    CONF_SLAVE: 1,
                },
            ],
        },
    ],
)
@pytest.mark.parametrize(
    "register_words,expected",
    [
        (
            [0x00],
            STATE_CLOSED,
        ),
        (
            [0x80],
            STATE_OPEN,
        ),
        (
            [0xFE],
            STATE_OPEN,
        ),
        (
            [0xFF],
            STATE_OPEN,
        ),
        (
            [0x01],
            STATE_OPEN,
        ),
    ],
)
async def test_register_cover(hass, expected, mock_do_cycle):
    """Run test for given config."""
    assert hass.states.get(ENTITY_ID).state == expected
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_ADDRESS: 1234,
                    CONF_STATUS_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
                }
            ]
        },
    ],
)
async def test_service_cover_update(hass, mock_modbus, mock_ha):
    """Run test for service homeassistant.update_entity."""
    # First poll: mock returns the default (closed) register value.
    await hass.services.async_call(
        "homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True
    )
    assert hass.states.get(ENTITY_ID).state == STATE_CLOSED
    # Change the mocked register to a nonzero value and poll again.
    mock_modbus.read_holding_registers.return_value = ReadResult([0x01])
    await hass.services.async_call(
        "homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True
    )
    assert hass.states.get(ENTITY_ID).state == STATE_OPEN
# Each stored state should survive a restart unchanged (scan interval 0
# disables polling so nothing overwrites the restored state).
@pytest.mark.parametrize(
    "mock_test_state",
    [
        (State(ENTITY_ID, STATE_CLOSED),),
        (State(ENTITY_ID, STATE_CLOSING),),
        (State(ENTITY_ID, STATE_OPENING),),
        (State(ENTITY_ID, STATE_OPEN),),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_INPUT_TYPE: CALL_TYPE_COIL,
                    CONF_ADDRESS: 1234,
                    CONF_STATE_OPEN: 1,
                    CONF_STATE_CLOSED: 0,
                    CONF_STATE_OPENING: 2,
                    CONF_STATE_CLOSING: 3,
                    CONF_STATUS_REGISTER: 1234,
                    CONF_STATUS_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
                    CONF_SCAN_INTERVAL: 0,
                }
            ]
        },
    ],
)
async def test_restore_state_cover(hass, mock_test_state, mock_modbus):
    """Run test for cover restore state."""
    test_state = mock_test_state[0].state
    assert hass.states.get(ENTITY_ID).state == test_state
# Two covers: a register-backed one and a coil-backed one, to exercise
# open/close plus modbus-failure handling on both read paths.
@pytest.mark.parametrize(
    "do_config",
    [
        {
            CONF_COVERS: [
                {
                    CONF_NAME: TEST_ENTITY_NAME,
                    CONF_ADDRESS: 1234,
                    CONF_STATUS_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
                    CONF_SCAN_INTERVAL: 0,
                },
                {
                    CONF_NAME: f"{TEST_ENTITY_NAME}2",
                    CONF_INPUT_TYPE: CALL_TYPE_COIL,
                    CONF_ADDRESS: 1235,
                    CONF_SCAN_INTERVAL: 0,
                },
            ]
        },
    ],
)
async def test_service_cover_move(hass, mock_modbus, mock_ha):
    """Run test for service homeassistant.update_entity."""
    ENTITY_ID2 = f"{ENTITY_ID}2"
    mock_modbus.read_holding_registers.return_value = ReadResult([0x01])
    await hass.services.async_call(
        "cover", "open_cover", {"entity_id": ENTITY_ID}, blocking=True
    )
    assert hass.states.get(ENTITY_ID).state == STATE_OPEN
    mock_modbus.read_holding_registers.return_value = ReadResult([0x00])
    await hass.services.async_call(
        "cover", "close_cover", {"entity_id": ENTITY_ID}, blocking=True
    )
    assert hass.states.get(ENTITY_ID).state == STATE_CLOSED
    # A modbus read failure must mark the entity unavailable.
    mock_modbus.reset()
    mock_modbus.read_holding_registers.side_effect = ModbusException("fail write_")
    await hass.services.async_call(
        "cover", "close_cover", {"entity_id": ENTITY_ID}, blocking=True
    )
    assert mock_modbus.read_holding_registers.called
    assert hass.states.get(ENTITY_ID).state == STATE_UNAVAILABLE
    # Same failure path for the coil-backed cover.
    mock_modbus.read_coils.side_effect = ModbusException("fail write_")
    await hass.services.async_call(
        "cover", "close_cover", {"entity_id": ENTITY_ID2}, blocking=True
    )
    assert hass.states.get(ENTITY_ID2).state == STATE_UNAVAILABLE
|
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from scipy import sparse
from io import BytesIO
import trainer.metadata as metadata
from google.cloud import storage
def train_and_save(filenames, browse_score, basket_score, output_filename):
    """Build a user/SKU interaction matrix from TFRecords and save a model.

    Args:
        filenames: glob pattern(s) for input TFRecord files.
        browse_score: numeric weight assigned to 'Browsed' actions.
        basket_score: numeric weight assigned to 'AddedToBasket' actions.
        output_filename: destination path (local or gs://) for the matrix.
    """
    files = tf.gfile.Glob(filenames)
    # build a lookup table mapping action names to their numeric scores
    table = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(
            keys=tf.constant(['Browsed', 'AddedToBasket']),
            values=tf.constant([browse_score, basket_score], dtype=tf.float32),
        ), -1
    )
    feature_description = metadata.INPUT_TRAIN_SCHEMA
    def _make_parser_function(feature_description):
        # Returns a tf.Example parser bound to the given schema.
        def _parse_function(example_proto):
            return tf.parse_single_example(example_proto, feature_description)
        return _parse_function
    _parser_fn = _make_parser_function(feature_description)
    def _process_row(row):
        """
        row: {'customer_id': int, 'actions_list': tf.Sparse, 'skus_list': tf.Sparse}
        returns
          same row but customer is replicated n times where n is the dimension of
          actions_list (or skus_list) and `actions_list` is mapped to its correspondents
          scores.
        """
        row['customer_id'] = tf.SparseTensor(
            indices=row['actions_list'].indices,
            values=tf.tile(tf.expand_dims(row['customer_id'], 0),
                           [tf.size(row['skus_list'])]),
            dense_shape=row['actions_list'].dense_shape
        )
        row['actions_list'] = tf.SparseTensor(
            indices=row['actions_list'].indices,
            values=table.lookup(row['actions_list'].values),
            dense_shape=row['actions_list'].dense_shape
        )
        row['skus_list'] = tf.SparseTensor(
            indices=row['skus_list'].indices,
            values=row['skus_list'].values,
            dense_shape=row['skus_list'].dense_shape
        )
        return row
    dataset = tf.data.TFRecordDataset(files) \
        .map(lambda x: _parser_fn(x)) \
        .map(lambda x: _process_row(x)) \
        .batch(1000)
    iterator = dataset.make_initializable_iterator()
    next_elem = iterator.get_next()
    # COO triplets: data=scores, i=customer ids, j=sku ids.
    data, j, i = [], [], []
    with tf.Session() as sess:
        table.init.run()
        sess.run(iterator.initializer)
        while True:
            try:
                row = sess.run(next_elem)
                data.extend(list(row['actions_list'].values))
                i.extend(list(row['customer_id'].values))
                j.extend(list(row['skus_list'].values))
            except tf.errors.OutOfRangeError:
                break
    users_skus_interactions = sparse.coo_matrix((data, (i, j)), shape=(np.max(i) + 1,
                                                                       np.max(j) + 1)).tocsc()
    # ... code to build similarity sparse matrix ...
    # NOTE(review): `sim_matrix` is not defined in the visible code — the
    # similarity computation appears to have been elided; as written this
    # raises NameError. Confirm against the full source.
    save_sparse_matrix(sim_matrix, output_filename)
def save_sparse_matrix(matrix, path):
    """Save a scipy sparse matrix either to GCS or to the local filesystem.

    Args:
        matrix: any scipy.sparse matrix supported by sparse.save_npz.
        path: destination; a 'gs://bucket/key' URI uploads to GCS,
            anything else is written locally.
    """
    # BUG FIX: the old test `'gs://' in path` matched the substring
    # anywhere in a local path; only a gs:// prefix means GCS.
    if path.startswith('gs://'):
        # Serialize into memory, then upload the buffer as a blob.
        file_ = BytesIO()
        sparse.save_npz(file_, matrix)
        file_.seek(0)
        storage_client = storage.Client()
        bucket_name = path.split('/')[2]
        bucket = storage_client.get_bucket(bucket_name)
        destination_blob_name = '/'.join(path.split('/')[3:])
        blob = bucket.blob(destination_blob_name)
        blob.upload_from_file(file_)
    else:
        sparse.save_npz(path, matrix)
|
<reponame>marssaxman/robocrate<filename>library.py
import os
import os.path
import sys
import shutil
import json
DIR = os.path.expanduser("~/.robocrate")  # per-user data directory
LIBRARY = os.path.join(DIR, "library.json")  # on-disk library database
# Lazily populated module state; see load() and tracks().
_library = None
_tracklist = None
class Track(object):
    """One library entry; a thin read-mostly wrapper over a metadata dict."""
    def __init__(self, fields):
        # The dict is shared with the library JSON structure; mutations
        # must go through update()/save() to be persisted.
        self._fields = fields
    @property
    def source(self): return self._fields.get('source')
    @property
    def hash(self): return self._fields.get('hash')
    @property
    def title(self): return self._fields.get('title')
    @property
    def artist(self): return self._fields.get('artist')
    @property
    def album_artist(self): return self._fields.get('album_artist')
    @property
    def genre(self): return self._fields.get('genre')
    @property
    def bpm(self): return self._fields.get('bpm')
    @property
    def year(self): return self._fields.get('year')
    @property
    def publisher(self): return self._fields.get('publisher')
    @property
    def remixer(self): return self._fields.get('remixer')
    def tags(self):
        """Return the set of searchable names derived from the metadata."""
        names = set()
        # multiple artists are often packed in for a single track
        if self.artist:
            names.update(self.artist.split(", "))
        if self.album_artist:
            names.update(self.album_artist.split(", "))
        if self.remixer:
            names.update(self.remixer.split(", "))
        if self.genre:
            names.add(self.genre)
        if self.publisher:
            names.add(self.publisher)
        return names
    # Derived per-track cache files live in DIR, keyed by content hash.
    @property
    def summary_file(self):
        return os.path.join(DIR, self.hash + '.wav')
    @property
    def details_file(self):
        return os.path.join(DIR, self.hash + '.json')
    @property
    def features_file(self):
        return os.path.join(DIR, self.hash + '.npy')
    @property
    def caption(self):
        """Human-readable label: artist/title, or the source file name."""
        if self.title and self.artist:
            return "%s - %s" % (self.artist, self.title)
        if self.title:
            return self.title
        return os.path.splitext(os.path.basename(self.source))[0]
    def update(self, fields):
        """Merge new metadata into this track and persist the library."""
        self._fields.update(fields)
        self.save()
    def save(self):
        """Persist the whole library (tracks share one backing file)."""
        tracks().save()
    @staticmethod
    def create(fields):
        """Insert a new track into the library; 'source' and 'hash' required."""
        assert fields['source']
        assert fields['hash']
        return tracks().insert(fields)
    def delete(self):
        """Remove this track from the library and clear its fields."""
        tracks().delete(self)
        self._fields.clear()
class Tracklist(object):
    """A sequence of Track objects kept in sync with the raw library dicts.

    `_track_dicts` is the very list stored in the library JSON structure,
    so all mutations must happen in place for commit() to persist them.
    """
    def __init__(self, track_dicts):
        self._track_dicts = track_dicts
        self._track_objs = [Track(t) for t in track_dicts]
    def __len__(self):
        return len(self._track_objs)
    def __getitem__(self, index):
        return self._track_objs[index]
    def __iter__(self):
        for t in self._track_objs:
            yield t
    def save(self):
        """Persist the library to disk."""
        commit()
    def insert(self, fields):
        """Append a new track from a metadata dict; returns the Track."""
        self._track_dicts.append(fields)
        t = Track(fields)
        self._track_objs.append(t)
        commit()
        return t
    def delete(self, track):
        """Remove every entry matching the track's hash, then persist.

        BUG FIX: the old implementation could delete the raw dict entry
        while leaving the Track object behind (it broke out of the loop
        before touching _track_objs), letting the two parallel lists
        drift apart. Filter both in place (slice assignment preserves the
        list identity shared with the library JSON structure).
        """
        self._track_dicts[:] = [
            f for f in self._track_dicts if f.get("hash") != track.hash]
        self._track_objs[:] = [
            t for t in self._track_objs if t.hash != track.hash]
        commit()
def tags():
    """Return a dict mapping each tag to the list of tracks carrying it."""
    result = dict()
    for track in tracks():
        for name in track.tags():
            result.setdefault(name, []).append(track)
    return result
def load():
    """Load the library JSON into module state (idempotent).

    Exits with an error message when the data directory or library file
    is missing, i.e. when 'init' has not been run yet.
    """
    global _library
    if _library is not None:
        return
    if not os.path.isdir(DIR):
        sys.stderr.write(
            "Cannot find ~/.robocrate directory; must 'init' first.\n")
        sys.exit(1)
    if not os.path.isfile(LIBRARY):
        # BUG FIX: the message named config.json, but the file actually
        # checked (LIBRARY) is library.json.
        sys.stderr.write(
            "Cannot find ~/.robocrate/library.json file; must 'init' first.\n")
        sys.exit(1)
    with open(LIBRARY, 'r') as fd:
        _library = json.load(fd)
def commit():
    """Write the in-memory library back to its JSON file."""
    with open(LIBRARY, 'w') as fd:
        json.dump(_library, fd)
def source():
    """Return the configured music source directory, loading if needed."""
    load()
    return _library['source']
def tracks():
    """Return the Tracklist singleton, loading the library on first use."""
    global _tracklist
    if _tracklist is None:
        load()
        _tracklist = Tracklist(_library['tracks'])
    return _tracklist
def clean():
    """Prune stale library entries and delete unrecognized cache files."""
    if not os.path.isdir(DIR):
        print("%s is not a directory" % DIR)
        return
    if os.path.isfile(LIBRARY):
        print("loading library file %s" % LIBRARY)
        load()
    # Search the library for tracks which have been deleted or renamed.
    for track in tracks():
        if track.source and not os.path.isfile(track.source):
            print("removing reference to missing file %s" % track.source)
            track.delete()
    # Make a list of the files we expect to find in the library.
    # We'll delete everything we don't recognize.
    expected = {LIBRARY}
    for track in tracks():
        if track.summary_file:
            expected.add(os.path.abspath(track.summary_file))
        if track.details_file:
            expected.add(os.path.abspath(track.details_file))
        if track.features_file:
            expected.add(os.path.abspath(track.features_file))
    for name in os.listdir(DIR):
        path = os.path.abspath(os.path.join(DIR, name))
        if path in expected:
            continue
        # Unrecognized entry: remove the file, or the whole subtree.
        if os.path.isfile(path):
            os.unlink(path)
        else:
            shutil.rmtree(path)
def init(source):
    """Create the ~/.robocrate directory and an empty library for `source`.

    Exits with an error message when `source` is not a usable directory.
    """
    # BUG FIX: both messages contained a '%s' placeholder that was never
    # interpolated, so users saw the literal '%s' instead of the path.
    if os.path.isfile(source):
        sys.stderr.write("Source path '%s' is a file, not a directory.\n" % source)
        sys.exit(1)
    if not os.path.isdir(source):
        sys.stderr.write("Source directory '%s' does not exist.\n" % source)
        sys.exit(1)
    if not os.path.isdir(DIR):
        os.makedirs(DIR)
    global _library
    _library = {
        "source": source,
        "tracks": [],
    }
    commit()
|
<filename>Lib/pyclbr.py
"""Parse a Python file and retrieve classes and methods.
Parse enough of a Python file to recognize class and method
definitions and to find out the superclasses of a class.
The interface consists of a single function:
readmodule(module, path)
module is the name of a Python module, path is an optional list of
directories where the module is to be searched. If present, path is
prepended to the system search path sys.path.
The return value is a dictionary. The keys of the dictionary are
the names of the classes defined in the module (including classes
that are defined via the from XXX import YYY construct). The values
are class instances of the class Class defined here.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
BUGS
- Continuation lines are not dealt with at all, except inside strings.
- Nested classes and functions can confuse it.
- Code that doesn't pass tabnanny or python -t will confuse it, unless
you set the module TABWIDTH vrbl (default 8) to the correct tab width
for the file.
PACKAGE RELATED BUGS
- If you have a package and a module inside that or another package
with the same name, module caching doesn't work properly since the
key is the base name of the module/package.
- The only entry that is returned when you readmodule a package is a
__path__ whose value is a list which confuses certain class browsers.
- When code does:
from package import subpackage
class MyClass(subpackage.SuperClass):
...
It can't locate the parent. It probably needs to have the same
hairy logic that the import locator already does. (This logic
exists coded in Python in the freeze package.)
"""
import os
import sys
import imp
import re
import string
__all__ = ["readmodule"]
TABWIDTH = 8
_getnext = re.compile(r"""
(?P<String>
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
\"""
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
'''
| " [^"\\\n]* (?: \\. [^"\\\n]*)* "
| ' [^'\\\n]* (?: \\. [^'\\\n]*)* '
)
| (?P<Method>
^
(?P<MethodIndent> [ \t]* )
def [ \t]+
(?P<MethodName> [a-zA-Z_] \w* )
[ \t]* \(
)
| (?P<Class>
^
(?P<ClassIndent> [ \t]* )
class [ \t]+
(?P<ClassName> [a-zA-Z_] \w* )
[ \t]*
(?P<ClassSupers> \( [^)\n]* \) )?
[ \t]* :
)
| (?P<Import>
^ import [ \t]+
(?P<ImportList> [^#;\n]+ )
)
| (?P<ImportFrom>
^ from [ \t]+
(?P<ImportFromPath>
[a-zA-Z_] \w*
(?:
[ \t]* \. [ \t]* [a-zA-Z_] \w*
)*
)
[ \t]+
import [ \t]+
(?P<ImportFromList> [^#;\n]+ )
)
""", re.VERBOSE | re.DOTALL | re.MULTILINE).search
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
    '''Represents a single Python class found while scanning a module.'''

    def __init__(self, module, name, super, file, lineno):
        # Record where the class came from and how it was declared.
        self.module = module
        self.name = name
        if super is None:
            self.super = []
        else:
            self.super = super
        # Maps method name -> line number of its 'def'.
        self.methods = {}
        self.file = file
        self.lineno = lineno

    def _addmethod(self, name, lineno):
        # Remember on which line a method of this class was defined.
        self.methods[name] = lineno
class Function(Class):
    '''Represents a top-level function; reuses Class for its bookkeeping.'''

    def __init__(self, module, name, file, lineno):
        # A function is modelled as a "class" with no superclasses.
        Class.__init__(self, module, name, None, file, lineno)

    def _addmethod(self, name, lineno):
        # Functions cannot own methods; reaching here is a logic error.
        assert 0, "Function._addmethod() shouldn't be called"
def readmodule(module, path=[], inpackage=0):
    '''Backwards compatible interface.

    Like readmodule_ex() but strips Function objects from the
    resulting dictionary.'''
    # Fix: the original bound the result to a local named 'dict',
    # shadowing the builtin; renamed for clarity.
    tree = readmodule_ex(module, path, inpackage)
    res = {}
    for key, value in tree.items():
        if not isinstance(value, Function):
            res[key] = value
    return res
def readmodule_ex(module, path=[], inpackage=0):
    '''Read a module file and return a dictionary of classes.

    Search for MODULE in PATH and sys.path, read and parse the
    module and return a dictionary with one entry for each class
    found in the module.  Results are memoized in the module-level
    _modules cache.  (Python 2 only: uses dict.has_key and imp.)'''
    dict = {}

    i = module.rfind('.')
    if i >= 0:
        # Dotted module name: resolve the package first, then look the
        # submodule up inside the package's __path__.
        package = module[:i].strip()
        submodule = module[i+1:].strip()
        parent = readmodule(package, path, inpackage)
        child = readmodule(submodule, parent['__path__'], 1)
        return child

    if _modules.has_key(module):
        # we've seen this module before...
        return _modules[module]

    if module in sys.builtin_module_names:
        # this is a built-in module; there is no source to parse
        _modules[module] = dict
        return dict

    # search the path for the module
    f = None
    if inpackage:
        try:
            f, file, (suff, mode, type) = \
                imp.find_module(module, path)
        except ImportError:
            f = None
    if f is None:
        fullpath = list(path) + sys.path
        f, file, (suff, mode, type) = imp.find_module(module, fullpath)
    if type == imp.PKG_DIRECTORY:
        # A package: record its __path__ and parse its __init__ instead.
        dict['__path__'] = [file]
        _modules[module] = dict
        path = [file] + path
        f, file, (suff, mode, type) = \
            imp.find_module('__init__', [file])
    if type != imp.PY_SOURCE:
        # not Python source, can't do anything with this module
        f.close()
        _modules[module] = dict
        return dict

    # Cache before parsing so recursive imports of this module terminate.
    _modules[module] = dict
    imports = []  # NOTE(review): never used below
    classstack = []  # stack of (class, indent) pairs
    src = f.read()
    f.close()

    # To avoid having to stop the regexp at each newline, instead
    # when we need a line number we simply string.count the number of
    # newlines in the string since the last time we did this; i.e.,
    #    lineno = lineno + \
    #             string.count(src, '\n', last_lineno_pos, here)
    #    last_lineno_pos = here
    countnl = string.count
    lineno, last_lineno_pos = 1, 0
    i = 0
    while 1:
        m = _getnext(src, i)
        if not m:
            break
        start, i = m.span()

        if m.start("Method") >= 0:
            # found a method definition or function
            thisindent = _indent(m.group("MethodIndent"))
            meth_name = m.group("MethodName")
            lineno = lineno + \
                     countnl(src, '\n',
                             last_lineno_pos, start)
            last_lineno_pos = start
            # close all classes indented at least as much
            while classstack and \
                  classstack[-1][1] >= thisindent:
                del classstack[-1]
            if classstack:
                # it's a class method
                cur_class = classstack[-1][0]
                cur_class._addmethod(meth_name, lineno)
            else:
                # it's a function
                # NOTE(review): reuses the name 'f' (previously the
                # source file object, already closed by now).
                f = Function(module, meth_name,
                             file, lineno)
                dict[meth_name] = f

        elif m.start("String") >= 0:
            # matched a string literal: skip it so that defs/classes
            # inside strings are not mistaken for real ones
            pass

        elif m.start("Class") >= 0:
            # we found a class definition
            thisindent = _indent(m.group("ClassIndent"))
            # close all classes indented at least as much
            while classstack and \
                  classstack[-1][1] >= thisindent:
                del classstack[-1]
            lineno = lineno + \
                     countnl(src, '\n', last_lineno_pos, start)
            last_lineno_pos = start
            class_name = m.group("ClassName")
            inherit = m.group("ClassSupers")
            if inherit:
                # the class inherits from other classes; resolve each
                # base name to a Class object where possible
                inherit = inherit[1:-1].strip()
                names = []
                for n in inherit.split(','):
                    n = n.strip()
                    if dict.has_key(n):
                        # we know this super class
                        n = dict[n]
                    else:
                        c = n.split('.')
                        if len(c) > 1:
                            # super class is of the form module.class:
                            # look in module for class
                            # NOTE(review): 'm = c[-2]' clobbers the
                            # match object 'm'; harmless only because
                            # m is reassigned at the top of the loop.
                            m = c[-2]
                            c = c[-1]
                            if _modules.has_key(m):
                                d = _modules[m]
                                if d.has_key(c):
                                    n = d[c]
                    names.append(n)
                inherit = names
            # remember this class
            cur_class = Class(module, class_name, inherit,
                              file, lineno)
            dict[class_name] = cur_class
            classstack.append((cur_class, thisindent))

        elif m.start("Import") >= 0:
            # import module
            for n in m.group("ImportList").split(','):
                n = n.strip()
                try:
                    # recursively read the imported module
                    d = readmodule(n, path, inpackage)
                except:
                    # best-effort: unresolvable imports are ignored
                    ##print 'module', n, 'not found'
                    pass

        elif m.start("ImportFrom") >= 0:
            # from module import stuff
            mod = m.group("ImportFromPath")
            names = m.group("ImportFromList").split(',')
            try:
                # recursively read the imported module
                d = readmodule(mod, path, inpackage)
            except:
                ##print 'module', mod, 'not found'
                continue
            # add any classes that were defined in the
            # imported module to our name space if they
            # were mentioned in the list
            for n in names:
                n = n.strip()
                if d.has_key(n):
                    dict[n] = d[n]
                elif n == '*':
                    # only add a name if not
                    # already there (to mimic what
                    # Python does internally)
                    # also don't add names that
                    # start with _
                    for n in d.keys():
                        if n[0] != '_' and \
                           not dict.has_key(n):
                            dict[n] = d[n]
        else:
            assert 0, "regexp _getnext found something unexpected"

    return dict
def _indent(ws, _expandtabs=string.expandtabs):
    # Width of the leading whitespace 'ws' once tabs are expanded to
    # TABWIDTH columns (string.expandtabs bound at def time for speed).
    expanded = _expandtabs(ws, TABWIDTH)
    return len(expanded)
|
<filename>tests/test_transducer.py
import myouji_kenchi
# Given that the output depends on what goes into the attested myouji file I'm
# hesitant to write too many tests in the blast radius of changes to that file
class TestTransducer():
    """Exercises MyoujiBackTransliteration over representative romaji inputs."""

    # Shared across all tests; built once at class-definition time.
    nbt = myouji_kenchi.MyoujiBackTransliteration()

    def assert_transliteration(self, romaji, *expected_results):
        # Compare only the produced katakana strings; results are
        # presumably (string, weight) pairs — TODO confirm against
        # myouji_kenchi's back_transliterate contract.
        results = self.nbt.back_transliterate(romaji)
        strings = set(r[0] for r in results)
        assert strings == set(expected_results)

    def test_assorted(self):
        self.assert_transliteration('sa', 'サ')
        self.assert_transliteration('SA', 'サ')
        self.assert_transliteration('se', 'セ')
        self.assert_transliteration('shō', 'ショウ')  # composed
        self.assert_transliteration('shō', 'ショウ')  # decomposed
        self.assert_transliteration('sho', 'ショウ')
        self.assert_transliteration('syo', 'ショウ')
        self.assert_transliteration('ho', 'ホ', 'ホウ', 'ホオ')
        self.assert_transliteration('teppou', 'テッポウ')
        self.assert_transliteration('shibukawa', 'シブカワ')
        self.assert_transliteration('watamura', 'ワタムラ')
        self.assert_transliteration('Matsumoto', 'マツモト')
        self.assert_transliteration('Matumoto', 'マツモト')
        self.assert_transliteration('Tusima', 'ツシマ')
        self.assert_transliteration('IMAZU', 'イマヅ', 'イマズ')
        self.assert_transliteration('SATO', 'サトウ', 'サトオ', 'サト')
        self.assert_transliteration('Uchino', 'ウチノ', 'ウチノウ')
        self.assert_transliteration('Utino', 'ウチノ', 'ウチノウ')
        self.assert_transliteration('Chano', 'チャノ')
        self.assert_transliteration('Tyano', 'チャノ')
        self.assert_transliteration('Kojima', 'コジマ', 'コヂマ', 'コウジマ')
        self.assert_transliteration('Kozima', 'コジマ', 'コヂマ', 'コウジマ')
        self.assert_transliteration('Inuduka', 'イヌヅカ')
        self.assert_transliteration('Inuzuka', 'イヌヅカ', 'イヌズカ')
        self.assert_transliteration('Inudzuka', 'イヌヅカ')
        self.assert_transliteration('Betchaku', 'ベッチャク')
        self.assert_transliteration('Becchaku', 'ベッチャク')
        self.assert_transliteration('Uwozaki', 'ウヲザキ')
        self.assert_transliteration('Uozaki', 'ウヲザキ', 'ウオザキ')
        self.assert_transliteration('Nyoya', 'ニョウヤ')
        self.assert_transliteration('Nitta', 'ニッタ')

    def test_oh(self):
        # 'oh' as a long-o romanization
        self.assert_transliteration('Ohnishi', 'オオニシ', 'オウニシ')

    def test_leading_m(self):
        # 'm' before a labial consonant maps to ン
        self.assert_transliteration('Sampei', 'サンペイ')
        self.assert_transliteration('Sanpei', 'サンペイ')

    def test_glottal_stop(self):
        # apostrophe / hyphen as syllable separators after ん
        self.assert_transliteration('Shinyagaito', 'シンヤガイト')
        self.assert_transliteration('Sinyagaito', 'シンヤガイト')
        self.assert_transliteration('Shin\'yagaito', 'シンヤガイト')
        self.assert_transliteration('Shin-yagaito', 'シンヤガイト')

    def test_double_i(self):
        self.assert_transliteration('Ishii', 'イシイ')
        # To be clear, 'Ishî' is almost certainly a wrong transliteration
        # Nevertheless, the below is the expected behavior
        self.assert_transliteration('Ishî', 'イシイ')
        self.assert_transliteration('Isî', 'イシイ')

    def test_bad_characters(self):
        # no expected results: unknown input yields nothing
        self.assert_transliteration('@')
|
import random
import secrets
import string

from django.db import models
from django.contrib.auth.models import User
class base_element(models.Model):
    """
    Base element, abstract class

    Common fields shared by Supplier, ProductCategory and Product.
    """
    # NOTE(review): class name is not PascalCase; renaming would touch
    # every subclass, so it is left as-is here.
    name = models.CharField(max_length=128)
    short_description = models.CharField(max_length=256)
    description = models.TextField()
    # Optional slug; indexed for lookup by URL fragment.
    slug = models.SlugField(db_index=True, null=True, blank=True)
    date_added = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    enabled = models.BooleanField(default=True)

    class Meta:
        abstract = True

    def __str__(self):
        return self.name
class Supplier(base_element):
    """
    Supplier - can have many products
    """
    url = models.URLField(blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    phone = models.CharField(max_length=24, blank=True, null=True)
    address_one = models.CharField(max_length=256, blank=True, null=True)
    address_two = models.CharField(max_length=256, blank=True, null=True)
    city = models.CharField(max_length=128, blank=True, null=True)
    state_province = models.CharField(max_length=128, blank=True, null=True)
    country = models.CharField(max_length=128, blank=True, null=True)
    postal_code = models.CharField(max_length=32, blank=True, null=True)
    # Fix: the implicit string concatenation previously rendered
    # "...by thissupplier" in the admin (missing space between fragments).
    discount_percent = models.IntegerField(default=0, help_text="Will be applied to all products by this "
                                                                "supplier")
class ProductCategory(base_element):
    """
    Product category - can have itself as a parent
    """
    # Self-referential FK; deleting a parent cascades to its subtree.
    parent_category = models.ForeignKey("ProductCategory", null=True, on_delete=models.CASCADE)
    # Fix: the implicit string concatenation previously rendered
    # "...in thiscategory" in the admin (missing space between fragments).
    discount_percent = models.IntegerField(default=0, help_text="Will be applied to all products in this "
                                                                "category")
class Product(base_element):
    """
    Product model
    """
    # RESTRICT: a category cannot be deleted while products reference it.
    category = models.ForeignKey(ProductCategory, on_delete=models.RESTRICT, blank=True, null=True)
    # Deleting a supplier cascades to its products.
    supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE, blank=True, null=True)
    price = models.DecimalField(max_digits=8, decimal_places=2)
    discount_percent = models.IntegerField(default=0, blank=True, null=True)
    quantity_available = models.IntegerField(default=1, blank=True, null=True)
    sku = models.CharField(max_length=16, blank=True, null=True)
    digital_only = models.BooleanField(default=False)
class ProductImage(models.Model):
    """An image attached to a product; deleted together with the product."""
    image = models.ImageField()
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    # Accessibility text for the <img alt="..."> attribute.
    alt_text = models.CharField(max_length=128, blank=True, null=True)
class Cart(models.Model):
    """
    Cart model

    A cart may belong to a user; anonymous carts are identified only by
    their cart_key, which therefore acts as a bearer token.
    """
    date_added = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)
    cart_key = models.CharField(max_length=32, db_index=True, unique=True, null=True)

    def save(self, *args, **kwargs):
        """Assign a random 32-character cart_key on first save.

        Fix: use secrets.choice rather than random.choices — the key
        grants access to anonymous carts, so it must be unpredictable.
        Alphabet and length are unchanged.
        """
        if not self.cart_key:
            alphabet = string.ascii_letters + string.digits
            self.cart_key = ''.join(secrets.choice(alphabet) for _ in range(32))
        super(Cart, self).save(*args, **kwargs)
class CartItem(models.Model):
    """A single product line in a cart; removed when cart or product goes."""
    # related_name="items" lets views do cart.items.all().
    cart = models.ForeignKey(Cart, on_delete=models.CASCADE, related_name="items")
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    date_added = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    quantity = models.IntegerField(default=1)
|
<filename>backend/flaskr/db.py
# -*-codeing:utf-8 -*-
import pymysql
from flask import g
def get_db():
    """Connect to the application's configured database. The connection
    is unique for each request and will be reused if this is called
    again
    """
    # Lazily open one PyMySQL connection per request context (flask.g).
    if 'db' not in g:
        # NOTE(review): credentials are hard-coded (root, empty password);
        # these belong in the Flask app config / environment variables.
        g.db = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='',
            database='qm',
            charset='utf8'
        )
    return g.db
def close_db(e = None):
    """If this request opened a database connection, close it.

    'e' is the unused teardown exception passed by Flask.
    """
    connection = g.pop('db', None)
    if connection is not None:
        connection.close()
def init_app(app):
    """Register database functions with the Flask app. This is called by
    the application factory.
    """
    # Close the per-request connection whenever an app context is torn down.
    app.teardown_appcontext(close_db)
def excute_select(db, sql):
    """Run a SELECT statement and return all fetched rows.

    On failure the error is printed, the transaction is rolled back and
    an empty tuple is returned.  NOTE(review): callers appear to build
    'sql' by string formatting; prefer parameterized queries
    (cursor.execute(sql, params)) to avoid SQL injection.
    """
    cursor = db.cursor()
    select_res = ()
    try:
        cursor.execute(sql)
        select_res = cursor.fetchall()
    except Exception as e:
        print(e)
        db.rollback()
    finally:
        # Fix: close the cursor even on unexpected (non-Exception) exits.
        cursor.close()
    return select_res  # empty tuple when nothing is found
def excute_insert(db, sql):
    """Run an INSERT statement and commit it.

    Returns 'Successful' on success, 'Error' on failure (after rolling
    the transaction back and printing the error).
    """
    cursor = db.cursor()
    res = ''
    try:
        cursor.execute(sql)
        db.commit()
        res = 'Successful'
        print('Successful!')
    except Exception as e:
        # Fix: the original called db.rollback() twice in this path;
        # a single rollback is sufficient.
        db.rollback()
        res = 'Error'
        print("[Insert Error!]", e)
    finally:
        cursor.close()
    return res
def excute_delete(db, sql):
    """Run a DELETE statement and commit it.

    Returns 'Sucessful' (sic — callers may compare against this exact
    string) on success, 'Error' after a rollback on failure.
    """
    cursor = db.cursor()
    outcome = ''
    try:
        cursor.execute(sql)
        db.commit()
        print ('Successful!')
        outcome = 'Sucessful'
    except Exception as err:
        db.rollback()
        outcome = 'Error'
        print("[Delete Error!]", err)
    cursor.close()
    return outcome
def excute_update(db, sql):
    """Run an UPDATE statement and commit it.

    Returns 'Sucessful' (sic — callers may compare against this exact
    string) on success, 'Error' after a rollback on failure.
    """
    cursor = db.cursor()
    outcome = ''
    try:
        cursor.execute(sql)
        db.commit()
        print ('Successful!')
        outcome = 'Sucessful'
    except Exception as err:
        db.rollback()
        outcome = 'Error'
        print("[Update Error!]", err)
    cursor.close()
    return outcome
def excute_procedure(db, sql):
    """Execute a stored-procedure call and return every row it produced.

    Prints the error, rolls back and returns an empty tuple on failure.
    """
    cursor = db.cursor()
    rows = ()
    try:
        cursor.execute(sql)
        rows = cursor.fetchall()
    except Exception as err:
        print(err)
        db.rollback()
    cursor.close()
    return rows  # empty tuple when nothing is found
# class Question_Bank_MS():
# def __init__(self):
# self.db = pymysql.connect(
# host='localhost',
# port=3306,
# user='root',
# password='<PASSWORD>',
# database='qm',
# charset='utf8'
# )
# self.cursor = self.db.cursor()
# def __del__(self):
# self.cursor.close()
# self.db.close()
# def excute_select_sql(self, sql):
# select_res = ()
# try:
# self.cursor.execute(sql)
# select_res = self.cursor.fetchall()
# except Exception as e:
# print(e)
# self.db.rollback()
# return select_res # 查不到返回()
# def excute_insert_sql(self, sql):
# try:
# self.cursor.execute(sql)
# self.db.commit()
# print ('Successful!')
# return 'Sucessful'
# except Exception as e:
# print("[Insert Error!]", e)
# self.db.rollback()
# return 'Error'
# def excute_delete_sql(self, sql):
# try:
# self.cursor.execute(sql)
# self.db.commit()
# print ('Successful!')
# return 'Sucessful'
# except Exception as e:
# print("[Delete Error!]", e)
# self.db.rollback()
# return 'Error'
# def excute_update_sql(self, sql):
# try:
# self.cursor.execute(sql)
# self.db.commit()
# print ('Successful!')
# return 'Sucessful'
# except Exception as e:
# print("[Update Error!]", e)
# self.db.rollback()
# return 'Error'
# def excute_procedure_sql(self, sql):
# select_res = ()
# try:
# self.cursor.execute(sql)
# select_res = self.cursor.fetchall()
# except Exception as e:
# print(e)
# self.db.rollback()
# return select_res # 查不到返回()
# if __name__ == '__main__':
# test = Question_Bank_MS()
# # SQL___ = "insert into school(school_name, school_nature) values('测试初中','公办普通初中')"
# # all_info = test.excute_insert_sql(SQL___)
# # SQL___ = "select * from school"
# # all_info = test.excute_select_sql(SQL___)
# exe = "select user_pwd, user_auth from user where user_name = '%s'" % "丁二"
# select_user_res = list(test.excute_select_sql(exe))
# select_user_res = [list(item) for item in select_user_res]
# print(select_user_res)
# print(select_user_res[0][0])
# # print(all_info)
|
<reponame>DuncanSmith147/KVMS
##Copyright (c) 2014 <NAME>
##
##Permission is hereby granted, free of charge, to any person obtaining a
##copy of this software and associated documentation files (the "Software"),
##to deal in the Software without restriction, including without limitation
##the rights to use, copy, modify, merge, publish, distribute, sublicense,
##and/or sell copies of the Software, and to permit persons to whom the
##Software is furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included
##in all copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
##OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
##THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
##OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
##ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
##OTHER DEALINGS IN THE SOFTWARE.
# requires Python 2.6 or greater (for frozensets)
from __future__ import division
import itertools
import json
from abc import ABCMeta, abstractmethod
from digraph import DirectedGraph2
from traversals import dfs
import dot
# Reserved for partition-graph-specific failures.
class PartitionGraphError(Exception): pass
class PartitionGraph(DirectedGraph2):
    """Directed graph in which each root's reachable subgraph is a tree
    whose leaf nodes partition some underlying collection of elements."""

    def __init__(self):
        super(PartitionGraph, self).__init__()

    def structure_is_valid(self):
        # the induced graph containing the nodes reachable from
        # each root should be a tree (n nodes implies n-1 edges)
        for root in self.root_nodes:
            visited = set()  # NOTE(review): unused; kept for byte-compat
            num_edges = 0
            num_nodes = 0
            for node in dfs(self, root, post=True):
                num_nodes += 1
                num_edges += self.outdegree(node)
            if not num_nodes == num_edges + 1:
                return False
        return True

    def to_json(self):
        # Serialize as [nodes, edges]; nodes must be JSON-encodable.
        return json.dumps([list(self.nodes), list(self.iter_edges())])

    @classmethod
    def from_json(cls, txt):
        # Inverse of to_json().
        nodes, edges = json.loads(txt)
        g = cls()
        for node in nodes:
            g.add_node(node)
        for edge in edges:
            g.add_edge(edge)
        return g

    def to_file(self, filename):
        # Persist the JSON form to disk.
        with open(filename, 'w') as f:
            f.write(self.to_json())

    @classmethod
    def from_file(cls, filename):
        # Load a graph previously written by to_file().
        with open(filename, 'r') as f:
            return cls.from_json(f.read())

    def to_image(self, filename, format='png'):
        # Render via Graphviz (see the local 'dot' helper module).
        dot.graph2image(self, filename, format=format)

    def reachable(self, node):
        # return a generator of the leaf
        # nodes reachable from node
        for n in dfs(self, node, post=True):
            if not self.children[n]:
                yield n

    def leaf_partition(self, nodes):
        # returns the partition of the
        # leaf nodes implied by the nodes
        # in the sequence 'nodes' as
        # a list of sets
        # raises a ValueError if 'nodes' is not
        # a valid categorization w.r.t. the graph
        parts = []
        visited = set()
        for n in nodes:
            part = set()
            for leaf in self.reachable(n):
                if leaf in visited:
                    raise ValueError('%s is reachable from more than one node' % str(leaf))
                visited.add(leaf)
                part.add(leaf)
            parts.append(part)
        if len(visited) < len(self.leaf_nodes):
            raise ValueError('Not all leaf nodes are reachable from specified nodes')
        return parts

    def flatten(self):
        # flatten the structure (in place) so that
        # all nodes are either leaf or parents of leaf nodes
        for node in dfs(self, post=True):
            grand_children = set()
            for child in list(self.children[node]):
                if self.outdegree(child):
                    # child is internal: bypass it
                    self.del_edge((node, child))
                    grand_children |= self.children[child]
            for gc in grand_children:
                self.add_edge((node, gc))

    def flattened(self):
        # return a flattened copy
        acopy = self.copy()
        acopy.flatten()
        return acopy

    def deepen(self):
        # deepen structure (in place) to provide
        # nicer layout for humans
        for node in list(dfs(self, post=True)):
            children = list(self.children[node])
            if children:
                # parents (other than node) shared by *all* children can
                # adopt node instead of adopting each child individually
                it = iter(children)
                new_parents = set(self.parents[next(it)])
                new_parents.remove(node)
                for child in it:
                    if not new_parents:
                        break
                    new_parents &= self.parents[child]
                for p in new_parents:
                    for child in children:
                        self.del_edge((p, child))
                    self.add_edge((p, node))

    def deepened(self):
        # return a deepened copy
        acopy = self.copy()
        acopy.deepen()
        return acopy

    def remove(self, node):
        # remove node whilst
        # maintaining validity
        # leaf nodes cannot be removed
        # unless their indegree is 1
        # (otherwise raises a ValueError)
        if self.children[node]:
            # internal node: splice its children up to each parent
            for p in self.parents[node]:
                for c in self.children[node]:
                    self.add_edge((p, c))
            self.del_node(node)
        else:
            # can only be removed (singly)
            # if it has indegree 1 and no siblings
            if (self.indegree(node) == 1 and
                self.outdegree(next(iter(self.parents[node]))) == 1):
                self.del_node(node)
            else:
                raise ValueError('Cannot remove leaf node')

    def discard(self, node):
        # remove() that silently ignores "cannot remove" failures
        try:
            self.remove(node)
        except ValueError:
            pass

    def prune(self, to_keep, aggressive=False):
        # Remove every node not in 'to_keep' that can be removed safely;
        # with aggressive=True whole sibling groups of leaves may go too.
        to_keep = set(to_keep)
        if not self.nodes >= to_keep:
            raise ValueError('Not all nodes are in the graph')
        # prune non-leaf nodes first
        cands = self.nodes - to_keep - self.leaf_nodes
        for node in cands:
            self.remove(node)
        if aggressive:
            # attempt to prune leaf nodes
            leaves = self.leaf_nodes - to_keep
            while leaves:
                node = leaves.pop()
                if self.indegree(node) == 1:
                    parent = next(iter(self.parents[node]))
                    children = set(self.children[parent])
                    # only drop a leaf if all its siblings are also prunable
                    if (children.difference([node]) <= leaves and
                        all(self.indegree(c) == 1 for c in children)):
                        for c in children:
                            self.del_node(c)
                        if not parent in to_keep:
                            leaves.add(parent)
                        leaves -= children
class SetAPI:
    """Mixin adding set-algebra construction on top of PartitionGraph.

    Nodes must support a subset of the set API (issuperset,
    intersection, difference).  The old-style __metaclass__ keeps this a
    Python 2 abstract class.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def make_node(self, *args, **kwargs):
        # Build a node object from the caller-supplied arguments.
        pass

    def add(self, *args, **kwargs):
        # requires Python 2.6 or greater if using
        # frozensets as nodes
        # only for comparable nodes (supporting the relevant subset of the set API)
        # validity depends on added nodes being
        # exhaustive and new nodes not extending
        # cover (unless they have no intersection
        # with existing cover)
        node = self.make_node(*args, **kwargs)
        if not hasattr(node, 'issuperset'):
            raise ValueError('Node type does not support interface')
        if self.has_node(node):
            return
        leaf_nodes = list(self.leaf_nodes)
        self.add_node(node)
        for leaf in leaf_nodes:
            if node.issuperset(leaf):
                # requires existing leaves to be
                # exhaustive
                self.add_edge((node, leaf))
            else:
                intersect = node.intersection(leaf)
                if intersect:
                    if node != intersect:
                        # node is not a subset of leaf
                        self.add_node(intersect)
                        self.add_edge((node, intersect))
                    self.add_edge((leaf, intersect))
                    # split the remainder of leaf off as new children
                    diffs = leaf.difference(intersect)
                    if not isinstance(diffs, list):
                        diffs = [diffs]
                    for diff in diffs:
                        self.add_node(diff)
                        self.add_edge((leaf, diff))
                    if node == intersect:
                        # node lies entirely within this leaf; done
                        break
        # following code only required
        # where node extends cover
        if self.outdegree(node):
            diffs = node.difference(*leaf_nodes)
            if diffs:
                if not isinstance(diffs, list):
                    diffs = [diffs]
                for diff in diffs:
                    self.add_node(diff)
                    self.add_edge((node, diff))

    def is_valid(self, cover=None):
        # Full semantic check: tree structure, each parent equal to the
        # union of its children, pairwise-disjoint leaves and, when
        # 'cover' is given, that the leaves exactly cover it.
        if not self.structure_is_valid():
            print 'invalid structure'
            return False
        for node in self.iter_nodes():
            degree = self.outdegree(node)
            children = iter(self.children[node])
            if degree == 1:
                if not node == next(children):
                    print node
                    return False
            elif degree > 1:
                if not node == next(children).union(*children):
                    print node
                    return False
        if len(self.leaf_nodes) > 1:
            # check they are disjoint
            for a, b in itertools.combinations(self.leaf_nodes, 2):
                if a.intersection(b):
                    print 'non-disjoint leaf nodes'
                    return False
        if cover is not None:
            leaves = iter(self.leaf_nodes)
            if len(self.leaf_nodes) == 0:  # empty graph
                if cover:
                    print 'incorrect cover'
                    return False
            elif len(self.leaf_nodes) == 1:
                if not cover == next(leaves):
                    print 'incorrect cover'
                    return False
            else:
                if not cover == next(leaves).union(*leaves):
                    print 'incorrect cover'
                    return False
        return True
class IntervalGraph(PartitionGraph, SetAPI):
    """PartitionGraph whose nodes are Interval objects."""

    def __init__(self, intervals=None):
        # intervals is an iterable containing pairs of lower and upper bounds
        super(IntervalGraph, self).__init__()
        if intervals:
            for tup in intervals:
                self.add(tup)

    def make_node(self, tup):
        # Build an Interval node; empty or inverted bounds are rejected.
        a, b = tup
        if not a < b:
            raise ValueError('Empty interval')
        return Interval(a, b)

    def to_json(self):
        # Intervals are serialized as (a, b) pairs.
        return json.dumps([[(n.a, n.b) for n in self.nodes],
                           [((n.a, n.b), (m.a, m.b)) for n, m in self.iter_edges()]])

    @classmethod
    def from_json(cls, txt):
        # Rebuild Interval nodes and edges from the pair representation.
        node_data, edge_data = json.loads(txt)
        g = cls()
        nodes = [g.make_node(lis) for lis in node_data]
        edges = [(g.make_node(lis1), g.make_node(lis2)) for lis1, lis2 in edge_data]
        for node in nodes:
            g.add_node(node)
        for edge in edges:
            g.add_edge(edge)
        return g
class SetGraph(PartitionGraph, SetAPI):
    """PartitionGraph whose nodes are frozensets."""

    def __init__(self, sets=None):
        # sets is an iterable containing iterables
        super(SetGraph, self).__init__()
        if sets:
            for set_ in sets:
                self.add(set_)

    def make_node(self, set_):
        # frozenset: hashable, so usable as a graph node / dict key.
        return frozenset(set_)

    def to_json(self):
        # Frozensets are serialized as plain lists.
        return json.dumps([[list(n) for n in self.nodes],
                           [(list(n), list(m)) for n, m in self.iter_edges()]])

    @classmethod
    def from_json(cls, txt):
        # Rebuild frozenset nodes and edges from the list representation.
        node_data, edge_data = json.loads(txt)
        g = cls()
        nodes = [g.make_node(lis) for lis in node_data]
        edges = [(g.make_node(lis1), g.make_node(lis2)) for lis1, lis2 in edge_data]
        for node in nodes:
            g.add_node(node)
        for edge in edges:
            g.add_edge(edge)
        return g
class Interval(object):
    # a very simple (non-disjoint) interval class that does not
    # explicitly handle open / closed endpoints
    # The canonical empty interval is Interval(0, 0).
    def __init__(self, a, b):
        # a and b must be hashable
        if a > b:
            raise ValueError('Invalid interval, %s > %s' % (str(a), str(b)))
        self.a = a
        self.b = b

    def __nonzero__(self):
        # Python 2 truth protocol: an empty interval (a == b) is falsy.
        return self.a != self.b

    def __str__(self):
        return str((self.a, self.b))
    __repr__ = __str__

    def issubset(self, other):
        # containment of [self.a, self.b] within [other.a, other.b]
        return self.a >= other.a and self.b <= other.b

    def issuperset(self, other):
        return other.issubset(self)

    def union(self, *others):
        # can return a single interval or a list of intervals
        ranges = [(other.a, other.b) for other in others if other]
        if self:
            ranges.append((self.a, self.b))
        if not ranges:
            # every operand was empty
            return self.__class__(0, 0)
        ranges.sort()
        res = []
        it = iter(ranges)
        a, b = next(it)
        for x, y in it:
            if x > b:
                # disjoint from the interval being accumulated: flush it
                res.append(self.__class__(a, b))
                a = x
            b = max([y, b])
        res.append(self.__class__(a, b))
        if len(res) == 1:
            res = res[0]
        return res

    def intersection(self, *others):
        # empty intersection has both parameters
        # equal to 0
        ranges = [(other.a, other.b) for other in others if other]
        try:
            if self:
                ranges.append((self.a, self.b))
        except:
            # NOTE(review): debugging aid only; the bare except re-raises,
            # so no errors are swallowed here.
            print self, self.a, self.b
            raise
        if not ranges:
            return self.__class__(0, 0)
        it = iter(ranges)
        a, b = next(it)
        for x, y in it:
            a = max([a, x])
            b = min([b, y])
        if a >= b:
            return self.__class__(0, 0)
        return self.__class__(a, b)

    def difference(self, *others):
        # can return a single interval or a list of intervals
        if not self:
            return self.__class__(0, 0)
        # normalize 'others' into sorted disjoint intervals via union()
        others = self.__class__(0, 0).union(*others)
        if not isinstance(others, list):
            others = [others]
        ranges = [(other.a, other.b) for other in others if other]
        ranges.sort()
        res = []
        a, b = self.a, self.b
        for x, y in ranges:
            if x <= a:
                if a < y < b:
                    a = y            # clip the left edge
                elif y >= b:
                    a = b = 0        # fully covered: nothing remains
                    break
            elif x >= b:
                break
            else:
                res.append(self.__class__(a, x))
                if y < b:
                    a = y
                else:
                    a = b = 0
                    break
        res.append(self.__class__(a, b))
        res = [x for x in res if x]  # drop empty fragments
        if not res:
            res = [self.__class__(0, 0)]
        if len(res) == 1:
            res = res[0]
        return res

    def __eq__(self, other):
        return (self.a, self.b) == (other.a, other.b)

    def __ne__(self, other):
        return (self.a, self.b) != (other.a, other.b)

    def __hash__(self):
        return hash((self.a, self.b))
######################### Example graphs and basic tests ##########################
# Example data: overlapping/nested age bands in years (upper bound
# exclusive); float('inf') marks open-ended bands.
age_ranges = [(25, 26), (30, 45), (75, 76), (83, 84), (30, 40), (84, float('inf')),
              (89, 90), (56, 60), (56, 66), (20, 21), (20, 24), (75, 85),
              (25, 30), (36, 40), (25, 35), (36, 46), (45, 46), (92, 93),
              (26, 36), (16, 20), (16, 24), (16, 26), (88, 89), (60, float('inf')),
              (93, 94), (70, 71), (60, 65), (77, 78), (46, 50), (90, 91),
              (45, 55), (46, 56), (17, 26), (17, 25), (80, 81), (70, 75),
              (24, 25), (70, float('inf')), (95, float('inf')), (1, 2), (11, 12), (78, 79),
              (55, 56), (6, 7), (9, 10), (22, 24), (30, 31), (17, 18), (8, 9),
              (65, float('inf')), (26, 30), (50, 55), (31, 46), (16, 17), (16, 18),
              (35, 45), (40, 50), (15, 16), (0, 22), (0, 21), (2, 3), (87, 88),
              (16, float('inf')), (31, 35), (46, 60), (65, 66), (14, 15), (40, 45),
              (0, 16), (0, 17), (7, 8), (4, 5), (71, 75), (35, 36), (20, 25),
              (45, 60), (24, 31), (10, 11), (82, 83), (91, 92), (79, 80),
              (18, 20), (65, 70), (65, 75), (0, 1), (86, 87), (55, 65),
              (13, 14), (50, 60), (21, float('inf')), (84, 85), (94, 95), (76, 77),
              (21, 22), (21, 25), (5, 6), (12, 13), (81, 82), (3, 4),
              (85, 86), (0, 71), (66, 70)]
# Finite bands as element collections (Python 2: range returns a list).
sets = [range(a, b) for a, b in age_ranges if not b==float('inf')]
def age():
    """Build the age-ranges example as a PartitionGraph from a JSON dump.

    The JSON is a pair [nodes, edges]: a list of interval labels ("a-b",
    with "inf" for an open upper bound) followed by a list of [parent,
    child] pairs where each child interval is contained in its parent.
    NOTE(review): the "!16-18" label presumably denotes a complement /
    exclusion node -- confirm against PartitionGraph.from_json.
    """
    txt = """[["25-26", "30-45", "75-76", "83-84", "30-40", "84-inf",
"89-90", "56-60", "56-66", "20-21", "20-24", "75-85",
"25-30", "36-40", "25-35", "36-46", "45-46", "92-93",
"50-55", "16-20", "16-24", "16-26", "80-81", "60-inf",
"70-75", "78-79", "60-65", "77-78", "46-50", "90-91",
"45-55", "46-56", "17-26", "17-25", "88-89", "93-94",
"84-85", "70-inf", "95-inf", "94-95", "11-12", "70-71",
"55-56", "6-7", "9-10", "22-24", "30-31", "17-18", "8-9",
"65-inf", "26-30", "26-36", "31-46", "16-17", "16-18",
"35-45", "40-50", "15-16", "0-22", "0-21", "2-3",
"87-88", "16-inf", "31-35", "46-60", "65-66", "14-15",
"40-45", "0-16", "0-17", "7-8", "4-5", "71-75", "35-36",
"20-25", "81-82", "24-31", "10-11", "82-83", "91-92",
"79-80", "18-20", "65-70", "65-75", "0-1", "86-87",
"55-65", "13-14", "50-60", "21-inf", "24-25", "1-2",
"76-77", "21-22", "!16-18", "21-25", "5-6", "12-13",
"45-60", "3-4", "85-86", "0-71", "66-70"], [["30-45",
"36-40"], ["30-45", "35-36"], ["30-45", "31-35"],
["30-45", "40-45"], ["30-45", "30-31"], ["30-40",
"36-40"], ["30-40", "35-36"], ["30-40", "31-35"],
["30-40", "30-31"], ["84-inf", "86-87"], ["84-inf",
"91-92"], ["84-inf", "84-85"], ["84-inf", "94-95"],
["84-inf", "85-86"], ["84-inf", "93-94"], ["84-inf",
"90-91"], ["84-inf", "92-93"], ["84-inf", "88-89"],
["84-inf", "89-90"], ["84-inf", "95-inf"], ["84-inf",
"87-88"], ["56-66", "56-60"], ["56-66", "60-65"],
["56-66", "65-66"], ["20-24", "20-21"], ["20-24",
"22-24"], ["20-24", "21-22"], ["75-85", "82-83"],
["75-85", "81-82"], ["75-85", "78-79"], ["75-85",
"79-80"], ["75-85", "77-78"], ["75-85", "75-76"],
["75-85", "80-81"], ["75-85", "76-77"], ["75-85",
"83-84"], ["75-85", "84-85"], ["25-30", "25-26"],
["25-30", "26-30"], ["25-35", "25-26"], ["25-35",
"31-35"], ["25-35", "26-30"], ["25-35", "30-31"],
["36-46", "36-40"], ["36-46", "40-45"], ["36-46",
"45-46"], ["16-20", "16-17"], ["16-20", "17-18"],
["16-20", "18-20"], ["16-24", "16-17"], ["16-24",
"22-24"], ["16-24", "17-18"], ["16-24", "18-20"],
["16-24", "20-21"], ["16-24", "21-22"], ["16-26",
"16-17"], ["16-26", "22-24"], ["16-26", "20-21"],
["16-26", "24-25"], ["16-26", "17-18"], ["16-26",
"25-26"], ["16-26", "18-20"], ["16-26", "21-22"],
["60-inf", "60-65"], ["60-inf", "91-92"], ["60-inf",
"79-80"], ["60-inf", "77-78"], ["60-inf", "75-76"],
["60-inf", "90-91"], ["60-inf", "83-84"], ["60-inf",
"78-79"], ["60-inf", "89-90"], ["60-inf", "82-83"],
["60-inf", "86-87"], ["60-inf", "88-89"], ["60-inf",
"71-75"], ["60-inf", "95-inf"], ["60-inf", "87-88"],
["60-inf", "84-85"], ["60-inf", "94-95"], ["60-inf",
"76-77"], ["60-inf", "92-93"], ["60-inf", "65-66"],
["60-inf", "85-86"], ["60-inf", "81-82"], ["60-inf",
"80-81"], ["60-inf", "93-94"], ["60-inf", "70-71"],
["60-inf", "66-70"], ["70-75", "71-75"], ["70-75",
"70-71"], ["45-55", "46-50"], ["45-55", "45-46"],
["45-55", "50-55"], ["46-56", "46-50"], ["46-56",
"50-55"], ["46-56", "55-56"], ["17-26", "22-24"],
["17-26", "20-21"], ["17-26", "24-25"], ["17-26",
"17-18"], ["17-26", "25-26"], ["17-26", "18-20"],
["17-26", "21-22"], ["17-25", "22-24"], ["17-25",
"24-25"], ["17-25", "17-18"], ["17-25", "18-20"],
["17-25", "20-21"], ["17-25", "21-22"], ["70-inf",
"82-83"], ["70-inf", "91-92"], ["70-inf", "79-80"],
["70-inf", "77-78"], ["70-inf", "75-76"], ["70-inf",
"90-91"], ["70-inf", "83-84"], ["70-inf", "89-90"],
["70-inf", "86-87"], ["70-inf", "80-81"], ["70-inf",
"88-89"], ["70-inf", "71-75"], ["70-inf", "95-inf"],
["70-inf", "70-71"], ["70-inf", "87-88"], ["70-inf",
"84-85"], ["70-inf", "94-95"], ["70-inf", "76-77"],
["70-inf", "92-93"], ["70-inf", "81-82"], ["70-inf",
"85-86"], ["70-inf", "93-94"], ["70-inf", "78-79"],
["65-inf", "82-83"], ["65-inf", "91-92"], ["65-inf",
"79-80"], ["65-inf", "77-78"], ["65-inf", "75-76"],
["65-inf", "90-91"], ["65-inf", "83-84"], ["65-inf",
"78-79"], ["65-inf", "89-90"], ["65-inf", "86-87"],
["65-inf", "85-86"], ["65-inf", "88-89"], ["65-inf",
"71-75"], ["65-inf", "95-inf"], ["65-inf", "87-88"],
["65-inf", "84-85"], ["65-inf", "94-95"], ["65-inf",
"76-77"], ["65-inf", "92-93"], ["65-inf", "65-66"],
["65-inf", "81-82"], ["65-inf", "80-81"], ["65-inf",
"93-94"], ["65-inf", "70-71"], ["65-inf", "66-70"],
["26-36", "26-30"], ["26-36", "35-36"], ["26-36",
"31-35"], ["26-36", "30-31"], ["31-46", "36-40"],
["31-46", "35-36"], ["31-46", "31-35"], ["31-46",
"40-45"], ["31-46", "45-46"], ["16-18", "17-18"],
["16-18", "16-17"], ["35-45", "36-40"], ["35-45",
"35-36"], ["35-45", "40-45"], ["40-50", "45-46"],
["40-50", "40-45"], ["40-50", "46-50"], ["0-22",
"2-3"], ["0-22", "7-8"], ["0-22", "15-16"], ["0-22",
"4-5"], ["0-22", "11-12"], ["0-22", "10-11"], ["0-22",
"1-2"], ["0-22", "3-4"], ["0-22", "21-22"], ["0-22",
"12-13"], ["0-22", "13-14"], ["0-22", "18-20"], ["0-22",
"8-9"], ["0-22", "0-1"], ["0-22", "16-17"], ["0-22",
"20-21"], ["0-22", "6-7"], ["0-22", "14-15"], ["0-22",
"17-18"], ["0-22", "9-10"], ["0-22", "5-6"], ["0-21",
"0-17"], ["0-21", "17-18"], ["0-21", "18-20"], ["0-21",
"20-21"], ["16-inf", "35-45"], ["16-inf", "16-20"],
["16-inf", "25-35"], ["16-inf", "55-65"], ["16-inf",
"45-55"], ["16-inf", "65-inf"], ["16-inf", "20-25"],
["46-60", "46-50"], ["46-60", "50-55"], ["46-60",
"56-60"], ["46-60", "55-56"], ["0-16", "2-3"], ["0-16",
"9-10"], ["0-16", "5-6"], ["0-16", "0-1"], ["0-16",
"11-12"], ["0-16", "1-2"], ["0-16", "3-4"], ["0-16",
"12-13"], ["0-16", "7-8"], ["0-16", "8-9"], ["0-16",
"15-16"], ["0-16", "6-7"], ["0-16", "14-15"], ["0-16",
"10-11"], ["0-16", "13-14"], ["0-16", "4-5"], ["0-17",
"2-3"], ["0-17", "9-10"], ["0-17", "7-8"], ["0-17",
"15-16"], ["0-17", "4-5"], ["0-17", "11-12"], ["0-17",
"1-2"], ["0-17", "3-4"], ["0-17", "12-13"], ["0-17",
"5-6"], ["0-17", "8-9"], ["0-17", "16-17"], ["0-17",
"6-7"], ["0-17", "14-15"], ["0-17", "10-11"], ["0-17",
"13-14"], ["0-17", "0-1"], ["20-25", "20-21"], ["20-25",
"22-24"], ["20-25", "21-22"], ["20-25", "24-25"], ["24-31",
"24-25"], ["24-31", "25-26"], ["24-31", "26-30"], ["24-31",
"30-31"], ["65-70", "65-66"], ["65-70", "66-70"], ["65-75",
"65-66"], ["65-75", "66-70"], ["65-75", "71-75"], ["65-75",
"70-71"], ["55-65", "56-60"], ["55-65", "60-65"], ["55-65",
"55-56"], ["50-60", "56-60"], ["50-60", "50-55"], ["50-60",
"55-56"], ["21-inf", "25-35"], ["21-inf", "35-45"], ["21-inf",
"45-55"], ["21-inf", "65-inf"], ["21-inf", "55-65"], ["21-inf",
"21-25"], ["!16-18", "10-11"], ["!16-18", "82-83"], ["!16-18",
"36-40"], ["!16-18", "91-92"], ["!16-18", "25-26"], ["!16-18",
"55-56"], ["!16-18", "79-80"], ["!16-18", "50-55"], ["!16-18",
"46-50"], ["!16-18", "21-22"], ["!16-18", "90-91"], ["!16-18",
"83-84"], ["!16-18", "18-20"], ["!16-18", "89-90"], ["!16-18",
"60-65"], ["!16-18", "86-87"], ["!16-18", "88-89"], ["!16-18",
"56-60"], ["!16-18", "40-45"], ["!16-18", "13-14"], ["!16-18",
"24-25"], ["!16-18", "20-21"], ["!16-18", "26-30"], ["!16-18",
"95-inf"], ["!16-18", "71-75"], ["!16-18", "2-3"], ["!16-18",
"70-71"], ["!16-18", "1-2"], ["!16-18", "87-88"], ["!16-18",
"11-12"], ["!16-18", "84-85"], ["!16-18", "94-95"], ["!16-18",
"45-46"], ["!16-18", "76-77"], ["!16-18", "31-35"], ["!16-18",
"0-1"], ["!16-18", "92-93"], ["!16-18", "6-7"], ["!16-18",
"14-15"], ["!16-18", "77-78"], ["!16-18", "9-10"], ["!16-18",
"22-24"], ["!16-18", "5-6"], ["!16-18", "15-16"], ["!16-18",
"4-5"], ["!16-18", "30-31"], ["!16-18", "81-82"], ["!16-18",
"35-36"], ["!16-18", "7-8"], ["!16-18", "75-76"], ["!16-18",
"80-81"], ["!16-18", "12-13"], ["!16-18", "93-94"], ["!16-18",
"85-86"], ["!16-18", "8-9"], ["!16-18", "78-79"], ["!16-18",
"66-70"], ["!16-18", "3-4"], ["!16-18", "65-66"], ["21-25",
"21-22"], ["21-25", "22-24"], ["21-25", "24-25"], ["45-60",
"46-50"], ["45-60", "55-56"], ["45-60", "56-60"], ["45-60",
"45-46"], ["45-60", "50-55"], ["0-71", "10-11"], ["0-71",
"60-65"], ["0-71", "36-40"], ["0-71", "25-26"], ["0-71",
"18-20"], ["0-71", "50-55"], ["0-71", "46-50"], ["0-71",
"21-22"], ["0-71", "16-17"], ["0-71", "17-18"], ["0-71",
"56-60"], ["0-71", "9-10"], ["0-71", "13-14"], ["0-71",
"15-16"], ["0-71", "20-21"], ["0-71", "26-30"], ["0-71",
"2-3"], ["0-71", "0-1"], ["0-71", "11-12"], ["0-71", "24-25"],
["0-71", "1-2"], ["0-71", "45-46"], ["0-71", "5-6"], ["0-71",
"31-35"], ["0-71", "55-56"], ["0-71", "6-7"], ["0-71", "14-15"],
["0-71", "40-45"], ["0-71", "22-24"], ["0-71", "7-8"], ["0-71",
"4-5"], ["0-71", "30-31"], ["0-71", "35-36"], ["0-71", "12-13"],
["0-71", "8-9"], ["0-71", "70-71"], ["0-71", "66-70"], ["0-71",
"3-4"], ["0-71", "65-66"]]]"""
    return PartitionGraph.from_json(txt)
def age2():
    """Build an IntervalGraph directly from the raw ``age_ranges`` tuples."""
    return IntervalGraph(age_ranges)
def age3():
    """Build a SetGraph from the materialized finite ``sets`` ranges."""
    return SetGraph(sets)
def marital_status():
    """Build the marital-status example as a PartitionGraph from JSON.

    The JSON is a pair [nodes, edges]: category labels followed by
    [parent, child] pairs grouping fine-grained statuses under broader
    ones.  Label spellings (including "disolved") are data and must not
    be "corrected".
    """
    txt = """[["Living with someone as couple", "Other", "partner",
"widowed/surviving civil partnership", "Civil Partnership but separated",
"Civil Partnership", "Common Law", "divorced/separated",
"married/living as couple/civil partnership",
"Civil Partnership but dissolved", "Civil Partnership but widowed",
"Separated", "Divorced", "Married", "Widowed",
"divorced/disolved civil partnership", "Not re-married",
"divorced/disolved/separated", "Single", "married/civil partnership",
"Re-married", "single not divorced"], [["partner", "Common Law"],
["partner", "Living with someone as couple"],
["widowed/surviving civil partnership", "Civil Partnership but widowed"],
["widowed/surviving civil partnership", "Widowed"], ["Civil Partnership",
"Civil Partnership but widowed"], ["Civil Partnership",
"Civil Partnership but dissolved"], ["Civil Partnership",
"Civil Partnership but separated"], ["divorced/separated", "Separated"],
["divorced/separated", "Divorced"],
["married/living as couple/civil partnership",
"Living with someone as couple"],
["married/living as couple/civil partnership", "married/civil partnership"],
["Married", "Re-married"], ["Married", "Not re-married"],
["divorced/disolved civil partnership", "Civil Partnership but dissolved"],
["divorced/disolved civil partnership", "Divorced"],
["divorced/disolved/separated", "Civil Partnership but dissolved"],
["divorced/disolved/separated", "divorced/separated"],
["Single", "single not divorced"], ["Single", "Divorced"],
["married/civil partnership", "Civil Partnership"],
["married/civil partnership", "Married"]]]"""
    return PartitionGraph.from_json(txt)
def test(g):
    """Randomized smoke test: mutate ``g`` heavily, checking invariants.

    Exercises flatten/deepen round-trips, JSON round-trip equality,
    random node removal, and aggressive pruning, re-validating the
    structure after each step.  Randomized, so failures may not repeat.
    """
    # fairly comprehensive test (although randomized)
    import random
    # Graph classes expose either is_valid or structure_is_valid.
    if hasattr(g, 'is_valid'):
        is_valid = g.is_valid
    else:
        is_valid = g.structure_is_valid
    assert is_valid()
    g.flatten()
    g.deepen()
    assert is_valid()
    # Serialization round-trip must reproduce an equal graph.
    assert g == g.__class__.from_json(g.to_json())
    # Discard a random quarter of the nodes, then re-check invariants.
    samp = random.sample(g.nodes, g.num_nodes//4)
    for n in samp:
        g.discard(n)
    g.flatten()
    g.deepen()
    assert is_valid()
    # Aggressively prune half of the remaining nodes.
    samp = random.sample(g.nodes, g.num_nodes//2)
    g.prune(samp, aggressive=True)
    assert is_valid()
    g.flatten()
    g.deepen()
    assert is_valid()
if __name__ == "__main__":
    # Smoke-test every example graph construction defined above.
    for g in [age(), age2(), age3(), marital_status()]:
        test(g)
|
"""
Gridded data is already aggregated by month
therefore we don't have daily gridded data, but this imports
into the same basic table type as if it were daily.
So:
Don't make a daily table
Record what monthly aggregations are available
We only use monthly aggregations on the map and chart, so
some statistics won't be available depending on the aggregation of the gridded data.
Rainfall:
SUM -> AVERAGE -> COUNT
We want SUM
Temp:
We want MAX, MIN,
Or:
record what time_period means in the table.
aggregate month tables into year tables
leave daily tables empty so that we just don't get any values to add
but the table is ready there in case the values ever turn up.
NetCDF data includes units information so need to use this to convert the data.
"""
# local_import is supplied by the hosting framework (presumably web2py's
# controller environment -- it is not defined in this file).
ClimateDataPortal = local_import("ClimateDataPortal")
# Bulk-insert helper class used by the "quickly" import style in main().
InsertChunksWithoutCheckingForExistingReadings = local_import(
    "ClimateDataPortal.InsertChunksWithoutCheckingForExistingReadings"
).InsertChunksWithoutCheckingForExistingReadings
def get_or_create(mapping, key, creator):
    """Return mapping[key], inserting creator() first if the key is missing.

    EAFP equivalent of dict.setdefault that only calls ``creator`` on a
    miss.  (First parameter renamed from ``dict``, which shadowed the
    builtin; calls in this file are positional.)
    """
    try:
        value = mapping[key]
    except KeyError:
        value = mapping[key] = creator()
    return value
def get_or_create_record(table, query):
    """Return the id of the single row of ``table`` matching ``query``,
    inserting (and committing) a new row built from ``query`` if none
    exists.

    Raises AssertionError if more than one row matches.  Relies on the
    module-global ``db`` DAL connection.  Python 2 only (.iteritems).
    """
    query_terms = []
    for key, value in query.iteritems():
        query_terms.append(getattr(table, key) == value)
    # AND all field == value terms into one DAL query.
    reduced_query = reduce(
        (lambda left, right: left & right),
        query_terms
    )
    records = db(reduced_query).select()
    count = len(records)
    assert count <= 1, "Multiple records for %s" % query
    if count == 0:
        # NOTE(review): table.insert returns the new row id (DAL
        # Reference); the .id access below is assumed valid -- confirm.
        record = table.insert(**query)
        db.commit()
    else:
        record = records.first()
    return record.id
def nearly(expected_float, actual_float):
    """Return True when actual_float is within 0.1% of expected_float.

    Fixes two defects in the original ratio test:
    * opposite signs no longer pass -- the original compared
      abs(actual/expected), so e.g. -1.0 vs 1.0 counted as nearly equal;
    * expected_float == 0 no longer raises ZeroDivisionError (only an
      exactly-zero actual is "nearly" zero by a ratio test).
    """
    if expected_float == actual_float:
        return True
    if expected_float == 0:
        return False
    difference_ratio = actual_float / expected_float
    return 0.999 < difference_ratio < 1.001
#class InsertRowsIfNoConflict(object):
# def __init__(self, database_table_name, db):
# raise NotImplemented
# self.database_table = database_table
#
# def add_reading(
# self,
# time_period,
# place_id,
# value
# ):
# database_table = self.database_table
# records = db(
# (database_table.time_period == time_period) &
# (database_table.place_id == place_id)
# ).select(database_table.value, database_table.id)
# count = len(records)
# assert count <= 1
# if count == 0:
# database_table.insert(
# time_period = time_period,
# place_id = place_id,
# value = value
# )
# else:
# existing = records.first()
# assert nearly(existing.value, value), (existing.value, value, place_id)
#
# def done(self):
# pass
import datetime
def import_climate_readings(
netcdf_file,
field_name,
add_reading,
converter,
start_date_time_string = None,
is_undefined = (lambda x: (-99.900003 < x < -99.9) or (x < -1e8) or (x > 1e8)),
time_step_string = None,
month_mapping_string = None,
skip_places = False
):
"""
Assumptions:
* there are no places
* the data is in order of places
"""
variables = netcdf_file.variables
if field_name is "?":
print ("field_name could be one of %s" % variables.keys())
else:
month_mapping_string
def to_list(variable):
result = []
for i in range(len(variable)):
result.append(variable[i])
return result
def iter_pairs(list):
for index in range(len(list)):
yield index, list[index]
time = variables["time"]
times = to_list(time)
try:
time_units_string = time.units
except AttributeError:
raise Exception("File has no time unit information")
else:
parsed_time_step_string, _, parsed_date, parsed_time = time_units_string.split(" ")
parsed_date_time_string = parsed_date+" "+parsed_time
if start_date_time_string is not None:
assert start_date_time_string == parsed_date_time_string
try:
start_date_time = datetime.datetime.strptime(
parsed_date_time_string,
"%Y-%m-%d %H:%M"
)
except ValueError:
start_date_time = datetime.datetime.strptime(
parsed_date_time_string,
"%Y-%m-%d %H:%M:%S"
)
if time_step_string is not None:
assert time_step_string == parsed_time_step_string
else:
time_step_string = parsed_time_step_string
time_step = datetime.timedelta(**{time_step_string: 1})
try:
lat_variable = variables["lat"]
except KeyError:
lat_variable = variables["latitude"]
lat = to_list(lat_variable)
try:
lon_variable = variables["lon"]
except KeyError:
lon_variable = variables["longitude"]
month_mapping = {
"rounded": ClimateDataPortal.rounded_date_to_month_number,
"twelfths": ClimateDataPortal.floored_twelfth_of_a_360_day_year,
"calendar": ClimateDataPortal.date_to_month_number
}[month_mapping_string]
try:
tt = variables[field_name]
except KeyError:
raise Exception(
"Can't find %s in %s" % (
field_name,
variables.keys()
)
)
else:
# create grid of places
place_ids = {}
lon = to_list(lon_variable)
if skip_places:
for place in db(climate_place.id > 0).select(
climate_place.latitude,
climate_place.longitude,
climate_place.id
):
place_ids[(
round(place.latitude, 6),
round(place.longitude, 6)
)] = place.id
else:
for latitude in lat:
for longitude in lon:
record = get_or_create_record(
climate_place,
dict(
longitude = longitude,
latitude = latitude
)
)
place_ids[(
round(latitude, 6),
round(longitude, 6)
)] = record
#print longitude, latitude, record
#print "up to:", len(times)
print "place_id, time_period, value"
for time_index, time_step_count in iter_pairs(times):
sys.stderr.write(
"%s %s\n" % (
time_index,
"%i%%" % int((time_index * 100) / len(times))
)
)
#print time_period
if month_mapping_string == "twelfths":
year_offset = ((time_step * int(time_step_count)).days) / 360.0
month_number = int(
ClimateDataPortal.date_to_month_number(start_date_time)
+ (year_offset * 12.0)
)
#print month_number, year_offset
else:
time_period = start_date_time + (time_step * int(time_step_count))
month_number = month_mapping(time_period)
#print month_number, time_period
values_by_time = tt[time_index]
if len(tt[time_index]) == 1:
values_by_time = values_by_time[0]
for latitude_index, latitude in iter_pairs(lat):
values_by_latitude = values_by_time[latitude_index]
for longitude_index, longitude in iter_pairs(lon):
value = values_by_latitude[longitude_index]
if not is_undefined(value):
place_id = place_ids[(round(latitude, 6), round(longitude, 6))]
converted_value = converter(value)
add_reading(
time_period = month_number,
place_id = place_id,
value = converted_value
)
add_reading.done()
db.commit()
import sys
from Scientific.IO import NetCDF
def main(argv):
    """Command-line entry point: parse options and run the NetCDF import.

    argv: full argument vector including the program name (argv[0] is
    used for the usage text; options are parsed from argv[1:]).
    """
    import argparse
    import os
    def _parse_bool(text):
        # Fix: argparse's type=bool treats ANY non-empty string -- including
        # "False" -- as True.  Accept explicit true/false spellings only.
        if isinstance(text, bool):
            return text
        lowered = str(text).strip().lower()
        if lowered in ("1", "true", "t", "yes", "y"):
            return True
        if lowered in ("0", "false", "f", "no", "n", ""):
            return False
        raise argparse.ArgumentTypeError("expected a boolean, got %r" % text)
    styles = {
        "quickly": InsertChunksWithoutCheckingForExistingReadings,
    #    "safely": InsertRowsIfNoConflict
    }
    parser = argparse.ArgumentParser(
        description = "Imports climate data from NetCDF file.",
        prog = argv[0],
        usage = """
%(prog)s --NetCDF_file path/to/file.nc --parameter_name <parameter> --style <import style> --field_name <field name>
e.g.
python ./run.py %(prog)s --field_name rr --style quickly --parameter_name "Gridded Rainfall mm" --NetCDF_file gridded_rainfall_mm.nc
"""
    )
    parser.add_argument(
        "--NetCDF_file",
        required = True,
        help="NetCDF file to import."
    )
    parser.add_argument(
        "--parameter_name",
        required = True,
        choices = ClimateDataPortal.SampleTable._SampleTable__names.keys(),
        help="Parameter name, which corresponds to an added table."
    )
    parser.add_argument(
        "--clear_existing_data",
        type = _parse_bool,
        default = False,
        help="Truncate database tables first."
    )
    # Note: the original also set default="safely", which argparse ignores
    # for a required option -- and "safely" is not even a valid choice
    # while InsertRowsIfNoConflict stays commented out.
    parser.add_argument(
        "--style",
        required = True,
        choices = styles.keys(),
        help="""
quickly: just insert readings into the database
safely: check that data is not overwritten
"""
    )
    parser.add_argument(
        "--field_name",
        required = True,
        help="""name of netCDF field that holds the data value
e.g. "tt" or "rr". Type "?", to discover options."""
    )
    parser.add_argument(
        "--units",
        required = True,
        help="""Units the data is in."""
    )
    parser.add_argument(
        "--time_steps",
        choices = ["seconds", "minutes", "hours", "days"],
        help = "Time steps"
    )
    parser.add_argument(
        "--start_date_time",
        help = """Start time, YYYY-MM-DD hh:mm format
Only required if it cannot be read from the NetCDF file
e.g. "1970-01-01 00:00"
"""
    )
    parser.add_argument(
        "--month_mapping",
        required = True,
        choices = [
            "rounded",
            "twelfths",
            "calendar"
        ],
        help = """How to map dates to months:
rounded: take later month at nearest month boundary,
twelfths: A year is taken as 360 days and is divided into 12,
calendar: Calendar months, i.e. use the actual month of the date.
"""
    )
    parser.add_argument(
        "--skip_places",
        type = _parse_bool,
        default = False,
        help = """Skip checking places and creating them if they don't exist.
Use this if the table has already been imported once.
"""
    )
    args = parser.parse_args(argv[1:])
    sample_table = ClimateDataPortal.SampleTable.with_name(args.parameter_name)
    # Fix: the original truncated the table unconditionally, ignoring the
    # --clear_existing_data flag entirely.
    if args.clear_existing_data:
        sample_table.clear()
        db.commit()
    import_climate_readings(
        netcdf_file = NetCDF.NetCDFFile(args.NetCDF_file),
        field_name = args.field_name,
        add_reading = styles[args.style](sample_table),
        converter = ClimateDataPortal.units_in_out[args.units]["in"],
        time_step_string = args.time_steps,
        start_date_time_string = args.start_date_time,
        month_mapping_string = args.month_mapping,
        skip_places = args.skip_places
    )
if __name__ == "__main__":
    import sys
    # main() returns None on success, which sys.exit maps to status 0.
    sys.exit(main(sys.argv))
|
<gh_stars>0
#!/usr/bin/env python3
#
# Author: <NAME>
# License: BSD 2-clause
# Last Change: Thu Jul 29, 2021 at 03:53 PM +0200
from yaml import safe_load
from argparse import ArgumentParser
from uncertainties import ufloat, UFloat
from statsmodels.stats.proportion import proportion_confint
#######################
# Uncertainty-related #
#######################
def div_with_confint(num, denom):
    """Return num/denom as a ufloat whose error bar is a ~68% CI half-width.

    Uses the Clopper-Pearson ("beta") binomial interval at alpha=0.32,
    i.e. roughly one-sigma coverage for an efficiency num/denom.
    """
    ratio = num / denom
    intv = proportion_confint(num, denom, method='beta', alpha=0.32) # Clopper-Pearson
    # Use the larger of the two (asymmetric) error bars and pretend it's
    # a Gaussian uncertainty.
    err_bar = max([abs(x - ratio) for x in intv])
    return ufloat(ratio, err_bar)
def div(num, denom, doErrors=True):
    """Divide num by denom, optionally attaching a binomial error bar.

    Plain division when both operands are UFloat, or plain numbers with
    doErrors False; a Clopper-Pearson ufloat when they are plain numbers
    and doErrors is True.  Any type mismatch or division by zero yields
    the string sentinel 'naN'.

    NOTE(review): the sentinel is the literal misspelled string 'naN',
    and list_gen multiplies div() results by 100 (which would repeat the
    string 100 times) -- presumably those paths never see the sentinel;
    confirm before changing it.
    """
    # Guard: num must be an instance of denom's type (so e.g. an int
    # numerator with a float denominator falls through to the sentinel).
    if isinstance(num, type(denom)):
        if isinstance(num, UFloat) or (isinstance(num, (int, float)) and not doErrors):
            result = num / denom
        elif isinstance(num, (int, float)) and doErrors:
            try:
                result = div_with_confint(num, denom)
            except ZeroDivisionError:
                result = 'naN'
        else:
            result = 'naN'
    else:
        result = 'naN'
    return result
###############
# CSV-related #
###############
# Column layout shared by list_gen()/csv_gen(); the efficiency columns use
# LaTeX math (raw strings keep the backslashes intact).
CSV_HEADERS = ['Cut', 'Run 1', 'Run 2',
               r'Run 1 $\epsilon$', r'Run 2 $\epsilon$', r'$\epsilon$ ratio']
def list_gen(run1_descr, run2_descr, rfactor=1, header=CSV_HEADERS):
    """Build the cut-flow table (list of rows) comparing two runs.

    run1_descr/run2_descr map cut keys to dicts with 'input'/'output'
    event counts and an optional 'name'.  Only cuts present in both runs
    appear.  The first data row is the total number of candidates and
    gets no efficiency columns; two summary rows (total efficiency and
    the rfactor-scaled yield ratio) are appended.

    Raises ValueError when the two runs share no cuts (the original hit
    a NameError in that case).

    Fixes: the header row now honours the ``header`` parameter (the
    original ignored it and always used the CSV_HEADERS global), and the
    duplicated total-efficiency computation is done once.
    """
    result = [header]
    run1_total_input = None
    run2_total_input = None
    run1_yield = run2_yield = None
    for key, val in run1_descr.items():
        if key not in run2_descr:
            continue
        run2_row = run2_descr[key]
        # Prefer run 1's cut name, then run 2's, then the raw key.
        try:
            cut_name = val['name']
        except KeyError:
            try:
                cut_name = run2_row['name']
            except Exception:
                cut_name = key
        run1_yield = val['output']
        run2_yield = run2_row['output']
        # Store total number of events in the raw data.
        if not run1_total_input:
            run1_total_input = run1_yield
        if not run2_total_input:
            run2_total_input = run2_yield
        if len(result) > 1:
            run1_eff = div(val['output'], val['input'], False)*100
            run2_eff = div(run2_row['output'], run2_row['input'], False)*100
            double_ratio = div(run2_eff, run1_eff, False)
        else:  # Don't calculate ratios for the total number of candidates
            run1_eff = run2_eff = double_ratio = '-'
        result.append([cut_name, run1_yield, run2_yield,
                       run1_eff, run2_eff, double_ratio])
    if run1_yield is None or run2_yield is None:
        raise ValueError('run1_descr and run2_descr share no cuts')
    # Append the total eff. ratio (last surviving yield over total input).
    run1_total_eff = div(run1_yield, run1_total_input, False)*100
    run2_total_eff = div(run2_yield, run2_total_input, False)*100
    result.append(['Total eff.'] + ['-']*(len(header)-4) +
                  [run1_total_eff, run2_total_eff,
                   run2_total_eff / run1_total_eff])
    # Append the yield ratio, scaled by the normalization factor.
    result.append(['Yield ratio x '+'{:.2f}'.format(rfactor)] +
                  [run1_yield, run2_yield] + ['-']*(len(header)-4) +
                  [run2_yield / run1_yield * rfactor])
    return result
def csv_gen(lst, latex_wrapper=True):
    """Print the rows produced by list_gen() as CSV lines on stdout.

    Floats in the efficiency columns (indices 3 and 4) greater than 1
    are shown with one decimal, all other floats with two; UFloat values
    keep their uncertainty and are wrapped in $...$ when latex_wrapper
    is True.  Everything else is str()-ed verbatim.
    """
    for row in lst:
        formatted = []
        # enumerate() replaces the original hand-maintained ielem counter.
        for ielem, elem in enumerate(row):
            if isinstance(elem, float):
                if ielem in (3, 4) and elem > 1:
                    formatted.append('{:.1f}'.format(elem))
                else:
                    formatted.append('{:.2f}'.format(elem))
            elif isinstance(elem, UFloat):
                if latex_wrapper:
                    formatted.append('${:.2f}$'.format(elem))
                else:
                    formatted.append('{:.2f}'.format(elem))
            else:
                formatted.append(str(elem))
        print(','.join(formatted))
################################
# Command line argument parser #
################################
def parse_input(descr='Generate cut flow CSV from YAML files.'):
    """Parse command-line options; return the argparse namespace.

    Options: -o/--runOne and -t/--runTwo (required YAML paths),
    -r/--rfactor (string, converted to float by the caller), and
    -n/--noLaTeX to disable $...$ wrapping.
    """
    parser = ArgumentParser(description=descr)
    parser.add_argument('-o', '--runOne',
                        required=True,
                        help='specify the run 1 cutflow YAML file.'
                        )
    parser.add_argument('-r', '--rfactor',
                        default='1',
                        help='Factor to normalize the total ratio'
                        )
    parser.add_argument('-t', '--runTwo',
                        required=True,
                        help='specify the run 2 cutflow YAML file.'
                        )
    parser.add_argument('-n', '--noLaTeX',
                        action='store_true',
                        help='disable LaTeX wrapping.'
                        )
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_input()
    # Load both cut-flow descriptions from their YAML files.
    with open(args.runOne) as f:
        run1_descr = safe_load(f)
    with open(args.runTwo) as f:
        run2_descr = safe_load(f)
    tab = list_gen(run1_descr, run2_descr, float(args.rfactor))
    csv_gen(tab, not args.noLaTeX)
|
import json
from collections import defaultdict
from datasets.arrow_dataset import Dataset
import torch
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data import DataLoader, dataloader
from transformers import default_data_collator
from transformers import AutoTokenizer, EvalPrediction
from utils_qa import postprocess_qa_predictions
class SQuAD_dataset(Dataset):
    """SQuAD v2 evaluation dataset: loads a dev-set JSON file, tokenizes
    every question/context pair into overlapping spans, and serves the
    per-span model inputs.

    Fixes over the original: the JSON file is closed via ``with``, an
    unreachable ``print`` after ``return`` and a dead ``if True/else``
    branch were removed, and ``to_column``'s return annotation is now a
    real type (it returns a plain dict).

    NOTE(review): the base ``Dataset.__init__`` is never called --
    presumably only the mapping protocol below is needed; confirm.
    """

    def __init__(self, path_to_dev) -> None:
        self.raw_data = self.get_raw(path_to_dev)
        self.column_data = self.to_column(raw_data=self.raw_data)
        self.tokenized_data = self.tokenize(self.column_data)
        # Per-span features / per-question examples for QA postprocessing.
        self.feat = self.get_feature()
        self.examples = self.get_example()
        # These keys are only needed by postprocessing, not by the model.
        self.tokenized_data.pop('example_id')
        self.tokenized_data.pop('offset_mapping')
        self.input_ids = self.tokenized_data['input_ids']
        self.token_type_ids = self.tokenized_data['token_type_ids']
        self.attention_mask = self.tokenized_data['attention_mask']

    def get_raw(self, path_to_dev) -> list:
        """Flatten the nested SQuAD JSON into a list of per-question dicts."""
        raw = []
        with open(path_to_dev, 'r') as f:
            squad = json.load(f)
        for example in squad["data"]:
            title = example.get("title", "")
            for paragraph in example["paragraphs"]:
                context = paragraph["context"]  # do not strip leading blank spaces GH-2585
                for qa in paragraph["qas"]:
                    question = qa["question"]
                    id_ = qa["id"]
                    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                    answers = [answer["text"] for answer in qa["answers"]]
                    # Features currently used are "context", "question", and "answers".
                    # Others are extracted here for the ease of future expansions.
                    raw.append({
                        "title": title,
                        "context": context,
                        "question": question,
                        "id": id_,
                        "answers": {
                            "answer_start": answer_starts,
                            "text": answers,
                        },
                    })
        return raw

    def to_column(self, raw_data: list) -> dict:
        """Transpose the list of row dicts into a dict of parallel columns."""
        column_data = {'id': [], 'context': [], 'title': [], 'question': []}
        for raw in raw_data:
            column_data['id'].append(raw['id'])
            column_data['title'].append(raw['title'])
            column_data['context'].append(raw['context'])
            column_data['question'].append(raw['question'])
        return column_data

    def tokenize(self, column_data,
                 tokenizer_path='SQuAD-test/pretrained/deberta-xlarge-tokenizer') -> dict:
        """Tokenize question/context pairs into fixed-length spans.

        Long contexts overflow into several overlapping spans (stride 128);
        ``example_id`` maps each span back to its question, and offset
        mappings outside the context are nulled so postprocessing can tell
        context tokens apart.  ``tokenizer_path`` defaults to the original
        hard-coded checkpoint for backward compatibility.
        """
        column_data['question'] = [q.lstrip() for q in column_data['question']]
        pad_on_right = True
        max_seq_len = 384
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
        tokenized = tokenizer(
            column_data['question' if pad_on_right else 'context'],
            column_data['context' if pad_on_right else 'question'],
            truncation='only_second' if pad_on_right else 'only_first',
            max_length=max_seq_len,
            stride=128,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding="max_length",
        )
        sample_mapping = tokenized.pop("overflow_to_sample_mapping")
        tokenized["example_id"] = []
        for i in range(len(tokenized["input_ids"])):
            # Grab the sequence corresponding to that example (to know what
            # is the context and what is the question).
            sequence_ids = tokenized.sequence_ids(i)
            context_index = 1 if pad_on_right else 0
            # One example can give several spans; this is the index of the
            # example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized["example_id"].append(column_data["id"][sample_index])
            # Null the offset_mapping entries that are not part of the
            # context so a token's membership is easy to determine.
            tokenized["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_index else None)
                for k, o in enumerate(tokenized["offset_mapping"][i])
            ]
        return tokenized

    def get_feature(self):
        """Re-shape tokenized columns into a list of per-span feature dicts."""
        features = []
        keys = self.tokenized_data.keys()
        for i in range(len(self.tokenized_data['input_ids'])):
            features.append({key: self.tokenized_data[key][i] for key in keys})
        return features

    def get_example(self):
        """Re-shape raw columns into a list of per-question example dicts."""
        examples = []
        keys = self.column_data.keys()
        for i in range(len(self.column_data['context'])):
            examples.append({key: self.column_data[key][i] for key in keys})
        return examples

    def __len__(self):
        return len(self.token_type_ids)

    def __getitem__(self, key):
        # Only the model inputs; example/feature metadata lives separately.
        return {
            'input_ids': self.input_ids[key],
            'token_type_ids': self.token_type_ids[key],
            'attention_mask': self.attention_mask[key],
        }

    def post_process_function(self, examples, features, predictions, stage='eval'):
        """Convert raw start/end logits into SQuAD-v2 formatted predictions."""
        predictions = postprocess_qa_predictions(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=True,
            n_best_size=20,
            max_answer_length=30,
            null_score_diff_threshold=0.0,
            output_dir='output',
            prefix=stage,
        )
        # SQuAD v2 scoring wants an explicit no-answer probability per id
        # (the original guarded this with a constant `if True:`).
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0}
            for k, v in predictions.items()
        ]
        return formatted_predictions
def get_loader(tokenized_dataset, batch_size=8):
    """Wrap a tokenized dataset in a sequential, deterministic DataLoader.

    The original built and randomly seeded a torch.Generator that was
    never passed to the loader -- dead code for a SequentialSampler -- so
    it has been removed.  ``batch_size`` is now a parameter with the old
    hard-coded value (8) as its default.
    """
    sampler = SequentialSampler(tokenized_dataset)
    return DataLoader(
        tokenized_dataset,
        batch_size=batch_size,
        sampler=sampler,
        collate_fn=default_data_collator,
        drop_last=False,
        num_workers=0,
        pin_memory=True,
    )
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
# Public API of this generated Pulumi module: one output type per
# workspace configuration block.
__all__ = [
    'WorkspaceAdminPortal',
    'WorkspaceAdminPortalPalette',
    'WorkspaceAuthPolicy',
    'WorkspaceCaptchaPolicy',
    'WorkspaceFacebookSocialLogin',
    'WorkspaceGithubSocialLogin',
    'WorkspaceGoogleSocialLogin',
    'WorkspaceHostedLogin',
    'WorkspaceLockoutPolicy',
    'WorkspaceMfaAuthenticationApp',
    'WorkspaceMfaPolicy',
    'WorkspaceMicrosoftSocialLogin',
    'WorkspacePasswordPolicy',
    'WorkspacePwnedPasswordEmail',
    'WorkspaceResetPasswordEmail',
    'WorkspaceSaml',
    'WorkspaceUserActivationEmail',
    'WorkspaceUserInvitationEmail',
]
@pulumi.output_type
class WorkspaceAdminPortal(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableAccountSettings":
suggest = "enable_account_settings"
elif key == "enableApiTokens":
suggest = "enable_api_tokens"
elif key == "enableAuditLogs":
suggest = "enable_audit_logs"
elif key == "enablePersonalApiTokens":
suggest = "enable_personal_api_tokens"
elif key == "enablePrivacy":
suggest = "enable_privacy"
elif key == "enableProfile":
suggest = "enable_profile"
elif key == "enableRoles":
suggest = "enable_roles"
elif key == "enableSecurity":
suggest = "enable_security"
elif key == "enableSso":
suggest = "enable_sso"
elif key == "enableSubscriptions":
suggest = "enable_subscriptions"
elif key == "enableUsage":
suggest = "enable_usage"
elif key == "enableUsers":
suggest = "enable_users"
elif key == "enableWebhooks":
suggest = "enable_webhooks"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WorkspaceAdminPortal. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WorkspaceAdminPortal.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WorkspaceAdminPortal.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_account_settings: bool,
enable_api_tokens: bool,
enable_audit_logs: bool,
enable_personal_api_tokens: bool,
enable_privacy: bool,
enable_profile: bool,
enable_roles: bool,
enable_security: bool,
enable_sso: bool,
enable_subscriptions: bool,
enable_usage: bool,
enable_users: bool,
enable_webhooks: bool,
palette: 'outputs.WorkspaceAdminPortalPalette'):
pulumi.set(__self__, "enable_account_settings", enable_account_settings)
pulumi.set(__self__, "enable_api_tokens", enable_api_tokens)
pulumi.set(__self__, "enable_audit_logs", enable_audit_logs)
pulumi.set(__self__, "enable_personal_api_tokens", enable_personal_api_tokens)
pulumi.set(__self__, "enable_privacy", enable_privacy)
pulumi.set(__self__, "enable_profile", enable_profile)
pulumi.set(__self__, "enable_roles", enable_roles)
pulumi.set(__self__, "enable_security", enable_security)
pulumi.set(__self__, "enable_sso", enable_sso)
pulumi.set(__self__, "enable_subscriptions", enable_subscriptions)
pulumi.set(__self__, "enable_usage", enable_usage)
pulumi.set(__self__, "enable_users", enable_users)
pulumi.set(__self__, "enable_webhooks", enable_webhooks)
pulumi.set(__self__, "palette", palette)
# Generated snake_case getters; @pulumi.getter(name=...) maps each one back
# to the camelCase key used in the provider wire format.
@property
@pulumi.getter(name="enableAccountSettings")
def enable_account_settings(self) -> bool:
    return pulumi.get(self, "enable_account_settings")

@property
@pulumi.getter(name="enableApiTokens")
def enable_api_tokens(self) -> bool:
    return pulumi.get(self, "enable_api_tokens")

@property
@pulumi.getter(name="enableAuditLogs")
def enable_audit_logs(self) -> bool:
    return pulumi.get(self, "enable_audit_logs")

@property
@pulumi.getter(name="enablePersonalApiTokens")
def enable_personal_api_tokens(self) -> bool:
    return pulumi.get(self, "enable_personal_api_tokens")

@property
@pulumi.getter(name="enablePrivacy")
def enable_privacy(self) -> bool:
    return pulumi.get(self, "enable_privacy")

@property
@pulumi.getter(name="enableProfile")
def enable_profile(self) -> bool:
    return pulumi.get(self, "enable_profile")

@property
@pulumi.getter(name="enableRoles")
def enable_roles(self) -> bool:
    return pulumi.get(self, "enable_roles")

@property
@pulumi.getter(name="enableSecurity")
def enable_security(self) -> bool:
    return pulumi.get(self, "enable_security")

@property
@pulumi.getter(name="enableSso")
def enable_sso(self) -> bool:
    return pulumi.get(self, "enable_sso")

@property
@pulumi.getter(name="enableSubscriptions")
def enable_subscriptions(self) -> bool:
    return pulumi.get(self, "enable_subscriptions")

@property
@pulumi.getter(name="enableUsage")
def enable_usage(self) -> bool:
    return pulumi.get(self, "enable_usage")

@property
@pulumi.getter(name="enableUsers")
def enable_users(self) -> bool:
    return pulumi.get(self, "enable_users")

@property
@pulumi.getter(name="enableWebhooks")
def enable_webhooks(self) -> bool:
    return pulumi.get(self, "enable_webhooks")

@property
@pulumi.getter
def palette(self) -> 'outputs.WorkspaceAdminPortalPalette':
    return pulumi.get(self, "palette")
@pulumi.output_type
class WorkspaceAdminPortalPalette(dict):
    """Dict-backed Pulumi output type: admin-portal color palette.

    Follows the standard Pulumi codegen pattern — camelCase dict keys with
    snake_case property getters; direct camelCase access triggers a warning.
    """

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "primaryText":
            suggest = "primary_text"
        elif key == "secondaryText":
            suggest = "secondary_text"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceAdminPortalPalette. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceAdminPortalPalette.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceAdminPortalPalette.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 error: str,
                 info: str,
                 primary: str,
                 primary_text: str,
                 secondary: str,
                 secondary_text: str,
                 success: str,
                 warning: str):
        pulumi.set(__self__, "error", error)
        pulumi.set(__self__, "info", info)
        pulumi.set(__self__, "primary", primary)
        pulumi.set(__self__, "primary_text", primary_text)
        pulumi.set(__self__, "secondary", secondary)
        pulumi.set(__self__, "secondary_text", secondary_text)
        pulumi.set(__self__, "success", success)
        pulumi.set(__self__, "warning", warning)

    @property
    @pulumi.getter
    def error(self) -> str:
        return pulumi.get(self, "error")

    @property
    @pulumi.getter
    def info(self) -> str:
        return pulumi.get(self, "info")

    @property
    @pulumi.getter
    def primary(self) -> str:
        return pulumi.get(self, "primary")

    @property
    @pulumi.getter(name="primaryText")
    def primary_text(self) -> str:
        return pulumi.get(self, "primary_text")

    @property
    @pulumi.getter
    def secondary(self) -> str:
        return pulumi.get(self, "secondary")

    @property
    @pulumi.getter(name="secondaryText")
    def secondary_text(self) -> str:
        return pulumi.get(self, "secondary_text")

    @property
    @pulumi.getter
    def success(self) -> str:
        return pulumi.get(self, "success")

    @property
    @pulumi.getter
    def warning(self) -> str:
        return pulumi.get(self, "warning")
@pulumi.output_type
class WorkspaceAuthPolicy(dict):
    """Dict-backed Pulumi output type: workspace authentication policy.

    Required: signup flags, API-token/role flags, JWT expirations and the
    same-site cookie policy; jwt_algorithm / jwt_public_key are optional.
    """

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "allowSignups":
            suggest = "allow_signups"
        elif key == "allowUnverifiedUsers":
            suggest = "allow_unverified_users"
        elif key == "enableApiTokens":
            suggest = "enable_api_tokens"
        elif key == "enableRoles":
            suggest = "enable_roles"
        elif key == "jwtAccessTokenExpiration":
            suggest = "jwt_access_token_expiration"
        elif key == "jwtRefreshTokenExpiration":
            suggest = "jwt_refresh_token_expiration"
        elif key == "sameSiteCookiePolicy":
            suggest = "same_site_cookie_policy"
        elif key == "jwtAlgorithm":
            suggest = "jwt_algorithm"
        elif key == "jwtPublicKey":
            suggest = "jwt_public_key"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceAuthPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceAuthPolicy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceAuthPolicy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allow_signups: bool,
                 allow_unverified_users: bool,
                 enable_api_tokens: bool,
                 enable_roles: bool,
                 jwt_access_token_expiration: int,
                 jwt_refresh_token_expiration: int,
                 same_site_cookie_policy: str,
                 jwt_algorithm: Optional[str] = None,
                 jwt_public_key: Optional[str] = None):
        pulumi.set(__self__, "allow_signups", allow_signups)
        pulumi.set(__self__, "allow_unverified_users", allow_unverified_users)
        pulumi.set(__self__, "enable_api_tokens", enable_api_tokens)
        pulumi.set(__self__, "enable_roles", enable_roles)
        pulumi.set(__self__, "jwt_access_token_expiration", jwt_access_token_expiration)
        pulumi.set(__self__, "jwt_refresh_token_expiration", jwt_refresh_token_expiration)
        pulumi.set(__self__, "same_site_cookie_policy", same_site_cookie_policy)
        # Optional fields are stored only when provided, matching codegen style.
        if jwt_algorithm is not None:
            pulumi.set(__self__, "jwt_algorithm", jwt_algorithm)
        if jwt_public_key is not None:
            pulumi.set(__self__, "jwt_public_key", jwt_public_key)

    @property
    @pulumi.getter(name="allowSignups")
    def allow_signups(self) -> bool:
        return pulumi.get(self, "allow_signups")

    @property
    @pulumi.getter(name="allowUnverifiedUsers")
    def allow_unverified_users(self) -> bool:
        return pulumi.get(self, "allow_unverified_users")

    @property
    @pulumi.getter(name="enableApiTokens")
    def enable_api_tokens(self) -> bool:
        return pulumi.get(self, "enable_api_tokens")

    @property
    @pulumi.getter(name="enableRoles")
    def enable_roles(self) -> bool:
        return pulumi.get(self, "enable_roles")

    @property
    @pulumi.getter(name="jwtAccessTokenExpiration")
    def jwt_access_token_expiration(self) -> int:
        return pulumi.get(self, "jwt_access_token_expiration")

    @property
    @pulumi.getter(name="jwtRefreshTokenExpiration")
    def jwt_refresh_token_expiration(self) -> int:
        return pulumi.get(self, "jwt_refresh_token_expiration")

    @property
    @pulumi.getter(name="sameSiteCookiePolicy")
    def same_site_cookie_policy(self) -> str:
        return pulumi.get(self, "same_site_cookie_policy")

    @property
    @pulumi.getter(name="jwtAlgorithm")
    def jwt_algorithm(self) -> Optional[str]:
        return pulumi.get(self, "jwt_algorithm")

    @property
    @pulumi.getter(name="jwtPublicKey")
    def jwt_public_key(self) -> Optional[str]:
        return pulumi.get(self, "jwt_public_key")
@pulumi.output_type
class WorkspaceCaptchaPolicy(dict):
    """Dict-backed Pulumi output type: CAPTCHA policy (score threshold, keys,
    optional list of e-mail addresses exempt from the check)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "minScore":
            suggest = "min_score"
        elif key == "secretKey":
            suggest = "secret_key"
        elif key == "siteKey":
            suggest = "site_key"
        elif key == "ignoredEmails":
            suggest = "ignored_emails"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceCaptchaPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceCaptchaPolicy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceCaptchaPolicy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 min_score: float,
                 secret_key: str,
                 site_key: str,
                 ignored_emails: Optional[Sequence[str]] = None):
        pulumi.set(__self__, "min_score", min_score)
        pulumi.set(__self__, "secret_key", secret_key)
        pulumi.set(__self__, "site_key", site_key)
        if ignored_emails is not None:
            pulumi.set(__self__, "ignored_emails", ignored_emails)

    @property
    @pulumi.getter(name="minScore")
    def min_score(self) -> float:
        return pulumi.get(self, "min_score")

    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> str:
        return pulumi.get(self, "secret_key")

    @property
    @pulumi.getter(name="siteKey")
    def site_key(self) -> str:
        return pulumi.get(self, "site_key")

    @property
    @pulumi.getter(name="ignoredEmails")
    def ignored_emails(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "ignored_emails")
@pulumi.output_type
class WorkspaceFacebookSocialLogin(dict):
    """Dict-backed Pulumi output type: Facebook OAuth social-login settings
    (client id, redirect URL, client secret)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceFacebookSocialLogin. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceFacebookSocialLogin.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceFacebookSocialLogin.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 redirect_url: str,
                 secret: str):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "redirect_url", redirect_url)
        pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> str:
        return pulumi.get(self, "redirect_url")

    @property
    @pulumi.getter
    def secret(self) -> str:
        return pulumi.get(self, "secret")
@pulumi.output_type
class WorkspaceGithubSocialLogin(dict):
    """Dict-backed Pulumi output type: GitHub OAuth social-login settings
    (client id, redirect URL, client secret)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceGithubSocialLogin. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceGithubSocialLogin.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceGithubSocialLogin.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 redirect_url: str,
                 secret: str):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "redirect_url", redirect_url)
        pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> str:
        return pulumi.get(self, "redirect_url")

    @property
    @pulumi.getter
    def secret(self) -> str:
        return pulumi.get(self, "secret")
@pulumi.output_type
class WorkspaceGoogleSocialLogin(dict):
    """Dict-backed Pulumi output type: Google OAuth social-login settings
    (client id, redirect URL, client secret)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceGoogleSocialLogin. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceGoogleSocialLogin.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceGoogleSocialLogin.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 redirect_url: str,
                 secret: str):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "redirect_url", redirect_url)
        pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> str:
        return pulumi.get(self, "redirect_url")

    @property
    @pulumi.getter
    def secret(self) -> str:
        return pulumi.get(self, "secret")
@pulumi.output_type
class WorkspaceHostedLogin(dict):
    """Dict-backed Pulumi output type: hosted-login settings (optional list
    of allowed redirect URLs)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "allowedRedirectUrls":
            suggest = "allowed_redirect_urls"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceHostedLogin. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceHostedLogin.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceHostedLogin.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allowed_redirect_urls: Optional[Sequence[str]] = None):
        if allowed_redirect_urls is not None:
            pulumi.set(__self__, "allowed_redirect_urls", allowed_redirect_urls)

    @property
    @pulumi.getter(name="allowedRedirectUrls")
    def allowed_redirect_urls(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "allowed_redirect_urls")
@pulumi.output_type
class WorkspaceLockoutPolicy(dict):
    """Dict-backed Pulumi output type: account-lockout policy (max login
    attempts)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "maxAttempts":
            suggest = "max_attempts"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceLockoutPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceLockoutPolicy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceLockoutPolicy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_attempts: int):
        pulumi.set(__self__, "max_attempts", max_attempts)

    @property
    @pulumi.getter(name="maxAttempts")
    def max_attempts(self) -> int:
        return pulumi.get(self, "max_attempts")
@pulumi.output_type
class WorkspaceMfaAuthenticationApp(dict):
    """Dict-backed Pulumi output type: authenticator-app MFA settings
    (service name shown in the app)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "serviceName":
            suggest = "service_name"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceMfaAuthenticationApp. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceMfaAuthenticationApp.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceMfaAuthenticationApp.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 service_name: str):
        pulumi.set(__self__, "service_name", service_name)

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> str:
        return pulumi.get(self, "service_name")
@pulumi.output_type
class WorkspaceMfaPolicy(dict):
    """Dict-backed Pulumi output type: MFA policy (remember-device flag,
    device expiration, enforcement mode)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "allowRememberDevice":
            suggest = "allow_remember_device"
        elif key == "deviceExpiration":
            suggest = "device_expiration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceMfaPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceMfaPolicy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceMfaPolicy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allow_remember_device: bool,
                 device_expiration: int,
                 enforce: str):
        pulumi.set(__self__, "allow_remember_device", allow_remember_device)
        pulumi.set(__self__, "device_expiration", device_expiration)
        pulumi.set(__self__, "enforce", enforce)

    @property
    @pulumi.getter(name="allowRememberDevice")
    def allow_remember_device(self) -> bool:
        return pulumi.get(self, "allow_remember_device")

    @property
    @pulumi.getter(name="deviceExpiration")
    def device_expiration(self) -> int:
        return pulumi.get(self, "device_expiration")

    @property
    @pulumi.getter
    def enforce(self) -> str:
        return pulumi.get(self, "enforce")
@pulumi.output_type
class WorkspaceMicrosoftSocialLogin(dict):
    """Dict-backed Pulumi output type: Microsoft OAuth social-login settings
    (client id, redirect URL, client secret)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceMicrosoftSocialLogin. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceMicrosoftSocialLogin.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceMicrosoftSocialLogin.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 redirect_url: str,
                 secret: str):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "redirect_url", redirect_url)
        pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> str:
        return pulumi.get(self, "redirect_url")

    @property
    @pulumi.getter
    def secret(self) -> str:
        return pulumi.get(self, "secret")
@pulumi.output_type
class WorkspacePasswordPolicy(dict):
    """Dict-backed Pulumi output type: password policy (passphrase allowance,
    history depth, length bounds, minimum passing strength tests)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "allowPassphrases":
            suggest = "allow_passphrases"
        elif key == "maxLength":
            suggest = "max_length"
        elif key == "minLength":
            suggest = "min_length"
        elif key == "minPhraseLength":
            suggest = "min_phrase_length"
        elif key == "minTests":
            suggest = "min_tests"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspacePasswordPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspacePasswordPolicy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspacePasswordPolicy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allow_passphrases: bool,
                 history: int,
                 max_length: int,
                 min_length: int,
                 min_phrase_length: int,
                 min_tests: int):
        pulumi.set(__self__, "allow_passphrases", allow_passphrases)
        pulumi.set(__self__, "history", history)
        pulumi.set(__self__, "max_length", max_length)
        pulumi.set(__self__, "min_length", min_length)
        pulumi.set(__self__, "min_phrase_length", min_phrase_length)
        pulumi.set(__self__, "min_tests", min_tests)

    @property
    @pulumi.getter(name="allowPassphrases")
    def allow_passphrases(self) -> bool:
        return pulumi.get(self, "allow_passphrases")

    @property
    @pulumi.getter
    def history(self) -> int:
        return pulumi.get(self, "history")

    @property
    @pulumi.getter(name="maxLength")
    def max_length(self) -> int:
        return pulumi.get(self, "max_length")

    @property
    @pulumi.getter(name="minLength")
    def min_length(self) -> int:
        return pulumi.get(self, "min_length")

    @property
    @pulumi.getter(name="minPhraseLength")
    def min_phrase_length(self) -> int:
        return pulumi.get(self, "min_phrase_length")

    @property
    @pulumi.getter(name="minTests")
    def min_tests(self) -> int:
        return pulumi.get(self, "min_tests")
@pulumi.output_type
class WorkspacePwnedPasswordEmail(dict):
    """Dict-backed Pulumi output type: pwned-password notification e-mail
    template (sender, HTML body, subject, optional redirect URL)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "fromAddress":
            suggest = "from_address"
        elif key == "fromName":
            suggest = "from_name"
        elif key == "htmlTemplate":
            suggest = "html_template"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspacePwnedPasswordEmail. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspacePwnedPasswordEmail.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspacePwnedPasswordEmail.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 from_address: str,
                 from_name: str,
                 html_template: str,
                 subject: str,
                 redirect_url: Optional[str] = None):
        pulumi.set(__self__, "from_address", from_address)
        pulumi.set(__self__, "from_name", from_name)
        pulumi.set(__self__, "html_template", html_template)
        pulumi.set(__self__, "subject", subject)
        if redirect_url is not None:
            pulumi.set(__self__, "redirect_url", redirect_url)

    @property
    @pulumi.getter(name="fromAddress")
    def from_address(self) -> str:
        return pulumi.get(self, "from_address")

    @property
    @pulumi.getter(name="fromName")
    def from_name(self) -> str:
        return pulumi.get(self, "from_name")

    @property
    @pulumi.getter(name="htmlTemplate")
    def html_template(self) -> str:
        return pulumi.get(self, "html_template")

    @property
    @pulumi.getter
    def subject(self) -> str:
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> Optional[str]:
        return pulumi.get(self, "redirect_url")
@pulumi.output_type
class WorkspaceResetPasswordEmail(dict):
    """Dict-backed Pulumi output type: reset-password e-mail template
    (sender, HTML body, subject, optional redirect URL)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "fromAddress":
            suggest = "from_address"
        elif key == "fromName":
            suggest = "from_name"
        elif key == "htmlTemplate":
            suggest = "html_template"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceResetPasswordEmail. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceResetPasswordEmail.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceResetPasswordEmail.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 from_address: str,
                 from_name: str,
                 html_template: str,
                 subject: str,
                 redirect_url: Optional[str] = None):
        pulumi.set(__self__, "from_address", from_address)
        pulumi.set(__self__, "from_name", from_name)
        pulumi.set(__self__, "html_template", html_template)
        pulumi.set(__self__, "subject", subject)
        if redirect_url is not None:
            pulumi.set(__self__, "redirect_url", redirect_url)

    @property
    @pulumi.getter(name="fromAddress")
    def from_address(self) -> str:
        return pulumi.get(self, "from_address")

    @property
    @pulumi.getter(name="fromName")
    def from_name(self) -> str:
        return pulumi.get(self, "from_name")

    @property
    @pulumi.getter(name="htmlTemplate")
    def html_template(self) -> str:
        return pulumi.get(self, "html_template")

    @property
    @pulumi.getter
    def subject(self) -> str:
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> Optional[str]:
        return pulumi.get(self, "redirect_url")
@pulumi.output_type
class WorkspaceSaml(dict):
    """Dict-backed Pulumi output type: SAML settings (ACS URL and SP
    entity id)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "acsUrl":
            suggest = "acs_url"
        elif key == "spEntityId":
            suggest = "sp_entity_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceSaml. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceSaml.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceSaml.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 acs_url: str,
                 sp_entity_id: str):
        pulumi.set(__self__, "acs_url", acs_url)
        pulumi.set(__self__, "sp_entity_id", sp_entity_id)

    @property
    @pulumi.getter(name="acsUrl")
    def acs_url(self) -> str:
        return pulumi.get(self, "acs_url")

    @property
    @pulumi.getter(name="spEntityId")
    def sp_entity_id(self) -> str:
        return pulumi.get(self, "sp_entity_id")
@pulumi.output_type
class WorkspaceUserActivationEmail(dict):
    """Dict-backed Pulumi output type: user-activation e-mail template
    (sender, HTML body, subject, optional redirect URL)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "fromAddress":
            suggest = "from_address"
        elif key == "fromName":
            suggest = "from_name"
        elif key == "htmlTemplate":
            suggest = "html_template"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceUserActivationEmail. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceUserActivationEmail.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceUserActivationEmail.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 from_address: str,
                 from_name: str,
                 html_template: str,
                 subject: str,
                 redirect_url: Optional[str] = None):
        pulumi.set(__self__, "from_address", from_address)
        pulumi.set(__self__, "from_name", from_name)
        pulumi.set(__self__, "html_template", html_template)
        pulumi.set(__self__, "subject", subject)
        if redirect_url is not None:
            pulumi.set(__self__, "redirect_url", redirect_url)

    @property
    @pulumi.getter(name="fromAddress")
    def from_address(self) -> str:
        return pulumi.get(self, "from_address")

    @property
    @pulumi.getter(name="fromName")
    def from_name(self) -> str:
        return pulumi.get(self, "from_name")

    @property
    @pulumi.getter(name="htmlTemplate")
    def html_template(self) -> str:
        return pulumi.get(self, "html_template")

    @property
    @pulumi.getter
    def subject(self) -> str:
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> Optional[str]:
        return pulumi.get(self, "redirect_url")
@pulumi.output_type
class WorkspaceUserInvitationEmail(dict):
    """Dict-backed Pulumi output type: user-invitation e-mail template
    (sender, HTML body, subject, optional redirect URL)."""

    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property getter for camelCase raw-dict keys.
        suggest = None
        if key == "fromAddress":
            suggest = "from_address"
        elif key == "fromName":
            suggest = "from_name"
        elif key == "htmlTemplate":
            suggest = "html_template"
        elif key == "redirectUrl":
            suggest = "redirect_url"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceUserInvitationEmail. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        WorkspaceUserInvitationEmail.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        WorkspaceUserInvitationEmail.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 from_address: str,
                 from_name: str,
                 html_template: str,
                 subject: str,
                 redirect_url: Optional[str] = None):
        pulumi.set(__self__, "from_address", from_address)
        pulumi.set(__self__, "from_name", from_name)
        pulumi.set(__self__, "html_template", html_template)
        pulumi.set(__self__, "subject", subject)
        if redirect_url is not None:
            pulumi.set(__self__, "redirect_url", redirect_url)

    @property
    @pulumi.getter(name="fromAddress")
    def from_address(self) -> str:
        return pulumi.get(self, "from_address")

    @property
    @pulumi.getter(name="fromName")
    def from_name(self) -> str:
        return pulumi.get(self, "from_name")

    @property
    @pulumi.getter(name="htmlTemplate")
    def html_template(self) -> str:
        return pulumi.get(self, "html_template")

    @property
    @pulumi.getter
    def subject(self) -> str:
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter(name="redirectUrl")
    def redirect_url(self) -> Optional[str]:
        return pulumi.get(self, "redirect_url")
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from os import path
import copy
from bes.common.check import check
from bes.common.string_util import string_util
from bes.compat.ConfigParser import ConfigParser
from bes.compat.ConfigParser import NoOptionError
from bes.compat.ConfigParser import SafeConfigParser
from bes.compat.StringIO import StringIO
from bes.fs.file_util import file_util
from bes.key_value.key_value import key_value
from bes.system.compat import compat
from bes.system.log import log
from bes.text.text_line_parser import text_line_parser
from bes.version.software_version import software_version
from bes.system.compat import compat
class config(object):
'''
A class to manage ini style config files. Uses SafeConfigParser underneath.
[default]
color = red
fruit = apple
[new_zealand]
color = green
fruit = kiwi
[indonesia]
color = yellow
fruit = durian
[antartica]
'''
_DEFAULT_SECTION = 'default'
class Parser(SafeConfigParser):
def __init__(self, *args, **kargs):
# Python 3 breaks compatibility with SafeConfigParser by making the
# inline_comment_prefixes be None instead of ';' which is a
# ridiculous breakage but nevertheless it happened so deal with it.
if compat.IS_PYTHON3:
kargs = copy.deepcopy(kargs)
kargs['comment_prefixes'] = ('#', ';')
kargs['inline_comment_prefixes'] = (';', )
SafeConfigParser.__init__(self, *args, **kargs)
def to_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return copy.deepcopy(d)
def __init__(self, parser = None, string_quote_char = None):
check.check_string(string_quote_char, allow_none = True)
if string_quote_char:
assert string_quote_char in [ '"', "'" ]
parser = parser or self.Parser()
if not isinstance(parser, ( ConfigParser, SafeConfigParser )):
raise TypeError('parser should be an instance of ConfigParser or SafeConfigParser.')
self._parser = parser
self._string_quote_char = string_quote_char
def __str__(self):
buf = StringIO()
# for section in self._parser.sections():
# for x in self._parser.items(section):
# print('HI: {} x={}'.format(section, x))
# assert False
self._parser.write(buf)
return buf.getvalue().strip() + '\n'
def _value_for_set(self, value):
if self._string_quote_char:
return string_util.quote(value, quote_char = self._string_quote_char)
return value
def _value_for_get(self, value):
if self._string_quote_char:
return string_util.unquote(value)
return value
def has_section(self, section):
check.check_string(section)
return self._parser.has_section(section)
def set_value(self, section, key, value):
check.check_string(section)
check.check_string(key)
check.check_string(value)
if not self._parser.has_section(section):
self._parser.add_section(section)
self._parser.set(section, key, self._value_for_set(value))
def set_values(self, section, values):
check.check_string(section)
check.check_dict(values, check.STRING_TYPES, check.STRING_TYPES)
if not self._parser.has_section(section):
self._parser.add_section(section)
for key, value in sorted(values.items()):
self._parser.set(section, key, self._value_for_set(value))
def update_config(self, values_dict):
check.check_dict(values_dict, check.STRING_TYPES, dict)
for _, section_values in values_dict.items():
check.check_dict(section_values, check.STRING_TYPES, check.STRING_TYPES)
for section, section_values in values_dict.items():
self.set_values(section, section_values)
def get_value(self, section, key):
check.check_string(section)
check.check_string(key)
if not self._parser.has_section(section):
raise ValueError('no such section: {}'.format(section))
if self._parser.has_option(section, key):
return self._value_for_get(self._parser.get(section, key))
if self._parser.has_option(self._DEFAULT_SECTION, key):
return self._value_for_get(self._parser.get(self._DEFAULT_SECTION, key))
raise ValueError('no such value in section {}: {}'.format(section, key))
def has_default_section(self):
return self.has_section(self._DEFAULT_SECTION)
def get_values(self, section):
check.check_string(section)
if not self._parser.has_section(section):
raise ValueError('no such section: {}'.format(section))
result = {}
if self.has_default_section():
default_values = self._get_values_for_section(self._DEFAULT_SECTION)
for key, value in default_values.items():
result[key] = self._value_for_get(value)
values = self._get_values_for_section(section)
for key, value in values.items():
result[key] = self._value_for_get(value)
return result
def _get_values_for_section(self, section):
result = {}
for key, value in dict(self._parser.items(section)).items():
result[key] = self._value_for_get(value)
return result
def has_value(self, section, key):
check.check_string(section)
check.check_string(key)
if self._parser.has_option(section, key):
return True
if self.has_default_section():
return self._parser.has_option(self._DEFAULT_SECTION, key)
return False
def save(self, filename, codec = 'utf-8'):
file_util.save(filename, content = str(self), codec = codec)
MAJOR = software_version.MAJOR
MINOR = software_version.MINOR
REVISION = software_version.REVISION
def bump_version(self, section, key, component, default_value = None, reset_lower = False):
if not self.has_value(section, key):
self.set_value(section, key, default_value or '1.0.0')
return
old_version = self.get_value(section, key)
new_version = software_version.bump_version(old_version, component, reset_lower = reset_lower)
self.set_value(section, key, new_version)
def change_version(self, section, key, component, value):
if not self.has_value(section, key):
self.set_value(key, default_value or '1.0.0')
return
old_version = self.get_value(section, key)
new_version = software_version.change_component(old_version, component, value)
self.set_value(section, key, new_version)
def sections(self):
return self._parser.sections()
def to_dict(self):
result = {}
for section in self.sections():
result[section] = self.get_values(section)
return result
def _reset(self):
    """Rewrite every stored value through _value_for_set.

    Called after loading when a string_quote_char is configured (see the
    load_from_* classmethods) so stored values are normalized in place.
    """
    for section_name in self._parser.sections():
        for option, raw_value in self._parser.items(section_name):
            self._parser.set(section_name, option, self._value_for_set(raw_value))
@classmethod
def load_from_text(clazz, text, filename, string_quote_char = None):
    """Build a config instance from raw ``text``.

    ``filename`` is accepted for interface symmetry but not used here.
    When ``string_quote_char`` is given, stored values are normalized
    via _reset() after parsing.
    """
    cfg = config(parser = clazz._make_parser_from_text(text),
                 string_quote_char = string_quote_char)
    if string_quote_char:
        cfg._reset()
    return cfg
@classmethod
def load_from_file(clazz, filename, string_quote_char = None):
    """Build a config instance by reading and parsing ``filename``.

    When ``string_quote_char`` is given, stored values are normalized
    via _reset() after parsing.
    """
    cfg = config(parser = clazz._make_parser_from_file(filename),
                 string_quote_char = string_quote_char)
    if string_quote_char:
        cfg._reset()
    return cfg
@classmethod
def _make_parser_from_file(clazz, filename, codec = 'utf-8'):
    """Read ``filename`` (decoded with ``codec``) and parse its contents."""
    content = file_util.read(filename, codec = codec)
    return clazz._make_parser_from_text(content)
@classmethod
def _make_parser_from_text(clazz, text):
    """Parse ``text`` into a fresh clazz.Parser instance."""
    parser = clazz.Parser()
    source = StringIO(text)
    # read_file() is the python3 spelling; readfp() is the deprecated
    # python2 equivalent.
    if compat.IS_PYTHON3:
        parser.read_file(source)
    else:
        parser.readfp(source)
    return parser
|
<filename>calculate_linkage_disequilibria_Helen.py
import sample_utils
import config
import parse_midas_data
import os.path
import os
import pylab
import sys
import numpy
import gzip
import diversity_utils_Helen as diversity_utils
import gene_diversity_utils
import calculate_substitution_rates
import clade_utils
import stats_utils
from math import log10,ceil,fabs
from numpy.random import randint, choice
# Output directory for the per-species LD intermediate files.
ld_directory = '%slinkage_disequilibria/' % (parse_midas_data.data_directory)
# Filled with (directory, species_name) to name each gzipped csv.
intermediate_filename_template = '%s%s.txt.gz'
low_divergence_threshold = config.between_low_divergence_threshold
#low_divergence_threshold = 5e-04 # this was picked by looking at inflection point of dN/dS vs dS plot
min_sample_size = config.between_host_min_sample_size
min_ld_sample_size = config.between_host_ld_min_sample_size
# Restrict the analysis to these variant classes.
allowed_variant_types = set(['1D','4D'])
def load_ld_map(species_name):
    """Load the precomputed linkage-disequilibrium summary for one species.

    Reads the gzipped intermediate csv written by the __main__ driver below
    and returns a dict keyed by (clade_type, variant_type) whose value is the
    tuple (intragene distances, intragene r^2 numerators, denominators and
    counts, intergene distances, intergene numerators, denominators and
    counts, control numerator/denominator/count, pi).  Returns {} when the
    intermediate file does not exist.

    NOTE(review): python2-era code (xrange/long, text-mode gzip); the file
    handle is never closed.
    """
    ld_map = {}
    intermediate_filename = intermediate_filename_template % (ld_directory, species_name)
    if not os.path.isfile(intermediate_filename):
        return ld_map
    file = gzip.open(intermediate_filename,"r")
    # Header columns: Species, CladeType, VariantType, Pi, then one
    # "LD_N:LD_D:<label>" column per distance bin / neighbor offset,
    # ending with the between-gene control column.
    header_line = file.readline() # header
    header_items = header_line.split(",")
    distance_strs = [item.split(":")[-1] for item in header_items[4:]]
    distances = []
    intragene_idxs = []
    intergene_distances = []
    intergene_idxs = []
    control_idx = -1  # last column holds the between-gene control
    # len(distance_strs)-1 deliberately skips the trailing control column.
    for i in xrange(0,len(distance_strs)-1):
        if distance_strs[i].startswith('g'):
            # an intergene distance (labels are "g<offset>")
            intergene_idxs.append(i)
            intergene_distances.append(long(distance_strs[i][1:]))
        else:
            # an intragene distance
            intragene_idxs.append(i)
            distances.append(float(distance_strs[i]))
    distances = numpy.array(distances)
    intragene_idxs = numpy.array(intragene_idxs)
    intergene_distances = numpy.array(intergene_distances)
    intergene_idxs = numpy.array(intergene_idxs)
    for line in file:
        items = line.split(",")
        # The intermediate file can contain records for several species
        # (record_strs accumulates across species in the driver below),
        # so keep only the rows for the requested species.
        if items[0].strip()!=species_name:
            continue
        clade_type = items[1].strip()
        variant_type = items[2].strip()
        pi = float(items[3])
        rsquared_numerators = []
        rsquared_denominators = []
        lds = []
        counts = []
        # Each data column is "numerator:denominator:count".
        for item in items[4:]:
            subitems = item.split(":")
            rsquared_numerators.append(float(subitems[0]))
            rsquared_denominators.append(float(subitems[1]))
            counts.append(float(subitems[2]))
        rsquared_numerators = numpy.array(rsquared_numerators)
        rsquared_denominators = numpy.array(rsquared_denominators)
        counts = numpy.array(counts)
        lds = rsquared_numerators/rsquared_denominators
        control_numerator = rsquared_numerators[control_idx]
        control_denominator = rsquared_denominators[control_idx]
        control_count = counts[control_idx]
        control_ld = control_numerator/control_denominator
        intragene_rsquared_numerators = rsquared_numerators[intragene_idxs]
        intragene_rsquared_denominators = rsquared_denominators[intragene_idxs]
        intragene_counts = counts[intragene_idxs]
        intergene_rsquared_numerators = rsquared_numerators[intergene_idxs]
        intergene_rsquared_denominators = rsquared_denominators[intergene_idxs]
        intergene_counts = counts[intergene_idxs]
        ld_map[(clade_type, variant_type)] = (distances, intragene_rsquared_numerators, intragene_rsquared_denominators, intragene_counts, intergene_distances, intergene_rsquared_numerators, intergene_rsquared_denominators, intergene_counts, control_numerator, control_denominator, control_count, pi)
    return ld_map
if __name__=='__main__':
    # Driver: computes binned linkage-disequilibrium (sigma^2) statistics per
    # species and writes one gzipped intermediate file per species, which
    # load_ld_map() above reads back.  NOTE(review): python2-era code
    # (xrange/long) and contains leftover debugging (see breakpoint calls below).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true")
    parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000)
    parser.add_argument("--species", help="Name of specific species to run code on", default="all")
    args = parser.parse_args()
    debug = args.debug
    chunk_size = args.chunk_size
    species=args.species
    # Load subject and sample metadata
    sys.stderr.write("Loading sample metadata...\n")
    subject_sample_map = sample_utils.parse_subject_sample_map()
    sys.stderr.write("Done!\n")
    good_species_list = parse_midas_data.parse_good_species_list()
    if species!='all':
        good_species_list = [species]
    if debug and len(good_species_list)>3.5:
        good_species_list = good_species_list[:3]
    #good_species_list=['Bacteroides_vulgatus_57955']
    # better binning scheme (multiple of 3)
    distance_bin_locations = numpy.arange(0,1002)*3.0
    distance_bins = numpy.arange(-1,1002)*3+1.5
    distance_bins[0] = 0 # no such thing as negative distance
    distance_bins[1] = 2.5 # want at least one codon separation
    distance_bins[-1] = 1e09 # catch everything
    # Neighboring-gene offsets (in genes) used for the intergene LD columns.
    neighbor_distances = numpy.array([1,2,3,4,5])
    distance_strs = ["LD_N:LD_D:%g" % d for d in distance_bin_locations[1:-1]] # N=numerator and D=denominator
    distance_strs = distance_strs+["LD_N:LD_D:g%d" % nd for nd in neighbor_distances]+["LD_N:LD_D:intergene"]
    # header of the output file.
    record_strs = [", ".join(['Species', 'CladeType', 'VariantType', 'Pi']+distance_strs)]
    os.system('mkdir -p %s' % ld_directory)
    for species_name in good_species_list:
        sys.stderr.write("Loading haploid samples...\n")
        # Only plot samples above a certain depth threshold that are "haploids"
        snp_samples = diversity_utils.calculate_haploid_samples(species_name, debug=debug)
        if len(snp_samples) < min_sample_size:
            sys.stderr.write("Not enough haploid samples!\n")
            continue
        else:
            sys.stderr.write("Found %d haploid samples!\n" % len(snp_samples))
        sys.stderr.write("Calculating unique hosts...\n")
        # Only consider one sample per person
        snp_samples = snp_samples[sample_utils.calculate_unique_samples(subject_sample_map, sample_list=snp_samples)]
        if len(snp_samples) < min_sample_size:
            sys.stderr.write("Not enough hosts!\n")
            continue
        else:
            sys.stderr.write("Found %d unique hosts!\n" % len(snp_samples))
        # Load divergence matrices
        sys.stderr.write("Loading pre-computed substitution rates for %s...\n" % species_name)
        substitution_rate_map = calculate_substitution_rates.load_substitution_rate_map(species_name)
        sys.stderr.write("Calculating matrix...\n")
        dummy_samples, snp_difference_matrix, snp_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, 'core', allowed_samples=snp_samples)
        snp_samples = numpy.array(dummy_samples)
        # (opportunity==0) guard avoids division by zero.
        substitution_rate = snp_difference_matrix*1.0/(snp_opportunity_matrix+(snp_opportunity_matrix==0))
        sys.stderr.write("Done!\n")
        sys.stderr.write("Clustering samples with low divergence...\n")
        coarse_grained_idxs, coarse_grained_cluster_list = clade_utils.cluster_samples(substitution_rate, min_d=low_divergence_threshold) # NRG: what is this returning?
        coarse_grained_samples = snp_samples[coarse_grained_idxs]
        clade_sets = clade_utils.load_manual_clades(species_name)
        sys.stderr.write("%d samples remaining after clustering!\n" % len(coarse_grained_samples))
        if len(clade_sets)==0:
            continue
        clade_idxss = clade_utils.calculate_clade_idxs_from_clade_sets(coarse_grained_samples, clade_sets)
        clade_sizes = numpy.array([clade_idxs.sum() for clade_idxs in clade_idxss])
        largest_clade_samples = coarse_grained_samples[ clade_idxss[clade_sizes.argmax()] ]
        largest_clade_set = set(largest_clade_samples)
        sys.stderr.write("Top level clades: %d clades, sizes: %s\n" % (len(clade_sets), str(clade_sizes)))
        sys.stderr.write("Max clade size: %d\n" % len(largest_clade_samples))
        snp_samples = coarse_grained_samples
        if len(largest_clade_samples) < min_ld_sample_size:
            sys.stderr.write("Not enough ld samples!\n")
            continue
        else:
            sys.stderr.write("Proceeding with %d coarse-grained samples in largest clade!\n" % len(largest_clade_samples))
        # Analyze SNPs, looping over chunk sizes.
        # Clunky, but necessary to limit memory usage on cluster
        sys.stderr.write("Loading core genes...\n")
        core_genes = parse_midas_data.load_core_genes(species_name)
        sys.stderr.write("Done! Core genome consists of %d genes\n" % len(core_genes))
        # Load SNP information for species_name
        sys.stderr.write("Loading SNPs for %s...\n" % species_name)
        sys.stderr.write("(core genes only...)\n")
        clade_types = ['all','largest_clade']
        variant_types = ['4D','1D']
        # Accumulators keyed by (clade_type, variant_type); summed across chunks.
        binned_rsquared_numerators = {} #<===================== look into
        binned_rsquared_denominators = {} #<===================== look into
        binned_counts = {}
        neighboring_gene_rsquared_numerators = {}
        neighboring_gene_rsquared_denominators = {}
        neighboring_gene_counts = {}
        # total_control=between genes.
        total_control_rsquared_numerators = {}
        total_control_rsquared_denominators = {}
        total_control_counts = {}
        for clade_type in clade_types:
            for variant_type in variant_types:
                binned_rsquared_numerators[(clade_type,variant_type)] = numpy.zeros_like(distance_bin_locations)
                binned_rsquared_denominators[(clade_type,variant_type)] = numpy.zeros_like(distance_bin_locations)
                binned_counts[(clade_type,variant_type)] = numpy.zeros_like(distance_bin_locations)
                # *1.0 forces a float array (zeros_like of int array is int).
                neighboring_gene_rsquared_numerators[(clade_type,variant_type)] = numpy.zeros_like(neighbor_distances)*1.0
                neighboring_gene_rsquared_denominators[ (clade_type,variant_type)] = numpy.zeros_like(neighbor_distances)*1.0
                neighboring_gene_counts[(clade_type,variant_type)] = numpy.zeros_like(neighbor_distances)*1.0
                total_control_rsquared_numerators[(clade_type,variant_type)] = 0
                total_control_rsquared_denominators[(clade_type,variant_type)] = 0
                total_control_counts[(clade_type,variant_type)] = 0
        final_line_number = 0
        # parse_snps returns a negative line number once the file is exhausted.
        while final_line_number >= 0:
            sys.stderr.write("Loading chunk starting @ %d...\n" % final_line_number)
            snp_samples, allele_counts_map, passed_sites_map, final_line_number = parse_midas_data.parse_snps(species_name, debug=debug, allowed_variant_types=allowed_variant_types, allowed_samples=snp_samples,allowed_genes=core_genes, chunk_size=chunk_size,initial_line_number=final_line_number)
            sys.stderr.write("Done! Loaded %d genes\n" % len(allele_counts_map.keys()))
            # NOTE(review): leftover debugging — hard-coded gene id and two
            # input("breakpoint") calls halt the pipeline on every chunk;
            # remove before running unattended.
            print(len(allele_counts_map))
            print(len(allele_counts_map["537011.5.peg.2689"]))
            input("breakpoint")
            input("breakpoint")
            largest_clade_idxs = numpy.array([sample in largest_clade_set for sample in snp_samples])
            sys.stderr.write("Calculating LD...\n")
            for clade_type in clade_types:
                for variant_type in variant_types:
                    for gene_name in allele_counts_map.keys():
                        if gene_name not in core_genes:
                            continue
                        locations = numpy.array([location for chromosome, location in allele_counts_map[gene_name][variant_type]['locations']])*1.0
                        allele_counts = allele_counts_map[gene_name][variant_type]['alleles']
                        if len(allele_counts)==0:
                            # no diversity to look at!
                            continue
                        target_chromosome = allele_counts_map[gene_name][variant_type]['locations'][0][0]
                        if clade_type=='largest_clade':
                            # Now restrict to largest clade
                            allele_counts = allele_counts[:,largest_clade_idxs,:]
                        #compute the distances between all pairs of sites
                        # None in the two index positions results in a transpose of the vector relative to each other
                        # Subtraction between the two vectors results in pairwise subtraction of each element in each vector.
                        distances = numpy.fabs(locations[:,None]-locations[None,:])
                        rsquared_numerators, rsquared_denominators = diversity_utils.calculate_unbiased_sigmasquared(allele_counts, allele_counts)
                        # ^^^^^^^^^^^^^^^^ look into
                        # One list per neighbor offset; filled below, then
                        # converted to arrays for masked accumulation.
                        neighbor_rsquared_numeratorss = [[] for d in neighbor_distances]
                        neighbor_rsquared_denominatorss = [[] for d in neighbor_distances]
                        for neighbor_distance_idx in xrange(0,len(neighbor_distances)):
                            neighbor_distance = neighbor_distances[neighbor_distance_idx]
                            gene_name_items = gene_name.split(".")
                            gene_peg_number = long(gene_name.split(".")[-1])
                            nearest_gene_peg_numbers = [gene_peg_number-neighbor_distance,gene_peg_number+neighbor_distance]
                            neighboring_genes = [".".join(gene_name_items[:-1]+[str(n)]) for n in nearest_gene_peg_numbers]
                            for neighboring_gene_name in neighboring_genes:
                                # first make sure it's a real gene
                                if neighboring_gene_name not in allele_counts_map:
                                    continue
                                if neighboring_gene_name not in core_genes:
                                    continue
                                neighboring_allele_counts = allele_counts_map[neighboring_gene_name][variant_type]['alleles']
                                # then make sure it has some variants
                                if len(neighboring_allele_counts)==0:
                                    continue
                                neighboring_chromosome = allele_counts_map[neighboring_gene_name][variant_type]['locations'][0][0]
                                if neighboring_chromosome!=target_chromosome:
                                    continue
                                if clade_type=='largest_clade':
                                    # Now restrict to largest clade
                                    neighboring_allele_counts = neighboring_allele_counts[:,largest_clade_idxs,:]
                                chunk_rsquared_numerators, chunk_rsquared_denominators = diversity_utils.calculate_unbiased_sigmasquared(allele_counts, neighboring_allele_counts)
                                neighbor_rsquared_numeratorss[ neighbor_distance_idx].extend( chunk_rsquared_numerators.flatten() )
                                neighbor_rsquared_denominatorss[ neighbor_distance_idx].extend( chunk_rsquared_denominators.flatten() )
                            neighbor_rsquared_numeratorss[ neighbor_distance_idx] = numpy.array( neighbor_rsquared_numeratorss[neighbor_distance_idx] )
                            neighbor_rsquared_denominatorss[ neighbor_distance_idx] = numpy.array( neighbor_rsquared_denominatorss[neighbor_distance_idx] )
                        # pick a random gene somewhere else as a control
                        # 10 to 1 control to regular
                        control_rsquared_numerators = []
                        control_rsquared_denominators = []
                        gene_peg_number = long(gene_name.split(".")[-1])
                        for control_idx in xrange(0,10):
                            control_gene_name = gene_name
                            control_allele_counts = []
                            # get the right gene name
                            # NOTE(review): rejection sampling; loops forever if
                            # no acceptable control gene exists in this chunk.
                            while True:
                                control_gene_name = choice(allele_counts_map.keys())
                                if control_gene_name not in core_genes:
                                    continue
                                control_gene_peg_number = long(control_gene_name.split(".")[-1])
                                control_allele_counts = allele_counts_map[control_gene_name][variant_type]['alleles']
                                if len(control_allele_counts)==0:
                                    continue
                                # make sure you don't have one too close by!
                                if (fabs(control_gene_peg_number - gene_peg_number) < 5.5):
                                    continue
                                if clade_type=='largest_clade':
                                    # Now restrict to largest clade
                                    control_allele_counts = control_allele_counts[:,largest_clade_idxs,:]
                                break
                            control_gene_rsquared_numerators, control_gene_rsquared_denominators = diversity_utils.calculate_unbiased_sigmasquared(allele_counts, control_allele_counts)
                            control_rsquared_numerators.extend( control_gene_rsquared_numerators.flatten() )
                            control_rsquared_denominators.extend( control_gene_rsquared_denominators.flatten() )
                        control_rsquared_numerators = numpy.array( control_rsquared_numerators )
                        control_rsquared_denominators = numpy.array( control_rsquared_denominators )
                        # get the indices of the upper diagonal of the distance matrix
                        # numpy triu_indices returns upper diagnonal including diagonal
                        # the 1 inside the function excludes diagonal. Diagnonal has distance of zero.
                        desired_idxs = numpy.triu_indices(distances.shape[0],1)
                        #print distances.shape, rsquared_numerators.shape
                        # fetch the distances and rsquared vals corresponding to the upper diagonal.
                        distances = distances[desired_idxs]
                        rsquared_numerators = rsquared_numerators[desired_idxs]
                        rsquared_denominators = rsquared_denominators[desired_idxs]
                        # fetch entries where denominator != 0 (remember, denominator=pa*(1-pa)*pb*(1-pb). If zero, then at least one site is invariant)
                        distances = distances[rsquared_denominators>1e-09]
                        rsquared_numerators = rsquared_numerators[rsquared_denominators>1e-09]
                        rsquared_denominators = rsquared_denominators[rsquared_denominators>1e-09]
                        if len(distances) == 0:
                            continue
                        # numpy.digitize: For each distance value, return the bin index it belongs to in distances_bins.
                        bin_idxs = numpy.digitize(distances,bins=distance_bins)-1
                        for i in xrange(0,len(bin_idxs)):
                            binned_counts[(clade_type,variant_type)][bin_idxs[i]] += 1
                            binned_rsquared_numerators[(clade_type,variant_type)][bin_idxs[i]] += rsquared_numerators[i]
                            binned_rsquared_denominators[(clade_type,variant_type)][bin_idxs[i]] += rsquared_denominators[i]
                        for i in xrange(0,len(neighbor_distances)):
                            good_idxs = (neighbor_rsquared_denominatorss[i]>1e-09)
                            neighboring_gene_counts[(clade_type,variant_type)][i] += good_idxs.sum()
                            neighboring_gene_rsquared_numerators[ (clade_type,variant_type)][i] += neighbor_rsquared_numeratorss[i][good_idxs].sum()
                            neighboring_gene_rsquared_denominators[ (clade_type,variant_type)][i] += neighbor_rsquared_denominatorss[i][good_idxs].sum()
                        total_control_counts[(clade_type,variant_type)] += (control_rsquared_denominators>1e-09).sum()
                        total_control_rsquared_numerators[(clade_type,variant_type)] += control_rsquared_numerators[control_rsquared_denominators>1e-09].sum()
                        total_control_rsquared_denominators[(clade_type,variant_type)] += control_rsquared_denominators[control_rsquared_denominators>1e-09].sum()
        # Summarize the accumulated statistics into one output record per
        # (clade_type, variant_type).
        for clade_type in clade_types:
            for variant_type in variant_types:
                desired_samples = snp_samples
                if clade_type=='largest_clade':
                    desired_samples = largest_clade_samples
                # Calculate pi!
                dummy_samples, snp_difference_matrix, snp_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, variant_type, allowed_samples=desired_samples)
                substitution_rate = snp_difference_matrix*1.0/(snp_opportunity_matrix+(snp_opportunity_matrix==0))
                iu = numpy.triu_indices(substitution_rate.shape[0], 1)
                pi = numpy.median(substitution_rate[iu])
                binned_rsquareds = binned_rsquared_numerators[(clade_type,variant_type)]*1.0/(binned_rsquared_denominators[(clade_type,variant_type)] + (binned_rsquared_denominators[(clade_type,variant_type)] == 0))
                control_rsquareds = total_control_rsquared_numerators[(clade_type,variant_type)]*1.0/(total_control_rsquared_denominators[(clade_type,variant_type)]+(total_control_rsquared_denominators[(clade_type,variant_type)]==0))
                # NOTE(review): slicing the result of zip(...)[1:-1] only works
                # in python2 (zip returns a list there).
                rsquared_strs = ["%g:%g:%d" % (rsquared_numerator, rsquared_denominator, count) for rsquared_numerator, rsquared_denominator, count in zip(binned_rsquared_numerators[(clade_type,variant_type)], binned_rsquared_denominators[(clade_type,variant_type)], binned_counts[(clade_type,variant_type)])[1:-1]]
                gene_rsquared_strs = ["%g:%g:%d" % (rsquared_numerator, rsquared_denominator, count) for rsquared_numerator, rsquared_denominator, count in zip(neighboring_gene_rsquared_numerators[(clade_type,variant_type)], neighboring_gene_rsquared_denominators[(clade_type,variant_type)], neighboring_gene_counts[(clade_type,variant_type)])]
                control_rsquared_str = "%g:%g:%d" % (total_control_rsquared_numerators[(clade_type,variant_type)], total_control_rsquared_denominators[(clade_type,variant_type)], total_control_counts[(clade_type,variant_type)])
                pi_str = str(pi)
                record_str = ", ".join([species_name, clade_type, variant_type, pi_str]+rsquared_strs+gene_rsquared_strs+[control_rsquared_str])
                record_strs.append(record_str)
        sys.stderr.write("Done with %s!\n" % species_name)
        sys.stderr.write("Writing intermediate file...\n")
        # NOTE(review): record_strs is never reset between species, so each
        # species' file also contains the records of all earlier species;
        # load_ld_map() compensates by filtering on the species column.
        intermediate_filename = intermediate_filename_template % (ld_directory, species_name)
        file = gzip.open(intermediate_filename,"w")
        record_str = "\n".join(record_strs)
        file.write(record_str)
        file.close()
        sys.stderr.write("Done!\n")
    sys.stderr.write("Done looping over species!\n")
    sys.stderr.write("Testing loading...\n")
    ld_map = load_ld_map(good_species_list[0])
    sys.stderr.write("Done!\n")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Copyright (c) 2013-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import datetime
import pathlib
import shutil
from . import darwincore_utils
from . import darwincore_meta_xml
from . import darwincore_eml_xml
class DarwinCoreZip(object):
    """Stages DarwinCore-Archive (DwC-A) part files and packs them into a zip.

    The archive parts (event.txt, occurrence.txt,
    extendedmeasurementorfact.txt, meta.xml, eml.xml) are written as text
    files into a 'TMP_DarwinCore' staging directory next to the target zip
    path, then zipped via create_darwingcore_zip_file().  All parts are
    written with CRLF line endings and the configured encoding.
    """

    # Staged part files removed by remove_tmp_files().
    _TMP_FILE_NAMES = ['event.txt', 'occurrence.txt',
                       'extendedmeasurementorfact.txt', 'meta.xml', 'eml.xml']

    def __init__(self, dwca_file_path,
                 encoding='utf-8'):
        """
        Args:
            dwca_file_path: target path of the DwC-A zip file.
            encoding: text encoding used for every staged part file.
        """
        self.dwca_file_path = dwca_file_path
        self.dwca_tmp_dir = None  # Set by create_tmp_dir().
        self.encoding = encoding

    def create_tmp_dir(self):
        """Create (or empty) the TMP_DarwinCore staging dir next to the zip."""
        target_zip_path = pathlib.Path(self.dwca_file_path).parents[0]  # Parent dir.
        self.dwca_tmp_dir = pathlib.Path(target_zip_path.as_posix(), 'TMP_DarwinCore')
        if not self.dwca_tmp_dir.exists():
            self.dwca_tmp_dir.mkdir()
        else:
            # Remove content left over from a previous run.
            self.remove_tmp_files()

    def remove_tmp_files(self):
        """Best-effort removal of the staged part files."""
        try:
            # Remove used files first.
            for file_name in self._TMP_FILE_NAMES:
                file_path = pathlib.Path(self.dwca_tmp_dir, file_name)
                if file_path.exists():
                    file_path.unlink()
        except Exception as e:
            print('DEBUG: Failed to remove TMP_DarwinCore files: ' + str(e))

    def remove_tmp_dir(self):
        """Best-effort removal of the staging directory and its files."""
        try:
            self.remove_tmp_files()
            pathlib.Path(self.dwca_tmp_dir).rmdir()
        except Exception as e:
            print('DEBUG: Failed to remove TMP_DarwinCore dir: ' + str(e))

    def _write_header(self, file_name, header):
        # (Re)create a staged file with a tab-separated header row.
        file_path = pathlib.Path(self.dwca_tmp_dir, file_name)
        with file_path.open('w', encoding=self.encoding, newline='\r\n') as file_w:
            file_w.write('\t'.join(header) + '\n')

    def _append_rows(self, file_name, rows):
        # Append tab-separated data rows to a staged file.
        # Bug fix: newline='\r\n' is now passed here too — previously the
        # header got CRLF while appended rows used the platform default,
        # producing mixed line endings on POSIX systems.
        file_path = pathlib.Path(self.dwca_tmp_dir, file_name)
        with file_path.open('a', encoding=self.encoding, newline='\r\n') as file_w:
            for row in rows:
                file_w.write('\t'.join(row) + '\n')

    def _append_lines(self, file_name, lines):
        # Append raw text lines (xml content) to a staged file, CRLF-terminated.
        file_path = pathlib.Path(self.dwca_tmp_dir, file_name)
        with file_path.open('a', encoding=self.encoding, newline='\r\n') as file_w:
            for row in lines:
                file_w.write(row + '\n')

    def write_event_header(self, header):
        """Start event.txt with its tab-separated column header."""
        if self.dwca_tmp_dir:
            self._write_header('event.txt', header)

    def write_event_rows(self, rows):
        """Append event records (iterable of row lists) to event.txt."""
        if self.dwca_tmp_dir:
            self._append_rows('event.txt', rows)

    def write_occurrence_header(self, header):
        """Start occurrence.txt with its tab-separated column header."""
        if self.dwca_tmp_dir:
            self._write_header('occurrence.txt', header)

    def write_occurrence_rows(self, rows):
        """Append occurrence records to occurrence.txt."""
        if self.dwca_tmp_dir:
            self._append_rows('occurrence.txt', rows)

    def write_measurementorfact_header(self, header):
        """Start extendedmeasurementorfact.txt with its column header."""
        if self.dwca_tmp_dir:
            self._write_header('extendedmeasurementorfact.txt', header)

    def write_measurementorfact_rows(self, rows):
        """Append measurement-or-fact records to extendedmeasurementorfact.txt."""
        if self.dwca_tmp_dir:
            self._append_rows('extendedmeasurementorfact.txt', rows)

    def write_dwca_eml(self, eml_xml_content):
        """Append the EML metadata document (iterable of lines) to eml.xml."""
        if self.dwca_tmp_dir:
            self._append_lines('eml.xml', eml_xml_content)

    def write_dwca_meta(self, dwca_meta_xml_rows):
        """Append the meta.xml archive descriptor (iterable of lines)."""
        if self.dwca_tmp_dir:
            self._append_lines('meta.xml', dwca_meta_xml_rows)

    def create_darwingcore_zip_file(self, out_file_path='dwca_tmp.zip'):
        """Zip the staging directory's contents into ``out_file_path``."""
        shutil.make_archive(out_file_path.replace('.zip', ''), 'zip',
                            self.dwca_tmp_dir.as_posix())
|
import csv
import json
from collections import defaultdict
from heapq import nlargest, nsmallest
from typing import List, Tuple, Dict
from pathlib import Path
import shutil
import random
import colorsys
import numpy as np
from io import StringIO
from math import isclose
# Column layout of a prediction row (see the slicing in make_vott_output:
# columns 2..7 hold x1, x2, y1, y2, height, width).
CONFIDENCE_LOCATION = -1      # last column; per-image confidence (presumably — confirm against csv producer)
TAG_CONFIDENCE_LOCATION = -2  # second-to-last column; per-box confidence used for sorting
FILENAME_LOCATION = 0
FOLDER_LOCATION = 8
HEIGHT_LOCATION = 6
WIDTH_LOCATION = 7
TAG_LOCATION = 1
TAG_STARTING_LOCATION = 2
# Should be equal to width_location
TAG_ENDING_LOCATION = 7
def make_vott_output(all_predictions, output_location, user_folders, image_loc, blob_credentials = None,
        tag_names: List[str] = ["stamp"], tag_colors: List[str] = "#ed1010", max_tags_per_pixel=None):
    """Fetch the predicted images and write a VOTT-format .json next to them.

    Args:
        all_predictions: list with one entry per image; each entry is a list
            of prediction rows laid out per the *_LOCATION constants above.
            NOTE: this list is sorted in place below.
        output_location: base output directory; images go into a per-folder
            (or "Images") subdirectory, and "<subdir>.json" is written beside it.
        user_folders: when truthy, group images by their source folder name.
        image_loc: source directory (or blob path prefix) for the images.
        blob_credentials: optional (blob_service, container_name) tuple; when
            given, images are downloaded from blob storage instead of copied
            from disk.
        tag_names / tag_colors: tag metadata embedded in the VOTT json.
            NOTE(review): tag_colors' default is a single string, not a list —
            callers such as create_vott_json pass a proper list.
        max_tags_per_pixel: when set, lower-confidence boxes are dropped once
            every pixel they cover already has this many tags.
    """
    if max_tags_per_pixel is not None:
        max_tags_per_pixel = int(max_tags_per_pixel)
    if user_folders:
        # All predictions are assumed to share the first entry's folder.
        folder_name = Path(all_predictions[0][0][FOLDER_LOCATION]).name
        output_location = Path(output_location)/folder_name
    else:
        output_location = Path(output_location)/"Images"
    output_location.mkdir(parents=True, exist_ok=True)
    using_blob_storage = blob_credentials is not None
    if using_blob_storage:
        blob_service, container_name = blob_credentials
    else:
        image_loc = Path(image_loc)
    if user_folders:
        # Extend the source location with the folder name (string join for
        # blob paths, Path join for local paths).
        if using_blob_storage:
            if image_loc == "":
                image_loc = Path(all_predictions[0][0][FOLDER_LOCATION]).name
            else:
                image_loc = image_loc + "/" + Path(all_predictions[0][0][FOLDER_LOCATION]).name
        else:
            image_loc = image_loc/all_predictions[0][0][FOLDER_LOCATION]
    # Copy/download every image into the output directory.
    for prediction in all_predictions:
        if using_blob_storage:
            if image_loc:
                print(image_loc + "/" + prediction[0][FILENAME_LOCATION])
                blob_service.get_blob_to_path(container_name, image_loc + "/" + prediction[0][FILENAME_LOCATION],
                    str(output_location/prediction[0][FILENAME_LOCATION]))
            else:
                print(prediction[0][FILENAME_LOCATION])
                blob_service.get_blob_to_path(container_name, prediction[0][FILENAME_LOCATION],
                    str(output_location/prediction[0][FILENAME_LOCATION]))
        else:
            shutil.copy(str(image_loc/prediction[0][FILENAME_LOCATION]), str(output_location))
    # In-place sort so json frame indices follow filename order.
    all_predictions.sort(key=lambda x: x[0][FILENAME_LOCATION])
    dirjson = {}
    dirjson["frames"] = {}
    for i, predictions in enumerate(all_predictions):
        all_frames = []
        # (x1, x2, y1, y2, height, width) -> list of tags on that box.
        set_predictions = defaultdict(list)
        if max_tags_per_pixel is None:
            for prediction in predictions:
                x_1, x_2, y_1, y_2, height, width = map(float, prediction[TAG_STARTING_LOCATION:TAG_ENDING_LOCATION+1])
                # Skip background ("NULL") rows and empty boxes.
                if prediction[TAG_LOCATION]!="NULL" and (x_1,x_2,y_1,y_2)!=(0,0,0,0):
                    # Coordinates are stored normalized; scale to pixels.
                    x_1 = int(x_1*width)
                    x_2 = int(x_2*width)
                    y_1 = int(y_1*height)
                    y_2 = int(y_2*height)
                    set_predictions[(x_1, x_2, y_1, y_2, height, width)].append(prediction[TAG_LOCATION])
        else:
            if predictions:
                # Per-pixel tag counter; highest-confidence boxes claim pixels first.
                num_tags = np.zeros((int(predictions[0][HEIGHT_LOCATION]),int(predictions[0][WIDTH_LOCATION])), dtype=int)
                for prediction in sorted(predictions, key=lambda x: float(x[TAG_CONFIDENCE_LOCATION]), reverse=True):
                    x_1, x_2, y_1, y_2, height, width = map(float, prediction[TAG_STARTING_LOCATION:TAG_ENDING_LOCATION+1])
                    if prediction[TAG_LOCATION]!="NULL" and (x_1,x_2,y_1,y_2)!=(0,0,0,0):
                        x_1 = int(x_1*width)
                        x_2 = int(x_2*width)
                        y_1 = int(y_1*height)
                        y_2 = int(y_2*height)
                        if np.amax(num_tags[y_1:y_2, x_1:x_2])<max_tags_per_pixel:
                            num_tags[y_1:y_2, x_1:x_2]+=1
                            set_predictions[(x_1, x_2, y_1, y_2, height, width)].append(prediction[TAG_LOCATION])
        for j,(coordinates, tags) in enumerate(set_predictions.items(), 1):
            # filename,tag,x1,x2,y1,y2,true_height,true_width,image_directory
            x_1, x_2, y_1, y_2, height, width = coordinates
            curframe = {}
            curframe["x1"] = x_1
            curframe["y1"] = y_1
            curframe["x2"] = x_2
            curframe["y2"] = y_2
            curframe["id"] = j
            curframe["width"] = width
            curframe["height"] = height
            curframe["type"] = "Rectangle"
            curframe["tags"] = tags
            curframe["name"] = j
            all_frames.append(curframe)
        dirjson["frames"][i] = all_frames
    dirjson["framerate"] = "1"
    dirjson["inputTags"] = ",".join(tag_names)
    dirjson["suggestiontype"] = "track"
    dirjson["scd"] = False
    dirjson["visitedFrames"] = list(range(len(all_predictions)))
    dirjson["tag_colors"] = tag_colors
    with open(str(output_location)+".json","w") as json_out:
        json.dump(dirjson, json_out, sort_keys = True)
def select_rows(arr_image_data, num_rows, is_largest):
    """Return the num_rows entries with the largest (or smallest) confidence.

    Confidence is read from each entry's first row at CONFIDENCE_LOCATION;
    nlargest/nsmallest return the entries in sorted order.
    """
    confidence_of = lambda entry: float(entry[0][CONFIDENCE_LOCATION])
    picker = nlargest if is_largest else nsmallest
    return picker(num_rows, arr_image_data, key=confidence_of)
def prepare_per_class_dict(all_files_per_folder, class_balances_cnt, tag_names):
    """Group per-image row lists by the tag classes present in them.

    Each image's row list is appended under every class (among the first
    class_balances_cnt entries of tag_names) that appears in its
    TAG_LOCATION column, so one image can show up under several classes.
    """
    per_class = defaultdict(list)
    for image_rows in all_files_per_folder.values():
        present_tags = np.array(image_rows)[:, TAG_LOCATION]
        for class_name in tag_names[:class_balances_cnt]:
            if class_name in present_tags:
                per_class[class_name].append(image_rows)
    return per_class
def parse_class_balance_setting(config_value, expected_cnt):
    """Parse the ideal-class-balance config string into a numpy float array.

    Args:
        config_value: comma-separated fractions, e.g. "0.6,0.3,0.1", or None.
        expected_cnt: required number of entries (one per tag class).

    Returns:
        The parsed numpy array when it has exactly expected_cnt valid
        entries summing to 1 (within 0.01 absolute tolerance); otherwise
        None (with a diagnostic printed).
    """
    print("Ideal class balance (from config):", config_value)
    if config_value is None:
        return None
    # loose=True turns unparseable fields into NaN instead of raising.
    arr_np = np.genfromtxt(StringIO(config_value), dtype=float, delimiter=',', loose=True)
    # Bug fix: the original called np.isnan(arr_np.any()), which applies
    # isnan to the array's any() truth value (always a bool, never NaN)
    # instead of checking the elements themselves.
    if np.isnan(arr_np).any():
        print("Found NaNs in ideal balance settings:", config_value)
        return None
    if arr_np.size != expected_cnt:
        print("Size of ideal balance settings {0} is {1}. Expected {2}".format(arr_np.size, arr_np, expected_cnt))
        return None
    s = np.sum(arr_np)
    if isclose(s, 1, abs_tol=0.01):
        return arr_np
    print("Sum of balance settings {0} should add up to 1: {1}".format(config_value, s) )
    return None
def get_top_rows(file_location, num_rows, user_folders, pick_max, tag_names, config_class_balance):
    """Move the next batch of images from totag.csv into tagging.csv.

    Per source folder, selects the num_rows highest- (pick_max) or
    lowest-confidence images — optionally balancing classes according to
    config_class_balance — then rewrites totag.csv without the selected
    rows, appends them to tagging.csv and returns the selected per-image
    row lists.
    """
    #Add class for background
    if "NULL" not in tag_names:
        tag_names = tag_names + ["NULL"]
    ideal_class_balance = parse_class_balance_setting(config_class_balance, len(tag_names))
    with (file_location/"totag.csv").open(mode='r') as file:
        reader = csv.reader(file)
        header = next(reader)
        csv_list = list(reader)
        # folder -> filename -> list of prediction rows for that image.
        all_files = defaultdict(lambda: defaultdict(list))
        for row in csv_list:
            all_files[row[FOLDER_LOCATION]][row[0]].append(row)
        all_lists = []
        class_balances_cnt = 1
        if ideal_class_balance is not None:
            class_balances_cnt = len(ideal_class_balance)
        for folder_name in all_files:
            if ideal_class_balance is not None:
                all_files_per_class = prepare_per_class_dict(all_files[folder_name], class_balances_cnt, tag_names)
                for i in range(class_balances_cnt):
                    # Per-class quota derived from the requested balance.
                    num_rows_i = round(num_rows * float(ideal_class_balance[i]))
                    class_i = tag_names[i]
                    top = select_rows(all_files_per_class[class_i], num_rows_i, is_largest = pick_max)
                    # drop values we selected from the dict
                    # the same image may have object from diff classes
                    for j in range(class_balances_cnt):
                        class_j = tag_names[j]
                        all_files_per_class[class_j] = [v for v in all_files_per_class[class_j]
                                                        if v not in top]
                    all_lists = all_lists + top
            else:
                top = select_rows(all_files[folder_name].values(), num_rows, is_largest = pick_max)
                all_lists = all_lists + top
    # Filenames of every selected image (row[0][0] is the first row's filename).
    tagging_files = {row[0][0] for row in all_lists }
    file_exists = (file_location/"tagging.csv").is_file()
    with (file_location/"totag.csv").open(mode='w', newline='') as untagged, (file_location/"tagging.csv").open(mode='a', newline='') as tagging:
        untagged_writer, tagging_writer = csv.writer(untagged), csv.writer(tagging)
        untagged_writer.writerow(header)
        if not file_exists:
            tagging_writer.writerow(header)
        # Route every original row to tagging.csv (selected) or back to totag.csv.
        for row in csv_list:
            (tagging_writer if row[0] in tagging_files else untagged_writer).writerow(row)
    return all_lists
def create_vott_json(file_location, num_rows, user_folders, pick_max, image_loc, output_location, blob_credentials=None,
        tag_names = ["stamp"], max_tags_per_pixel=None, config_class_balance=None, colors = None):
    """Select the next rows to tag and emit a VOTT json (plus their images).

    Pulls the top rows via get_top_rows() and hands them to
    make_vott_output().  When ``colors`` is None, a random color is
    generated per tag in HLS space, biased toward high luminosity (>=.8)
    and saturation (>=.75) so the tags stand out on the picture.
    """
    all_rows = get_top_rows(file_location, num_rows, user_folders, pick_max, tag_names, config_class_balance)
    if colors is None:
        # Bug fix: scale channels by 255, not 256 — colorsys can return
        # exactly 1.0, and int(256*1.0)=256 does not fit '%02x', producing
        # malformed 7-character colors like '#100ffaa'.
        colors = []
        for _ in tag_names:
            hue = random.random()
            lightness = 0.8 + random.random() / 5.0
            saturation = 0.75 + random.random() / 4.0
            r, g, b = colorsys.hls_to_rgb(hue, lightness, saturation)
            colors.append('#%02x%02x%02x' % (int(255 * r), int(255 * g), int(255 * b)))
    make_vott_output(all_rows, output_location, user_folders, image_loc, blob_credentials=blob_credentials,
        tag_names=tag_names, tag_colors=colors, max_tags_per_pixel=max_tags_per_pixel)
if __name__ == "__main__":
    #create_vott_json(r"C:\Users\t-yapand\Desktop\GAUCC1_1533070087147.csv",20, True, r"C:\Users\t-yapand\Desktop\GAUCC", r"C:\Users\t-yapand\Desktop\Output\GAUCC")
    import re
    import time
    from azure.storage.blob import BlockBlobService
    import sys
    import os
    # Allow us to import utils
    config_dir = str(Path.cwd().parent / "utils")
    if config_dir not in sys.path:
        sys.path.append(config_dir)
    from config import Config
    # Usage: <script> <num_images> <config_file>
    if len(sys.argv)<3:
        raise ValueError("Need to specify number of images (first arg) and config file (second arg)")
    config_file = Config.parse_file(sys.argv[2])
    block_blob_service = BlockBlobService(account_name=config_file["AZURE_STORAGE_ACCOUNT"], account_key=config_file["AZURE_STORAGE_KEY"])
    container_name = config_file["label_container_name"]
    # Start from a clean local tagging directory each run.
    shutil.rmtree(config_file["tagging_location"], ignore_errors=True)
    csv_file_loc = Path(config_file["tagging_location"])
    #csv_file_loc = #Path("test_totag.csv")
    csv_file_loc.mkdir(parents=True, exist_ok=True)
    # Download the most recent totag_*.csv snapshot from the label container.
    file_date = [(blob.name, blob.properties.last_modified) for blob in block_blob_service.list_blobs(container_name) if re.match(r'totag_(.*).csv', blob.name)]
    block_blob_service.get_blob_to_path(container_name, max(file_date, key=lambda x:x[1])[0], str(csv_file_loc/"totag.csv"))
    container_name = config_file["image_container_name"]
    # The in-progress tagging_*.csv lives in the image container; it may be absent.
    file_date = [(blob.name, blob.properties.last_modified) for blob in block_blob_service.list_blobs(container_name) if re.match(r'tagging_(.*).csv', blob.name)]
    ideal_class_balance = config_file["ideal_class_balance"].split(",")
    if file_date:
        block_blob_service.get_blob_to_path(container_name, max(file_date, key=lambda x:x[1])[0], str(csv_file_loc/"tagging.csv"))
    create_vott_json(csv_file_loc, int(sys.argv[1]), config_file["user_folders"]=="True", config_file["pick_max"]=="True", "",
                        config_file["tagging_location"], blob_credentials=(block_blob_service, container_name),
                        tag_names=config_file["classes"].split(","),
                        max_tags_per_pixel=config_file.get("max_tags_per_pixel"),
                        config_class_balance =config_file.get("ideal_class_balance"))
    container_name = config_file["label_container_name"]
    # Publish timestamped copies of both CSVs back to the label container.
    block_blob_service.create_blob_from_path(container_name, "{}_{}.{}".format("tagging",int(time.time() * 1000),"csv"), str(csv_file_loc/"tagging.csv"))
    block_blob_service.create_blob_from_path(container_name, "{}_{}.{}".format("totag",int(time.time() * 1000),"csv"), str(csv_file_loc/"totag.csv"))
|
<filename>pyaedt/modules/LayerStackup.py
"""
This module contains these classes: `Layer` and `Layers`.
This module provides all layer stackup functionalities for the Circuit and HFSS 3D Layout tools.
"""
from __future__ import absolute_import # noreorder
from pyaedt.generic.general_methods import pyaedt_function_handler
@pyaedt_function_handler()
def _str2bool(str0):
    """Convert a string to a Boolean value.
    Parameters
    ----------
    str0 : str
        String to convert.
    Returns
    -------
    bool
        ``True``/``False`` for a recognized value, the empty string
        otherwise (legacy fallback kept for backward compatibility).
    """
    # Bug fix: the original compared "false" case-insensitively but "true"
    # exactly, so "True"/"TRUE" fell through and returned "".
    if str0.lower() == "false":
        return False
    elif str0.lower() == "true":
        return True
    else:
        return ""
def _conv_number(number, typen=float):
"""Convert a number.
Parameters
----------
number
Number represented a float.
typen :
The default is ``float``.
Returns
-------
"""
if typen is float:
try:
return float(number)
except:
return number
elif typen is int:
try:
return int(number)
except:
return number
@pyaedt_function_handler()
def _getIfromRGB(rgb):
    """Pack an ``[r, g, b]`` triple into the single integer AEDT expects.
    Parameters
    ----------
    rgb : list or tuple
        Three color components; index 0 is red, 1 green, 2 blue.
    Returns
    -------
    int
        Packed color value (blue component ends up in the low byte).
    """
    blue_component = rgb[0]
    green_component = rgb[1]
    red_component = rgb[2]
    return (red_component << 16) + (green_component << 8) + blue_component
class Layer(object):
    """Manages the stackup layer.
    Parameters
    ----------
    app : :class:`pyaedt.modules.LayerStackup.Layers`
    layertype : string, optional
        The default is ``"signal"``.
    negative : bool, optional
        Whether the geometry on the layer is cut away
        from the layer. The default is ``False``.
    Examples
    --------
    >>> from pyaedt import Hfss3dLayout
    >>> app = Hfss3dLayout()
    >>> layers = app.modeler.layers["Top"]
    """
    def __init__(self, app, layertype="signal", negative=False):
        # NOTE(review): both length units are read from ``app.LengthUnit``;
        # ``Layers`` also exposes ``lengthUnitRough`` — confirm whether
        # ``LengthUnitRough`` was meant to come from there instead.
        self.LengthUnit = app.LengthUnit
        self.LengthUnitRough = app.LengthUnit
        self._layers = app
        self.name = None
        self.type = layertype
        self.id = 0
        # Packed color integer (see ``_getIfromRGB``); 8026109 is the default.
        self.color = 8026109
        self.transparency = 60
        # Per-object-type visibility toggles, combined into ``visflag``.
        self.IsVisible = True
        self.IsVisibleShape = True
        self.IsVisiblePath = True
        self.IsVisiblePad = True
        self.IsVisibleHole = True
        self.IsVisibleComponent = True
        self.IsMeshBackgroundMaterial = True
        self.IsMeshOverlay = True
        self.locked = False
        self.topbottom = "neither"
        self.pattern = 1
        self.drawoverride = 0
        self.thickness = 0
        self.lowerelevation = 0
        # Roughness values; ``toprounghenss`` is a historical misspelling
        # kept as-is because external code may reference it.
        self.roughness = 0
        self.botroughness = 0
        self.toprounghenss = 0
        self.sideroughness = 0
        self.material = "copper"
        self.fillmaterial = "FR4_epoxy"
        self.index = 1
        self.IsNegative = negative
        # Etch option
        self.useetch = False
        self.etch = 0
        # Rough option
        self.user = False
        self.RMdl = "Huray"
        self.NR = 0.5
        self.HRatio = 2.9
        self.BRMdl = "Huray"
        self.BNR = 0.5
        self.BHRatio = 2.9
        self.SRMdl = "Huray"
        self.SNR = 0.5
        self.SHRatio = 2.9
        # Solver option
        self.usp = False
        self.hfssSp = {"si": True, "dt": 0, "dtv": 0.1}
        self.planaremSp = {"ifg": False, "vly": False}
        self.zones = []
    @property
    def oeditor(self):
        """Oeditor Module."""
        return self._layers.oeditor
    @property
    def visflag(self):
        """Visibility flag for objects on the layer.
        Bitmask built from the individual toggles: 1=shape, 2=path, 4=pad,
        8=hole, 16=component, 32=mesh overlay, 64=mesh background material.
        A fully hidden layer reports 0.
        """
        visflag = 0
        if not self.IsVisible:
            visflag = 0
        else:
            if self.IsMeshBackgroundMaterial:
                visflag += 64
            if self.IsMeshOverlay:
                visflag += 32
            if self.IsVisibleShape:
                visflag += 1
            if self.IsVisiblePath:
                visflag += 2
            if self.IsVisiblePad:
                visflag += 4
            if self.IsVisibleHole:
                visflag += 8
            if self.IsVisibleComponent:
                visflag += 16
        return visflag
    @pyaedt_function_handler()
    def set_layer_color(self, r, g, b):
        """Update the color of the layer.
        Parameters
        ----------
        r : int
            Red color value.
        g : int
            Green color value.
        b : int
            Blue color value.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oEditor.ChangeLayer
        """
        rgb = [r, g, b]
        self.color = _getIfromRGB(rgb)
        # Push the change to AEDT immediately.
        self.update_stackup_layer()
        return True
    @pyaedt_function_handler()
    def create_stackup_layer(self):
        """Create a stackup layer.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oEditor.AddStackupLayer
        """
        # Remove any existing layer of the same name before (re-)creating it.
        self.remove_stackup_layer()
        if self.type == "signal":
            self.oeditor.AddStackupLayer(
                [
                    "NAME:stackup layer",
                    "Name:=",
                    self.name,
                    "Type:=",
                    self.type,
                    "Top Bottom:=",
                    self.topbottom,
                    "Color:=",
                    self.color,
                    "Transparency:=",
                    self.transparency,
                    "Pattern:=",
                    self.pattern,
                    "VisFlag:=",
                    self.visflag,
                    "Locked:=",
                    self.locked,
                    "DrawOverride:=",
                    self.drawoverride,
                    [
                        "NAME:Sublayer",
                        "Thickness:=",
                        self.thickness,
                        "LowerElevation:=",
                        self.lowerelevation,
                        "Roughness:=",
                        self._arg_with_dim(self.roughness, self.LengthUnitRough),
                        "BotRoughness:=",
                        self._arg_with_dim(self.botroughness, self.LengthUnitRough),
                        # NOTE(review): "SideRoughness" is fed from
                        # ``toprounghenss`` while ``sideroughness`` is never
                        # used anywhere in this class — confirm intent.
                        "SideRoughness:=",
                        self._arg_with_dim(self.toprounghenss, self.LengthUnitRough),
                        "Material:=",
                        self.material.lower(),
                        "FillMaterial:=",
                        self.fillmaterial.lower(),
                    ],
                    "Neg:=",
                    self.IsNegative,
                    "Usp:=",
                    self.usp,
                    [
                        "NAME:Sp",
                        "Sn:=",
                        "HFSS",
                        "Sv:=",
                        "so(si="
                        + str(self.hfssSp["si"]).lower()
                        + " , dt="
                        + str(self.hfssSp["dt"])
                        + ", dtv='"
                        + self._arg_with_dim(self.hfssSp["dtv"])
                        + "')",
                    ],
                    [
                        "NAME:Sp",
                        "Sn:=",
                        "PlanarEM",
                        "Sv:=",
                        "so(ifg="
                        + str(self.planaremSp["ifg"]).lower()
                        + ", vly="
                        + str(self.planaremSp["vly"]).lower()
                        + ")",
                    ],
                    "Etch:=",
                    str(self.etch),
                    "UseEtch:=",
                    self.useetch,
                    "UseR:=",
                    self.user,
                    "RMdl:=",
                    self.RMdl,
                    "NR:=",
                    self._arg_with_dim(self.NR, self.LengthUnitRough),
                    "HRatio:=",
                    str(self.HRatio),
                    "BRMdl:=",
                    self.BRMdl,
                    "BNR:=",
                    self._arg_with_dim(self.BNR, self.LengthUnitRough),
                    "BHRatio:=",
                    str(self.BHRatio),
                    "SRMdl:=",
                    self.SRMdl,
                    "SNR:=",
                    self._arg_with_dim(self.SNR, self.LengthUnitRough),
                    "SHRatio:=",
                    str(self.SHRatio),
                ]
            )
        else:
            # Non-signal (dielectric) layers get a reduced argument list with
            # zeroed roughness and no fill material.
            self.oeditor.AddStackupLayer(
                [
                    "NAME:stackup layer",
                    "Name:=",
                    self.name,
                    "Type:=",
                    self.type,
                    "Top Bottom:=",
                    self.topbottom,
                    "Color:=",
                    self.color,
                    "Transparency:=",
                    self.transparency,
                    "Pattern:=",
                    self.pattern,
                    "VisFlag:=",
                    self.visflag,
                    "Locked:=",
                    self.locked,
                    "DrawOverride:=",
                    self.drawoverride,
                    [
                        "NAME:Sublayer",
                        "Thickness:=",
                        self.thickness,
                        "LowerElevation:=",
                        self.lowerelevation,
                        "Roughness:=",
                        0,
                        "BotRoughness:=",
                        0,
                        "SideRoughness:=",
                        0,
                        "Material:=",
                        self.material.lower(),
                    ],
                ]
            )
        # Read back the id AEDT assigned to the freshly created layer.
        infos = self.oeditor.GetLayerInfo(self.name)
        infos = [i.split(": ") for i in infos]
        infosdict = {i[0]: i[1] for i in infos}
        self.id = int(infosdict["LayerId"])
        return True
    @pyaedt_function_handler()
    def _arg_with_dim(self, value, units=None):
        """Format a value with its unit suffix.
        Parameters
        ----------
        value : str or number
            Value to format. Strings are returned unchanged (they are
            assumed to already carry units).
        units : str, optional
            Unit string to append. The default is ``None``, in which case
            the layer's ``LengthUnit`` is used.
        Returns
        -------
        str
            Value with units, e.g. ``"0.5um"``.
        """
        if isinstance(value, str):
            val = value
        else:
            if units is None:
                units = self.LengthUnit
            val = "{0}{1}".format(value, units)
        return val
    @property
    def _get_layer_arg(self):
        # Argument list for oEditor.ChangeLayer / ChangeLayers; mirrors
        # create_stackup_layer but includes the layer ID and zone list.
        if self.type == "signal":
            return [
                "NAME:stackup layer",
                "Name:=",
                self.name,
                "ID:=",
                self.id,
                "Type:=",
                self.type,
                "Top Bottom:=",
                self.topbottom,
                "Color:=",
                self.color,
                "Transparency:=",
                self.transparency,
                "Pattern:=",
                self.pattern,
                "VisFlag:=",
                self.visflag,
                "Locked:=",
                self.locked,
                "DrawOverride:=",
                self.drawoverride,
                "Zones:=",
                self.zones,
                [
                    "NAME:Sublayer",
                    "Thickness:=",
                    self.thickness,
                    "LowerElevation:=",
                    self.lowerelevation,
                    "Roughness:=",
                    self._arg_with_dim(self.roughness, self.LengthUnitRough),
                    "BotRoughness:=",
                    self._arg_with_dim(self.botroughness, self.LengthUnitRough),
                    # NOTE(review): same ``toprounghenss``-for-SideRoughness
                    # pattern as in create_stackup_layer — confirm intent.
                    "SideRoughness:=",
                    self._arg_with_dim(self.toprounghenss, self.LengthUnitRough),
                    "Material:=",
                    self.material.lower(),
                    "FillMaterial:=",
                    self.fillmaterial.lower(),
                ],
                "Neg:=",
                self.IsNegative,
                "Usp:=",
                self.usp,
                [
                    "NAME:Sp",
                    "Sn:=",
                    "HFSS",
                    "Sv:=",
                    "so(si="
                    + str(self.hfssSp["si"]).lower()
                    + " , dt="
                    + str(self.hfssSp["dt"])
                    + ", dtv='"
                    + self._arg_with_dim(self.hfssSp["dtv"])
                    + "')",
                ],
                [
                    "NAME:Sp",
                    "Sn:=",
                    "PlanarEM",
                    "Sv:=",
                    "so(ifg="
                    + str(self.planaremSp["ifg"]).lower()
                    + ", vly="
                    + str(self.planaremSp["vly"]).lower()
                    + ")",
                ],
                "Etch:=",
                str(self.etch),
                "UseEtch:=",
                self.useetch,
                "UseR:=",
                self.user,
                "RMdl:=",
                self.RMdl,
                "NR:=",
                self._arg_with_dim(self.NR, self.LengthUnitRough),
                "HRatio:=",
                str(self.HRatio),
                "BRMdl:=",
                self.BRMdl,
                "BNR:=",
                self._arg_with_dim(self.BNR, self.LengthUnitRough),
                "BHRatio:=",
                str(self.BHRatio),
                "SRMdl:=",
                self.SRMdl,
                "SNR:=",
                self._arg_with_dim(self.SNR, self.LengthUnitRough),
                "SHRatio:=",
                str(self.SHRatio),
            ]
        elif self.type == "dielectric":
            return [
                "NAME:stackup layer",
                "Name:=",
                self.name,
                "ID:=",
                self.id,
                "Type:=",
                self.type,
                "Top Bottom:=",
                self.topbottom,
                "Color:=",
                self.color,
                "Transparency:=",
                self.transparency,
                "Pattern:=",
                self.pattern,
                "VisFlag:=",
                self.visflag,
                "Locked:=",
                self.locked,
                "DrawOverride:=",
                self.drawoverride,
                "Zones:=",
                self.zones,
                [
                    "NAME:Sublayer",
                    "Thickness:=",
                    self.thickness,
                    "LowerElevation:=",
                    self.lowerelevation,
                    "Roughness:=",
                    0,
                    "BotRoughness:=",
                    0,
                    "SideRoughness:=",
                    0,
                    "Material:=",
                    self.material.lower(),
                ],
            ]
        else:
            # Drawing (non-stackup) layers only carry the basic properties.
            return [
                "NAME:layer",
                "Name:=",
                self.name,
                "ID:=",
                self.id,
                "Type:=",
                self.type,
                "Top Bottom:=",
                self.topbottom,
                "Color:=",
                self.color,
                "Transparency:=",
                self.transparency,
                "Pattern:=",
                self.pattern,
                "VisFlag:=",
                self.visflag,
                "Locked:=",
                self.locked,
            ]
    @pyaedt_function_handler()
    def update_stackup_layer(self):
        """Update the stackup layer.
        .. note::
           This method is valid for release 2021 R1 and later.
           This method works only for signal and dielectric layers.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oEditor.ChangeLayer
        """
        self.oeditor.ChangeLayer(self._get_layer_arg)
        return True
    @pyaedt_function_handler()
    def remove_stackup_layer(self):
        """Remove the stackup layer.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed (layer not
            present in the current stackup).
        References
        ----------
        >>> oEditor.RemoveLayer
        """
        if self.name in self.oeditor.GetStackupLayerNames():
            self.oeditor.RemoveLayer(self.name)
            return True
        return False
class Layers(object):
    """Manages layers for the Circuit and HFSS 3D Layout tools.
    Parameters
    ----------
    modeler : :class:`pyaedt.modeler.Model3DLayout.Modeler3DLayout`
    roughnessunits : str, optional
        Units for the roughness of layers. The default is ``"um"``.
    Examples
    --------
    >>> from pyaedt import Hfss3dLayout
    >>> app = Hfss3dLayout()
    >>> layers = app.modeler.layers
    """
    def __init__(self, modeler, roughnessunits="um"):
        self._modeler = modeler
        self._app = self._modeler._app
        self._currentId = 0
        # Map of layer id -> Layer object, filled by refresh_all_layers().
        self.layers = {}
        self.lengthUnitRough = roughnessunits
        self.logger = self._app.logger
    @property
    def oeditor(self):
        """Editor.
        References
        ----------
        >>> oEditor = oDesign.SetActiveEditor("Layout")"""
        return self._modeler.oeditor
    @property
    def LengthUnit(self):
        """Length units."""
        return self._modeler.model_units
    @property
    def all_layers(self):
        """All stackup layers.
        Returns
        -------
        list
            List of stackup layers.
        References
        ----------
        >>> oEditor.GetStackupLayerNames()
        """
        # Names containing ";" are filtered out (presumably zone-qualified
        # duplicates — verify against the AEDT naming scheme).
        return [i for i in self.oeditor.GetStackupLayerNames() if ";" not in i]
    @property
    def drawing_layers(self):
        """All drawing layers.
        Returns
        -------
        list
            List of stackup layers.
        References
        ----------
        >>> oEditor.GetAllLayerNames()
        """
        # Drawing layers = every layer that is not part of the stackup.
        stackup = self.all_layers
        return [i for i in list(self.oeditor.GetAllLayerNames()) if i not in stackup and ";" not in i]
    @property
    def all_signal_layers(self):
        """All signal layers.
        Returns
        -------
        list
            List of signal layers.
        """
        a = self.all_layers
        sig = []
        for lay in a:
            layid = self.layer_id(lay)
            # Lazily (re)populate the cache when a layer is unknown.
            if layid not in self.layers:
                self.refresh_all_layers()
            if self.layers[layid].type == "signal":
                sig.append(lay)
        return sig
    @property
    def all_diel_layers(self):
        """All dielectric layers.
        Returns
        -------
        list
            List of dielectric layers.
        """
        a = self.all_layers
        die = []
        for lay in a:
            layid = self.layer_id(lay)
            # Lazily (re)populate the cache when a layer is unknown.
            if layid not in self.layers:
                self.refresh_all_layers()
            if self.layers[layid].type == "dielectric":
                die.append(lay)
        return die
    @pyaedt_function_handler()
    def layer_id(self, name):
        """Retrieve a layer ID.
        Parameters
        ----------
        name : str
            Name of the layer.
        Returns
        -------
        type
            Layer ID if the layer name exists, ``None`` otherwise.
        """
        for el in self.layers:
            if self.layers[el].name == name:
                return el
        return None
    @pyaedt_function_handler()
    def refresh_all_layers(self):
        """Refresh all layers in the current stackup.
        Re-reads every layer's properties from AEDT and synchronizes the
        local ``self.layers`` cache (updating existing entries in place).
        Returns
        -------
        int
            Number of layers in the current stackup.
        """
        layernames = [i for i in self.oeditor.GetAllLayerNames() if ";" not in i]
        for el in layernames:
            o = Layer(self, "signal")
            o.name = el
            # GetLayerInfo returns "Key: Value" strings; parse into a dict.
            infos = self.oeditor.GetLayerInfo(el)
            infos = [i.split(": ") for i in infos]
            infosdict = {i[0].strip(): i[1].strip() for i in infos}
            # AEDT reports metalized signal layers as a distinct type;
            # collapse them into plain "signal" here.
            if infosdict["Type"] == "metalizedsignal":
                o.type = "signal"
            else:
                o.type = infosdict["Type"]
            o.locked = _str2bool(infosdict["IsLocked"])
            o.id = int(infosdict["LayerId"])
            o.topbottom = infosdict["TopBottomAssociation"].lower()
            o.IsVisible = infosdict["IsVisible"]
            if "IsVisiblePath" in infosdict:
                o.IsVisiblePath = infosdict["IsVisiblePath"]
                o.IsVisiblePad = infosdict["IsVisiblePad"]
                o.IsVisibleComponent = infosdict["IsVisibleComponent"]
                o.IsVisibleShape = infosdict["IsVisibleShape"]
                o.IsVisibleHole = infosdict["IsVisibleHole"]
            # NOTE(review): the last character of the reported color string
            # is dropped before parsing — presumably a trailing ")"; confirm
            # against the actual GetLayerInfo output format.
            o.color = int(infosdict["Color"][:-1])
            if o.type in ["signal", "dielectric", "via"]:
                o.index = int(infosdict["Index"])
                o.thickness = _conv_number(infosdict["LayerThickness"])
                o.lowerelevation = _conv_number(infosdict["LowerElevation0"])
                o.fillmaterial = infosdict["FillMaterial0"]
                o.material = infosdict["Material0"]
                if "EtchFactor" in infosdict:
                    o.useetch = True
                    o.etch = _conv_number(infosdict["EtchFactor"])
                # Roughness entries are "value, ratio" pairs per side.
                if "Roughness0 Type" in infosdict:
                    o.user = True
                    o.RMdl = infosdict["Roughness0 Type"]
                    o.NR = infosdict["Roughness0"].split(", ")[0]
                    o.HRatio = _conv_number(infosdict["Roughness0"].split(", ")[1])
                if "BottomRoughness0 Type" in infosdict:
                    o.user = True
                    o.BRMdl = infosdict["BottomRoughness0 Type"]
                    o.BNR = infosdict["BottomRoughness0"].split(", ")[0]
                    o.BHRatio = _conv_number(infosdict["BottomRoughness0"].split(", ")[1])
                if "SideRoughness0 Type" in infosdict:
                    o.user = True
                    o.SRMdl = infosdict["SideRoughness0 Type"]
                    o.SNR = infosdict["SideRoughness0"].split(", ")[0]
                    o.SHRatio = _conv_number(infosdict["SideRoughness0"].split(", ")[1])
            if o.id in self.layers:  # updating the existing one
                layer = self.layers[o.id]
                layer.name = o.name
                layer.type = o.type
                layer.locked = o.locked
                layer.topbottom = o.topbottom
                layer.IsVisible = o.IsVisible
                layer.IsVisiblePath = o.IsVisiblePath
                layer.IsVisiblePad = o.IsVisiblePad
                layer.IsVisibleComponent = o.IsVisibleComponent
                layer.IsVisibleShape = o.IsVisibleShape
                layer.IsVisibleHole = o.IsVisibleHole
                layer.color = o.color
                layer.index = o.index
                layer.thickness = o.thickness
                layer.lowerelevation = o.lowerelevation
                layer.fillmaterial = o.fillmaterial
                layer.material = o.material
                layer.useetch = o.useetch
                layer.etch = o.etch
                layer.user = o.user
                layer.RMdl = o.RMdl
                layer.NR = o.NR
                layer.HRatio = o.HRatio
                layer.BRMdl = o.BRMdl
                layer.BNR = o.BNR
                layer.BHRatio = o.BHRatio
                layer.SRMdl = o.SRMdl
                layer.SNR = o.SNR
                layer.SHRatio = o.SHRatio
            else:  # creating the new layer
                self.layers[o.id] = o
        return len(self.layers)
    @pyaedt_function_handler()
    def add_layer(
        self, layername, layertype="signal", thickness="0mm", elevation="0mm", material="copper", isnegative=False
    ):
        """Add a layer.
        Parameters
        ----------
        layername : str
            Name of the layer.
        layertype : str, optional
            Type of the layer. The default is ``"signal"``.
        thickness : str, optional
            Thickness with units. The default is ``"0mm"``.
        elevation : str, optional
            Elevation with units. The default is ``"0mm"``.
        material : str, optional
            Name of the material. The default is ``"copper"``.
        isnegative : bool, optional
            If ``True``, the geometry on the layer is cut away from the layer. The default is ``False``.
        Returns
        -------
        :class:`pyaedt.modules.LayerStackup.Layer`
            Layer object.
        """
        newlayer = Layer(self, layertype, isnegative)
        newlayer.name = layername
        newlayer.thickness = thickness
        newlayer.lowerelevation = elevation
        newlayer.material = material
        # create_stackup_layer() also assigns the id AEDT hands back.
        newlayer.create_stackup_layer()
        self.layers[newlayer.id] = newlayer
        return self.layers[newlayer.id]
    @pyaedt_function_handler()
    def change_stackup_type(self, mode="MultiZone", number_zones=3):
        """Change the stackup type between Multizone, Overlap and Laminate.
        Parameters
        ----------
        mode : str, optional
            Stackup type. Default is `"Multizone"`. Options are `"Overlap"` and `"Laminate"`.
        number_zones : int, optional
            Number of zones of multizone. By default all layers will be enabled in all zones.
        Returns
        -------
        bool
            `True` if successful.
        """
        if mode.lower() == "multizone":
            # Zone list: "Primary" plus Zone1..Zone{n-1}.
            zones = ["NAME:Zones", "Primary"]
            for i in range(number_zones - 1):
                zones.append("Zone{}".format(i + 1))
            args = ["NAME:layers", "Mode:=", "Multizone", zones, ["NAME:pps"]]
        elif mode.lower() == "overlap":
            # NOTE(review): the doubled "args = args =" is redundant (harmless).
            args = args = ["NAME:layers", "Mode:=", "Overlap", ["NAME:pps"]]
        elif mode.lower() == "laminate":
            # NOTE(review): the doubled "args = args =" is redundant (harmless).
            args = args = ["NAME:layers", "Mode:=", "Laminate", ["NAME:pps"]]
        else:
            self.logger.error("Stackup mode has to be Multizone, Overlap or Laminate.")
            return False
        # Re-emit every stackup layer with its (possibly updated) zone list.
        for v in list(self.layers.values()):
            if v.type in ["signal", "dielectric"]:
                if mode.lower() == "multizone":
                    v.zones = [i for i in range(number_zones)]
                else:
                    v.zones = []
                args.append(v._get_layer_arg)
        self.oeditor.ChangeLayers(args)
        return True
|
#!/usr/bin/env python
"""Ninja build configurator for mdns library"""
import sys
import os
# The generator module ships with the build system, not on the default path.
sys.path.insert( 0, os.path.join( 'build', 'ninja' ) )
import generator
dependlibs = [ 'network', 'foundation' ]
generator = generator.Generator( project = 'mdns', dependlibs = dependlibs, variables = [ ( 'bundleidentifier', 'com.rampantpixels.mdns.$(binname)' ) ] )
target = generator.target
writer = generator.writer
toolchain = generator.toolchain
# Core library target built from every mdns source module.
mdns_lib = generator.lib( module = 'mdns', sources = [
  'discovery.c', 'mdns.c', 'query.c', 'record.c', 'response.c', 'service.c', 'socket.c', 'string.c', 'version.c' ] )
#if not target.is_ios() and not target.is_android():
#  configs = [ config for config in toolchain.configs if config not in [ 'profile', 'deploy' ] ]
#  if not configs == []:
#    generator.bin( 'blast', [ 'main.c', 'client.c', 'server.c' ], 'blast', basepath = 'tools', implicit_deps = [ mdns_lib ], libs = [ 'network' ], configs = configs )
includepaths = generator.test_includepaths()
test_cases = [
  'dnsds'
]
if target.is_ios() or target.is_android() or target.is_pnacl():
  #Build one fat binary with all test cases
  test_resources = []
  test_extrasources = []
  test_cases += [ 'all' ]
  if target.is_ios():
    test_resources = [ os.path.join( 'all', 'ios', item ) for item in [ 'test-all.plist', 'Images.xcassets', 'test-all.xib' ] ]
  elif target.is_android():
    test_resources = [ os.path.join( 'all', 'android', item ) for item in [
      'AndroidManifest.xml', os.path.join( 'layout', 'main.xml' ), os.path.join( 'values', 'strings.xml' ),
      os.path.join( 'drawable-ldpi', 'icon.png' ), os.path.join( 'drawable-mdpi', 'icon.png' ), os.path.join( 'drawable-hdpi', 'icon.png' ),
      os.path.join( 'drawable-xhdpi', 'icon.png' ), os.path.join( 'drawable-xxhdpi', 'icon.png' ), os.path.join( 'drawable-xxxhdpi', 'icon.png' )
    ] ]
    test_extrasources = [ os.path.join( 'all', 'android', 'java', 'com', 'rampantpixels', 'foundation', 'test', item ) for item in [
      'TestActivity.java'
    ] ]
  if target.is_pnacl():
    generator.bin( module = '', sources = [ os.path.join( module, 'main.c' ) for module in test_cases ] + test_extrasources, binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'mdns', 'network', 'test', 'foundation' ], resources = test_resources, includepaths = includepaths )
  else:
    generator.app( module = '', sources = [ os.path.join( module, 'main.c' ) for module in test_cases ] + test_extrasources, binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'mdns', 'network', 'test', 'foundation' ], resources = test_resources, includepaths = includepaths )
else:
  #Build one binary per test case
  generator.bin( module = 'all', sources = [ 'main.c' ], binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'network', 'foundation' ], includepaths = includepaths )
  for test in test_cases:
    generator.bin( module = test, sources = [ 'main.c' ], binname = 'test-' + test, basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'test', 'mdns', 'network', 'foundation' ], includepaths = includepaths )
|
<filename>tests/test_ledfx.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import pytest
import numpy as np
from ledfxcontroller.devices import DeviceManager
from ledfxcontroller.effects.rainbow import RainbowEffect
from ledfxcontroller.effects.spectrum import SpectrumAudioEffect
from ledfxcontroller.effects.wavelength import WavelengthAudioEffect
from ledfxcontroller.effects.gradient import TemporalGradientEffect
from ledfxcontroller.effects import Effect, EffectManager
# TODO: Cleanup test as they are not 100% functional yet
# BASIC_E131_CONFIG = {
# "name": "Test E131 Device",
# "e131":
# {
# "host": "192.168.1.185",
# "channel_count": 96
# }
# }
# Live-device fixture: E1.31 (sACN) endpoint used by the commented-out
# integration tests below; host and channel count match a specific LAN setup.
BASIC_E131_CONFIG = {
    "name": "Test E131 Device",
    "e131":
    {
        "host": "192.168.1.183",
        "channel_count": 900
    }
}
# def test_device_creation():
# deviceManager = DeviceManager()
# device = deviceManager.createDevice(BASIC_E131_CONFIG)
# assert device is not None
# def test_device_channel():
# deviceManager = DeviceManager()
# device = deviceManager.createDevice(BASIC_E131_CONFIG)
# assert device.outputChannels[0].pixelCount == 32
# assert len(device.outputChannels[0].pixels) == 32
# # Validate setting the pixels as a single tuple
# device.outputChannels[0].pixels = (255, 0, 0)
# for pixel in range(0, device.outputChannels[0].pixelCount):
# assert (device.outputChannels[0].pixels[pixel] == (255, 0, 0)).all()
# # Validate the output channel gets assembled into the frame
# frame = device.assembleFrame()
# for pixel in range(0, device.outputChannels[0].pixelCount):
# assert (frame[pixel] == (255, 0, 0)).all()
# # Validate setting the pixels as a numpy array of equal size
# device.outputChannels[0].pixels = np.zeros((device.outputChannels[0].pixelCount, 3))
# for pixel in range(0, device.outputChannels[0].pixelCount):
# assert (device.outputChannels[0].pixels[pixel] == (0, 0, 0)).all()
# # Validate the output channel gets assembled into the frame
# frame = device.assembleFrame()
# for pixel in range(0, device.outputChannels[0].pixelCount):
# assert (frame[pixel] == (0, 0, 0)).all()
# def test_effect_rainbow():
# deviceManager = DeviceManager()
# device = deviceManager.createDevice(BASIC_E131_CONFIG)
# assert device is not None
# effectManager = EffectManager()
# effect = effectManager.createEffect("Rainbow")
# assert effect is not None
# device.activate()
# effect.activate(device.outputChannels[0])
# time.sleep(5) # Default
# effect.updateConfig({'speed': 3.0})
# time.sleep(5) # Default w/ 3x speed
# effect.updateConfig({'frequency': 3.0})
# time.sleep(5) # Default w/ 3x frequency
# effect.deactivate()
# device.deactivate()
# def test_effect_gradient_shift():
# deviceManager = DeviceManager()
# device = deviceManager.createDevice(BASIC_E131_CONFIG)
# assert device is not None
# effectManager = EffectManager()
# effect = effectManager.createEffect("Gradient", { "gradient": "Dancefloor"})
# assert effect is not None
# device.activate()
# effect.activate(device.outputChannels[0])
# time.sleep(5)
# effect.deactivate()
# device.deactivate()
# assert False
# def test_effect_spectrum():
# deviceManager = DeviceManager()
# device = deviceManager.createDevice(BASIC_E131_CONFIG)
# assert device is not None
# effectManager = EffectManager()
# effect = effectManager.createEffect("Spectrum")
# assert effect is not None
# device.activate()
# effect.activate(device.outputChannels[0])
# time.sleep(20)
# effect.deactivate()
# device.deactivate()
# assert False
|
<gh_stars>0
#!/usr/bin/env python3
'''
To run this script with aegea do:
aegea batch submit --command="cd /mnt; git clone https://github.com/chanzuckerberg/idseq-copy-tool.git; cd idseq-copy-tool; pip3 install schedule; python3 main.py " --storage /mnt=500 --volume-type gp2 --ecr-image idseq_dag --memory 120000 --queue idseq-prod-lomem --vcpus 16 --job-role idseq-pipeline
'''
import argparse
import boto3
import botocore
import datetime
import os
import schedule
import subprocess
import time
# Destination bucket/prefix where the mirrored NCBI files are stored.
s3_bucket = "idseq-database"
s3_top_folder = "ncbi-sources"
s3 = boto3.resource("s3")
# Upstream FTP host to mirror from.
remote_server = "ftp.ncbi.nih.gov"
# Individual files to mirror (each with its md5 companion where available).
files_to_download = [
    "/blast/db/FASTA/nt.gz",
    "/blast/db/FASTA/nt.gz.md5",
    "/blast/db/FASTA/nr.gz",
    "/blast/db/FASTA/nr.gz.md5",
    "/pub/taxonomy/taxdump.tar.gz",
    "/pub/taxonomy/taxdump.tar.gz.md5",
]
# These archives are additionally uploaded in unzipped form.
files_to_unzip = set(["/blast/db/FASTA/nt.gz", "/blast/db/FASTA/nr.gz"])
folders_to_download = ["/pub/taxonomy/accession2taxid"]
def main():
    """Parse CLI flags, run one copy pass, then optionally keep running weekly.

    Flags
    -----
    --run-as-daemon
        Keep the process alive and re-run the copy flow every 7 days.
        The historical misspelling ``--run-as-daemom`` is kept as an alias
        so existing invocations continue to work.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-as-daemon', '--run-as-daemom', dest='run_as_daemon', action='store_true')
    parser.set_defaults(run_as_daemon=False)
    # Parse arguments BEFORE the (very long) copy run so that bad flags
    # fail fast instead of after hours of downloading.
    args = parser.parse_args()
    start_copy_flow()
    if args.run_as_daemon:  # Infinite loop
        schedule.every(7).days.do(start_copy_flow)
        print("Scheduler running...")
        while True:
            schedule.run_pending()
            time.sleep(1)
def start_copy_flow():
    """Mirror the configured NCBI files and folders into a dated S3 subfolder.
    Skips the entire run when the ``done`` marker for today's date already
    exists in S3; the marker is written only after every upload succeeded,
    so a failed run is retried on the next invocation.
    """
    date_tag = datetime.datetime.today().strftime("%Y-%m-%d")
    dated_subfolder = f"{s3_top_folder}/{date_tag}"
    print(f"Dated subfolder: {dated_subfolder}")
    try:
        # Don't run if the done file is there already
        s3.Object(s3_bucket, f"{dated_subfolder}/done").load()
        print("Done file exists already. Skipping this run.")
        return
    except botocore.exceptions.ClientError:
        # load() raising ClientError is the "object does not exist" signal.
        print(f"Done file doesn't exist. Should run.")
    try:
        # Download everything first, then upload, then write the marker.
        for file in files_to_download:
            download_file(f"{remote_server}{file}")
        for folder in folders_to_download:
            download_folder(f"{remote_server}{folder}")
        for file in files_to_download:
            upload_temp_file(file, dated_subfolder, file in files_to_unzip)
        for folder in folders_to_download:
            upload_temp_folder(folder, dated_subfolder)
        write_done_file(dated_subfolder)
        print(f"Copy flow finished successfully.")
    except RuntimeError as e:
        # RuntimeError is what command_execute raises on a failed shell command.
        print(f"Error in the copy flow. Aborting run. {e}")
def download_file(file):
    """Fetch a single remote file into the local ``temp`` directory,
    resuming partial downloads (-c) with low-verbosity output (-nv)."""
    print(f"Downloading file {file} ...")
    command_execute(f"wget -P temp -cnv {file}")
def download_folder(folder):
    """Recursively mirror a remote directory into the local ``temp`` folder."""
    print(f"Downloading folder {folder} ...")
    # -c resume, -r recursive, -nv low verbosity, -np don't follow parent
    # links, -nH drop the host name, --cut-dirs=2 strip two leading path
    # components, ignore robots.txt and the generated index.html listings.
    wget_flags = "-P temp -crnv -np -nH --cut-dirs=2 -e robots=off -R 'index.html*'"
    command_execute(f"wget {wget_flags} {folder}/")
def command_execute(cmd):
    """Run *cmd* in a shell and raise ``RuntimeError`` if it exits non-zero.

    Parameters
    ----------
    cmd : str
        Shell command line to execute.

    Raises
    ------
    RuntimeError
        When the command exits with a non-zero status; the message carries
        the command's stderr output.
    """
    print(f"Command: {cmd}")
    # Bug fix: the original called proc.wait() before communicate() while
    # both stdout and stderr were PIPEs — that can deadlock once a pipe
    # buffer fills up on large output (e.g. recursive wget). subprocess.run
    # drains both pipes safely.
    proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stderr = proc.stderr.decode("utf-8")
    if proc.returncode != 0:
        raise RuntimeError(f"Command error: {stderr}")
def upload_temp_file(file, dated_subfolder, unzip):
    """Push a downloaded file (and optionally its unzipped form) to S3."""
    base = os.path.basename(file)
    local_path = f"temp/{base}"
    remote_path = f"{dated_subfolder}/{base}"
    upload_file(local_path, remote_path)
    if unzip:  # the file
        # A lot of assumptions here. main assumption is that the unzipped file is src[:-3]
        command_execute(f"gunzip {local_path}")
        upload_file(local_path[:-3], remote_path[:-3])
def upload_file(src, dst):
    """Upload a single local file to the configured S3 bucket."""
    print(f"Uploading {src} ...")
    bucket = s3.Bucket(s3_bucket)
    bucket.upload_file(src, dst)
def upload_temp_folder(folder, dated_subfolder):
    """Upload every file inside a downloaded folder to S3."""
    name = os.path.basename(folder)
    local_dir = f"temp/{name}"
    for entry in os.listdir(local_dir):
        upload_file(f"{local_dir}/{entry}", f"{dated_subfolder}/{name}/{entry}")
def write_done_file(dated_subfolder):
    """Drop an empty ``done`` marker object so later runs skip this date."""
    print(f"Uploading done file ...")
    marker_key = f"{dated_subfolder}/done"
    s3.Object(s3_bucket, marker_key).put(Body="")
main()
|
<gh_stars>0
import datetime
from api.qymatix import uploader
# from . import file_uploader
class EtlBase():
def __init__(self, dbname, file_name=None, since=None):
import datetime
from api.qymatix import uploader
self.dbname = dbname
self.file_name = file_name
self.since = since
if since is None:
self.since = datetime.date(1999, 1, 1)
self.data = uploader.load_data(filename=file_name, nrows=None)
self.transform_and_filter()
    def transform_and_filter(self, data=None):
        """Normalise column names, parse dates and derive ids and costs.
        NOTE(review): the ``data`` parameter is currently ignored — the
        method always operates on ``self.data``. Confirm whether callers
        that pass ``data`` (e.g. ``upload_products``) expect it to be used.
        :return:
        """
        from pandas import to_datetime
        if 'pfisterer' in self.dbname:
            # Column mapping for the pfisterer export format.
            cols = {
                'PRODUCT FAMILY': 'product class',
                'PRODUCT GROUP': 'product line',
                'PRODUCT SUB GROUP': 'product type',
                'Item nr': 'product',
                'POSITION_BEZ1': 'description',
                'Account': 'Account Name',
                'Invoice Date': 'Date',
                'Customer Name': 'Account Name',
                'Customer No_': 'nn---',
                # 'Customer No_': 'Account id',
                'City': 'city',
                'VORGANG_L_PLZ': 'postcode',
                'External Sales Rep': 'kam',
                'POSITION_SUMB': 'goal',
                'Quantity Sold': 'quantity',
                'VORGANG_NR': 'invoice',
                'Price (EUR)': 'price',
                'Margin (EUR)': 'margin',
                'Cost Price/unit (Ex VAT)': 'cost',
                'YEAR': 'Jahr',
            }
        else:
            # Column mapping for the default export format.
            cols = {
                'ARTIKELWG_MASTER': 'product line',
                'Item': 'product',
                'Item Group': 'product type',
                'POSITION_BEZ1': 'description',
                'Account': 'Account Name',
                'Invoice Date': 'Date',
                'ADRESSEN_NAM1': 'Account Name',
                'ADRESSEN_KEY': 'customer id',
                'ADRESSEN_ORT': 'city',
                'VORGANG_L_PLZ': 'postcode',
                'VORGANG_LIEFADR': 'customer_id',
                'VORGANG_VERTRETER': 'kam',
                'VORGANG_DATUM': 'Date',
                'POSITION_ARTNR': 'product',
                'ARTIKEL_WG': 'product type',
                'POSITION_SUMB': 'goal',
                'VORGANG_MENGE': 'quantity',
                'VORGANG_NR': 'invoice',
                'Account id': 'customer_id',
                'VORGANG_RECHADR':'customer_id',
                'POSITION_SUMN': 'price',
                'POSITION_SUME': 'cost',
                'YEAR': 'Jahr',
            }
        import datetime
        # Build a Date column when the export splits it into year/month/day.
        if 'Date' not in self.data.columns:
            self.data['Date'] = self.data.apply(lambda x: datetime.date(x['Sales Year'], x['Sales Month'], x['Sales Day of Month']), axis=1)
        else:
            self.data['Date'] = to_datetime(self.data['Date'])
        if self.since is not None:
            self.data = self.data[self.data['Date'] > self.since]
        self.data.rename(columns=cols, inplace=True)
        # data = data[(data['Jahr'] == 2018)]
        # data = data[data['Art'] == 'Verkauf']
        # data = data[data['customer_id'] < 300000]
        # NOTE(review): 'margin' is only produced by the pfisterer mapping;
        # confirm the other export format before relying on this derived cost.
        self.data['cost'] = self.data['price'] - self.data['margin']
        data = self.data.copy()
        data_accounts = self.data.copy()
        # Synthesise an 'Account id' (dedup index + 1000) when the export
        # lacks one, then propagate it back to every matching row by name.
        if 'Account id' not in self.data.columns:
            data_accounts.drop_duplicates('Account Name', inplace=True)
            data_accounts['Account id'] = data_accounts.index + 1000
            for n in data_accounts['Account Name'].values:
                data.loc[data['Account Name'] == n, 'Account id'] = \
                    data_accounts[data_accounts['Account Name'] == n]['Account id'].values[0]
        self.data = data
def upload_products_class(self, file_name_product_class=None):
    """Upload the distinct product classes to the target database.

    :param file_name_product_class: optional file to load data from;
        when omitted, the already-prepared ``self.data`` is used.
    """
    from api.qymatix import uploader
    data = (uploader.load_data(filename=file_name_product_class)
            if file_name_product_class is not None else self.data)
    # The uploader needs one row per product class only.
    data.drop_duplicates('product class', inplace=True)
    uploader.upload_product_class(dbname=self.dbname, data=data)
def upload_products_line(self, file_name_product_line=None):
    """Upload the distinct product lines to the target database.

    :param file_name_product_line: optional file to load data from; when
        given, the data is loaded and ``transform_and_filter`` is applied.
    """
    from api.qymatix import uploader
    if file_name_product_line is None:
        data = self.data
    else:
        data = uploader.load_data(filename=file_name_product_line)
        self.transform_and_filter()
    # One row per product line is enough for the uploader.
    data.drop_duplicates('product line', inplace=True)
    uploader.upload_product_line(dbname=self.dbname, data=data)
def upload_products_type(self, file_name_product_type=None):
    """Upload the distinct product types to the target database.

    :param file_name_product_type: optional file to load data from; when
        given, the data is loaded and ``transform_and_filter`` is applied.
    """
    from api.qymatix import uploader
    if file_name_product_type is None:
        data = self.data
    else:
        data = uploader.load_data(filename=file_name_product_type)
        self.transform_and_filter()
    # One row per product type is enough for the uploader.
    data.drop_duplicates('product type', inplace=True)
    uploader.upload_product_type(dbname=self.dbname, data=data)
def upload_products(self, file_name_products=None):
    """Upload the distinct products to the target database.

    :param file_name_products: optional file to load data from; when
        given, the loaded frame is passed through ``transform_and_filter``.
    """
    from api.qymatix import uploader
    if file_name_products is None:
        data = self.data
    else:
        data = uploader.load_data(filename=file_name_products)
        self.transform_and_filter(data)
    # One row per product is enough for the uploader.
    data.drop_duplicates('product', inplace=True)
    uploader.upload_products(dbname=self.dbname, data=data)
def upload_customers(self, file_name_customers=None):
    """Upload the distinct customers (accounts) to the target database.

    :param file_name_customers: optional file to load data from;
        when omitted, the already-prepared ``self.data`` is used.
    """
    from api.qymatix import uploader
    data = (uploader.load_data(filename=file_name_customers)
            if file_name_customers is not None else self.data)
    # Prefer de-duplicating by the account id when that column exists,
    # otherwise fall back to the account name.
    dedup_column = 'Account id' if 'Account id' in data.keys() else 'Account Name'
    data.drop_duplicates(dedup_column, inplace=True)
    uploader.upload_customers(dbname=self.dbname, data=data)
def upload_kams(self, file_name_kams=None):
    """Upload the distinct key account managers (KAMs) to the database.

    :param file_name_kams: optional file to load data from;
        when omitted, the already-prepared ``self.data`` is used.
    :return: None
    """
    from api.qymatix import uploader
    data = (uploader.load_data(filename=file_name_kams)
            if file_name_kams is not None else self.data)
    # One row per KAM is enough for the uploader.
    data.drop_duplicates('kam', inplace=True)
    uploader.upload_kam(dbname=self.dbname, data=data)
def upload_sales(self, file_name_sales=None):
    """Upload the sales transactions to the target database.

    :param file_name_sales: optional path of a file to load the sales
        from; when omitted, the already-prepared ``self.data`` is used.
    :return: None
    """
    from api.qymatix import uploader
    if file_name_sales is not None:
        # BUG FIX: the original read the module-level ``file_name`` instead
        # of this parameter, so the argument was silently ignored and the
        # wrong file could be loaded.
        data_sales = uploader.load_data(filename=file_name_sales)
    else:
        data_sales = self.data
    uploader.upload_sales(dbname=self.dbname, data=data_sales)
def upload_plans(self, file_name_plans=None, skiprows=None):
    """Upload the distinct plans to the target database.

    :param file_name_plans: optional file to load the plans from
    :param skiprows: rows to skip when reading the plan file
    :return: None
    """
    from api.qymatix import uploader
    if file_name_plans is None:
        data = self.data
    else:
        data = uploader.load_data(filename=file_name_plans, skiprows=skiprows)
    # One row per plan name is enough for the uploader.
    data.drop_duplicates('plan name', inplace=True)
    uploader.upload_plans(dbname=self.dbname, data=data)
# --- ad-hoc script entry: import the pfisterer data set into its database.
# NOTE(review): most steps are commented out; only the sales upload runs.
file_name = '/var/www/qyapp/pfisterer_000001.xlsx'
database_name = 'pfisterer_de'
etl = EtlBase(database_name, file_name)
# print("Importing product classes...")
# etl.upload_products_class()
#
# print('\n')
# print('\n')
# print("Importing product lines...")
# etl.upload_products_line()
#
# print('\n')
# print('\n')
# print("Importing product types...")
# etl.upload_products_type()
#
# print('\n')
# print('\n')
# print("Importing products...")
# etl.upload_products()
# print("uploading customers....")
# etl.upload_customers()
# etl.upload_kams()
print("uploading sales....")
etl.upload_sales()
# Earlier guarded entry point kept for reference:
# if __name__ == "__main__":
#
#     file_name = '/var/www/qyapp/pfisterer_file_000001.xlsx'
#
#     database_name = 'data_pfisterer_de'
#
#     etl = EtlBase(database_name, file_name)
#
#     print("Importing product classes...")
#     etl.upload_products_class()
#     etl.upload_products_line()
#     print("uploading product types....")
#     etl.upload_products_type()
#     print("uploading products....")
#     etl.upload_products()
#     print("uploading customers....")
#     etl.upload_customers()
#     etl.upload_kams()
#     print("uploading sales....")
#     etl.upload_sales()
|
#!/usr/bin/env python
import os
import argparse
import sys
import nibabel as nib
from builtins import str
import matplotlib.pyplot as plt
import numpy as np
import nipype.algorithms.confounds as npalg
import nilearn.plotting as nlp
import nilearn.image as nimg
import nilearn.signal as sgn
import configparser
config = configparser.ConfigParser()
config.read('params.ini')
sys.path.append(config['PATHS']['qclib_path'])
import qclib.motion_handler as mh
import qclib as qc
def plot_brain_overlay(data, bgImg, rangeVal, figDpi, slAxis='z', thr=0, is_stat=False):
    """Render a volume as a 7-row mosaic of 60 slices along one axis.

    data     : image to plot (nibabel/nilearn image object)
    bgImg    : background image for stat-map plots (ignored for EPI plots)
    rangeVal : upper bound of the colour range (EPI plots only)
    figDpi   : DPI of the created matplotlib figure
    slAxis   : 'z' (axial), 'y' (coronal) or 'x' (sagittal)
    thr      : absolute display threshold (stat-map plots only)
    is_stat  : use nilearn's plot_stat_map instead of plot_epi
    """
    fig = plt.figure(figsize=(16, 12), dpi=figDpi, facecolor='w', edgecolor='k')
    # Slice coordinate ranges per anatomical axis (z=axial, y=coronal, x=sagittal).
    axis_bounds = {'z': (-60, 80), 'x': (-60, 60), 'y': (-100, 60)}
    if slAxis in axis_bounds:
        low, high = axis_bounds[slAxis]
        zSlices = np.linspace(low, high, 60)
    nRow = 7
    rowPlace = np.linspace(0, 1, nRow + 1)
    step = int(np.floor(60 / nRow))
    for row in range(nRow):
        first = row * step
        last = first + step
        row_axes = (0, rowPlace[row], 1, 1 / nRow)
        if is_stat == True:
            nlp.plot_stat_map(data, display_mode=slAxis, figure=fig,
                              draw_cross=False, vmax=.9, cmap='cold_hot',
                              cut_coords=zSlices[first:last], axes=row_axes,
                              colorbar=True, bg_img=bgImg, threshold=thr)
        else:
            nlp.plot_epi(data, display_mode=slAxis, figure=fig,
                         draw_cross=False, vmin=0, vmax=rangeVal,
                         cmap='gnuplot2', cut_coords=zSlices[first:last],
                         axes=row_axes, colorbar=True)
def vcorrcoef(X, y):
    """Row-wise Pearson correlation of each row of *X* with the vector *y*.

    Vectorised alternative to calling np.corrcoef once per row, which is
    much faster for large matrices.
    """
    X_centered = X - np.mean(X, axis=1).reshape(-1, 1)
    y_centered = y - np.mean(y)
    numerator = np.sum(X_centered * y_centered, axis=1)
    denominator = np.sqrt(np.sum(X_centered ** 2, axis=1) * np.sum(y_centered ** 2))
    return numerator / denominator
# ++++++++++++++++++++++++++++++++++++++++++++++
#
# END OF function definitions
#
# ++++++++++++++++++++++++++++++++++++++++++++++
parser = argparse.ArgumentParser(description='Save QA check Plots')
# Required options
# TODO Adjust grouping of arguments
reqoptions = parser.add_argument_group('Required arguments')
reqoptions.add_argument('-o', '-out', dest="outDir", required=True, help='Directory where images are to be saved' )
reqoptions.add_argument('-a', '-in', dest="inDir", required=True, help='Dir where EPI + masks are stored [MNI SPACE]' )
reqoptions.add_argument('-f', '-fname', dest="fname", required=True, help='EPI image name ' )
reqoptions.add_argument('-x', '-outf', dest="outFname", required=True, help='Name of the output plot' )
reqoptions.add_argument('-b', '-bg', dest="bg", required=True, help='Background Image for plots (e.g. MNI or mean func file path)' )
reqoptions.add_argument('-c', '-plane', dest="plane", required=False, default='axial', help='Plane to slice [Axial (Z)], Sagital (x), Coronal (y)' )
reqoptions.add_argument('-t', '-type', dest="type", required=False, default='std', help='Type of plot [STD], GlobalCorr, MotionCorr' )
reqoptions.add_argument('-e', '-mpe', dest="mpe", required=False, default='motion_estimate.par', help='Motion Estimate File' )
reqoptions.add_argument('-p', '-prog', dest="prog", required=False, default='AFNI', help='Software which generated the motion estimate: [AFNI], FSL or SPM' )
reqoptions.add_argument('-r', '-range', dest="range", required=False, default='80%', help='Range Plot' )
reqoptions.add_argument('-l', '-thr', dest="thr", required=False, default=0, help='(ABsolute) Threshold for plotting [0.3]' )
reqoptions.add_argument('-d', '-dpi', dest="dpi", required=False, default=120, help='Saved figure DPI' )
reqoptions.add_argument('-s', '-smooth', dest="smooth", required=False, default=0, help='Visualisation smoothness [FWHM, mm]' )
reqoptions.add_argument('-n', '-save_nii', dest="save_nii", required=False, default=0, help='Save the 3d volume' )
args = parser.parse_args()
outDir = args.outDir
inDir = args.inDir
outFile = outDir + '/' + args.outFname
rpFile = inDir + '/' + args.mpe
# Map the human-readable plane name onto nilearn's display_mode letter.
plane = args.plane.lower()
if plane == 'axial':
    plane ='z'
if plane == 'coronal':
    plane ='y'
if plane == 'sagital':
    plane ='x'
# PNG resolution of the saved file
figDpi=int(args.dpi)
saveNii = int(args.save_nii)==1
# A trailing '%' means the range is a percentile of the data rather than
# an absolute colour-scale maximum.
rangeVal=args.range
usePctl=False
if rangeVal[-1] == '%':
    usePctl=True
    rangeVal=float(rangeVal[0:-1])
smooth = float(args.smooth)
# Font size and weight for ALL plots
plt.rcParams.update({'font.size': 20, 'font.weight':'bold'} )
funcImgFile = inDir + '/' + args.fname
thr = float(args.thr)
bgImgPath = args.bg
bgImg = nimg.load_img(args.bg)
plotType = args.type.lower()
# ++++++++++++++++++++++++++++++++++++++++++++++++
#
# END OF PARAMETER/SETUP
#
# ++++++++++++++++++++++++++++++++++++++++++++++++
print('\n\n ===============================================================================================\n\n')
print('Starting Temporal SD/Correlation QC')
print('\n\n ===============================================================================================\n\n')
# =============================================
#
# 3. Temporal Standard Deviation
#
# =============================================
if plotType == 'std':
    funcImg = nib.load(funcImgFile)
    # NOTE(review): get_data() is deprecated in recent nibabel in favour of
    # get_fdata() - confirm the installed version before upgrading.
    data = np.array(funcImg.get_data())
    # Temporal SD per voxel (collapse the time axis).
    data = np.std(data, axis=3)
    X,Y,Z = data.shape
    if usePctl == True:
        # '%' range: use that percentile of the SD values as the colour max.
        rangeVal=np.percentile( np.reshape(data, (X*Y*Z,1)), rangeVal)
    data = nimg.new_img_like(funcImg, data)
    plot_brain_overlay(data, None, rangeVal, figDpi, slAxis=plane)
    plt.savefig(outFile)
    if saveNii == True:
        nib.save(data, outDir + '/tSD.nii')
# =============================================
#
# 4. Voxelwise global signal correlation
#
# =============================================
if plotType == 'globalcorr':
    funcImg = nib.load(funcImgFile)
    data = np.array(funcImg.get_data())
    globalSignal = data
    X,Y,Z,N = data.shape
    nVox = X*Y*Z
    # Flatten to (voxels, timepoints) for vectorised correlation.
    data = np.reshape(data, (X*Y*Z, N))
    # Temporal SD per voxel; used below to skip empty (zero-variance) voxels.
    dataMask = np.std( data, axis=1 )
    # NOTE(review): the result of this np.where is discarded, so the line is
    # a no-op. It looks like it was meant to zero sub-threshold values
    # (data = np.where(...)) - confirm the intent before changing it.
    np.where(np.abs(data)<thr, 0, data)
    # Mean over voxels at each timepoint = global signal time course.
    globalSignal = np.mean( np.reshape(globalSignal, (X*Y*Z, N)), axis=0 )
    corrMap = np.zeros((nVox, 1))
    # Correlate only non-constant voxels with the global signal.
    corrMap[dataMask>0,0] = vcorrcoef( data[dataMask>0, :], globalSignal )
    corrMap = np.reshape( corrMap, (X,Y,Z) )
    data = nimg.new_img_like(funcImg, corrMap)
    # For visualization purposes, slightly smooths image [5mm FWHM Gaussian kernel]
    #data = nimg.smooth_img(data, 5)
    if smooth > 0:
        data = nimg.smooth_img(data, smooth)
    plot_brain_overlay(data, bgImg, rangeVal, figDpi, slAxis=plane, thr=thr, is_stat=True)
    plt.savefig(outFile)
    if saveNii == True:
        nib.save(data, outDir + '/Global_Corr.nii')
if plotType == 'motioncorr':
    # Convert the motion estimates to a common convention (AFNI/FSL/SPM).
    rp = mh.convert_motion(np.loadtxt(rpFile), prog=args.prog)
    funcImg = nib.load(funcImgFile)
    data = np.array(funcImg.get_data())
    X,Y,Z,N = data.shape
    nVox = X*Y*Z
    # Flatten to (voxels, timepoints) for vectorised correlation.
    data = np.reshape(data, (X*Y*Z, N))
    dataMask = np.std( data, axis=1 )
    # NOTE(review): the result of this np.where is discarded - a no-op;
    # probably meant data = np.where(...). Confirm before changing.
    np.where(np.abs(data)<thr, 0, data)
    corrMapRp = np.zeros((nVox, 1))
    # Keep, per voxel, the correlation with the largest magnitude across
    # the 6 rigid-body motion parameters.
    for p in range(6):
        re = vcorrcoef( data[dataMask>0.5, :], rp[:,p] )
        tmp = corrMapRp[dataMask>0.5,0]
        comp = [ x>y for (x,y) in zip(abs(re), abs(tmp))]
        tmp = np.where( comp, re, tmp)
        corrMapRp[dataMask>0.5,0] = tmp
        #corrMapRp[dataMask>0.5,0] = vcorrcoef( data[dataMask>0.5, :], rp[:,p] )
    corrMapRp = np.reshape( corrMapRp, (X,Y,Z) )
    data = nimg.new_img_like(funcImg, corrMapRp)
    if smooth > 0:
        data = nimg.smooth_img(data, smooth)
    plot_brain_overlay(data, bgImg, rangeVal, figDpi, slAxis=plane, thr=thr, is_stat=True)
    plt.savefig(outFile)
    if saveNii == True:
        nib.save(data, outDir + '/Motion_Corr.nii')
|
<reponame>omerk2511/dropbox
from Tkinter import *
from common import Codes
from ..controllers import FileController # EditorController (?)
from ..handlers.data import Data
class Editors(Frame):
    """Tk frame that lists the editors of the current file and lets the
    user add or remove them via the FileController API."""

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        # Widgets that are (re)built on every refresh are tracked here so
        # initialize() can forget and recreate them.
        self.elements = {}
        title_frame = Frame(self)
        title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 20))
        self.elements['title'] = Label(title_frame, text='Editors',
            fg='#003399', font=('Arial', 28))
        self.elements['title'].pack(side=TOP)
        self.elements['editors_frame'] = Frame(self)
        self.elements['editors_frame'].pack(side=TOP, padx=120, pady=30,
            expand=False, fill=BOTH)
        self.elements['new_editor_frame'] = Frame(self)
        self.elements['new_editor_frame'].pack(side=TOP, padx=120, pady=30,
            expand=False, fill=BOTH)
        self.elements['editor_frames'] = []
        self.current_file = None

    def initialize(self):
        """Fetch the editor list for the current file and rebuild the UI.

        Called on first display and after every add/remove, so all
        dynamic widgets are forgotten and recreated from scratch.
        """
        self.current_file = Data().get_current_file()
        self.editors = FileController.get_file_editors(self.current_file, Data().get_token())
        # Drop the rows from the previous refresh.
        for editor_frame in self.elements['editor_frames']:
            editor_frame.pack_forget()
        self.elements['editor_frames'] = []
        # Re-pack the containers to restore the intended ordering.
        self.elements['editors_frame'].pack_forget()
        self.elements['editors_frame'].pack(side=TOP, padx=120, pady=30,
            expand=False, fill=BOTH)
        self.elements['new_editor_frame'].pack_forget()
        self.elements['new_editor_frame'].pack(side=TOP, padx=120, pady=(10, 30),
            expand=False, fill=BOTH)
        if not self.editors:
            # Placeholder row when the file has no editors yet.
            no_editors_label = Label(self.elements['editors_frame'], bg='gray',
                text='There are no editors for this file.', font=('Arial', 22), anchor='w')
            no_editors_label.pack(side=LEFT, expand=True, fill=X)
            self.elements['editor_frames'].append(no_editors_label)
        # One row per editor: "username (full name)" plus a Remove button.
        for editor in self.editors:
            editor_frame = Frame(self.elements['editors_frame'], bg='gray')
            editor_frame.pack(side=TOP, expand=False, fill=X, pady=10)
            editor_label = Label(editor_frame, font=('Arial', 18), bg='gray',
                text='%s (%s)' % (editor['user']['username'], editor['user']['full_name']))
            editor_label.pack(side=LEFT, padx=20, pady=10)
            remove_editor_button = Button(editor_frame, text='Remove',
                font=('Arial', 16), bg='#990000', fg='#ffffff', activebackground='#b30000',
                activeforeground='#ffffff', command=self.generate_remove_editor(editor['id']))
            remove_editor_button.pack(side=RIGHT, padx=20, pady=10)
            self.elements['editor_frames'].append(editor_frame)
        # Entry + button for adding a new editor by username.
        if 'editor_entry' in self.elements:
            self.elements['editor_entry'].pack_forget()
        self.elements['editor_entry'] = Entry(self.elements['new_editor_frame'],
            font=('Arial', 18))
        self.elements['editor_entry'].pack(side=LEFT, padx=(0, 10), expand=True, fill=BOTH)
        if 'add_editor_button' in self.elements:
            self.elements['add_editor_button'].pack_forget()
        self.elements['add_editor_button'] = Button(self.elements['new_editor_frame'],
            text='Add Editor', font=('Arial', 18), bg='#003399', activebackground='#002266',
            fg='#ffffff', activeforeground='#ffffff', command=self.add_editor)
        self.elements['add_editor_button'].pack(side=LEFT, expand=False, fill=X)

    def add_editor(self):
        """Add the username from the entry field as an editor of the file."""
        editor_username = self.elements['editor_entry'].get()
        self.elements['editor_entry'].delete(0, END)
        if not editor_username:
            self.parent.display_error('You have to enter an editor username.')
            return
        response = FileController.add_file_editor(self.current_file, editor_username,
            Data().get_token())
        if response.code == Codes.SUCCESS:
            self.parent.display_info('Editor added successfully!')
            self.initialize()
        else:
            self.parent.display_error(response.payload['message'])

    def generate_remove_editor(self, editor_id):
        # Bind editor_id now; a plain lambda in the loop above would
        # otherwise capture the loop variable late.
        return lambda: self.remove_editor(editor_id)

    def remove_editor(self, editor_id):
        """Remove the given editor from the file and refresh the list."""
        response = FileController.remove_file_editor(editor_id, Data().get_token())
        if response.code == Codes.SUCCESS:
            self.parent.display_info('Editor removed successfully!')
            self.initialize()
        else:
            self.parent.display_error(response.payload['message'])
#!/usr/bin/env python
"""
navigation using only machine learning model
@author: <NAME>
"""
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Imu
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from PIL import Image as IMM
from keras.models import load_model
from deep_vineyard.msg import tupla
import rospy
import numpy as np
import cv2, os
import threading
import tf.transformations as trans
class INIT():
    """Initialise the ROS node and the globals shared with the callbacks."""

    def __init__(self):
        rospy.init_node('ROS_API')
        # The subscriber callbacks publish their latest samples through
        # these module-level globals; pre-seed the image buffers.
        global ranges, pose, angular_velocity, linear_acceleration, image, imgRGB
        image = np.zeros((480, 640), np.uint8)
        imgRGB = np.zeros((224, 224, 3), np.uint8)
        # 224x224 for MobileNet, 299x299 for Xception
class OUTPUT():
    """Publishers for velocity commands and controller hand-off messages."""

    def __init__(self, cmd_vel='/cmd_vel'):
        self.pub_vel = rospy.Publisher(cmd_vel, Twist, queue_size=10)
        self.pub_tupla = rospy.Publisher('control_value', tupla, queue_size=10)
        # rate = rospy.Rate(2)
        self.vel = Twist()
        self.tupla = tupla()

    def Move(self, linear_x, angular_theta):
        '''
        Publish the components of velocity
        :param linear_x:
        :param angular_theta:
        :return: none
        '''
        velocity = self.vel
        velocity.linear.x = linear_x
        velocity.angular.z = angular_theta
        self.pub_vel.publish(velocity)

    def Communication(self, flag, controller_data):
        '''
        Publish the results for the controllers
        :param flag: 0 selects DEPTH, 1 selects ML
        :param controller_data: distance or prediction value
        :return: none
        '''
        message = self.tupla
        message.flag = flag
        message.control = controller_data
        self.pub_tupla.publish(message)
class INPUT(threading.Thread):
    """Background thread that subscribes to the robot's sensor topics.

    Each callback stores its latest sample in a module-level global
    (``ranges``, ``pose``, ``angular_velocity``/``linear_acceleration``,
    ``imgRGB``) so the main loop can always read the freshest value.
    """

    def __init__(self, scan='/scan', odom='/odom', imu='/imu', camera='/camera/aligned_depth_to_color/image_raw', cameraRGB='/camera/color/image_raw'):
        threading.Thread.__init__(self)
        self.scan = scan
        self.odom = odom
        self.imu = imu
        #self.camera = camera
        self.cameraRGB = cameraRGB

    def run(self):
        self.sub_lid = rospy.Subscriber(self.scan, LaserScan, self.Lidar)
        self.sub_odom = rospy.Subscriber(self.odom, Odometry, self.Odometry)
        # BUG FIX: this subscriber handle was previously also assigned to
        # ``self.sub_odom``, silently clobbering the odometry reference.
        self.sub_imu = rospy.Subscriber(self.imu, Imu, self.Imu)
        self.bridge = CvBridge()
        #self.image_sub = rospy.Subscriber(self.camera, Image, self.Camera)
        self.imageRGB_sub = rospy.Subscriber(self.cameraRGB, Image, self.CameraColor)
        rospy.spin()

    def Lidar(self, msg):
        '''
        Control the LiDAR inputs
        :param msg: LaserScan message
        :return: none (updates the global ``ranges``)
        '''
        global ranges
        ranges = msg.ranges

    def Odometry(self, msg):
        '''
        Control the odometry from the robot
        :param msg: Odometry message
        :return: none (updates the global ``pose`` = [coordinates, euler angles])
        '''
        global pose
        position = msg.pose.pose.position
        orientation = msg.pose.pose.orientation
        coordinates = [position.x, position.y, position.z]
        euler = list(trans.euler_from_quaternion(
            (orientation.x, orientation.y, orientation.z, orientation.w)
        ))
        pose = [coordinates, euler]  # np.asarray([coordinates, euler])
        # yaw = euler[2] * 180 / math.pi

    def Imu(self, msg):
        '''
        Control the Inertia Measurement Unit from the robot
        :param msg: Imu message
        :return: none (updates the globals ``angular_velocity`` and
                 ``linear_acceleration`` as numpy arrays)
        '''
        global angular_velocity, linear_acceleration
        angular_velocity = np.asarray([msg.angular_velocity.x,
                                       msg.angular_velocity.y,
                                       msg.angular_velocity.z])
        linear_acceleration = np.asarray([msg.linear_acceleration.x,
                                          msg.linear_acceleration.y,
                                          msg.linear_acceleration.z])

    def CameraColor(self, data):
        """Store the latest RGB frame, resized to the network input shape."""
        global imgRGB
        # resize img to network input shape
        imgRGB = cv2.resize(self.bridge.imgmsg_to_cv2(data, "bgr8"), (224,224))
#load the desired ML model
def deepVineyardModel(pathModel):
    """Load and return the trained Keras model stored at *pathModel*."""
    return load_model(pathModel)
if __name__ == '__main__':
    # model path definition (relative to this script)
    real_path = os.path.dirname(os.path.realpath(__file__))
    model = deepVineyardModel(os.path.join(real_path, 'models', 'MobileNet_final_retrained.h5'))
    # pre defined classes/labels
    classes = ['left', 'center', 'right']
    bridge = CvBridge()
    init = INIT()
    IN2 = INPUT(cameraRGB="/camera/color/image_raw")
    IN2.start()
    OUT = OUTPUT()
    while not rospy.is_shutdown():
        try:
            # BUG FIX: normalise a local copy instead of overwriting the
            # shared global. The original rebound ``imgRGB`` to
            # ``imgRGB/255.`` on every loop iteration, so the frame was
            # divided repeatedly whenever no new camera message had
            # arrived in between.
            frame = imgRGB / 255.
            y_pred = model.predict(frame[None, ...])    # adding one dimension
            ML_predict = np.argmax(y_pred, axis=-1)[0]  # reading model prediction
            OUT.Communication(1, ML_predict)            # SENDING COMMANDS to jackal_controller
            cv2.imshow('ROS API color', frame)
        except Exception as e:
            print(e)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC: stop the robot and leave the loop
            OUT.Move(0, 0)
            break
    #out.release()
    cv2.destroyAllWindows()
    rospy.signal_shutdown('Closing the program...')
|
<gh_stars>0
#!/usr/bin/python3
import os
import sys
import shutil
import click
import importlib.util
import atexit
from typing import Any
import yaml
import builtins
from requre.import_system import upgrade_import_system, UpgradeImportSystem
from requre.postprocessing import DictProcessing
from requre.storage import DataMiner, PersistentObjectStorage
from requre.constants import (
ENV_REPLACEMENT_FILE,
ENV_STORAGE_FILE,
ENV_DEBUG,
REPLACE_DEFAULT_KEY,
ENV_APPLY_LATENCY,
ENV_REPLACEMENT_NAME,
)
"""
This is command line tool for E2E and functional testing to enable requre
without access python code.
It apply itself as sitecustomize.py script
to user home path:
~/.local/lib/python{version}/site-packages
or to system path via option --system:
/usr/lib/python{version}/site-packages
when tool is installed call python code with enviroment variables:
RESPONSE_FILE - Storage file path for session recording.
In case file does not exists, it will use write mode
In case file exists, it will use read mode for Storage
REPLACEMENT_FILE - Replacement file path for import system.
It is important to have there set variable FILTERS what will
be used as replacements list for upgrade_import_system function.
For more details see doc: https://github.com/packit-service/requre/
REPLACEMENT_VAR - Overrides default value of variable in REPLACEMENT_FILE
what will be used as replacement variable.
DEBUG - if set, print debugging information, fi requre is applied
LATENCY - apply latency waits for test, to have simiar test timing
It is important when using some async/messaging calls
"""
FILE_NAME = "sitecustomize.py"
def debug_print(*args):
    """Emit a debug message to the real stderr, but only when ENV_DEBUG is set."""
    if not os.getenv(ENV_DEBUG):
        return
    print("REQURE DEBUG:", *args, file=sys.__stderr__)
def raise_error(ret_code: int, msg: Any):
    """
    When installed as sitecustomization.py, exceptions are not propagated to
    the main process: the process ends successfully although it printed a
    traceback. Exit hard instead, bypassing atexit handlers.

    :param ret_code: return code to exit with
    :param msg: message to write to stderr
    :return: None (never returns; the process exits)
    """
    # BUG FIX: the docstring promises stderr, but the message went to stdout.
    print(msg, file=sys.stderr)
    os._exit(ret_code)
def apply_fn():
    """
    This function is used when installed as a sitecustomize.py script
    to enable the replacing system. Set the env vars RESPONSE_FILE and
    REPLACEMENT_FILE; see the docstring of this file.
    """
    # file name of storage file
    storage_file = os.getenv(ENV_STORAGE_FILE)
    # file name of replaces for updated import system
    replacement_file = os.getenv(ENV_REPLACEMENT_FILE)
    if_latency = os.getenv(ENV_APPLY_LATENCY)
    # name of the variable to read from the replacement file (default key if unset)
    replacement_var = os.getenv(ENV_REPLACEMENT_NAME, REPLACE_DEFAULT_KEY)
    debug_print(
        f"You have patched version of your python by requre project "
        f"(python {sys.version_info.major}.{sys.version_info.minor}, {__file__}) "
    )
    if not (storage_file and replacement_file):
        # Both env vars are required; without them requre stays inactive.
        debug_print(
            f"\tYou have to set {ENV_STORAGE_FILE} and "
            f"{ENV_REPLACEMENT_FILE} env variables to work properly"
        )
    else:
        if not os.path.exists(replacement_file):
            raise FileExistsError(
                f"{replacement_file} has to exist to work properly "
                f"(python file with replacements definition)"
            )
        if if_latency:
            debug_print("Use latency for function calls")
            DataMiner().use_latency = True
        PersistentObjectStorage().storage_file = storage_file
        # Load the replacement definitions from the given path as a module.
        spec = importlib.util.spec_from_file_location("replacements", replacement_file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        if hasattr(module, replacement_var):
            replacement = getattr(module, replacement_var)
            debug_print(f"Replaces: {replacement}")
            if isinstance(replacement, UpgradeImportSystem):
                # NOTE(review): no action is taken for this type here -
                # presumably the object applies itself; confirm.
                debug_print(
                    f"{replacement_var} is {UpgradeImportSystem.__name__} object"
                )
            elif isinstance(replacement, list):
                debug_print(
                    f"{replacement_var} is list of replacements, apply upgrading"
                )
                upgrade_import_system(filters=replacement)
            else:
                raise_error(126, f"Bad type of {replacement_var}, see documentation")
        else:
            raise_error(
                125,
                f"in {replacement_file} there is not defined '{replacement_var}' variable",
            )
        # register dump command, when python finish
        atexit.register(PersistentObjectStorage().dump)
def get_current_python_version():
    """
    Get the version of the currently running python interpreter.

    :return: str in format X.Y
    """
    major, minor = sys.version_info[:2]
    return "{}.{}".format(major, minor)
def path_to_python_customize(version: str, global_path: bool = False):
    """
    Return the full path of the sitecustomize.py file for the given python
    version, either under the user's home (default) or system-wide.

    :param version: version string, e.g. "3.9"
    :param global_path: bool - apply it to the whole system
    :return: str with full path to the python file
    """
    if global_path:
        site_path = f"/usr/lib/python{version}/site-packages"
    else:
        site_path = os.path.expanduser(f"~/.local/lib/python{version}/site-packages")
    # Make sure the target directory exists before a file is copied there.
    os.makedirs(site_path, exist_ok=True)
    return os.path.join(site_path, FILE_NAME)
def patch_verify(pathname: str):
    """
    Tell whether the patch (sitecustomize file) is already installed.

    :param pathname: path to the sitecustomization file
    :return: bool - True when the file exists
    """
    return os.path.exists(pathname)
@click.group("requre")
@click.option(
"--version", default=get_current_python_version(), help="Version of python to patch"
)
@click.option(
"--system",
is_flag=True,
default=False,
help="Use system python path, instead of user home dir",
)
@click.pass_context
def requre_base(ctx, version, system):
ctx.obj = {FILE_NAME: path_to_python_customize(version=version, global_path=system)}
@requre_base.command()
@click.pass_context
def verify(ctx):
    # Report whether the sitecustomize.py patch is installed; a missing
    # patch is surfaced as a CLI error (non-zero exit code).
    if patch_verify(ctx.obj[FILE_NAME]):
        click.echo(f"Python patched (file: {ctx.obj[FILE_NAME]})")
    else:
        raise click.ClickException(f"Python not patched (file: {ctx.obj[FILE_NAME]})")
@requre_base.command()
@click.pass_context
def apply(ctx):
    # Install this very file as sitecustomize.py; refuse to overwrite an
    # existing patch so a previous installation is never clobbered.
    if patch_verify(ctx.obj[FILE_NAME]):
        raise click.ClickException(
            f"Python already patched (file: {ctx.obj[FILE_NAME]})"
        )
    else:
        click.echo(f"Applying import patch to python (file: {ctx.obj[FILE_NAME]})")
        shutil.copy(__file__, ctx.obj[FILE_NAME])
@requre_base.command()
@click.pass_context
def clean(ctx):
    # Remove the installed sitecustomize.py patch, if present.
    if patch_verify(ctx.obj[FILE_NAME]):
        os.remove(ctx.obj[FILE_NAME])
    else:
        raise click.ClickException(
            f"Patch not applied (file: {ctx.obj[FILE_NAME]}), nothing to do"
        )
@requre_base.command()
@click.option(
    "--replaces",
    help="match_string:key:type_of_value:value = Substitution query in format, "
    "where match_string is in format of selecting dictionary keys:"
    "selector1%selector2, type_of_value is some object what is serializable "
    "and part or builtins module (e.g. int)",
    multiple=True,
)
@click.argument("files", nargs=-1, type=click.File("r"))
@click.option(
    "--dry-run", is_flag=True, default=False, help="Do not write changes back"
)
@click.option(
    "--simplify",
    is_flag=True,
    default=False,
    help="Simplify dict structure if possible (experimental feature)",
)
def purge(replaces, files, dry_run, simplify):
    """Apply each replacement query to every given YAML storage file."""
    for storage_file in files:
        click.echo(f"Processing file: {storage_file.name}")
        content = yaml.safe_load(storage_file)
        processor = DictProcessing(content)
        for query in replaces:
            click.echo(f"\tTry to apply: {query}")
            selector_str, key, type_of_value, raw_value = query.split(":", 3)
            selectors = selector_str.split("%") if selector_str else []
            # retype the value using the named builtin (e.g. int, str)
            typed_value = getattr(builtins, type_of_value)(raw_value)
            for matched in processor.match(selector=selectors):
                click.echo(f"\t\tMatched {selectors}")
                processor.replace(obj=matched, key=key, value=typed_value)
        if simplify:
            processor.simplify()
        if not dry_run:
            click.echo(f"Writing content back to file: {storage_file.name}")
            with open(storage_file.name, mode="w") as outfile:
                outfile.write(yaml.safe_dump(content))
if __name__ == "__main__" or not (__file__ and __file__.endswith(FILE_NAME)):
requre_base()
else:
apply_fn()
|
<filename>leo.py<gh_stars>1-10
#!/usr/bin/python
import logging, sys
import RPi.GPIO as GPIO
import time
from LMSTools import LMSDiscovery, LMSServer, LMSPlayer
# Log WARNING and above to stdout.
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
# player control buttons (BCM GPIO pin numbers)
pin_play_pause = 25
pin_track_previous = 5
pin_track_next = 6
pin_volume_lower = 12
pin_volume_raise = 13
# shared playback state, mutated by the GPIO callbacks and main loop
playing = False
pause = False
# status led (BCM GPIO pin numbers)
pin_led_red = 17
pin_led_green = 27
led_blink_interval = 0.25 # in seconds
# iButton: sysfs file listing the 1-wire slaves currently present
w1_slave_dir = "/sys/devices/w1_bus_master1/w1_master_slaves"
active_button = False
# mac id of the wifi interface; used as the LMS player id below
mac_id_dir = "/sys/class/net/wlan0/address"
f = open(mac_id_dir, "r")
mac_id = f.read().strip()
f.close()
def doPlayPause(channel):
    """GPIO callback: start playback, or toggle between play and pause."""
    global playing, pause
    logging.debug("button play/pause")
    if not playing:
        # Nothing started yet: play the first playlist item for this player.
        playing = True
        logging.debug(">> play first playlist item")
        playButtonId()
    elif not pause:
        pause = True
        logging.debug(">> pause")
        player.pause()
    else:
        pause = False
        logging.debug(">> play")
        player.play()
def doTrackPrevious(channel):
    """GPIO callback: skip back to the previous track."""
    logging.debug ("button previous track")
    player.prev()

def doTrackNext(channel):
    """GPIO callback: skip to the next track."""
    logging.debug ("button next track")
    player.next()

def doVolumeLower(channel):
    """GPIO callback: decrease the playback volume."""
    logging.debug ("button lower volume")
    player.volume_down()

def doVolumeRaise(channel):
    """GPIO callback: increase the playback volume."""
    logging.debug ("button raise volume")
    player.volume_up()
def playButtonId(button_id=""):
global player, playing, active_button
# search for player name folder in favorites
r = searchFavorites("", player.name)
# strip obsolete id from beginning
id = r[0]["id"].split(".")[1]
# search for iButton id
r = searchFavorites(id, button_id)
if len (r) > 0:
# play returned item
playFavorite(r[0]["id"])
playing = True
active_button = button_id
def playFavorite(item_id):
    """Ask the LMS server to play the favourite with the given item id."""
    global player
    command = "favorites playlist play item_id:{}".format(item_id)
    player.request(command)
def searchFavorites(item_id, search=""):
global player
try:
response = player.parse_request("favorites items 0 item_id:{}".format(item_id), "loop_loop")
except:
response = []
favorites = []
for item in response:
if search == "" or search in item['name']:
favorites.append(item)
return favorites
def main():
    """Connect to the LMS server, wire up the GPIO buttons, then loop:
    mirror the player state on the status LED and poll for iButtons."""
    global player, playing, pause
    # LMSTools: discover the first LMS server on the network.
    servers = LMSDiscovery().all()
    logging.debug("LMS host = {}".format(servers[0]["host"]))
    logging.debug("LMS port = {}".format(servers[0]["port"]))
    server = LMSServer(servers[0]["host"], servers[0]["port"])
    logging.debug("MAC ID = {}".format(mac_id))
    # The wifi MAC address identifies this squeezebox player on the server.
    player = LMSPlayer(mac_id, server)
    logging.debug("Player name = {}".format(player.name))
    # GPIO settings: all buttons are inputs with pull-ups, LEDs are outputs.
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin_play_pause, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(pin_track_previous, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(pin_track_next, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(pin_volume_lower, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(pin_volume_raise, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(pin_led_red, GPIO.OUT)
    GPIO.setup(pin_led_green, GPIO.OUT)
    # Debounced edge callbacks for every button.
    GPIO.add_event_detect(pin_play_pause, GPIO.RISING, callback = doPlayPause, bouncetime = 200)
    GPIO.add_event_detect(pin_track_previous, GPIO.RISING, callback = doTrackPrevious, bouncetime = 200)
    GPIO.add_event_detect(pin_track_next, GPIO.RISING, callback = doTrackNext, bouncetime = 200)
    GPIO.add_event_detect(pin_volume_lower, GPIO.RISING, callback = doVolumeLower, bouncetime = 200)
    GPIO.add_event_detect(pin_volume_raise, GPIO.RISING, callback = doVolumeRaise, bouncetime = 200)
    while True:
        # init player state from the server each pass
        if player.mode == "play":
            playing = True
            pause = False
        elif player.mode == "pause":
            playing = True
            pause = True
        # status led
        # player ready: red led blinks slow (2x interval)
        if playing == False and pause == False:
            GPIO.output(pin_led_red, True)
            GPIO.output(pin_led_green, False)
            time.sleep(2*led_blink_interval)
            GPIO.output(pin_led_red, False)
            time.sleep(2*led_blink_interval)
        # player pause: green led blinks fast (1x interval)
        elif pause == True:
            GPIO.output(pin_led_red, False)
            GPIO.output(pin_led_green, True)
            time.sleep(led_blink_interval)
            GPIO.output(pin_led_green, False)
            time.sleep(led_blink_interval)
            GPIO.output(pin_led_green, True)
            time.sleep(led_blink_interval)
            GPIO.output(pin_led_green, False)
            time.sleep(led_blink_interval)
        # playing: green led stays on (just wait 4x interval)
        else:
            GPIO.output(pin_led_red, False)
            GPIO.output(pin_led_green, True)
            time.sleep(4*led_blink_interval)
        # detect iButton: the sysfs file lists the present 1-wire slave ids
        f = open(w1_slave_dir, "r")
        current_button = f.read().strip()
        f.close()
        if current_button != 'not found.':
            logging.debug("iButton ID = {}".format(current_button))
            if current_button != active_button:
                # a new button was placed: change track
                playButtonId(current_button)
# Run the main loop; always release the GPIO pins on exit, including Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        GPIO.cleanup()
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
#import helper functions
import helpers as hp
# ---------------------------------------------------------------------------
# Compare one dataset's parallel vs. serial pagerank runs: plot cumulative
# iteration times, mean speed-up vs. convergence delta, and the difference of
# the two pagerank vectors; finally print a timing summary to the console.
# ---------------------------------------------------------------------------

# Dataset under inspection and the result files produced by the two runs.
dataset = "web-Google-diades"
pagerankFilePath = "./results/pageranks/" + dataset + ".data"
serialPagerankFilePath = "./results/serial/pageranks/" + dataset + ".data"
logFilePath = "./results/logs/" + dataset + ".txt"
serialLogFilePath = "./results/serial/logs/" + dataset + ".txt"

plt.style.use('seaborn-deep')

# Parallel log: preparation timings, coloring info and per-iteration data.
dataPath, loadToCrsTime, makeStochasticTime, colorTime, colorGroups, iterationTimes, errorProgression = hp.loadParallelLogData(logFilePath)
# Serial log: same data minus the coloring step ("S" prefix = serial run).
SdataPath, SloadToCrsTime, SmakeStochasticTime, SiterationTimes, SerrorProgression = hp.loadSerialLogData(serialLogFilePath)

# 1-based iteration indices, shared x axis of the per-iteration plots.
x = np.arange(1, iterationTimes.size + 1, 1)

# Time plot
fig1 = plt.figure(1)
ax1 = fig1.subplots()
ax1.tick_params(axis='y')
# Consistency fix: draw BOTH series through ax1 — the parallel one previously
# went through plt.bar, relying on the implicit "current axes".
ax1.bar(x, np.cumsum(iterationTimes), alpha=0.6, label='Parallel (ms)')
ax1.bar(x, np.cumsum(SiterationTimes), alpha=0.4, label='Serial (ms)')
ax1.set_ylabel('Cumulative Time (ms)')
ax1.set_xlabel('Iteration #')
ax1.grid(True, linestyle='--', linewidth=0.2)
legend1 = ax1.legend(loc=0, shadow=True, title="Iteration cumulative Time")
plt.xlim([0, iterationTimes.size + 1])
plt.title('Pagerank calculation\'s times.')
plt.suptitle(dataPath, fontweight='bold')
plt.show()

# Speed up-convergence delta plot
fig2 = plt.figure(2)
ax2_1 = fig2.subplots()
## data
speed_ups = SiterationTimes / iterationTimes
# Running mean of the speed-up over the first k iterations.
meanSpeedUp = np.cumsum(speed_ups) / np.arange(1, speed_ups.size + 1)
ax2_1.plot(x, meanSpeedUp, label='mean speed up', linestyle='--')
ax2_1.set_xlabel('Iteration #')
ax2_1.set_ylabel('Mean speed up')
legend2_1 = ax2_1.legend(loc=0, shadow=True)
# Second y axis (log scale) carries the convergence delta of both runs.
ax2_2 = ax2_1.twinx()
ax2_2.semilogy()
ax2_2.tick_params(axis='y', colors='C2')
ax2_2.plot(x, errorProgression, color='C2', label='Parallel')
ax2_2.plot(x, SerrorProgression, color='C2', ls='', marker='*', label='Serial')
ax2_2.set_ylabel('convergence delta')
ax2_2.grid(True, color='C2', linestyle='--', linewidth=0.1)
legend2_2 = ax2_2.legend(loc=0, shadow=True, bbox_to_anchor=(1, 0.9), title="Convergence delta")
plt.xlim([0, iterationTimes.size + 1])
plt.suptitle(dataPath, fontweight='bold')
plt.title('How convergence_delta, speed_up and iteration_times relate.')
plt.show()

# Difference plot
## data
pr = np.fromfile(pagerankFilePath, dtype=float)
pr_cor = np.fromfile(serialPagerankFilePath, dtype=float)
fig3 = plt.figure(3)
ax3 = fig3.subplots()
# Element-wise difference between the two pagerank vectors; should be ~0.
ax3.plot(pr - pr_cor)
plt.xlim([0, pr.size])
plt.ylim(bottom=-1e-12, top=1e-12)
plt.title("Pagerank's vector difference between\nthe two implementations.")
plt.suptitle(dataPath, fontweight='bold')
plt.xlabel('index')
plt.ylabel('difference')
plt.show()

fig4 = plt.figure(4)
ax4 = fig4.subplots()
ax4.plot(pr)
plt.suptitle(dataPath, fontweight='bold')
plt.title("Pagerank vector.")
plt.xlabel('index')
plt.ylabel('pagerank')
plt.show()

# Console messages
prCalcTime = sum(iterationTimes)
# Typo fixed in the user-facing summary ("preperation" -> "preparation").
print("Data preparation time:\n\tLoad to memory = %0.2fms\n\tColor data in %d groups = %0.2fms\n\tMake matrix stochastic = %0.2fms vs %0.2fms" % (loadToCrsTime, colorGroups, colorTime, makeStochasticTime, SmakeStochasticTime))
print("Pagerank calculation time: %0.2fms" % prCalcTime)
print("Total Time = %0.2fms" % (prCalcTime + loadToCrsTime + makeStochasticTime + colorTime))
print("Pagerank vector sum = %0.2f" % (sum(pr)))
# Source repository: rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_monitor_client_enums import *
class MonitorDomain(msrest.serialization.Model):
    """Abstract common base shared by every telemetry domain type.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    """

    # Constraints checked by msrest before the payload is serialized.
    _validation = {
        "version": {"required": True},
    }

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "version": {"key": "ver", "type": "int"},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        additional_properties: Optional[Dict[str, object]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.version = version
        self.additional_properties = additional_properties
class AvailabilityData(MonitorDomain):
    """Instances of AvailabilityData represent the result of executing an availability test.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param id: Required. Identifier of a test run. Use it to correlate steps of test run and
     telemetry generated by the service.
    :type id: str
    :param name: Required. Name of the test that these availability results represent.
    :type name: str
    :param duration: Required. Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days.
    :type duration: str
    :param success: Required. Success flag.
    :type success: bool
    :param run_location: Name of the location where the test was run from.
    :type run_location: str
    :param message: Diagnostic message for the result.
    :type message: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'id': {'required': True, 'max_length': 512, 'min_length': 0},
        'name': {'required': True, 'max_length': 1024, 'min_length': 0},
        'duration': {'required': True},
        'success': {'required': True},
        'run_location': {'max_length': 1024, 'min_length': 0},
        'message': {'max_length': 8192, 'min_length': 0},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'str'},
        'success': {'key': 'success', 'type': 'bool'},
        'run_location': {'key': 'runLocation', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        id: str,
        name: str,
        duration: str,
        success: bool,
        additional_properties: Optional[Dict[str, object]] = None,
        run_location: Optional[str] = None,
        message: Optional[str] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        # Base class consumes additional_properties/version; the remaining
        # fields are availability-test specific.
        super(AvailabilityData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.id = id
        self.name = name
        self.duration = duration
        self.success = success
        self.run_location = run_location
        self.message = message
        self.properties = properties
        self.measurements = measurements
class MessageData(MonitorDomain):
    """A single free-form trace/log line.

    Instances of Message represent printf-like trace statements that are
    text-searched. Log4Net, NLog and other text-based log file entries are
    translated into instances of this type. The message does not have
    measurements.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param message: Required. Trace message.
    :type message: str
    :param severity_level: Trace severity level. Possible values include: "Verbose", "Information",
     "Warning", "Error", "Critical".
    :type severity_level: str or ~azure_monitor_client.models.SeverityLevel
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Validation rules applied by msrest before serialization.
    _validation = {
        "version": {"required": True},
        "message": {"required": True, "max_length": 32768, "min_length": 0},
    }

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "version": {"key": "ver", "type": "int"},
        "message": {"key": "message", "type": "str"},
        "severity_level": {"key": "severityLevel", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "measurements": {"key": "measurements", "type": "{float}"},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        message: str,
        additional_properties: Optional[Dict[str, object]] = None,
        severity_level: Optional[Union[str, "SeverityLevel"]] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ):
        super().__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.message = message
        self.severity_level = severity_level
        self.properties = properties
        self.measurements = measurements
class MetricDataPoint(msrest.serialization.Model):
    """One metric measurement, either a single sample or a pre-aggregated value.

    All required parameters must be populated in order to send to Azure.

    :param namespace: Namespace of the metric.
    :type namespace: str
    :param name: Required. Name of the metric.
    :type name: str
    :param data_point_type: Metric type. Single measurement or the aggregated value. Possible
     values include: "Measurement", "Aggregation".
    :type data_point_type: str or ~azure_monitor_client.models.DataPointType
    :param value: Required. Single value for measurement. Sum of individual measurements for the
     aggregation.
    :type value: float
    :param count: Metric weight of the aggregated metric. Should not be set for a measurement.
    :type count: int
    :param min: Minimum value of the aggregated metric. Should not be set for a measurement.
    :type min: float
    :param max: Maximum value of the aggregated metric. Should not be set for a measurement.
    :type max: float
    :param std_dev: Standard deviation of the aggregated metric. Should not be set for a
     measurement.
    :type std_dev: float
    """

    # Validation rules applied by msrest before serialization.
    _validation = {
        "namespace": {"max_length": 256, "min_length": 0},
        "name": {"required": True, "max_length": 1024, "min_length": 0},
        "value": {"required": True},
    }

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "namespace": {"key": "ns", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "data_point_type": {"key": "kind", "type": "str"},
        "value": {"key": "value", "type": "float"},
        "count": {"key": "count", "type": "int"},
        "min": {"key": "min", "type": "float"},
        "max": {"key": "max", "type": "float"},
        "std_dev": {"key": "stdDev", "type": "float"},
    }

    def __init__(
        self,
        *,
        name: str,
        value: float,
        namespace: Optional[str] = None,
        data_point_type: Optional[Union[str, "DataPointType"]] = None,
        count: Optional[int] = None,
        min: Optional[float] = None,
        max: Optional[float] = None,
        std_dev: Optional[float] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Required fields first, then the aggregation-only extras.
        self.name = name
        self.value = value
        self.namespace = namespace
        self.data_point_type = data_point_type
        self.count = count
        self.min = min
        self.max = max
        self.std_dev = std_dev
class MetricsData(MonitorDomain):
    """An instance of the Metric item is a list of measurements (single data points) and/or aggregations.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param metrics: Required. List of metrics. Only one metric in the list is currently supported
     by Application Insights storage. If multiple data points were sent only the first one will be
     used.
    :type metrics: list[~azure_monitor_client.models.MetricDataPoint]
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'metrics': {'required': True},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'metrics': {'key': 'metrics', 'type': '[MetricDataPoint]'},
        'properties': {'key': 'properties', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        metrics: List["MetricDataPoint"],
        additional_properties: Optional[Dict[str, object]] = None,
        properties: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> None:
        super(MetricsData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.metrics = metrics
        self.properties = properties
class MonitorBase(msrest.serialization.Model):
    """Envelope "C" section: the custom data payload plus its type tag.

    :param base_type: Name of item (B section) if any. If telemetry data is derived straight from
     this, this should be null.
    :type base_type: str
    :param base_data: The data payload for the telemetry request.
    :type base_data: ~azure_monitor_client.models.MonitorDomain
    """

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "base_type": {"key": "baseType", "type": "str"},
        "base_data": {"key": "baseData", "type": "MonitorDomain"},
    }

    def __init__(
        self,
        *,
        base_type: Optional[str] = None,
        base_data: Optional["MonitorDomain"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.base_type = base_type
        self.base_data = base_data
class PageViewData(MonitorDomain):
    """An instance of PageView represents a generic action on a page like a button click. It is also the base type for PageView.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param id: Required. Identifier of a page view instance. Used for correlation between page view
     and other telemetry items.
    :type id: str
    :param name: Required. Event name. Keep it low cardinality to allow proper grouping and useful
     metrics.
    :type name: str
    :param url: Request URL with all query string parameters.
    :type url: str
    :param duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view
     (PageViewData), this is the duration. For a page view with performance information
     (PageViewPerfData), this is the page load time. Must be less than 1000 days.
    :type duration: str
    :param referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave
     blank.
    :type referred_uri: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'id': {'required': True, 'max_length': 512, 'min_length': 0},
        'name': {'required': True, 'max_length': 1024, 'min_length': 0},
        'url': {'max_length': 2048, 'min_length': 0},
        'referred_uri': {'max_length': 2048, 'min_length': 0},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'str'},
        'referred_uri': {'key': 'referredUri', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        id: str,
        name: str,
        additional_properties: Optional[Dict[str, object]] = None,
        url: Optional[str] = None,
        duration: Optional[str] = None,
        referred_uri: Optional[str] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        super(PageViewData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.id = id
        self.name = name
        self.url = url
        self.duration = duration
        self.referred_uri = referred_uri
        self.properties = properties
        self.measurements = measurements
class PageViewPerfData(MonitorDomain):
    """An instance of PageViewPerf represents: a page view with no performance data, a page view with performance data, or just the performance data of an earlier page request.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param id: Required. Identifier of a page view instance. Used for correlation between page view
     and other telemetry items.
    :type id: str
    :param name: Required. Event name. Keep it low cardinality to allow proper grouping and useful
     metrics.
    :type name: str
    :param url: Request URL with all query string parameters.
    :type url: str
    :param duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view
     (PageViewData), this is the duration. For a page view with performance information
     (PageViewPerfData), this is the page load time. Must be less than 1000 days.
    :type duration: str
    :param perf_total: Performance total in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff.
    :type perf_total: str
    :param network_connect: Network connection time in TimeSpan 'G' (general long) format:
     d:hh:mm:ss.fffffff.
    :type network_connect: str
    :param sent_request: Sent request time in TimeSpan 'G' (general long) format:
     d:hh:mm:ss.fffffff.
    :type sent_request: str
    :param received_response: Received response time in TimeSpan 'G' (general long) format:
     d:hh:mm:ss.fffffff.
    :type received_response: str
    :param dom_processing: DOM processing time in TimeSpan 'G' (general long) format:
     d:hh:mm:ss.fffffff.
    :type dom_processing: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'id': {'required': True, 'max_length': 512, 'min_length': 0},
        'name': {'required': True, 'max_length': 1024, 'min_length': 0},
        'url': {'max_length': 2048, 'min_length': 0},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'str'},
        'perf_total': {'key': 'perfTotal', 'type': 'str'},
        'network_connect': {'key': 'networkConnect', 'type': 'str'},
        'sent_request': {'key': 'sentRequest', 'type': 'str'},
        'received_response': {'key': 'receivedResponse', 'type': 'str'},
        'dom_processing': {'key': 'domProcessing', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        id: str,
        name: str,
        additional_properties: Optional[Dict[str, object]] = None,
        url: Optional[str] = None,
        duration: Optional[str] = None,
        perf_total: Optional[str] = None,
        network_connect: Optional[str] = None,
        sent_request: Optional[str] = None,
        received_response: Optional[str] = None,
        dom_processing: Optional[str] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        super(PageViewPerfData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.id = id
        self.name = name
        self.url = url
        self.duration = duration
        self.perf_total = perf_total
        self.network_connect = network_connect
        self.sent_request = sent_request
        self.received_response = received_response
        self.dom_processing = dom_processing
        self.properties = properties
        self.measurements = measurements
class RemoteDependencyData(MonitorDomain):
    """An instance of Remote Dependency represents an interaction of the monitored component with a remote component/service like SQL or an HTTP endpoint.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param id: Identifier of a dependency call instance. Used for correlation with the request
     telemetry item corresponding to this dependency call.
    :type id: str
    :param name: Required. Name of the command initiated with this dependency call. Low cardinality
     value. Examples are stored procedure name and URL path template.
    :type name: str
    :param result_code: Result code of a dependency call. Examples are SQL error code and HTTP
     status code.
    :type result_code: str
    :param data: Command initiated by this dependency call. Examples are SQL statement and HTTP URL
     with all query parameters.
    :type data: str
    :param type: Dependency type name. Very low cardinality value for logical grouping of
     dependencies and interpretation of other fields like commandName and resultCode. Examples are
     SQL, Azure table, and HTTP.
    :type type: str
    :param target: Target site of a dependency call. Examples are server name, host address.
    :type target: str
    :param duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than
     1000 days.
    :type duration: str
    :param success: Indication of successful or unsuccessful call.
    :type success: bool
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'id': {'max_length': 512, 'min_length': 0},
        'name': {'required': True, 'max_length': 1024, 'min_length': 0},
        'result_code': {'max_length': 1024, 'min_length': 0},
        'data': {'max_length': 8192, 'min_length': 0},
        'type': {'max_length': 1024, 'min_length': 0},
        'target': {'max_length': 1024, 'min_length': 0},
        'duration': {'required': True},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'result_code': {'key': 'resultCode', 'type': 'str'},
        'data': {'key': 'data', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'str'},
        'success': {'key': 'success', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        name: str,
        duration: str,
        additional_properties: Optional[Dict[str, object]] = None,
        id: Optional[str] = None,
        result_code: Optional[str] = None,
        data: Optional[str] = None,
        type: Optional[str] = None,
        target: Optional[str] = None,
        success: Optional[bool] = True,  # note: defaults to True, not None
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        super(RemoteDependencyData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.id = id
        self.name = name
        self.result_code = result_code
        self.data = data
        self.type = type
        self.target = target
        self.duration = duration
        self.success = success
        self.properties = properties
        self.measurements = measurements
class RequestData(MonitorDomain):
    """An instance of Request represents completion of an external request to the application to do work and contains a summary of that request execution and the results.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param id: Required. Identifier of a request call instance. Used for correlation between
     request and other telemetry items.
    :type id: str
    :param name: Name of the request. Represents code path taken to process request. Low
     cardinality value to allow better grouping of requests. For HTTP requests it represents the
     HTTP method and URL path template like 'GET /values/{id}'.
    :type name: str
    :param duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than
     1000 days.
    :type duration: str
    :param success: Required. Indication of successful or unsuccessful call.
    :type success: bool
    :param response_code: Required. Result of a request execution. HTTP status code for HTTP
     requests.
    :type response_code: str
    :param source: Source of the request. Examples are the instrumentation key of the caller or the
     ip address of the caller.
    :type source: str
    :param url: Request URL with all query string parameters.
    :type url: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'id': {'required': True, 'max_length': 512, 'min_length': 0},
        'name': {'max_length': 1024, 'min_length': 0},
        'duration': {'required': True},
        'success': {'required': True},
        'response_code': {'required': True, 'max_length': 1024, 'min_length': 0},
        'source': {'max_length': 1024, 'min_length': 0},
        'url': {'max_length': 2048, 'min_length': 0},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'duration': {'key': 'duration', 'type': 'str'},
        'success': {'key': 'success', 'type': 'bool'},
        'response_code': {'key': 'responseCode', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        id: str,
        duration: str,
        success: bool = True,  # required on the wire, but defaulted here
        response_code: str,
        additional_properties: Optional[Dict[str, object]] = None,
        name: Optional[str] = None,
        source: Optional[str] = None,
        url: Optional[str] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        super(RequestData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.id = id
        self.name = name
        self.duration = duration
        self.success = success
        self.response_code = response_code
        self.source = source
        self.url = url
        self.properties = properties
        self.measurements = measurements
class StackFrame(msrest.serialization.Model):
    """One frame of a parsed exception call stack.

    All required parameters must be populated in order to send to Azure.

    :param level: Required.
    :type level: int
    :param method: Required. Method name.
    :type method: str
    :param assembly: Name of the assembly (dll, jar, etc.) containing this function.
    :type assembly: str
    :param file_name: File name or URL of the method implementation.
    :type file_name: str
    :param line: Line number of the code implementation.
    :type line: int
    """

    # Validation rules applied by msrest before serialization.
    _validation = {
        "level": {"required": True},
        "method": {"required": True, "max_length": 1024, "min_length": 0},
        "assembly": {"max_length": 1024, "min_length": 0},
        "file_name": {"max_length": 1024, "min_length": 0},
    }

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "level": {"key": "level", "type": "int"},
        "method": {"key": "method", "type": "str"},
        "assembly": {"key": "assembly", "type": "str"},
        "file_name": {"key": "fileName", "type": "str"},
        "line": {"key": "line", "type": "int"},
    }

    def __init__(
        self,
        *,
        level: int,
        method: str,
        assembly: Optional[str] = None,
        file_name: Optional[str] = None,
        line: Optional[int] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.level = level
        self.method = method
        self.assembly = assembly
        self.file_name = file_name
        self.line = line
class TelemetryErrorDetails(msrest.serialization.Model):
    """Details for one telemetry item rejected by the ingestion endpoint.

    :param index: The index in the original payload of the item.
    :type index: int
    :param status_code: The item specific `HTTP Response status code <#Response Status Codes>`_.
    :type status_code: int
    :param message: The error message.
    :type message: str
    """

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "index": {"key": "index", "type": "int"},
        "status_code": {"key": "statusCode", "type": "int"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(
        self,
        *,
        index: Optional[int] = None,
        status_code: Optional[int] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.index = index
        self.status_code = status_code
        self.message = message
class TelemetryEventData(MonitorDomain):
    """A structured, named custom event.

    Instances of Event represent structured event records that can be grouped
    and searched by their properties. Event data item also creates a metric of
    event count by name.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param name: Required. Event name. Keep it low cardinality to allow proper grouping and useful
     metrics.
    :type name: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Validation rules applied by msrest before serialization.
    _validation = {
        "version": {"required": True},
        "name": {"required": True, "max_length": 512, "min_length": 0},
    }

    # Attribute -> wire name ("key") plus msrest type string.
    _attribute_map = {
        "additional_properties": {"key": "", "type": "{object}"},
        "version": {"key": "ver", "type": "int"},
        "name": {"key": "name", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "measurements": {"key": "measurements", "type": "{float}"},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        name: str,
        additional_properties: Optional[Dict[str, object]] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ):
        super().__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.name = name
        self.properties = properties
        self.measurements = measurements
class TelemetryExceptionData(MonitorDomain):
    """An instance of Exception represents a handled or unhandled exception that occurred during execution of the monitored application.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param version: Required. Schema version.
    :type version: int
    :param exceptions: Required. Exception chain - list of inner exceptions.
    :type exceptions: list[~azure_monitor_client.models.TelemetryExceptionDetails]
    :param severity_level: Severity level. Mostly used to indicate exception severity level when it
     is reported by logging library. Possible values include: "Verbose", "Information", "Warning",
     "Error", "Critical".
    :type severity_level: str or ~azure_monitor_client.models.SeverityLevel
    :param problem_id: Identifier of where the exception was thrown in code. Used for exceptions
     grouping. Typically a combination of exception type and a function from the call stack.
    :type problem_id: str
    :param properties: Collection of custom properties.
    :type properties: dict[str, str]
    :param measurements: Collection of custom measurements.
    :type measurements: dict[str, float]
    """

    # Constraints enforced by msrest when the model is validated/serialized.
    _validation = {
        'version': {'required': True},
        'exceptions': {'required': True},
        'problem_id': {'max_length': 1024, 'min_length': 0},
    }

    # Maps each attribute to its wire name ('key') and msrest type string.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'version': {'key': 'ver', 'type': 'int'},
        'exceptions': {'key': 'exceptions', 'type': '[TelemetryExceptionDetails]'},
        'severity_level': {'key': 'severityLevel', 'type': 'str'},
        'problem_id': {'key': 'problemId', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'measurements': {'key': 'measurements', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        version: int = 2,
        exceptions: List["TelemetryExceptionDetails"],
        additional_properties: Optional[Dict[str, object]] = None,
        severity_level: Optional[Union[str, "SeverityLevel"]] = None,
        problem_id: Optional[str] = None,
        properties: Optional[Dict[str, str]] = None,
        measurements: Optional[Dict[str, float]] = None,
        **kwargs
    ) -> None:
        super(TelemetryExceptionData, self).__init__(additional_properties=additional_properties, version=version, **kwargs)
        self.exceptions = exceptions
        self.severity_level = severity_level
        self.problem_id = problem_id
        self.properties = properties
        self.measurements = measurements
class TelemetryExceptionDetails(msrest.serialization.Model):
    """Exception details of the exception in a chain.

    All required parameters must be populated in order to send to Azure.

    :param id: In case exception is nested (outer exception contains inner one), the id and outerId
     properties are used to represent the nesting.
    :type id: int
    :param outer_id: The value of outerId is a reference to an element in ExceptionDetails that
     represents the outer exception.
    :type outer_id: int
    :param type_name: Exception type name.
    :type type_name: str
    :param message: Required. Exception message.
    :type message: str
    :param has_full_stack: Indicates if full exception stack is provided in the exception. The
     stack may be trimmed, such as in the case of a StackOverflow exception.
    :type has_full_stack: bool
    :param stack: Text describing the stack. Either stack or parsedStack should have a value.
    :type stack: str
    :param parsed_stack: List of stack frames. Either stack or parsedStack should have a value.
    :type parsed_stack: list[~azure_monitor_client.models.StackFrame]
    """
    # Length limits mirror the Application Insights ingestion schema.
    _validation = {
        'type_name': {'max_length': 1024, 'min_length': 0},
        'message': {'required': True, 'max_length': 32768, 'min_length': 0},
        'stack': {'max_length': 32768, 'min_length': 0},
    }
    # Python attribute -> wire name / msrest type (autorest-generated table).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'outer_id': {'key': 'outerId', 'type': 'int'},
        'type_name': {'key': 'typeName', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'has_full_stack': {'key': 'hasFullStack', 'type': 'bool'},
        'stack': {'key': 'stack', 'type': 'str'},
        'parsed_stack': {'key': 'parsedStack', 'type': '[StackFrame]'},
    }
    def __init__(
        self,
        *,
        message: str,
        id: Optional[int] = None,
        outer_id: Optional[int] = None,
        type_name: Optional[str] = None,
        has_full_stack: Optional[bool] = True,
        stack: Optional[str] = None,
        parsed_stack: Optional[List["StackFrame"]] = None,
        **kwargs
    ):
        super(TelemetryExceptionDetails, self).__init__(**kwargs)
        self.id = id
        self.outer_id = outer_id
        self.type_name = type_name
        self.message = message
        self.has_full_stack = has_full_stack
        self.stack = stack
        self.parsed_stack = parsed_stack
class TelemetryItem(msrest.serialization.Model):
    """System variables for a telemetry item.

    All required parameters must be populated in order to send to Azure.

    :param version: Envelope version. For internal use only. By assigning this the default, it will
     not be serialized within the payload unless changed to a value other than #1.
    :type version: int
    :param name: Required. Type name of telemetry data item.
    :type name: str
    :param time: Required. Event date time when telemetry item was created. This is the wall clock
     time on the client when the event was generated. There is no guarantee that the client's time
     is accurate. This field must be formatted in UTC ISO 8601 format, with a trailing 'Z'
     character, as described publicly on https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the
     number of decimal seconds digits provided are variable (and unspecified). Consumers should
     handle this, i.e. managed code consumers should not use format 'O' for parsing as it specifies
     a fixed length. Example: 2009-06-15T13:45:30.0000000Z.
    :type time: ~datetime.datetime
    :param sample_rate: Sampling rate used in application. This telemetry item represents 1 /
     sampleRate actual telemetry items.
    :type sample_rate: float
    :param sequence: Sequence field used to track absolute order of uploaded events.
    :type sequence: str
    :param instrumentation_key: The instrumentation key of the Application Insights resource.
    :type instrumentation_key: str
    :param tags: A set of tags. Key/value collection of context properties. See ContextTagKeys for
     information on available properties.
    :type tags: dict[str, str]
    :param data: Telemetry data item.
    :type data: ~azure_monitor_client.models.MonitorBase
    """
    _validation = {
        'name': {'required': True},
        'time': {'required': True},
        'sequence': {'max_length': 64, 'min_length': 0},
    }
    # Python attribute -> wire name / msrest type; 'time' is serialized as
    # ISO-8601 text, everything else as plain JSON values.
    _attribute_map = {
        'version': {'key': 'ver', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
        'sample_rate': {'key': 'sampleRate', 'type': 'float'},
        'sequence': {'key': 'seq', 'type': 'str'},
        'instrumentation_key': {'key': 'iKey', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'data': {'key': 'data', 'type': 'MonitorBase'},
    }
    def __init__(
        self,
        *,
        name: str,
        time: datetime.datetime,
        version: Optional[int] = 1,
        sample_rate: Optional[float] = 100,
        sequence: Optional[str] = None,
        instrumentation_key: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        data: Optional["MonitorBase"] = None,
        **kwargs
    ):
        super(TelemetryItem, self).__init__(**kwargs)
        self.version = version
        self.name = name
        self.time = time
        self.sample_rate = sample_rate
        self.sequence = sequence
        self.instrumentation_key = instrumentation_key
        self.tags = tags
        self.data = data
class TrackResponse(msrest.serialization.Model):
    """Response containing the status of each telemetry item.

    :param items_received: The number of items received.
    :type items_received: int
    :param items_accepted: The number of items accepted.
    :type items_accepted: int
    :param errors: An array of error detail objects.
    :type errors: list[~azure_monitor_client.models.TelemetryErrorDetails]
    """
    # Python attribute -> wire name / msrest type (autorest-generated table).
    _attribute_map = {
        'items_received': {'key': 'itemsReceived', 'type': 'int'},
        'items_accepted': {'key': 'itemsAccepted', 'type': 'int'},
        'errors': {'key': 'errors', 'type': '[TelemetryErrorDetails]'},
    }
    def __init__(
        self,
        *,
        items_received: Optional[int] = None,
        items_accepted: Optional[int] = None,
        errors: Optional[List["TelemetryErrorDetails"]] = None,
        **kwargs
    ):
        super(TrackResponse, self).__init__(**kwargs)
        self.items_received = items_received
        self.items_accepted = items_accepted
        self.errors = errors
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the localization app: entities, locales, projects,
    resources, per-locale stats, subpages, translations and user profiles.

    NOTE: generated by Django's ``makemigrations``; keep the operation list
    byte-exact. The ``b"..."`` literals are a Python 2 era artifact.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A translatable string extracted from a resource file.
        migrations.CreateModel(
            name="Entity",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("string", models.TextField()),
                ("string_plural", models.TextField(blank=True)),
                ("key", models.TextField(blank=True)),
                ("comment", models.TextField(blank=True)),
                ("order", models.PositiveIntegerField(default=0)),
                ("source", models.TextField(blank=True)),
                ("obsolete", models.BooleanField(default=False)),
            ],
        ),
        # A target language/culture, including plural-rule metadata.
        migrations.CreateModel(
            name="Locale",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("code", models.CharField(unique=True, max_length=20)),
                ("name", models.CharField(max_length=128)),
                ("nplurals", models.SmallIntegerField(null=True, blank=True)),
                ("plural_rule", models.CharField(max_length=128, blank=True)),
            ],
        ),
        # A localizable product tied to a VCS or Transifex repository.
        migrations.CreateModel(
            name="Project",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("name", models.CharField(unique=True, max_length=128)),
                ("slug", models.SlugField(unique=True)),
                (
                    "repository_type",
                    models.CharField(
                        default=b"File",
                        max_length=20,
                        verbose_name=b"Type",
                        choices=[
                            (b"file", b"File"),
                            (b"git", b"Git"),
                            (b"hg", b"HG"),
                            (b"svn", b"SVN"),
                            (b"transifex", b"Transifex"),
                        ],
                    ),
                ),
                (
                    "repository_url",
                    models.CharField(max_length=2000, verbose_name=b"URL", blank=True),
                ),
                ("repository_path", models.TextField(blank=True)),
                (
                    "transifex_project",
                    models.CharField(
                        max_length=128, verbose_name=b"Project", blank=True
                    ),
                ),
                (
                    "transifex_resource",
                    models.CharField(
                        max_length=128, verbose_name=b"Resource", blank=True
                    ),
                ),
                (
                    "info_brief",
                    models.TextField(verbose_name=b"Project info", blank=True),
                ),
                ("url", models.URLField(verbose_name=b"URL", blank=True)),
                (
                    "width",
                    models.PositiveIntegerField(
                        null=True,
                        verbose_name=b"Default website (iframe) width in pixels. If set, sidebar will be opened by default.",
                        blank=True,
                    ),
                ),
                (
                    "links",
                    models.BooleanField(
                        verbose_name=b"Keep links on the project website clickable"
                    ),
                ),
                ("disabled", models.BooleanField(default=False)),
                ("locales", models.ManyToManyField(to="base.Locale")),
            ],
            options={
                "permissions": (
                    ("can_manage", "Can manage projects"),
                    ("can_localize", "Can localize projects"),
                ),
            },
        ),
        # One translatable file within a project.
        migrations.CreateModel(
            name="Resource",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("path", models.TextField()),
                ("entity_count", models.PositiveIntegerField(default=0)),
                (
                    "format",
                    models.CharField(
                        blank=True,
                        max_length=20,
                        verbose_name=b"Format",
                        choices=[
                            (b"po", b"po"),
                            (b"properties", b"properties"),
                            (b"dtd", b"dtd"),
                            (b"ini", b"ini"),
                            (b"lang", b"lang"),
                        ],
                    ),
                ),
                ("project", models.ForeignKey(to="base.Project")),
            ],
        ),
        # Cached translation-progress counters per (resource, locale).
        migrations.CreateModel(
            name="Stats",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("translated_count", models.PositiveIntegerField(default=0)),
                ("approved_count", models.PositiveIntegerField(default=0)),
                ("fuzzy_count", models.PositiveIntegerField(default=0)),
                ("locale", models.ForeignKey(to="base.Locale")),
                ("resource", models.ForeignKey(to="base.Resource")),
            ],
        ),
        # A named URL within a project's website.
        migrations.CreateModel(
            name="Subpage",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("name", models.CharField(max_length=128)),
                ("url", models.URLField(verbose_name=b"URL", blank=True)),
                ("project", models.ForeignKey(to="base.Project")),
            ],
        ),
        # A suggested or approved translation of an entity into a locale.
        migrations.CreateModel(
            name="Translation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("string", models.TextField()),
                ("plural_form", models.SmallIntegerField(null=True, blank=True)),
                ("date", models.DateTimeField()),
                ("approved", models.BooleanField(default=False)),
                ("approved_date", models.DateTimeField(null=True, blank=True)),
                ("fuzzy", models.BooleanField(default=False)),
                (
                    "approved_user",
                    models.ForeignKey(
                        related_name="approvers",
                        blank=True,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                ("entity", models.ForeignKey(to="base.Entity")),
                ("locale", models.ForeignKey(to="base.Locale")),
                (
                    "user",
                    models.ForeignKey(
                        blank=True, to=settings.AUTH_USER_MODEL, null=True
                    ),
                ),
            ],
        ),
        # Per-user credentials/settings for external services.
        migrations.CreateModel(
            name="UserProfile",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("transifex_username", models.CharField(max_length=40, blank=True)),
                ("transifex_password", models.CharField(max_length=128, blank=True)),
                ("svn_username", models.CharField(max_length=40, blank=True)),
                ("svn_password", models.CharField(max_length=128, blank=True)),
                ("quality_checks", models.BooleanField(default=True)),
                ("user", models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after the fact because Resource is created after Entity.
        migrations.AddField(
            model_name="entity",
            name="resource",
            field=models.ForeignKey(to="base.Resource"),
        ),
    ]
|
<filename>source/classify/evaluation.py
import numpy as np
from sklearn import svm
from sklearn.metrics import confusion_matrix, roc_auc_score
# Randomly permutes and splits samples and labels, in equal proportion
# per category, by a factor of "fact".
# Similarity tuple:
# (S1, S2, ..., Sk) where k is the number of categories
# and S1.size = n1 x (n1 + ... + nk)
#     S2.size = n2 x (n1 + ... + nk)
#     . . . .
#     Sk.size = nk x (n1 + ... + nk)
# where ni is the number of samples in the ith category;
# concat(S1, ..., Sk) = SimilarityMatrix
#
# Labels tuple:
# (L1, ..., Lk): where Li is an ni x 1 matrix with all elements
# equal to the category number corresponding to Si
def randPerm(Similarity_Touple,LabelesTouple,fact=0.8):
    """Randomly split a block similarity matrix into train/test partitions.

    Each category contributes a ``fact`` fraction of its (shuffled) samples
    to the training split and the remainder to the testing split.

    Returns (training, training_labels, testing, testing_labels), where
    ``training`` is the similarity of training samples to each other and
    ``testing`` is the similarity of testing samples to the training set.
    """
    train_parts, test_parts = [], []
    sim_parts, label_parts = [], []
    offset = 0
    for sim, lab in zip(Similarity_Touple, LabelesTouple):
        n_samples = sim.shape[0]
        # Shuffle this category's rows, then shift into the global row space
        # (rows of category i start at the combined size of categories < i).
        shuffled = np.random.permutation(n_samples) + offset
        cut = int(n_samples * fact)
        train_parts.append(shuffled[:cut])
        test_parts.append(shuffled[cut:])
        sim_parts.append(sim)
        label_parts.append(lab)
        offset += n_samples
    training_idx = np.concatenate(train_parts)
    testing_idx = np.concatenate(test_parts)
    Similarity = np.concatenate(sim_parts, axis=0)
    Labels = np.concatenate(label_parts, axis=0)
    # Kernel-style slicing: rows select the split, columns are always the
    # training indices.
    training = Similarity[training_idx, :][:, training_idx]
    testing = Similarity[testing_idx, :][:, training_idx]
    return training, Labels[training_idx], testing, Labels[testing_idx]
# Calculate TP,FP,TN,FN
def calculateTFNP(ConfusionMatrix):
    """Return per-class (FP, FN, TP, TN) counts from a confusion matrix.

    Follows the sklearn convention: rows are true classes, columns are
    predicted classes. All four arrays are float, one entry per class.
    """
    total = np.sum(ConfusionMatrix)
    # Diagonal = correct predictions for each class.
    TP = np.diag(ConfusionMatrix).astype(float)
    # Column sum minus diagonal: predicted as class i but actually another.
    FP = np.sum(ConfusionMatrix, axis=0).astype(float) - TP
    # Row sum minus diagonal: actually class i but predicted as another.
    FN = np.sum(ConfusionMatrix, axis=1).astype(float) - TP
    # BUG FIX: the original computed ``s - TP[i] - FP[i] - TN[i]`` where
    # TN[i] was still 0, i.e. it subtracted nothing instead of FN[i], so TN
    # was overcounted by the false negatives.
    TN = total - TP - FP - FN
    return FP, FN, TP, TN
# has dummy indicates if class 0
# signifies a dummy (negative for all)
# class
def CalculateMetrics(ConfusionMatrix,has_dummy=False):
    """Derive per-class counts plus micro- and macro-averaged metrics.

    ``has_dummy=True`` drops class 0 (a catch-all negative class) from every
    count before averaging. Key 'specifity' keeps its historical spelling.
    """
    FP, FN, TP, TN = calculateTFNP(ConfusionMatrix)
    metrics = dict()
    if has_dummy:
        # Discard the dummy class's row of counts.
        TP = TP[1:]
        TN = TN[1:]
        FP = FP[1:]
        FN = FN[1:]
    metrics['TP'] = TP
    metrics['FP'] = FP
    metrics['FN'] = FN
    metrics['TN'] = TN
    # Micro-average: pool all counts first, then compute each ratio once.
    tp_total = float(np.sum(TP, axis=0))
    fn_total = float(np.sum(FN, axis=0))
    tn_total = float(np.sum(TN, axis=0))
    fp_total = float(np.sum(FP, axis=0))
    metrics['microaverage'] = {
        'recall': np.divide(tp_total, tp_total + fn_total),
        'specifity': np.divide(tn_total, tn_total + fp_total),
        'precision': np.divide(tp_total, tp_total + fp_total),
        'negative_predictive_value': np.divide(tn_total, tn_total + fn_total),
        'fall_out': np.divide(fp_total, fp_total + tn_total),
        'false_negative_rate': np.divide(fn_total, tp_total + fn_total),
        'false_discovery_rate': np.divide(fp_total, tp_total + fp_total),
        'Fmeasure': np.divide(2 * tp_total, 2 * tp_total + fn_total + fp_total),
        'accuracy': np.divide(tp_total + tn_total, tp_total + fp_total + fn_total + tn_total),
    }
    # Macro-average: compute the ratio per class, then take the mean.
    def per_class_mean(num, den):
        return np.mean(np.divide(num, 1.0 * den))
    metrics['macroaverage'] = {
        'recall': per_class_mean(TP, np.add(TP, FN)),
        'specifity': per_class_mean(TN, np.add(TN, FP)),
        'precision': per_class_mean(TP, np.add(TP, FP)),
        'negative_predictive_value': per_class_mean(TN, np.add(TN, FN)),
        'fall_out': per_class_mean(FP, np.add(FP, TN)),
        'false_negative_rate': per_class_mean(FN, np.add(TP, FN)),
        'false_discovery_rate': per_class_mean(FP, np.add(TP, FP)),
        'Fmeasure': per_class_mean(2 * TP, np.add(np.add(2 * TP, FN), FP)),
        'accuracy': per_class_mean(np.add(TP, TN), np.add(np.add(np.add(TP, FP), FN), TN)),
    }
    return metrics
def displayMetrics(Metrics,case = 1):
    """Pretty-print the output of CalculateMetrics().

    case <= 1 prints the micro-averaged block, case >= 1 the macro-averaged
    block; case == 1 (the default) prints both.
    NOTE: Python 2 print statements -- this module is Python 2 only.
    """
    if(case<=1):
        metrics = Metrics['microaverage']
        print "Microaverage Metrics.. \n"
        print "Sensitivity: ",metrics['recall']
        print "Specifity: ",metrics['specifity']
        print "Precision: ",metrics['precision']
        print "Negative predictive value: ", metrics['negative_predictive_value']
        print "Fall out: ",metrics['fall_out']
        print "False negative rate: ", metrics['false_negative_rate']
        print "False discovery rate: ", metrics['false_discovery_rate']
        print "Fmeasure: ", metrics['Fmeasure']
        print "Accuracy: ", metrics['accuracy']
    if(case>=1):
        metrics = Metrics['macroaverage']
        print "\nMacroaverage Metrics.. \n"
        print "Sensitivity: ",metrics['recall']
        print "Specifity: ",metrics['specifity']
        print "Precision: ",metrics['precision']
        print "Negative predictive value: ", metrics['negative_predictive_value']
        print "Fall out: ",metrics['fall_out']
        print "False negative rate: ", metrics['false_negative_rate']
        print "False discovery rate: ", metrics['false_discovery_rate']
        print "Fmeasure: ", metrics['Fmeasure']
        print "Accuracy: ", metrics['accuracy']
class Evaluator:
    """Run classification experiments over precomputed similarity matrices.

    The wrapped classifier is duck-typed: it must expose ``learn_mat``,
    ``classify`` and (for AUC) ``predict_prob`` -- TODO confirm against the
    project's classifier interface.
    """
    def __init__(self,classifier):
        self._Classifier = classifier
    # calculates AUC (binary problems: uses the probability of class 1)
    def AUC(self,training,training_labels,testing,testing_labels):
        classifier = self._Classifier
        classifier.learn_mat(training,training_labels,probability = True)
        probabilities = classifier.predict_prob(testing)
        return roc_auc_score(testing_labels, probabilities[:,1])
    # single classification experiment
    # has_dummy is forwarded to CalculateMetrics (class 0 = dummy class)
    def single(self,training,training_labels,testing,testing_labels,calculate_metrics = True, has_dummy = False):
        classifier = self._Classifier
        classifier.learn_mat(training,training_labels)
        Lp = classifier.classify(testing)
        ConfusionMatrix = confusion_matrix(testing_labels, Lp)
        if (calculate_metrics==True):
            # Returns (metrics dict, confusion matrix).
            return CalculateMetrics(ConfusionMatrix,has_dummy),ConfusionMatrix
        else:
            return ConfusionMatrix
    # conduct a randomized k-fold experiment: confusion matrices of all
    # rounds are summed, then metrics are computed once on the total.
    # Verbose determines state printing
    # for Labels tuple / Similarity tuple shapes
    # see how they are defined on randPerm() -> goto page top
    def Randomized_kfold(self,SimilarityTouple,LabelesTouple,k,fact =0.8,verbose = False):
        for i in range(1, k+1):
            if verbose:
                print "Classification round: "+str(i)
            training,training_labels,testing,testing_labels = randPerm(SimilarityTouple,LabelesTouple,fact=fact)
            if(i==1):
                ConfusionMatrix = self.single(training,training_labels,testing,testing_labels,calculate_metrics = False)
            else:
                ConfusionMatrix = np.add(ConfusionMatrix,self.single(training,training_labels,testing,testing_labels,calculate_metrics = False))
        metrics = CalculateMetrics(ConfusionMatrix)
        if verbose:
            print "Displaying Metrics.."
            displayMetrics(metrics)
        return metrics, ConfusionMatrix
|
<gh_stars>0
import io
from torchtext.utils import download_from_url, extract_archive, unicode_csv_reader
from torchtext.experimental.datasets.raw.common import RawTextIterableDataset
# Download locations for each supported raw text-classification corpus.
# AG_NEWS is served as two separate csv files; the others are single
# archives (mostly Google Drive export links).
URLS = {
    'AG_NEWS':
        {'train': 'https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv',
         'test': 'https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv'},
    'SogouNews':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbUkVqNEszd0pHaFE',
    'DBpedia':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k',
    'YelpReviewPolarity':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg',
    'YelpReviewFull':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0',
    'YahooAnswers':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9Qhbd2JNdDBsQUdocVU',
    'AmazonReviewPolarity':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM',
    'AmazonReviewFull':
        'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZVhsUnRWRDhETzA',
    'IMDB':
        'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
}
def _create_data_from_csv(data_path):
    """Stream (label, text) pairs from a torchtext-style csv file.

    Column 0 holds the integer class label; the remaining columns are
    joined with spaces to form the example text.
    """
    with io.open(data_path, encoding="utf8") as csv_file:
        for columns in unicode_csv_reader(csv_file):
            yield int(columns[0]), ' '.join(columns[1:])
def _setup_datasets(dataset_name, root='.data'):
    """Download (and extract) *dataset_name*, returning raw train/test datasets.

    Arguments:
        dataset_name: a key of URLS / NUM_LINES.
        root: directory where the downloads are cached.
    Returns:
        (train, test) tuple of RawTextIterableDataset.
    Raises:
        RuntimeError: if the download/archive does not contain both
            train.csv and test.csv.
    """
    if dataset_name == 'AG_NEWS':
        # AG_NEWS is hosted as two plain csv files, not an archive.
        extracted_files = [download_from_url(URLS[dataset_name][item], root=root)
                           for item in ('train', 'test')]
    else:
        dataset_tar = download_from_url(URLS[dataset_name], root=root)
        extracted_files = extract_archive(dataset_tar)
    train_csv_path = None
    test_csv_path = None
    for fname in extracted_files:
        if fname.endswith('train.csv'):
            train_csv_path = fname
        if fname.endswith('test.csv'):
            test_csv_path = fname
    # Fix: fail with a clear message instead of an obscure NameError when the
    # archive layout is unexpected.
    if train_csv_path is None or test_csv_path is None:
        raise RuntimeError(
            "Could not find train.csv/test.csv in the files downloaded for "
            "dataset {}".format(dataset_name))
    train_iter = _create_data_from_csv(train_csv_path)
    test_iter = _create_data_from_csv(test_csv_path)
    return (RawTextIterableDataset(dataset_name, NUM_LINES[dataset_name], train_iter),
            RawTextIterableDataset(dataset_name, NUM_LINES[dataset_name], test_iter))
def AG_NEWS(*args, **kwargs):
    """Return the (train, test) raw AG_NEWS datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.AG_NEWS()
    """
    # Prepend the dataset name; equivalent to *(("AG_NEWS",) + args).
    return _setup_datasets("AG_NEWS", *args, **kwargs)
def SogouNews(*args, **kwargs):
    """Return the (train, test) raw SogouNews datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.SogouNews()
    """
    return _setup_datasets("SogouNews", *args, **kwargs)
def DBpedia(*args, **kwargs):
    """Return the (train, test) raw DBpedia datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.DBpedia()
    """
    return _setup_datasets("DBpedia", *args, **kwargs)
def YelpReviewPolarity(*args, **kwargs):
    """Return the (train, test) raw YelpReviewPolarity datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.YelpReviewPolarity()
    """
    return _setup_datasets("YelpReviewPolarity", *args, **kwargs)
def YelpReviewFull(*args, **kwargs):
    """Return the (train, test) raw YelpReviewFull datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.YelpReviewFull()
    """
    return _setup_datasets("YelpReviewFull", *args, **kwargs)
def YahooAnswers(*args, **kwargs):
    """Return the (train, test) raw YahooAnswers datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.YahooAnswers()
    """
    return _setup_datasets("YahooAnswers", *args, **kwargs)
def AmazonReviewPolarity(*args, **kwargs):
    """Return the (train, test) raw AmazonReviewPolarity datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.AmazonReviewPolarity()
    """
    return _setup_datasets("AmazonReviewPolarity", *args, **kwargs)
def AmazonReviewFull(*args, **kwargs):
    """Return the (train, test) raw AmazonReviewFull datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.AmazonReviewFull()
    """
    return _setup_datasets("AmazonReviewFull", *args, **kwargs)
def generate_imdb_data(key, extracted_files):
    """Yield (label, text) pairs for the aclImdb *key* split ('train'/'test').

    Only files whose path contains the split name and a 'pos' or 'neg'
    component are read; the 'urls_*' index files are skipped.
    """
    for fname in extracted_files:
        if 'urls' in fname:
            continue
        elif key in fname and ('pos' in fname or 'neg' in fname):
            label = 'pos' if 'pos' in fname else 'neg'
            # Fix: close each review file (the original leaked one open
            # handle per file).
            with io.open(fname, encoding="utf8") as f:
                yield label, f.read()
def IMDB(root='.data'):
    """Return the (train, test) raw IMDB sentiment datasets.

    Arguments:
        root: Directory where the datasets are saved. Default: ".data"
    Examples:
        >>> train, test = torchtext.experimental.datasets.raw.IMDB()
    """
    archive = download_from_url(URLS['IMDB'], root=root)
    review_files = extract_archive(archive)
    # One lazy iterator per split over the extracted review files.
    return tuple(
        RawTextIterableDataset("IMDB", NUM_LINES["IMDB"],
                               generate_imdb_data(split, review_files))
        for split in ('train', 'test')
    )
# Dataset name -> constructor, so callers can look datasets up by string.
DATASETS = {
    'AG_NEWS': AG_NEWS,
    'SogouNews': SogouNews,
    'DBpedia': DBpedia,
    'YelpReviewPolarity': YelpReviewPolarity,
    'YelpReviewFull': YelpReviewFull,
    'YahooAnswers': YahooAnswers,
    'AmazonReviewPolarity': AmazonReviewPolarity,
    'AmazonReviewFull': AmazonReviewFull,
    'IMDB': IMDB
}
# Number of training examples per dataset, used to size the raw iterators.
NUM_LINES = {
    'AG_NEWS': 120000,
    'SogouNews': 450000,
    'DBpedia': 560000,
    'YelpReviewPolarity': 560000,
    'YelpReviewFull': 650000,
    'YahooAnswers': 1400000,
    'AmazonReviewPolarity': 3600000,
    'AmazonReviewFull': 3000000,
    'IMDB': 25000
}
|
""" Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
#How to retrieve a Meraki API key: https://developer.cisco.com/meraki/api-v1/#!getting-started/find-your-organization-id
#Meraki Dashboard API call documentation: https://developer.cisco.com/meraki/api-v1/#!overview/api-key
# Import Section
import meraki
import os
from dotenv import load_dotenv
from prettyprinter import pprint
import json
import traceback
# Load environment variables (e.g. MERAKI_ORGANIZATION) from a local .env file.
load_dotenv()
class Meraki:
    """Thin wrapper around the Meraki Dashboard API.

    The target organization is chosen by name from the MERAKI_ORGANIZATION
    environment variable at construction time.
    """
    def __init__(self, api_key, base_url="https://api.meraki.com/api/v1"):
        """Create the dashboard session and resolve the organization id.

        :param api_key: Meraki Dashboard API key.
        :param base_url: API endpoint; defaults to the public v1 endpoint.
        """
        self.__DASHBOARD = meraki.DashboardAPI(
            api_key=api_key,
            base_url=base_url,
            print_console=False,
            suppress_logging=True)
        # Fix: initialize explicitly so a missing/mismatched
        # MERAKI_ORGANIZATION leaves org_id as None instead of causing an
        # AttributeError much later, in an unrelated method.
        self.__org_id = None
        for org in self.__DASHBOARD.organizations.getOrganizations():
            if org['name'] == os.environ['MERAKI_ORGANIZATION']:
                self.__org_id = org['id']
    #API calls
    #Organizations
    def getOrganizations(self):
        """Return (and print) all organizations visible to the API key."""
        response = self.__DASHBOARD.organizations.getOrganizations()
        print(response)
        return response
    #Networks
    def getNetworks(self):
        """Return every network of the configured organization."""
        response = self.__DASHBOARD.organizations.getOrganizationNetworks(
            self.__org_id, total_pages='all'
        )
        return(response)
    def getOrganzationSSIDS(self):
        """Collect all SSIDs, grouped by enabled/disabled then network name.

        NOTE: the method-name typo ("Organzation") is kept for backward
        compatibility with existing callers.
        """
        ssids = {'enabled': {}, 'disabled': {}}
        for network in self.getNetworks():
            try:
                for ssid in self.__DASHBOARD.wireless.getNetworkWirelessSsids(network['id']):
                    # Remember the owning network so later updates can address it.
                    ssid['network_id'] = network['id']
                    bucket = 'enabled' if ssid['enabled'] else 'disabled'
                    ssids[bucket].setdefault(network['name'], []).append(ssid)
            except Exception:
                # Best effort: networks without a wireless component raise
                # here; skip them.
                continue
        return ssids
    def getNetworkSSID(self, network_id, ssid_number):
        """Return the configuration of a single SSID."""
        return self.__DASHBOARD.wireless.getNetworkWirelessSsid(network_id, ssid_number)
    def getDashboard(self):
        """Expose the underlying DashboardAPI session."""
        return self.__DASHBOARD
    def updateAllSsidConfigurations(self, golden_config_network_id, golden_config_ssid_number, demo_config=False, targeted_networks=None, selected_configs=None):
        """Push SSID settings to many networks.

        Demo mode replays every SSID stored in demo_configurations.txt.
        Normal mode copies the ``selected_configs`` keys of the golden SSID
        to every enabled SSID named in ``targeted_networks``
        ({network_name: [ssid_name, ...]}).

        Returns True on success, False otherwise.
        """
        # Fix: avoid mutable default arguments ({} / []); same truthiness.
        targeted_networks = targeted_networks or {}
        selected_configs = selected_configs or []
        if demo_config:
            try:
                with open("demo_configurations.txt") as f:
                    ssids = json.loads(f.read())
                pprint(ssids)
                for network, value in ssids['enabled'].items():
                    for ssid in value:
                        ssid_copy = ssid.copy()
                        # 'number' is passed separately, not as a body field.
                        del ssid_copy['number']
                        self.__DASHBOARD.wireless.updateNetworkWirelessSsid(networkId=ssid['network_id'], number=ssid['number'], **ssid_copy)
                        print("success")
                for network, value in ssids['disabled'].items():
                    for ssid in value:
                        ssid_copy = ssid.copy()
                        del ssid_copy['number']
                        self.__DASHBOARD.wireless.updateNetworkWirelessSsid(networkId=ssid['network_id'], number=ssid['number'], **ssid_copy)
                        print("success")
                return True
            except Exception as e:
                print("ERROR")
                print(str(e))
                return False
        else:
            if targeted_networks and selected_configs:
                golden_config = self.getNetworkSSID(golden_config_network_id, golden_config_ssid_number)
                # Keep only the whitelisted settings from the golden SSID.
                new_golden_config = {k: golden_config[k] for k in selected_configs if k in golden_config}
                try:
                    for network, value in self.getOrganzationSSIDS()['enabled'].items():
                        if network in targeted_networks:
                            for ssid in value:
                                try:
                                    if ssid['name'] in targeted_networks[network]:
                                        self.__DASHBOARD.wireless.updateNetworkWirelessSsid(networkId=ssid['network_id'],
                                                                                            number=ssid['number'],
                                                                                            **new_golden_config)
                                except Exception as e:
                                    # Name collisions are expected when cloning
                                    # a config across networks; skip those.
                                    if "Each enabled SSID must have a unique name" in str(e):
                                        continue
                    return True
                except Exception as e:
                    print("ERROR in TARGETED NETWORKS STATEMENT")
                    traceback.print_exc()
                    return False
            else:
                print("No targeted networks given.")
                return False
    def createNewSsidConfigurationAllNetworks(self, golden_config_network_id, golden_config_ssid_number, targeted_networks=None):
        """Clone the golden SSID onto one disabled SSID slot per targeted network.

        Returns True on success, False on failure or when no targets given.
        """
        # Fix: avoid a mutable default argument ([]); same truthiness.
        targeted_networks = targeted_networks or []
        golden_config = self.getNetworkSSID(golden_config_network_id, golden_config_ssid_number)
        # 'number' is passed separately, not as a body field.
        del golden_config['number']
        try:
            if targeted_networks:
                for network, value in self.getOrganzationSSIDS()['disabled'].items():
                    if network in targeted_networks:
                        for ssid in value:
                            try:
                                self.__DASHBOARD.wireless.updateNetworkWirelessSsid(networkId=ssid['network_id'], number=ssid['number'], **golden_config)
                                # Only one slot per network needs the config.
                                break
                            except Exception as e:
                                if "Each enabled SSID must have a unique name" in str(e):
                                    continue
                                else:
                                    return False
                return True
            else:
                print("No targeted networks given.")
                return False
        except Exception as e:
            print(str(e))
            return False
|
<reponame>basicskywards/cyclegan-yolo
# Prepare COCO annotation for training YOLOv3
# To convert VOC annotation format to COCO
from __future__ import print_function, division
import os
#import pandas as pd
import numpy as np
def read_txt(txt_path):
    """Yield raw lines (newline included) from the text file at *txt_path*.

    Fix: the original opened the file without ever closing it; the context
    manager closes it when the generator is exhausted or discarded.
    """
    with open(txt_path, "r") as f:
        for line in f:
            yield line
def parse_line(line):
    '''
    Inputs: a line from a train/test txt file, formatted as
        line_index img_path img_width img_height [label x_min y_min x_max y_max] ...
    Returns:
        image_path: string
        boxes: float32 array [N, 4] of (x_min, y_min, x_max, y_max)
        labels: int64 array [N] of class indices
        img_width: int
        img_height: int
    '''
    # Kept verbatim: treats anything whose type name lacks 'str' as bytes.
    if 'str' not in str(type(line)):
        line = line.decode()
    fields = line.strip().split(' ')
    assert len(fields) > 8, 'Annotation error 1!'
    image_path = fields[1]
    img_width, img_height = int(fields[2]), int(fields[3])
    box_fields = fields[4:]
    assert len(box_fields) % 5 == 0, 'Annotation error 2!'
    boxes, labels = [], []
    # Each box is 5 consecutive tokens: label then the 4 corner coordinates.
    for start in range(0, len(box_fields), 5):
        cls, x_min, y_min, x_max, y_max = box_fields[start:start + 5]
        labels.append(int(cls))
        boxes.append([float(x_min), float(y_min), float(x_max), float(y_max)])
    return (image_path,
            np.asarray(boxes, np.float32),
            np.asarray(labels, np.int64),
            img_width,
            img_height)
def convert_voc2coco(bbox, img_width, img_height):
    """Convert one VOC box (x_min, y_min, x_max, y_max) to COCO/YOLO form.

    COCO label format: label <cx> <cy> <w> <h> where
        <cx>*img_width  = (x_max - x_min)/2 + x_min
        <cy>*img_height = (y_max - y_min)/2 + y_min
        <w> = (x_max - x_min)/img_width
        <h> = (y_max - y_min)/img_height

    Returns: center x, center y, width, height scaled into [0, 1].
    (The "- 1" offsets the 1-based pixel coordinates of the source format.)
    """
    x_min, y_min, x_max, y_max = bbox[0], bbox[1], bbox[2], bbox[3]
    box_w = x_max - x_min
    box_h = y_max - y_min
    cx = (box_w / 2 + x_min - 1) / img_width
    cy = (box_h / 2 + y_min - 1) / img_height
    return cx, cy, box_w / img_width, box_h / img_height
# def gen_coco_txt(save_path):
# f = open(save_path, 'w')
# pass
def main():
    """Convert VOC-style annotation lines into per-image COCO label files.

    Reads the combined annotation file, writes one image path per line into
    train.txt, and one "<label> <cx> <cy> <w> <h>" line per box into a
    per-image .txt file under the labels directory.

    Fix: label files (f2) were opened once per image and never closed, and
    f1 leaked on exceptions; both are now managed by ``with`` blocks.
    NOTE(review): paths are machine-specific and hard-coded — consider
    promoting them to CLI arguments.
    """
    txt_path = '/media/basic/ssd256/traffic_cone_syn/voc/train_full.txt'
    label_path = '/media/basic/ssd256/traffic_cone_syn/labels/'
    train_test_path = '/media/basic/ssd256/traffic_cone_real/'
    name_txt = 'train.txt'
    with open(train_test_path + name_txt, 'w') as f1:
        for line in read_txt(txt_path):
            img_path, boxes, labels, img_w, img_h = parse_line(line)
            img_name = img_path.split('/')[-1]
            f1.write(img_path + '\n')
            img_txt = img_name.split('.')[0] + '.txt'
            with open(label_path + img_txt, 'w') as f2:
                for i in range(len(labels)):
                    bbox = boxes[i, :]
                    label = labels[i]
                    x, y, w, h = convert_voc2coco(bbox, img_w, img_h)
                    tmp_line = [str(label), str(x), str(y), str(w), str(h)]
                    f2.write(' '.join(tmp_line) + '\n')
# Script entry point: run the VOC -> COCO conversion.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import random as rn
from sklearn.metrics import roc_auc_score
import sys
import os
from lib_util import get_target,get_opt
import lightgbm as lgb
from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, BatchNormalization, SpatialDropout1D
from keras.callbacks import Callback
from keras.initializers import RandomUniform
from keras.models import Model
from keras.optimizers import Adam
import gc
def get_params(params_str):
    """Split a comma-separated hyper-parameter string into a name -> value dict.

    The expected parameter names depend on the configured model option
    ('keras' vs anything containing 'LGBM'). Values are kept as strings; the
    callers coerce them. Exits the process on an unknown model or on a
    name/value count mismatch.
    """
    if get_opt('model') == 'keras':
        names = ['batch_size', 'dense_cate', 'dense_nume_n_layers', 'drop', 'emb_cate', 'epochs_for_lr', 'lr', 'lr_fin', 'lr_init', 'max_epochs', 'n_layers', 'patience']
    elif 'LGBM' in get_opt('model'):
        names = ['boosting_type','colsample_bytree','learning_rate','max_bin','max_depth','metric','min_child_samples','min_child_weight','min_split_gain','nthread','num_leaves','objective','reg_alpha','reg_lambda','scale_pos_weight','subsample','subsample_for_bin','subsample_freq','verbose']
    else:
        print("no valid target")
        sys.exit(1)
    pvals = params_str.split(',')
    # The leading field is dropped — presumably a run identifier, not a
    # parameter value (TODO confirm against the producer of params_str).
    del pvals[0]
    if len(pvals) != len(names):
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!ERR!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('params: count is not fit',len(pvals), len(names))
        print('params_str:',params_str)
        print('names:',names)
        print('param_values:',pvals)
        # Fix: bare sys.exit() exits with status 0 (success) on this error
        # path; exit non-zero like the other error branch.
        sys.exit(1)
    params = dict(zip(names, pvals))
    return params
def LGBM(X_tr, X_va, X_te, predictors, cat_feats, seed=2018):
    """Parse the CLI hyper-parameters and train a LightGBM model.

    Returns the validation AUC from LGBM_helper.

    Fixes: the caller-supplied ``seed`` is now forwarded (it was hard-coded to
    2018), and a missing 'params' option raises a clear ValueError instead of
    a NameError from an unbound ``params`` variable.
    """
    params_str = get_opt('params')
    if params_str is None:
        raise ValueError("missing 'params' option")
    params = get_params(params_str)
    return LGBM_helper(X_tr, X_va, X_te, predictors, cat_feats, params, seed=seed)
def Keras(X_tr, X_va, X_te, predictors, cat_feats, seed=2018):
    """Parse the CLI hyper-parameters and train the Keras model.

    Returns the validation AUC from Keras0_helper.

    Fixes: the caller-supplied ``seed`` is now forwarded (it was hard-coded to
    2018), and a missing 'params' option raises a clear ValueError instead of
    a NameError from an unbound ``params`` variable.
    """
    params_str = get_opt('params')
    if params_str is None:
        raise ValueError("missing 'params' option")
    params = get_params(params_str)
    return Keras0_helper(X_tr, X_va, X_te, predictors, cat_feats, params, seed=seed)
def LGBM_helper(_X_tr,_X_va,_X_te,predictors,cat_feats,params,seed=2018):
    """Train a LightGBM classifier on the 'is_attributed' target.

    Args:
        _X_tr, _X_va, _X_te: train / validation / test DataFrames containing
            the predictor columns and an 'is_attributed' label column.
        predictors: list of feature column names.
        cat_feats: categorical feature names passed to lgb.Dataset.
        params: hyper-parameter dict (string values are coerced below).
        seed: RNG seed applied to python/numpy and LightGBM seed params.

    Returns:
        Validation AUC (value of params['metric'] at the best iteration).

    Side effects: writes test predictions into _X_te['pred'] and deletes the
    train/validation frames to reclaim memory.
    """
    # Best-effort determinism across python hashing, numpy and LightGBM.
    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(seed)
    rn.seed(seed)
    X_tr = _X_tr[predictors]
    X_va = _X_va[predictors]
    X_te = _X_te[predictors]
    y_tr = _X_tr['is_attributed']
    y_va = _X_va['is_attributed']
    y_te = _X_te['is_attributed']
    params['feature_fraction_seed'] = seed
    params['bagging_seed'] = seed
    params['drop_seed'] = seed
    params['data_random_seed'] = seed
    # Params arrive as strings from get_params(); coerce the integer ones.
    params['num_leaves'] = int(params['num_leaves'])
    params['subsample_for_bin'] = int(params['subsample_for_bin'])
    # max_depth derived from num_leaves (roughly log2(num_leaves) + 1).
    params['max_depth'] = int(np.log2(params['num_leaves'])+1.2)
    params['max_bin'] = int(params['max_bin'])
    print('*'*50)
    for k,v in sorted(params.items()):
        print(k,':',v)
    columns = X_tr.columns
    print('start for lgvalid')
    lgvalid = lgb.Dataset(X_va, label=y_va, categorical_feature=cat_feats)
    # NOTE(review): drop() without inplace=True returns a new frame that is
    # discarded — this line is a no-op; presumably meant to free memory.
    _X_va.drop(predictors,axis=1)
    del _X_va, X_va, y_va
    gc.collect()
    print('start for lgtrain')
    lgtrain = lgb.Dataset(X_tr, label=y_tr, categorical_feature=cat_feats)
    # NOTE(review): dropping from _X_te while deleting _X_tr looks like a
    # typo for _X_tr.drop(...) — verify; also a no-op as above.
    _X_te.drop(predictors,axis=1)
    del _X_tr, X_tr, y_tr
    gc.collect()
    evals_results = {}
    # Optionally also evaluate on train/test sets, controlled by CLI options.
    if get_opt('trainCheck','-') == 'on':
        valid_names=['train','valid']
        valid_sets=[lgtrain, lgvalid]
    else:
        valid_names=['valid']
        valid_sets=[lgvalid]
    if get_opt('testCheck','-') == 'on':
        valid_names.append('test')
        lgtest = lgb.Dataset(X_te, label=y_te, categorical_feature=cat_feats)
        valid_sets.append(lgtest)
    print('start training')
    bst = lgb.train(params,
        lgtrain,
        valid_sets=valid_sets,
        valid_names=valid_names,
        evals_result=evals_results,
        num_boost_round=2000,
        early_stopping_rounds=100,
        verbose_eval=10,
        )
    # Log feature importances by split count and by gain, most important first.
    importance = bst.feature_importance()
    print('importance (count)')
    tuples = sorted(zip(columns, importance), key=lambda x: x[1],reverse=True)
    for col, val in tuples:
        print(val,"\t",col)
    importance = bst.feature_importance(importance_type='gain')
    print('importance (gain)')
    tuples = sorted(zip(columns, importance), key=lambda x: x[1],reverse=True)
    for col, val in tuples:
        print(val,"\t",col)
    n_estimators = bst.best_iteration
    metric = params['metric']
    # Validation score at the early-stopping best iteration.
    auc = evals_results['valid'][metric][n_estimators-1]
    _X_te['pred'] = bst.predict(X_te)
    return auc
class EarlyStopping(Callback):
    """Keras callback: early stopping on ROC-AUC with per-epoch checkpoints.

    Monitors validation (or test, when the 'testCheck' option is on) ROC-AUC,
    tracks the best epoch, saves weights after every epoch to
    ``model_file.<epoch>`` and stops training after ``patience`` epochs
    without improvement.
    """
    def __init__(self,training_data=False,validation_data=False, testing_data=False, min_delta=0, patience=0, model_file=None, verbose=0):
        # training_data / testing_data are optional (x, y) tuples; False
        # disables the corresponding per-epoch AUC computation.
        super(EarlyStopping, self).__init__()
        self.best_epoch = 0
        self.patience = patience
        self.verbose = verbose
        self.min_delta = min_delta
        self.wait = 0                 # epochs since last improvement
        self.stopped_epoch = 0
        self.monitor_op = np.greater  # higher AUC is better
        if training_data:
            self.x_tr = training_data[0]
            self.y_tr = training_data[1]
        else:
            self.x_tr = False
            self.y_tr = False
        # validation_data is mandatory in practice: indexed unconditionally.
        self.x_val = validation_data[0]
        self.y_val = validation_data[1]
        if testing_data:
            self.x_te = testing_data[0]
            self.y_te = testing_data[1]
        else:
            self.x_te = False
            self.y_te = False
        self.model_file = model_file  # checkpoint path prefix (epoch appended)
    def on_train_begin(self, logs={}):
        # Reset state so the callback instance can be reused across fits.
        self.wait = 0
        self.best_epoch = 0
        self.stopped_epoch = 0
        self.best = -np.Inf
    def on_train_end(self, logs={}):
        if self.stopped_epoch > 0 and self.verbose > 0:
            print('Epoch ',self.best_epoch,': EarlyStopping')
    def on_epoch_end(self, epoch, logs={}):
        # Compute ROC-AUC on whichever splits were provided (0 when disabled).
        if self.x_tr:
            y_pred = self.model.predict(self.x_tr,batch_size=100000)
            roc_tr = roc_auc_score(self.y_tr, y_pred)
        else:
            roc_tr = 0
        y_hat_val=self.model.predict(self.x_val,batch_size=100000)
        roc_val = roc_auc_score(self.y_val, y_hat_val)
        if self.x_te:
            y_hat_te=self.model.predict(self.x_te,batch_size=100000)
            roc_te = roc_auc_score(self.y_te, y_hat_te)
        else:
            roc_te = 0
        print('roc-auc: %s - roc-auc_val: %s - roc-auc_test: %s' % (str(round(roc_tr,6)),str(round(roc_val,6)), str(round(roc_te,6))),end=100*' '+'\n')
        # Checkpoint every epoch so the best one can be reloaded later.
        if self.model_file:
            print("saving",self.model_file+'.'+str(epoch))
            self.model.save_weights(self.model_file+'.'+str(epoch))
        if(self.x_val):
            # Monitor test AUC when 'testCheck' is on, otherwise validation.
            if get_opt('testCheck','-') == 'on':
                current = roc_te
            else:
                current = roc_val
            if self.monitor_op(current - self.min_delta, self.best):
                self.best = current
                self.best_epoch = epoch
                self.wait = 0
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_epoch = epoch
                    self.model.stop_training = True
def Keras0_helper(_X_tr,_X_va,_X_te,predictors,cat_feats,params,seed=2018):
    """Build and train an embedding + dense Keras model on 'is_attributed'.

    Categorical features each get an Embedding input; numerical features are
    fed through a small dense tower; both are concatenated into the final
    sigmoid head. Early stopping / checkpointing is handled by the custom
    EarlyStopping callback defined above.

    Returns:
        Validation ROC-AUC of the (possibly epoch-averaged) predictions, or
        0 on degenerate configurations (empty features, embedding size 1).

    Side effects: writes predictions into _X_te['pred'] and _X_va['pred'],
    and creates/removes per-epoch weight files under ../work/.
    """
    # Best-effort determinism (hash seed, numpy RNG, python RNG).
    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(seed)
    rn.seed(seed)
    X_tr = _X_tr[predictors]
    X_va = _X_va[predictors]
    X_te = _X_te[predictors]
    y_tr = _X_tr['is_attributed']
    y_va = _X_va['is_attributed']
    y_te = _X_te['is_attributed']
    print('*************params**************')
    for f in sorted(params): print(f+":",params[f])
    # Params arrive as strings from get_params(); coerce to numeric types.
    batch_size = int(params['batch_size'])
    epochs_for_lr = float(params['epochs_for_lr'])
    max_epochs = int(params['max_epochs'])
    emb_cate = int(params['emb_cate'])
    dense_cate = int(params['dense_cate'])
    dense_nume_n_layers = int(params['dense_nume_n_layers'])
    drop = float(params['drop'])
    lr= float(params['lr'])
    lr_init = float(params['lr_init'])
    lr_fin = float(params['lr_fin'])
    n_layers = int(params['n_layers'])
    patience = int(params['patience'])
    # Keras multi-input models take dicts keyed by Input layer name.
    train_dict = {}
    valid_dict = {}
    test_dict = {}
    input_list = []
    emb_list = []
    numerical_feats = []
    tot_emb_n = 0
    # Everything not declared categorical is treated as numerical.
    for col in X_tr:
        if col not in cat_feats:
            numerical_feats.append(col)
    if len(cat_feats) > 0:
        # One Input + Embedding per categorical column.
        for col in cat_feats:
            train_dict[col] = np.array(X_tr[col])
            valid_dict[col] = np.array(X_va[col])
            test_dict[col] = np.array(X_te[col])
            inpt = Input(shape=[1], name = col)
            input_list.append(inpt)
            # Vocabulary size: max observed category id across all splits + 1.
            max_val = np.max([X_tr[col].max(), X_va[col].max(), X_te[col].max()])+1
            emb_n = np.min([emb_cate, max_val])
            if get_opt('fixEmb','on') == 'on':
                emb_n = emb_cate
            tot_emb_n += emb_n
            if emb_n == 1:
                # Degenerate embedding; abort with a sentinel score.
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Warinig!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! emb_1 = 1")
                return 0
            print('Embedding size:',max_val, emb_cate, X_tr[col].max(), X_va[col].max(), X_te[col].max(), emb_n,col)
            embd = Embedding(max_val, emb_n)(inpt)
            emb_list.append(embd)
        if len(emb_list) == 1:
            # concatenate() needs at least two tensors; abort.
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Warinig!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! emb_list = 1")
            return 0
        fe = concatenate(emb_list)
        s_dout = SpatialDropout1D(drop)(fe)
        x1 = Flatten()(s_dout)
    if get_opt('sameNDenseAsEmb','-') == 'on':
        dense_cate = tot_emb_n
    if len(numerical_feats) > 0:
        # All numerical columns share one Input, fed as a single matrix.
        train_dict['numerical'] = X_tr[numerical_feats].values
        valid_dict['numerical'] = X_va[numerical_feats].values
        test_dict['numerical'] = X_te[numerical_feats].values
        inpt = Input((len(numerical_feats),),name='numerical')
        input_list.append(inpt)
        x2 = inpt
        for n in range(dense_nume_n_layers):
            x2 = Dense(dense_cate,activation='relu',kernel_initializer=RandomUniform(seed=seed))(x2)
            if get_opt('numeDropout','on') != 'off':
                x2 = Dropout(drop)(x2)
            if get_opt('NumeBatchNormalization','on') != 'off':
                x2 = BatchNormalization()(x2)
    # Merge the categorical (x1) and numerical (x2) towers as available.
    if len(numerical_feats) > 0 and len(cat_feats) > 0:
        x = concatenate([x1, x2])
    elif len(numerical_feats) > 0:
        x = x2
    elif len(cat_feats) > 0:
        x = x1
    else:
        return 0 # for small data test
    for n in range(n_layers):
        x = Dense(dense_cate,activation='relu',kernel_initializer=RandomUniform(seed=seed))(x)
        if get_opt('lastDropout','on') != 'off':
            x = Dropout(drop)(x)
        if get_opt('BatchNormalization','off') == 'on' or get_opt('LastBatchNormalization','off') == 'on':
            x = BatchNormalization()(x)
    outp = Dense(1,activation='sigmoid',kernel_initializer=RandomUniform(seed=seed))(x)
    model = Model(inputs=input_list, outputs=outp)
    if get_opt('optimizer','expo') == 'adam':
        optimizer = Adam(lr=lr)
    elif get_opt('optimizer','expo') == 'nadam':
        # NOTE(review): Nadam is not imported in this module — this branch
        # raises NameError if selected; add it to the keras.optimizers import.
        optimizer = Nadam(lr=lr)
    else:
        # Default: Adam with per-batch exponential decay from lr_init to
        # lr_fin over epochs_for_lr epochs. NOTE(review): lr_init/lr_fin from
        # params are overwritten by the hard-coded 0.001/0.0001 here — verify.
        exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1
        steps = int(len(X_tr) / batch_size) * epochs_for_lr
        lr_init, lr_fin = 0.001, 0.0001
        lr_decay = exp_decay(lr_init, lr_fin, steps)
        optimizer = Adam(lr=lr, decay=lr_decay)
    model.compile(loss='binary_crossentropy',optimizer=optimizer)
    model.summary()
    # Per-process checkpoint prefix; epoch number is appended by the callback.
    model_file = '../work/weights.'+str(os.getpid())+'.hdf5'
    if get_opt('trainCheck','-') == 'on':
        training_data=(train_dict, y_tr)
    else:
        training_data=False
    if get_opt('testCheck','-') == 'on':
        testing_data=(test_dict, y_te)
    else:
        testing_data=False
    aucEarlyStopping = EarlyStopping(
        training_data=training_data,
        validation_data=(valid_dict,y_va),
        testing_data=testing_data,
        patience=patience,
        model_file=model_file,
        verbose=1)
    model.fit(train_dict,
        y_tr,
        validation_data=[valid_dict, y_va],
        batch_size=batch_size,
        epochs=max_epochs,
        shuffle=True,
        verbose=2,
        callbacks=[aucEarlyStopping])
    # Reload the best epoch's weights and predict.
    best_epoch = aucEarlyStopping.best_epoch
    print('loading',model_file+'.'+str(best_epoch))
    model.load_weights(model_file+'.'+str(best_epoch))
    _X_te['pred'] = model.predict(test_dict, batch_size=batch_size, verbose=2)[:,0]
    _X_va['pred'] = model.predict(valid_dict, batch_size=batch_size, verbose=2)[:,0]
    # Optional prediction averaging over epochs neighbouring the best one,
    # each neighbour weighted 0.5.
    if get_opt('avgEpoch',0) > 0:
        added = 1
        for i in range(min(get_opt('avgEpoch',0),patience)):
            best_epoch = aucEarlyStopping.best_epoch + (i+1)
            # NOTE(review): this 'continue' also skips the symmetric earlier
            # epoch (best - (i+1)) below — possibly unintended.
            if best_epoch >= max_epochs:
                continue
            print('loading',model_file+'.'+str(best_epoch))
            model.load_weights(model_file+'.'+str(best_epoch))
            _X_te['pred'] += model.predict(test_dict, batch_size=batch_size, verbose=2)[:,0]*0.5
            _X_va['pred'] += model.predict(valid_dict, batch_size=batch_size, verbose=2)[:,0]*0.5
            added += 0.5
            best_epoch = aucEarlyStopping.best_epoch - (i+1)
            if best_epoch < 0:
                continue
            print('loading',model_file+'.'+str(best_epoch))
            model.load_weights(model_file+'.'+str(best_epoch))
            _X_te['pred'] += model.predict(test_dict, batch_size=batch_size, verbose=2)[:,0]*0.5
            _X_va['pred'] += model.predict(valid_dict, batch_size=batch_size, verbose=2)[:,0]*0.5
            added += 0.5
        _X_te['pred'] /= added
        _X_va['pred'] /= added
    # Clean up the per-epoch checkpoints.
    os.system('rm -f '+model_file+'.*')
    auc = roc_auc_score(y_va, _X_va.pred)
    return auc
def Predict(X_tr, X_va, X_te, predictors, cat_feats, seed=2018):
    """Dispatch to the configured model trainer (LGBM or Keras).

    Returns the validation AUC reported by the selected trainer; exits the
    process when the 'model' option is unrecognised.

    Fix: the caller-supplied ``seed`` is now forwarded instead of being
    hard-coded to 2018.
    """
    model = get_opt('model')
    if 'LGBM' in model:
        return LGBM(X_tr, X_va, X_te, predictors, cat_feats, seed=seed)
    elif 'keras' in model:
        return Keras(X_tr, X_va, X_te, predictors, cat_feats, seed=seed)
    else:
        print("no valid model")
        sys.exit(1)
|
<gh_stars>1-10
#!/usr/bin/env python
# Author: <NAME> (t-sigai at microsoft dot com))
import os
import cv2
from datetime import date
import numpy as np
import matplotlib.ticker as ticker
np.set_printoptions(threshold=np.inf)
import argparse
from zernike import RZern
import pdb
# external modules
from preprocess import preprocess_image
from mire_detection import process, clean_points, clean_points_support
from camera_size import get_arc_step_params
from arc_step_method import arc_step
from get_maps import *
from utils import *
from metrics import *
# command line arguments (if any)
parser = argparse.ArgumentParser(description="KT Processing Pipeline")

# (flag, default, type, help) — one row per CLI option, registered in a loop
# so each option is declared identically.
_ARG_SPECS = [
    ("--start_angle", 0, float, "Starting meridian"),
    ("--end_angle", 360, float, "Ending Meridian"),
    ("--jump", 1, float, "Jump between meridians"),
    ("--n_mires", None, int, "Number of mires to process"),
    ("--working_distance", 75.0, float, "Distance of cone end from cornea"),
    ("--camera_params", None, str, "Camera parameters: sensor dimensions (width x height), focal length (space separated string)"),
    ("--model_file", None, str, "File with details about the placido head model"),
    ("--base_dir", "images", str, "Image data directory"),
    ("--image_name", None, str, "Test input image."),
    ("--upsample", 1, int, "Increase resolution of input image."),
    ("--gap1", -3, float, "Accounting for gap (in mm) between eye and largest ring."),
    ("--gap2", 5, float, "Accounting for gap (in mm) between camera pupil and smallest ring."),
]
for _flag, _default, _type, _help in _ARG_SPECS:
    parser.add_argument(_flag, default=_default, type=_type, help=_help)
class corneal_top_gen:
    """Generates corneal topography (tangential & axial) maps from a
    placido-ring image, plus summary keratometry metrics (Sim-K, CLMI/PPK,
    KISA, tilt factor).

    Fixes vs. original: mutable default argument ``zernike_degree=[8]``
    replaced by a None sentinel, and ``self.test_name`` is now set when an
    explicit ``test_name`` is passed (previously it was only assigned in the
    default branch, causing AttributeError later).
    """

    def __init__(
        self, model_file, working_distance, sensor_dims, f_len, start_angle,
        end_angle, jump, upsample, n_mires, f_gap1, zernike_degree=None,
        test_name=None
    ):
        """
        Args:
            model_file: file which consists of the placido head dimensions.
            working_distance: distance between camera pupil and cornea apex.
            sensor_dims: width x height of the camera sensor.
            f_len: focal length of the camera.
            start_angle, end_angle, jump: meridian sweep parameters (degrees).
            upsample: integer upsampling factor for the input image.
            n_mires: number of mires to process.
            f_gap1: callable mapping 1/mire radius to the gap1 correction
                (may be None to use the supplied err1 values).
            zernike_degree: iterable of Zernike fit degrees; defaults to [8].
            test_name: output folder name; defaults to today's date.
        """
        self.model_file = model_file
        self.working_distance = working_distance
        self.sensor_dims = sensor_dims
        self.f_len = f_len
        self.f_gap1 = f_gap1
        self.start_angle = start_angle
        self.end_angle = end_angle
        self.jump = jump
        self.ups = upsample
        self.n_mires = n_mires
        self.zernike_degree = [8] if zernike_degree is None else zernike_degree
        self.test_name = date.today().strftime("%d_%m_%Y") if test_name is None else test_name

    def zernike_smoothening(self, image_name, plot_x, plot_y, plot_z,
                            xy_norm, xv, yv, max_r, relative_points):
        """Fit Zernike polynomials to the reconstructed 3-D surface points and
        derive curvature maps plus keratometry metrics.

        Returns: (error, tan_map, axial_map, sim_k1, sim_k2, angle,
        average_k, diff) — from the last degree in self.zernike_degree.
        """
        error = -1
        for zern_deg in self.zernike_degree:
            zern_deg = int(zern_deg)
            # Zernike fitting: c1 holds the fitted coefficients.
            cart = RZern(zern_deg)
            cart.make_cart_grid(plot_x, plot_y, scale_by=xy_norm)
            c1 = cart.fit_cart_grid(plot_z)[0]
            # Re-evaluate the fitted surface on the output grid xv, yv.
            cart = RZern(zern_deg)
            cart.make_cart_grid(xv, yv, scale_by=xy_norm)
            Phi = cart.eval_grid(c1, matrix=True)
            Phi = Phi[np.isfinite(Phi)].max() - Phi
            rho = np.abs(np.sqrt(xv ** 2 + yv ** 2) * xy_norm)
            # Curvatures: k1 (instantaneous/tangential) and k2 (axial).
            k1, k2 = cart.eval_curvature_grid(c1, matrix=True)
            k1, k2 = abs(k1), abs(k2)
            k1_raw = k1.copy()
            inst_roc, axial_roc = 1 / k1, 1 / k2  # radius of curvature
            # Reconstruction error over the central 70% region, relative to a
            # normal eye (7.8 mm radius), in percent.
            check0 = np.isfinite(inst_roc) * (rho<=xy_norm*0.7)
            error = (np.abs(inst_roc[check0]-7.8)/7.8).mean()*100
            # Restrict metrics to the central 3 mm diameter zone.
            check0 = np.isfinite(inst_roc) * (rho <= 1.5)
            # Meridian angle of the steepest point (k1 maximum).
            angle_k1 = np.argwhere(k1[check0].max() == k1)[0]
            angle_k1 = np.arctan(
                (angle_k1[0] - k1.shape[0] // 2) / (angle_k1[1] - k1.shape[1] // 2 + 1e-9)
            )
            angle = round(-angle_k1 * 180 / np.pi, 0)
            k1, k2 = k1[check0], k2[check0]
            # Sim-K in dioptres (337.5 keratometric conversion factor).
            sim_k1 = round(337.5 * k1.max(), 2)
            sim_k2 = round(337.5 * k2[np.argmax(k1)], 2)
            average_k, diff = round((sim_k1 + sim_k2) / 2.0, 2), round(sim_k1 - sim_k2, 2)
            # Replace NaNs with a huge radius (flat) before colour-mapping.
            check = np.isnan(inst_roc); inst_roc[check] = 1e6
            check = np.isnan(axial_roc); axial_roc[check] = 1e6
            # NOTE(review): gt_pal, gt_r, gt_p are not defined in this file —
            # presumably supplied by `from get_maps import *`; verify.
            tan_map = generate_tan_map(
                inst_roc,
                gt_pal,
                gt_r,
                (inst_roc.shape[1] // 2, inst_roc.shape[0] // 2),
                max_r,
                None
            )
            # Axial map via the meridional averaging method; k2 is
            # re-computed by this call.
            axial_map, k2 = generate_axial_map(
                1 / inst_roc,
                gt_pal,
                gt_p,
                (inst_roc.shape[1] // 2, inst_roc.shape[0] // 2),
                max_r,
                None
            )
            k2_raw = k2.copy()
            k2 = k2[check0]
            sim_k2 = round(337.5 * k2[np.argmax(k1)], 2)
            average_k, diff = round((sim_k1 + sim_k2) / 2.0, 2), round(sim_k1 - sim_k2, 2)
            # Pixel radii for the 1/2/3/3.5/5/7 mm circles drawn on the maps.
            r_1 = int(float(max_r)/xy_norm*0.5)
            r_2 = int(float(max_r)/xy_norm*1.0)
            r_3 = int(float(max_r)/xy_norm*1.5)
            r_3_5 = int(float(max_r)/xy_norm*1.75)
            r_5 = int(float(max_r)/xy_norm*2.5)
            r_7 = int(float(max_r)/xy_norm*3.5)
            tan_map = draw_circles(
                tan_map,
                (inst_roc.shape[1] // 2, inst_roc.shape[0] // 2),
                [r_3, r_5, r_7],
                angle_k1,
                (sim_k1, sim_k2)
            )
            axial_map = draw_circles(
                axial_map,
                (inst_roc.shape[1] // 2, inst_roc.shape[0] // 2),
                [r_3, r_5, r_7],
                angle_k1,
                (sim_k1, sim_k2)
            )
            # CLMI & PPK score.
            clmi_ppk(
                k2_raw.copy(),
                axial_map.copy(),
                r_2,
                r_7,
                (inst_roc.shape[1] // 2, inst_roc.shape[0] // 2),
            )
            # KISA score.
            KISA(
                k1_raw.copy(),
                (k1_raw.shape[1] // 2, k1_raw.shape[0] // 2),
                relative_points,
                r_3,
                diff,
            )
            compute_tilt_factor(k2_raw.copy(), axial_map.copy(), r_1, r_3_5,
                (k1_raw.shape[1]//2, k1_raw.shape[0]//2), angle_k1, image_name, output_folder=self.output)
        return error, tan_map, axial_map, sim_k1, sim_k2, angle, average_k, diff

    def run_arc_step_gen_maps(self, image_seg, image_name, center, coords, h, w, err1=0, err2=0):
        """Run the arc-step reconstruction along every meridian and produce
        the smoothed maps.

        Returns: (error, tan_map, axial_map, [sim_k1, sim_k2, angle,
        average_k, diff]).
        """
        blank = np.full((image_seg.shape[0], image_seg.shape[1]), -1e6, dtype="float64")
        elevation, error_map = blank.copy(), blank.copy()
        # Step 5: for each point compute image size on sensor (for slope).
        arc_step_k = []        # per-meridian arc-step K from the first pass
        relative_points = []   # per-mire points for PPK / KISA metrics
        for mire in range(len(coords)):
            relative_points.append([])
        # First pass: collect arc-step parameters for every meridian.
        for idx, angle in enumerate(np.arange(self.start_angle, self.end_angle, self.jump)):
            pixels_size = []
            for mire in range(len(coords)):
                y, x = coords[mire][idx]
                obj_width, obj_height = abs(x - center[0]), abs(y - center[1])
                r_new = (obj_width ** 2 + obj_height ** 2) ** 0.5
                pixels_size.append([obj_width, obj_height])
                relative_points[mire].append((y - center[1], x - center[0]))
            k, oz, oy = get_arc_step_params(
                pixels_size,
                w,
                h,
                self.sensor_dims,
                self.f_len,
                self.working_distance + err1,
                self.model_file,
                mid_point=True,
            )
            arc_step_k.append(k)
        max_r = -1
        three_d_points = []
        plot_x, plot_y, plot_z = [], [], []
        # Second pass: run arc-step per meridian and gather 3-D points.
        for idx, angle in enumerate(np.arange(self.start_angle, self.end_angle, self.jump)):
            pixels_size = []
            for mire in range(len(coords)):
                y, x = coords[mire][idx]
                obj_width, obj_height = abs(x - center[0]), abs(y - center[1])
                r_new = (obj_width ** 2 + obj_height ** 2) ** 0.5
                max_r = max(r_new, max_r)
                pixels_size.append([obj_width, obj_height])
            # Step 6: real-world coordinates & parameters for the arc-step
            # method (mid_point=True: ring mid points rather than edges).
            k, oz, oy = get_arc_step_params(
                pixels_size,
                w,
                h,
                self.sensor_dims,
                self.f_len,
                self.working_distance + err1,
                self.model_file,
                mid_point=True,
            )
            # Average k[0] with the diametrically opposite meridian.
            # NOTE(review): arc_step_k is indexed by `angle`, which is only a
            # valid list index when start_angle=0 and jump=1 — verify.
            opposite_angle = (angle + 180) % 360
            assert k[0] == arc_step_k[angle][0], "FATAL ERROR, k_0 angles not equal"
            k[0] = (k[0] + arc_step_k[opposite_angle][0]) / 2.0
            # Step 7: run the arc-step method -> tangential radii per mire.
            zone = check_angle(angle, self.skip_angles)
            if zone == 1 or zone == 3:
                continue
            try:
                rocs, zs, ys = arc_step(
                    len(k) - 1, -(self.working_distance + err1 + err2), oz, oy, k
                )
            except:
                # Meridians where arc-step fails to converge are skipped.
                continue
            # Record radius / elevation at each detected mire pixel.
            for mire in range(len(coords)):
                y, x = coords[mire][idx]
                blank[int(y), int(x)] = rocs[mire + 1]
                elevation[int(y), int(x)] = zs[mire + 1]
            # Collect the reconstructed 3-D points for this meridian.
            angle_three_d_points = []
            for mire in range(len(coords)):
                x, y, z = get_three_d_points(ys[mire + 1], zs[mire + 1], angle)
                angle_three_d_points.append((x, y, z))
                plot_x.append(x); plot_y.append(y); plot_z.append(z)
            three_d_points.append(angle_three_d_points)
        # Anchor the apex at the origin, then vectorise the point cloud.
        plot_x.append(0.0); plot_y.append(0.0); plot_z.append(0.0)
        number_of_points = len(plot_x)
        plot_x = np.array(plot_x).reshape((number_of_points, 1))
        plot_y = np.array(plot_y).reshape((number_of_points, 1))
        plot_z = np.array(plot_z).reshape((number_of_points, 1))
        # Normalise x/y to the unit disc for the Zernike fit.
        rho = np.sqrt(np.square(plot_x) + np.square(plot_y))
        xy_norm = rho.max()
        plot_x, plot_y = plot_x / xy_norm, plot_y / xy_norm
        # Output grid for the final maps (even size 2*max_r).
        max_r = (max_r // 2) * 2
        ddx = np.linspace(-1.0, 1.0, int(2 * max_r))
        ddy = np.linspace(-1.0, 1.0, int(2 * max_r))
        xv, yv = np.meshgrid(ddx, ddy)
        error, tan_map, axial_map, sim_k1, sim_k2, angle, average_k, diff = self.zernike_smoothening(
            image_name, plot_x, plot_y, plot_z, xy_norm, xv, yv, max_r, relative_points)
        return error, tan_map, axial_map, [sim_k1, sim_k2, angle, average_k, diff]

    # main runner function to generate topography maps from input image
    def generate_topography_maps(
        self, base_dir, image_name, crop_dims=(1200,1200), iso_dims=500,
        center=(-1, -1), downsample=False, blur=True, upsample=None,
        err1=None, err2=None, skip_angles=None,
        center_selection="manual",
    ):
        """Full pipeline: preprocess -> detect mires -> arc-step -> maps.

        Writes annotated tangential/axial overlay images into the output
        folder and returns the list of reconstruction errors (one per
        (err1, err2) combination).
        """
        # Fix: None sentinels instead of shared mutable default lists.
        if err1 is None:
            err1 = [0]
        if err2 is None:
            err2 = [0]
        if skip_angles is None:
            skip_angles = [[-1, -1], [-1, -1]]
        self.output = self.test_name
        self.skip_angles = skip_angles
        # Create the output directories if not present.
        if not os.path.isdir(self.output):
            os.mkdir(self.output)
        if not (os.path.isdir(self.output+"/"+image_name.split(".jpg")[0])):
            os.mkdir(self.output+"/"+image_name.split(".jpg")[0])
        # Steps 1-3: centering/cropping, enhancement/cleaning, and center
        # location (central mire, segmentation centroid, or manual when
        # center == (-1, -1)).
        image_gray, image_seg, image_edge, center, iris_pix_dia = preprocess_image(
            base_dir,
            image_name,
            center,
            downsample=downsample,
            blur=blur,
            crop_dims=crop_dims,
            iso_dims=iso_dims,
            output_folder=self.output,
            filter_radius=10,
            center_selection=center_selection
        )
        if upsample is not None:
            self.ups = upsample
        # Optionally upsample the image to a higher resolution.
        if self.ups > 1:
            image_gray, image_seg, image_edge, center = increase_res(
                image_gray, image_seg, image_edge, center, self.ups, image_name.split(".jpg")[0]
            )
        # Step 4: mire detection + meridional points on each mire.
        image_cent_list, center, others = process(
            image_seg, image_gray, center, self.jump, self.start_angle, self.end_angle
        )
        _, _, image_mp = others
        image_name = image_name.split(".jpg")[0]
        cv2.imwrite(self.output+"/" + image_name + "/" + image_name + "_mp.png", image_mp)
        # Clean up the detected points per meridian.
        r_pixels, coords = clean_points(
            image_cent_list, image_gray.copy(), image_name, center, self.n_mires, self.jump, self.start_angle, self.end_angle, output_folder=self.output,
        )
        # Estimate gap1 from the (diameter of) mire 20 when a mapping
        # function was supplied; this overrides the err1 argument.
        mire_20_radius = np.mean(r_pixels[20][15:330])*2.0
        if self.f_gap1 is not None:
            err1 = [round(self.f_gap1(1/mire_20_radius),2)]
        # Real image dimensions, accounting for upsampling.
        h, w = cv2.imread(base_dir + "/" + image_name + ".jpg").shape[:2]
        h, w = self.ups * h, self.ups * w
        errors = []
        # Steps 5-7 for every (err1, err2) correction pair.
        for e1 in err1:
            for e2 in err2:
                error, tan_map, axial_map, sims = self.run_arc_step_gen_maps(
                    image_seg, image_name, center, coords, h, w, err1=e1, err2=e2
                )
                errors.append(error)
                # Overlay the maps on the gray image (maps are centered).
                image_overlay = np.dstack((image_gray, np.dstack((image_gray, image_gray)))).astype(np.uint8)
                temp_map = np.zeros_like(image_overlay)
                temp_map[
                    center[1] - tan_map.shape[0] // 2 : center[1] + tan_map.shape[0] // 2,
                    center[0] - tan_map.shape[1] // 2 : center[0] + tan_map.shape[1] // 2,
                    :] = tan_map
                tan_map = temp_map.copy()
                temp_map = np.zeros_like(image_overlay)
                temp_map[
                    center[1] - axial_map.shape[0] // 2 : center[1] + axial_map.shape[0] // 2,
                    center[0] - axial_map.shape[1] // 2 : center[0] + axial_map.shape[1] // 2,
                    :] = axial_map
                axial_map = temp_map.copy()
                # Black out the overlay where the map has content, then add.
                mask = axial_map[:, :, 0] > 0
                image_overlay[mask] = [0, 0, 0]
                tan_map_overlay = image_overlay + tan_map
                axial_map_overlay = image_overlay + axial_map
                # Annotate Sim-K readings on both overlays.
                cv2.putText(
                    tan_map_overlay,
                    "Sim K1: "+ str(sims[0])+ "D @"+ str(sims[2])+ " K2: "+ str(sims[1])+ "D @"+ str(sims[2] + 90),
                    (5, 20),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255),
                    2,
                )
                cv2.putText(tan_map_overlay,
                    "Avg: " + str(sims[3]) + "D Diff: " + str(sims[4]) + "D",
                    (5, 40),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255),
                    2,
                )
                cv2.putText(
                    axial_map_overlay,
                    "Sim K1: "+ str(sims[0]) + "D @" + str(sims[2]) + " K2: "+ str(sims[1])+ "D @"+ str(sims[2] + 90),
                    (5, 20),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255),
                    2,
                )
                cv2.putText(
                    axial_map_overlay,
                    "Avg: " + str(sims[3]) + "D Diff: " + str(sims[4]) + "D",
                    (5, 40),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255),
                    2,
                )
                cv2.imwrite(
                    self.output+"/" + image_name + "/" + image_name + "_tan_map_overlay.png",
                    tan_map_overlay,
                )
                cv2.imwrite(
                    self.output+"/" + image_name + "/" + image_name + "_axial_map_overlay.png",
                    axial_map_overlay,
                )
        print("Test Complete!")
        return errors
if __name__ == "__main__":
    # Parse the CLI arguments declared above.
    args = parser.parse_args()
    # Camera parameters arrive as "width height focal_len", e.g. "4.27 5.68 4.25".
    cam = args.camera_params.split()
    sensor_dims = (float(cam[0]), float(cam[1]))
    f_len = float(cam[2])
    # Polynomial mapping 1/mire_21_radius -> gap1 (5 mm gap2, mire_21, id_20).
    f_inv_20_5 = np.poly1d([3583.52156815, -17.31674123])
    # Build the pipeline object.
    corneal_top_obj = corneal_top_gen(
        args.model_file, args.working_distance, sensor_dims,
        f_len, args.start_angle, args.end_angle, args.jump,
        args.upsample, args.n_mires, f_inv_20_5,
    )
    base_dir = args.base_dir
    skip_angles = [[-1, -1], [-1, -1]]
    center = (-1, -1)
    # Run the pipeline on the test image (expects .jpg input).
    error = corneal_top_obj.generate_topography_maps(
        base_dir,
        "607348_right_2.jpg",
        center=center,
        downsample=True,
        blur=True,
        err1=[args.gap1],
        err2=[args.gap2],
    )
<filename>source/FnAssetAPI/ManagerFactory.py
import os
from . import logging
from .core import PluginManager
from .Manager import Manager
__all__ = ['ManagerFactory',]
class ManagerFactory(object):
"""
A Factory to manage @ref python.implementation.ManagerPlugin derived plugins
and instantiation of Manager and UIDelegate instances. Not usually used
directly by a @ref Host, which instead uses the @ref python.SessionManager
@envvar **FOUNDRY_ASSET_PLUGIN_PATH** *str* A PATH-style list of directories to
search for @ref python.implementation.ManagerPlugin based plugins. It uses
the platform-native delimiter. Searched left to right.
"""
## The Environment Variable to read the plug-in search path from
kPluginEnvVar = "FOUNDRY_ASSET_PLUGIN_PATH"
__instance = None
@classmethod
def instance(cls):
    """
    @return ManagerFactory, returns a lazily-constructed singleton instance of
    the ManagerFactory.
    """
    existing = cls.__instance
    if existing:
        return existing
    cls.__instance = ManagerFactory()
    return cls.__instance
def __init__(self):
    """Initialise an empty factory: no plugins scanned, no cached instances."""
    super(ManagerFactory, self).__init__()
    # Lazily created by scan(); the dicts cache Manager / UI delegate
    # instances keyed by identifier.
    self.__pluginManager = None
    self.__instances = {}
    self.__delegates = {}
def scan(self, paths=None):
    """
    Scans for ManagerPlugins, and registers them with the factory instance.

    @param paths str, A searchpath string to search for plug-ins. If None, then
    the contents of the Environment Variable @ref kPluginEnvVar is used instead.
    """
    search_paths = paths if paths else os.environ.get(self.kPluginEnvVar, "")
    if not search_paths:
        logging.warning(("%s is not set. Its somewhat unlikely that you will "
            +"find any plugins...") % self.kPluginEnvVar)
    if not self.__pluginManager:
        self.__pluginManager = PluginManager.instance()
    # The PluginManager is instantiated even when there is nothing to scan,
    # so its lifetime is consistent with the case where paths were set.
    if not search_paths:
        return
    self.__pluginManager.scan(search_paths)
def identifiers(self):
    """
    @return list, all identifiers known to the factory.
    @see python.implementation.ManagerPlugin
    """
    pm = self.__pluginManager
    if not pm:
        # Nothing scanned yet — no identifiers.
        return []
    return pm.identifiers()
def managers(self):
    """
    @return dict, Keyed by identifiers, each value is a dict containing
    information about the Manager provided by the plugin. This dict has the
    following keys:
      @li **name** The display name of the Manager suitable for UI use.
      @li **identifier** It's identifier
      @li **info** The info dict from the Manager (see: @ref
      python.implementation.ManagerInterfaceBase.getInfo
      "ManagerInterfaceBase.getInfo()")
      @li **plugin** The plugin class that represents the Manager (see: @ref
      python.implementation.ManagerPlugin)
    """
    if not self.__pluginManager:
        return {}
    result = {}
    for pluginId in self.__pluginManager.identifiers():
        # A broken plugin should not prevent the rest from being listed.
        try:
            plugin = self.__pluginManager.getPlugin(pluginId)
            interface = plugin.getInterface()
        except Exception as e:
            logging.critical("Error loading plugin for '%s': %s" % (pluginId, e))
            continue
        managerIdentifier = interface.getIdentifier()
        result[pluginId] = {
            'name' : interface.getDisplayName(),
            'identifier' : managerIdentifier,
            'info' : interface.getInfo(),
            'plugin' : plugin
        }
        # Warn when the plugin registration and the manager disagree.
        if pluginId != managerIdentifier:
            msg = ("Manager '%s' is not registered with the same identifier as "
                +"it's plugin ('%s' instead of '%s')") % (interface.getDisplayName(),
                managerIdentifier, pluginId)
            logging.log(msg, logging.kWarning)
    return result
def managerRegistered(self, identifier):
    """
    @return bool, True if the supplied identifier is known to the factory.
    """
    # Fix: guard against scan() never having been called, for consistency
    # with identifiers()/managers(); previously this raised AttributeError
    # on the None plugin manager.
    if not self.__pluginManager:
        return False
    return identifier in self.__pluginManager.identifiers()
def instantiate(self, identifier, cache=True):
    """
    Creates an instance of the @ref ManagerInterfaceBase with the specified
    identifier, and wraps it as a @ref Manager.

    @param cache bool, When True the created instance will be cached, and
    immediately returned by subsequence calls to this function with the same
    identifier - instead of creating a new instance. If False, a new instance
    will be created each time, and never retained.

    @raise RuntimeError when scan() has not yet been called.
    """
    if not self.__pluginManager:
        # Fix: was the Python-2-only `raise RuntimeError, "..."` form; the
        # call form is valid under both Python 2 and 3.
        raise RuntimeError("No plugins have been scanned for")

    if cache and identifier in self.__instances:
        return self.__instances[identifier]

    plugin = self.__pluginManager.getPlugin(identifier)
    interface = plugin.getInterface()
    manager = Manager(interface)

    if cache:
        self.__instances[identifier] = manager

    return manager
def instantiateUIDelegate(self, managerInterfaceInstance, cache=True):
    """
    Creates an instance of the @ref ManagerUIDelegate for the specified
    identifier.

    @param managerInterfaceInstance the instance of a ManagerInterface to
    retrieve the UI delegate for.

    @param cache bool, When True the created instance will be cached, and
    immediately returned by subsequence calls to this function with the same
    identifier - instead of creating a new instance. If False, a new instance
    will be created each time, and never retained.

    @raise RuntimeError when scan() has not yet been called.
    """
    if not self.__pluginManager:
        # Fix: was the Python-2-only `raise RuntimeError, "..."` form.
        raise RuntimeError("No plugins have been scanned for")

    ## @todo This probably has some retention issues we need to deal with
    if cache and managerInterfaceInstance in self.__delegates:
        return self.__delegates[managerInterfaceInstance]

    identifier = managerInterfaceInstance.getIdentifier()
    plugin = self.__pluginManager.getPlugin(identifier)
    delegateInstance = plugin.getUIDelegate(managerInterfaceInstance)

    if cache:
        self.__delegates[managerInterfaceInstance] = delegateInstance

    return delegateInstance
|
import os
import re
import sys
import time
import ctypes
import signal
import socket
import pyping
import ftplib
import poplib
import shutil
import hashlib
import smtplib
import logging
import binascii
import platform
import requests
import netifaces
import subprocess
import irc.client
import ConfigParser
from collections import OrderedDict
# Module-level logger for the test harness; bare messages at INFO to stdout.
logger = logging.getLogger('FakeNetTests')
logging.basicConfig(format='%(message)s', level=logging.INFO)
def is_admin():
    """Return True when running with root/Administrator privileges.

    POSIX exposes os.getuid(); on Windows that attribute is missing
    (AttributeError), so fall back to the shell32 IsUserAnAdmin check.
    """
    try:
        return os.getuid() == 0
    except AttributeError:
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
def execute_detached(execute_cmd, winders=False):
    """Launch execute_cmd detached from this process.

    @param execute_cmd str, the command line to run.
    @param winders bool, True on Windows (use DETACHED_PROCESS, no shell);
        on POSIX run through the shell with SIGINT ignored in the child.
    @return int PID of the child, or None on failure.
    """
    DETACHED_PROCESS = 0x00000008
    cflags = DETACHED_PROCESS if winders else 0
    cfds = False if winders else True
    shl = False if winders else True

    def ign_sigint():
        # Prevent KeyboardInterrupt in FakeNet-NG's console from
        # terminating child processes
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    preexec = None if winders else ign_sigint

    try:
        pid = subprocess.Popen(execute_cmd, creationflags=cflags,
                               shell=shl,
                               close_fds = cfds,
                               preexec_fn = preexec).pid
    # Fix: was the Python-2-only `except Exception, e` form; the rest of
    # this file already uses `as e`.
    except Exception as e:
        logger.info('Error: Failed to execute command: %s', execute_cmd)
        logger.info(' %s', e)
        return None
    else:
        return pid
def get_ips(ipvers):
    """Return IP addresses bound to local interfaces including loopbacks.

    Parameters
    ----------
    ipvers : list
        IP versions desired (4, 6, or both); ensures the netifaces semantics
        (e.g. netiface.AF_INET) are localized to this function.
    """
    specs = []
    results = []

    # Translate the requested versions into netifaces address-family specs.
    for ver in ipvers:
        if ver == 4:
            specs.append(netifaces.AF_INET)
        elif ver == 6:
            specs.append(netifaces.AF_INET6)
        else:
            raise ValueError('get_ips only supports IP versions 4 and 6')

    for iface in netifaces.interfaces():
        for spec in specs:
            addrs = netifaces.ifaddresses(iface)
            # If an interface only has an IPv4 or IPv6 address, then 6 or 4
            # respectively will be absent from the keys in the interface
            # addresses dictionary.
            if spec in addrs:
                for link in addrs[spec]:
                    if 'addr' in link:
                        results.append(link['addr'])

    return results
def get_external_ip():
    """Return the first non-loopback IPv4 address bound locally, or None.

    Fix: the original fell off the end (implicitly returning None) when
    only loopback addresses were found; the contract is now explicit.
    """
    addrs = get_ips([4])
    for addr in addrs:
        if not addr.startswith('127.'):
            return addr
    return None
class IrcTester(object):
    """Drives a scripted IRC conversation against FakeNet-NG's IRC listener
    and records which of the listener's canned responses were observed.
    """

    def __init__(self, hostname, port=6667):
        self.hostname = hostname
        self.port = port

        # Fixed identities and payload strings used by the scripted session.
        self.nick = 'dr_evil'
        self.join_chan = '#whatevs'
        self.clouseau = 'inspector_clouseau'
        self.safehouse = "I'm looking for a safe house."
        self.pub_chan = '#evil_bartenders'
        self.black_market = 'Black Market'

    def _irc_evt_handler(self, srv, evt):
        """Check for each case and set the corresponding success flag."""
        if evt.type == 'join':
            if evt.target.startswith(self.join_chan):
                self.join_ok = True
        elif evt.type == 'welcome':
            if evt.arguments[0].startswith('Welcome to IRC'):
                self.welcome_ok = True
        elif evt.type == 'privmsg':
            if (evt.arguments[0].startswith(self.safehouse) and
                    evt.source.startswith(self.clouseau)):
                self.privmsg_ok = True
        elif evt.type == 'pubmsg':
            if (evt.arguments[0].startswith(self.black_market) and
                    evt.target == self.pub_chan):
                self.pubmsg_ok = True

    def _irc_script(self, srv):
        """Callback manages individual test cases for IRC."""
        # Clear success flags
        self.welcome_ok = False
        self.join_ok = False
        self.privmsg_ok = False
        self.pubmsg_ok = False

        # This handler should set the success flags in success cases
        srv.add_global_handler('join', self._irc_evt_handler)
        srv.add_global_handler('welcome', self._irc_evt_handler)
        srv.add_global_handler('privmsg', self._irc_evt_handler)
        srv.add_global_handler('pubmsg', self._irc_evt_handler)

        # Issue all commands, indirectly invoking the event handler for each
        # flag
        srv.join(self.join_chan)
        srv.process_data()

        srv.privmsg(self.pub_chan, self.black_market)
        srv.process_data()

        srv.privmsg(self.clouseau, self.safehouse)
        srv.process_data()

        srv.quit()
        srv.process_data()

        # Raise on the first failed case so the caller sees which one broke.
        if not self.welcome_ok:
            raise FakeNetTestException('Welcome test failed')

        if not self.join_ok:
            raise FakeNetTestException('Join test failed')

        if not self.privmsg_ok:
            raise FakeNetTestException('privmsg test failed')

        if not self.pubmsg_ok:
            raise FakeNetTestException('pubmsg test failed')

        # Only reached when every flag is True (the raises above fired
        # otherwise), so this effectively always returns True here.
        return all([
            self.welcome_ok,
            self.join_ok,
            self.privmsg_ok,
            self.pubmsg_ok
        ])

    def _run_irc_script(self, nm, callback):
        """Connect to server and give control to callback."""
        # NOTE(review): the nm parameter is unused.
        r = irc.client.Reactor()
        srv = r.server()
        srv.connect(self.hostname, self.port, self.nick)

        retval = callback(srv)

        srv.close()

        return retval

    def test_irc(self):
        """Run the scripted session; True on success, raises on failure."""
        return self._run_irc_script('testnm', self._irc_script)
class FakeNetTestException(Exception):
    """Recognizable exception type signaling a known failure state based on
    test criteria. Currently used by the HTTP test; other tests may adopt it.
    """
class FakeNetTester(object):
    """Controller for FakeNet-NG that runs test cases"""

    def __init__(self, settings):
        # FakeNetTestSettings carrying paths/hosts/ports for this run.
        self.settings = settings
        # PID of the detached FakeNet-NG process; None when not running.
        self.pid_fakenet = None
def _setStopFlag(self):
    """Create the stop-flag file that signals FakeNet-NG to shut down."""
    with open(self.settings.stopflag, 'w') as f:
        f.write('1')

def _clearStopFlag(self):
    """Delete the stop-flag file if it exists."""
    if os.path.exists(self.settings.stopflag):
        os.remove(self.settings.stopflag)

def _confirmFakenetStopped(self):
    """True once FakeNet-NG has consumed (deleted) the stop-flag file."""
    return not os.path.exists(self.settings.stopflag)

def _waitFakenetStopped(self, timeoutsec=None):
    """Poll once per second until FakeNet-NG stops.

    @param timeoutsec int or None, approximate seconds to wait; None waits
        indefinitely.
    @return bool, True if it stopped within the timeout.
    """
    retval = False

    while True:
        if self._confirmFakenetStopped():
            retval = True
            break
        time.sleep(1)

        if timeoutsec is not None:
            timeoutsec -= 1
            if timeoutsec <= 0:
                break

    return retval
def _checkPid(self, pid):
    """Return True when a process with the given PID exists.

    Windows: try to open the process handle; POSIX: signal 0 probes for
    existence without affecting the target.
    """
    retval = False
    if self.settings.windows:
        PROCESS_TERMINATE = 1
        p = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, 0, pid)
        retval = p != 0;
        if p:
            ctypes.windll.kernel32.CloseHandle(p)
    else:
        # https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
        try:
            os.kill(pid, 0)
        except OSError:
            pass
        else:
            retval = True

    return retval

def _kill(self, pid):
    """Forcibly terminate the process with the given PID."""
    if self.settings.windows:
        PROCESS_TERMINATE = 1
        # Note, this will get a handle even after the process terminates,
        # in which case TerminateProcess will simply return FALSE.
        p = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, 0, pid)
        if p:
            ok = ctypes.windll.kernel32.TerminateProcess(p, 1)
            ctypes.windll.kernel32.CloseHandle(p)
    else:
        os.kill(pid, signal.SIGKILL)
def stopFakenetAndWait(self, timeoutsec=None, kill=False):
    """Request shutdown via the stop flag and wait for it to take effect.

    @param timeoutsec int or None, how long to wait for a graceful stop.
    @param kill bool, when True and the process survives the timeout,
        terminate it outright.
    @return bool, True if FakeNet-NG stopped in response to the flag.
    @raise RuntimeError when FakeNet-NG is not running.
    """
    if not self.pid_fakenet:
        raise RuntimeError('FakeNet-NG not running, nothing to stop')

    self._setStopFlag()
    stopped_responsive = self._waitFakenetStopped(timeoutsec)

    if not stopped_responsive:
        # FakeNet-NG never consumed the flag; clean it up ourselves.
        self._clearStopFlag()

        if kill and self._checkPid(self.pid_fakenet):
            self._kill(self.pid_fakenet)

    self.pid_fakenet = None

    return stopped_responsive
def executeFakenet(self):
    """Start FakeNet-NG detached, after deleting any stale log file.

    @return bool, True when a child PID was obtained.
    @raise RuntimeError when FakeNet-NG is already running.
    """
    if self.pid_fakenet:
        raise RuntimeError('FakeNet-NG already running, PID %d' %
            (self.pid_fakenet))

    os.chdir(self.settings.fndir)

    max_del_attempts = 3
    if os.path.exists(self.settings.logpath):
        for i in range(1, max_del_attempts + 1):
            try:
                os.remove(self.settings.logpath)
            # Fix: was `except WindowsError`, a name that does not exist on
            # POSIX (evaluating the except clause there raised NameError).
            # WindowsError subclasses OSError on Windows, so OSError still
            # catches a log file locked by another process.
            except OSError: # i.e. log file locked by another process
                logger.warning('Failed to delete %s, attempt %d' %
                    (self.settings.logpath, i))
                if i == max_del_attempts:
                    logger.error('Final attempt, re-raising exception')
                    raise
                else:
                    logger.warning('Retrying in %d seconds...' % (i))
                    time.sleep(i)
            else:
                break

    cmd = self.settings.genFakenetCmd()
    logger.info('About to run %s' % (cmd))
    self.pid_fakenet = execute_detached(cmd, self.settings.windows)
    if self.pid_fakenet:
        logger.info('FakeNet started with PID %s' % (str(self.pid_fakenet)))

    return (self.pid_fakenet is not None)
def delConfig(self):
    """Remove the generated FakeNet-NG config files if present."""
    for path in (self.settings.configpath, self.settings.configpath_http):
        if os.path.exists(path):
            os.remove(path)
def doTests(self, match_spec):
    """Run every test suite, filtered by the given match specifications."""
    self.testGeneral(match_spec)
    self.testNoRedirect(match_spec)
    self.testBlacklistProcess(match_spec)
    self.testWhitelistProcess(match_spec)
def _printStatus(self, desc, passed):
    """Log a one-line pass/fail banner for a single test."""
    if passed:
        logger.info('[ + ] Passed: %s' % (desc))
    else:
        logger.info('[!!!] FAILED: %s' % (desc))
def _tryTest(self, desc, callback, args, expected):
retval = None
try:
retval = callback(*args)
except Exception as e:
logger.info('Test %s: Uncaught exception of type %s: %s' %
(desc, str(type(e)), str(e)))
passed = (retval == expected)
return passed
def _filterMatchingTests(self, tests, matchspec):
"""Remove tests that match negative specifications (regexes preceded by
a minus sign) or do not match positive specifications (regexes not
preceded by a minus sign).
Modifies the contents of the tests dictionary.
"""
negatives = []
positives = []
if len(matchspec):
# If the user specifies a minus sign before a regular expression,
# match negatively (exclude any matching tests)
for spec in matchspec:
if spec.startswith('-'):
negatives.append(spec[1:])
else:
positives.append(spec)
# Iterating over tests first, match specifications second to
# preserve the order of the selected tests. Less efficient to
# compile every regex several times, but less confusing.
for testname, test in tests.items():
# First determine if it is to be excluded, in which case,
# remove it and do not evaluate further match specifications.
exclude = False
for spec in negatives:
if bool(re.search(spec, testname)):
exclude = True
if exclude:
tests.pop(testname)
continue
# If the user ONLY specified negative match specifications,
# then admit all tests
if not len(positives):
continue
# Otherwise, only admit if it matches a positive spec
include = False
for spec in positives:
if bool(re.search(spec, testname)):
include = True
break
if not include:
tests.pop(testname)
return
def _testGeneric(self, label, config, tests, matchspec=[]):
    """Write the config, (re)start FakeNet-NG if single-host, run each test
    with one retry, then stop FakeNet-NG and clean up.

    @param label str, suite name used in operator prompts.
    @param config FakeNetConfig to write before running.
    @param tests OrderedDict, name -> (callback, args, expected).
    @param matchspec list of regexes filtering which tests run.
    @return bool, False when no tests matched; otherwise falls through
        (implicitly returns None after running the suite).
    """
    # NOTE(review): written for Python 2 — uses raw_input() and
    # dict.iteritems() below.
    self._filterMatchingTests(tests, matchspec)

    if not len(tests):
        logger.info('No matching tests')
        return False

    # If doing a multi-host test, then toggle the network mode
    if not self.settings.singlehost:
        config.multiHostMode()

    self.writeConfig(config)

    if self.settings.singlehost:
        if not self.executeFakenet():
            self.delConfig()
            return False

        sec = self.settings.sleep_after_start
        logger.info('Sleeping %d seconds before commencing' % (sec))
        time.sleep(sec)
    else:
        # Multi-host mode: the operator runs FakeNet-NG on a remote box and
        # confirms interactively before the suite proceeds.
        logger.info('Waiting for you to transition the remote FakeNet-NG')
        logger.info('system to run the %s test suite' % (label))
        logger.info('(Copy this config: %s)' % (self.settings.configpath))
        logger.info('(And this: %s)' % (self.settings.configpath_http))
        logger.info('')

        while True:
            logger.info('Type \'ok\' to continue, or \'exit\' to stop')
            try:
                ok = raw_input()
            except EOFError:
                ok = 'exit'

            if ok.lower() in ['exit', 'quit', 'stop', 'n', 'no']:
                sys.exit(0)
            elif ok.lower() in ['ok', 'okay', 'go', 'y', 'yes']:
                break

    logger.info('-' * 79)
    logger.info('Testing')
    logger.info('-' * 79)

    # Do each test
    for desc, (callback, args, expected) in tests.iteritems():
        logger.debug('Testing: %s' % (desc))
        passed = self._tryTest(desc, callback, args, expected)

        # Retry in case of transient error e.g. timeout
        if not passed:
            logger.debug('Retrying: %s' % (desc))
            passed = self._tryTest(desc, callback, args, expected)

        self._printStatus(desc, passed)

        time.sleep(0.5)

    logger.info('-' * 79)
    logger.info('Tests complete')
    logger.info('-' * 79)

    if self.settings.singlehost:
        sec = self.settings.sleep_before_stop
        logger.info('Sleeping %d seconds before transitioning' % (sec))
        time.sleep(sec)

        logger.info('Stopping FakeNet-NG and waiting for it to complete')
        responsive = self.stopFakenetAndWait(15, True)

        if responsive:
            logger.info('FakeNet-NG is stopped')
        else:
            logger.info('FakeNet-NG was no longer running or was stopped forcibly')

        time.sleep(1)

    self.delConfig()
def _test_sk(self, proto, host, port, timeout=5):
    """Test socket-oriented

    Echo test: send a fixed string to host:port and require it echoed back
    verbatim.

    @return bool, True when the echoed data matches.
    """
    # NOTE(review): written for Python 2, where str is bytes; under
    # Python 3 send()/recv() would need bytes objects.
    retval = False
    s = socket.socket(socket.AF_INET, proto)
    s.settimeout(timeout)

    try:
        s.connect((host, port))

        teststring = 'Testing FakeNet-NG'

        # Send everything, tolerating partial sends.
        # NOTE(review): a partial send re-sends from the start of the
        # string; in practice the 18-byte payload goes out in one call.
        remaining = len(teststring)
        while remaining:
            sent = s.send(teststring)
            if sent == 0:
                raise IOError('Failed to send all bytes')
            remaining -= sent

        # Receive it back, tolerating partial reads.
        recvd = ''
        remaining = len(teststring)
        while remaining:
            chunk = s.recv(remaining)
            if chunk == '':
                raise IOError('Failed to receive all bytes')
            remaining -= len(chunk)
            recvd += chunk

        retval = (recvd == teststring)

    except socket.error as e:
        logger.error('Socket error: %s (%s %s:%d)' %
            (str(e), proto, host, port))
    except Exception as e:
        logger.error('Non-socket Exception received: %s' % (str(e)))
    finally:
        # Fix: the original leaked the socket on every path; close it.
        s.close()

    return retval
def _test_icmp(self, host):
    """Ping host once; True when pyping reports success (ret_code 0)."""
    r = pyping.ping(host, count=1)
    return (r.ret_code == 0)
def _test_ns(self, hostname, expected):
    """Resolve hostname via the DNS listener; True on the expected answer."""
    resolved = socket.gethostbyname(hostname)
    return resolved == expected
def _test_smtp_ssl(self, sender, recipient, msg, hostname, port=None, timeout=5):
    """Send one message through the SMTPS listener; True on success.

    Fixes: the original called sendmail() on the undefined name `server`
    (NameError at runtime) and returned None instead of True on success,
    unlike _test_smtp.
    """
    smtpserver = smtplib.SMTP_SSL(hostname, port, 'fake.net', None, None, timeout)
    smtpserver.sendmail(sender, recipient, msg)
    smtpserver.quit()
    return True
def _test_smtp(self, sender, recipient, msg, hostname, port=None, timeout=5):
    """Send one message through the SMTP listener; True on success.

    'fake.net' is passed as the EHLO/HELO local hostname.
    """
    smtpserver = smtplib.SMTP(hostname, port, 'fake.net', timeout)
    smtpserver.sendmail(sender, recipient, msg)
    smtpserver.quit()

    return True
def _test_pop(self, hostname, port=None, timeout=5):
    """Retrieve message 1 from the POP3 listener and sanity-check it.

    @return bool, True when the response is +OK and the canned message
        contains the expected marker string.
    """
    pop3server = poplib.POP3(hostname, port, timeout)
    pop3server.user('popuser')
    pop3server.pass_('password')
    msg = pop3server.retr(1)

    response = msg[0]
    lines = msg[1]
    octets = msg[2]

    if not response.startswith('+OK'):
        logger.error('POP3 response does not start with "+OK"')
        return False

    if not 'Alice' in ''.join(lines):
        # Fix: the original raised FakeNetTestException here followed by an
        # unreachable `return False`; log and return False for consistency
        # with the response check above (callers via _tryTest treat an
        # exception and a False return identically).
        logger.error('POP3 message did not contain expected string')
        return False

    return True
def _util_irc(self, nm, hostname, port, nick, callback):
    """Connect to an IRC server and hand control to callback.

    NOTE(review): appears to duplicate IrcTester._run_irc_script and is not
    referenced elsewhere in this file; the nm parameter is unused.
    """
    r = irc.client.Reactor()
    srv = r.server()
    srv.connect(hostname, port, nick)

    retval = callback(srv)

    srv.close()

    return retval
def _test_irc(self, hostname, port=6667):
    """Run the scripted IRC suite (see IrcTester) against hostname:port."""
    return IrcTester(hostname, port).test_irc()
def _test_http(self, hostname, port=None, scheme=None, uri=None,
               teststring=None):
    """Test HTTP Listener"""
    # Fetch scheme://hostname[:port]/uri and require HTTP 200 plus the
    # expected marker string in the body.
    retval = False
    scheme = scheme if scheme else 'http'
    uri = uri.lstrip('/') if uri else 'asdf.html'
    teststring = teststring if teststring else 'H T T P L I S T E N E R'

    if port:
        url = '%s://%s:%d/%s' % (scheme, hostname, port, uri)
    else:
        url = '%s://%s/%s' % (scheme, hostname, uri)

    try:
        r = requests.get(url, timeout=3)

        if r.status_code != 200:
            raise FakeNetTestException('Status code %d' % (r.status_code))

        if teststring not in r.text:
            raise FakeNetTestException('Test string not in response')

        retval = True

    # Expected failure modes simply leave retval False; any other exception
    # propagates to _tryTest, which logs it.
    except requests.exceptions.Timeout as e:
        pass

    except FakeNetTestException as e:
        pass

    return retval
def _test_ftp(self, hostname, port=None):
    """Note that the FakeNet-NG Proxy listener won't know what to do with
    this client if you point it at some random port, because the client
    listens silently for the server 220 welcome message which doesn't give
    the Proxy listener anything to work with to decide where to forward it.
    """
    # NOTE(review): fullbuf is unused — presumably a leftover from an
    # earlier implementation.
    fullbuf = ''

    m = hashlib.md5()

    def update_hash(buf):
        # Accumulate the MD5 of the retrieved file chunk by chunk.
        m.update(buf)

    f = ftplib.FTP()
    f.connect(hostname, port)
    f.login()
    # Active mode; the listener serves the canned FakeNet.gif payload.
    f.set_pasv(False)
    f.retrbinary('RETR FakeNet.gif', update_hash)
    f.quit()

    digest = m.digest()
    # Expected MD5 of the canned FakeNet.gif served by the FTP listener.
    expected = binascii.unhexlify('a6b78c4791dc8110dec6c55f8a756395')

    return (digest == expected)
def testNoRedirect(self, matchspec=[]):
    """Suite: behavior with RedirectAllTraffic disabled.

    Only traffic aimed directly at a bound listener (or localhost in
    single-host mode) should succeed.
    """
    config = self.makeConfig(singlehostmode=True, proxied=False, redirectall=False)

    domain_dne = self.settings.domain_dne
    ext_ip = self.settings.ext_ip
    arbitrary = self.settings.arbitrary
    localhost = self.settings.localhost

    tcp = socket.SOCK_STREAM
    # NOTE(review): udp is defined but unused in this suite.
    udp = socket.SOCK_DGRAM

    t = OrderedDict() # The tests

    t['RedirectAllTraffic disabled external IP @ bound'] = (self._test_sk, (tcp, ext_ip, 1337), True)
    t['RedirectAllTraffic disabled external IP @ unbound'] = (self._test_sk, (tcp, ext_ip, 9999), False)
    t['RedirectAllTraffic disabled arbitrary host @ bound'] = (self._test_sk, (tcp, arbitrary, 1337), False)
    t['RedirectAllTraffic disabled arbitrary host @ unbound'] = (self._test_sk, (tcp, arbitrary, 9999), False)
    t['RedirectAllTraffic disabled named host @ bound'] = (self._test_sk, (tcp, domain_dne, 1337), False)
    t['RedirectAllTraffic disabled named host @ unbound'] = (self._test_sk, (tcp, domain_dne, 9999), False)

    if self.settings.singlehost:
        t['RedirectAllTraffic disabled localhost @ bound'] = (self._test_sk, (tcp, localhost, 1337), True)
        t['RedirectAllTraffic disabled localhost @ unbound'] = (self._test_sk, (tcp, localhost, 9999), False)

    return self._testGeneric('No Redirect', config, t, matchspec)
def testBlacklistProcess(self, matchspec=[]):
    """Suite: traffic from a globally blacklisted process (this Python
    interpreter) must NOT be redirected.
    """
    config = self.makeConfig()
    config.blacklistProcess(self.settings.pythonname)

    arbitrary = self.settings.arbitrary

    tcp = socket.SOCK_STREAM
    # NOTE(review): udp is defined but unused in this suite.
    udp = socket.SOCK_DGRAM

    t = OrderedDict() # The tests

    if self.settings.singlehost:
        t['Global blacklisted process test'] = (self._test_sk, (tcp, arbitrary, 9999), False)

    return self._testGeneric('Global process blacklist', config, t, matchspec)
def testWhitelistProcess(self, matchspec=[]):
    """Suite: traffic from a globally whitelisted process (this Python
    interpreter) MUST be redirected.
    """
    config = self.makeConfig()
    config.whitelistProcess(self.settings.pythonname)

    arbitrary = self.settings.arbitrary

    tcp = socket.SOCK_STREAM
    # NOTE(review): udp is defined but unused in this suite.
    udp = socket.SOCK_DGRAM

    t = OrderedDict() # The tests

    if self.settings.singlehost:
        t['Global whitelisted process test'] = (self._test_sk, (tcp, arbitrary, 9999), True)

    return self._testGeneric('Global process whitelist', config, t, matchspec)
def testGeneral(self, matchspec=[]):
    """Suite: broad coverage of listeners and diverter behavior under the
    default (redirect-all, proxied) configuration.
    """
    config = self.makeConfig()

    domain_dne = self.settings.domain_dne
    ext_ip = self.settings.ext_ip
    arbitrary = self.settings.arbitrary
    blacklistedhost = self.settings.blacklistedhost
    blacklistedtcp = self.settings.blacklistedtcp
    blacklistedudp = self.settings.blacklistedudp
    localhost = self.settings.localhost
    dns_expected = self.settings.dns_expected
    hidden_tcp = self.settings.hidden_tcp
    no_service = self.settings.no_service

    sender = self.settings.sender
    recipient = self.settings.recipient
    smtpmsg = self.settings.smtpmsg

    tcp = socket.SOCK_STREAM
    udp = socket.SOCK_DGRAM

    t = OrderedDict() # The tests

    t['TCP external IP @ bound'] = (self._test_sk, (tcp, ext_ip, 1337), True)
    t['TCP external IP @ unbound'] = (self._test_sk, (tcp, ext_ip, 9999), True)
    t['TCP arbitrary @ bound'] = (self._test_sk, (tcp, arbitrary, 1337), True)
    t['TCP arbitrary @ unbound'] = (self._test_sk, (tcp, arbitrary, 9999), True)
    t['TCP domainname @ bound'] = (self._test_sk, (tcp, domain_dne, 1337), True)
    t['TCP domainname @ unbound'] = (self._test_sk, (tcp, domain_dne, 9999), True)
    if self.settings.singlehost:
        t['TCP localhost @ bound'] = (self._test_sk, (tcp, localhost, 1337), True)
        t['TCP localhost @ unbound'] = (self._test_sk, (tcp, localhost, 9999), False)

    t['UDP external IP @ bound'] = (self._test_sk, (udp, ext_ip, 1337), True)
    t['UDP external IP @ unbound'] = (self._test_sk, (udp, ext_ip, 9999), True)
    t['UDP arbitrary @ bound'] = (self._test_sk, (udp, arbitrary, 1337), True)
    t['UDP arbitrary @ unbound'] = (self._test_sk, (udp, arbitrary, 9999), True)
    t['UDP domainname @ bound'] = (self._test_sk, (udp, domain_dne, 1337), True)
    t['UDP domainname @ unbound'] = (self._test_sk, (udp, domain_dne, 9999), True)
    if self.settings.singlehost:
        t['UDP localhost @ bound'] = (self._test_sk, (udp, localhost, 1337), True)
        t['UDP localhost @ unbound'] = (self._test_sk, (udp, localhost, 9999), False)

    t['ICMP external IP'] = (self._test_icmp, (ext_ip,), True)
    t['ICMP arbitrary host'] = (self._test_icmp, (arbitrary,), True)
    t['ICMP domainname'] = (self._test_icmp, (domain_dne,), True)

    t['DNS listener test'] = (self._test_ns, (domain_dne, dns_expected), True)
    t['HTTP listener test'] = (self._test_http, (arbitrary,), True)
    t['HTTP custom test by URI'] = (self._test_http, (arbitrary, None, None, '/test.txt', 'Wraps this'), True)
    t['HTTP custom test by hostname'] = (self._test_http, ('other.c2.com', None, None, None, 'success'), True)
    t['HTTP custom test by both URI and hostname'] = (self._test_http, ('both_host.com', None, None, '/and_uri.txt', 'Ahoy'), True)
    t['HTTP custom test by both URI and hostname negative'] = (self._test_http, ('both_host.com', None, None, '/not_uri.txt', 'Ahoy'), False)
    t['FTP listener test'] = (self._test_ftp, (arbitrary,), True)
    t['POP3 listener test'] = (self._test_pop, (arbitrary, 110), True)
    t['SMTP listener test'] = (self._test_smtp, (sender, recipient, smtpmsg, arbitrary), True)

    # Does not work, SSL error
    t['SMTP SSL listener test'] = (self._test_smtp_ssl, (sender, recipient, smtpmsg, arbitrary), True)

    # Works on Linux, not on Windows
    t['IRC listener test'] = (self._test_irc, (arbitrary,), True)

    t['Proxy listener HTTP test'] = (self._test_http, (arbitrary, no_service), True)
    t['Proxy listener HTTP hidden test'] = (self._test_http, (arbitrary, hidden_tcp), True)

    t['TCP blacklisted host @ unbound'] = (self._test_sk, (tcp, blacklistedhost, 9999), False)
    t['TCP arbitrary @ blacklisted unbound'] = (self._test_sk, (tcp, arbitrary, blacklistedtcp), False)
    t['UDP arbitrary @ blacklisted unbound'] = (self._test_sk, (udp, arbitrary, blacklistedudp), False)

    if self.settings.singlehost:
        t['Listener process blacklist'] = (self._test_http, (arbitrary, self.settings.listener_proc_black), False)
        t['Listener process whitelist'] = (self._test_http, (arbitrary, self.settings.listener_proc_white), True)
        t['Listener host blacklist'] = (self._test_http, (arbitrary, self.settings.listener_host_black), True)
        # Fix: previously this entry reused listener_host_black, duplicating
        # the test above and leaving the whitelist listener (8083) untested.
        t['Listener host whitelist'] = (self._test_http, (arbitrary, self.settings.listener_host_white), True)

    return self._testGeneric('General', config, t, matchspec)
def makeConfig(self, singlehostmode=True, proxied=True, redirectall=True):
    """Build a FakeNetConfig from the template with the given toggles."""
    template = self.settings.configtemplate
    return FakeNetConfig(template, singlehostmode, proxied, redirectall)
def writeConfig(self, config):
    """Write the config and copy ancillary files where it expects them."""
    logger.info('Writing config to %s' % (self.settings.configpath))
    config.write(self.settings.configpath)

    # Ancillary files (custom-response config, sample provider, etc.) must
    # be placed where the written config references them.
    for filename in self.settings.ancillary_files:
        path = os.path.join(self.settings.startingpath, filename)
        dest = os.path.join(self.settings.ancillary_files_dest, filename)
        shutil.copyfile(path, dest)
class FakeNetConfig:
    """Convenience class to read/modify/rewrite a configuration template."""

    def __init__(self, path, singlehostmode=True, proxied=True, redirectall=True):
        # ConfigParser here is the Python 2 module name (configparser in Py3).
        self.rawconfig = ConfigParser.RawConfigParser()
        self.rawconfig.read(path)

        if singlehostmode:
            self.singleHostMode()
        else:
            self.multiHostMode()

        if not proxied: self.noProxy()

        self.setRedirectAll(redirectall)

    # Global process filters applied by the Diverter.
    def blacklistProcess(self, process): self.rawconfig.set('Diverter', 'ProcessBlacklist', process)
    def whitelistProcess(self, process): self.rawconfig.set('Diverter', 'ProcessWhitelist', process)

    def setRedirectAll(self, enabled):
        """Toggle the Diverter's RedirectAllTraffic setting."""
        if enabled:
            self.rawconfig.set('Diverter', 'RedirectAllTraffic', 'Yes')
        else:
            self.rawconfig.set('Diverter', 'RedirectAllTraffic', 'No')

    def singleHostMode(self): self.rawconfig.set('Diverter', 'NetworkMode', 'SingleHost')
    def multiHostMode(self): self.rawconfig.set('Diverter', 'NetworkMode', 'MultiHost')

    def noProxy(self):
        """Strip the proxy listeners and fall back to the raw listeners."""
        self.rawconfig.remove_section('ProxyTCPListener')
        self.rawconfig.remove_section('ProxyUDPListener')
        self.rawconfig.set('Diverter', 'DefaultTCPListener', 'RawTCPListener')
        self.rawconfig.set('Diverter', 'DefaultUDPListener', 'RawUDPListener')

    def write(self, path):
        """Serialize the (modified) configuration to path."""
        with open(path, 'w') as f:
            return self.rawconfig.write(f)
class FakeNetTestSettings:
    """Test constants/literals, some of which may vary per OS, etc."""

    def __init__(self, startingpath, singlehost=True):
        # Where am I? Who are you?
        self.platform_name = platform.system()
        self.windows = (self.platform_name == 'Windows')
        self.linux = (self.platform_name.lower().startswith('linux'))

        # Test parameters
        self.singlehost = singlehost
        self.startingpath = startingpath
        self.configtemplate = os.path.join(startingpath, 'template.ini')

        # Destination for the ancillary files copied by
        # FakeNetTester.writeConfig().
        self.ancillary_files_dest = self.genPath('%TEMP%', '/tmp/')
        self.ancillary_files = [
            'fakenet_http.ini',
            'HTTPCustomProviderExample.py',
            'sample_raw_response.txt',
        ]

        # Paths
        self.configpath = self.genPath('%TEMP%\\fakenet.ini', '/tmp/fakenet.ini')
        self.configpath_http = self.genPath('%TEMP%\\fakenet_http.ini', '/tmp/fakenet_http.ini')
        self.stopflag = self.genPath('%TEMP%\\stop_fakenet', '/tmp/stop_fakenet')
        self.logpath = self.genPath('%TEMP%\\fakenet.log', '/tmp/fakenet.log')
        self.fakenet = self.genPath('fakenet', 'python fakenet.py')
        # NOTE(review): hard-coded developer checkout path on POSIX — adjust
        # per machine or parameterize.
        self.fndir = self.genPath('.', '$HOME/files/src/flare-fakenet-ng/fakenet')

        # For process blacklisting
        self.pythonname = os.path.basename(sys.executable)

        # Various hosts/ports the suites point their probes at.
        self.ext_ip = get_external_ip()
        self.arbitrary = '8.8.8.8'
        self.blacklistedhost = '6.6.6.6'
        self.blacklistedtcp = 139
        self.blacklistedudp = 67
        self.hidden_tcp = 12345
        self.no_service = 10
        self.listener_proc_black = 8080 # HTTP listener with process blacklist
        self.listener_proc_white = 8081 # HTTP listener with process whitelists
        self.listener_host_black = 8082 # HTTP listener with host blacklist
        self.listener_host_white = 8083 # HTTP listener with host whitelists
        self.localhost = '127.0.0.1'
        self.dns_expected = '192.0.2.123'
        self.domain_dne = 'does-not-exist-amirite.fireeye.com'
        self.sender = '<EMAIL>'
        self.recipient = '<EMAIL>'
        self.smtpmsg = 'FakeNet-NG SMTP test email'

        # Behaviors
        self.sleep_after_start = 4
        self.sleep_before_stop = 1

    def genPath(self, winpath, unixypath):
        """Pick the platform-appropriate path and expand env variables."""
        if self.windows:
            return os.path.expandvars(winpath)
        else:
            return os.path.expandvars(unixypath)

    def genFakenetCmd(self):
        """Command line used to launch FakeNet-NG for a test run."""
        return ('%s -f %s -n -l %s -c %s' %
            (self.fakenet, self.stopflag, self.logpath, self.configpath))
def is_ip(s):
    """Return True when s is a dotted-quad IPv4 address.

    Fixes: uses a raw string for the regex (the original relied on Python
    passing the unrecognized '\\.' escape through unchanged), and rejects
    octets greater than 255, which the original pattern accepted (e.g.
    '999.1.2.3').
    """
    pat = r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$'
    if not re.match(pat, s):
        return False
    # The regex guarantees four numeric octets; bound each at 255.
    return all(int(octet) <= 255 for octet in s.split('.'))
def main():
    """CLI entry point: check privileges, validate arguments, run suites.

    Usage: test.py <where> [matchspec ...] where <where> is 'here' for
    single-host mode or a remote FakeNet-NG host's IP for multi-host mode.
    """
    if not is_admin():
        logger.error('Not an admin, exiting...')
        sys.exit(1)

    if len(sys.argv) < 2:
        logger.error('Usage: test.py <where> [matchspec1 [matchspec2 [...] ] ]')
        logger.error('')
        logger.error('Valid where:')
        logger.error(' here')
        logger.error(' Any dot-decimal IP address')
        logger.error('')
        logger.error('Each match specification is a regular expression that')
        logger.error('will be compared against test names, and any matches')
        logger.error('will be included. Because regular expression negative')
        logger.error('matching is complicated to use, you can just prefix')
        logger.error('a match specification with a minus sign to indicate')
        logger.error('that you would like to include only tests that do NOT')
        logger.error('match the expression.')
        sys.exit(1)

    # Validate where
    where = sys.argv[1]
    singlehost = (where.lower() == 'here')

    if not singlehost and not is_ip(where):
        logger.error('Invalid where: %s' % (where))
        sys.exit(1)

    # Will execute only tests matching *match_spec if specified
    match_spec = sys.argv[2:]

    if len(match_spec):
        logger.info('Only running tests that match the following ' +
            'specifications:')
        for spec in match_spec:
            logger.info(' %s' % (spec))

    # Doit
    startingpath = os.getcwd()
    settings = FakeNetTestSettings(startingpath, singlehost)

    if not singlehost: # <where> was an IP, so record it
        settings.ext_ip = where

    tester = FakeNetTester(settings)
    logger.info('Running with privileges on %s' % (settings.platform_name))
    tester.doTests(match_spec)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkArgs', 'Network']
@pulumi.input_type
class NetworkArgs:
def __init__(__self__, *,
auto_create_subnetworks: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
mtu: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
routing_config: Optional[pulumi.Input['NetworkRoutingConfigArgs']] = None):
"""
The set of arguments for constructing a Network resource.
:param pulumi.Input[bool] auto_create_subnetworks: Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.
:param pulumi.Input[str] description: An optional description of this resource. Provide this field when you create the resource.
:param pulumi.Input[int] mtu: Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
:param pulumi.Input['NetworkRoutingConfigArgs'] routing_config: The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce.
"""
if auto_create_subnetworks is not None:
pulumi.set(__self__, "auto_create_subnetworks", auto_create_subnetworks)
if description is not None:
pulumi.set(__self__, "description", description)
if mtu is not None:
pulumi.set(__self__, "mtu", mtu)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if routing_config is not None:
pulumi.set(__self__, "routing_config", routing_config)
    # --- Pulumi-generated input properties -----------------------------------
    # Each property pair below registers a getter/setter with the Pulumi
    # runtime via @pulumi.getter / .setter; the string key passed to
    # pulumi.get/pulumi.set must match the key used in __init__.

    @property
    @pulumi.getter(name="autoCreateSubnetworks")
    def auto_create_subnetworks(self) -> Optional[pulumi.Input[bool]]:
        """
        Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.
        """
        return pulumi.get(self, "auto_create_subnetworks")

    @auto_create_subnetworks.setter
    def auto_create_subnetworks(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_create_subnetworks", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource. Provide this field when you create the resource.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def mtu(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.
        """
        return pulumi.get(self, "mtu")

    @mtu.setter
    def mtu(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "mtu", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        Project for this resource. (Undocumented in the generated schema;
        presumably the GCP project ID — verify against the Compute API docs.)
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="requestId")
    def request_id(self) -> Optional[pulumi.Input[str]]:
        """
        Optional request ID. (Undocumented in the generated schema; presumably
        used by the Compute API for request idempotency — verify.)
        """
        return pulumi.get(self, "request_id")

    @request_id.setter
    def request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_id", value)

    @property
    @pulumi.getter(name="routingConfig")
    def routing_config(self) -> Optional[pulumi.Input['NetworkRoutingConfigArgs']]:
        """
        The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce.
        """
        return pulumi.get(self, "routing_config")

    @routing_config.setter
    def routing_config(self, value: Optional[pulumi.Input['NetworkRoutingConfigArgs']]):
        pulumi.set(self, "routing_config", value)
class Network(pulumi.CustomResource):
    """A VPC network resource (``google-native:compute/v1:Network``).

    Generated Pulumi resource class following the standard pattern: two typed
    ``__init__`` overloads, a dispatching ``__init__`` that accepts either a
    ``NetworkArgs`` object or keyword arguments, the real ``_internal_init``,
    a static ``get`` for looking up existing resources, and read-only output
    properties.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auto_create_subnetworks: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 mtu: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 routing_config: Optional[pulumi.Input[pulumi.InputType['NetworkRoutingConfigArgs']]] = None,
                 __props__=None):
        """
        Creates a network in the specified project using the data included in the request.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] auto_create_subnetworks: Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this field when you create the resource.
        :param pulumi.Input[int] mtu: Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
        :param pulumi.Input[pulumi.InputType['NetworkRoutingConfigArgs']] routing_config: The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[NetworkArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a network in the specified project using the data included in the request.
        :param str resource_name: The name of the resource.
        :param NetworkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: if a NetworkArgs object was
        # supplied, unpack its fields into keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(NetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       auto_create_subnetworks: Optional[pulumi.Input[bool]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       mtu: Optional[pulumi.Input[int]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       request_id: Optional[pulumi.Input[str]] = None,
                       routing_config: Optional[pulumi.Input[pulumi.InputType['NetworkRoutingConfigArgs']]] = None,
                       __props__=None):
        # Default, validate and version the resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: record the inputs and declare the
            # server-populated outputs as None placeholders.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkArgs.__new__(NetworkArgs)
            __props__.__dict__["auto_create_subnetworks"] = auto_create_subnetworks
            __props__.__dict__["description"] = description
            __props__.__dict__["mtu"] = mtu
            __props__.__dict__["name"] = name
            __props__.__dict__["project"] = project
            __props__.__dict__["request_id"] = request_id
            __props__.__dict__["routing_config"] = routing_config
            __props__.__dict__["creation_timestamp"] = None
            __props__.__dict__["gateway_i_pv4"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["peerings"] = None
            __props__.__dict__["self_link"] = None
            __props__.__dict__["subnetworks"] = None
        super(Network, __self__).__init__(
            'google-native:compute/v1:Network',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Network':
        """
        Get an existing Network resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All tracked properties start as None; the engine fills them in from
        # the provider when the lookup resolves.
        __props__ = NetworkArgs.__new__(NetworkArgs)
        __props__.__dict__["auto_create_subnetworks"] = None
        __props__.__dict__["creation_timestamp"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["gateway_i_pv4"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["mtu"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["peerings"] = None
        __props__.__dict__["routing_config"] = None
        __props__.__dict__["self_link"] = None
        __props__.__dict__["subnetworks"] = None
        return Network(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="autoCreateSubnetworks")
    def auto_create_subnetworks(self) -> pulumi.Output[bool]:
        """
        Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.
        """
        return pulumi.get(self, "auto_create_subnetworks")

    @property
    @pulumi.getter(name="creationTimestamp")
    def creation_timestamp(self) -> pulumi.Output[str]:
        """
        Creation timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "creation_timestamp")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        An optional description of this resource. Provide this field when you create the resource.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="gatewayIPv4")
    def gateway_i_pv4(self) -> pulumi.Output[str]:
        """
        The gateway address for default routing out of the network, selected by GCP.
        """
        return pulumi.get(self, "gateway_i_pv4")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Type of the resource. Always compute#network for networks.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def mtu(self) -> pulumi.Output[int]:
        """
        Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.
        """
        return pulumi.get(self, "mtu")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def peerings(self) -> pulumi.Output[Sequence['outputs.NetworkPeeringResponse']]:
        """
        A list of network peerings for the resource.
        """
        return pulumi.get(self, "peerings")

    @property
    @pulumi.getter(name="routingConfig")
    def routing_config(self) -> pulumi.Output['outputs.NetworkRoutingConfigResponse']:
        """
        The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce.
        """
        return pulumi.get(self, "routing_config")

    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> pulumi.Output[str]:
        """
        Server-defined URL for the resource.
        """
        return pulumi.get(self, "self_link")

    @property
    @pulumi.getter
    def subnetworks(self) -> pulumi.Output[Sequence[str]]:
        """
        Server-defined fully-qualified URLs for all subnetworks in this VPC network.
        """
        return pulumi.get(self, "subnetworks")
|
<gh_stars>0
import urllib3
import urllib.parse
import re
import json
class places(object):
    """Minimal client for the Google Places "Find Place from Text" API.

    A single HTTP request is issued in the constructor and its raw JSON body
    is cached on the instance; every accessor re-parses that cached payload,
    so no further network traffic happens after construction.
    """

    def __init__(self, auth, place):
        """
        Args:
            auth: Google Maps API key.
            place: free-text place query (URL-quoted before sending).
        """
        instance = urllib3.PoolManager()
        quotedParams = urllib.parse.quote(place)
        placeRequest = instance.request("GET", f"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={quotedParams}&inputtype=textquery&fields=photos,formatted_address,name,rating,opening_hours,geometry&key={auth}")
        self._placesInfo = placeRequest.data

    def _candidate(self):
        """Return the first candidate dict of the cached API response."""
        return json.loads(self._placesInfo.decode())["candidates"][0]

    def json(self, formatting=False):
        """Return a condensed summary of the first candidate.

        Returns a dict by default, or a pretty-printed JSON string when
        *formatting* is truthy. Missing optional fields (opening hours,
        rating, photos) are reported as None.
        """
        candidate = self._candidate()
        formattedAddress = candidate["formatted_address"]
        location = candidate["geometry"]["location"]
        # Bug fix: the original tested `"opening_hours" and "open_now" in s`,
        # which only evaluated the second membership test (the string literal
        # "opening_hours" is always truthy). Test both keys on the parsed dict.
        if "opening_hours" in candidate and "open_now" in candidate["opening_hours"]:
            openNow = candidate["opening_hours"]["open_now"]
        else:
            openNow = None
        # Robustness: guard "photos" like the photo() method already does
        # instead of raising KeyError when the candidate has no photos.
        if "photos" in candidate:
            mapsUrlRaw = candidate["photos"][0]["html_attributions"][0]
            # Strip the surrounding '<a href="...">label</a>' markup.
            mapsUrlParsed = mapsUrlRaw.replace('<a href="', "")
            contribUrl = re.sub(">.*?</a>", "", mapsUrlParsed).replace('"', "")
        else:
            contribUrl = None
        # Bug fix: membership is now checked on the dict, not on the dumped
        # JSON text (a place *named* "rating" no longer gives a false positive).
        ratingScore = candidate.get("rating")
        parsed = {"name": candidate["name"],
                  "open": openNow,
                  "rating": ratingScore,
                  "address": formattedAddress.split(", "),
                  "contrib_url": contribUrl,
                  "contrib_lat": location["lat"],
                  "contrib_lng": location["lng"]}
        if formatting is False:
            return parsed
        return json.dumps(parsed, indent=4)

    def name(self):
        """Return the candidate's display name."""
        return self._candidate()["name"]

    def address(self):
        """Return the formatted address split into its comma-separated parts."""
        return self._candidate()["formatted_address"].split(", ")

    def openNow(self):
        """Return the open_now flag, or None when opening hours are absent."""
        candidate = self._candidate()
        if "opening_hours" in candidate:
            return candidate["opening_hours"]["open_now"]
        # Bug fix: the original's else branch assigned None but never
        # returned it (relying on the implicit None); make it explicit.
        return None

    def photo(self, auth, download=False):
        """Return the photo URL for the first photo (or None when absent).

        When *download* is True the image is also written to
        'photoreference.jpg' in the working directory.
        """
        photosInstance = urllib3.PoolManager()
        candidate = self._candidate()
        if "photos" in candidate:
            firstPhoto = candidate["photos"][0]
            photosReference = firstPhoto["photo_reference"]
            photosWidth = firstPhoto["width"]
            photosUrl = f"https://maps.googleapis.com/maps/api/place/photo?maxwidth={photosWidth}&photoreference={photosReference}&key={auth}"
        else:
            photosUrl = None
        if download is True and photosUrl:
            photosRequest = photosInstance.request("GET", photosUrl)
            with open("photoreference.jpg", "wb") as download_image:
                download_image.write(photosRequest.data)
        return photosUrl

    def rating(self):
        """Return the rating as a float, or None when the place has none.

        Bug fix: the original's else branch assigned None but fell off the
        end of the function without returning it.
        """
        candidate = self._candidate()
        if "rating" in candidate:
            return float(candidate["rating"])
        return None

    def cordinate(self):
        """Return (latitude, longitude). (Misspelt name kept for API compat.)"""
        location = self._candidate()["geometry"]["location"]
        return (location["lat"], location["lng"])

    def status(self):
        """Return the top-level API status string (e.g. "OK")."""
        return json.loads(self._placesInfo.decode())["status"]
|
<filename>Preparing_input_rasters.py
###################################
######## Code to prepare the input rasters
###################################
# Input files
CHborder_path = "D:\\Geodata\\Raw_data\\SwissBOUNDARIES3D\\swissBOUNDARIES3D\\BOUNDARIES_2020\\DATEN\\swissBOUNDARIES3D\\SHAPEFILE_LV95_LN02\\swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp"
Sentinel_path = "D:\\Geodata\\Raw_data\\Sentinel_mosaic_CH\\MosaicSentinelCH2018\\SentinelCH2018.tif"
DEM_path = "D:\\Geodata\\Raw_data\\SwissALTI3D\\SwissALTI3D_CH_LV95.tif"
PopDens_path = "D:\\Geodata\\Raw_data\\Population_density\\gd-b-00.03-vz2018statpopb\\STATPOP2018B_B18BTOT_NAto0_noNOLOC.tif"
Building_path = "D:\\Geodata\\Raw_data\\swissTLM3D_2019_gebaeude_footprint\\swissTLM3D_2019_bldng_ftprnt_diss_LV95.shp"
Forest_path = "D:\\Geodata\\Raw_data\\Waldmischungsgrad\\Waldmischungsgrad\\MG2020_0306_oneRF_S1S2_AS_DEM_LV95.tif"
#NDVI_path = "D:\\Geodata\\Raw_data\\Sentinel_NDVI\\S2_2018_NDVI_med_06_08_LV95.tif"
Output_path = "D:\\Current_work\\Projects\\Landscape_typologies\\SwissWide_data\\"

# Import functions and specify settings in ArcGIS
import arcpy
import os
import rasterio
from arcpy import env
from arcpy.sa import *
import numpy as np

arcpy.SetProduct('ArcInfo')
arcpy.CheckOutExtension('Spatial')
arcpy.env.overwriteOutput = True
# NOTE(review): arcpy documents parallelProcessingFactor as a string (e.g.
# "3" or "100%"); confirm this arcpy version accepts a bare int.
arcpy.env.parallelProcessingFactor = 3
arcpy.env.workspace = Output_path


def rescale_percentiles(in_path, out_path, lower_pct=0.01, upper_pct=99.99):
    """Clip a single-band raster to its [lower_pct, upper_pct] percentile
    range (ignoring nodata) and shift so the minimum becomes 0.

    Bug fix vs. the original inline code: nodata cells were masked with
    `numpy.nan`, but numpy is imported as `np`, so the script raised
    NameError before writing any output. The duplicated inline copies are
    also collapsed into this single helper.
    """
    rast_obj = rasterio.open(in_path)
    out_meta = rast_obj.meta.copy()
    nodata_val = int(rast_obj.nodata)
    rast_arr = rast_obj.read(1).astype(float)
    rast_arr[rast_arr == nodata_val] = np.nan  # was: numpy.nan (NameError)
    lo = np.nanpercentile(rast_arr, lower_pct)
    hi = np.nanpercentile(rast_arr, upper_pct)
    output = np.where(rast_arr < lo, lo, rast_arr)
    output = np.where(rast_arr > hi, hi, output)
    output = output - lo
    # NOTE(review): NaN (former nodata) cells survive the clipping and are
    # cast to int here, matching the original behaviour — verify whether the
    # nodata value should be restored instead.
    output_int = output.astype(int)
    with rasterio.open(out_path, "w", **out_meta) as img:
        img.write(output_int, 1)
    rast_obj.close()


##################################################
# Assign the people per m2 in a building to the buildings in the Building_path shapefile.
##################################################
# Create a Fishnet of the PopDens raster
x_left = arcpy.GetRasterProperties_management(PopDens_path, "LEFT").getOutput(0)
y_bottom = arcpy.GetRasterProperties_management(PopDens_path, "BOTTOM").getOutput(0)
origin = x_left + ' ' + y_bottom
# NOTE(review): int(y_bottom) raises ValueError if the extent string carries
# decimals (e.g. "1075268.5") — confirm the raster is aligned to whole metres.
yAxCoord = x_left + ' ' + str(int(y_bottom) + 10)
cs = arcpy.GetRasterProperties_management(PopDens_path, "CELLSIZEX").getOutput(0)
ncol = arcpy.GetRasterProperties_management(PopDens_path, "COLUMNCOUNT").getOutput(0)
nrow = arcpy.GetRasterProperties_management(PopDens_path, "ROWCOUNT").getOutput(0)
arcpy.CreateFishnet_management(Output_path + "PopDens_fishnet.shp", origin, yAxCoord, cs, cs, nrow, ncol, "#", "NO_LABELS", "#", "POLYGON")
# Intersect the Building_path shapefile with the fishnet.
arcpy.Intersect_analysis(["PopDens_fishnet.shp", Building_path], "Bldng_fishnet_intSect.shp")
# Calculate the area of a building within each fishnet.
arcpy.CalculateGeometryAttributes_management("Bldng_fishnet_intSect.shp", [["Area", "AREA"]], "#", "SQUARE_METERS")
# Remove parts of buildings that are smaller or equal to 15 m2.
arcpy.MakeFeatureLayer_management("Bldng_fishnet_intSect.shp", "bld_fish_lyr")
arcpy.SelectLayerByAttribute_management("bld_fish_lyr", 'NEW_SELECTION', '"Area" > 15')
arcpy.CopyFeatures_management("bld_fish_lyr", "Bldng_fishnet_intSect_large.shp")
# Calculate the centroid of each building section.
arcpy.FeatureToPoint_management("Bldng_fishnet_intSect_large.shp", "Bldng_fishnet_intSect_cntrd.shp", "INSIDE")
# Calculate the total building area per PopDens raster cell
arcpy.env.snapRaster = PopDens_path
arcpy.env.extent = PopDens_path
arcpy.env.outputCoordinateSystem = PopDens_path
arcpy.PointToRaster_conversion("Bldng_fishnet_intSect_cntrd.shp", "Area", "Bldng_area.tif", "SUM", "#", 100)
# Calculate the people per 100 m2 of building in PopDens area.
PopBldngDens = Float(Raster(PopDens_path)) / (Float(Raster("Bldng_area.tif")) / 100)
PopBldngDensNull = IsNull(PopBldngDens)
PopBldngDensCH = Con(PopBldngDensNull, 0, PopBldngDens, "Value = 1")
PopBldngDensCH.save("Pop_Bldng_Dens.tif")
del PopBldngDens, PopBldngDensNull, PopBldngDensCH
# Join the People / 100m2 building with the building footprints
ExtractValuesToPoints("Bldng_fishnet_intSect_cntrd.shp", "Pop_Bldng_Dens.tif", "Bldng_fishnet_intSect_cntrd_Pp100m2.shp")
arcpy.JoinField_management("Bldng_fishnet_intSect_large.shp", "FID", "Bldng_fishnet_intSect_cntrd_Pp100m2.shp", "ORIG_FID", "RASTERVALU")

##################################################
# Make all rasters and shapefiles overlapping.
##################################################
arcpy.env.workspace = Output_path
# Remove Liechtenstein from the boundaries of Switzerland shapefile
arcpy.MakeFeatureLayer_management(CHborder_path, "CHborder_lyr")
arcpy.SelectLayerByAttribute_management("CHborder_lyr", 'NEW_SELECTION', "NAME <> 'Liechtenstein'")
arcpy.CopyFeatures_management("CHborder_lyr", "CHborder.shp")
# Clip the Sentinel-RGB image with the borders of Switzerland to get the blue-print raster
sntnl_blueprint = ExtractByMask(Sentinel_path, "CHborder.shp")
sntnl_blueprint.save("SentinelRGB.tif")
# Set the extent, snap and coordinates to the Sentinel blueprint raster
# sntnl_blueprint = Raster("SentinelRGB.tif")
arcpy.env.snapRaster = sntnl_blueprint
arcpy.env.extent = sntnl_blueprint
arcpy.env.outputCoordinateSystem = sntnl_blueprint
arcpy.env.mask = sntnl_blueprint
# Aggregate the DEM raster
DEM = Aggregate(DEM_path, 5, "MEAN")
DEM = Int(DEM)
DEM.save("swissALTI.tif")
# Rasterise the building data
arcpy.PolygonToRaster_conversion("Bldng_fishnet_intSect_large.shp", "RASTERVALU", "PopDens.tif", "CELL_CENTER", "#", 10)
PopDens = Raster("PopDens.tif")
PopDens = Ln(Int(PopDens * 100)) * 100
PopDensNull = IsNull(PopDens)
PopDensCH = Con(PopDensNull, 0, PopDens, "Value = 1")
PopDensCH = ExtractByMask(PopDensCH, sntnl_blueprint)
PopDensCH = Int(PopDensCH)
PopDensCH.save("PopDens_100xLogPpHect.tif")
# Bug fix: the original also deleted sntnl_blueprint here, although the
# ExtractByMask calls in the forest section below still need it (NameError).
del PopDens, PopDensNull, PopDensCH
# Resample the Forest data
cell_size = int(arcpy.GetRasterProperties_management(Sentinel_path, "CELLSIZEX").getOutput(0))
arcpy.Resample_management(Forest_path, "Forest_mixture.tif", cell_size, "NEAREST")
# 0 = 100% Laubwald (deciduous), 10000 = 100% Nadelwald (coniferous)
ForMix = Raster("Forest_mixture.tif")
Deciduous = 10000 - ForMix
Coniferous = ForMix
DecidNull = IsNull(Deciduous)
Deciduous = Con(DecidNull, 0, Deciduous, "Value = 1")
Coniferous = Con(DecidNull, 0, Coniferous, "Value = 1")
Deciduous = ExtractByMask(Deciduous, sntnl_blueprint)
Coniferous = ExtractByMask(Coniferous, sntnl_blueprint)
Deciduous = Int(Deciduous)
Coniferous = Int(Coniferous)
Deciduous.save("DeciduousPerc.tif")
Coniferous.save("ConiferousPerc.tif")
del Coniferous, Deciduous, ForMix, DecidNull

##################################################
# Make indices of SentinelRGB
##################################################
# See document "Sentinel_RGB_ratios.docx" for a justification of these ratios
Red = Raster('SentinelRGB.tif\Band_1')
Green = Raster('SentinelRGB.tif\Band_2')
Blue = Raster('SentinelRGB.tif\Band_3')
SentinelRGratio = Int(((Red - Green) / (Red + Green) + 1) * 1000)
SentinelRBratio = Int(((Red - Blue) / (Red + Blue) + 1) * 1000)
SentinelRGratio.save("SentinelRGratio.tif")
SentinelRBratio.save("SentinelRBratio.tif")
del SentinelRGratio, SentinelRBratio
# As there are some outliers in this data (i.e. few extreme data points),
# reset the top and bottom 0.01 percentiles of both ratio rasters.
rescale_percentiles(Output_path + "SentinelRGratio.tif", Output_path + "SentinelRGratio_rescale.tif")
rescale_percentiles(Output_path + "SentinelRBratio.tif", Output_path + "SentinelRBratio_rescale.tif")
|
<reponame>TX-Yeager/LiTS---Liver-Tumor-Segmentation-Challenge
from __future__ import print_function, division
import SimpleITK as sitk
import numpy as np
import cv2
import os
# Output directories for the generated 3D patches (images / liver masks /
# tumour masks).
# NOTE(review): these Windows paths mix single and double backslashes; they
# only work because \D, \3, \I and \M are not recognised escape sequences —
# raw strings (r"...") would be safer. Verify before editing.
trainImage = "D:\Data\LIST\\3dPatchdata_25625616\Image"
trainLiverMask = "D:\Data\LIST\\3dPatchdata_25625616\MaskLiver"
trainTumorMask = "D:\Data\LIST\\3dPatchdata_25625616\MaskTumor"
def getRangImageDepth(image):
    """Return (first, last) indices along axis 0 with any non-zero voxel.

    For an all-zero volume both indices are 0, matching the historic
    behaviour of this function.
    """
    nonzero_slices = [z for z in range(image.shape[0]) if np.max(image[z])]
    if not nonzero_slices:
        return 0, 0
    return nonzero_slices[0], nonzero_slices[-1]
def subimage_generator(image, mask, patch_block_size, numberxy, numberz):
    """
    Generate sub-images and sub-masks of size patch_block_size.

    Args:
        image: 3D array ordered (z, width, height).
        mask: 3D array with the same shape as image.
        patch_block_size: (blockz, block_width, block_height).
        numberxy: grid step/count parameter in the x/y directions.
        numberz: grid step/count parameter in the z direction.
    Returns:
        (samples, mask_samples): two 4D arrays of shape
        (n, blockz, block_width, block_height). When the volume is large
        enough, only windows whose mask contains foreground are kept;
        otherwise a single zero-padded sample is returned.
    """
    width = np.shape(image)[1]
    height = np.shape(image)[2]
    imagez = np.shape(image)[0]
    block_width = np.array(patch_block_size)[1]
    block_height = np.array(patch_block_size)[2]
    blockz = np.array(patch_block_size)[0]
    stridewidth = (width - block_width) // numberxy
    strideheight = (height - block_height) // numberxy
    stridez = (imagez - blockz) // numberz
    # step 1: if every stride is >= 1, slide a grid of windows over the
    # volume and keep the windows whose mask contains foreground
    if stridez >= 1 and stridewidth >= 1 and strideheight >= 1:
        # centre the grid: split the leftover margin equally on both sides
        step_width = (width - (stridewidth * numberxy + block_width)) // 2
        step_height = (height - (strideheight * numberxy + block_height)) // 2
        step_z = (imagez - (stridez * numberz + blockz)) // 2
        hr_samples_list = []
        hr_mask_samples_list = []
        for z in range(step_z, numberz * (stridez + 1) + step_z, numberz):
            for x in range(step_width, numberxy * (stridewidth + 1) + step_width, numberxy):
                for y in range(step_height, numberxy * (strideheight + 1) + step_height, numberxy):
                    if np.max(mask[z:z + blockz, x:x + block_width, y:y + block_height]) != 0:
                        hr_samples_list.append(image[z:z + blockz, x:x + block_width, y:y + block_height])
                        hr_mask_samples_list.append(mask[z:z + blockz, x:x + block_width, y:y + block_height])
        hr_samples = np.array(hr_samples_list).reshape((len(hr_samples_list), blockz, block_width, block_height))
        hr_mask_samples = np.array(hr_mask_samples_list).reshape(
            (len(hr_mask_samples_list), blockz, block_width, block_height))
        return hr_samples, hr_mask_samples
    # step 2: otherwise the volume is smaller than one block in some
    # dimension -- return a single zero-padded sample
    else:
        nb_sub_images = 1 * 1 * 1
        # Bug fix: np.float was removed in NumPy 1.24 (AttributeError on
        # modern numpy); the builtin float is the documented replacement.
        hr_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=float)
        hr_mask_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=float)
        rangz = min(imagez, blockz)
        rangwidth = min(width, block_width)
        rangheight = min(height, block_height)
        hr_samples[0, 0:rangz, 0:rangwidth, 0:rangheight] = image[0:rangz, 0:rangwidth, 0:rangheight]
        hr_mask_samples[0, 0:rangz, 0:rangwidth, 0:rangheight] = mask[0:rangz, 0:rangwidth, 0:rangheight]
        return hr_samples, hr_mask_samples
def make_patch(image, mask, patch_block_size, numberxy, numberz, startpostion, endpostion):
    """
    Cut one volume into patches around the annotated z-range.

    Args:
        image: source volume [depth, 512, 512].
        mask: label volume with the same shape.
        patch_block_size: patch shape such as [64, 128, 128].
        numberxy, numberz: grid parameters forwarded to subimage_generator.
        startpostion, endpostion: first/last annotated slice indices.
    Returns:
        (image_patches, mask_patches) of shape [samples, 64, 128, 128].

    The z-range is expanded by half a block on each side,
    image[startpostion - blockz//2 : endpostion + blockz//2, :, :],
    clamped to the volume, before patch extraction.
    """
    blockz = np.array(patch_block_size)[0]
    imagezsrc = np.shape(image)[0]
    subimage_startpostion = startpostion - blockz // 2
    subimage_endpostion = endpostion + blockz // 2
    if subimage_startpostion < 0:
        subimage_startpostion = 0
    if subimage_endpostion > imagezsrc:
        subimage_endpostion = imagezsrc
    if (subimage_endpostion - subimage_startpostion) < blockz:
        subimage_startpostion = 0
        subimage_endpostion = imagezsrc
    # Bug fix: the original computed imageroi but then passed the *full*
    # image (and full mask) to subimage_generator, so the cropped range was
    # never used. Crop both image and mask to the same z-range, as the
    # docstring describes, keeping them aligned.
    imageroi = image[subimage_startpostion:subimage_endpostion, :, :]
    maskroi = mask[subimage_startpostion:subimage_endpostion, :, :]
    image_subsample, mask_subsample = subimage_generator(image=imageroi, mask=maskroi,
                                                         patch_block_size=patch_block_size,
                                                         numberxy=numberxy, numberz=numberz)
    return image_subsample, mask_subsample
'''
This function reads a '.mhd' file using SimpleITK and returns the image array, origin and spacing of the image.
The read_Image_mask function gets the image and mask.
'''
def load_itk(filename):
    """
    Load an image file with SimpleITK, cast it to float32 and rescale its
    intensities into the range 0-255.
    :param filename: path of the image to read
    :return: the rescaled SimpleITK image
    """
    raw = sitk.Cast(sitk.ReadImage(filename), sitk.sitkFloat32)
    scaler = sitk.RescaleIntensityImageFilter()
    scaler.SetOutputMinimum(0)
    scaler.SetOutputMaximum(255)
    return scaler.Execute(raw)
def gen_image_mask(srcimg, seg_image, index, shape, numberxy, numberz):
    """Cut one case into 3D patches and write them out as per-slice BMPs.

    Args:
        srcimg: source volume (z, y, x), values assumed displayable in 0-255.
        seg_image: binary mask volume (foreground encoded as 255).
        index: case number used to build output directory names.
        shape: patch shape, e.g. (16, 256, 256).
        numberxy, numberz: grid parameters forwarded to make_patch.

    Only patches whose mask contains foreground (a 255 voxel) are written,
    under trainImage/<index>_<j>/ and trainLiverMask/<index>_<j>/.
    """
    # step 1: effective z-range of the annotation
    startpostion, endpostion = getRangImageDepth(seg_image)
    # step 2: extract sub-volumes of the requested shape
    sub_srcimages, sub_liverimages = make_patch(srcimg, seg_image, patch_block_size=shape,
                                                numberxy=numberxy, numberz=numberz,
                                                startpostion=startpostion,
                                                endpostion=endpostion)
    # step 3: save only the patches that actually contain foreground
    samples, imagez = np.shape(sub_srcimages)[0], np.shape(sub_srcimages)[1]
    sub_masks = sub_liverimages.astype(np.float32)
    sub_masks = np.clip(sub_masks, 0, 255).astype('uint8')
    for j in range(samples):
        if np.max(sub_masks[j, :, :, :]) == 255:
            filepath = trainImage + "\\" + str(index) + "_" + str(j) + "\\"
            filepath2 = trainLiverMask + "\\" + str(index) + "_" + str(j) + "\\"
            # Bug fix: the original created the directories only when BOTH
            # were missing, so a half-created pair silently skipped makedirs
            # and the imwrite calls below failed. Create each independently.
            os.makedirs(filepath, exist_ok=True)
            os.makedirs(filepath2, exist_ok=True)
            for z in range(imagez):
                image = sub_srcimages[j, z, :, :]
                image = image.astype(np.float32)
                image = np.clip(image, 0, 255).astype('uint8')
                cv2.imwrite(filepath + str(z) + ".bmp", image)
                cv2.imwrite(filepath2 + str(z) + ".bmp", sub_masks[j, z, :, :])
def preparetraindata():
    """Generate liver training patches for every LiTS case (0-130).

    For each case: load the segmentation and the intensity volume, build a
    binary liver mask (any labelled voxel -> 255), and hand both to
    gen_image_mask for patch extraction.
    """
    for case_id in range(131):
        seg_itk = sitk.ReadImage("D:\Data\LIST\src_data\segmentation-" + str(case_id) + ".nii", sitk.sitkUInt8)
        seg_arr = sitk.GetArrayFromImage(seg_itk)
        vol_itk = load_itk("D:\Data\LIST\src_data\\volume-" + str(case_id) + ".nii")
        vol_arr = sitk.GetArrayFromImage(vol_itk)
        # liver mask: label 1 (liver) and label 2 (tumour) both become 255
        liver_mask = seg_arr.copy()
        liver_mask[seg_arr > 0] = 255
        # tumour-only mask (currently unused; kept for the commented call below)
        tumor_mask = seg_arr.copy()
        tumor_mask[seg_arr == 1] = 0
        tumor_mask[seg_arr == 2] = 255
        gen_image_mask(vol_arr, liver_mask, case_id, shape=(16, 256, 256), numberxy=5, numberz=10)
        # gen_image_mask(vol_arr, tumor_mask, case_id, shape=(16, 256, 256), numberxy=5, numberz=10)

preparetraindata()
|
<reponame>cajohare/CompAxion
#================================PlotFuncs.py==================================#
# Created by <NAME> 2021
#==============================================================================#
from numpy import *
from numpy.random import *
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
from matplotlib.colors import ListedColormap
from matplotlib import colors
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
from scipy.stats import norm
import PlotFuncs
#K_QCD = 6.743e-5 # GeV^4
# Scale constant used in all mass formulas below [GeV^4].
# NOTE(review): an alternative value (6.743e-5) is kept commented out above —
# confirm which normalisation the final results use.
K_QCD = 1.62e-5
def m2m1_ratio_hierarchical(f, N0, N1, N2):
    """Return (1/eps) * m2/m1 evaluated at the small hierarchy eps = 1e-6."""
    eps = 1e-6
    common = N0 + N1*eps**2
    disc = (4*N2**2*eps**2 + (N0 - N1*eps**2)**2)**0.5
    m1 = 1e9*sqrt(K_QCD/f**2*(common + disc))
    m2 = 1e9*sqrt(K_QCD/f**2*(common - disc))
    return (1/eps)*m2/m1
def m1m2_ratio_hierarchical(f, N0, N1, N2):
    """Return (1/eps) * m1/m2 evaluated at the large hierarchy eps = 1e6."""
    eps = 1e6
    common = N0 + N1*eps**2
    disc = (4*N2**2*eps**2 + (N0 - N1*eps**2)**2)**0.5
    m1 = 1e9*sqrt(K_QCD/f**2*(common + disc))
    m2 = 1e9*sqrt(K_QCD/f**2*(common - disc))
    return (1/eps)*m1/m2
def Parameters(f,eps,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    '''
    Mass spectrum and mixing of the two-axion system.

    Input:
        f and f' in GeV, must be the same size (arrays; boolean indexing
        below requires ndarray input)
        eps = f/f' sets the hierarchy between the two decay constants
        k: relative weight of the gluonic (Ng, Ngp) anomaly terms
        N's are O(1) numbers: [N, N', Ng, Ng']
    Output:
        dm_sq = squared mass difference [eV^2]
        m1 = heavier mass [eV]
        m2 = lighter mass [eV]
        tan_2alpha = mixing angle
    NOTE: AnomalyCoefficients is a mutable default list; callers must not
    mutate it in place.
    '''
    fp = f/eps
    N,Np,Ng,Ngp = AnomalyCoefficients[:]
    # Entries of the effective 2x2 mass-squared matrix.
    N0 = N**2+k*Ng**2
    N1 = Np**2 + k*Ngp**2
    N2 = N*Np + k*Ng*Ngp
    # Eigenvalue splitting and the two eigen-masses (1e9 converts GeV -> eV).
    dm_sq = (1e9**2)*(2*K_QCD/f**2)*(4*N2**2*eps**2 + (N0-N1*eps**2)**2)**0.5
    m1 = 1e9*sqrt(K_QCD/f**2*((N0+N1*eps**2)+(4*N2**2*eps**2 + (N0-N1*eps**2)**2)**0.5))
    m2 = 1e9*sqrt(K_QCD/f**2*((N0+N1*eps**2)-(4*N2**2*eps**2 + (N0-N1*eps**2)**2)**0.5))
    # In the strongly hierarchical corners the direct m2 expression suffers
    # catastrophic cancellation; patch with the extrapolated limits.
    m2_small1 = m1*eps*m2m1_ratio_hierarchical(f,N0,N1,N2)
    m2_small2 = (1/eps)*m1/(m1m2_ratio_hierarchical(f,N0,N1,N2))
    m2[f/fp<=1e-6] = m2_small1[f/fp<=1e-6]   # note f/fp == eps
    m2[fp/f<=1e-6] = m2_small2[fp/f<=1e-6]
    tan_2alpha = 2*eps*(N*Np+k*Ng*Ngp)/((N**2+k*Ng**2)-eps**2*(Np**2+k*Ngp**2))
    return dm_sq,m1,m2,tan_2alpha
def Couplings(f,eps,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    """Photon couplings of the two mass eigenstates.

    Returns (m1, m2, g1, g2): the eigen-masses in eV and the corresponding
    axion-photon couplings, obtained by rotating the flavour-basis couplings
    through the mixing angle alpha.
    """
    N, Np, _, _ = AnomalyCoefficients[:]
    _, m1, m2, tan_2alpha = Parameters(f, eps, k, AnomalyCoefficients)
    mixing_angle = arctan(tan_2alpha)/2
    fp = f/eps
    # Electromagnetic prefactor: alpha_EM/(2*pi) times the anomaly factor 1.92.
    prefactor = (1/137)*(1/(2*pi))*1.92
    c = cos(mixing_angle)
    s = sin(mixing_angle)
    g1 = prefactor*(N*c/f - Np*s/fp)
    g2 = prefactor*(N*s/f + Np*c/fp)
    return m1, m2, g1, g2
def Mixing1(f,eps,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    """Mixing factor for the lighter eigenstate built from the mixing angle
    alpha and the hierarchy parameter eps."""
    _, _, _, tan_2alpha = Parameters(f, eps, k, AnomalyCoefficients)
    alpha_mix = arctan(tan_2alpha)/2
    t = tan(alpha_mix)
    numerator = (-t + eps*(1 - t**2) + eps**2*t)**2
    Mix1 = 4*cos(alpha_mix)**4*numerator/(1 + eps**2)**2
    return Mix1
def Mixing2(f,eps,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2],L=1.496e11):
    """Oscillation factor for solar axions after propagating a distance L [m]
    (default: 1 AU), averaged over the Primakoff production spectrum."""
    dat = loadtxt('data/PrimakoffFlux_PlasmonCorrected.txt')
    w = dat[:,0]     # energy grid (keV, judging by the 1e3 factors below -- TODO confirm)
    Phi = dat[:,1]
    Phi = Phi/trapz(Phi,w)   # normalise flux to a unit-area probability density
    L_eV = L/1.97e-7 # eV^-1  (metres -> natural units via hbar*c = 1.97e-7 eV.m)
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    n = shape(f)[0]
    # NOTE(review): both loops run over shape[0] -- assumes a square grid;
    # rectangular grids would need shape[1] for the inner loop.
    Mix2 = zeros_like(f)
    for i in range(0,n):
        for j in range(0,n):
            if dm_sq[i,j]*L_eV/(4*(10.0*1e3))>2*pi:
                # Fast-oscillation regime (phase > 2pi even at 10 keV):
                # sin^2 averages to 1/2.
                Mix2[i,j] = 0.5
            else:
                Mix2[i,j] = trapz(sin(dm_sq[i,j]*L_eV/(4*(w*1e3)))**2*Phi,w)
    return Mix2
def MapLimit(file,Prob,m,g):
    """Map a published axion-photon exclusion limit onto the (m, g) grid.

    Parameters
    ----------
    file : name of the limit file under limit_data/AxionPhoton/
    Prob : multiplicative probability/abundance factor applied to g
    m, g : arrays of masses [eV] and couplings over the scan grid

    Returns a boolean array, True where g*Prob exceeds the interpolated
    limit and m lies inside the mass range covered by the data.
    """
    m_lim,g_lim = loadtxt('limit_data/AxionPhoton/'+file+'.txt',unpack=True)
    # Log-log interpolation of the published curve onto our mass grid.
    # np.interp operates elementwise and preserves the input shape, so the
    # previous explicit double loop over the grid is unnecessary.
    g_lim_interp = 10.0**interp(log10(m),log10(m_lim),log10(g_lim))
    constrained = ((g*Prob)>g_lim_interp)
    # Outside the mass range of the data the limit does not apply.
    constrained[m>amax(m_lim)] = False
    constrained[m<amin(m_lim)] = False
    return constrained
def MapHaloscope_m1(file,fvals,epsvals,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2],Omega_a1=None):
    """Haloscope exclusion for the heavier eigenstate m1 on the (f, eps) grid.

    Haloscope signals scale with the local DM density, so the coupling is
    weighted by sqrt of the eigenstate's abundance fraction Omega_a1.
    """
    f,eps = meshgrid(fvals,epsvals)
    # NOTE(review): dm_sq etc. are unused here (Couplings calls Parameters
    # itself); kept for parity with the sibling functions.
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    m1,m2,g1,g2 = Couplings(f,eps,k,AnomalyCoefficients)
    if Omega_a1 is None:
        # Default abundance fraction of eigenstate 1 -- the exponents 0.41
        # and -7/6 are presumably fits from the paper; TODO confirm.
        Omega_a1 = 1/(1+k**0.41*eps**(-7/6))
    lim_m1 = MapLimit(file,sqrt(Omega_a1),m1,g1)
    return lim_m1
def MapHaloscope_m2(file,fvals,epsvals,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2],Omega_a2=None):
    """Haloscope exclusion for the lighter eigenstate m2 on the (f, eps) grid.

    Mirror of MapHaloscope_m1 with the complementary default abundance
    fraction (note the inverted exponent signs relative to Omega_a1).
    """
    f,eps = meshgrid(fvals,epsvals)
    # NOTE(review): dm_sq etc. are unused here; kept for parity with siblings.
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    m1,m2,g1,g2 = Couplings(f,eps,k,AnomalyCoefficients)
    if Omega_a2 is None:
        Omega_a2 = 1/(1+k**-0.41*eps**(7/6))
    lim_m2 = MapLimit(file,sqrt(Omega_a2),m2,g2)
    return lim_m2
def MapHelioscope_m1(file,fvals,epsvals,n=100,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    """Helioscope exclusion for the heavier eigenstate m1.

    Combines the survival probability of the active (photon-coupled)
    combination en route from the Sun with the m1 projection cos(theta).
    NOTE(review): the `n` parameter is unused -- candidate for removal.
    """
    f,eps = meshgrid(fvals,epsvals)
    # NOTE(review): dm_sq etc. are unused here; kept for parity with siblings.
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    m1,m2,g1,g2 = Couplings(f,eps,k,AnomalyCoefficients)
    Mix2 = Mixing2(f,eps,k,AnomalyCoefficients)
    Mix1 = Mixing1(f,eps,k,AnomalyCoefficients)
    th = arcsin(sqrt(Mix1))/2
    SurvivalProb_active = 1-Mix1*Mix2
    Prob_m1 = SurvivalProb_active*cos(th)
    # The helioscope rate scales as g^4, hence the Prob^(1/4) weighting
    # of the effective coupling.
    lim_m1 = MapLimit(file,(Prob_m1)**(1/4),m1,g1)
    return lim_m1
def MapHelioscope_m2(file,fvals,epsvals,n=100,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    """Helioscope exclusion for the lighter eigenstate m2.

    Mirror of MapHelioscope_m1 with the sin(theta) projection.
    NOTE(review): the `n` parameter is unused -- candidate for removal.
    """
    f,eps = meshgrid(fvals,epsvals)
    # NOTE(review): dm_sq etc. are unused here; kept for parity with siblings.
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    m1,m2,g1,g2 = Couplings(f,eps,k,AnomalyCoefficients)
    Mix2 = Mixing2(f,eps,k,AnomalyCoefficients)
    Mix1 = Mixing1(f,eps,k,AnomalyCoefficients)
    th = arcsin(sqrt(Mix1))/2
    SurvivalProb_active = 1-Mix1*Mix2
    Prob_m2 = SurvivalProb_active*sin(th)
    lim_m2 = MapLimit(file,(Prob_m2)**(1/4),m2,g2)
    return lim_m2
def Superradiance(file,fvals,epsvals,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2]):
    """Black-hole superradiance constraint on the (f, eps) grid.

    A point is constrained when either mass eigenstate lies inside the mass
    window of the tabulated bound AND both decay constants satisfy 1/f < fd
    (fd is presumably a bound on 1/f_a -- TODO confirm the file convention).
    Returns a boolean grid.
    """
    f,eps = meshgrid(fvals,epsvals)
    fp = f/eps
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
    md,fd = loadtxt('limit_data/fa/BlackHoleSpins_'+file+'.txt',unpack=True)
    ni = shape(f)[0]   # NOTE: locally shadows the module-level constant `ni`
    nj = shape(f)[1]
    constrained = zeros((ni,nj))
    for i in range(0,ni):
        for j in range(0,nj):
            m1_ij = m1[i,j]
            m2_ij = m2[i,j]
            # Each eigenstate contributes independently; += accumulates so
            # the final >0 test flags "either eigenstate constrained".
            if (m1_ij<amax(md)) and (m1_ij>amin(md)):
                g = interp(m1_ij,md,fd)
                constrained[i,j] = ((1/f[i,j]<g) and (1/fp[i,j]<g))
            if (m2_ij<amax(md)) and (m2_ij>amin(md)):
                g = interp(m2_ij,md,fd)
                constrained[i,j] += ((1/f[i,j]<g) and (1/fp[i,j]<g))
    constrained = constrained>0
    return constrained
def StellarCooling(ax,fvals,epsvals,text_pos=[5e6,1e-7],facecolor=PlotFuncs.HB_col,edgecolor='k',text_col='w',fs=30,rotation=90,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2],\
                   edge_on=True,linestyle='-'):
    """Shade the horizontal-branch stellar-cooling exclusion on `ax`.

    The bound is applied to the active coupling combination
    sqrt(g1^2 + g2^2) against the HB limit 6.6e-11.
    NOTE(review): `f` from meshgrid is unused; Couplings receives the 1-D
    `fvals`, which broadcasts against the 2-D `eps` to the same result.
    NOTE(review): `text_col` is accepted but never forwarded to ax.text.
    """
    f,eps = meshgrid(fvals,epsvals)
    m1,m2,g1,g2 = Couplings(fvals,eps,k,AnomalyCoefficients)
    g_active = sqrt(g1**2+g2**2)
    HB = (g_active>6.6e-11)
    PlotFuncs.PlotContour(ax,fvals,epsvals,HB,zorder=0,alpha=1.0,lw=5,facecolor=facecolor,edgecolor=edgecolor,linestyle=linestyle,edge_on=edge_on)
    ax.text(text_pos[0],text_pos[1],r'{\bf Stellar cooling}',rotation=rotation,fontsize=fs)
    return
def Omega_gw(fvals,m,k,A=0.8,eg=0.7,Omega_rad=4.15e-5):
    """Present-day GW spectrum from axion domain-wall annihilation.

    Parameters
    ----------
    fvals : frequencies [Hz] at which to evaluate the spectrum
    m : axion mass [eV]
    k : instanton-ratio parameter entering the annihilation temperature
    A : area parameter of the wall network. Bug fix: a hard-coded
        ``A = 0.8`` in the body previously shadowed this argument, so any
        caller-supplied value was silently ignored.
    eg : efficiency parameter
    Omega_rad : present-day radiation density

    Returns the broken power law Omega_gw(f): rising as f^3 below the peak
    frequency and falling as 1/f above it.
    """
    M2 = 2.4e18*1e9   # reduced Planck mass [eV]
    M = 1.22e19*1e9   # Planck mass [eV]
    g = 10            # relativistic dof at annihilation
    g0 = 3.36
    gs0 = 3.1
    gann = 10
    gsann = 10
    sigma = K_QCD*(1e9**4)/m # wall tension (original comment: eV)
    Tann = sqrt(m/(1+1/k))*sqrt(M2/(2*pi))*(90/g)**(1/4) # annihilation temperature [eV]
    Oann = eg*A**2*sigma**2/(Tann**4*M**2)
    fpeak = 1.1e-9*(Tann/1e7)   # redshifted peak frequency [Hz]
    Opeak = Omega_rad*(gann/g0)*(gs0/gsann)**(4/3)*Oann
    Omega1 = Opeak*(fvals/fpeak)**3    # causal f^3 tail below the peak
    Omega2 = Opeak*(fvals/fpeak)**-1   # 1/f tail above the peak
    Omega = Omega1*(fvals<fpeak) + Omega2*(fvals>=fpeak)
    return Omega
def Omega_gw_peak(m,k,A=0.8,eg=0.7,Omega_rad=4.15e-5):
    """Peak frequency and amplitude of the domain-wall GW spectrum.

    Returns (fpeak [Hz], Opeak). Bug fix: a hard-coded ``A = 0.8`` in the
    body previously shadowed the ``A`` argument; the parameter is now
    honoured, matching Omega_gw().
    """
    M2 = 2.4e18*1e9   # reduced Planck mass [eV]
    M = 1.22e19*1e9   # Planck mass [eV]
    g = 10            # relativistic dof at annihilation
    g0 = 3.36
    gs0 = 3.1
    gann = 10
    gsann = 10
    sigma = K_QCD*(1e9**4)/m # wall tension (original comment: eV)
    Tann = sqrt(m/(1+1/k))*sqrt(M2/(2*pi))*(90/g)**(1/4) # eV
    Oann = eg*A**2*sigma**2/(Tann**4*M**2)
    fpeak = 1.1e-9*(Tann/1e7)
    Opeak = Omega_rad*(gann/g0)*(gs0/gsann)**(4/3)*Oann
    return fpeak,Opeak
def MapLimit_GW(file,fvals,epsvals,k=0.6,AnomalyCoefficients=[3,0.5,13/2,3/2],A=0.8,eg=0.7,Omega_rad=4.15e-5):
    """Scan the (f, eps) grid for points whose combined domain-wall GW signal
    exceeds a power-law-integrated sensitivity curve.

    Returns a boolean grid, True where at least one frequency bin of the
    summed two-eigenstate spectrum exceeds the published sensitivity.
    """
    f,eps = meshgrid(fvals,epsvals)
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k=k,AnomalyCoefficients=AnomalyCoefficients)
    # Sensitivity files store log10 values in columns 0 (frequency) and 1 (Omega).
    dat = 10.0**loadtxt('limit_data/GravitationalWaves/power-law-integrated_sensitivities/plis_'+file+'.dat')[:,0:2]
    f_lim = dat[:,0]
    O_lim = dat[:,1]
    ni = shape(m2)[0]   # NOTE: locally shadows the module-level constant `ni`
    nj = shape(m2)[1]
    constrained = zeros_like(m2)
    for i in range(0,ni):
        for j in range(0,nj):
            # Both eigenstates' wall networks contribute to the total spectrum.
            Omega = Omega_gw(f_lim,m2[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)+Omega_gw(f_lim,m1[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)
            constrained[i,j] = sum(Omega>O_lim)
    constrained = constrained>0
    return constrained
def MapLimit_NanoGRAV(fvals,epsvals,k=0.6,AnomalyCoefficients=[3,0.5,13/2,3/2],A=0.8,eg=0.7,Omega_rad=4.15e-5):
    """Region of the (f, eps) grid compatible with the NANOGrav signal hint.

    A point qualifies when the summed two-eigenstate spectrum lies entirely
    below the upper contour AND above the lower contour of the hint region.
    NOTE(review): the row slices 5:10 / 10:15 encode the file layout of
    NANOGrav_hint.txt -- confirm against the data file before editing.
    """
    f,eps = meshgrid(fvals,epsvals)
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k=k,AnomalyCoefficients=AnomalyCoefficients)
    dat = loadtxt('limit_data/GravitationalWaves/NANOGrav_hint.txt')
    f_lim1 = dat[5:10,0] # upper limit
    O_lim1 = dat[5:10,1]
    f_lim2 = dat[10:15,0] # lower limit
    O_lim2 = dat[10:15,1]
    ni = shape(m2)[0]   # NOTE: locally shadows the module-level constant `ni`
    nj = shape(m2)[1]
    constrained = zeros_like(m2)
    for i in range(0,ni):
        for j in range(0,nj):
            Omega1 = Omega_gw(f_lim1,m2[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)+Omega_gw(f_lim1,m1[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)
            Omega2 = Omega_gw(f_lim2,m2[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)+Omega_gw(f_lim2,m1[i,j],k,A=A,eg=eg,Omega_rad=Omega_rad)
            # Product of counts: nonzero only if below-upper AND above-lower.
            constrained[i,j] = sum(Omega1<O_lim1)*sum(Omega2>O_lim2)
    constrained = constrained>0
    return constrained
# Cosmology -- module-level constants used by the Omega_Case*/Thetas_CaseI
# relic-abundance functions below.
Mpl_GeV = 2.4e18       # reduced Planck mass [GeV]
Mpl_MeV = Mpl_GeV*1e3  # reduced Planck mass [MeV]
T0 = 2.35e-4/1e6 # MeV -- photon temperature today
g0 = 3.91   # entropy degrees of freedom today
g1 = 61.75  # relativistic degrees of freedom at axion oscillation
ni = 6.68   # presumably the temperature exponent of the axion mass,
            # m(T) ~ T^(-ni/2) -- TODO confirm against the paper
Tt = 103.0 # MeV -- temperature where the mass reaches its zero-T value
rho_c = 8.06e-11 # eV^4 -- critical density
def Omega_CaseI(f,eps,k,theta_1,theta_2):
    """Relic abundances of both axions for given misalignment angles.

    Case I: both initial angles are free parameters. Returns
    (Omega1, Omega2) in units of the critical density.
    NOTE(review): Parameters() is called with its default k and anomaly
    coefficients even though `k` is an argument here; `k` only enters the
    Omega2 rescaling factor -- confirm this is intended.
    """
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps)
    # Oscillation temperature T1 from 3H(T1) = m1(T1), with m(T) ~ T^(-ni/2).
    T1 = ((m1/1e6)*Mpl_MeV*sqrt(90)/(3*pi*sqrt(g1)))**(2/(ni+4))*Tt**(ni/(ni+4))
    m1_T1 = m1*(Tt/T1)**(ni/2) # eV
    # Energy density today, redshifted by (T0/T1)^3 and the dof ratio.
    rho1 = m1_T1*m1*theta_1**2*(f*1e9)**2*(T0/T1)**3*(g0/g1) # eV
    ke = k**((ni+2)/(2*(ni+4)))*eps**(-(ni+6)/(ni+4))
    Omega1 = rho1/rho_c
    Omega2 = Omega1*(theta_2/theta_1)**2*ke
    return Omega1,Omega2
def Thetas_CaseI(f,eps,k,Omega_dm=0.12):
    """Maximum misalignment angles for which each axion alone saturates the
    dark-matter abundance Omega_dm. Returns (theta_1_max, theta_2_max)."""
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps)
    T1 = ((m1/1e6)*Mpl_MeV*sqrt(90)/(3*pi*sqrt(g1)))**(2/(ni+4))*Tt**(ni/(ni+4))
    m1_T1 = m1*(Tt/T1)**(ni/2) # eV
    # F = rho1/theta_1^2: abundance per unit squared misalignment angle.
    F = (m1_T1*m1*(f*1e9)**2*(T0/T1)**3*(g0/g1))
    ke = k**((ni+2)/(2*(ni+4)))*eps**(-(ni+6)/(ni+4))
    theta_1_max = sqrt(Omega_dm*rho_c/F)
    theta_2_max = sqrt(Omega_dm*rho_c/(F*ke))
    return theta_1_max,theta_2_max
def Omega_CaseII(f,eps,k,Omega_dm=0.12):
    """Relic abundances when theta_1 is fixed at the RMS misalignment value.

    Case II: theta_1 = pi/sqrt(3); theta_2 is solved so that the two axions
    together make up Omega_dm. Returns (Omega1, Omega2, theta_2).
    """
    theta_1 = pi/sqrt(3)  # RMS of a uniformly distributed angle
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps)
    T1 = ((m1/1e6)*Mpl_MeV*sqrt(90)/(3*pi*sqrt(61.75)))**(2/(ni+4))*Tt**(ni/(ni+4))
    rho_c = 8.06e-11 # eV^4 -- NOTE: locally shadows the module constant (same value)
    m1_T1 = m1*(Tt/T1)**(ni/2) # eV
    rho1 = m1_T1*m1*theta_1**2*(f*1e9)**2*(T0/T1)**3*(g0/g1) # eV
    ke = k**((ni+2)/(2*(ni+4)))*eps**(-(ni+6)/(ni+4))
    Omega1 = rho1/rho_c
    # theta_2 that fills the remaining abundance (NaN where Omega1 > Omega_dm).
    theta_2 = theta_1*sqrt((Omega_dm/Omega1 - 1)/ke)
    Omega2 = Omega_dm-Omega1
    return Omega1,Omega2,theta_2
def Omega_CaseIII(f,eps,k):
    """Relic abundances when both angles sit at the RMS misalignment value.

    Case III: theta_1 = theta_2 = pi/sqrt(3). Returns (Omega1, Omega2).

    Bug fix: this function was previously defined twice back-to-back; the
    second definition (which merely re-declared the module-level cosmology
    constants locally with identical values) silently shadowed the first.
    The duplicates are merged into this single, behaviorally identical
    definition using the module-level constants.
    """
    theta_1 = pi/sqrt(3)
    theta_2 = pi/sqrt(3)
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps)
    # Oscillation temperature T1 from 3H(T1) = m1(T1), with m(T) ~ T^(-ni/2).
    T1 = ((m1/1e6)*Mpl_MeV*sqrt(90)/(3*pi*sqrt(61.75)))**(2/(ni+4))*Tt**(ni/(ni+4))
    m1_T1 = m1*(Tt/T1)**(ni/2) # eV
    rho1 = m1_T1*m1*theta_1**2*(f*1e9)**2*(T0/T1)**3*(g0/g1) # eV
    Omega1 = rho1/rho_c
    Omega2 = Omega1*(theta_2/theta_1)**2*k**((ni+2)/(2*(ni+4)))*eps**(-(ni+6)/(ni+4))
    return Omega1,Omega2
def Theta2_CaseII(f,eps,k,Omega_dm=0.12):
    """Angle theta_2 that completes the DM abundance in Case II.

    NOTE(review): near-duplicate of Omega_CaseII -- same computation, but
    returning (theta_2, Omega1, Omega2) instead of (Omega1, Omega2, theta_2),
    and with the module-level cosmology constants re-declared locally with
    identical values. Candidate for consolidation.
    """
    Mpl_GeV = 2.4e18
    Mpl_MeV = Mpl_GeV*1e3
    T0 = 2.35e-4/1e6 # MeV
    g0 = 3.91
    g1 = 61.75
    ni = 6.68
    Tt = 103.0 # MeV
    theta_1 = pi/sqrt(3)  # RMS of a uniformly distributed angle
    dm_sq,m1,m2,tan_2alpha = Parameters(f,eps)
    T1 = ((m1/1e6)*Mpl_MeV*sqrt(90)/(3*pi*sqrt(61.75)))**(2/(ni+4))*Tt**(ni/(ni+4))
    rho_c = 8.06e-11 # eV^4
    m1_T1 = m1*(Tt/T1)**(ni/2) # eV
    rho1 = m1_T1*m1*theta_1**2*(f*1e9)**2*(T0/T1)**3*(g0/g1) # eV
    ke = k**((ni+2)/(2*(ni+4)))*eps**(-(ni+6)/(ni+4))
    Omega1 = rho1/rho_c
    theta_2 = theta_1*sqrt((Omega_dm/Omega1 - 1)/ke)
    Omega2 = Omega_dm-Omega1
    return theta_2,Omega1,Omega2
# def Superradiance(ax,fvals,epsvals,k=0.04,AnomalyCoefficients=[3,0.5,13/2,3/2],text_shift=[1,1],fs=25,\
# whichfile='Mehta',facecolor='gray',edgecolor='k',text_col='k',text_rot=51):
# f,eps = meshgrid(fvals,epsvals)
# fp = f/eps
# dm_sq,m1,m2,tan_2alpha = Parameters(f,eps,k,AnomalyCoefficients)
# md,fd = loadtxt('limit_data/fa/BlackHoleSpins_'+whichfile+'.txt',unpack=True)
# ni = shape(f)[0]
# nj = shape(f)[1]
# constrained = zeros((ni,nj))
# for i in range(0,ni):
# for j in range(0,nj):
# m1_ij = m1[i,j]
# m2_ij = m2[i,j]
# if (m1_ij<amax(md)) and (m1_ij>amin(md)):
# g = interp(m1_ij,md,fd)
# constrained[i,j] = ((1/f[i,j]<g) and (1/fp[i,j]<g))
# if (m2_ij<amax(md)) and (m2_ij>amin(md)):
# g = interp(m2_ij,md,fd)
# constrained[i,j] += ((1/f[i,j]<g) and (1/fp[i,j]<g))
#
# ax.contour(fvals,epsvals,constrained,levels=[0],linewidths=3,colors=edgecolor)
# constrained[constrained==0] = nan
# constrained[~isnan(constrained)] = 1.0
# ax.contourf(fvals,epsvals,constrained,levels=[0,1],alpha=1,colors=facecolor)
#
# text_pos1 = [3e12,4e-6]
# ax.text(text_shift[0]*text_pos1[0],text_shift[1]*text_pos1[1],r'{\bf Black hole superradiance}',fontsize=fs,color=text_col,rotation=text_rot)
# return
|
<reponame>Wipersee/profielp
# Generated by Django 3.2.8 on 2021-11-18 17:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the orders app: Complaint, OrderStatus and Order.

    Auto-generated by Django 3.2.8. Already-applied migrations must not be
    edited in a way that changes the resulting schema.
    """

    initial = True

    dependencies = [
        ('users', '0001_initial'),
        # Complaint/Order rows reference the configurable (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Complaint',
            fields=[
                ('complaint_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('comment', models.TextField(blank=True, help_text='Requester complaint text', null=True)),
                ('admin_comment', models.TextField(blank=True, help_text='Admin comment', null=True)),
                ('date', models.DateTimeField(auto_now_add=True, help_text='Date and time of the complaint creating')),
                ('resolve_date', models.DateTimeField(blank=True, help_text='Date and time of the complaint resolving', null=True)),
                ('resolved', models.BooleanField(default=False)),
                ('admin_id', models.ForeignKey(help_text='ID of the Admin that assigned to the complaint', on_delete=django.db.models.deletion.DO_NOTHING, to='users.admin')),
                ('requester_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Complaint',
                'verbose_name_plural': 'Complaints',
            },
        ),
        migrations.CreateModel(
            name='OrderStatus',
            fields=[
                ('order_status_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('order_status', models.CharField(choices=[('CRTD', 'Created'), ('ACCPTD', 'Accepted'), ('INPRGRS', 'In progress'), ('ADMN', 'Complaint under consideration by the administrator'), ('DONE', 'Order is finished successfully'), ('DECLINED', 'Order is declined by performer')], help_text='Order status', max_length=16, unique=True)),
            ],
            options={
                'verbose_name': 'Order status',
                'verbose_name_plural': 'Order statuses',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('order_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('address', models.TextField(blank=True, help_text='Order address', null=True)),
                ('latitude', models.FloatField(help_text='Order address latitude')),
                ('longitude', models.FloatField(help_text='Order address longitude')),
                ('comment', models.TextField(blank=True, help_text='Order additional comment', null=True)),
                ('is_high_priority', models.BooleanField(default=False, help_text='True if order is urgent')),
                ('date', models.DateTimeField(auto_now=True, help_text='Date and time of the order creation')),
                ('completion_date', models.DateTimeField(blank=True, help_text='Date and time of the order completion', null=True)),
                ('customer_approved', models.BooleanField(default=False, help_text='True if customer approved the work')),
                ('performer_approved', models.BooleanField(default=False, help_text='True if performer approved the work')),
                ('complaint_id', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='orders.complaint')),
                ('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='users.customer')),
                ('order_status_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='orders.orderstatus')),
                ('performer_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='users.performer')),
            ],
        ),
    ]
|
#!/usr/local/sbin/charm-env python3
#
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import subprocess
import sys
import traceback
from oslo_config import cfg
# Load modules from $CHARM_DIR/lib
sys.path.append('lib')
sys.path.append('reactive')

from charms.layer import basic

# Install/activate the charm's Python dependencies and prime the reactive
# flag states before importing anything that relies on them.
basic.bootstrap_charm_deps()
basic.init_config_states()

import charms_openstack.bus
import charmhelpers.core as ch_core

# Register charm classes with the charms.openstack dispatcher.
charms_openstack.bus.discover()

NEUTRON_CONF = '/etc/neutron/neutron.conf'
# Filtered copy of neutron.conf consumed by neutron-ovn-db-sync-util.
NEUTRON_OVN_DB_SYNC_CONF = '/etc/neutron/neutron-ovn-db-sync.conf'
def get_neutron_credentials():
    """Retrieve service credentials from Neutron's configuration file.

    Since we are a subordinate of the neutron-api charm and have no direct
    relationship with Keystone ourselves we rely on gleaning Neutron's
    credentials from its config file.

    :returns: Map of environment variable name and appropriate value for auth.
    :rtype: Dict[str,str]
    """
    sections = {}
    parser = cfg.ConfigParser(NEUTRON_CONF, sections)
    parser.parse()
    auth = sections['keystone_authtoken']
    # Each parsed option maps to a list of values; the first one wins.
    env_to_option = {
        'OS_USER_DOMAIN_NAME': 'user_domain_name',
        'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
        'OS_AUTH_URL': 'auth_url',
        'OS_PROJECT_NAME': 'project_name',
        'OS_USERNAME': 'username',
        'OS_PASSWORD': 'password',
    }
    return {env: auth[option][0] for env, option in env_to_option.items()}
def get_neutron_db_connection_string():
    """Retrieve db connection string from Neutron's configuration file.

    Since we are a subordinate of the neutron-api charm and have no direct
    relationship with the database ourselves we rely on gleaning Neutron's
    credentials from its config file.

    :returns: SQLAlchemy consumable DB connection string.
    :rtype: str
    """
    parsed = {}
    # Options parse to lists of values; take the first 'connection' entry.
    cfg.ConfigParser(NEUTRON_CONF, parsed).parse()
    return parsed['database']['connection'][0]
@contextlib.contextmanager
def write_filtered_neutron_config_for_sync_util():
    """This helper exists to work around LP: #1894048.

    Load neutron config and write out a copy with any sections or options
    offending the `neutron-ovn-db-sync-util` removed.

    The helper should be used as a context manager to have the temporary
    config file removed when done. Example:

        with write_filtered_neutron_config_for_sync_util():
            do_something()
    """
    # Make sure the file we create has safe permissions
    stored_mask = os.umask(0o0027)
    try:
        with open(NEUTRON_CONF, 'r') as fin:
            with open(NEUTRON_OVN_DB_SYNC_CONF, 'w') as fout:
                for line in fin.readlines():
                    # The ovn-db-sync-util chokes on this. LP: #1894048
                    if line.startswith('auth_section'):
                        continue
                    fout.write(line)
    finally:
        # Restore umask for further execution regardless of any exception
        # occurring above.
        os.umask(stored_mask)
    try:
        yield
    finally:
        # Bug fix: remove the temporary config file (it contains service
        # credentials) even when the managed block raises; previously the
        # unlink only ran on the success path, leaking the file on failure.
        os.unlink(NEUTRON_OVN_DB_SYNC_CONF)
def migrate_mtu(args):
    """Reduce MTU on overlay networks prior to migration to Geneve.

    Runs the external `neutron-ovn-migration-mtu` tool in `verify` mode
    unless the operator confirmed via the `i-really-mean-it` action
    parameter, in which case `update` mode is used.

    :param args: Argument list
    :type args: List[str]
    """
    action_name = os.path.basename(args[0])
    # Without explicit confirmation we only verify, never mutate.
    dry_run = not ch_core.hookenv.action_get('i-really-mean-it')
    mode = 'verify' if dry_run else 'update'
    cp = subprocess.run(
        (
            'neutron-ovn-migration-mtu',
            mode,
            'mtu',
        ),
        capture_output=True,
        universal_newlines=True,
        # Run with a minimal PATH and the Neutron auth variables exported
        # for the tool's OpenStack API calls.
        env={
            'PATH': '/usr/bin',
            **get_neutron_credentials(),
        })
    if dry_run:
        banner_msg = '{}: OUTPUT FROM VERIFY'.format(action_name)
    else:
        banner_msg = '{}: OUTPUT FROM UPDATE'.format(action_name)
    # we pass the output through and it will be captured both in log and
    # action output
    output_indicates_failure = False
    for output_name in ('stdout', 'stderr'):
        fh = getattr(sys, output_name)
        data = getattr(cp, output_name)
        print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data,
              file=fh)
        for fail_word in ('Exception', 'Traceback'):
            if fail_word in data:
                # the `neutron-ovn-migration-mtu` tool does not set an error
                # code on failure, look for errors in the output and set action
                # status accordingly.
                output_indicates_failure = True
    if cp.returncode != 0 or output_indicates_failure:
        ch_core.hookenv.action_fail(
            'Execution failed, please investigate output.')
def migrate_ovn_db(args):
    """Migrate the Neutron DB into OVN with the `neutron-ovn-db-sync-util`.

    Runs in `log` (dry-run) mode unless the operator confirmed via the
    `i-really-mean-it` action parameter, in which case `repair` is used.

    :param args: Argument list
    :type args: List[str]
    """
    action_name = os.path.basename(args[0])
    dry_run = not ch_core.hookenv.action_get('i-really-mean-it')
    sync_mode = 'log' if dry_run else 'repair'
    # The sync util needs a filtered copy of neutron.conf (LP: #1894048);
    # the context manager removes the copy afterwards.
    with write_filtered_neutron_config_for_sync_util():
        cp = subprocess.run(
            (
                'neutron-ovn-db-sync-util',
                '--config-file', NEUTRON_OVN_DB_SYNC_CONF,
                '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini',
                '--ovn-neutron_sync_mode', sync_mode,
            ),
            capture_output=True,
            universal_newlines=True,
        )
    if dry_run:
        banner_msg = '{}: OUTPUT FROM DRY-RUN'.format(action_name)
    else:
        banner_msg = '{}: OUTPUT FROM SYNC'.format(action_name)
    # we pass the output through and it will be captured both in log and
    # action output
    output_indicates_failure = False
    for output_name in ('stdout', 'stderr'):
        fh = getattr(sys, output_name)
        data = getattr(cp, output_name)
        print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data,
              file=fh)
        if 'ERROR' in data:
            # the `neutron-ovn-db-sync-util` tool does not set an error code on
            # failure, look for errors in the output and set action status
            # accordingly.
            output_indicates_failure = True
    if cp.returncode != 0 or output_indicates_failure:
        ch_core.hookenv.action_fail(
            'Execution failed, please investigate output.')
def offline_neutron_morph_db(args):
    """Perform offline morphing of tunnel networks in the Neutron DB.

    Delegates to the bundled neutron_offline_network_type_update.py script,
    in `dry` mode unless the operator confirmed via `i-really-mean-it`.

    :param args: Argument list
    :type args: List[str]
    """
    action_name = os.path.basename(args[0])
    dry_run = not ch_core.hookenv.action_get('i-really-mean-it')
    mode = 'dry' if dry_run else 'morph'
    cp = subprocess.run(
        (
            '{}'.format(
                os.path.join(
                    ch_core.hookenv.charm_dir(),
                    'files/scripts/neutron_offline_network_type_update.py')),
            get_neutron_db_connection_string(),
            mode,
        ),
        capture_output=True,
        universal_newlines=True,
        # We want this tool to run outside of the charm venv to let it consume
        # system Python packages.
        env={'PATH': '/usr/bin'},
    )
    if dry_run:
        banner_msg = '{}: OUTPUT FROM DRY-RUN'.format(action_name)
    else:
        banner_msg = '{}: OUTPUT FROM MORPH'.format(action_name)
    # we pass the output through and it will be captured both in log and
    # action output
    for output_name in ('stdout', 'stderr'):
        fh = getattr(sys, output_name)
        data = getattr(cp, output_name)
        print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data,
              file=fh)
    if cp.returncode != 0:
        ch_core.hookenv.action_fail(
            'Execution failed, please investigate output.')
# Map of action (i.e. invoking symlink) names to implementations,
# dispatched in main().
ACTIONS = {
    'migrate-mtu': migrate_mtu,
    'migrate-ovn-db': migrate_ovn_db,
    'offline-neutron-morph-db': offline_neutron_morph_db,
}
def main(args):
    """Dispatch to the action named by the invoking symlink (argv[0]).

    Returning a string makes sys.exit() print it to stderr and exit with
    status 1; the success path returns None (exit status 0).
    """
    action_name = os.path.basename(args[0])
    try:
        action = ACTIONS[action_name]
    except KeyError:
        return 'Action {} undefined'.format(action_name)
    else:
        try:
            action(args)
        except Exception as e:
            # Unexpected failures are logged with a traceback and surfaced
            # as a failed action rather than crashing the hook.
            ch_core.hookenv.log('action "{}" failed: "{}" "{}"'
                                .format(action_name, str(e),
                                        traceback.format_exc()),
                                level=ch_core.hookenv.ERROR)
            ch_core.hookenv.action_fail(str(e))
# Script entry point; exit status derives from main()'s return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
# Container to store timestep info (one transition of the MDP)
Experience = namedtuple(
    'Experience',
    ('state', 'action', 'next_state', 'reward')
)

# Hyper-params
batch_size = 512      # replay batch per optimisation step
gamma = 0.999         # discount factor
target_update = 10    # episodes between target-network syncs
memory_size = 900000  # replay-buffer capacity
lr = 0.001            # Adam learning rate
num_episodes = 500
epsilon = 0.10        # fixed exploration rate (no decay schedule)
# Deep Q Network
class DQN(nn.Module):
    """Fully-connected Q-network: flattened RGB screen -> 2 action values."""

    def __init__(self, img_height, img_width):
        super().__init__()
        n_inputs = img_height * img_width * 3  # 3 colour channels
        self.fc1 = nn.Linear(in_features=n_inputs, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.fc3 = nn.Linear(in_features=64, out_features=32)
        self.out = nn.Linear(in_features=32, out_features=2)

    def forward(self, t):
        x = t.flatten(start_dim=1)
        # Three ReLU-activated hidden layers, linear output head.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        return self.out(x)
class Agent():
    """Epsilon-greedy action selector over a policy network."""

    def __init__(self, num_actions, epsilon, device):
        self.current_step = 0
        self.num_actions = num_actions
        self.epsilon = epsilon
        self.device = device

    def select_action(self, state, policy_net):
        # Explore with probability epsilon, otherwise act greedily.
        explore = self.epsilon > random.random()
        if not explore:
            return policy_net(state).argmax(dim=1).to(self.device)
        choice = random.randrange(self.num_actions)
        return torch.tensor([choice]).to(self.device)
class ReplayMemory():
    """Fixed-capacity circular buffer of experiences."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.mem_count = 0  # total number of pushes ever made

    def push(self, experience):
        """Append; once full, overwrite the oldest slot (circular)."""
        slot = self.mem_count % self.capacity
        if len(self.memory) == self.capacity:
            self.memory[slot] = experience
        else:
            self.memory.append(experience)
        self.mem_count += 1

    def sample(self, batch_size):
        """Uniform random sample without replacement."""
        return random.sample(self.memory, batch_size)

    def can_provide_sample(self, batch_size):
        return batch_size <= len(self.memory)
# Environment: wraps gym CartPole, delivering screen-difference states
class Env():
    def __init__(self, device):
        self.device = device
        # unwrapped: bypass the episode time-limit wrapper
        self.env = gym.make('CartPole-v0').unwrapped
        self.env.reset()
        self.current_screen = None  # last processed screen (None = episode start)
        self.done = False

    def reset(self):
        self.env.reset()
        self.current_screen = None

    def close(self):
        self.env.close()

    def render(self, mode='human'):
        return self.env.render(mode)

    def num_actions_available(self):
        return self.env.action_space.n

    def take_action(self, action):
        # Step the underlying env; keep only the done flag and the reward.
        observation, reward, self.done, info = self.env.step(action.item())
        return torch.tensor([reward], device=self.device)

    def is_starting(self):
        return self.current_screen is None

    def get_state(self):
        """State = difference between the two most recent processed screens;
        an all-black screen marks episode boundaries (start or terminal)."""
        if self.is_starting() or self.done:
            self.current_screen = self.get_processed_screen()
            black_screen = torch.zeros_like(self.current_screen)
            return black_screen
        else:
            screen1 = self.current_screen
            screen2 = self.get_processed_screen()
            self.current_screen = screen2
            return screen2 - screen1

    def get_screen_height(self):
        # Processed screens are (batch, channel, height, width).
        return self.get_processed_screen().shape[2]

    def get_screen_width(self):
        return self.get_processed_screen().shape[3]

    def get_processed_screen(self):
        # HWC uint8 frame -> CHW, then crop and resize.
        screen = self.render('rgb_array').transpose((2, 0, 1))
        screen = self.crop_screen(screen)
        return self.transform_screen_data(screen)

    def crop_screen(self, screen):
        # Keep the vertical strip (40%-80% of height) containing the cart.
        screen_height = screen.shape[1]
        top = int(screen_height * 0.4)
        bottom = int(screen_height * 0.8)
        screen = screen[:, top:bottom, :]
        return screen

    def transform_screen_data(self, screen):
        # Normalise to [0, 1] floats, resize to 40x90 and add a batch dim.
        screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
        screen = torch.from_numpy(screen)
        resize = T.Compose([
            T.ToPILImage(), T.Resize((40, 90)), T.ToTensor()])
        return resize(screen).unsqueeze(0).to(self.device)
# Use the GPU when available; all tensors and models live on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
env = Env(device)
env.reset()
def get_current(policy_net, states, actions):
    """Q(s, a) for each transition: evaluate the policy network, then pick
    the column corresponding to the action that was actually taken."""
    q_all = policy_net(states)
    return q_all.gather(dim=1, index=actions.unsqueeze(-1))
def get_next(target_net, next_states):
    """Max target-network Q-value for each next state; 0 for terminal states.

    Terminal states are encoded upstream (Env.get_state) as all-black
    screens, so a row whose flattened maximum is exactly 0 is treated as
    final and contributes no bootstrap value.

    Bug fix: the output device is now taken from ``next_states`` instead of
    the module-level ``device`` global, removing a hidden global dependency
    (behavior is identical when all tensors live on the training device).
    """
    final_state_locations = next_states.flatten(start_dim=1) \
        .max(dim=1)[0].eq(0).type(torch.bool)
    non_final_state_locations = (final_state_locations == False)
    non_final_states = next_states[non_final_state_locations]
    batch_size = next_states.shape[0]
    values = torch.zeros(batch_size, device=next_states.device)
    # detach(): target values must not backpropagate into the target net.
    values[non_final_state_locations] = target_net(non_final_states).max(dim=1)[0].detach()
    return values
def extract_tensors(experiences):
    """Convert a batch of Experience tuples into batched tensors.

    Returns (states, actions, rewards, next_states) -- note rewards come
    third even though Experience stores next_state third.

    Generalization: uses positional unpacking instead of rebuilding an
    Experience namedtuple, so the function no longer depends on the
    module-level Experience name and accepts any 4-field
    (state, action, next_state, reward) tuples, Experience included.
    """
    # Transpose the list of transitions into four per-field tuples.
    states, actions, next_states, rewards = zip(*experiences)
    return (
        torch.cat(states),
        torch.cat(actions),
        torch.cat(rewards),
        torch.cat(next_states),
    )
def get_moving_average(period, values):
    """Moving average over `period` entries, zero-padded so the output
    always has the same length as `values` (all zeros until enough data)."""
    series = torch.tensor(values, dtype=torch.float)
    if len(series) < period:
        # Not enough data yet: return an all-zero series of the same length.
        return torch.zeros(len(series)).numpy()
    windows = series.unfold(dimension=0, size=period, step=1)
    averages = windows.mean(dim=1).flatten(start_dim=0)
    padded = torch.cat((torch.zeros(period - 1), averages))
    return padded.numpy()
def plot(values, moving_avg_period):
    """Plot episode durations and their moving average; save to cartpole.png."""
    plt.figure(2)
    plt.clf()
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(values)
    moving_avg = get_moving_average(moving_avg_period, values)
    plt.plot(moving_avg)
    plt.savefig("cartpole.png")
    plt.pause(0.001)  # brief pause so an interactive figure refreshes
agent = Agent(env.num_actions_available(), epsilon, device)
memory = ReplayMemory(memory_size)
# Policy and target networks share one architecture; the target net is a
# periodically-synced frozen copy used for the bootstrap targets.
policy_net = DQN(env.get_screen_height(), env.get_screen_width()).to(device)
target_net = DQN(env.get_screen_height(), env.get_screen_width()).to(device)
# NOTE(review): closes the window opened by the screen-size probes above;
# rendering resumes during training -- confirm this is intended.
env.close()
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.Adam(params=policy_net.parameters(), lr=lr)

episode_durations = []
for episode in range(num_episodes):
    env.reset()
    state = env.get_state()
    for timestep in count():
        action = agent.select_action(state, policy_net)
        reward = env.take_action(action)
        next_state = env.get_state()
        memory.push(Experience(state, action, next_state, reward))
        state = next_state
        if memory.can_provide_sample(batch_size):
            experiences = memory.sample(batch_size)
            states, actions, rewards, next_states = extract_tensors(
                experiences)
            # Standard DQN update: Q(s,a) -> r + gamma * max_a' Q_target(s',a').
            current_q_values = get_current(policy_net, states, actions)
            next_q_values = get_next(target_net, next_states)
            target_q_values = (next_q_values * gamma) + rewards
            loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if env.done:
            # timestep counts from 0, so this records steps-1 -- TODO confirm intended
            episode_durations.append(timestep)
            plot(episode_durations, 100)
            break
    # Periodically sync the frozen target network with the policy network.
    if episode % target_update == 0:
        target_net.load_state_dict(policy_net.state_dict())
env.close()
|
<reponame>Suitceyes-Project-Code/Tactile-Brush-Python
from Stroke import ActuatorPoint, Stroke, EPSILON
import math
class ActuatorStep:
    """One physical actuator activation: grid position, intensity and timing."""

    __slots__ = "line", "column", "intensity", "duration", "max_intensity"

    def __init__(self, column : int, line : int, intensity : float, duration : float, max_intensity : float):
        self.column = column
        self.line = line
        self.intensity = intensity
        self.duration = duration
        self.max_intensity = max_intensity

    def __str__(self):
        pieces = (
            "Physical actuator at column ", str(self.column),
            " and line ", str(self.line),
            " triggered for ", str(self.duration),
            " msec with intensity ", str(self.intensity),
        )
        return "".join(pieces)
class TactileBrush:
    """Maps virtual strokes on an actuator grid to timed physical actuations
    (Tactile Brush style phantom-actuator rendering)."""

    __slots__ = "_lines", "_columns", "_inter_dist", "_min_coord", "_max_coord", "_actuator_triggers"

    def __init__(self, lines : int, columns : int, distance : float):
        # Stored as interval counts, i.e. the index of the last actuator.
        self._lines = lines - 1
        self._columns = columns - 1
        self._inter_dist = distance
        self._min_coord = ActuatorPoint(0, 0)
        # NOTE(review): uses the raw `columns`/`lines` counts, putting the
        # grid bound one inter-actuator spacing beyond the last physical
        # actuator (which sits at (columns-1)*distance) -- confirm intended.
        self._max_coord = ActuatorPoint(columns * self._inter_dist, lines * self._inter_dist)
        self._actuator_triggers = { }

    def compute_stroke_steps(self, s : Stroke):
        """Compute and return {start_time: [ActuatorStep, ...]} for stroke s.

        Raises if either stroke endpoint lies outside the grid bounds.
        """
        if (self._is_point_within_grid(s.get_start()) and self._is_point_within_grid(s.get_end())) == False:
            raise Exception("Stroke start or end point are out of the grid range.")
        virtual_points = s.compute_parameters(self._lines, self._columns, self._inter_dist)
        self._actuator_triggers.clear()
        self._compute_physical_mapping(virtual_points, s.get_intensity())
        return self._actuator_triggers

    def pretty_print(self):
        # Dump the trigger schedule, grouped by start time.
        for p in self._actuator_triggers:
            print("Time " + str(p) + " ms:")
            for s in self._actuator_triggers[p]:
                print("\t " + str(s))

    def _insert_actuator_step(self, time : float, step : ActuatorStep):
        # Append to the time bucket, creating the bucket on first use.
        if time in self._actuator_triggers:
            self._actuator_triggers[time].append(step)
            return
        self._actuator_triggers[time] = list()
        self._actuator_triggers[time].append(step)

    def _compute_physical_mapping(self, virtual_points : list, global_intensity : float):
        """Translate virtual actuation points into physical actuator steps.

        A point exactly on a grid node triggers that actuator directly; a
        point on a grid line between two nodes is rendered by the two
        neighbouring actuators with sqrt-weighted intensities (so their
        squared intensities sum to global_intensity**2).
        """
        for e in virtual_points:
            # e.first/e.second are the x/y coordinates of the virtual point.
            if math.fmod(e.first, self._inter_dist) < EPSILON and math.fmod(e.second, self._inter_dist) < EPSILON:
                # Exactly on a physical actuator.
                step = ActuatorStep(round(e.first / self._inter_dist), round(e.second / self._inter_dist), global_intensity, e.get_duration(), e.timer_max_intensity)
                self._insert_actuator_step(e.get_start(), step)
            else:
                l1 = 0
                c1 = 0
                l2 = 0
                c2 = 0
                if math.fmod(e.first, self._inter_dist) < EPSILON:
                    # On a vertical grid line: interpolate between the
                    # actuators above and below.
                    c1 = c2 = round(e.first / self._inter_dist)
                    l1 = math.floor(e.second / self._inter_dist)
                    l2 = math.ceil(e.second / self._inter_dist)
                elif math.fmod(e.second, self._inter_dist) < EPSILON:
                    # On a horizontal grid line: interpolate left/right.
                    l1 = l2 = round(e.second / self._inter_dist)
                    c1 = math.floor(e.first / self._inter_dist)
                    c2 = math.ceil(e.first / self._inter_dist)
                else:
                    raise Exception("Virtual actuator at position (" + str(e.first) + ", " + str(e.second) + " is not on the physical actuators grid")
                # Fractional distance of the virtual point along the segment
                # (c1,l1)-(c2,l2), in grid units.
                ratio = math.hypot(c1 - e.first / self._inter_dist, l1 - e.second / self._inter_dist) / math.hypot(c1 - c2, l1 - l2)
                phy1 = ActuatorStep(c1, l1, math.sqrt(1 - ratio) * global_intensity, e.get_duration(), e.timer_max_intensity)
                phy2 = ActuatorStep(c2, l2, math.sqrt(ratio) * global_intensity, e.get_duration(), e.timer_max_intensity)
                self._insert_actuator_step(e.get_start(), phy1)
                self._insert_actuator_step(e.get_start(), phy2)

    def _is_point_within_grid(self, point : ActuatorPoint):
        # Inclusive bounds check against the configured grid extent.
        if point.first < self._min_coord.first or point.first > self._max_coord.first:
            return False
        if point.second < self._min_coord.second or point.second > self._max_coord.second:
            return False
        return True
if __name__ == "__main__":
    # Demo: 3x4 actuator grid, 1.5 units between actuators; diagonal stroke
    # over 1000 ms at full intensity.
    #t = TactileBrush(2, 4, 2.5)
    #s = Stroke(0, 1, 3, 0, 1000, 1)
    t = TactileBrush(3, 4, 1.5)
    s = Stroke(0.6666, 2, 3, 0.6666, 1000.0, 1.0)
    t.compute_stroke_steps(s)
t.pretty_print() |
<reponame>phuerta-tc/tcex
"""Case / Cases Object"""
# standard library
from typing import TYPE_CHECKING, Union
# first-party
from tcex.api.tc.v3.api_endpoints import ApiEndpoints
from tcex.api.tc.v3.artifacts.artifact_model import ArtifactModel
from tcex.api.tc.v3.case_attributes.case_attribute_model import CaseAttributeModel
from tcex.api.tc.v3.cases.case_filter import CaseFilter
from tcex.api.tc.v3.cases.case_model import CaseModel, CasesModel
from tcex.api.tc.v3.notes.note_model import NoteModel
from tcex.api.tc.v3.object_abc import ObjectABC
from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC
from tcex.api.tc.v3.security.user_groups.user_group_model import UserGroupModel
from tcex.api.tc.v3.security.users.user_model import UserModel
from tcex.api.tc.v3.tags.tag_model import TagModel
from tcex.api.tc.v3.tasks.task_model import TaskModel
if TYPE_CHECKING: # pragma: no cover
# first-party
from tcex.api.tc.v3.artifacts.artifact import Artifact
from tcex.api.tc.v3.case_attributes.case_attribute import CaseAttribute
from tcex.api.tc.v3.notes.note import Note
from tcex.api.tc.v3.tags.tag import Tag
from tcex.api.tc.v3.tasks.task import Task
class Cases(ObjectCollectionABC):
    """Collection of Case objects retrieved from the ThreatConnect v3 API.

    # Example of params input
    {
        'result_limit': 100, # Limit the retrieved results.
        'result_start': 10, # Starting count used for pagination.
        'fields': ['caseId', 'summary'] # Select additional return fields.
    }

    Args:
        session (Session): Session object configured with TC API Auth.
        tql_filters (list): List of TQL filters.
        params (dict): Additional query params (see example above).
    """

    def __init__(self, **kwargs) -> None:
        """Initialize class properties."""
        # Pull connection-level settings out before handing the remaining
        # kwargs to the collection model.
        session = kwargs.pop('session', None)
        tql_filter = kwargs.pop('tql_filter', None)
        params = kwargs.pop('params', None)
        super().__init__(session, tql_filter, params)
        self._model = CasesModel(**kwargs)
        self.type_ = 'cases'

    def __iter__(self) -> 'Case':
        """Iterate over CM objects."""
        return self.iterate(base_class=Case)

    @property
    def _api_endpoint(self) -> str:
        """Return the type specific API endpoint."""
        return ApiEndpoints.CASES.value

    @property
    def filter(self) -> 'CaseFilter':
        """Return the type specific filter object."""
        return CaseFilter(self.tql)
class Case(ObjectABC):
    """Cases Object.

    Args:
        artifacts (Artifacts, kwargs): A list of Artifacts corresponding to the Case.
        assignee (Assignee, kwargs): The user or group Assignee object for the Case.
        attributes (CaseAttributes, kwargs): A list of Attributes corresponding to the Case.
        case_close_time (str, kwargs): The date and time that the Case was closed.
        case_detection_time (str, kwargs): The date and time that ends the user initiated Case
            duration.
        case_occurrence_time (str, kwargs): The date and time that starts the user initiated Case
            duration.
        case_open_time (str, kwargs): The date and time that the Case was first opened.
        description (str, kwargs): The description of the Case.
        name (str, kwargs): The name of the Case.
        notes (Notes, kwargs): A list of Notes corresponding to the Case.
        resolution (str, kwargs): The Case resolution.
        severity (str, kwargs): The Case severity.
        status (str, kwargs): The Case status.
        tags (Tags, kwargs): A list of Tags corresponding to the Case (NOTE: Setting this parameter
            will replace any existing tag(s) with the one(s) specified).
        tasks (Tasks, kwargs): A list of Tasks corresponding to the Case.
        user_access (Users, kwargs): A list of Users that, when defined, are the only ones allowed
            to view or edit the Case.
        workflow_events (WorkflowEvents, kwargs): A list of workflowEvents (timeline) corresponding
            to the Case.
        workflow_template (WorkflowTemplate, kwargs): The Template that the Case is populated by.
        xid (str, kwargs): The **xid** for the Case.
    """

    def __init__(self, **kwargs) -> None:
        """Initialize class properties."""
        super().__init__(kwargs.pop('session', None))

        # properties
        self._model = CaseModel(**kwargs)
        self._nested_field_name = 'cases'
        self._nested_filter = 'has_case'
        self.type_ = 'Case'

    @property
    def _api_endpoint(self) -> str:
        """Return the type specific API endpoint."""
        return ApiEndpoints.CASES.value

    @property
    def model(self) -> 'CaseModel':
        """Return the model data."""
        return self._model

    @model.setter
    def model(self, data: Union['CaseModel', dict]) -> None:
        """Create model using the provided data."""
        if isinstance(data, type(self.model)):
            # provided data is already a model, nothing required to change
            self._model = data
        elif isinstance(data, dict):
            # provided data is raw response, load the model
            self._model = type(self.model)(**data)
        else:
            raise RuntimeError(f'Invalid data type: {type(data)} provided.')

    @property
    def as_entity(self) -> dict:
        """Return the entity representation of the object."""
        type_ = self.type_
        if hasattr(self.model, 'type'):
            type_ = self.model.type

        return {'type': type_, 'id': self.model.id, 'value': self.model.name}

    @property
    def artifacts(self) -> 'Artifact':
        """Yield Artifact from Artifacts."""
        # first-party
        from tcex.api.tc.v3.artifacts.artifact import Artifacts

        yield from self._iterate_over_sublist(Artifacts)

    @property
    def attributes(self) -> 'CaseAttribute':
        """Yield Attribute from Attributes."""
        # first-party
        from tcex.api.tc.v3.case_attributes.case_attribute import CaseAttributes

        yield from self._iterate_over_sublist(CaseAttributes)

    @property
    def notes(self) -> 'Note':
        """Yield Note from Notes."""
        # first-party
        from tcex.api.tc.v3.notes.note import Notes

        yield from self._iterate_over_sublist(Notes)

    @property
    def tags(self) -> 'Tag':
        """Yield Tag from Tags."""
        # first-party
        from tcex.api.tc.v3.tags.tag import Tags

        yield from self._iterate_over_sublist(Tags)

    @property
    def tasks(self) -> 'Task':
        """Yield Task from Tasks."""
        # first-party
        from tcex.api.tc.v3.tasks.task import Tasks

        yield from self._iterate_over_sublist(Tasks)

    def _stage(self, data, model_type, method_name: str):
        """Normalize *data* to *model_type*, mark it staged, and return it.

        Accepts an ObjectABC (its model is used), a dict (loaded into
        *model_type*), or an existing *model_type* instance; anything else
        raises RuntimeError naming the public method that was called.
        """
        if isinstance(data, ObjectABC):
            data = data.model
        elif isinstance(data, dict):
            data = model_type(**data)

        if not isinstance(data, model_type):
            raise RuntimeError(f'Invalid type passed in to {method_name}')
        data._staged = True
        return data

    def stage_artifact(self, data: Union[dict, 'ObjectABC', 'ArtifactModel']) -> None:
        """Stage artifact on the object."""
        self.model.artifacts.data.append(self._stage(data, ArtifactModel, 'stage_artifact'))

    # pylint: disable=redefined-builtin
    def stage_assignee(self, type: str, data: Union[dict, 'ObjectABC', 'ArtifactModel']) -> None:
        """Stage the user or group assignee on the object.

        The model type for a dict payload depends on *type* ('user' or
        'group'), so this method does not go through _stage.
        """
        if isinstance(data, ObjectABC):
            data = data.model
        elif type.lower() == 'user' and isinstance(data, dict):
            data = UserModel(**data)
        elif type.lower() == 'group' and isinstance(data, dict):
            data = UserGroupModel(**data)

        if not isinstance(data, (UserModel, UserGroupModel)):
            raise RuntimeError('Invalid type passed in to stage_assignee')
        data._staged = True
        self.model.assignee._staged = True
        self.model.assignee.type = type
        self.model.assignee.data = data

    def stage_attribute(self, data: Union[dict, 'ObjectABC', 'CaseAttributeModel']) -> None:
        """Stage attribute on the object."""
        self.model.attributes.data.append(self._stage(data, CaseAttributeModel, 'stage_attribute'))

    def stage_note(self, data: Union[dict, 'ObjectABC', 'NoteModel']) -> None:
        """Stage note on the object."""
        self.model.notes.data.append(self._stage(data, NoteModel, 'stage_note'))

    def stage_tag(self, data: Union[dict, 'ObjectABC', 'TagModel']) -> None:
        """Stage tag on the object."""
        self.model.tags.data.append(self._stage(data, TagModel, 'stage_tag'))

    def stage_task(self, data: Union[dict, 'ObjectABC', 'TaskModel']) -> None:
        """Stage task on the object."""
        self.model.tasks.data.append(self._stage(data, TaskModel, 'stage_task'))

    def stage_user_access(self, data: Union[dict, 'ObjectABC', 'UserModel']) -> None:
        """Stage user on the object."""
        self.model.user_access.data.append(self._stage(data, UserModel, 'stage_user_access'))
|
<filename>cocutils/dumpsc.py<gh_stars>0
# -*- coding:utf-8 -*-
# Credits: https://github.com/123456abcdef/cr-sc-dump/blob/master/dumpsc.py
import argparse
import hashlib
import io
import lzma
import os
from PIL import Image
class Reader(io.BytesIO):
    """BytesIO wrapper that tracks how many bytes remain unread."""

    def __init__(self, stream):
        super().__init__(stream)
        self._bytes_left = len(stream)

    def __len__(self):
        # Over-reads can drive the counter negative; clamp to zero.
        return max(self._bytes_left, 0)

    def read(self, size):
        self._bytes_left -= size
        return super().read(size)

    def read_byte(self):
        return int.from_bytes(self.read(1), "little")

    def read_uint16(self):
        return int.from_bytes(self.read(2), "little")

    def read_uint32(self):
        return int.from_bytes(self.read(4), "little")

    def read_string(self):
        # Length-prefixed UTF-8: one length byte, then the payload.
        n = self.read_byte()
        return self.read(n).decode("utf-8")
def decompress(data):
if data[:4] == b"SCLZ":
# Credits: https://github.com/Galaxy1036/pylzham
import lzham
dict_size = int.from_bytes(data[4:5], byteorder="big")
uncompressed_size = int.from_bytes(data[5:9], byteorder="little")
decompressed = lzham.decompress(
data[9:], uncompressed_size, {"dict_size_log2": dict_size}
)
else:
data = data[0:9] + (b"\x00" * 4) + data[9:]
decompressed = lzma.LZMADecompressor().decompress(data)
return decompressed
def process_csv(file_name, data, path):
    """Decompress *data* and write the result to path/file_name."""
    out_path = os.path.join(path, file_name)
    with open(out_path, "wb") as f:
        f.write(decompress(data))
def create_image(width, height, pixels, sub_type):
    """Decode raw SC texture *pixels* into a PIL Image for the given sub type.

    Sub types 2 and 4 are unpacked manually pixel-by-pixel; the others use
    PIL's built-in raw decoders. Raises for unknown sub types.
    """
    if sub_type == 0 or sub_type == 1:  # RGB8888
        return Image.frombytes("RGBA", (width, height), pixels, "raw")
    elif sub_type == 2:  # RGBA4444
        img = Image.new("RGBA", (width, height))
        ps = img.load()
        for h in range(height):
            for w in range(width):
                # Two bytes per pixel, little-endian; 4 bits per channel.
                i = (w + h * width) * 2
                p = int.from_bytes(pixels[i : i + 2], "little")
                # Expand each 4-bit channel to 8 bits (low nibble left zero).
                ps[w, h] = (
                    ((p >> 12) & 0xF) << 4,
                    ((p >> 8) & 0xF) << 4,
                    ((p >> 4) & 0xF) << 4,
                    ((p >> 0) & 0xF) << 4,
                )
        return img
    elif sub_type == 3:  # RBGA5551
        # NOTE(review): raw mode "RGBA;4B" is used despite the 5551 comment —
        # confirm against PIL's raw-decoder mode list.
        args = ("RGBA;4B", 0, 0)
        return Image.frombytes("RGBA", (width, height), pixels, "raw", args)
    elif sub_type == 4:  # RGB565
        img = Image.new("RGB", (width, height))
        ps = img.load()
        for h in range(height):
            for w in range(width):
                # Two bytes per pixel, little-endian; 5/6/5-bit channels.
                i = (w + h * width) * 2
                p = int.from_bytes(pixels[i : i + 2], "little")
                ps[w, h] = (
                    ((p >> 11) & 0x1F) << 3,
                    ((p >> 5) & 0x3F) << 2,
                    (p & 0x1F) << 3,
                )
        return img
    elif sub_type == 6:  # LA88
        return Image.frombytes("LA", (width, height), pixels)
    elif sub_type == 10:  # L8
        return Image.frombytes("L", (width, height), pixels)
    else:
        raise Exception(f"Unknown sub type '{sub_type}'")
def pixel_size(sub_type):
    """Return the number of bytes per pixel for an SC texture sub type."""
    bytes_per_pixel = {0: 4, 1: 4, 2: 2, 3: 2, 4: 2, 6: 2, 10: 1}
    if sub_type not in bytes_per_pixel:
        raise Exception(f"Unknown sub type '{sub_type}'")
    return bytes_per_pixel[sub_type]
def process_sc(base_name, data, path, old):
    """Extract every texture from an SC blob and save each as a PNG.

    Layout: data[10:26] holds the MD5 of the decompressed payload and the
    compressed payload starts at byte 26. Each record in the payload is a
    file_type byte and a uint32 size, followed (for texture types) by
    sub_type, width, height and pixel data. Output files are named
    "<base_name>_<index>.png" under *path*.
    """
    decompressed = decompress(data[26:])
    # Integrity check against the embedded MD5 digest.
    md5_hash = data[10:26]
    if hashlib.md5(decompressed).digest() != md5_hash:
        raise Exception("File seems corrupted")
    reader = Reader(decompressed)
    if old:
        # Credits: https://github.com/Galaxy1036/Old-Sc-Dumper
        # Old '*_dl.sc' files carry an extra header and string table; skip them.
        reader.read(17)
        count = reader.read_uint16()
        reader.read(count * 2)
        for i in range(count):  # skip strings
            reader.read_string()
    count = 0
    while len(reader):
        file_type = reader.read_byte()
        file_size = reader.read_uint32()
        # Only types 1/24/27/28 are textures; skip any other record wholesale.
        if file_type not in [1, 24, 27, 28]:
            data = reader.read(file_size)
            continue
        sub_type = reader.read_byte()
        width = reader.read_uint16()
        height = reader.read_uint16()
        print(
            f"  file_type: {file_type}, file_size: {file_size}, "
            f"sub_type: {sub_type}, width: {width}, height: {height}"
        )
        if file_type == 27 or file_type == 28:
            # Pixels are stored in 32x32 tiles; de-swizzle them back into
            # row-major order before decoding.
            pixel_sz = pixel_size(sub_type)
            block_sz = 32
            pixels = bytearray(file_size - 5)
            for _h in range(0, height, block_sz):
                for _w in range(0, width, block_sz):
                    for h in range(_h, min(_h + block_sz, height)):
                        i = (_w + h * width) * pixel_sz
                        sz = min(block_sz, width - _w) * pixel_sz
                        pixels[i : i + sz] = reader.read(sz)
            pixels = bytes(pixels)
        else:
            pixels = reader.read(file_size - 5)
        img = create_image(width, height, pixels, sub_type)
        img.save(os.path.join(path, f"{base_name}_{count}.png"))
        count += 1
def check_header(data):
    """Classify a raw payload: LZMA props byte 0x5D -> 'csv', b'SC' magic -> 'sc'."""
    if data[0] == 0x5D:
        return "csv"
    if data[:2] == b"SC":
        return "sc"
    raise Exception(" Unknown header")
def dumpsc(fn, outputPath, isOld):
    """Read one .sc/.csv file, detect its type and extract its contents.

    csv payloads are decompressed to outputPath; sc payloads have their
    textures dumped as PNGs (isOld selects the old '*_dl.sc' layout).
    """
    base_name, ext = os.path.splitext(os.path.basename(fn))
    with open(fn, "rb") as f:
        print(f.name)
        data = f.read()
        kind = check_header(data)
        if kind == "csv":
            process_csv(base_name + ext, data, outputPath)
        elif kind == "sc":
            process_sc(base_name, data, outputPath, isOld)
# if __name__ == "__main__":
# parser = argparse.ArgumentParser(
# description="Extract png files from Clash" " Royale '*_tex.sc' files"
# )
# parser.add_argument("files", help="sc file", nargs="+")
# parser.add_argument("--old", action="store_true", help="used for '*_dl.sc' files")
# parser.add_argument("-o", help="Extract pngs to directory", type=str)
# args = parser.parse_args()
# if args.o:
# path = os.path.normpath(args.o)
# else:
# path = os.path.dirname(os.path.realpath(__file__))
# for file in args.files:
# try:
# base_name, ext = os.path.splitext(os.path.basename(file))
# with open(file, "rb") as f:
# print(f.name)
# data = f.read()
# file_type = check_header(data)
# if file_type == "csv":
# process_csv(base_name + ext, data, path)
# elif file_type == "sc":
# process_sc(base_name, data, path, args.old)
# except Exception as e:
# print(f"{e.__class__.__name__} {e}") |
from functools import reduce
from scipy.sparse import csr_matrix
from scipy.sparse import kron
import numpy as np
import cirq
from openfermion.linalg import qubit_operator_sparse
from openfermion.ops import QubitOperator
from quchem.Qcircuit.Ansatz_quantum_circuit_functions import full_exponentiated_PauliWord_circuit
from quchem.Qcircuit.Hamiltonian_term_measurement_functions import change_pauliword_to_Z_basis_then_measure
from quchem.Misc_functions.Misc_functions import sparse_allclose
from quchem.Unitary_Partitioning.Unitary_partitioning_Seq_Rot import Get_Xsk_op_list
def Build_R_SeqRot_Q_circuit(anti_commuting_set, S_index,N_Qubits, check_reduction_lin_alg=False, atol=1e-8, rtol=1e-05, check_circuit=False):
    """
    Function to build R_S (made up of all R_SK terms) as a cirq circuit.

    Args:
        anti_commuting_set(list): list of anti commuting QubitOperators
        S_index(int): index for Ps in anti_commuting_set list
        N_Qubits(int): number of qubits
        check_reduction_lin_alg (optional, bool): use linear algebra (inside
            Get_Xsk_op_list) to check that Rs Hs Rs' == Ps
        atol (optional, float): absolute tolerance for the linear-algebra check
        rtol (optional, float): relative tolerance for the linear-algebra check
        check_circuit (optional, bool): build the circuit unitary and verify
            Rs Hs Rs' == Ps numerically (prints a warning on mismatch)

    Returns:
        full_RS_circuit(cirq.Circuit): Q_circuit for R_s operator
        Ps (QubitOperator): Pauli_S operator with cofactor of 1!
        gamma_l (float): normalization term
    """
    X_sk_theta_sk_list, full_normalised_set, Ps, gamma_l = Get_Xsk_op_list(anti_commuting_set,
                                                                           S_index,
                                                                           N_Qubits,
                                                                           check_reduction=check_reduction_lin_alg,
                                                                           atol=atol,
                                                                           rtol=rtol)
    circuit_list = []
    for X_sk_Op, theta_sk in X_sk_theta_sk_list:
        pauliword_X_sk = list(X_sk_Op.terms.keys())[0]
        const_X_sk = list(X_sk_Op.terms.values())[0]
        # Build exp(-i * theta_sk/2 * const * X_sk); the -1j factor is folded
        # into the QubitOperator handed to the exponentiation circuit.
        full_exp_circ_obj = full_exponentiated_PauliWord_circuit(QubitOperator(pauliword_X_sk, -1j),
                                                                 theta_sk / 2 * const_X_sk)
        circuit = cirq.Circuit(
            cirq.decompose_once((full_exp_circ_obj(*cirq.LineQubit.range(full_exp_circ_obj.num_qubits())))))
        circuit_list.append(circuit)
    full_RS_circuit = cirq.Circuit(circuit_list)
    if check_circuit:
        # Optional numeric sanity check: R_S H_S R_S^dagger should equal Ps.
        H_S = QubitOperator()
        for QubitOp in full_normalised_set['PauliWords']:
            H_S += QubitOp
        H_S_matrix = qubit_operator_sparse(H_S)
        qbits = cirq.LineQubit.range(N_Qubits)
        R_S_matrix = full_RS_circuit.unitary(qubits_that_should_be_present=qbits)
        Ps_mat=qubit_operator_sparse(Ps, n_qubits=N_Qubits)
        reduction_mat = R_S_matrix.dot(H_S_matrix.dot(R_S_matrix.conj().transpose()))
        if not sparse_allclose(Ps_mat, reduction_mat):
            print('reduction circuit incorrect... 𝑅s 𝐻s 𝑅s† != 𝑃s')
    return full_RS_circuit, Ps, gamma_l
def Full_SeqRot_Rl_Circuit(Full_Ansatz_Q_Circuit, anti_commuting_set, S_index, N_Qubits, check_reduction_lin_alg=False):
    """
    Build the full measurement circuit: ansatz + R_S + Z-basis measurement of Ps.

    Args:
        Full_Ansatz_Q_Circuit (cirq.Circuit): ansatz quantum circuit
        anti_commuting_set(list): list of anti commuting QubitOperators
        S_index(int): index for Ps in anti_commuting_set list
        N_Qubits(int): number of qubits
        check_reduction_lin_alg (optional, bool): use linear algebra to check
            that Rs Hs Rs' == Ps

    Returns:
        full_circuit (cirq.Circuit): ansatz circuit followed by R_S and the
            change-of-basis/measurement circuit for Ps
        Ps (QubitOperator): Pauli_S operator with cofactor of 1!
        gamma_l (float): normalization term
    """
    Reduction_circuit_circ, Ps, gamma_l = Build_R_SeqRot_Q_circuit(anti_commuting_set, S_index, N_Qubits,
                                                                   check_reduction_lin_alg=check_reduction_lin_alg)
    measure_PauliS_in_Z_basis_obj = change_pauliword_to_Z_basis_then_measure(Ps)
    measure_PauliS_in_Z_basis_Q_circ = cirq.Circuit(cirq.decompose_once(
        (measure_PauliS_in_Z_basis_obj(*cirq.LineQubit.range(measure_PauliS_in_Z_basis_obj.num_qubits())))))
    # cirq.Circuit flattens nested operation iterables, so the un-starred
    # first entry behaves the same as the starred ones.
    full_circuit = cirq.Circuit(
        [
            Full_Ansatz_Q_Circuit.all_operations(),
            *Reduction_circuit_circ.all_operations(),
            *measure_PauliS_in_Z_basis_Q_circ.all_operations(),
        ]
    )
    return full_circuit, Ps, gamma_l
########## Linear Algebra circuit approach
class Seq_Rot_VQE_Experiment_UP_circuit_lin_alg():
    """Statevector (linear-algebra) VQE energy evaluation with sequence-of-
    rotations unitary partitioning.

    Args:
        anti_commuting_sets (dict): key -> list of anti-commuting QubitOperators
        ansatz_circuit (cirq.Circuit): ansatz state-preparation circuit
        S_key_dict (dict, optional): key -> S_index per set; defaults to 0
            for every set when None
    """
    def __init__(self, anti_commuting_sets, ansatz_circuit, S_key_dict=None):
        self.anti_commuting_sets = anti_commuting_sets
        self.ansatz_circuit = ansatz_circuit
        self.S_key_dict = S_key_dict
        self.n_qubits = len(ansatz_circuit.all_qubits())
        ansatz_vector = ansatz_circuit.final_state_vector(ignore_terminal_measurements=True)#.reshape((2**self.n_qubits,1))
        # BUG FIX: a density matrix is |psi><psi| = outer(psi, conj(psi)).
        # The previous outer(psi, psi) is only correct for real amplitudes
        # and gives wrong expectation values for complex states.
        self.ansatz_density_mat = np.outer(ansatz_vector, ansatz_vector.conj())

    def Calc_Energy(self, check_reduction_lin_alg=False, atol=1e-8, rtol=1e-05, check_circuit=False):
        """Return the real part of the total energy expectation value.

        Sets with more than one term are measured via the R_S reduction
        circuit (scaled by gamma_l); singleton sets are measured directly
        against the ansatz density matrix.
        """
        E_list = []
        for set_key in self.anti_commuting_sets:
            anti_commuting_set = self.anti_commuting_sets[set_key]
            if len(anti_commuting_set) > 1:
                # Use S_index 0 unless an explicit index was supplied.
                S_index = 0 if self.S_key_dict is None else self.S_key_dict[set_key]
                full_RS_circuit, Ps, gamma_l = Build_R_SeqRot_Q_circuit(
                    anti_commuting_set,
                    S_index,
                    self.n_qubits,
                    check_reduction_lin_alg=check_reduction_lin_alg, atol=atol, rtol=rtol, check_circuit=check_circuit)
                # note Build_R_SeqRot_Q_circuit doesn't use a change of basis for Ps!
                Q_circuit = cirq.Circuit(
                    [
                        self.ansatz_circuit.all_operations(),
                        *full_RS_circuit.all_operations(),
                    ]
                )
                final_state_ket = (Q_circuit.final_state_vector(ignore_terminal_measurements=True)).reshape((2**self.n_qubits,1))
                # BUG FIX: conjugate the bra side (see __init__); also fixes
                # the 'denisty_mat' typo.
                density_mat = np.outer(final_state_ket, final_state_ket.conj())
                Ps_matrix = qubit_operator_sparse(Ps, n_qubits=self.n_qubits)
                # <Ps> = Tr(rho Ps), scaled by the set's normalization gamma_l.
                exp_result = np.trace(density_mat@Ps_matrix)
                E_list.append(exp_result*gamma_l)
            else:
                qubitOp = anti_commuting_set[0]
                P_matrix = qubit_operator_sparse(qubitOp, n_qubits=self.n_qubits)
                exp_result = np.trace(self.ansatz_density_mat@P_matrix)
                E_list.append(exp_result)
        return sum(E_list).real
########## sampling Quantum Circuit
class Seq_Rot_VQE_Experiment_UP_circuit_sampling():
    # TODO: currently changed functions - NO LONGER WORKING!
    # NOTE(review): Generate_Ansatz_SeqRot_R_Q_Circuit, Get_Histogram_key,
    # Simulate_Quantum_Circuit, Return_as_binary, expectation_value_by_parity
    # and Generate_Full_Q_Circuit are not imported in this module, so this
    # class raises NameError if used — kept for reference only (see TODO).
    def __init__(self, anti_commuting_sets, ansatz_circuit, n_shots, S_key_dict=None):
        # anti_commuting_sets: key -> list of anti-commuting QubitOperators
        # ansatz_circuit: ansatz state-preparation circuit
        # n_shots: number of measurement shots per circuit
        # S_key_dict: optional key -> S_index mapping (defaults to 0 per set)
        self.anti_commuting_sets = anti_commuting_sets
        self.ansatz_circuit = ansatz_circuit
        self.S_key_dict = S_key_dict
        self.n_shots = n_shots

    def Calc_Energy(self):
        """Estimate the energy by sampling each reduction circuit (broken, see TODO)."""
        E_list = []
        for set_key in self.anti_commuting_sets:
            anti_commuting_set = self.anti_commuting_sets[set_key]
            if len(anti_commuting_set) > 1:
                # Multi-term set: measure Ps after the R_S reduction circuit.
                if self.S_key_dict is None:
                    Q_circuit, Ps, gamma_l = Generate_Ansatz_SeqRot_R_Q_Circuit(self.ansatz_circuit,
                                                                                anti_commuting_set,
                                                                                0, # <- S_index set to 0
                                                                                check_reduction=False)
                else:
                    Q_circuit, Ps, gamma_l = Generate_Ansatz_SeqRot_R_Q_Circuit(self.ansatz_circuit,
                                                                                anti_commuting_set,
                                                                                self.S_key_dict[set_key],
                                                                                # <- S_index set to 0
                                                                                check_reduction=False)
                hist_key_str = Get_Histogram_key(Ps)
                int_state_counter = Simulate_Quantum_Circuit(Q_circuit, self.n_shots, hist_key_str)
                binary_state_counter = Return_as_binary(int_state_counter, hist_key_str)
                exp_result = expectation_value_by_parity(binary_state_counter)
                E_list.append(exp_result * gamma_l)
            else:
                # Singleton set: measure the lone PauliWord directly; a pure
                # identity term just contributes its constant.
                qubitOp = anti_commuting_set[0]
                for PauliWord, const in qubitOp.terms.items():
                    if PauliWord != ():
                        Q_circuit = Generate_Full_Q_Circuit(self.ansatz_circuit, qubitOp)
                        hist_key_str = Get_Histogram_key(qubitOp)
                        int_state_counter = Simulate_Quantum_Circuit(Q_circuit, self.n_shots, hist_key_str)
                        binary_state_counter = Return_as_binary(int_state_counter, hist_key_str)
                        exp_result = expectation_value_by_parity(binary_state_counter)
                        E_list.append(exp_result * const)
                    else:
                        E_list.append(const)
return sum(E_list).real |
<reponame>alisaifee/youtrack-cli
import six
from pyutrack.util import Type
# Admin types
@six.add_metaclass(Type)
class Permission(object):
    """YouTrack permission; read-only (list endpoint only), built by the Type metaclass."""
    __list__ = {'url': 'admin/permission', 'hydrate': False}
    __render__ = ('name', 'description')
    __label__ = '%(name)s'
@six.add_metaclass(Type)
class Role(object):
    """YouTrack role: CRUD via the admin REST API, plus a permissions association."""
    __get__ = {'url': 'admin/role/%(name)s'}
    __create__ = {
        'url': 'admin/role/%(name)s',
        'args': ('name', ),
        'kwargs': {
            'description': ''
        }
    }
    __delete__ = {
        'url': 'admin/role/%(name)s',
    }
    __update__ = {
        'url': 'admin/role/%(name)s',
        'args': ('description', 'newName')
    }
    __list__ = {'url': 'admin/role', 'hydrate': True}
    __label__ = '%(name)s'
    __render__ = ('name', 'description')
    # Roles aggregate permissions; add/remove hit per-permission endpoints.
    __associations__ = {
        'permissions': {
            'type': Permission,
            'get': {
                'url': 'admin/role/%(name)s/permission',
                'hydrate': False
            },
            'add': {
                'url': 'admin/role/%(name)s/permission/%(permission)s',
                'key': 'permission',
                'method': 'post'
            },
            'remove': {
                'url': 'admin/role/%(name)s/permission/%(permission)s',
                'key': 'permission'
            }
        }
    }
@six.add_metaclass(Type)
class Group(object):
    """YouTrack user group: CRUD via the admin REST API, plus a roles association."""
    __get__ = {'url': 'admin/group/%(name)s'}
    __create__ = {
        'url': 'admin/group/%(name)s',
        'args': ('name', ),
        'kwargs': {
            'autoJoin': False,
            'description': ''
        }
    }
    __delete__ = {
        'url': 'admin/group/%(name)s',
    }
    __update__ = {
        'url': 'admin/group/%(name)s',
        'args': ('description', 'autoJoin', 'newName')
    }
    __list__ = {'url': 'admin/group', 'hydrate': True}
    __render__ = ('name', 'description', 'autoJoin')
    __label__ = '%(name)s'
    # Groups aggregate roles; add uses PUT on the per-role endpoint.
    __associations__ = {
        'roles': {
            'type': Role,
            'get': {
                'url': 'admin/group/%(name)s/role',
                'hydrate': True
            },
            'add': {
                'url': 'admin/group/%(name)s/role/%(role)s',
                'key': 'role',
                'method': 'put'
            },
            'remove': {
                'url': 'admin/group/%(name)s/role/%(role)s',
                'key': 'role'
            }
        }
    }
@six.add_metaclass(Type)
class User(object):
    """YouTrack user: CRUD via the admin REST API, with group and role associations."""
    __get__ = {'url': 'admin/user/%(login)s'}
    __create__ = {
        'url': 'admin/user/%(login)s',
        'args': ('login', 'fullName', 'email', 'password')
    }
    __delete__ = {'url': 'admin/user/%(login)s'}
    __update__ = {
        'url': 'admin/user',
        'kwargs': {
            'login': '',
            'fullName': '',
            'email': '',
            'password': ''
        }
    }
    # List supports filtering by free-text query, role, permission and group.
    __list__ = {
        'url':
        'admin/user?q=%(query)s&role=%(role)s&permission=%(permission)s&group=%(group)s',
        'hydrate':
        True,
        'kwargs': {
            'project': '',
            'role': '',
            'permission': '',
            'query': '',
            'group': ''
        }
    }
    __render__ = ('login', 'email', 'fullName')
    # 'name' resolves to 'fullName' via __aliases__ below.
    __label__ = '%(name)s'
    __aliases__ = {'name': 'fullName'}
    __associations__ = {
        'groups': {
            'type': Group,
            'get': {
                'url': 'admin/user/%(login)s/group',
                'hydrate': False
            },
            'add': {
                'url': 'admin/user/%(login)s/group/%(group)s',
                'key': 'group',
                'method': 'post'
            },
            'remove': {
                'url': 'admin/user/%(login)s/group/%(group)s',
                'key': 'group'
            }
        },
        # Roles are read-only here (no add/remove endpoints declared).
        'roles': {
            'type': Role,
            'get': {
                'url': 'admin/user/%(login)s/role',
                'hydrate': True
            },
        }
    }
@six.add_metaclass(Type)
class IssueLinkType(object):
    """YouTrack issue link type (e.g. relates/duplicates) with directional names."""
    __get__ = {'url': 'admin/issueLinkType/%(name)s'}
    __create__ = {
        'url': 'admin/issueLinkType/%(name)s',
        'args': ('outwardName', 'inwardName'),
        'kwargs': {
            'directed': False
        }
    }
    __delete__ = {'url': 'admin/issueLinkType/%(name)s'}
    __update__ = {
        'url': 'admin/issueLinkType/%(name)s',
        'kwargs': {
            'newName': '',
            'outwardName': '',
            'inwardName': '',
            'directed': ''
        }
    }
    __list__ = {'url': 'admin/issueLinkType', 'hydrate': False}
    __render__ = ('name', 'inwardName', 'outwardName')
    __label__ = '[%(name)s] X->%(inwardName)s->Y, Y->%(outwardName)s->X'
@six.add_metaclass(Type)
class IssueLink(object):
    """Link between two issues; render-only (populated via Issue associations)."""
    __render__ = ('source', 'typeOutward', 'target')
    __label__ = '%(typeOutward)s %(target)s'
    pass
@six.add_metaclass(Type)
class Issue(object):
    """YouTrack issue: CRUD, TQL-filtered listing, links and command execution."""
    __get__ = {'url': 'issue/%(id)s'}
    __create__ = {
        'url': 'issue/',
        'args': ('project', ),
        'kwargs': {
            'summary': '',
            'description': ''
        }
    }
    __delete__ = {'url': 'issue/%(id)s'}
    __update__ = {
        'url': 'issue/%(id)s/',
        'kwargs': {
            'summary': '',
            'description': ''
        }
    }
    # The list endpoint wraps results in an 'issue' key; unwrap via callback.
    __list__ = {
        'url': 'issue?filter=%(filter)s&max=%(max)d',
        'args': ('filter', ),
        'kwargs': {
            'max': 100
        },
        'hydrate': False,
        'callback': lambda response: response['issue']
    }
    __aliases__ = {'project': 'projectShortName'}
    __label__ = '%(id)s'
    # Attribute paths into the raw response; 'Assignee/0/value' digs into the
    # first entry of the Assignee custom field.
    __attributes__ = {
        'id': 'id',
        'assignee': 'Assignee/0/value',
        'reporter': 'reporterName',
        'updater': 'updaterName',
        'priority': 'Priority',
        'status': 'Status'
    }
    __render__ = (
        'id', 'summary', 'status', 'assignee', 'reporter', 'updater',
        'priority', 'link'
    )
    __render_min__ = ('id', 'summary')
    __associations__ = {
        'issue_links': {
            'type': IssueLink,
            'get': {
                'url': 'issue/%(id)s/link'
            }
        }
    }

    @property
    def link(self):
        """Browser URL for this issue on the configured YouTrack instance."""
        return "%s/issue/%s" % (self.connection.base_url, self.id)

    def command(self, command=None, comment=None):
        """
        executes a command for the given issue.

        :param str command: The youtrack command to execute. See
         https://www.jetbrains.com/help/youtrack/standalone/Commands.html
         for command grammar.
        :param str comment: Optional comment posted along with the command.
        """
        url = 'issue/%(id)s/execute' % {
            'id': self.id,
        }
        self.connection.post(
            url, {'command': command, 'comment': comment}, parse=False
        )
        # Refresh local state so the applied command is reflected.
        self.get()
@six.add_metaclass(Type)
class Project(object):
    """YouTrack project: create/get/delete plus a filtered issues association."""
    __get__ = {
        'url': 'admin/project/%(projectId)s',
    }
    __create__ = {
        'url': 'admin/project/%(projectId)s',
        'args': ('projectName', 'projectLeadLogin'),
        'kwargs': {
            'projectId': None,
            'startingNumber': 1,
            'description': ''
        }
    }
    __delete__ = {'url': 'admin/project/%(projectId)s'}
    __list__ = {'url': 'project/all?verbose=true', 'hydrate': False}
    __associations__ = {
        'issues': {
            'type': Issue,
            'get': {
                'url':
                'issue/byproject/%(projectId)s?filter=%(filter)s&max=%(max)d',
                'kwargs': {
                    'filter': '',
                    'max': 100
                }
            }
        }
    }
    __attributes__ = {
        'id': 'id',
        'lead': 'lead',
        'description': 'description',
    }
    # 'id' maps to the short name, which in turn is the projectId used in URLs.
    __aliases__ = {
        'id': 'shortName',
        'shortName': 'projectId',
        'lead': 'projectLeadLogin'
    }
    __label__ = '%(id)s'
    __render__ = ('id', 'name', 'description', 'lead')
    __render_min__ = ('id', 'name')
|
# -*- coding: utf8 -*-
import sys
from locust import HttpLocust, TaskSet, task
from requests_toolbelt import MultipartEncoder
from random import randrange
import json
import requests
import variables
import time
import datetime
import evotilities
def mpiAcuerdos(Mpi,response_idCaso,r_User):
########################################################################################
### 1X. Inserta Registro en la BD (Acuerdos: Acuerdo de Inicio) ###
########################################################################################
print("Peticion para consultar datos del Caso #"+str(response_idCaso)+ ": Acuerdo de Inicio")
response = Mpi.client.get("/v1/base/casos/" + str(response_idCaso), name="Acuerdo de Inicio: Peticion para consultar datos del Caso")
evotilities.logger("Peticion para consultar datos del Caso #"+str(response_idCaso)+ ": Acuerdo de Inicio", response)
print("Peticion para Cargar las opciones del Catalogo Presentó o Recibió Llamada - Acuerdo de Inicio")
response = Mpi.client.get("/v1/catalogos/entrevista/presento-llamadas/options", name="Acuerdo de Inicio: Peticion para Cargar las opciones del Catalogo Presentó o Recibió Llamada")
evotilities.logger("Peticion para Cargar las opciones del Catalogo Presentó o Recibió Llamada - Acuerdo de Inicio", response)
print("Peticion para consultar el Tipo de Acuerdo de Inicio del Caso #"+str(response_idCaso))
response = Mpi.client.get("/v1/base/acuerdos/casos/" + str(response_idCaso) + "/tipos?tipo=Acuerdo%20Inicio", name="Acuerdo de Inicio: Peticion para consultar el Tipo de Acuerdo de Inicio del Caso")
evotilities.logger("Peticion para consultar el Tipo de Acuerdo de Inicio del Caso #"+str(response_idCaso), response)
nuc = str(json.loads(r_User.text)['Distrito'])+"/"+str(json.loads(r_User.text)['fiscaliaAcronimo'])+"/"+str(json.loads(r_User.text)['agenciaAcronimo'])+"/"+str(json.loads(r_User.text)['Municipio'])+"/"+str(response_idCaso)+"/"+str(time.strftime("%y"))+"/"+str(time.strftime("%m"))
time.sleep(3)
jsonAcInicio = {
"caso": {
"nuc": nuc,
"id": response_idCaso
},
"personas":[],
"tipo": "Acuerdo Inicio",
"nombrePersonaAcepta": "Nombre de la Persona que Acepta Asistir a JR",
"presentoLlamada": {
"id": 1
},
"manifesto": "Manifestó",
"sintesisHechos": "Sintesis de los Hechos",
"observaciones": "Observaciones"
}
json_again = json.dumps(jsonAcInicio)
print("Insertando Registro en la BD (Acuerdo de Inicio) en Caso #" +str(response_idCaso))
Mpi.client.headers['Content-Type'] = "application/json"
r_AcInicio = Mpi.client.post("/v1/base/acuerdos", data=json_again, name="Acuerdo de Inicio: Peticion para Generar El Acuerdo de Inicio del Caso (NUC)")
evotilities.logger("Insertando Registro en la BD (Acuerdo de Inicio) en Caso #" +str(response_idCaso), r_AcInicio)
Mpi.wait()
r_idAcInicio = str(json.loads(r_AcInicio.text)['id'])
############ Caratula del Acuerdo de Inicio #################
time.sleep(10)
print("Peticion para Generar el Oficio Caratula Acuerdo General #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/formatos/save/" + r_idAcInicio + "/F1_007", name="ERROR DOC - Acuerdo de Inicio: Peticion para Generar el Oficio Caratula Acuerdo General")
evotilities.logger("Peticion para Generar el Oficio Caratula Acuerdo General #"+str(response_idCaso)+ " - Acuerdo de Inicio", response)
buscar = "application/"
reemplazar_por = "application-"
time.sleep(5)
if response.status_code != 500:
response_contentType = str(json.loads(response.text)['contentType']).replace(buscar, reemplazar_por)
response_uuidEcm = str(json.loads(response.text)['uuidEcm'])
print("Peticion para Abrir el Oficio Caratula Acuerdo General del Caso #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/documento/" + response_uuidEcm + "/" + response_contentType + "/Formato", name="Acuerdo de Inicio: Peticion para Abrir el Oficio Caratula Acuerdo General del Caso")
print ("Response status code:", response.status_code)
############ Acuerdo de Inicio #################
time.sleep(10)
print("Peticion para Generar el Oficio Acuerdo de inicio #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/formatos/save/" + r_idAcInicio + "/F1_015_016", name="ERROR DOC - Acuerdo de Inicio: Peticion para Generar el Oficio Acuerdo de Inicio")
evotilities.logger("Peticion para Generar el Oficio Acuerdo de inicio #"+str(response_idCaso)+ " - Acuerdo de Inicio", response)
buscar = "application/"
reemplazar_por = "application-"
time.sleep(5)
if response.status_code != 500:
response_contentType = str(json.loads(response.text)['contentType']).replace(buscar, reemplazar_por)
response_uuidEcm = str(json.loads(response.text)['uuidEcm'])
print("Peticion para Abrir el Oficio Acuerdo Inicio del Caso #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/documento/" + response_uuidEcm + "/" + response_contentType + "/Formato", name="Acuerdo de Inicio: Peticion para Abrir el Oficio Acuerdo de Inicio del Caso")
print ("Response status code:", response.status_code)
############ Registro de Derivación a la Justicia Restaurativa #################
time.sleep(10)
print("Peticion para Generar el Oficio Registro de Derivación a la Justicia Restaurativa del Caso #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/formatos/save/" + r_idAcInicio + "/F2_117", name="ERROR DOC - Acuerdo de Inicio: Peticion para Generar el Oficio Registro de Derivación a la Justicia Restaurativa del Caso")
evotilities.logger("Peticion para Generar el Oficio Registro de Derivación a la Justicia Restaurativa del Caso #"+str(response_idCaso)+ " - Acuerdo de Inicio", response)
buscar = "application/"
reemplazar_por = "application-"
time.sleep(5)
if response.status_code != 500:
response_contentType = str(json.loads(response.text)['contentType']).replace(buscar, reemplazar_por)
response_uuidEcm = str(json.loads(response.text)['uuidEcm'])
print("Peticion para Abrir el Oficio Registro de Derivación a la Justicia Restaurativa del Caso #"+str(response_idCaso)+ " - Acuerdo de Inicio")
response = Mpi.client.get("/v1/documentos/documento/" + response_uuidEcm + "/" + response_contentType + "/Formato", name="Acuerdo de Inicio: Peticion para Abrir el Oficio Registro de Derivación a la Justicia Restaurativa del Caso")
print ("Response status code:", response.status_code)
#print ("Response content:", response.content)
########################################################################################
### 1X. Inserta Registro en la BD (Acuerdos: Acuerdo de Radicación). ###
########################################################################################
print("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
response = Mpi.client.get("/v1/base/casos/" + str(response_idCaso), name="Acuerdo de Radicación: Peticion para consultar datos del Caso")
evotilities.logger("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación", response)
print("Peticion para mostrar Acuerdo de Radicación del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
response = Mpi.client.get("/v1/base/acuerdos/casos/" + str(response_idCaso) + "/page?f=&p=0&tr=10", name="Acuerdo de Radicación: Peticion para mostrar Acuerdo de Radicación del Caso")
evotilities.logger("Peticion para mostrar Acuerdo de Radicación del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación", response)
time.sleep(3)
jsonAcRadic = {
"caso": {
"id": response_idCaso
},
"personas":[],
"tipo": "Acuerdo Radicación",
"observaciones": "Observaciones Acuerdo de Radicación"
}
json_again = json.dumps(jsonAcRadic)
print("Insertando Registro en la BD (Acuerdo de Radicación) en Caso #" +str(response_idCaso))
Mpi.client.headers['Content-Type'] = "application/json"
r_AcRadic = Mpi.client.post("/v1/base/acuerdos", data=json_again, name="Acuerdo de Radicación: Peticion para Generar un Acuerdo de Radicación")
evotilities.logger("Insertando Registro en la BD (Acuerdo de Radicación) en Caso #" +str(response_idCaso), r_AcRadic)
Mpi.wait()
r_idAcRadic = str(json.loads(r_AcRadic.text)['id'])
print("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
response = Mpi.client.get("/v1/base/casos/" + str(response_idCaso), name="Acuerdo de Radicación: Peticion para consultar datos del Caso")
evotilities.logger("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación", response)
print("Peticion para consultar datos del Acuerdo de Radicacion del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
response = Mpi.client.get("/v1/base/acuerdos/" + r_idAcRadic, name="Acuerdo de Radicación: Peticion para consultar datos del Acuerdo de Radicación del Caso")
evotilities.logger("Peticion para consultar datos del Acuerdo de Radicacion del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación", response)
time.sleep(10)
print("Peticion para Generar el Oficio Acuerdo de recepción y/o radicación del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
response = Mpi.client.get("/v1/documentos/formatos/save/" + str(r_idAcRadic) + "/F2_138", name="ERROR DOC - Acuerdo de Radicación: Peticion para Generar el Oficio Acuerdo de recepción y/o radicación")
evotilities.logger("Peticion para Generar el Oficio Acuerdo de recepción y/o radicación del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación", response)
#response_uuidEcm = str(json.loads(response.text)['uuidEcm'])
#buscar = "application/"
#reemplazar_por = "application-"
#response_contentType = str(json.loads(response.text)['contentType']).replace(buscar, reemplazar_por)
#time.sleep(5)
#print("Peticion para Abrir el Oficio Acuerdo de recepción y/o radicación del Caso #"+str(response_idCaso)+ " - Acuerdo de Radicación")
#response = Mpi.client.get("/v1/documentos/documento/" + response_uuidEcm + "/" + response_contentType + "/Formato", name="Acuerdo de Radicación: Peticion para Abrir el Oficio Acuerdo de recepción y/o radicación")
#print ("Response status code:", response.status_code)
def mpiEntrevistas(Mpi,response_idCaso):
    """Drive the 'Entrevistas' (interviews) flow for one case in the load test.

    Sequence: fetch the case, list its interviews, insert a new interview
    record, then generate the interview document (F1_008) and, when the
    generation did not fail with HTTP 500, open it.

    Args:
        Mpi: load-test user wrapper exposing ``client`` (HTTP) and ``wait()``.
        response_idCaso: id of the case the interview belongs to.
    """
    #########################################################
    ### 1X. Insert a record in the DB (Interview)         ###
    #########################################################
    print("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Entrevista")
    response = Mpi.client.get("/v1/base/casos/" + str(response_idCaso), name="Entrevista: Peticion para consultar datos del Caso")
    evotilities.logger("Peticion para consultar datos del Caso #"+str(response_idCaso)+ " - Entrevista", response)
    print("Peticion para mostrar las Entrevistas del Caso #"+str(response_idCaso)+ " - Entrevista ")
    # NOTE(review): the stat name below says "Solicitudes" although this is
    # the Entrevistas flow — confirm whether the label is intentional.
    response = Mpi.client.get("/v1/base/entrevistas/casos/" + str(response_idCaso) + "/page?f=&p=0&tr=10", name="Solicitudes: Peticion para mostrar las Entrevistas del Caso")
    evotilities.logger("Peticion para mostrar las Entrevistas del Caso #"+str(response_idCaso)+ " - Entrevista ", response)
    time.sleep(3)
    # Payload for the new interview record; mostly fixed sample data.
    jsonEntrev = {
        "personas":[],
        "sexoHeredar": "",
        "edadHeredar": "",
        "cpHeredar": "",
        "fechaNacimientoHeredar": "",
        "noTelefonoCelularHeredar": "",
        "noTelefonoParticularHeredar": "",
        "curpHeredar": "",
        "rfcHeredar": "",
        "sexo": {},
        "fechaNacimiento": "",
        "fechasNacimiento": "",
        "tipoInterviniente": {},
        "curp": "",
        "rfc": "",
        "noOficio": "2342424",
        "fechaReq": "2018-01-17T04:20:52.209Z",
        "autoridadReq": "Nombre de la Autoridad Requerida",
        "cargoTurnoAdscripcion": "Cargo, turno y Adscripcion",
        "domicilioAutoridad": "Domicilio de la Autoridad Requerida",
        "infoRequerida": "Informacion Requerida",
        "plazoDias": "4 dias",
        "apercibimiento": "Apercibimiento",
        "observaciones": "Observaciones",
        "caso": {
            "id": response_idCaso
        }
    }
    json_again = json.dumps(jsonEntrev)
    print("Insertando Registro en la BD (Entrevistas) en Caso #" +str(response_idCaso))
    Mpi.client.headers['Content-Type'] = "application/json"
    r_Entrev = Mpi.client.post("/v1/base/entrevistas", data=json_again, name="Entrevistas: Peticion para Generar una Entrevista")
    evotilities.logger("Insertando Registro en la BD (Entrevistas) en Caso #" +str(response_idCaso), r_Entrev)
    Mpi.wait()
    # Id of the freshly created interview; used to generate its document.
    r_idEntrev = str(json.loads(r_Entrev.text)['id'])
    time.sleep(10)
    print("Peticion para Generar el Oficio Entrevista del Caso #"+str(response_idCaso)+ " - Entrevistas")
    response = Mpi.client.get("/v1/documentos/formatos/save/" + r_idEntrev + "/F1_008", name="ERROR DOC - Entrevistas: Peticion para Generar el Oficio Entrevista")
    evotilities.logger("Peticion para Generar el Oficio Entrevista del Caso #"+str(response_idCaso)+ " - Entrevistas", response)
    # The content type comes back as e.g. "application/pdf"; the document
    # endpoint expects "application-pdf", hence the replacement below.
    buscar = "application/"
    reemplazar_por = "application-"
    time.sleep(5)
    if response.status_code != 500:
        response_contentType = str(json.loads(response.text)['contentType']).replace(buscar, reemplazar_por)
        response_uuidEcm = str(json.loads(response.text)['uuidEcm'])
        print("Peticion para Abrir el Oficio Entrevista del Caso #"+str(response_idCaso)+ " - Entrevistas")
        response = Mpi.client.get("/v1/documentos/documento/" + response_uuidEcm + "/" + response_contentType + "/Formato", name="Entrevistas: Peticion para Abrir el Oficio Entrevista")
        #print ("Response status code:", response.status_code)
|
<gh_stars>10-100
import numpy as np
import ctypes
from scipy.optimize import minimize
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
import test_math
# Problem dimensions for the brute-force comparisons below
# (plain int literals instead of the original int(11e0)-style expressions).
m0 = 11   # row count used for the B-side solve
m1 = 11   # base row count
m2 = 13   # larger row count for the uneven-size cases
n0 = 12   # column count used for the A-side solve
n1 = 14   # base column count
n2 = 16   # larger column count for the uneven-size cases
p = 3     # number of user side-info columns
q = 3     # number of item side-info columns
k = 4     # shared latent dimension

# Regularization and objective weights passed to both implementations.
lam = 2.5
w_main = 3.2
w_user = 11.123
w_item = 0.234
w_implicit = 0.456

nthreads = 16  # threads forwarded to the C test module
def gen_data():
    """Generate one random dense problem instance.

    Reads the module-level globals m, n, m_u, n_i, p, q, k, k_user, k_item,
    k_main, nzX, nzU and i_f that the sweep loop below sets before calling.

    Returns:
        X, W   : (m, n) value and weight matrices (NaN marks missing in X)
        U, I   : user / item side-information matrices
        A, B   : initial latent factor matrices
        C, D   : side-info factor matrices
        Ai, Bi : implicit-feedback factors (empty (0,0) arrays unless i_f)
    """
    np.random.seed(123)  # fixed seed: data must match across configurations
    X = np.random.gamma(1,1, size = (m,n))
    W = np.random.gamma(1,1, size = (m,n))
    U = np.random.gamma(1,1, size = (m_u,p))
    I = np.random.gamma(1,1, size = (n_i,q))
    A = np.random.normal(size = (m,k_user+k+k_main))
    B = np.random.normal(size = (n,k_item+k+k_main))
    C = np.random.normal(size = (p,k_user+k))
    D = np.random.normal(size = (q,k_item+k))
    Ai = np.empty((0,0), dtype="float64")
    Bi = np.empty((0,0), dtype="float64")
    if nzX > 0:
        # Punch nzX random NaN holes into X, then repair any row/column that
        # ended up entirely NaN so every row and column keeps one value.
        X[np.random.randint(m,size=nzX),np.random.randint(n,size=nzX)] = np.nan
        all_NA_row = (np.isnan(X).sum(axis=1) == X.shape[1]).astype(bool)
        X[all_NA_row, 0] = 1.
        all_NA_col = (np.isnan(X).sum(axis=0) == X.shape[0]).astype(bool)
        X[0,all_NA_col] = 1.
    if nzU > 0:
        # Same NaN treatment for the side-info matrices; note I deliberately
        # reuses nzU as its hole count.
        U[np.random.randint(m_u,size=nzU),np.random.randint(p,size=nzU)] = np.nan
        all_NA_row = (np.isnan(U).sum(axis=1) == U.shape[1]).astype(bool)
        U[all_NA_row, 0] = 1.
        all_NA_col = (np.isnan(U).sum(axis=0) == U.shape[0]).astype(bool)
        U[0,all_NA_col] = 1.
        I[np.random.randint(n_i,size=nzU),np.random.randint(q,size=nzU)] = np.nan
        all_NA_row = (np.isnan(I).sum(axis=1) == I.shape[1]).astype(bool)
        I[all_NA_row, 0] = 1.
        all_NA_col = (np.isnan(I).sum(axis=0) == I.shape[0]).astype(bool)
        I[0,all_NA_col] = 1.
    if i_f:
        # Implicit-feedback factors are only materialized when requested.
        Ai = np.random.normal(size = (m,k+k_main))
        Bi = np.random.normal(size = (n,k+k_main))
    return X, W, U, I, A, B, C, D, Ai, Bi
def dense_to_sp(X, W):
    """Convert a dense X (NaN = missing) plus weights W to typed sparse arrays.

    Returns the tuple
        (Xcsr_indptr, Xcsr_indices, Xcsr_data, Wcsr_data,
         Xcsc_indptr, Xcsc_indices, Xcsc_data, Wcsc_data)
    with dtypes matching the ctypes signatures of the C test module.
    """
    m = X.shape[0]
    n = X.shape[1]
    # Hoisted: the observed-entry mask was previously recomputed five times.
    obs = ~np.isnan(X)
    obs_flat = obs.reshape(-1)
    X_sp = X[obs].reshape(-1)
    W_sp = W[obs].reshape(-1)
    X_sp_row = np.repeat(np.arange(m), n).reshape(-1)[obs_flat].astype(ctypes.c_int)
    X_sp_col = np.tile(np.arange(n), m).reshape(-1)[obs_flat].astype(ctypes.c_int)
    Xcoo = coo_matrix((X_sp, (X_sp_row, X_sp_col)))
    Wcoo = coo_matrix((W_sp, (X_sp_row, X_sp_col)))
    Xcsr = csr_matrix(Xcoo)
    Xcsc = csc_matrix(Xcoo)
    Wcsr = csr_matrix(Wcoo)
    Wcsc = csc_matrix(Wcoo)
    return (
        Xcsr.indptr.astype(ctypes.c_size_t),
        Xcsr.indices.astype(ctypes.c_int),
        Xcsr.data.astype(ctypes.c_double),
        Wcsr.data.astype(ctypes.c_double),
        Xcsc.indptr.astype(ctypes.c_size_t),
        Xcsc.indices.astype(ctypes.c_int),
        Xcsc.data.astype(ctypes.c_double),
        Wcsc.data.astype(ctypes.c_double)
    )
def dense_to_sp_simple(X):
    """Convert a dense matrix (NaN = missing) to typed CSR arrays.

    Returns (indptr, indices, data) with the ctypes dtypes the C module expects.
    """
    m = X.shape[0]
    n = X.shape[1]
    # Hoisted: the observed-entry mask was previously recomputed three times.
    obs = ~np.isnan(X)
    obs_flat = obs.reshape(-1)
    X_sp = X[obs].reshape(-1)
    X_sp_row = np.repeat(np.arange(m), n).reshape(-1)[obs_flat].astype(ctypes.c_int)
    X_sp_col = np.tile(np.arange(n), m).reshape(-1)[obs_flat].astype(ctypes.c_int)
    Xcoo = coo_matrix((X_sp, (X_sp_row, X_sp_col)))
    Xcsr = csr_matrix(Xcoo)
    return (
        Xcsr.indptr.astype(ctypes.c_size_t),
        Xcsr.indices.astype(ctypes.c_int),
        Xcsr.data.astype(ctypes.c_double)
    )
# Reusable placeholder arrays handed to the C routines whenever an optional
# input is absent for the current configuration.
empty_1d = np.empty(0, dtype=ctypes.c_double)
empty_2d = np.empty((0,0), dtype=ctypes.c_double)
empty_int = np.empty(0, dtype=ctypes.c_int)
empty_size_t = np.empty(0, dtype=ctypes.c_size_t)
# Shared scratch workspace for every call into the test module.
buffer1 = np.empty(int(1e6), dtype=ctypes.c_double)
def get_solA():
    """Run the C implementation of the A-side collective solve.

    All inputs come from module-level globals set by the sweep loop; sparse
    or dense representations are selected via xtype/utype, with the unused
    representation replaced by the empty placeholder arrays. Returns the
    solved A matrix of shape (max(m, m_u), k_user+k+k_main).
    """
    A = np.empty((max(m,m_u),k_user+k+k_main), dtype=ctypes.c_double)
    return test_math.py_optimizeA_collective(
        A,
        B.copy(),
        C.copy(),
        Bi.copy() if i_f else empty_2d,
        m, n,
        k, k_user, k_item, k_main,
        m_u, p,
        Xcsr_p.copy() if xtype=="sparse" else empty_size_t,
        Xcsr_i.copy() if xtype=="sparse" else empty_int,
        Xcsr.copy() if xtype=="sparse" else empty_1d,
        X.copy() if xtype=="dense" else empty_2d,
        Wpass.copy() if wtype else empty_1d,
        U_csr_p.copy() if utype=="sparse" else empty_size_t,
        U_csr_i.copy() if utype=="sparse" else empty_int,
        U_csr.copy() if utype=="sparse" else empty_1d,
        U.copy() if utype=="dense" else empty_2d,
        False,
        lam, w_main, w_user, w_implicit,
        NA_as_zero_X, NA_as_zero_U,
        as_near_dense_x, as_near_dense_u,
        nthreads,
        buffer1
    )
def get_solB():
    """Run the C implementation of the B-side collective solve.

    Reuses py_optimizeA_collective with the roles of rows/columns swapped
    (A<->B, user<->item, CSR<->CSC). For dense X the matrix is either passed
    as-is with the isB flag (when n <= n_i) or explicitly transposed together
    with its weights. Returns the solved B matrix of shape
    (max(n, n_i), k_item+k+k_main).
    """
    B = np.empty((max(n,n_i),k_item+k+k_main), dtype=ctypes.c_double)
    if xtype!="dense":
        # Sparse X: the CSC arrays below already encode the transpose.
        pass_isB = False
        pass_W = Wpass
    elif n <= n_i:
        # Dense X small enough: let the C side treat the matrix as B.
        pass_isB = True
        pass_X = X
        pass_W = Wpass
    else:
        # Dense X: physically transpose X (and its weights) instead.
        pass_isB = False
        pass_X = np.ascontiguousarray(X.T)
        if xtype=="dense":
            pass_W = np.ascontiguousarray(Wpass.reshape((m,n)).T).reshape(-1)
        else:
            pass_W = Wpass
    return test_math.py_optimizeA_collective(
        B,
        A.copy(),
        D.copy(),
        Ai.copy() if i_f else empty_2d,
        n, m,
        k, k_item, k_user, k_main,
        n_i, q,
        Xcsc_p.copy() if xtype=="sparse" else empty_size_t,
        Xcsc_i.copy() if xtype=="sparse" else empty_int,
        Xcsc.copy() if xtype=="sparse" else empty_1d,
        pass_X.copy() if xtype=="dense" else empty_2d,
        pass_W.copy() if wtype else empty_1d,
        I_csr_p.copy() if utype=="sparse" else empty_size_t,
        I_csr_i.copy() if utype=="sparse" else empty_int,
        I_csr.copy() if utype=="sparse" else empty_1d,
        I.copy() if utype=="dense" else empty_2d,
        pass_isB,
        lam, w_main, w_item, w_implicit,
        NA_as_zero_X, NA_as_zero_U,
        as_near_dense_x, as_near_dense_u,
        nthreads,
        buffer1
    )
def py_evalA(x):
    """Reference objective for the A-side solve, in pure NumPy.

    x is the flattened candidate A of shape (max(m, m_u), k_user+k+k_main).
    Sums the L2 regularizer, the (optionally weighted) main reconstruction
    error against X, the side-info error against U, and — when i_f is set —
    the implicit-feedback term. Missing entries are either zeroed out of the
    residual or imputed according to the NA_as_zero_* flags. Returns half
    the total (matching the 1/2 factor of the C objective).
    """
    A = x.reshape((max(m,m_u),k_user+k+k_main))
    res = lam * A.reshape(-1).dot(A.reshape(-1))
    if wtype:
        Wuse = W.copy()
    if NA_as_zero_X:
        # Treat missing X entries as explicit zeros (with unit weight).
        X_use = X.copy()
        X_use[np.isnan(X)] = 0
        if wtype:
            Wuse[np.isnan(X)] = 1
    else:
        X_use = X
    E = X_use - A[:m,k_user:].dot(B[:n,k_item:].T)
    if not NA_as_zero_X:
        # Missing entries contribute nothing to the residual.
        E[np.isnan(X)] = 0
    if wtype:
        res += w_main * (Wuse * (E ** 2)).sum()
    else:
        res += w_main * E.reshape(-1).dot(E.reshape(-1))
    if NA_as_zero_U:
        U_use = U.copy()
        U_use[np.isnan(U)] = 0
    else:
        U_use = U
    E2 = U_use - A[:m_u,:k+k_user].dot(C.T)
    if not NA_as_zero_U:
        E2[np.isnan(U)] = 0
    res += w_user * E2.reshape(-1).dot(E2.reshape(-1))
    if i_f:
        # Implicit term: predict the 0/1 observation indicator.
        Eones = A[:m,k_user:].dot(Bi.T) - (~np.isnan(X))
        res += w_implicit * Eones.reshape(-1).dot(Eones.reshape(-1))
        if (m_u > m):
            # Rows beyond m have no observations; their prediction is error.
            res += w_implicit * ((A[m:,k_user:].dot(Bi.T))**2).sum()
    return res / 2
def py_evalB(x):
    """Reference objective for the B-side solve, in pure NumPy.

    Mirror image of py_evalA: x is the flattened candidate B of shape
    (max(n, n_i), k_item+k+k_main); the side-info term uses I and D with
    weight w_item. Returns half the total objective.
    """
    B = x.reshape((max(n,n_i),k_item+k+k_main))
    res = lam * B.reshape(-1).dot(B.reshape(-1))
    if wtype:
        Wuse = W.copy()
    if NA_as_zero_X:
        # Treat missing X entries as explicit zeros (with unit weight).
        X_use = X.copy()
        X_use[np.isnan(X)] = 0
        if wtype:
            Wuse[np.isnan(X)] = 1
    else:
        X_use = X
    E = X_use - A[:m,k_user:].dot(B[:n,k_item:].T)
    if not NA_as_zero_X:
        E[np.isnan(X)] = 0
    if wtype:
        res += w_main * (Wuse * (E ** 2)).sum()
    else:
        res += w_main * E.reshape(-1).dot(E.reshape(-1))
    if NA_as_zero_U:
        I_use = I.copy()
        I_use[np.isnan(I)] = 0
    else:
        I_use = I
    E2 = I_use - B[:n_i,:k+k_item].dot(D.T)
    if not NA_as_zero_U:
        E2[np.isnan(I)] = 0
    res += w_item * E2.reshape(-1).dot(E2.reshape(-1))
    if i_f:
        # Implicit term: predict the 0/1 observation indicator.
        Eones = Ai.dot(B[:n,k_item:].T) - (~np.isnan(X))
        res += w_implicit * Eones.reshape(-1).dot(Eones.reshape(-1))
        if (n_i > n):
            # Columns beyond n have no observations; pure penalty.
            res += w_implicit * ((Ai.dot(B[n:,k_item:].T))**2).sum()
    return res / 2
# Configuration grids swept exhaustively by the test loop below.
xtry = ["dense", "sparse"]      # representation of X
utry = ["dense", "sparse"]      # representation of U / I
wtry = [False,True]             # with / without observation weights
nztry = [0,1,25]                # number of NaN holes injected
natry = [False,True]            # NA-as-zero handling for X / U
ktry = [0,2]                    # extra k_user / k_item / k_main dimensions
ndtry = [False, True]           # "near dense" code path on/off
xlength = ["smaller", "longer", "even"]  # relative size of X vs side info
imp_f = [False, True]           # implicit-feedback factors on/off
from itertools import product

# Exhaustive sweep over every configuration combination. itertools.product
# iterates the right-most iterable fastest, so the order is identical to the
# original fourteen nested for-loops while keeping the body at one level.
for (xtype, utype, nzX, nzU,
     NA_as_zero_X, NA_as_zero_U,
     as_near_dense_x, as_near_dense_u,
     k_user, k_item, k_main,
     xlen, wtype, i_f) in product(xtry, utry, nztry, nztry,
                                  natry, natry, ndtry, ndtry,
                                  ktry, ktry, ktry,
                                  xlength, wtry, imp_f):
    # Skip contradictory or unsupported combinations.
    if (nzX == 0) and (as_near_dense_x or NA_as_zero_X):
        continue
    if (nzU == 0) and (as_near_dense_u or NA_as_zero_U):
        continue
    if (NA_as_zero_X) and (xtype!="sparse"):
        continue
    if (NA_as_zero_U) and (utype!="sparse"):
        continue
    if (as_near_dense_x) and (xtype!="dense"):
        continue
    if (as_near_dense_u) and (utype!="dense"):
        continue
    if (NA_as_zero_U or NA_as_zero_X) and (xlen!="even"):
        continue

    # ----- A-side solve: compare C module against scipy.optimize -----
    if xlen == "even":
        m = m1
        m_u = m1
        n_i = n1
    elif xlen == "smaller":
        m = m2
        m_u = m1
        n_i = n1
    else:
        m = m1
        m_u = m2
        n_i = n2
    n = n0
    X, W, U, I, A, B, C, D, Ai, Bi = gen_data()
    Xcsr_p, Xcsr_i, Xcsr, Wcsr, \
    Xcsc_p, Xcsc_i, Xcsc, Wcsc = dense_to_sp(X, W)
    U_csr_p, U_csr_i, U_csr = dense_to_sp_simple(U)
    if xtype=="sparse":
        Wpass = Wcsr
    else:
        Wpass = W.reshape(-1).copy()
    np.random.seed(456)
    x0 = np.random.normal(size = max(m,m_u)*(k_user+k+k_main))
    res_scipy = minimize(py_evalA, x0)["x"].reshape((max(m,m_u),k_user+k+k_main))
    res_module = get_solA()
    err1 = np.linalg.norm(res_module - res_scipy)
    df1 = py_evalA(res_module.reshape(-1)) - py_evalA(res_scipy.reshape(-1))

    # ----- B-side solve: same comparison with roles swapped -----
    np.random.seed(456)
    if xlen == "even":
        n = n1
    elif xlen == "smaller":
        n = n2
    else:
        n = n1
    m = m0
    X, W, U, I, A, B, C, D, Ai, Bi = gen_data()
    Xcsr_p, Xcsr_i, Xcsr, Wcsr, \
    Xcsc_p, Xcsc_i, Xcsc, Wcsc = dense_to_sp(X, W)
    I_csr_p, I_csr_i, I_csr = dense_to_sp_simple(I)
    if xtype=="sparse":
        Wpass = Wcsc
    else:
        Wpass = W.reshape(-1).copy()
    np.random.seed(456)
    x0 = np.random.normal(size = max(n,n_i)*(k_item+k+k_main))
    res_scipy = minimize(py_evalB, x0)["x"].reshape((max(n,n_i),k_item+k+k_main))
    res_module = get_solB()
    err2 = np.linalg.norm(res_module - res_scipy)
    df2 = py_evalB(res_module.reshape(-1)) - py_evalB(res_scipy.reshape(-1))

    # A result is wrong when it is far from the scipy solution, attains a
    # clearly worse objective value, or contains NaNs.
    is_wrong = (err1 > 5e0) or (err2 > 5e0) or (df1 > 5e0) or (df2 > 5e0) or np.any(np.isnan(res_module))
    if is_wrong:
        print("\n\n\n\n****ERROR BELOW****", flush=True)
    # One status line per configuration, bracketed by markers when wrong.
    print("[X %s] [U %s] [l:%s] [w:%d] [nz:%d,%d] [nd:%d,%d] [if:%d] [u:%d] [m:%d] [i:%d] [na:%d,%d] -> err:%.2f,%.2f df:%.2f,%.2f"
          % (xtype[0], utype[0], xlen[0], int(wtype), nzX, nzU, int(as_near_dense_x), int(as_near_dense_u),
             int(i_f), k_user, k_main, k_item, int(NA_as_zero_X), int(NA_as_zero_U), err1, err2, df1, df2), flush=True)
    if is_wrong:
        print("****ERROR ABOVE****\n\n\n\n", flush=True)
|
from collections import OrderedDict
from .compat import is_py2, str, bytes, integer_types, string_types
from .util import pack_bytes_into
from collections import namedtuple
from struct import Struct, error as struct_error
import inspect
getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
# AMF0 type markers: the one-byte tag written before each encoded value.
(SCRIPT_DATA_TYPE_NUMBER, SCRIPT_DATA_TYPE_BOOLEAN,
 SCRIPT_DATA_TYPE_STRING, SCRIPT_DATA_TYPE_OBJECT,
 SCRIPT_DATA_TYPE_RESERVED, SCRIPT_DATA_TYPE_NULL,
 SCRIPT_DATA_TYPE_UNDEFINED, SCRIPT_DATA_TYPE_REFERENCE,
 SCRIPT_DATA_TYPE_ECMAARRAY, SCRIPT_DATA_TYPE_OBJECTEND,
 SCRIPT_DATA_TYPE_STRICTARRAY, SCRIPT_DATA_TYPE_DATE,
 SCRIPT_DATA_TYPE_LONGSTRING) = range(13)
# Marker signalling that the payload switches to AMF3 encoding.
SCRIPT_DATA_TYPE_AMF3 = 0x11

# AMF3 type markers.
(AMF3_TYPE_UNDEFINED, AMF3_TYPE_NULL, AMF3_TYPE_FALSE, AMF3_TYPE_TRUE,
 AMF3_TYPE_INTEGER, AMF3_TYPE_DOUBLE, AMF3_TYPE_STRING, AMF3_TYPE_XML_DOC,
 AMF3_TYPE_DATE, AMF3_TYPE_ARRAY, AMF3_TYPE_OBJECT, AMF3_TYPE_XML,
 AMF3_TYPE_BYTE_ARRAY, AMF3_TYPE_VECTOR_INT, AMF3_TYPE_VECTOR_UINT,
 AMF3_TYPE_VECTOR_DOUBLE, AMF3_TYPE_VECTOR_OBJECT, AMF3_TYPE_DICT) = range(0x12)

AMF3_EMPTY_STRING = 0x01
AMF3_DYNAMIC_OBJECT = 0x0b
AMF3_CLOSE_DYNAMIC_OBJECT = 0x01
AMF3_CLOSE_DYNAMIC_ARRAY = 0x01
# Bounds of AMF3's compact integer encoding (-2**28 .. 2**28 - 1).
AMF3_MIN_INTEGER = -268435456
AMF3_MAX_INTEGER = 268435455
class PrimitiveType(Struct):
    """Fixed-size binary field: a struct.Struct that can be called directly
    to pack its arguments and can read one value from a file-like object."""

    def __call__(self, *args):
        # Calling the instance is shorthand for packing.
        return self.pack(*args)

    def read(self, fd):
        """Read exactly one value from *fd*; raise IOError on short reads."""
        raw = fd.read(self.size)
        if len(raw) != self.size:
            raise IOError("Unable to read required amount of data")
        return self.unpack(raw)[0]
class PrimitiveClassType(PrimitiveType):
    """Fixed-size field whose value is an instance of *cls* (typically a
    namedtuple): packing unpacks the instance into struct fields, unpacking
    wraps the fields back into an instance."""

    def __init__(self, format, cls):
        self.cls = cls
        PrimitiveType.__init__(self, format)

    def pack(self, val):
        # Spread the instance's fields into the underlying struct.
        return PrimitiveType.pack(self, *val)

    def pack_into(self, buf, offset, val):
        return PrimitiveType.pack_into(self, buf, offset, *val)

    def unpack(self, data):
        return (self.cls(*PrimitiveType.unpack(self, data)),)

    def unpack_from(self, buf, offset):
        return (self.cls(*PrimitiveType.unpack_from(self, buf, offset)),)
class DynamicType(object):
    """Base for variable-size fields.

    Instantiating the class packs the arguments (``__new__`` delegates to
    ``pack``); subclasses implement the classmethods below.
    """

    def __new__(cls, *args, **kwargs):
        # Calling the type is shorthand for packing.
        return cls.pack(*args, **kwargs)

    @classmethod
    def size(cls, val):
        raise NotImplementedError

    @classmethod
    def pack(cls, val):
        raise NotImplementedError

    @classmethod
    def pack_into(cls, buf, offset, val):
        raise NotImplementedError

    @classmethod
    def read(cls, fd):
        raise NotImplementedError

    @classmethod
    def unpack_from(cls, buf, offset):
        raise NotImplementedError

    @classmethod
    def unpack(cls, buf):
        # Default: unpack starting at the beginning of the buffer.
        return cls.unpack_from(buf, 0)
class TwosComplement(PrimitiveType):
    """Signed wrapper around an unsigned primitive.

    Values are stored as the two's complement of the wrapped unsigned
    primitive, giving a signed range of [-2**(bits-1), 2**(bits-1) - 1].
    Used below for the 24-bit signed integers (S24BE / S24LE).
    """

    def __init__(self, primitive):
        self.primitive = primitive
        bits = self.primitive.size * 8
        self.maxval = 1 << bits
        self.midval = self.maxval >> 1  # the sign bit
        self.upper = self.midval - 1
        self.lower = -self.midval

    @property
    def size(self):
        # Bug fix: this was hard-coded to 3, which is only correct for the
        # 24-bit primitives used in this file; defer to the wrapped
        # primitive so the wrapper works for any width.
        return self.primitive.size

    def _check_range(self, val):
        # Same struct.error (and message shape) the plain primitives raise.
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.primitive.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)

    def pack(self, val):
        self._check_range(val)
        if val < 0:
            val = val + self.maxval  # two's complement of negatives
        return self.primitive.pack(val)

    def pack_into(self, buf, offset, val):
        self._check_range(val)
        if val < 0:
            val = val + self.maxval
        return self.primitive.pack_into(buf, offset, val)

    def unpack(self, data):
        val = self.primitive.unpack(data)[0]
        if val & self.midval:  # sign bit set -> negative value
            val = val - self.maxval
        return (val,)

    def unpack_from(self, buf, offset):
        val = self.primitive.unpack_from(buf, offset)[0]
        if val & self.midval:
            val = val - self.maxval
        return (val,)
class HighLowCombo(PrimitiveType):
    """Unsigned integer wider than one struct code, split over two fields
    (e.g. ">HB" for a 24-bit value).

    When ``reverse`` is True the first struct field receives
    ``val >> highbits`` and the second ``val & ((1 << highbits) - 1)``;
    when False the roles are swapped. Unpacking recombines them the same way.
    """

    def __init__(self, format, highbits, reverse=True):
        PrimitiveType.__init__(self, format)
        self.highbits = highbits
        self.lowmask = (1 << highbits) - 1
        self.reverse = reverse
        # Valid range of the combined unsigned value.
        self.lower = 0
        self.upper = (1 << (self.size * 8)) - 1

    def pack(self, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        if self.reverse:
            high = val >> self.highbits
            low = val & self.lowmask
        else:
            high = val & self.lowmask
            low = val >> self.highbits
        return PrimitiveType.pack(self, high, low)

    def pack_into(self, buf, offset, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        if self.reverse:
            high = val >> self.highbits
            low = val & self.lowmask
        else:
            high = val & self.lowmask
            low = val >> self.highbits
        return PrimitiveType.pack_into(self, buf, offset, high, low)

    def unpack(self, data):
        high, low = PrimitiveType.unpack(self, data)
        if self.reverse:
            ret = high << self.highbits
            ret |= low
        else:
            ret = high
            ret |= low << self.highbits
        return (ret,)

    def unpack_from(self, buf, offset):
        high, low = PrimitiveType.unpack_from(self, buf, offset)
        if self.reverse:
            ret = high << self.highbits
            ret |= low
        else:
            ret = high
            ret |= low << self.highbits
        return (ret,)
class FixedPoint(PrimitiveType):
    """Fixed-point number stored as an integer scaled by 2**bits."""

    def __init__(self, format, bits):
        # Scale factor between the float value and its integer wire form.
        self.divider = float(1 << bits)
        PrimitiveType.__init__(self, format)

    def pack(self, val):
        return PrimitiveType.pack(self, int(val * self.divider))

    def pack_into(self, buf, offset, val):
        return PrimitiveType.pack_into(self, buf, offset, int(val * self.divider))

    def unpack(self, data):
        return (PrimitiveType.unpack(self, data)[0] / self.divider,)

    def unpack_from(self, buf, offset):
        return (PrimitiveType.unpack_from(self, buf, offset)[0] / self.divider,)
class PaddedBytes(PrimitiveType):
    """Fixed-width ASCII field, padded on pack and right-stripped on unpack.

    NOTE(review): assumes *padding* is a single character — with a longer
    padding string ``self.padding * paddinglen`` would exceed the field
    width; confirm callers only pass one char (FourCC below uses " ").
    """

    def __init__(self, size, padding):
        self.padded_size = size
        self.padding = bytes(padding, "ascii")

    @property
    def size(self):
        # Fixed logical width of the field (overrides Struct.size).
        return self.padded_size

    def pack(self, val):
        # Truncate to the field width, then pad out to it.
        rval = bytes(val[:self.size], "ascii")
        if len(rval) < self.size:
            paddinglen = self.size - len(rval)
            rval += self.padding * paddinglen
        return rval

    def pack_into(self, buf, offset, val):
        rval = bytes(val[:self.size], "ascii")
        offset = pack_bytes_into(buf, offset, rval)
        if len(rval) < self.size:
            paddinglen = self.size - len(rval)
            offset = pack_bytes_into(buf, offset, self.padding * paddinglen)

    def unpack(self, data):
        return (str(data.rstrip(self.padding), "ascii"),)

    def unpack_from(self, buf, offset):
        data = buf[offset:offset + self.padded_size]
        return (str(data.rstrip(self.padding), "ascii"),)
# 8-bit integers
U8 = PrimitiveType("B")
S8 = PrimitiveType("b")

# 16-bit integers
U16BE = PrimitiveType(">H")
S16BE = PrimitiveType(">h")
U16LE = PrimitiveType("<H")
S16LE = PrimitiveType("<h")

# 24-bit integers (no native struct code; composed from 16 + 8 bit fields)
U24BE = HighLowCombo(">HB", 8, True)
S24BE = TwosComplement(U24BE)
U24LE = HighLowCombo("<HB", 16, False)
S24LE = TwosComplement(U24LE)

# 32-bit integers
U32BE = PrimitiveType(">I")
S32BE = PrimitiveType(">i")
U32LE = PrimitiveType("<I")
S32LE = PrimitiveType("<i")

# 64-bit integers
U64BE = PrimitiveType(">Q")
U64LE = PrimitiveType("<Q")

# Fixed point numbers
U8_8BE = FixedPoint(">H", 8)
S8_8BE = FixedPoint(">h", 8)
# Bug fix: the 16.16 big-endian variants used little-endian struct formats
# ("<I"/"<i"), contradicting their *BE names and every other BE constant.
U16_16BE = FixedPoint(">I", 16)
S16_16BE = FixedPoint(">i", 16)
U8_8LE = FixedPoint("<H", 8)
S8_8LE = FixedPoint("<h", 8)
U16_16LE = FixedPoint("<I", 16)
S16_16LE = FixedPoint("<i", 16)

DoubleLE = PrimitiveType("<d")
DoubleBE = PrimitiveType(">d")

# Various types
FourCC = PaddedBytes(4, " ")

# Script data types
ScriptDataNumber = DoubleBE
ScriptDataBoolean = PrimitiveType("?")
class U3264(DynamicType):
    """Unsigned integer whose width depends on the container version:
    64-bit big-endian when version == 1, otherwise 32-bit big-endian."""

    @staticmethod
    def _primitive(version):
        # Single place that decides between the two widths.
        return U64BE if version == 1 else U32BE

    @classmethod
    def size(cls, val, version):
        return cls._primitive(version).size

    @classmethod
    def pack(cls, val, version):
        return cls._primitive(version)(val)

    @classmethod
    def pack_into(cls, buf, offset, val, version):
        prim = cls._primitive(version)
        prim.pack_into(buf, offset, val)
        return offset + prim.size

    @classmethod
    def read(cls, fd, version):
        return cls._primitive(version).read(fd)

    @classmethod
    def unpack_from(cls, buf, offset, version):
        prim = cls._primitive(version)
        rval = prim.unpack_from(buf, offset)
        offset += prim.size
        return (rval, offset)
class String(DynamicType):
    """Plain encoded string with no length prefix or terminator."""

    @classmethod
    def size(cls, *args, **kwargs):
        # The size is simply the length of the encoded form.
        return len(cls.pack(*args, **kwargs))

    @classmethod
    def pack(cls, val, encoding="utf8", errors="ignore"):
        return val.encode(encoding, errors)

    @classmethod
    def pack_into(cls, buf, offset, val,
                  encoding="utf8", errors="ignore"):
        encoded = val.encode(encoding, errors)
        return pack_bytes_into(buf, offset, encoded)
class CString(String):
    """Null-terminated string."""
    EndMarker = b"\x00"

    @classmethod
    def pack(cls, *args, **kwargs):
        rval = String.pack(*args, **kwargs)
        rval += CString.EndMarker
        return rval

    @classmethod
    def pack_into(cls, buf, offset, *args, **kwargs):
        offset = String.pack_into(buf, offset, *args, **kwargs)
        U8.pack_into(buf, offset, 0)  # terminating NUL
        return offset + 1

    @classmethod
    def read(cls, fd, encoding="utf8", errors="ignore"):
        # Read byte-by-byte until EOF or the terminator.
        rval = b""
        while True:
            ch = fd.read(1)
            if len(ch) == 0 or ch == CString.EndMarker:
                break
            rval += ch
        return rval.decode(encoding, errors)

    @classmethod
    def unpack_from(cls, buf, offset, encoding="utf8", errors="ignore"):
        # NOTE(review): find() returns -1 when no terminator exists, which
        # silently yields a truncated slice and a bogus offset — confirm
        # callers guarantee a NUL-terminated buffer.
        end = buf[offset:].find(b"\x00")
        rval = buf[offset:offset + end].decode(encoding, errors)
        offset += end + 1
        return (rval, offset)
class ScriptDataType(object):
    # Marker base for AMF0 script-data containers; subclasses override
    # __identifier__ with the type tag written before their payload.
    __identifier__ = 0
class ScriptDataString(String):
    """AMF0 string: a big-endian length prefix followed by encoded bytes."""
    # Primitive used for the length prefix; the long-string subclass
    # swaps in a 32-bit one.
    __size_primitive__ = U16BE

    @classmethod
    def pack(cls, val, *args, **kwargs):
        rval = String.pack(val, *args, **kwargs)
        size = cls.__size_primitive__(len(rval))
        return size + rval

    @classmethod
    def pack_into(cls, buf, offset, val, *args, **kwargs):
        # Write the body first (leaving room for the prefix), then backfill
        # the prefix from the number of bytes actually written.
        noffset = String.pack_into(buf, offset + cls.__size_primitive__.size,
                                   val, *args, **kwargs)
        cls.__size_primitive__.pack_into(buf, offset,
                                         (noffset - offset) - cls.__size_primitive__.size)
        return noffset

    @classmethod
    def read(cls, fd, encoding="utf8", errors="ignore"):
        size = cls.__size_primitive__.read(fd)
        data = fd.read(size)
        return data.decode(encoding, errors)

    @classmethod
    def unpack_from(cls, buf, offset, encoding="utf8", errors="ignore"):
        size = cls.__size_primitive__.unpack_from(buf, offset)[0]
        offset += cls.__size_primitive__.size
        data = buf[offset:offset + size].decode(encoding, errors)
        offset += size
        return (data, offset)
class ScriptDataLongString(ScriptDataString):
    # Same wire format as ScriptDataString but with a 32-bit length prefix.
    __size_primitive__ = U32BE
class ScriptDataObjectEnd(Exception):
    """Raised by value readers when the AMF0 object-end marker is hit."""
    pass
class ScriptDataObject(OrderedDict, ScriptDataType):
    """AMF0 object: ordered key/value pairs terminated by an empty key
    followed by the object-end marker byte."""
    __identifier__ = SCRIPT_DATA_TYPE_OBJECT

    @classmethod
    def size(cls, val):
        # 3 = 2-byte empty-key length prefix + 1-byte object-end marker.
        size = 3
        for key, value in val.items():
            size += ScriptDataString.size(key)
            size += ScriptDataValue.size(value)
        return size

    @classmethod
    def pack(cls, val):
        rval = b""
        for key, value in val.items():
            rval += ScriptDataString(key)
            rval += ScriptDataValue.pack(value)
        # Zero length key + object end identifier ends object
        rval += ScriptDataString("")
        rval += U8(SCRIPT_DATA_TYPE_OBJECTEND)
        return rval

    @classmethod
    def pack_into(cls, buf, offset, val):
        for key, value in val.items():
            offset = ScriptDataString.pack_into(buf, offset, key)
            offset = ScriptDataValue.pack_into(buf, offset, value)
        # Zero length key + object end identifier ends object
        offset = ScriptDataString.pack_into(buf, offset, "")
        U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_OBJECTEND)
        return offset + U8.size

    @classmethod
    def read(cls, fd):
        rval = cls()
        while True:
            try:
                key = ScriptDataString.read(fd)
                value = ScriptDataValue.read(fd)
            except ScriptDataObjectEnd:
                # The value reader consumed the end marker.
                break
            if len(key) == 0:
                break
            rval[key] = value
        return rval

    @classmethod
    def unpack_from(cls, buf, offset):
        rval = cls()
        while True:
            try:
                key, offset = ScriptDataString.unpack_from(buf, offset)
                value, offset = ScriptDataValue.unpack_from(buf, offset)
            except ScriptDataObjectEnd:
                # Step past the end-marker byte before returning.
                offset += 1
                break
            if len(key) == 0:
                break
            rval[key] = value
        return (rval, offset)
class ScriptDataECMAArray(ScriptDataObject):
    """AMF0 ECMA array: a 32-bit length prefix followed by object-style
    key/value pairs and the same terminator as ScriptDataObject."""
    __identifier__ = SCRIPT_DATA_TYPE_ECMAARRAY

    @classmethod
    def size(cls, val):
        # 4 extra bytes for the length prefix.
        return 4 + ScriptDataObject.size(val)

    @classmethod
    def pack(cls, val):
        rval = U32BE(len(val))
        rval += ScriptDataObject.pack(val)
        return rval

    @classmethod
    def pack_into(cls, buf, offset, val):
        U32BE.pack_into(buf, offset, len(val))
        return ScriptDataObject.pack_into(buf, offset + U32BE.size,
                                          val)

    @classmethod
    def read(cls, fd):
        # The length prefix is consumed but not relied upon; the
        # object-end terminator decides where the array stops.
        U32BE.read(fd) # Length
        val = ScriptDataObject.read(fd)
        return cls(val)

    @classmethod
    def unpack_from(cls, buf, offset):
        U32BE.unpack_from(buf, offset) # Length
        offset += U32BE.size
        val, offset = ScriptDataObject.unpack_from(buf, offset)
        return (cls(val), offset)
class ScriptDataStrictArray(DynamicType):
    """AMF0 strict array: a 32-bit element count followed by each value."""

    @classmethod
    def size(cls, val):
        # 4 bytes of count plus the encoded size of every element.
        return 4 + sum(ScriptDataValue.size(sdval) for sdval in val)

    @classmethod
    def pack(cls, val):
        chunks = [U32BE(len(val))]
        chunks.extend(ScriptDataValue.pack(sdval) for sdval in val)
        return b"".join(chunks)

    @classmethod
    def pack_into(cls, buf, offset, val):
        U32BE.pack_into(buf, offset, len(val))
        offset += U32BE.size
        for sdval in val:
            offset = ScriptDataValue.pack_into(buf, offset, sdval)
        return offset

    @classmethod
    def read(cls, fd):
        length = U32BE.read(fd)
        return [ScriptDataValue.read(fd) for _ in range(length)]

    @classmethod
    def unpack_from(cls, buf, offset):
        length = U32BE.unpack_from(buf, offset)[0]
        offset += U32BE.size
        values = []
        for _ in range(length):
            item, offset = ScriptDataValue.unpack_from(buf, offset)
            values.append(item)
        return (values, offset)
# AMF0 date value: a double timestamp plus a signed 16-bit offset field
# (wire format ">dh").
ScriptDataDate = namedtuple("ScriptDataDate", ["timestamp", "offset"])
ScriptDataDateStruct = PrimitiveClassType(">dh", ScriptDataDate)
ScriptDataDate.__identifier__ = SCRIPT_DATA_TYPE_DATE
ScriptDataDate.__packer__ = ScriptDataDateStruct

# AMF0 reference value: an unsigned 16-bit index (wire format ">H").
ScriptDataReference = namedtuple("ScriptDataReference", ["reference"])
ScriptDataReferenceStruct = PrimitiveClassType(">H", ScriptDataReference)
ScriptDataReference.__identifier__ = SCRIPT_DATA_TYPE_REFERENCE
ScriptDataReference.__packer__ = ScriptDataReferenceStruct
class ScriptDataValue(DynamicType, ScriptDataType):
    """A tagged AMF0 value: one marker byte followed by the payload."""
    # key: identifier, value: unpacker class
    PrimitiveReaders = {
        SCRIPT_DATA_TYPE_NUMBER: ScriptDataNumber,
        SCRIPT_DATA_TYPE_BOOLEAN: ScriptDataBoolean,
        SCRIPT_DATA_TYPE_REFERENCE: ScriptDataReferenceStruct,
        SCRIPT_DATA_TYPE_DATE: ScriptDataDateStruct,
    }
    DynamicReaders = {
        SCRIPT_DATA_TYPE_STRING: ScriptDataString,
        SCRIPT_DATA_TYPE_LONGSTRING: ScriptDataLongString,
        SCRIPT_DATA_TYPE_OBJECT: ScriptDataObject,
        SCRIPT_DATA_TYPE_ECMAARRAY: ScriptDataECMAArray,
        SCRIPT_DATA_TYPE_STRICTARRAY: ScriptDataStrictArray,
    }
    # Combined marker -> reader dispatch table used by read/unpack_from.
    Readers = PrimitiveReaders.copy()
    Readers.update(DynamicReaders)
    @classmethod
    def size(cls, val):
        """Return the encoded size of *val* in bytes, marker byte included.

        Note: the bool branch must come before int/float because bool is a
        subclass of int.
        """
        size = 1  # the type-marker byte
        if isinstance(val, bool):
            size += ScriptDataBoolean.size
        elif isinstance(val, (int, float)):
            size += ScriptDataNumber.size
        elif isinstance(val, list):
            size += ScriptDataStrictArray.size(val)
        elif isinstance(val, string_types):
            # Strings that overflow a 16-bit length use the long form.
            if len(val) > 0xFFFF:
                size += ScriptDataLongString.size(val)
            else:
                size += ScriptDataString.size(val)
        elif isinstance(val, ScriptDataType):
            cls = type(val)
            size += cls.size(val)
        elif type(val) in (ScriptDataDate, ScriptDataReference):
            cls = type(val)
            packer = cls.__packer__
            size += packer.size
        elif isinstance(val, AMF3ObjectBase):
            # AMF3 payloads carry their own marker inside AMF3Value.
            size += U8.size
            size += AMF3Value.size(val)
        return size
    @classmethod
    def pack(cls, val):
        """Encode *val* as its one-byte AMF0 marker followed by the payload.

        Raises ValueError for unsupported Python types. The bool branch must
        precede int/float since bool is a subclass of int.
        """
        rval = b""
        if isinstance(val, bool):
            rval += U8(SCRIPT_DATA_TYPE_BOOLEAN)
            rval += ScriptDataBoolean(val)
        elif isinstance(val, (int, float)):
            rval += U8(SCRIPT_DATA_TYPE_NUMBER)
            rval += ScriptDataNumber(val)
        elif isinstance(val, list):
            rval += U8(SCRIPT_DATA_TYPE_STRICTARRAY)
            rval += ScriptDataStrictArray(val)
        elif isinstance(val, string_types):
            # Long strings get a 32-bit length prefix.
            if len(val) > 0xFFFF:
                rval += U8(SCRIPT_DATA_TYPE_LONGSTRING)
                rval += ScriptDataLongString(val)
            else:
                rval += U8(SCRIPT_DATA_TYPE_STRING)
                rval += ScriptDataString(val)
        elif val is None:
            # Null is marker-only; no payload bytes.
            rval += U8(SCRIPT_DATA_TYPE_NULL)
        elif isinstance(val, ScriptDataType):
            cls = type(val)
            rval += U8(cls.__identifier__)
            rval += cls.pack(val)
        elif type(val) in (ScriptDataDate, ScriptDataReference):
            cls = type(val)
            packer = cls.__packer__
            rval += U8(cls.__identifier__)
            rval += packer.pack(val)
        elif isinstance(val, AMF3ObjectBase):
            rval += U8(SCRIPT_DATA_TYPE_AMF3)
            rval += AMF3Value.pack(val)
        else:
            raise ValueError("Unable to pack value of type {0}".format(type(val)))
        return rval
@classmethod
def pack_into(cls, buf, offset, val):
if isinstance(val, bool):
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_BOOLEAN)
offset += U8.size
ScriptDataBoolean.pack_into(buf, offset, val)
offset += ScriptDataBoolean.size
elif isinstance(val, (int, float)):
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_NUMBER)
offset += U8.size
ScriptDataNumber.pack_into(buf, offset, val)
offset += ScriptDataNumber.size
elif isinstance(val, list):
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_STRICTARRAY)
offset += U8.size
offset = ScriptDataStrictArray.pack_into(buf, offset, val)
elif isinstance(val, string_types):
if len(val) > 0xFFFF:
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_LONGSTRING)
offset += U8.size
offset = ScriptDataLongString.pack_into(buf, offset, val)
else:
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_STRING)
offset += U8.size
offset = ScriptDataString.pack_into(buf, offset, val)
elif val is None:
U8.pack_into(buf, offset, SCRIPT_DATA_TYPE_NULL)
elif isinstance(val, ScriptDataType):
cls = type(val)
U8.pack_into(buf, offset, cls.__identifier__)
offset += U8.size
offset = cls.pack_into(buf, offset, val)
elif type(val) in (ScriptDataDate, ScriptDataReference):
cls = type(val)
packer = cls.__packer__
U8.pack_into(buf, offset, cls.__identifier__)
offset += U8.size
packer.pack_into(buf, offset, val)
offset += packer.size
else:
raise ValueError("Unable to pack value of type {0}".format(type(val)))
return offset
@classmethod
def read(cls, fd, marker=None):
if marker is None:
type_ = U8.read(fd)
else:
type_ = marker
if type_ == SCRIPT_DATA_TYPE_AMF3:
return AMF3Value.read(fd)
elif type_ in ScriptDataValue.Readers:
return ScriptDataValue.Readers[type_].read(fd)
elif type_ == SCRIPT_DATA_TYPE_OBJECTEND:
raise ScriptDataObjectEnd
elif (type_ == SCRIPT_DATA_TYPE_NULL or
type_ == SCRIPT_DATA_TYPE_UNDEFINED):
return None
else:
raise IOError("Unhandled script data type: {0}".format(type_))
@classmethod
def unpack_from(cls, buf, offset):
type_ = U8.unpack_from(buf, offset)[0]
offset += U8.size
if type_ in ScriptDataValue.DynamicReaders:
return ScriptDataValue.Readers[type_].unpack_from(buf, offset)
elif type_ in ScriptDataValue.PrimitiveReaders:
reader = ScriptDataValue.PrimitiveReaders[type_]
rval = reader.unpack_from(buf, offset)[0]
offset += reader.size
return (rval, offset)
elif type_ == SCRIPT_DATA_TYPE_OBJECTEND:
raise ScriptDataObjectEnd
elif (type_ == SCRIPT_DATA_TYPE_NULL or
type_ == SCRIPT_DATA_TYPE_UNDEFINED):
return (None, offset)
else:
raise IOError("Unhandled script data type: {0}".format(hex(type_)))
class AMF0Value(ScriptDataValue):
    """AMF0 value; identical wire format to FLV script-data values."""
    pass
class AMF0String(ScriptDataString):
    """AMF0 string; same encoding as FLV script-data strings."""
    pass
# AMF0 numbers and AMF3 doubles share the script-data number wire format.
AMF0Number = ScriptDataNumber
AMF3Double = ScriptDataNumber
class AMF3Type(ScriptDataType):
    """Marker base class for AMF3-specific (de)serializer types."""
    pass
class AMF3Integer(DynamicType, AMF3Type):
    """Variable-length 29-bit integer ("U29") used throughout AMF3.

    Encoding: each of the first 1-3 bytes carries 7 payload bits with the
    high bit set as a continuation flag; when a 4th byte is present it
    carries a full 8 bits.
    """
    __identifier__ = AMF3_TYPE_INTEGER
    @classmethod
    def size(cls, val):
        """Return the encoded byte length (1-4) for *val*."""
        val &= 0x1fffffff  # clamp to 29 bits
        if val < 0x80:
            return 1
        elif val < 0x4000:
            return 2
        elif val < 0x200000:
            return 3
        elif val < 0x40000000:
            # Always true after the 29-bit mask, so this is the else case.
            return 4
    @classmethod
    def pack(cls, val):
        """Serialize *val* to its U29 byte string."""
        size = cls.size(val)
        buf = bytearray(size)
        offset = cls.pack_into(buf, 0, val)
        return bytes(buf[:offset])
    @classmethod
    def pack_into(cls, buf, offset, val):
        """Write the U29 encoding of *val* into *buf*; return the new offset."""
        val &= 0x1fffffff
        if val < 0x80:
            buf[offset] = val
            offset += 1
        elif val < 0x4000:
            buf[offset] = (val >> 7 & 0x7f) | 0x80
            buf[offset + 1] = val & 0x7f
            offset += 2
        elif val < 0x200000:
            buf[offset] = (val >> 14 & 0x7f) | 0x80
            buf[offset + 1] = (val >> 7 & 0x7f) | 0x80
            buf[offset + 2] = val & 0x7f
            offset += 3
        elif val < 0x40000000:
            # 4-byte form: the final byte holds 8 bits instead of 7.
            buf[offset] = (val >> 22 & 0x7f) | 0x80
            buf[offset + 1] = (val >> 15 & 0x7f) | 0x80
            buf[offset + 2] = (val >> 8 & 0x7f) | 0x80
            buf[offset + 3] = val & 0xff
            offset += 4
        return offset
    @classmethod
    def read(cls, fd):
        """Read one U29 from *fd*, sign-extending 29-bit negative values."""
        rval, byte_count = 0, 0
        byte = U8.read(fd)
        # While the continuation bit is set, accumulate 7 bits per byte,
        # for at most 3 leading bytes.
        while (byte & 0x80) != 0 and byte_count < 3:
            rval <<= 7
            rval |= byte & 0x7f
            byte = U8.read(fd)
            byte_count += 1
        if byte_count < 3:
            rval <<= 7
            rval |= byte & 0x7F
        else:
            # A fourth byte contributes all 8 of its bits.
            rval <<= 8
            rval |= byte & 0xff
        if (rval & 0x10000000) != 0:
            # Bit 28 set: interpret as a signed 29-bit value.
            rval -= 0x20000000
        return rval
class AMF3String(String):
    """AMF3 UTF-8 string with reference-table ("cache") support.

    Header is a U29: low bit 0 -> reference index into *cache*; low bit 1 ->
    literal, with the remaining bits holding the UTF-8 byte length.
    """
    @classmethod
    def size(cls, val, cache):
        """Return the encoded size of *val*, updating *cache* like pack()."""
        data = String.pack(val, "utf8", "ignore")
        size = len(data)
        if size == 0:
            # Empty strings are never cached; encoded as a single byte.
            return U8.size
        elif val in cache:
            index = cache.index(val)
            return AMF3Integer.size(index << 1)
        else:
            cache.append(val)
            return AMF3Integer.size(size << 1 | 1) + size
    @classmethod
    def pack(cls, val, cache):
        """Serialize *val*, emitting a reference when it is already cached."""
        data = String.pack(val, "utf8", "ignore")
        size = len(data)
        if size == 0:
            return U8(AMF3_EMPTY_STRING)
        elif val in cache:
            index = cache.index(val)
            return AMF3Integer(index << 1)
        else:
            cache.append(val)
            chunks = []
            chunks.append(AMF3Integer(size << 1 | 1))
            chunks.append(data)
            return b"".join(chunks)
    @classmethod
    def read(cls, fd, cache):
        """Read a string, resolving references against *cache*."""
        header = AMF3Integer.read(fd)
        if (header & 1) == 0:
            index = header >> 1
            return cache[index]
        else:
            size = header >> 1
            data = fd.read(size)
            rval = data.decode("utf8", "ignore")
            if len(data) > 0:
                # Only non-empty strings enter the reference table.
                cache.append(rval)
            return rval
class AMF3ObjectBase(object):
    """Base class for typed objects read from / written to AMF3.

    Class attributes describe the AMF3 "traits": __dynamic__ (ad-hoc
    members allowed), __externalizable__, and the sealed __members__ list.
    Named classes can be registered by alias via the register() decorator.
    """
    __dynamic__ = False
    __externalizable__ = False
    __members__ = []
    _registry = {}

    def __init__(self, *args, **kwargs):
        # Every keyword argument becomes an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __repr__(self):
        # BUG FIX: the closing ">" was missing from the format string.
        return "<{0} {1!r}>".format(self.__class__.__name__, self.__dict__)

    @classmethod
    def register(cls, name):
        """Class decorator registering *amfcls* under the AMF alias *name*."""
        def deco(amfcls):
            amfcls.__name__ = name
            if not amfcls.__members__:
                # Default the sealed members to the __init__ parameters
                # (minus self).
                amfcls.__members__ = getargspec(amfcls.__init__).args[1:]
            cls._registry[name] = amfcls
            return amfcls
        return deco

    @classmethod
    def lookup(cls, name):
        """Return the registered class for *name*, or None."""
        return cls._registry.get(name, None)

    @classmethod
    def create(cls, name, externalizable, dynamic, members):
        """Synthesize a subclass for an unregistered AMF3 class name."""
        if is_py2:
            name = name.encode("utf8")
        amfcls = type(name, (cls,), {})
        amfcls.__externalizable__ = externalizable
        # BUG FIX: the dynamic flag was silently dropped, so dynamic objects
        # decoded through create() never had their dynamic section handled.
        amfcls.__dynamic__ = dynamic
        amfcls.__members__ = members
        return amfcls
class AMF3Object(OrderedDict, AMF3ObjectBase):
    # Anonymous AMF3 object: keeps member insertion order and always
    # carries dynamic (ad-hoc) members.
    __dynamic__ = True
class AMF3ObjectPacker(DynamicType, AMF3Type):
    """Packs/reads AMF3 objects, maintaining the string, object and traits
    reference tables (caches) required by the format.

    Traits header bit layout: bit0 literal-object, bit1 literal-traits,
    bit2 externalizable, bit3 dynamic, remaining bits = sealed member count.
    """
    __identifier__ = AMF3_TYPE_OBJECT
    @classmethod
    def size(cls, val, str_cache, object_cache, traits_cache):
        """Return the encoded size of *val*, updating the caches like pack()."""
        if val in object_cache:
            # Already sent: only a U29 reference would be emitted.
            index = object_cache.index(val)
            return AMF3Integer.size(index << 1)
        else:
            object_cache.append(val)
            size = 0
            traits = type(val)
            if traits in traits_cache:
                # Traits reference (literal-object bit plus reference index).
                index = traits_cache.index(traits)
                size += AMF3Integer.size(index << 2 | 0x01)
            else:
                header = 0x03  # literal object + literal traits
                if traits.__dynamic__:
                    header |= 0x02 << 2
                if traits.__externalizable__:
                    header |= 0x01 << 2
                header |= (len(traits.__members__)) << 4
                size += AMF3Integer.size(header)
                if isinstance(val, AMF3Object):
                    # Anonymous objects use the single-byte empty class name.
                    size += U8.size
                else:
                    size += AMF3String.size(traits.__name__, cache=str_cache)
                traits_cache.append(traits)
                for member in traits.__members__:
                    size += AMF3String.size(member, cache=str_cache)
            # Sealed member values are emitted even when the traits were
            # only referenced.
            for member in traits.__members__:
                value = getattr(val, member)
                size += AMF3Value.size(value, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
            if traits.__dynamic__:
                if isinstance(val, AMF3Object):
                    iterator = val.items()
                else:
                    iterator = val.__dict__.items()
                for key, value in iterator:
                    if key in traits.__members__:
                        continue
                    size += AMF3String.size(key, cache=str_cache)
                    size += AMF3Value.size(value, str_cache=str_cache,
                                           object_cache=object_cache,
                                           traits_cache=traits_cache)
                # One byte for the empty-string dynamic-section terminator.
                size += U8.size
            return size
    @classmethod
    def pack(cls, val, str_cache, object_cache, traits_cache):
        """Serialize *val*; emits a U29 reference when already cached."""
        chunks = []
        if val in object_cache:
            index = object_cache.index(val)
            return AMF3Integer(index << 1)
        else:
            object_cache.append(val)
            chunks = []
            traits = type(val)
            if traits in traits_cache:
                index = traits_cache.index(traits)
                chunks.append(AMF3Integer(index << 2 | 0x01))
            else:
                header = 0x03  # literal object + literal traits
                if traits.__dynamic__:
                    header |= 0x02 << 2
                if traits.__externalizable__:
                    header |= 0x01 << 2
                header |= (len(traits.__members__)) << 4
                chunks.append(AMF3Integer(header))
                if isinstance(val, AMF3Object):
                    # Anonymous object: empty class name.
                    chunks.append(U8(AMF3_EMPTY_STRING))
                else:
                    chunks.append(AMF3String(traits.__name__, cache=str_cache))
                traits_cache.append(traits)
                for member in traits.__members__:
                    chunks.append(AMF3String(member, cache=str_cache))
            for member in traits.__members__:
                value = getattr(val, member)
                value = AMF3Value.pack(value, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
                chunks.append(value)
            if traits.__dynamic__:
                if isinstance(val, AMF3Object):
                    iterator = val.items()
                else:
                    iterator = val.__dict__.items()
                for key, value in iterator:
                    if key in traits.__members__:
                        continue
                    key = AMF3String(key, cache=str_cache)
                    value = AMF3Value.pack(value, str_cache=str_cache,
                                           object_cache=object_cache,
                                           traits_cache=traits_cache)
                    chunks.append(key)
                    chunks.append(value)
                # Empty string is end of dynamic values
                chunks.append(U8(AMF3_CLOSE_DYNAMIC_ARRAY))
            return b"".join(chunks)
    @classmethod
    def read(cls, fd, str_cache, object_cache, traits_cache):
        """Read one object, resolving object/traits/string references.

        NOTE(review): unlike AMF3ArrayPacker.read, the decoded object is
        never appended to object_cache here -- later by-reference lookups
        of this object would misindex; confirm against the writer side.
        """
        header = AMF3Integer.read(fd)
        obj = None
        if (header & 1) == 0:
            # Object reference.
            index = header >> 1
            obj = object_cache[index]
        else:
            header >>= 1
            if (header & 1) == 0:
                # Traits reference.
                index = header >> 1
                traits = traits_cache[index]
            else:
                externalizable = (header & 2) != 0
                dynamic = (header & 4) != 0
                members_len = header >> 3
                class_name = AMF3String.read(fd, cache=str_cache)
                members = []
                for i in range(members_len):
                    member_name = AMF3String.read(fd, cache=str_cache)
                    members.append(member_name)
                if len(class_name) == 0:
                    # Anonymous object.
                    traits = AMF3Object
                elif AMF3ObjectBase.lookup(class_name):
                    traits = AMF3ObjectBase.lookup(class_name)
                    # NOTE(review): mutates the registered class in place.
                    traits.__members__ = members
                    traits.__dynamic__ = dynamic
                    traits_cache.append(traits)
                else:
                    traits = AMF3ObjectBase.create(class_name, externalizable,
                                                   dynamic, members)
                    traits_cache.append(traits)
            values = OrderedDict()
            for member in traits.__members__:
                value = AMF3Value.read(fd, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
                values[member] = value
            if traits.__dynamic__:
                # Dynamic members run until an empty-string key.
                key = AMF3String.read(fd, cache=str_cache)
                while len(key) > 0:
                    value = AMF3Value.read(fd, str_cache=str_cache,
                                           object_cache=object_cache,
                                           traits_cache=traits_cache)
                    values[key] = value
                    key = AMF3String.read(fd, cache=str_cache)
            if traits == AMF3Object:
                obj = traits(values)
            else:
                obj = traits(**values)
        return obj
class AMF3Array(OrderedDict):
    """An AMF3 array: an ordered mapping that may hold both a dense
    integer-indexed portion and associative (non-dense) entries.
    """

    def __init__(self, *args, **kwargs):
        """Accept either a list (stored under integer keys 0..n-1) or the
        usual OrderedDict constructor arguments."""
        if args and isinstance(args[0], list):
            OrderedDict.__init__(self, **kwargs)
            for index, item in enumerate(args[0]):
                self[index] = item
        else:
            OrderedDict.__init__(self, *args, **kwargs)

    def dense_keys(self):
        """Return the integer keys in 0..len(self)-1 that are present."""
        return [key for key in range(len(self)) if key in self]

    def dense_values(self):
        """Yield the values of the dense portion in key order."""
        for key in self.dense_keys():
            yield self[key]
class AMF3ArrayPacker(DynamicType, AMF3Type):
    """Packs/reads AMF3 arrays (dense + associative parts) with
    reference-table support.

    Wire layout: U29 header (dense length << 1 | 1, or a reference),
    associative key/value pairs terminated by an empty string, then the
    dense values.
    """
    __identifier__ = AMF3_TYPE_ARRAY

    @classmethod
    def size(cls, val, str_cache, object_cache, traits_cache):
        """Return the encoded size of *val*, updating the caches like pack()."""
        if val in object_cache:
            index = object_cache.index(val)
            return AMF3Integer.size(index << 1)
        else:
            object_cache.append(val)
            size = 0
            if isinstance(val, AMF3Array):
                dense_keys = val.dense_keys()
                length = len(dense_keys)
            else:
                length = len(val)
                dense_keys = list(range(length))
            header = length << 1 | 1
            size += AMF3Integer.size(header)
            if isinstance(val, AMF3Array):
                for key, value in val.items():
                    if key in dense_keys:
                        continue
                    size += AMF3String.size(key, cache=str_cache)
                    size += AMF3Value.size(value, str_cache=str_cache,
                                           object_cache=object_cache,
                                           traits_cache=traits_cache)
            # BUG FIX: the associative part's empty-string terminator is
            # always present on the wire; it was previously only counted
            # for AMF3Array values, not for plain lists.
            size += U8.size
            for key in dense_keys:
                value = val[key]
                size += AMF3Value.size(value, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
            return size

    @classmethod
    def pack(cls, val, str_cache, object_cache, traits_cache):
        """Serialize *val*; emits a U29 reference when already cached."""
        if val in object_cache:
            index = object_cache.index(val)
            return AMF3Integer(index << 1)
        else:
            object_cache.append(val)
            chunks = []
            if isinstance(val, AMF3Array):
                dense_keys = val.dense_keys()
                length = len(dense_keys)
            else:
                length = len(val)
                dense_keys = list(range(length))
            header = length << 1 | 1
            chunks.append(AMF3Integer(header))
            if isinstance(val, AMF3Array):
                for key, value in val.items():
                    if key in dense_keys:
                        continue
                    chunks.append(AMF3String(key, cache=str_cache))
                    value = AMF3Value.pack(value, str_cache=str_cache,
                                           object_cache=object_cache,
                                           traits_cache=traits_cache)
                    chunks.append(value)
            # BUG FIX: always emit the empty-string terminator that closes
            # the associative part. read() below unconditionally expects it,
            # so plain lists packed without it could not be read back.
            chunks.append(U8(AMF3_CLOSE_DYNAMIC_ARRAY))
            for key in dense_keys:
                value = val[key]
                value = AMF3Value.pack(value, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
                chunks.append(value)
            return b"".join(chunks)

    @classmethod
    def read(cls, fd, str_cache, object_cache, traits_cache):
        """Read an array; low header bit 0 means a reference into object_cache."""
        header = AMF3Integer.read(fd)
        obj = None
        if (header & 1) == 0:
            index = header >> 1
            obj = object_cache[index]
        else:
            header >>= 1  # remaining bits: dense element count
            obj = AMF3Array()
            object_cache.append(obj)
            # Associative entries run until the empty-string terminator.
            key = AMF3String.read(fd, cache=str_cache)
            while len(key) > 0:
                value = AMF3Value.read(fd, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
                obj[key] = value
                key = AMF3String.read(fd, cache=str_cache)
            for i in range(header):
                value = AMF3Value.read(fd, str_cache=str_cache,
                                       object_cache=object_cache,
                                       traits_cache=traits_cache)
                obj[i] = value
        return obj
class AMF3Date(object):
    """Wrapper for an AMF3 date; *time* is serialized as a double by
    AMF3DatePacker (presumably milliseconds since the epoch per the AMF3
    spec -- not enforced here).
    """
    def __init__(self, time):
        self.time = time
class AMF3DatePacker(DynamicType, AMF3Type):
    """Packs/reads AMF3 dates with reference-table support."""
    # BUG FIX: was AMF3_TYPE_ARRAY (copy-paste from AMF3ArrayPacker).
    __identifier__ = AMF3_TYPE_DATE

    @classmethod
    def size(cls, val, cache):
        """Return the encoded size of *val*, updating *cache* like pack()."""
        if val in cache:
            index = cache.index(val)
            return AMF3Integer.size(index << 1)
        else:
            cache.append(val)
            # One header byte plus the double timestamp.
            return AMF3Double.size + U8.size

    @classmethod
    def pack(cls, val, cache):
        """Serialize *val*; a literal date is the 0x01 header plus a double."""
        if val in cache:
            index = cache.index(val)
            return AMF3Integer(index << 1)
        else:
            cache.append(val)
            # AMF3_TYPE_NULL (0x01) doubles as the U29 literal-date header
            # (low bit set, not a reference).
            chunks = [U8(AMF3_TYPE_NULL),
                      AMF3Double(val.time)]
            return b"".join(chunks)

    @classmethod
    def read(cls, fd, cache):
        """Read a date, resolving references against *cache*."""
        header = AMF3Integer.read(fd)
        if (header & 1) == 0:
            index = header >> 1
            return cache[index]
        else:
            time = AMF3Double.read(fd)
            date = AMF3Date(time)
            cache.append(date)
            return date
class AMF3Value(DynamicType):
    """Top-level AMF3 value (de)serializer: one type-marker byte followed
    by a payload whose layout depends on the marker.

    Fresh reference-table caches are created per call unless supplied, so
    one call corresponds to one AMF3 reference scope.
    """
    PrimitiveReaders = {
        AMF3_TYPE_DOUBLE: AMF3Double,
    }
    DynamicReaders = {
        AMF3_TYPE_INTEGER: AMF3Integer,
    }
    # Combined marker -> reader dispatch table.
    Readers = PrimitiveReaders.copy()
    Readers.update(DynamicReaders)
    @classmethod
    def size(cls, val, str_cache=None, object_cache=None, traits_cache=None):
        """Return the number of bytes pack(val) would produce."""
        if str_cache is None:
            str_cache = []
        if object_cache is None:
            object_cache = []
        if traits_cache is None:
            traits_cache = []
        size = U8.size  # type marker
        # bool must be dispatched before the int branch (bool subclasses
        # int); True/False are marker-only values. The `in (False, True)`
        # check is redundant but harmless.
        if isinstance(val, bool) and val in (False, True):
            pass
        elif val is None:
            # Null is marker-only.
            pass
        elif isinstance(val, integer_types):
            if val < AMF3_MIN_INTEGER or val > AMF3_MAX_INTEGER:
                # Outside the signed U29 range: encoded as a double.
                size += AMF3Double.size
            else:
                size += AMF3Integer.size(val)
        elif isinstance(val, float):
            size += AMF3Double.size
        elif isinstance(val, (AMF3Array, list)):
            size += AMF3ArrayPacker.size(val, str_cache=str_cache,
                                         object_cache=object_cache,
                                         traits_cache=traits_cache)
        elif isinstance(val, string_types):
            size += AMF3String.size(val, cache=str_cache)
        elif isinstance(val, AMF3ObjectBase):
            size += AMF3ObjectPacker.size(val, str_cache=str_cache,
                                          object_cache=object_cache,
                                          traits_cache=traits_cache)
        elif isinstance(val, AMF3Date):
            size += AMF3DatePacker.size(val, cache=object_cache)
        else:
            raise ValueError("Unable to pack value of type {0}".format(type(val)))
        return size
    @classmethod
    def pack(cls, val, str_cache=None, object_cache=None, traits_cache=None):
        """Serialize *val* as marker byte plus payload."""
        if str_cache is None:
            str_cache = []
        if object_cache is None:
            object_cache = []
        if traits_cache is None:
            traits_cache = []
        chunks = []
        if isinstance(val, bool):
            # bool before int: True/False have dedicated markers.
            if val is False:
                chunks.append(U8(AMF3_TYPE_FALSE))
            elif val is True:
                chunks.append(U8(AMF3_TYPE_TRUE))
        elif val is None:
            chunks.append(U8(AMF3_TYPE_NULL))
        elif isinstance(val, integer_types):
            if val < AMF3_MIN_INTEGER or val > AMF3_MAX_INTEGER:
                # Outside the signed U29 range: fall back to a double.
                chunks.append(U8(AMF3_TYPE_DOUBLE))
                chunks.append(AMF3Double(val))
            else:
                chunks.append(U8(AMF3_TYPE_INTEGER))
                chunks.append(AMF3Integer(val))
        elif isinstance(val, float):
            chunks.append(U8(AMF3_TYPE_DOUBLE))
            chunks.append(AMF3Double(val))
        elif isinstance(val, (AMF3Array, list)):
            chunks.append(U8(AMF3_TYPE_ARRAY))
            chunks.append(AMF3ArrayPacker.pack(val, str_cache=str_cache,
                                               object_cache=object_cache,
                                               traits_cache=traits_cache))
        elif isinstance(val, string_types):
            chunks.append(U8(AMF3_TYPE_STRING))
            chunks.append(AMF3String.pack(val, cache=str_cache))
        elif isinstance(val, AMF3ObjectBase):
            chunks.append(U8(AMF3_TYPE_OBJECT))
            chunks.append(AMF3ObjectPacker.pack(val, str_cache=str_cache,
                                                object_cache=object_cache,
                                                traits_cache=traits_cache))
        elif isinstance(val, AMF3Date):
            chunks.append(U8(AMF3_TYPE_DATE))
            chunks.append(AMF3DatePacker.pack(val, cache=object_cache))
        else:
            raise ValueError("Unable to pack value of type {0}".format(type(val)))
        return b"".join(chunks)
    @classmethod
    def read(cls, fd, str_cache=None, object_cache=None, traits_cache=None):
        """Read one AMF3 value from *fd*; raises IOError on unknown markers."""
        type_ = U8.read(fd)
        if str_cache is None:
            str_cache = []
        if object_cache is None:
            object_cache = []
        if traits_cache is None:
            traits_cache = []
        if type_ == AMF3_TYPE_UNDEFINED or type_ == AMF3_TYPE_NULL:
            return None
        elif type_ == AMF3_TYPE_FALSE:
            return False
        elif type_ == AMF3_TYPE_TRUE:
            return True
        elif type_ == AMF3_TYPE_STRING:
            return AMF3String.read(fd, cache=str_cache)
        elif type_ == AMF3_TYPE_ARRAY:
            return AMF3ArrayPacker.read(fd, str_cache=str_cache,
                                        object_cache=object_cache,
                                        traits_cache=traits_cache)
        elif type_ == AMF3_TYPE_OBJECT:
            return AMF3ObjectPacker.read(fd, str_cache=str_cache, object_cache=object_cache,
                                         traits_cache=traits_cache)
        elif type_ == AMF3_TYPE_DATE:
            return AMF3DatePacker.read(fd, cache=object_cache)
        elif type_ in cls.Readers:
            # Doubles and integers via the dispatch table.
            return cls.Readers[type_].read(fd)
        else:
            raise IOError("Unhandled AMF3 type: {0}".format(hex(type_)))
|
import base64
import keyword
import re
from abc import ABCMeta, abstractmethod
from collections.abc import Mapping
import attr
from six import exec_, iteritems, add_metaclass, text_type, string_types
from marshmallow import missing, Schema, fields
from marshmallow.base import SchemaABC
from .compat import is_overridden
from .utils import IndentedString
# Regular Expression for identifying a valid Python identifier name.
_VALID_IDENTIFIER = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
if False: # pylint: disable=using-constant-test
# pylint: disable=unused-import
from typing import Any, Callable, Dict, Optional, Tuple, Union, Set
def field_symbol_name(field_name):
    # type: (str) -> str
    """Generates the symbol name to be used when accessing a field in generated
    code.

    If the field name isn't a valid identifier name, synthesizes a name by
    hex (base16) encoding the fieldname.
    """
    if not _VALID_IDENTIFIER.match(field_name):
        # BUG FIX: base64 output may contain '+' and '/', which are illegal
        # in a Python identifier; base16 output ([0-9A-F]) never is, and
        # needs no '=' padding stripped.
        field_name = str(base64.b16encode(
            field_name.encode('utf-8')).decode('utf-8'))
    return '_field_{field_name}'.format(field_name=field_name)
def attr_str(attr_name):
    # type: (str) -> str
    """Gets the string to use when accessing an attribute on an object.

    Handles the case where the attribute name collides with a keyword and
    would therefore be illegal to access with dot notation.
    """
    if not keyword.iskeyword(attr_name):
        return 'obj.{0}'.format(attr_name)
    return 'getattr(obj, "{0}")'.format(attr_name)
@add_metaclass(ABCMeta)
class FieldSerializer():
    """Base class for generating code to serialize a field.

    Subclasses implement one access strategy (instance attribute, dict
    key, or hybrid) and return the generated code as an IndentedString.
    """
    def __init__(self, context=None):
        # type: (JitContext) -> None
        """
        :param context: The context for the current Jit
        """
        self.context = context or JitContext()
    @abstractmethod
    def serialize(self, attr_name, field_symbol,
                  assignment_template, field_obj):
        # type: (str, str, str, fields.Field) -> IndentedString
        """Generates the code to pull a field off of an object into the result.

        :param attr_name: The name of the attribute being accessed.
        :param field_symbol: The symbol to use when accessing the field. Should
            be generated via field_symbol_name.
        :param assignment_template: A string template to use when generating
            code. The assignment template is passed into the serializer and
            has a single positional placeholder for string formatting. An
            example of a value that may be passed into assignment_template is:
            `res['some_field'] = {0}`
        :param field_obj: The instance of the Marshmallow field being
            serialized.
        :return: The code to pull a field off of the object passed in.
        """
        pass  # pragma: no cover
class InstanceSerializer(FieldSerializer):
    """Generates code for accessing fields as if they were instance variables.

    For example, generates:

    res['some_value'] = obj.some_value
    """

    def serialize(self, attr_name, field_symbol,
                  assignment_template, field_obj):
        # type: (str, str, str, fields.Field) -> IndentedString
        # attr_str handles keyword-named attributes via getattr().
        access_expr = attr_str(attr_name)
        return IndentedString(assignment_template.format(access_expr))
class DictSerializer(FieldSerializer):
    """Generates code for accessing fields as if they were a dict, generating
    the proper code for handling missing fields as well. For example:

    # Required field with no default
    res['some_value'] = obj['some_value']

    # Field with a default. some_value__default will be injected at exec time.
    res['some_value'] = obj.get('some_value', some_value__default)

    # Non required field:
    if 'some_value' in obj:
        res['some_value'] = obj['some_value']
    """

    def serialize(self, attr_name, field_symbol,
                  assignment_template, field_obj):
        # type: (str, str, str, fields.Field) -> IndentedString
        body = IndentedString()
        if self.context.is_serializing:
            default_str, default_value = 'default', field_obj.default
        else:
            default_str, default_value = 'missing', field_obj.missing

        direct_lookup = assignment_template.format(
            'obj["{attr_name}"]'.format(attr_name=attr_name))

        if field_obj.required:
            # Required: let a missing key raise at runtime.
            body += direct_lookup
        elif default_value == missing:
            # No default: only assign when the key is present.
            body += 'if "{attr_name}" in obj:'.format(attr_name=attr_name)
            with body.indent():
                body += direct_lookup
        else:
            # Default available: fall back to the injected default symbol,
            # invoking it at runtime when it is callable.
            if callable(default_value):
                default_str += '()'
            body += assignment_template.format(
                'obj.get("{attr_name}", {field_symbol}__{default_str})'.format(
                    attr_name=attr_name, field_symbol=field_symbol,
                    default_str=default_str))
        return body
class HybridSerializer(FieldSerializer):
    """Generates code for accessing fields as if they were a hybrid object.

    Hybrid objects are objects that don't inherit from `Mapping`, but do
    implement `__getitem__`. The generated code first attempts a lookup by
    key, then falls back to attribute access:

    try:
        value = obj['some_value']
    except (KeyError, AttributeError, IndexError, TypeError):
        value = obj.some_value
    res['some_value'] = value
    """

    def serialize(self, attr_name, field_symbol,
                  assignment_template, field_obj):
        # type: (str, str, str, fields.Field) -> IndentedString
        key_lookup = 'value = obj["{0}"]'.format(attr_name)
        attr_fallback = 'value = {0}'.format(attr_str(attr_name))
        body = IndentedString()
        body += 'try:'
        with body.indent():
            body += key_lookup
        # Catch anything a __getitem__ implementation might plausibly raise.
        body += 'except (KeyError, AttributeError, IndexError, TypeError):'
        with body.indent():
            body += attr_fallback
        body += assignment_template.format('value')
        return body
@attr.s
class JitContext():
    """ Bag of properties to keep track of the context of what's being jitted.
    """
    # BUG FIX: `namespace` and `exclude` used plain mutable defaults
    # (`default={}` / `default=set()`), which attrs shares across every
    # instance -- and both are mutated in place (inliners write into
    # `namespace`, NestedInliner adds to `exclude`), leaking state between
    # unrelated JitContexts. attr.Factory gives each instance its own
    # container, matching how `schema_stack` was already declared.
    namespace = attr.ib(default=attr.Factory(dict))  # type: Dict[str, Any]
    use_inliners = attr.ib(default=True)  # type: bool
    schema_stack = attr.ib(default=attr.Factory(set))  # type: Set[str]
    only = attr.ib(default=None)  # type: Optional[Set[str]]
    exclude = attr.ib(default=attr.Factory(set))  # type: Set[str]
    is_serializing = attr.ib(default=True)  # type: bool
    use_cython = attr.ib(default=False)  # type: bool
@add_metaclass(ABCMeta)
class FieldInliner():
    """Base class for generating code to serialize a field.
    Inliners are used to generate the code to validate/parse fields without
    having to bounce back into the underlying marshmallow code. While this is
    somewhat fragile as it requires the inliners to be kept in sync with the
    underlying implementation, it's good for a >2X speedup on benchmarks.
    """
    @abstractmethod
    def inline(self, field, context):
        # type: (fields.Field, JitContext) -> Optional[str]
        """Return an expression template with `{0}` as the value placeholder,
        or None when the field cannot be inlined."""
        pass  # pragma: no cover
class StringInliner(FieldInliner):
    def inline(self, field, context):
        # type: (fields.Field, JitContext) -> Optional[str]
        """Generates a template for inlining string serialization.

        For example, generates "unicode(value) if value is not None else None"
        to serialize a string in Python 2.7. When deserializing, wraps the
        expression in an isinstance check whose failure path raises KeyError
        (dict()["error"]) to punt back to the slow path.
        """
        if is_overridden(field._serialize, fields.String._serialize):
            # A custom _serialize cannot be inlined safely.
            return None
        template = '{0}({{0}}) if {{0}} is not None else None'.format(
            text_type.__name__)
        if not context.is_serializing:
            type_names = ','.join(t.__name__ for t in string_types)
            template = ('(' + template + ') if '
                        '(isinstance({0}, (' + type_names +
                        ')) or {0} is None) else dict()["error"]')
        return template
class BooleanInliner(FieldInliner):
    def inline(self, field, context):
        # type: (fields.Field, JitContext) -> Optional[str]
        """Generates a template for inlining boolean serialization.

        For example, generates:

        ((value in __some_field_truthy) or
         (False if value in __some_field_falsy else dict()["error"]))

        where the dict()["error"] raises KeyError to punt to the slow path.
        This is somewhat fragile but it tracks what Marshmallow does.
        """
        if is_overridden(field._serialize, fields.Boolean._serialize):
            # A custom _serialize cannot be inlined safely.
            return None
        truthy_symbol = '__{0}_truthy'.format(field.name)
        falsy_symbol = '__{0}_falsy'.format(field.name)
        # The truthy/falsy sets are injected into the exec namespace.
        context.namespace[truthy_symbol] = field.truthy
        context.namespace[falsy_symbol] = field.falsy
        template = ('(({0} in ' + truthy_symbol +
                    ') or (False if {0} in ' + falsy_symbol +
                    ' else dict()["error"]))')
        return template + ' if {0} is not None else None'
class NumberInliner(FieldInliner):
    def inline(self, field, context):
        # type: (fields.Field, JitContext) -> Optional[str]
        """Generates a template for inlining number serialization.

        For example, generates "float(value) if value is not None else None"
        to serialize a float. If `field.as_string` is `True` the result will
        be coerced to a string if not None.
        """
        cannot_inline = (
            is_overridden(field._validated, fields.Number._validated) or
            is_overridden(field._serialize, fields.Number._serialize) or
            field.num_type not in (int, float))
        if cannot_inline:
            return None
        template = field.num_type.__name__ + '({0})'
        if field.as_string and context.is_serializing:
            template = 'str({0})'.format(template)
        if field.allow_none is True or context.is_serializing:
            # Only emit the Null checking code if nulls are allowed. If they
            # aren't allowed casting `None` to an integer will throw and the
            # slow path will take over.
            template += ' if {0} is not None else None'
        return template
class NestedInliner(FieldInliner):  # pragma: no cover
    def inline(self, field, context):
        """Generates a template for inlining nested field.
        This doesn't pass tests yet in Marshmallow, namely due to issues around
        code expecting the context of nested schema to be populated on first
        access, so disabling for now.
        """
        if is_overridden(field._serialize, fields.Nested._serialize):
            return None
        if not (isinstance(field.nested, type) and
                issubclass(field.nested, SchemaABC)):
            return None
        # NOTE(review): field.nested is a class, so .__class__ here is its
        # metaclass -- field.nested itself looks intended. Left as-is since
        # this inliner is disabled.
        if field.nested.__class__ in context.schema_stack:
            return None
        method_name = '__nested_{}_serialize'.format(
            field_symbol_name(field.name))
        # Save the outer jit state; the nested schema is generated with its
        # own only/exclude sets and a fresh namespace.
        old_only = context.only
        old_exclude = context.exclude
        old_namespace = context.namespace
        context.only = set(field.only) if field.only else None
        context.exclude = set(field.exclude)
        context.namespace = {}
        # Propagate dotted entries (e.g. "child.name") from the outer
        # only-set down to the nested schema.
        for only_field in old_only or []:
            if only_field.startswith(field.name + '.'):
                if not context.only:
                    context.only = set()
                context.only.add(only_field[len(field.name + '.'):])
        # Dotted entries also imply their top-level prefix must be kept.
        for only_field in list((context.only or [])):
            if '.' in only_field:
                if not context.only:
                    context.only = set()
                context.only.add(only_field.split('.')[0])
        for exclude_field in old_exclude:
            if exclude_field.startswith(field.name + '.'):
                context.exclude.add(exclude_field[len(field.name + '.'):])
        serialize_method = generate_marshall_method(field.schema, context)
        if serialize_method is None:
            # NOTE(review): the swapped-in only/exclude/namespace are not
            # restored on this early return, leaving the caller's context
            # mutated -- confirm before (re)enabling this inliner.
            return None
        # Restore outer state and expose the nested method to the generated
        # code's namespace.
        context.namespace = old_namespace
        context.only = old_only
        context.exclude = old_exclude
        context.namespace[method_name] = serialize_method
        if field.many:
            return ('[' + method_name +
                    '(_x) for _x in {0}] if {0} is not None else None')
        return method_name + '({0}) if {0} is not None else None'
# Maps marshmallow field types to their inliner instances; matching is done
# with isinstance, so subclasses of these field types are inlined too.
INLINERS = {
    fields.String: StringInliner(),
    fields.Number: NumberInliner(),
    fields.Boolean: BooleanInliner(),
}
# Maps the expected input object shape to the serializer strategy used to
# generate the field-access code.
EXPECTED_TYPE_TO_CLASS = {
    'object': InstanceSerializer,
    'dict': DictSerializer,
    'hybrid': HybridSerializer
}
def _should_skip_field(field_name, field_obj, context):
    # type: (str, fields.Field, JitContext) -> bool
    """Return True when *field_name* must not appear in the generated method
    (wrong direction for load/dump, or filtered out by only/exclude)."""
    load_only = getattr(field_obj, 'load_only', False)
    dump_only = getattr(field_obj, 'dump_only', False)
    # Marshmallow 2.x.x doesn't properly set load_only or
    # dump_only on Method objects. This is fixed in 3.0.0
    # https://github.com/marshmallow-code/marshmallow/commit/1b676dd36cbb5cf040da4f5f6d43b0430684325c
    if isinstance(field_obj, fields.Method):
        has_serialize = bool(field_obj.serialize_method_name)
        has_deserialize = bool(field_obj.deserialize_method_name)
        load_only = has_deserialize and not has_serialize
        dump_only = has_serialize and not has_deserialize
    if context.is_serializing and load_only:
        return True
    if not context.is_serializing and dump_only:
        return True
    if context.only and field_name not in context.only:
        return True
    return bool(context.exclude and field_name in context.exclude)
def generate_transform_method_body(schema, on_field, context):
    # type: (Schema, FieldSerializer, JitContext) -> IndentedString
    """Generates the method body for a schema and a given field serialization
    strategy.

    The generated function takes a single argument, `obj`, and returns a
    dict (`schema.dict_class`) of transformed field values. Symbols such
    as `<field_symbol>__serialize`, defaults and validators are injected
    into the exec namespace elsewhere (via context.namespace).
    """
    body = IndentedString()
    body += 'def {method_name}(obj):'.format(
        method_name=on_field.__class__.__name__)
    with body.indent():
        if schema.dict_class is dict:
            # Declaring dictionaries via `{}` is faster than `dict()` since it
            # avoids the global lookup.
            body += 'res = {}'
        else:
            # dict_class will be injected before `exec` is called.
            body += 'res = dict_class()'
        if not context.is_serializing:
            # Local alias used by the allow_none checks emitted below.
            body += '__res_get = res.get'
        for field_name, field_obj in iteritems(schema.fields):
            if _should_skip_field(field_name, field_obj, context):
                continue
            attr_name, destination = _get_attr_and_destination(context,
                                                               field_name,
                                                               field_obj)
            # https://marshmallow.readthedocs.io/en/stable/upgrading.html
            # #the-prefix-schema-parameter-is-removed
            # result_key = ''.join(
            #     [schema.prefix or '', destination])
            result_key = destination
            field_symbol = field_symbol_name(field_name)
            assignment_template = ''
            value_key = '{0}'
            # If we have to assume any field can be callable we always have to
            # check to see if we need to invoke the method first.
            # We can investigate tracing this as well.
            jit_options = getattr(schema.opts, 'jit_options', {})
            no_callable_fields = (jit_options.get('no_callable_fields') or
                                  not context.is_serializing)
            if not no_callable_fields:
                assignment_template = (
                    'value = {0}; '
                    'value = value() if callable(value) else value; ')
                value_key = 'value'
            # Attempt to see if this field type can be inlined.
            inliner = inliner_for_field(context, field_obj)
            if inliner:
                assignment_template += _generate_inlined_access_template(
                    inliner, result_key, no_callable_fields)
            else:
                assignment_template += _generate_fallback_access_template(
                    context, field_name, field_obj, result_key, value_key)
            if not field_obj._CHECK_ATTRIBUTE:
                # fields like 'Method' expect to have `None` passed in when
                # invoking their _serialize method.
                body += assignment_template.format('None')
                # Drop entries whose transform returned the missing sentinel.
                context.namespace['__marshmallow_missing'] = missing
                body += 'if res["{key}"] is __marshmallow_missing:'.format(
                    key=result_key)
                with body.indent():
                    body += 'del res["{key}"]'.format(key=result_key)
            else:
                serializer = on_field
                if not _VALID_IDENTIFIER.match(attr_name):
                    # If attr_name is not a valid python identifier, it can
                    # only be accessed via key lookups.
                    serializer = DictSerializer(context)
                body += serializer.serialize(
                    attr_name, field_symbol, assignment_template, field_obj)
                if not context.is_serializing and field_obj.data_key:
                    # ('load_from' below is the marshmallow 2 name of what
                    # marshmallow 3 calls 'data_key'.)
                    # Marshmallow has a somewhat counter intuitive behavior.
                    # It will first load from the name of the field, then,
                    # should that fail, will load from the field specified in
                    # 'load_from'.
                    #
                    # For example:
                    #
                    # class TestSchema(Schema):
                    #     foo = StringField(load_from='bar')
                    # TestSchema().load({'foo': 'haha'}).result
                    #
                    # Works just fine with no errors.
                    #
                    # class TestSchema(Schema):
                    #     foo = StringField(load_from='bar')
                    # TestSchema().load({'foo': 'haha', 'bar': 'value'}).result
                    #
                    # Results in {'foo': 'haha'}
                    #
                    # Therefore, we generate code to mimic this behavior in
                    # cases where `load_from` is specified.
                    body += 'if "{key}" not in res:'.format(key=result_key)
                    with body.indent():
                        body += serializer.serialize(
                            field_obj.data_key, field_symbol,
                            assignment_template, field_obj)
            if not context.is_serializing:
                # Deserialization-only checks: required, allow_none and
                # validators, each emitted as inline guards.
                if field_obj.required:
                    body += 'if "{key}" not in res:'.format(key=result_key)
                    with body.indent():
                        body += 'raise ValueError()'
                if field_obj.allow_none is not True:
                    body += 'if __res_get("{key}", res) is None:'.format(
                        key=result_key)
                    with body.indent():
                        body += 'raise ValueError()'
                if (field_obj.validators or
                        is_overridden(field_obj._validate,
                                      fields.Field._validate)):
                    body += 'if "{key}" in res:'.format(key=result_key)
                    with body.indent():
                        body += '{field_symbol}__validate(res["{result_key}"])'.format(
                            field_symbol=field_symbol, result_key=result_key
                        )
        body += 'return res'
    return body
def _generate_fallback_access_template(context, field_name, field_obj,
                                       result_key, value_key):
    """Build one line of generated code that marshals a field through its
    bound ``_serialize``/``_deserialize`` method.

    The direction (serialize vs deserialize) and the key used to look the
    value up both depend on ``context.is_serializing``; when deserializing,
    ``data_key`` takes precedence over the field name.
    """
    symbol = field_symbol_name(field_name)
    if context.is_serializing:
        transform = 'serialize'
        lookup_key = field_name
    else:
        transform = 'deserialize'
        lookup_key = field_obj.data_key or field_name
    template = ('res["{key}"] = {field_symbol}__{transform}('
                '{value_key}, "{key_name}", obj)')
    return template.format(key=result_key, field_symbol=symbol,
                           transform=transform, key_name=lookup_key,
                           value_key=value_key)
def _get_attr_and_destination(context, field_name, field_obj):
# type: (JitContext, str, fields.Field) -> Tuple[str, str]
# The name of the attribute to pull off the incoming object
attr_name = field_name
# The destination of the field in the result dictionary.
destination = field_name
if context.is_serializing:
destination = field_obj.data_key or field_name
if field_obj.attribute:
if context.is_serializing:
attr_name = field_obj.attribute
else:
destination = field_obj.attribute
return attr_name, destination
def _generate_inlined_access_template(inliner, key, no_callable_fields):
# type: (str, str, bool) -> str
"""Generates the code to access a field with an inliner."""
value_key = 'value'
assignment_template = ''
if not no_callable_fields:
assignment_template += 'value = {0}; '.format(
inliner.format(value_key))
else:
assignment_template += 'value = {0}; '
value_key = inliner.format('value')
assignment_template += 'res["{key}"] = {value_key}'.format(
key=key, value_key=value_key)
return assignment_template
def inliner_for_field(context, field_obj):
    # type: (JitContext, fields.Field) -> Optional[str]
    """Return inlined access code for ``field_obj``, if an inliner applies.

    Walks the registered INLINERS; the last matching inliner class wins
    unless an earlier one produced a truthy result.  Returns None when
    inlining is disabled or nothing matches.
    """
    if not context.use_inliners:
        return None
    inliner = None
    for field_type, inliner_class in iteritems(INLINERS):
        if isinstance(field_obj, field_type):
            inliner = inliner_class.inline(field_obj, context)
            if inliner:
                break
    return inliner
def generate_method_bodies(schema, context):
    # type: (Schema, JitContext) -> str
    """Generate 3 method bodies for marshalling objects, dictionaries, or
    hybrid objects.
    """
    body = IndentedString()
    # One body per access strategy; all three are emitted into the same
    # generated module so the proxy can pick among them at call time.
    for serializer_class in (InstanceSerializer, DictSerializer, HybridSerializer):
        body += generate_transform_method_body(schema, serializer_class(context), context)
    return str(body)
class SerializeProxy():
    """Proxy object for calling serializer methods.

    Starts out tracing each call's input type.  Once ``threshold`` calls
    of a single type have been observed, the proxy swaps itself for the
    specialized serializer for that type; seeing a mix of input types
    disables specialization permanently.  A falsy threshold disables
    tracing from the start.
    """

    def __init__(self, dict_serializer, hybrid_serializer,
                 instance_serializer,
                 threshold=100):
        # type: (Callable, Callable, Callable, int) -> None
        self.dict_serializer = dict_serializer
        self.hybrid_serializer = hybrid_serializer
        self.instance_serializer = instance_serializer
        self.threshold = threshold
        self.dict_count = 0
        self.hybrid_count = 0
        self.instance_count = 0
        self._call = self.no_tracing_call if not threshold else self.tracing_call

    def __call__(self, obj):
        return self._call(obj)

    def tracing_call(self, obj):
        # type: (Any) -> Any
        """Dispatch by input type while counting calls; may specialize."""
        try:
            if isinstance(obj, Mapping):
                self.dict_count += 1
                return self.dict_serializer(obj)
            if hasattr(obj, '__getitem__'):
                self.hybrid_count += 1
                return self.hybrid_serializer(obj)
            self.instance_count += 1
            return self.instance_serializer(obj)
        finally:
            # Runs after every traced call, even when the serializer raises.
            self._maybe_specialize()

    def _maybe_specialize(self):
        # Mixed input types: stop tracing, keep generic dispatch forever.
        counts = (self.dict_count, self.hybrid_count, self.instance_count)
        if sum(1 for count in counts if count > 0) > 1:
            self._call = self.no_tracing_call
        elif self.dict_count >= self.threshold:
            self._call = self.dict_serializer
        elif self.hybrid_count >= self.threshold:
            self._call = self.hybrid_serializer
        elif self.instance_count >= self.threshold:
            self._call = self.instance_serializer

    def no_tracing_call(self, obj):
        # type: (Any) -> Any
        """Dispatch by input type with no tracing."""
        if isinstance(obj, Mapping):
            return self.dict_serializer(obj)
        if hasattr(obj, '__getitem__'):
            return self.hybrid_serializer(obj)
        return self.instance_serializer(obj)
def generate_marshall_method(schema, context=missing, threshold=100):
    # type: (Schema, JitContext, int) -> Union[SerializeProxy, Callable, None]
    """Generates a function to marshall objects for a given schema.

    :param schema: The Schema to generate a marshall method for.
    :param context: JIT context to use; a fresh ``JitContext`` is created
        when omitted.
    :param threshold: The number of calls of the same type to observe before
        specializing the marshal method for that type.
    :return: A Callable that can be used to marshall objects for the schema,
        or None when the schema cannot be JITed.
    """
    if is_overridden(schema.get_attribute, Schema.get_attribute):
        # Bail if get_attribute is overridden. This provides the schema author
        # too much control to reasonably JIT.
        return None
    if context is missing:
        context = JitContext()
    # Fresh namespace for the generated module; the generated code expects
    # 'dict_class' to be a zero-arg factory.
    context.namespace = {}
    context.namespace['dict_class'] = lambda: schema.dict_class()  # pylint: disable=unnecessary-lambda
    jit_options = getattr(schema.opts, 'jit_options', {})
    # Track this schema class while generating so nested/recursive schemas
    # can be detected; remove it again once generation is done.
    context.schema_stack.add(schema.__class__)
    result = generate_method_bodies(schema, context)
    context.schema_stack.remove(schema.__class__)
    namespace = context.namespace
    for key, value in iteritems(schema.fields):
        if value.attribute and '.' in value.attribute:
            # We're currently unable to handle dotted attributes. These don't
            # seem to be widely used so punting for now. For more information
            # see
            # https://github.com/marshmallow-code/marshmallow/issues/450
            return None
        # Expose each field's bound methods and sentinel values to the
        # generated code under deterministic per-field symbol names.
        namespace[field_symbol_name(key) + '__serialize'] = value._serialize
        namespace[field_symbol_name(key) + '__deserialize'] = value._deserialize
        namespace[field_symbol_name(key) + '__validate_missing'] = value._validate_missing
        namespace[field_symbol_name(key) + '__validate'] = value._validate
        if value.default is not missing:
            namespace[field_symbol_name(key) + '__default'] = value.default
        if value.missing is not missing:
            namespace[field_symbol_name(key) + '__missing'] = value.missing
    # Compile the generated source into the prepared namespace.
    exec_(result, namespace)
    proxy = None  # type: Optional[SerializeProxy]
    marshall_method = None  # type: Union[SerializeProxy, Callable, None]
    if not context.is_serializing:
        # Deserialization always expects a dictionary.
        marshall_method = namespace[DictSerializer.__name__]
    elif jit_options.get('expected_marshal_type') in EXPECTED_TYPE_TO_CLASS:
        # The schema author pinned the expected input type; skip the proxy.
        marshall_method = namespace[EXPECTED_TYPE_TO_CLASS[
            jit_options['expected_marshal_type']].__name__]
    else:
        # Unknown input type: trace calls and specialize after `threshold`.
        marshall_method = SerializeProxy(
            namespace[DictSerializer.__name__],
            namespace[HybridSerializer.__name__],
            namespace[InstanceSerializer.__name__],
            threshold=threshold)
        proxy = marshall_method

    def marshall(obj, many=False):
        if many:
            return [marshall_method(x) for x in obj]
        return marshall_method(obj)

    if proxy:
        # Used to allow tests to introspect the proxy.
        marshall.proxy = proxy  # type: ignore
    # Keep the generated source around for debugging/introspection.
    marshall._source = result  # type: ignore
    return marshall
def generate_unmarshall_method(schema, context=missing):
    # type: (Schema, JitContext) -> Union[SerializeProxy, Callable, None]
    """Generates a function to unmarshall objects for a given schema.

    Mirror of :func:`generate_marshall_method` with the context flipped
    into deserialization mode.

    :param schema: The Schema to generate an unmarshall method for.
    :param context: JIT context to reuse; a fresh ``JitContext`` is
        created when omitted (or passed as None).
    :return: A Callable / SerializeProxy / None, as returned by
        ``generate_marshall_method``.
    """
    # Consistency fix: the previous `context = context or JitContext()`
    # silently relied on the `missing` sentinel being falsy.  Test
    # identity explicitly, like the sibling `generate_marshall_method`
    # does; None is treated as "absent" too, preserving the old
    # truthiness behavior for callers that pass None.
    if context is missing or context is None:
        context = JitContext()
    context.is_serializing = False
    return generate_marshall_method(schema, context)
|
# https://github.com/taki0112/ResNet-Tensorflow
import ops_resnet
import tensorflow as tf
class ResNet(object):
    """ResNet classifier built as a TensorFlow 1.x static graph.

    Based on https://github.com/taki0112/ResNet-Tensorflow.
    Input is hard-coded to 28x28x1 images (MNIST-like).
    """

    def __init__(self, feature_space_dimension, n_classes, n_res_blocks=18, margin_in_loss=0.25, is_train=True):
        """Build the network graph (and its loss when ``is_train``).

        :param feature_space_dimension: width of the pre-logit embedding.
        :param n_classes: number of output classes.
        :param n_res_blocks: residual depth; selects the block layout.
        :param margin_in_loss: NOTE(review): accepted but never used in
            this class — confirm whether it belongs to another loss variant.
        :param is_train: when True, also builds the cross-entropy loss.
        """
        self.img_size = 28
        self.c_dim = 1  # single (grayscale) channel
        self.res_n = n_res_blocks
        self.feature_space_dimension = feature_space_dimension
        self.n_classes = n_classes
        # Graph inputs: image batch and integer class labels.
        self.x1 = tf.placeholder(tf.float32, [None, 28, 28, 1])
        self.x1Image = self.x1
        self.label_ = tf.placeholder(tf.int32, [None])
        # Create loss
        if is_train:
            with tf.variable_scope("resnet") as scope:
                self.o1_lastLayer = self.network(self.x1Image, is_training=True, reuse=False)
                self.loss = self.loss_of_network()
        else:
            with tf.variable_scope("resnet") as scope:
                self.o1_lastLayer = self.network(self.x1Image, is_training=False, reuse=False)

    # def load_network_model(self, session_):
    #     # https://stackoverflow.com/questions/33759623/tensorflow-how-to-save-restore-a-model
    #     print(" [*] Reading checkpoints...")
    #     saver = tf.train.Saver()
    #     checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir_)
    #     ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    #     if ckpt and ckpt.model_checkpoint_path:
    #         ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
    #         saver.restore(session_, os.path.join(checkpoint_dir, ckpt_name))
    #         print(" [*] Success to read {}".format(ckpt_name))
    #         latest_epoch = int(ckpt_name[-1])
    #         return True, latest_epoch
    #     else:
    #         print(" [*] Failed to find a checkpoint")
    #         return False, 0

    def save_network(self, session_, checkpoint_dir):
        """Save every variable under the "resnet" scope to ``checkpoint_dir``.

        NOTE(review): the path is joined with a Windows-style "\\" separator —
        confirm this is only run on Windows.
        """
        # https://stackoverflow.com/questions/46549056/can-tensorflow-save-the-variables-in-a-certain-variable-scope
        # saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "network"))
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "resnet"))
        saver.save(session_, checkpoint_dir + "\\model.ckpt")

    def network(self, x, is_training=True, reuse=False):
        """Assemble the residual tower; returns the class logits.

        Side effect: stores the pre-logit embedding in ``self.o1``.
        """
        with tf.variable_scope("network", reuse=reuse):
            # Deeper variants (res_n >= 50) use bottleneck blocks.
            if self.res_n < 50 :
                residual_block = ops_resnet.resblock
            else :
                residual_block = ops_resnet.bottle_resblock
            # Number of residual blocks per stage for this depth.
            residual_list = ops_resnet.get_residual_layer(self.res_n)
            ch = 32 # paper is 64
            x = ops_resnet.conv(x, channels=ch, kernel=3, stride=1, scope='conv')
            # Stage 0: no downsampling.
            for i in range(residual_list[0]) :
                x = residual_block(x, channels=ch, is_training=is_training, downsample=False, scope='resblock0_' + str(i))
            ########################################################################################################
            # Stage 1: downsample once, then repeat at 2x channels.
            x = residual_block(x, channels=ch*2, is_training=is_training, downsample=True, scope='resblock1_0')
            for i in range(1, residual_list[1]) :
                x = residual_block(x, channels=ch*2, is_training=is_training, downsample=False, scope='resblock1_' + str(i))
            ########################################################################################################
            # Stage 2: 4x channels.
            x = residual_block(x, channels=ch*4, is_training=is_training, downsample=True, scope='resblock2_0')
            for i in range(1, residual_list[2]) :
                x = residual_block(x, channels=ch*4, is_training=is_training, downsample=False, scope='resblock2_' + str(i))
            ########################################################################################################
            # Stage 3: 8x channels.  NOTE(review): these scope names carry an
            # extra underscore ('resblock_3_') unlike earlier stages; renaming
            # now would break existing checkpoints, so it is left as-is.
            x = residual_block(x, channels=ch*8, is_training=is_training, downsample=True, scope='resblock_3_0')
            for i in range(1, residual_list[3]) :
                x = residual_block(x, channels=ch*8, is_training=is_training, downsample=False, scope='resblock_3_' + str(i))
            ########################################################################################################
            x = ops_resnet.batch_norm(x, is_training, scope='batch_norm')
            x = ops_resnet.relu(x)
            x = ops_resnet.global_avg_pooling(x)
            # Embedding layer, kept for feature-space use by callers.
            x_oneToLast = ops_resnet.fully_conneted(x, units=self.feature_space_dimension, scope='logit_1')
            self.o1 = x_oneToLast
            x_last = ops_resnet.fully_conneted(x_oneToLast, units=self.n_classes, scope='logit_2')
            return x_last

    def loss_of_network(self):
        """Mean sparse softmax cross-entropy over integer labels."""
        # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.label_, logits=self.o1_lastLayer)) #--> needs one-hot-encoding of labels
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_, logits=self.o1_lastLayer))
        return loss
|
import sys
import os
from os.path import stat
from argparse import ArgumentParser
import pickle
# Input: list files, each line naming one docker layer file on disk.
layer_files = ["/home/nannan/dockerimages/layers/hulk1/hulk1_layers_less_1g.lst"]#, "/home/nannan/dockerimages/layers/hulk4/hulk4_layers_less_1g.lst"]
# Output directory for the sampled layer lists.
out_dir = "/home/nannan/dockerimages/layers/hulk1/"
# Pickled {layer_path: size_in_bytes} cache produced by setup().
stored_dat_file = os.getcwd() + "/lyr_size.pkl"
#df_num = 160
def setup():
    """Collect the on-disk size of every layer listed in ``layer_files``.

    Reads each list file, stats every layer path, and pickles the
    resulting ``{layer_path: size_in_bytes}`` dict to ``stored_dat_file``.
    Unreadable layers are reported but not fatal.
    """
    # print() with a single argument is valid in both Python 2 and 3;
    # the original print statements were Python-2 only.
    print('entered setup mode, now collecting layer size information...')
    layer_size_dict = {}
    lyrs = []
    lyr_failed = []
    for lyr_f in layer_files:
        with open(lyr_f, 'r') as f:
            lyrs.extend(line.strip() for line in f)
    for lyr in lyrs:
        try:
            layer_size_dict[lyr] = os.stat(lyr).st_size
        except OSError:
            # Narrowed from a bare `except:` — only missing/unreadable
            # files are expected here; anything else should surface.
            lyr_failed.append(lyr)
    print('info collection complete.')
    print('successfully identified ' + str(len(lyrs)) + ' lyrs')
    print('failed to get the size of ' + str(len(lyr_failed)) + ' layer files, dump:')
    print(lyr_failed)
    print('now writing results to pickle file in current directory...')
    with open(stored_dat_file, 'wb') as f:
        pickle.dump(layer_size_dict, f, pickle.HIGHEST_PROTOCOL)
def sampling(layer_size_dict, size):
    """Write out all layers whose size is within +/-10% of ``size`` MB.

    :param layer_size_dict: {layer_path: size_in_bytes} mapping.
    :param size: target size in megabytes.

    Results are sorted by size (ascending) and written one path per line
    to ``out_dir/hulk_layers_approx_<size>MB.lst``.
    """
    print('collecting all layers with size close to ' + str(size) + ' MB...')
    res = {}
    cap = size * 1.1
    floor = size * 0.9
    if size == 1:
        # For the 1 MB bucket accept anything below the cap; integer MB
        # conversion would otherwise exclude most sub-MB layers.
        floor = 0
    for lyr, lyr_size in layer_size_dict.items():
        # Explicit floor division: preserves the original Python 2 `/`
        # semantics for int sizes and behaves the same under Python 3.
        mb_size = lyr_size // 1024 // 1024
        if floor <= mb_size <= cap:
            res[lyr] = lyr_size
    result = sorted(res, key=res.__getitem__)
    print('found ' + str(len(result)) + ' layers satisfying the size requirement.')
    print('writing layer list to hulk1...')
    with open(out_dir + 'hulk_layers_approx_' + str(size) + 'MB.lst', 'w') as f:
        for lyr in result:
            f.write("%s\n" % lyr)
def main():
    """Parse the command line and dispatch to setup() or sampling()."""
    print('WARNING: the current running version is tuned for layers no more than 50M.')
    print('WARNING: now assuming static input output directories (hardcoded)')
    parser = ArgumentParser(description='allow customized sampling args.')
    parser.add_argument('-c', '--command', dest='command', type=str, required=True,
                        help='Mode command. Possible commands: setup, sample.')
    parser.add_argument('-size', '--size', dest='size', type=int, required=False,
                        help='For sampling only. Specify layer size limit.')
    args = parser.parse_args()
    if args.command == 'setup':
        setup()
    elif args.command == 'sample':
        # `is None` instead of `== None` (identity test for the sentinel).
        if args.size is None:
            print('size not specified, quit')
            sys.exit(-1)
        print('attempting to populate layer:size dictionary...')
        try:
            with open(stored_dat_file, 'rb') as f:
                layer_size_dict = pickle.load(f)
        except (IOError, OSError, pickle.UnpicklingError):
            # Narrowed from a bare `except:`; only load failures expected.
            print('unable to read the stored layer:size file')
            sys.exit(-1)
        print('successfully read in ' + str(len(layer_size_dict)) + ' layers, now sampling...')
        # NOTE(review): the parsed --size value is ignored here; a fixed
        # 60..200 MB sweep is run instead — confirm this is intentional.
        for i in range(60, 210, 10):
            sampling(layer_size_dict, i)


if __name__ == "__main__":
    main()
|
<filename>apps/users/models.py
# -*- coding: utf-8 -*-
import base64
import hashlib
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from innovate.models import BaseModel
from innovate.utils import get_partition_id, safe_filename, ImageStorage
from tower import ugettext_lazy as _
def determine_upload_path(instance, filename):
    """Return the storage path for a user's uploaded avatar.

    :param instance: model instance the file is attached to (unused, but
        required by Django's ``upload_to`` callable signature).
    :param filename: the uploaded file's original name.
    :return: "<USER_AVATAR_PATH>/<sanitized filename>".
    """
    # Removed the unused `chunk_size` local; nothing here partitions by
    # directory (see get_partition_id if that is ever needed).
    path = getattr(settings, 'USER_AVATAR_PATH', 'images/profiles/')
    # Normalize stray leading/trailing slashes from settings.
    path = path.strip('/')
    return "%(path)s/%(filename)s" % {
        'path': path,
        'filename': safe_filename(filename)
    }
class Link(BaseModel):
    """A named URL attached to a user profile (e.g. blog, portfolio)."""

    # Human-readable label shown next to the URL.
    name = models.CharField(max_length=50, verbose_name=_(u'Link Name'))
    url = models.URLField(verbose_name=_(u'URL'), max_length=255)
    # Owning profile; nullable so links can exist unattached.
    profile = models.ForeignKey('users.Profile', blank=True, null=True)

    def __unicode__(self):
        return u'%s -> %s' % (self.name, self.url)
def get_profile(cls):
    """Create an empty profile for users if none exists.

    NOTE(review): despite the ``cls`` name this receives a ``User``
    *instance* — the function is monkey-patched onto ``User`` below and
    invoked as ``user.get_profile()``.
    """
    profile, created = Profile.objects.get_or_create(user=cls)
    return profile


# Patch User so ``user.get_profile()`` call sites keep working.
User.get_profile = get_profile
class Profile(BaseModel):
    """Extended account information attached one-to-one to ``auth.User``."""

    user = models.OneToOneField(User, primary_key=True,
                                verbose_name=_(u'User'))
    name = models.CharField(max_length=255, blank=True,
                            verbose_name=_(u'Display name'))
    # Fix: the label was a bare string; wrap it in _() so it is marked
    # for translation like every other field label in this model.
    title = models.CharField(max_length=255, blank=True, null=True,
                             verbose_name=_(u'Job title'))
    avatar = models.ImageField(upload_to=determine_upload_path, null=True,
                               blank=True, verbose_name=_(u'Avatar'),
                               max_length=settings.MAX_FILEPATH_LENGTH,
                               storage=ImageStorage())
    website = models.URLField(verbose_name=_(u'Website'), max_length=255,
                              blank=True)
    bio = models.TextField(verbose_name=_(u'Bio'), blank=True)
    # Whether this profile is highlighted on the splash page.
    featured = models.BooleanField(default=False)
    featured_image = models.ImageField(verbose_name=_(u'Featured Image'),
                                       blank=True, null=True,
                                       upload_to=settings.USER_AVATAR_PATH)

    @models.permalink
    def get_absolute_url(self):
        """URL of this profile's public page."""
        return ('users_profile', [self.user.username])

    def get_gravatar_url(self, size=140):
        """Return the gravatar URL for this user, or None when
        ``GRAVATAR_URL`` is not configured."""
        base_url = getattr(settings, 'GRAVATAR_URL', None)
        if not base_url:
            return None
        return '%s%s?s=%d' % (base_url, self.email_hash, size)

    @property
    def email_hash(self):
        """MD5 hash of users email address."""
        return hashlib.md5(self.user.email).hexdigest()

    def avatar_url(self, size=140):
        """Return user provided avatar, or gravatar if none exists."""
        media_url = getattr(settings, 'MEDIA_URL', None)
        path = lambda f: f and "%s%s" % (media_url, f)
        return path(self.avatar) or self.get_gravatar_url(size)

    @property
    def featured_image_or_default(self):
        """Return featured image for splash page."""
        return self.featured_image or 'img/featured-default.gif'

    def __unicode__(self):
        """Return a string representation of the user."""
        return self.display_name

    @property
    def username_hash(self):
        """
        Return a hash of the users email. Used as a URL component when no
        username is set (as is the case with users signed up via BrowserID).
        """
        return base64.urlsafe_b64encode(
            hashlib.sha1(self.user.email).digest()).rstrip('=')

    @property
    def has_chosen_identifier(self):
        """Determine if user has a generated or chosen public identifier."""
        return self.name or (not self.user.username == self.username_hash)

    @property
    def masked_email(self):
        """
        If a user does not have a display name or a username, their email may
        be displayed on their profile. This returns a masked copy so we don't
        leak that data.
        """
        user, domain = self.user.email.split('@')
        # Keep a prefix plus the last character; Python 2 integer division
        # is relied upon for the slice length below.
        mask_part = lambda s, n: s[:n] + u'…' + s[-1:]
        return '@'.join(
            (mask_part(user, len(user) / 3),
             mask_part(domain, 1)))

    @property
    def display_name(self):
        """Choose and return the best public display identifier for a user."""
        if self.name:
            return self.name
        if self.has_chosen_identifier:
            return self.user.username
        return self.masked_email
|
<filename>Scraper/hunter_api.py
import json
from pyhunter import PyHunter
from django.conf import settings
from .models import EmailModel, OdinList, Company
# This is the hunter.io api code
class HunterIO:
    """Wrapper around the hunter.io API (via PyHunter) that persists
    lookup results into the local EmailModel / OdinList / Company tables.
    """

    def __init__(self, api_key):
        # Initializing the pyhunter object with our api key
        self.hunter = PyHunter(api_key)
        # hunter.io verification statuses treated as "verified".
        self.accepted = ['valid']

    def get_value(self, tup, value):
        """Reverse-lookup ``value`` among choices-style (key, label) pairs.

        Returns the matching key, or the string '0' when nothing matches.
        """
        for i in tup:
            if i[1] == value:
                return i[0]
        return '0'

    def clean_value(self, value):
        """Substitute the 'N/A' placeholder for missing (None) values."""
        if value is None:
            return 'N/A'
        return value

    def build_odin_obj(self, domain, organization='', disposable='', webmail='', country='', state=''):
        """Get or create the OdinList row for ``domain``.

        Existing rows are returned untouched; the metadata arguments are
        only applied when the row is first created.
        """
        # Creating the domain odin's obj
        url_search = OdinList.objects.filter(domain__exact=domain)
        if url_search.exists() == False:
            obj = OdinList.objects.create(
                domain = self.clean_value(domain),
                organization = self.clean_value(organization),
                disposable = self.clean_value(disposable),
                webmail = self.clean_value(webmail),
                country = self.clean_value(country),
                state = self.clean_value(state),
            )
        else:
            obj = url_search.first()
        return obj

    def create_company(self, name):
        """Get or create a Company by name.

        NOTE(review): implicitly returns None when ``name`` is None —
        callers guard with ``if company:``, so this appears intentional.
        """
        if name is not None:
            # Check if the company already exists in our db
            company_search = Company.objects.filter(name__exact=name)
            if company_search.exists():
                company = company_search.first()
            else:
                # Creating the company name
                value_c = self.clean_value(name)
                company = Company.objects.create(name=value_c)
            return company

    def domain_search(self, domain_name='', company_name=''):
        """Run a hunter.io domain search and persist everything it finds.

        :return: list of EmailModel objects (existing or newly created)
            for the emails reported for the domain.
        """
        result = self.hunter.domain_search(domain_name, company_name)
        # Initializing results set for the emails found
        results_set = set()
        # Getting all required results
        disposable = result['disposable']
        webmail = result['webmail']
        organization = result['organization']
        country = result['country']
        state = result['state']
        domain_name = result['domain']
        # Build the odins obj
        if domain_name:
            self.build_odin_obj(domain_name, organization, disposable, webmail, country, state)
        # Creating the company
        company = self.create_company(organization)
        # Looping through the email results returned and creating emails
        for email in result['emails']:
            value = email['value']
            # Map hunter's labels back to our stored choice keys.
            email_type = self.get_value(settings.EMAIL_TYPE, email['type'])
            # confidence = email['confidence']
            name = f"{email['first_name']} {email['last_name']}"
            position = email['position']
            seniority = self.get_value(settings.SENIOR_TYPE, email['seniority'])
            department = email['department']
            linkedin = email['linkedin']
            twitter = email['twitter']
            phone_number = email['phone_number']
            status = email['verification']['status']
            # Check if the email already exists
            email_res = EmailModel.objects.filter(email__exact=value)
            if email_res.exists():
                email_obj = email_res.first()
            else:
                # Creating the new email objects
                email_obj = EmailModel.objects.create(
                    email = self.clean_value(value),
                    name = self.clean_value(name),
                    domain = self.clean_value(domain_name),
                    position = self.clean_value(position),
                    seniority = self.clean_value(seniority),
                    department = self.clean_value(department),
                    email_type = self.clean_value(email_type),
                    linkedin = self.clean_value(linkedin),
                    twitter = self.clean_value(twitter),
                    phone_number = self.clean_value(phone_number),
                )
            # Adding the organization name to the email object
            if company:
                email_obj.company_names.add(company)
            # Is the email object verified or not
            if status in self.accepted:
                email_obj.verified = True
            else:
                email_obj.verified = False
            email_obj.save()
            # Adding the email object to the result set
            results_set.add(email_obj)
        return list(results_set)

    def find_email(self, name, company):
        """Find one person's email via hunter.io and persist it.

        :param name: the person's full name.
        :param company: company name or domain to search within.
        :return: the EmailModel object (existing or newly created).
        """
        result = self.hunter.email_finder(company=company, full_name=name)
        # Getting all required results
        name = f"{result['first_name']} {result['last_name']}"
        email = result['email']
        domain = result['domain']
        position = result['position']
        twitter = result['twitter']
        linkedin_url = result['linkedin_url']
        phone_number = result['phone_number']
        company = result['company']
        status = result['status']
        # Build the odins obj
        if domain:
            self.build_odin_obj(domain, company)
        # Creating the company
        company = self.create_company(company)
        # Getting the linkedin username from the url
        # NOTE(review): raises AttributeError if linkedin_url is None —
        # confirm hunter.io always returns a string here.
        linkedin = linkedin_url.split('/')[-1]
        # Check if the email already exists
        email_res = EmailModel.objects.filter(email__exact=email)
        if email_res.exists():
            email_obj = email_res.first()
        else:
            # Creating the new email objects
            email_obj = EmailModel.objects.create(
                email = self.clean_value(email),
                name = self.clean_value(name),
                domain = self.clean_value(domain),
                position = self.clean_value(position),
                linkedin = self.clean_value(linkedin),
                twitter = self.clean_value(twitter),
                phone_number = self.clean_value(phone_number),
            )
        # Adding the organization name to the email object
        if company:
            email_obj.company_names.add(company)
        # Is the email object verified or not
        if status in self.accepted:
            email_obj.verified = True
        else:
            email_obj.verified = False
        email_obj.save()
        return email_obj

    def verify_email(self, email):
        """Verify one address via hunter.io; persist and return its row."""
        result = self.hunter.email_verifier(email)
        # Getting all required results
        email = result['email']
        domain = email.split('@')[-1]
        disposable = result['disposable']
        webmail = result['webmail']
        status = result['status']
        # Build the odins obj
        if domain:
            self.build_odin_obj(domain, disposable=disposable, webmail=webmail)
        # Check if the email already exists
        email_res = EmailModel.objects.filter(email__exact=email)
        if email_res.exists():
            email_obj = email_res.first()
        else:
            # Creating the new email objects
            email_obj = EmailModel.objects.create(
                email = self.clean_value(email),
                domain = self.clean_value(domain)
            )
        # Is the email object verified or not
        if status in self.accepted:
            email_obj.verified = True
        else:
            email_obj.verified = False
        email_obj.save()
        return email_obj

    def verify_emails(self, emails=None):
        """Verify a batch of addresses; returns their EmailModel rows."""
        # Default handled with None to avoid a shared mutable default.
        if emails is None:
            emails = []
        # Initializing emails set
        email_set = set()
        for email in emails:
            email_set.add(self.verify_email(email))
        return list(email_set)
# Module-level singleton client, configured from Django settings.
email_hunter = HunterIO(api_key=settings.HUNTER_API_KEY)
<reponame>ETH-NEXUS/scout
"""Tests for the cases controllers"""
from flask import Flask, url_for
from scout.server.extensions import store
from scout.server.blueprints.cases.controllers import case, case_report_content
def test_case_report_content(adapter, institute_obj, case_obj, variant_obj):
    """case_report_content returns a dict with exactly one detailed causative."""
    for collection, document in (
        (adapter.case_collection, case_obj),
        (adapter.institute_collection, institute_obj),
        (adapter.variant_collection, variant_obj),
    ):
        collection.insert_one(document)
    ## GIVEN an adapter with a case that have an existing causative
    case_obj = adapter.case_collection.find_one()
    institute_obj = adapter.institute_collection.find_one()
    var_obj = adapter.variant_collection.find_one({"case_id": case_obj["_id"]})
    assert var_obj
    case_obj["causatives"] = [var_obj["_id"]]
    ## WHEN fetching a case with the controller
    data = case_report_content(adapter, institute_obj, case_obj)
    ## THEN assert the result is on the correct format
    assert isinstance(data, dict)
    detailed_keys = (
        "causatives_detailed",
        "suspects_detailed",
        "classified_detailed",
        "tagged_detailed",
        "tier_detailed",
        "dismissed_detailed",
        "commented_detailed",
    )
    for key in detailed_keys:
        # Only the causative we added should be detailed; the rest are empty.
        expected = 1 if key == "causatives_detailed" else 0
        assert len(data[key]) == expected
def test_case_controller_rank_model_link(adapter, institute_obj, dummy_case):
    """The controller adds rank_model_link when prefix/postfix are configured."""
    # GIVEN an adapter with a case carrying a rank model version
    dummy_case["rank_model_version"] = "1.3"
    adapter.case_collection.insert_one(dummy_case)
    adapter.institute_collection.insert_one(institute_obj)
    fetched_case = adapter.case_collection.find_one()
    flask_app = Flask(__name__)
    flask_app.config.update(
        RANK_MODEL_LINK_PREFIX="http://",
        RANK_MODEL_LINK_POSTFIX=".ini",
    )
    # WHEN fetching the case with the controller
    with flask_app.app_context():
        data = case(adapter, institute_obj, fetched_case)
    # THEN the link has been added to the case
    assert "rank_model_link" in fetched_case
def test_case_controller(adapter, institute_obj, dummy_case):
    """Without link config the controller adds no rank_model_link."""
    # GIVEN an adapter holding a case and an institute
    adapter.case_collection.insert_one(dummy_case)
    adapter.institute_collection.insert_one(institute_obj)
    stored_case = adapter.case_collection.find_one()
    flask_app = Flask(__name__)
    # WHEN running the controller inside a bare app context
    with flask_app.app_context():
        data = case(adapter, institute_obj, stored_case)
    # THEN the case has no link
    assert "rank_model_link" not in stored_case
def test_case_controller_no_panels(adapter, institute_obj, dummy_case):
    """A case without panels ends up with an empty panel_names list."""
    # GIVEN an adapter with a case without gene panels
    adapter.case_collection.insert_one(dummy_case)
    adapter.institute_collection.insert_one(institute_obj)
    stored_case = adapter.case_collection.find_one()
    assert "panel_names" not in stored_case
    flask_app = Flask(__name__)
    # WHEN fetching the case with the controller
    with flask_app.app_context():
        data = case(adapter, institute_obj, stored_case)
    # THEN panel_names is present but empty
    assert stored_case["panel_names"] == []
def test_case_controller_with_panel(app, institute_obj, panel, dummy_case):
    """The controller fills panel_names for a case with a default panel."""
    # GIVEN an adapter with a case with a gene panel
    dummy_case["panels"] = [
        {
            "panel_name": panel["panel_name"],
            "version": panel["version"],
            "nr_genes": 2,
            "is_default": True,
        }
    ]
    store.case_collection.insert_one(dummy_case)
    # GIVEN an adapter with a gene panel
    store.panel_collection.insert_one(panel)
    fetched_case = store.case_collection.find_one()
    # NOTE(review): this shadows the configured `app` fixture with a bare
    # Flask app — confirm the fixture is intentionally unused here.
    app = Flask(__name__)
    # WHEN fetching a case with the controller
    with app.app_context():
        data = case(store, institute_obj, fetched_case)
    # THEN assert that the display information has been added to case
    assert len(fetched_case["panel_names"]) == 1
def test_case_controller_panel_wrong_version(adapter, app, institute_obj, panel, dummy_case):
    """The controller falls back to an existing panel version."""
    # GIVEN a case referencing a panel version that does not exist
    missing_version_panel = {
        "panel_name": panel["panel_name"],
        "version": panel["version"] + 1,
        "nr_genes": 2,
        "is_default": True,
    }
    dummy_case["panels"] = [missing_version_panel]
    adapter.case_collection.insert_one(dummy_case)
    adapter.institute_collection.insert_one(institute_obj)
    # GIVEN an adapter holding the real panel
    adapter.panel_collection.insert_one(panel)
    fetched_case = adapter.case_collection.find_one()
    # GIVEN an initialized app with a logged-in user
    with app.test_client() as client:
        resp = client.get(url_for("auto_login"))
        # WHEN fetching the case with the controller
        data = case(adapter, institute_obj, fetched_case)
    # THEN the existing panel version was picked up instead
    assert str(panel["version"]) in fetched_case["panel_names"][0]
def test_case_controller_non_existing_panel(adapter, app, institute_obj, dummy_case, panel):
    """A panel reference with no stored panel object yields no panel names."""
    # GIVEN a case referencing a panel that was never inserted
    dummy_case["panels"] = [
        dict(
            panel_name=panel["panel_name"],
            version=panel["version"] + 1,
            nr_genes=2,
            is_default=True,
        )
    ]
    adapter.case_collection.insert_one(dummy_case)
    fetched_case = adapter.case_collection.find_one()
    # GIVEN an initialized app with a logged-in user
    with app.test_client() as client:
        resp = client.get(url_for("auto_login"))
        # WHEN fetching the case with the controller
        data = case(adapter, institute_obj, fetched_case)
    # THEN no panel names could be resolved
    assert len(fetched_case["panel_names"]) == 0
|
import base64
import re
import uuid
from collections import defaultdict
from decimal import Decimal
from textwrap import dedent
import requests
import gevent
import netaddr
from nacl.public import Box
from contextlib import ContextDecorator
from jumpscale.clients.explorer.models import DiskType, NextAction, WorkloadType, ZDBMode
from jumpscale.core.base import StoredFactory
from jumpscale.loader import j
from jumpscale.packages.tfgrid_solutions.models import PoolConfig
from jumpscale.sals.chatflows.chatflows import StopChatFlow
from jumpscale.sals.zos.zos import Zosv2
# Workload types that together make up a "gateway" solution.
GATEWAY_WORKLOAD_TYPES = [
    WorkloadType.Domain_delegate,
    WorkloadType.Gateway4to6,
    WorkloadType.Subdomain,
    WorkloadType.Reverse_proxy,
    WorkloadType.Proxy,
]
# Stored pool configurations; always_reload so edits are picked up
# without a process restart.
pool_factory = StoredFactory(PoolConfig)
pool_factory.always_reload = True
# Workload types whose deployment failure gets the hosting node blocked
# (see deployment_context below).
NODE_BLOCKING_WORKLOAD_TYPES = [
    WorkloadType.Container,
    WorkloadType.Network_resource,
    WorkloadType.Volume,
    WorkloadType.Zdb,
]
# Key prefix / TTL / counter key used for tracking gateway domains that
# repeatedly fail (backing store not visible in this module).
DOMAINS_DISALLOW_PREFIX = "TFGATEWAY:DOMAINS:DISALLOWED"
DOMAINS_DISALLOW_EXPIRATION = 60 * 60 * 4  # 4 hours
DOMAINS_COUNT_KEY = "TFGATEWAY:DOMAINS:FAILURE_COUNT"
class DeploymentFailed(StopChatFlow):
    """StopChatFlow raised when deploying a workload fails.

    Carries enough context (solution uuid, workload id, identity name)
    for ``deployment_context`` to clean up after the failure.
    """

    def __init__(self, msg=None, solution_uuid=None, wid=None, identity_name=None, **kwargs):
        super().__init__(msg, **kwargs)
        self.identity_name = identity_name
        self.wid = wid
        self.solution_uuid = solution_uuid
class deployment_context(ContextDecorator):
    """Context manager / decorator that cleans up after a DeploymentFailed.

    On a DeploymentFailed exit it cancels the solution's workloads and,
    for node-bound workload types, blocks the offending node.  The
    exception is NOT swallowed — it still propagates to the caller.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, exc_tb):
        # Clean exits (exc_type is None) and unrelated exceptions pass
        # through untouched.
        # NOTE(review): `!=` compares the exact class, so subclasses of
        # DeploymentFailed would skip cleanup — confirm this is intended.
        if exc_type != DeploymentFailed:
            return
        if exc.solution_uuid:
            # cancel related workloads
            j.logger.info(f"canceling workload ids of solution_uuid: {exc.solution_uuid}")
            j.sals.reservation_chatflow.solutions.cancel_solution_by_uuid(exc.solution_uuid, exc.identity_name)
        if exc.wid:
            # block the failed node if the workload is network or container
            zos = j.sals.zos.get(exc.identity_name)
            workload = zos.workloads.get(exc.wid)
            if workload.info.workload_type in NODE_BLOCKING_WORKLOAD_TYPES:
                j.logger.info(f"blocking node {workload.info.node_id} for failed workload {workload.id}")
                j.sals.reservation_chatflow.reservation_chatflow.block_node(workload.info.node_id)
class NetworkView:
    class dry_run_context(ContextDecorator):
        """Scoped scratch network for a capacity dry-run.

        On exit, decommissions every workload belonging to the test
        network — whether or not the body raised.
        """

        def __init__(self, test_network_name, identity_name=None, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.test_network_name = test_network_name
            # Fall back to the current instance identity.
            self.identity_name = identity_name or j.core.identity.me.instance_name

        def __enter__(self):
            return self

        def __exit__(self, *exc):
            # Tear down all network workloads of the test network.
            network_view = NetworkView(self.test_network_name, identity_name=self.identity_name)
            for workload in network_view.network_workloads:
                j.sals.zos.get(self.identity_name).workloads.decomission(workload.id)
def __init__(self, name, workloads=None, nodes=None, identity_name=None):
self.identity_name = identity_name or j.core.identity.me.instance_name
self.name = name
identity_tid = j.core.identity.get(self.identity_name).tid
if not workloads:
workloads = j.sals.zos.get(self.identity_name).workloads.list(identity_tid, NextAction.DEPLOY)
self.workloads = workloads
self.used_ips = []
self.network_workloads = []
nodes = nodes or {node.node_id for node in j.sals.zos.get(self.identity_name)._explorer.nodes.list()}
self._fill_used_ips(self.workloads, nodes)
self._init_network_workloads(self.workloads, nodes)
if self.network_workloads:
self.iprange = self.network_workloads[0].network_iprange
else:
self.iprange = "can't be retrieved"
def _init_network_workloads(self, workloads, nodes=None):
nodes = nodes or {node.node_id for node in j.sals.zos.get(self.identity_name)._explorer.nodes.list()}
for workload in workloads:
if workload.info.node_id not in nodes:
continue
if workload.info.next_action != NextAction.DEPLOY:
continue
if workload.info.workload_type == WorkloadType.Network_resource and workload.name == self.name:
self.network_workloads.append(workload)
def _fill_used_ips(self, workloads, nodes=None):
nodes = nodes or {node.node_id for node in j.sals.zos.get(self.identity_name)._explorer.nodes.list()}
for workload in workloads:
if workload.info.node_id not in nodes:
continue
if workload.info.next_action != NextAction.DEPLOY:
continue
if workload.info.workload_type == WorkloadType.Kubernetes:
if workload.network_id == self.name:
self.used_ips.append(workload.ipaddress)
elif workload.info.workload_type == WorkloadType.Container:
for conn in workload.network_connection:
if conn.network_id == self.name:
self.used_ips.append(conn.ipaddress)
def add_node(self, node, pool_id):
used_ip_ranges = set()
for workload in self.network_workloads:
if workload.info.node_id == node.node_id:
return
used_ip_ranges.add(workload.iprange)
for peer in workload.peers:
used_ip_ranges.add(peer.iprange)
else:
network_range = netaddr.IPNetwork(self.iprange)
for idx, subnet in enumerate(network_range.subnet(24)):
if str(subnet) not in used_ip_ranges:
break
else:
raise StopChatFlow("Failed to find free network")
network = j.sals.zos.get(self.identity_name).network.create(self.iprange, self.name)
node_workloads = {}
for net_workload in self.network_workloads:
node_workloads[net_workload.info.node_id] = net_workload
network.network_resources = list(node_workloads.values()) # add only latest network resource for each node
j.sals.zos.get(self.identity_name).network.add_node(network, node.node_id, str(subnet), pool_id)
return network
def add_multiple_nodes(self, node_ids, pool_ids):
used_ip_ranges = set()
existing_nodes = set()
for workload in self.network_workloads:
used_ip_ranges.add(workload.iprange)
for peer in workload.peers:
used_ip_ranges.add(peer.iprange)
if workload.info.node_id in node_ids:
existing_nodes.add(workload.info.node_id)
if len(existing_nodes) == len(node_ids):
return
node_to_range = {}
node_to_pool = {}
for idx, node_id in enumerate(node_ids):
if node_id in existing_nodes:
continue
node_to_pool[node_id] = pool_ids[idx]
network_range = netaddr.IPNetwork(self.iprange)
for _, subnet in enumerate(network_range.subnet(24)):
subnet = str(subnet)
if subnet not in used_ip_ranges:
node_to_range[node_id] = subnet
used_ip_ranges.add(subnet)
break
else:
raise StopChatFlow("Failed to find free network")
zos = j.sals.zos.get()
network = zos.network.create(self.iprange, self.name)
node_workloads = {}
for net_workload in self.network_workloads:
node_workloads[net_workload.info.node_id] = net_workload
network.network_resources = list(node_workloads.values()) # add only latest network resource for each node
for node_id, node_range in node_to_range.items():
zos.network.add_node(network, node_id, node_range, node_to_pool[node_id])
return network
def add_access(self, node_id=None, use_ipv4=True, pool_id=None):
if node_id and not pool_id:
raise StopChatFlow("You must specify the pool id if you specify the node id")
node_id = node_id or self.network_workloads[0].info.node_id
pool_id = pool_id or self.network_workloads[0].info.pool_id
used_ip_ranges = set()
for workload in self.network_workloads:
used_ip_ranges.add(workload.iprange)
for peer in workload.peers:
used_ip_ranges.add(peer.iprange)
else:
network_range = netaddr.IPNetwork(self.iprange)
for idx, subnet in enumerate(network_range.subnet(24)):
if str(subnet) not in used_ip_ranges:
break
else:
raise StopChatFlow("Failed to find free network")
network = j.sals.zos.get(self.identity_name).network.create(self.iprange, self.name)
node_workloads = {}
for net_workload in self.network_workloads:
node_workloads[net_workload.info.node_id] = net_workload
network.network_resources = list(node_workloads.values()) # add only latest network resource for each node
if node_id not in node_workloads:
j.sals.zos.get(self.identity_name).network.add_node(network, node_id, str(subnet), pool_id=pool_id)
wg_quick = j.sals.zos.get(self.identity_name).network.add_access(network, node_id, str(subnet), ipv4=use_ipv4)
return network, wg_quick
def delete_access(self, ip_range, node_id=None):
node_id = node_id or self.network_workloads[0].info.node_id
node_workloads = {}
for net_workload in self.network_workloads:
node_workloads[net_workload.info.node_id] = net_workload
network = j.sals.zos.get(self.identity_name).network.create(self.iprange, self.name)
network.network_resources = list(node_workloads.values())
network = j.sals.zos.get(self.identity_name).network.delete_access(network, node_id, ip_range)
return network
def get_node_range(self, node):
for workload in self.network_workloads:
if workload.info.next_action != NextAction.DEPLOY:
continue
if workload.info.node_id == node.node_id:
return workload.iprange
raise StopChatFlow(f"Node {node.node_id} is not part of network")
def copy(self):
return NetworkView(self.name, identity_name=self.identity_name)
def get_node_free_ips(self, node):
ip_range = self.get_node_range(node)
freeips = []
hosts = netaddr.IPNetwork(ip_range).iter_hosts()
next(hosts) # skip ip used by node
for host in hosts:
ip = str(host)
if ip not in self.used_ips:
freeips.append(ip)
return freeips
def get_free_ip(self, node):
ip_range = self.get_node_range(node)
hosts = netaddr.IPNetwork(ip_range).iter_hosts()
next(hosts) # skip ip used by node
for host in hosts:
ip = str(host)
if ip not in self.used_ips:
self.used_ips.append(ip)
return ip
return None
def dry_run(self, test_network_name=None, node_ids=None, pool_ids=None, bot=None, breaking_node_ids=None):
name = test_network_name or uuid.uuid4().hex
breaking_node_ids = breaking_node_ids or node_ids
if bot:
bot.md_show_update("Starting dry run to check nodes status")
ip_range = netaddr.IPNetwork("10.10.0.0/16")
if any([node_ids, pool_ids]) and not all([node_ids, pool_ids]):
raise StopChatFlow("you must specify both pool ids and node ids together")
node_pool_dict = {}
if node_ids:
for idx, node_id in enumerate(node_ids):
node_pool_dict[node_id] = pool_ids[idx]
else:
for workload in self.network_workloads:
node_pool_dict[workload.info.node_id] = workload.info.pool_id
node_ids = list(node_pool_dict.keys())
pool_ids = list(node_pool_dict.values())
node_ids = list(set(node_ids))
network = j.sals.zos.get(self.identity_name).network.create(str(ip_range), name)
for idx, subnet in enumerate(ip_range.subnet(24)):
if idx == len(node_ids):
break
j.sals.zos.get(self.identity_name).network.add_node(
network, node_ids[idx], str(subnet), node_pool_dict[node_ids[idx]]
)
result = []
for resource in network.network_resources:
if bot:
bot.md_show_update(f"testing deployment on node {resource.info.node_id}")
try:
result.append(j.sals.zos.get(self.identity_name).workloads.deploy(resource))
except Exception as e:
raise StopChatFlow(
f"failed to deploy workload on node {resource.info.node_id} due to" f" error {str(e)}"
)
for idx, wid in enumerate(result):
try:
deployer.wait_workload(wid, bot, 2)
except StopChatFlow:
workload = j.sals.zos.get(self.identity_name).workloads.get(wid)
# if not a breaking nodes (old node not used for deployment) we can overlook it
if workload.info.node_id not in breaking_node_ids:
continue
j.sals.reservation_chatflow.reservation_chatflow.block_node(network.network_resources[idx].info.node_id)
raise DeploymentFailed(
"Network nodes dry run failed on node" f" {network.network_resources[idx].info.node_id}", wid=wid
)
class ChatflowDeployer:
def __init__(self):
self.workloads = defaultdict(
lambda: defaultdict(lambda: defaultdict(list))
) # Next Action: workload_type: pool_id: [workloads]
@property
def _explorer(self):
return j.core.identity.me.explorer
def load_user_workloads(self, next_action=NextAction.DEPLOY):
all_workloads = j.sals.zos.get().workloads.list(j.core.identity.me.tid, next_action)
self.workloads.pop(next_action, None)
for workload in all_workloads:
if workload.info.metadata:
workload.info.metadata = self.decrypt_metadata(workload.info.metadata)
try:
j.data.serializers.json.loads(workload.info.metadata)
except:
workload.info.metadata = "{}"
else:
workload.info.metadata = "{}"
self.workloads[workload.info.next_action][workload.info.workload_type][workload.info.pool_id].append(
workload
)
def decrypt_metadata(self, encrypted_metadata, identity_name=None):
identity_name = identity_name or j.core.identity.me.instance_name
identity = j.core.identity.get(identity_name)
try:
pk = identity.nacl.signing_key.verify_key.to_curve25519_public_key()
sk = identity.nacl.signing_key.to_curve25519_private_key()
box = Box(sk, pk)
return box.decrypt(base64.b85decode(encrypted_metadata.encode())).decode()
except Exception as e:
j.logger.error(f"error when decrypting metadata. {str(e)}")
return "{}"
def list_networks(self, next_action=NextAction.DEPLOY, sync=True):
if sync:
self.load_user_workloads(next_action=next_action)
networks = {} # name: last child network resource
for pool_id in self.workloads[next_action][WorkloadType.Network_resource]:
for workload in self.workloads[next_action][WorkloadType.Network_resource][pool_id]:
networks[workload.name] = workload
all_workloads = []
for pools_workloads in self.workloads[next_action].values():
for pool_id, workload_list in pools_workloads.items():
all_workloads += workload_list
network_views = {}
nodes = {node.node_id for node in j.sals.zos.get()._explorer.nodes.list()}
for network_name in networks:
network_views[network_name] = NetworkView(network_name, all_workloads, nodes)
return network_views
    def _pool_form(self, bot):
        """Ask the user for pool capacity (CU/SU) and duration; return (cu, su, currencies).

        The returned cu/su are cloud-unit-seconds: requested units multiplied
        by the chosen duration in seconds. Currency is fixed to TFT.
        """
        form = bot.new_form()
        cu = form.int_ask("Required Amount of Compute Unit (CU)", required=True, min=0, default=0)
        su = form.int_ask("Required Amount of Storage Unit (SU)", required=True, min=0, default=0)
        time_unit = form.drop_down_choice(
            "Please choose the duration unit", ["Day", "Month", "Year"], required=True, default="Month"
        )
        ttl = form.int_ask("Please specify the pools time-to-live", required=True, min=1, default=0)
        form.ask(
            """- Compute Unit (CU) is the amount of data processing power specified as the number of virtual CPU cores (logical CPUs) and RAM (Random Access Memory).
- Storage Unit (SU) is the size of data storage capacity.
You can get more detail information about clout units on the wiki: <a href="https://wiki.threefold.io/#/grid_concepts?id=cloud-units-v4" target="_blank">Cloud units details</a>.
The way this form works is you define how much cloud units you want to reserve and define for how long you would like the selected amount of cloud units.
As an example, if you want to be able to run some workloads that consumes `5CU` and `10SU` worth of capacity for `2 month`, you would specify:
- CU: 5
- SU: 10
- Duration unit: Month
- Duration: 2
""",
            md=True,
        )
        ttl = ttl.value
        time_unit = time_unit.value
        # convert the chosen duration unit to days
        if time_unit == "Day":
            days = 1
        elif time_unit == "Month":
            days = 30
        elif time_unit == "Year":
            days = 365
        else:
            # unreachable via the drop-down, kept as a guard
            raise j.exceptions.Input("Invalid duration unit")
        # cloud units are accounted per second: units * seconds_per_day * days * ttl
        cu = cu.value * 60 * 60 * 24 * days * ttl
        su = su.value * 60 * 60 * 24 * days * ttl
        return (cu, su, ["TFT"])
    def create_pool(self, bot):
        """Interactively create and pay for a new capacity pool.

        Asks for capacity, lets the user pick a farm that supports TFT and has
        free capacity, reserves the pool and waits for the payment to arrive.

        Raises:
            StopChatFlow: when no suitable farm exists or the reservation fails.
        """
        cu, su, currencies = self._pool_form(bot)
        all_farms = self._explorer.farms.list()
        available_farms = {}
        farms_by_name = {}
        for farm in all_farms:
            # keep only farms with a wallet for the payment asset and free capacity
            farm_assets = [w.asset for w in farm.wallet_addresses]
            if currencies[0] not in farm_assets:
                continue
            res = self.check_farm_capacity(farm.name, currencies)
            available = res[0]
            resources = res[1:]
            if available:
                available_farms[farm.name] = resources
                farms_by_name[farm.name] = farm
        farm_messages = {}
        for farm in available_farms:
            farm_assets = [w.asset for w in farms_by_name[farm].wallet_addresses]
            if currencies[0] not in farm_assets:
                continue
            resources = available_farms[farm]
            farm_obj = farms_by_name[farm]
            # human readable "continent-country-city" suffix when location is known
            location_list = [farm_obj.location.continent, farm_obj.location.country, farm_obj.location.city]
            location = "-".join([info for info in location_list if info])
            if location:
                location = f" location: {location}"
            farm_messages[
                f"{farm.capitalize()}{location}: CRU: {resources[0]} SRU: {resources[1]} HRU: {resources[2]} MRU {resources[3]}"
            ] = farm
        if not farm_messages:
            raise StopChatFlow(f"There are no farms available that the support {currencies[0]} currency")
        selected_farm = bot.drop_down_choice(
            "Please choose a farm to reserve capacity from. By reserving IT Capacity, you are purchasing the capacity from one of the farms. The available Resource Units (RU): CRU, MRU, HRU, SRU, NRU are displayed for you to make a more-informed decision on farm selection. ",
            list(farm_messages.keys()),
            required=True,
        )
        farm = farm_messages[selected_farm]
        try:
            pool_info = j.sals.zos.get().pools.create(cu, su, 0, farm, currencies)
        except Exception as e:
            raise StopChatFlow(f"failed to reserve pool.\n{str(e)}")
        qr_code = self.show_payment(pool_info, bot)
        # wait until the paid cu/su are credited to the pool
        self.wait_pool_payment(bot, pool_info.reservation_id, 10, qr_code, trigger_cus=cu, trigger_sus=su)
        return pool_info
    def extend_pool(self, bot, pool_id):
        """Ask for extra capacity, extend `pool_id` and wait for the payment.

        Raises:
            StopChatFlow: when the explorer rejects the extension.
        """
        cu, su, currencies = self._pool_form(bot)
        currencies = ["TFT"]
        try:
            pool_info = j.sals.zos.get().pools.extend(pool_id, cu, su, 0, currencies=currencies)
        except Exception as e:
            raise StopChatFlow(f"failed to extend pool.\n{str(e)}")
        qr_code = self.show_payment(pool_info, bot)
        pool = j.sals.zos.get().pools.get(pool_id)
        # NOTE(review): the conditional binds the whole expression, i.e.
        # (pool.cus + cu * 0.75) if cu else 0 -- confirm this precedence is intended
        trigger_cus = pool.cus + (cu * 0.75) if cu else 0
        trigger_sus = pool.sus + (su * 0.75) if su else 0
        self.wait_pool_payment(bot, pool_id, 10, qr_code, trigger_cus=trigger_cus, trigger_sus=trigger_sus)
        return pool_info
def check_farm_capacity(self, farm_name, currencies=None, sru=None, cru=None, mru=None, hru=None, ip_version=None):
node_filter = None
if j.core.config.get("OVER_PROVISIONING"):
cru = None
mru = None
if ip_version and ip_version not in ["IPv4", "IPv6"]:
raise j.exceptions.Runtime(f"{ip_version} is not a valid IP Version")
else:
if ip_version == "IPv4":
node_filter = j.sals.zos.get().nodes_finder.filter_public_ip4
elif ip_version == "IPv6":
node_filter = j.sals.zos.get().nodes_finder.filter_public_ip6
currencies = currencies or []
farm_nodes = j.sals.zos.get().nodes_finder.nodes_search(farm_name=farm_name)
available_cru = 0
available_sru = 0
available_mru = 0
available_hru = 0
running_nodes = 0
blocked_nodes = j.sals.reservation_chatflow.reservation_chatflow.list_blocked_nodes()
access_node = None
for node in farm_nodes:
if "FreeTFT" in currencies and not node.free_to_use:
continue
if not j.sals.zos.get().nodes_finder.filter_is_up(node):
continue
if node.node_id in blocked_nodes:
continue
if not access_node and ip_version and node_filter(node):
access_node = node
running_nodes += 1
available_cru += node.total_resources.cru - node.reserved_resources.cru
available_sru += node.total_resources.sru - node.reserved_resources.sru
available_mru += node.total_resources.mru - node.reserved_resources.mru
available_hru += node.total_resources.hru - node.reserved_resources.hru
if not running_nodes:
return False, available_cru, available_sru, available_mru, available_hru
if sru and available_sru < sru:
return False, available_cru, available_sru, available_mru, available_hru
if cru and available_cru < cru:
return False, available_cru, available_sru, available_mru, available_hru
if mru and available_mru < mru:
return False, available_cru, available_sru, available_mru, available_hru
if hru and available_hru < hru:
return False, available_cru, available_sru, available_mru, available_hru
if ip_version and not access_node:
return False, available_cru, available_sru, available_mru, available_hru
return True, available_cru, available_sru, available_mru, available_hru
    def show_payment(self, pool, bot):
        """Show payment options for a pool reservation and perform/await the payment.

        Offers the user's funded wallets plus an "External Wallet (QR Code)"
        option. Returns the QR code string when paying externally, otherwise
        None (also None when nothing is due).
        """
        escrow_info = pool.escrow_information
        resv_id = pool.reservation_id
        escrow_address = escrow_info.address
        escrow_asset = escrow_info.asset
        total_amount = escrow_info.amount
        if not total_amount:
            # nothing due for this reservation
            return
        # amount is in 1e-7 units (stroops); render as a plain decimal string
        total_amount_dec = Decimal(total_amount) / Decimal(1e7)
        total_amount = "{0:f}".format(total_amount_dec)
        wallets = j.sals.reservation_chatflow.reservation_chatflow.list_wallets()
        wallet_names = []
        for w in wallets.keys():
            wallet = j.clients.stellar.get(w)
            try:
                balances = wallet.get_balance().balances
            except:
                # unreachable wallet; skip it silently
                continue
            for balance in balances:
                # NOTE(review): substring check -- asset_code is matched inside the
                # escrow asset string (e.g. "TFT:<issuer>"); confirm this is intended
                if balance.asset_code in escrow_asset:
                    if float(balance.balance) > float(total_amount):
                        wallet_names.append(w)
                    else:
                        break
        wallet_names.append("External Wallet (QR Code)")
        self.msg_payment_info, qr_code = self.get_qr_code_payment_info(pool)
        message = f"""
<h3>Billing details:</h3><br>
{self.msg_payment_info}
<br><hr><br>
<h3> Choose a wallet name to use for payment or proceed with payment through External wallet (QR Code) </h3>
"""
        result = bot.single_choice(message, wallet_names, html=True, required=True)
        if result == "External Wallet (QR Code)":
            msg_text = f"""
<h3>Make a Payment</h3>
Scan the QR code with your wallet (do not change the message) or enter the information below manually and proceed with the payment. Make sure to put p-{resv_id} as memo_text value.
{self.msg_payment_info}
"""
            bot.qrcode_show(data=qr_code, msg=msg_text, scale=4, update=True, html=True)
        else:
            # pay directly from the selected local wallet; memo links the tx to the reservation
            wallet = wallets[result]
            wallet.transfer(
                destination_address=escrow_address, amount=total_amount, asset=escrow_asset, memo_text=f"p-{resv_id}"
            )
            return None
        return qr_code
def list_pools(self, cu=None, su=None):
all_pools = [p for p in j.sals.zos.get().pools.list() if p.node_ids]
available_pools = {}
for pool in all_pools:
hidden = False
name = ""
if f"pool_{pool.pool_id}" in pool_factory.list_all():
local_config = pool_factory.get(f"pool_{pool.pool_id}")
hidden = local_config.hidden
name = local_config.name
if hidden:
continue
res = self.check_pool_capacity(pool, cu, su)
available = res[0]
if available:
resources = res[1:]
if name:
resources += (name,)
available_pools[pool.pool_id] = resources
return available_pools
def check_pool_capacity(self, pool, cu=None, su=None):
available_su = pool.sus
available_cu = pool.cus
if pool.empty_at < 0:
return False, 0, 0
if cu and available_cu < cu:
return False, available_cu, available_su
if su and available_su < su:
return False, available_cu, available_su
if (cu or su) and pool.empty_at < j.data.time.now().timestamp:
return False, 0, 0
return True, available_cu, available_su
    def select_pool(
        self, bot, cu=None, su=None, sru=None, mru=None, hru=None, cru=None, available_pools=None, workload_name=None
    ):
        """Let the user pick a pool that satisfies the capacity requirements; return its pool id.

        Raises:
            StopChatFlow: when no pool (or no node in a pool's farm) has enough capacity.
        """
        # NOTE(review): reads j.config.get while check_farm_capacity reads
        # j.core.config.get -- confirm both resolve to the same config store
        if j.config.get("OVER_PROVISIONING"):
            cru = 0
            mru = 0
        available_pools = available_pools or self.list_pools(cu, su)
        if not available_pools:
            raise StopChatFlow("no available pools with enough capacity for your workload")
        pool_messages = {}
        for pool in available_pools:
            # skip pools whose farms have no single node matching the per-node resource ask
            nodes = j.sals.zos.get().nodes_finder.nodes_by_capacity(pool_id=pool, sru=sru, mru=mru, hru=hru, cru=cru)
            if not nodes:
                continue
            pool_msg = f"Pool: {pool} cu: {available_pools[pool][0]} su:" f" {available_pools[pool][1]}"
            if len(available_pools[pool]) > 2:
                pool_msg += f" Name: {available_pools[pool][2]}"
            pool_messages[pool_msg] = pool
        if not pool_messages:
            raise StopChatFlow("no available resources in the farms bound to your pools")
        msg = "Please select a pool"
        if workload_name:
            msg += f" for {workload_name}"
        pool = bot.drop_down_choice(msg, list(pool_messages.keys()), required=True)
        return pool_messages[pool]
def get_pool_farm_id(self, pool_id=None, pool=None):
pool = pool or j.sals.zos.get().pools.get(pool_id)
pool_id = pool.pool_id
if not pool.node_ids:
raise StopChatFlow(f"Pool {pool_id} doesn't contain any nodes")
farm_id = None
while not farm_id:
for node_id in pool.node_ids:
try:
node = self._explorer.nodes.get(node_id)
farm_id = node.farm_id
break
except requests.exceptions.HTTPError:
continue
return farm_id or -1
def ask_name(self, bot, msg=None):
msg = (
msg
or "Please enter a name for your workload (Can be used to prepare domain for you and needed to track your solution on the grid)"
)
name = bot.string_ask(msg, required=True, field="name", is_identifier=True)
return name
def ask_email(self, bot):
valid = False
email = None
regex = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$"
while not valid:
email = bot.string_ask("Please enter your email address", required=True, field="email")
valid = re.search(regex, email) is not None
if not valid:
bot.md_show("Please enter a valid email address")
return email
def ask_ipv6(self, bot, workload_name=None):
workload_name = workload_name or "your workload"
ipv6 = bot.single_choice(
f"Do you want to assign a global IPv6 address to {workload_name}?",
options=["YES", "NO"],
default="NO",
required=True,
)
return ipv6 == "YES"
def encrypt_metadata(self, metadata, identity_name=None):
if isinstance(metadata, dict):
metadata = j.data.serializers.json.dumps(metadata)
identity_name = identity_name or j.core.identity.me.instance_name
pk = j.core.identity.get(identity_name).nacl.signing_key.verify_key.to_curve25519_public_key()
sk = j.core.identity.get(identity_name).nacl.signing_key.to_curve25519_private_key()
box = Box(sk, pk)
encrypted_metadata = base64.b85encode(box.encrypt(metadata.encode())).decode()
return encrypted_metadata
    def deploy_network(self, name, access_node, ip_range, ip_version, pool_id, identity_name=None, **metadata):
        """Create a new network with one access node and deploy its resources.

        Writes the generated wireguard config under <CFGDIR>/wireguard/ and
        returns {"wg": wg-quick config, "ids": [workload ids], "rid": first id}.
        """
        identity_name = identity_name or j.core.identity.me.instance_name
        network = j.sals.zos.get(identity_name).network.create(ip_range, name)
        node_subnets = netaddr.IPNetwork(ip_range).subnet(24)
        network_config = dict()
        use_ipv4 = ip_version == "IPv4"
        # first /24 goes to the access node, the next one to the external (wireguard) peer
        j.sals.zos.get(identity_name).network.add_node(network, access_node.node_id, str(next(node_subnets)), pool_id)
        wg_quick = j.sals.zos.get(identity_name).network.add_access(
            network, access_node.node_id, str(next(node_subnets)), ipv4=use_ipv4
        )
        network_config["wg"] = wg_quick
        wg_dir = j.sals.fs.join_paths(j.core.dirs.CFGDIR, "wireguard")
        j.sals.fs.mkdirs(wg_dir)
        j.sals.fs.write_file(j.sals.fs.join_paths(wg_dir, f"{identity_name}_{name}.conf"), wg_quick)
        ids = []
        parent_id = None
        # deploy resources as a chain: each records its predecessor as parent_id
        for workload in network.network_resources:
            workload.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
            metadata["parent_network"] = parent_id
            workload.info.metadata = self.encrypt_metadata(metadata, identity_name)
            ids.append(j.sals.zos.get(identity_name).workloads.deploy(workload))
            parent_id = ids[-1]
        network_config["ids"] = ids
        network_config["rid"] = ids[0]
        return network_config
    def add_access(
        self,
        network_name,
        network_view=None,
        node_id=None,
        pool_id=None,
        use_ipv4=True,
        bot=None,
        identity_name=None,
        **metadata,
    ):
        """Add an external (wireguard) access point to an existing network and redeploy it.

        Returns {"ids": [deployed workload ids], "wg": wg-quick config, "rid": first id}.
        """
        identity_name = identity_name or j.core.identity.me.instance_name
        network_view = network_view or NetworkView(network_name, identity_name=identity_name)
        network, wg = network_view.add_access(node_id, use_ipv4, pool_id)
        result = {"ids": [], "wg": wg}
        node_workloads = {}
        # deploy only latest resource generated by zos sal for each node
        owner = None
        node_metadata = defaultdict(dict)  # node_id: metadata dict
        for workload in network.network_resources:
            node_workloads[workload.info.node_id] = workload
            decrypted_metadata = self.decrypt_metadata(workload.info.metadata, identity_name)
            metadata_dict = j.data.serializers.json.loads(decrypted_metadata)
            node_metadata[workload.info.node_id].update(metadata_dict)
            if not owner and metadata_dict.get("owner"):
                owner = metadata_dict["owner"]
        # keep the original owner on the redeployed resources
        if owner and "owner" not in metadata:
            metadata["owner"] = owner
        # verify the nodes can actually deploy before touching the real network
        dry_run_name = uuid.uuid4().hex
        with NetworkView.dry_run_context(dry_run_name, identity_name):
            network_view.dry_run(
                dry_run_name,
                list(node_workloads.keys()),
                [w.info.pool_id for w in node_workloads.values()],
                bot,
                breaking_node_ids=[node_id],
            )
        parent_id = network_view.network_workloads[-1].id
        for resource in node_workloads.values():
            resource.info.reference = ""
            resource.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
            metadata["parent_network"] = parent_id
            old_metadata = node_metadata.get(resource.info.node_id, {})
            old_metadata.pop("parent_network", None)
            metadata.update(old_metadata)
            resource.info.metadata = self.encrypt_metadata(metadata, identity_name)
            result["ids"].append(j.sals.zos.get(identity_name).workloads.deploy(resource))
            parent_id = result["ids"][-1]
        result["rid"] = result["ids"][0]
        return result
def delete_access(
self, network_name, iprange, network_view=None, node_id=None, bot=None, identity_name=None, **metadata
):
network_view = network_view or NetworkView(network_name)
network = network_view.delete_access(iprange, node_id)
node_workloads = {}
# deploy only latest resource generated by zos sal for each node
owner = None
node_metadata = defaultdict(dict)
for workload in network.network_resources:
node_workloads[workload.info.node_id] = workload
decrypted_metadata = self.decrypt_metadata(workload.info.metadata, identity_name)
metadata_dict = j.data.serializers.json.loads(decrypted_metadata)
node_metadata[workload.info.node_id].update(metadata_dict)
if not owner and metadata_dict.get("owner"):
owner = metadata_dict["owner"]
if owner and "owner" not in metadata:
metadata["owner"] = owner
dry_run_name = uuid.uuid4().hex
with NetworkView.dry_run_context(dry_run_name, identity_name):
network_view.dry_run(
dry_run_name,
list(node_workloads.keys()),
[w.info.pool_id for w in node_workloads.values()],
bot,
breaking_node_ids=[node_id],
)
parent_id = network_view.network_workloads[-1].id
result = []
for resource in node_workloads.values():
resource.info.reference = ""
resource.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
metadata["parent_network"] = parent_id
old_metadata = node_metadata.get(resource.info.node_id, {})
old_metadata.pop("parent_network", None)
metadata.update(old_metadata)
resource.info.metadata = self.encrypt_metadata(metadata, identity_name)
result.append(j.sals.zos.get().workloads.deploy(resource))
parent_id = result[-1]
return result
    def wait_workload(self, workload_id, bot=None, expiry=10, breaking_node_id=None, identity_name=None):
        """Block until workload `workload_id` succeeds, fails, or times out.

        Returns True on success (or when the failure can safely be ignored for
        network resources on non-breaking nodes), False when the workload
        reports an error result, and raises StopChatFlow when the node is down
        or the `expiry` window (minutes) elapses.
        """
        j.logger.info(f"waiting workload {workload_id} to finish deployment")
        expiry = expiry or 10
        expiration_provisioning = j.data.time.now().timestamp + expiry * 60
        workload = j.sals.zos.get(identity_name).workloads.get(workload_id)
        # if the workload is network and it is not a breaking node, skip if the node is blocked
        if (
            workload.info.workload_type == WorkloadType.Network_resource
            and workload.info.node_id in j.sals.reservation_chatflow.reservation_chatflow.list_blocked_nodes()
        ):
            if workload.info.node_id != breaking_node_id:
                return True
        # gateway workloads live on gateway nodes, everything else on regular nodes
        if workload.info.workload_type in GATEWAY_WORKLOAD_TYPES:
            node = j.sals.zos.get(identity_name)._explorer.gateway.get(workload.info.node_id)
        else:
            node = j.sals.zos.get(identity_name)._explorer.nodes.get(workload.info.node_id)
        # check if the node is up
        if not j.sals.zos.get(identity_name).nodes_finder.filter_is_up(node):
            cancel = True
            if breaking_node_id and breaking_node_id == node.node_id:
                # if the node is down and it is the same as breaking_node_id
                if workload.info.workload_type == WorkloadType.Network_resource:
                    # if the workload is a newtork we don't cancel it
                    cancel = False
            # the node is down but it is not a breaking node_id
            elif workload.info.workload_type == WorkloadType.Network_resource:
                # if the workload is network we can overlook it
                return True
            if cancel:
                j.sals.reservation_chatflow.solutions.cancel_solution([workload_id], identity_name)
            raise StopChatFlow(f"Workload {workload_id} failed to deploy because the node is down {node.node_id}")
        # wait for workload
        while True:
            workload = j.sals.zos.get(identity_name).workloads.get(workload_id)
            # computed but currently unused (kept for potential progress display)
            remaning_time = j.data.time.get(expiration_provisioning).humanize(granularity=["minute", "second"])
            if bot:
                deploying_message = f"""\
# Deploying...
<br />Workload ID: {workload_id}
Deployment should take around 2 to 3 minutes, but might take longer and will be cancelled if it is not successful in 10 mins
"""
                bot.md_show_update(dedent(deploying_message), md=True)
            if workload.info.result.workload_id:
                # the node reported a result: state value 1 means success
                success = workload.info.result.state.value == 1
                if not success:
                    error_message = workload.info.result.message
                    msg = f"Workload {workload.id} failed to deploy due to error {error_message}. For more details: {j.core.identity.me.explorer_url}/reservations/workloads/{workload.id}"
                    j.logger.error(msg)
                    j.tools.alerthandler.alert_raise(
                        app_name="chatflows", category="internal_errors", message=msg, alert_type="exception"
                    )
                elif workload.info.workload_type != WorkloadType.Network_resource:
                    # successful non-network workload: clear any block on the node
                    j.sals.reservation_chatflow.reservation_chatflow.unblock_node(workload.info.node_id)
                return success
            if expiration_provisioning < j.data.time.get().timestamp:
                # timed out waiting: block the node, cancel non-network workloads
                j.sals.reservation_chatflow.reservation_chatflow.block_node(workload.info.node_id)
                if workload.info.workload_type != WorkloadType.Network_resource:
                    j.sals.reservation_chatflow.solutions.cancel_solution([workload_id], identity_name)
                elif breaking_node_id and workload.info.node_id != breaking_node_id:
                    return True
                raise StopChatFlow(f"Workload {workload_id} failed to deploy in time")
            gevent.sleep(1)
    def add_network_node(self, name, node, pool_id, network_view=None, bot=None, identity_name=None, **metadata):
        """Attach `node` to network `name` and redeploy the affected network resources.

        Returns {"ids": [...], "rid": first id}, or None when the node is
        already part of the network.
        """
        identity_name = identity_name or j.core.identity.me.instance_name
        if not network_view:
            network_view = NetworkView(name, identity_name=identity_name)
        network = network_view.add_node(node, pool_id)
        if not network:
            # node already attached; nothing to deploy
            return
        parent_id = network_view.network_workloads[-1].id
        ids = []
        node_workloads = {}
        # deploy only latest resource generated by zos sal for each node
        owner = None
        node_metadata = defaultdict(dict)
        for workload in network.network_resources:
            node_workloads[workload.info.node_id] = workload
            decrypted_metadata = self.decrypt_metadata(workload.info.metadata, identity_name)
            metadata_dict = j.data.serializers.json.loads(decrypted_metadata)
            node_metadata[workload.info.node_id].update(metadata_dict)
            if not owner and metadata_dict.get("owner"):
                owner = metadata_dict["owner"]
        # keep the original owner on the redeployed resources
        if owner and "owner" not in metadata:
            metadata["owner"] = owner
        # verify the nodes can actually deploy before touching the real network
        dry_run_name = uuid.uuid4().hex
        with NetworkView.dry_run_context(dry_run_name, identity_name):
            network_view.dry_run(
                dry_run_name,
                list(node_workloads.keys()),
                [w.info.pool_id for w in node_workloads.values()],
                bot,
                breaking_node_ids=[node.node_id],
            )
        for workload in node_workloads.values():
            workload.info.reference = ""
            workload.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
            metadata["parent_network"] = parent_id
            old_metadata = node_metadata.get(workload.info.node_id, {})
            old_metadata.pop("parent_network", None)
            metadata.update(old_metadata)
            workload.info.metadata = self.encrypt_metadata(metadata, identity_name)
            ids.append(j.sals.zos.get(identity_name).workloads.deploy(workload))
            parent_id = ids[-1]
        return {"ids": ids, "rid": ids[0]}
def add_multiple_network_nodes(self, name, node_ids, pool_ids, network_view=None, bot=None, **metadata):
    """Add several nodes to the network called *name* in one pass and deploy the result.

    Args:
        name: network name.
        node_ids: node ids to attach.
        pool_ids: pool ids matching *node_ids* one-to-one.
        network_view: optional pre-built NetworkView; fetched by name when omitted.
        bot: optional chatflow session, forwarded to the dry run for user feedback.
        **metadata: metadata encrypted into every re-deployed workload.

    Returns:
        dict: ``{"ids": [...], "rid": first id}``, or None when nothing needs deploying.
    """
    # NOTE(review): unlike add_network_node, this variant has no identity_name
    # support and does not preserve each node's previous metadata — confirm
    # whether the two should be kept consistent.
    if not network_view:
        network_view = NetworkView(name)
    network = network_view.add_multiple_nodes(node_ids, pool_ids)
    if not network:
        # all requested nodes are already part of the network
        return
    parent_id = network_view.network_workloads[-1].id
    ids = []
    node_workloads = {}
    # deploy only latest resource generated by zos sal for each node
    for workload in network.network_resources:
        node_workloads[workload.info.node_id] = workload
    dry_run_name = uuid.uuid4().hex
    # validate the new layout before deploying the real workloads
    with NetworkView.dry_run_context(dry_run_name):
        network_view.dry_run(
            dry_run_name,
            list(node_workloads.keys()),
            [w.info.pool_id for w in node_workloads.values()],
            bot,
            breaking_node_ids=node_ids,
        )
    for workload in node_workloads.values():
        workload.info.reference = ""
        # chain each resource to the previously deployed one
        workload.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
        metadata["parent_network"] = parent_id
        workload.info.metadata = self.encrypt_metadata(metadata)
        ids.append(j.sals.zos.get().workloads.deploy(workload))
        parent_id = ids[-1]
    return {"ids": ids, "rid": ids[0]}
def select_network(self, bot, network_views=None):
    """Prompt the user to pick one of their deployed networks.

    Args:
        bot: chatflow session used to ask the question.
        network_views: optional mapping of network name -> NetworkView;
            defaults to all networks owned by the current identity.

    Returns:
        The NetworkView the user selected.

    Raises:
        StopChatFlow: when no networks are deployed.
    """
    network_views = network_views or self.list_networks()
    if not network_views:
        # fixed: message had an f-prefix with no placeholders (F541)
        raise StopChatFlow("You don't have any deployed network.")
    network_name = bot.single_choice("Please select a network", list(network_views.keys()), required=True)
    return network_views[network_name]
def deploy_volume(self, pool_id, node_id, size, volume_type=DiskType.SSD, **metadata):
    """Reserve a disk volume of *size* GB on *node_id* in *pool_id* and deploy it.

    Returns:
        The deployed workload id.
    """
    volume_workload = j.sals.zos.get().volume.create(node_id, pool_id, size, volume_type)
    if metadata:
        volume_workload.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(volume_workload)
def deploy_container(
    self,
    pool_id,
    node_id,
    network_name,
    ip_address,
    flist,
    env=None,
    cpu=1,
    memory=1024,
    disk_size=256,
    disk_type=DiskType.SSD,
    entrypoint="",
    interactive=False,
    secret_env=None,
    volumes=None,
    log_config=None,
    public_ipv6=False,
    identity_name=None,
    **metadata,
):
    """Create and deploy a container workload, optionally attaching volumes and logs.

    volumes: dict {"mountpoint (/)": volume_id}
    log_Config: dict. keys ("channel_type", "channel_host", "channel_port", "channel_name")

    Args:
        pool_id / node_id: capacity pool and target node.
        network_name / ip_address: network to join and the container's IP in it.
        flist: flist URL to boot.
        secret_env: plain values; each is encrypted for the target node before deploy.
        **metadata: encrypted into the workload's metadata when non-empty.

    Returns:
        The deployed workload id.
    """
    # NOTE(review): `disk_type` is accepted but never forwarded to
    # container.create below — confirm whether that is intentional.
    identity_name = identity_name or j.core.identity.me.instance_name
    env = env or {}
    encrypted_secret_env = {}
    if secret_env:
        # secrets are encrypted per-node so only the target node can read them
        for key, val in secret_env.items():
            encrypted_secret_env[key] = j.sals.zos.get(identity_name).container.encrypt_secret(node_id, val)
    container = j.sals.zos.get(identity_name).container.create(
        node_id,
        network_name,
        ip_address,
        flist,
        pool_id,
        env,
        cpu,
        memory,
        disk_size,
        entrypoint,
        interactive,
        encrypted_secret_env,
        public_ipv6=public_ipv6,
    )
    if volumes:
        # "-1" suffix: volume workloads expose their first (only) volume
        for mount_point, vol_id in volumes.items():
            j.sals.zos.get(identity_name).volume.attach_existing(container, f"{vol_id}-1", mount_point)
    if metadata:
        container.info.metadata = self.encrypt_metadata(metadata, identity_name=identity_name)
    if log_config:
        j.sals.zos.get(identity_name).container.add_logs(container, **log_config)
    return j.sals.zos.get(identity_name).workloads.deploy(container)
def ask_container_resources(
    self,
    bot,
    cpu=True,
    memory=True,
    disk_size=True,
    disk_type=False,
    default_cpu=1,
    default_memory=1024,
    default_disk_size=256,
    default_disk_type="SSD",
):
    """Collect container sizing from the user in a single form.

    Only the resources whose flag is True are asked for.

    Returns:
        dict: with keys among "cpu", "memory", "disk_size", "disk_type".
    """
    form = bot.new_form()
    cpu_field = memory_field = disk_size_field = disk_type_field = None
    if cpu:
        cpu_field = form.int_ask("Please specify how many CPUs", default=default_cpu, required=True, min=1)
    if memory:
        memory_field = form.int_ask(
            "Please specify how much memory (in MB)", default=default_memory, required=True, min=1024
        )
    if disk_size:
        disk_size_field = form.int_ask(
            "Please specify the size of root filesystem (in MB)", default=default_disk_size, required=True
        )
    if disk_type:
        disk_type_field = form.single_choice(
            "Please choose the root filesystem disktype", ["SSD", "HDD"], default=default_disk_type, required=True
        )
    form.ask()
    resources = {}
    if cpu_field is not None:
        resources["cpu"] = cpu_field.value
    if memory_field is not None:
        resources["memory"] = memory_field.value
    if disk_size_field is not None:
        resources["disk_size"] = disk_size_field.value
    if disk_type_field is not None:
        resources["disk_type"] = DiskType[disk_type_field.value]
    return resources
def ask_container_logs(self, bot, solution_name=None):
    """Ask the user for log-forwarding configuration (redis-style channel).

    Returns:
        dict: keys "channel_type", "channel_host", "channel_port", "channel_name".
    """
    form = bot.new_form()
    type_field = form.string_ask("Please add the channel type", default="redis", required=True)
    host_field = form.string_ask("Please add the IP address where the logs will be output to", required=True)
    port_field = form.int_ask("Please add the port available where the logs will be output to", required=True)
    name_field = form.string_ask(
        "Please add the channel name to be used. The channels will be in the form" " NAME-stdout and NAME-stderr",
        default=solution_name,
        required=True,
    )
    form.ask()
    return {
        "channel_type": type_field.value,
        "channel_host": host_field.value,
        "channel_port": port_field.value,
        "channel_name": name_field.value,
    }
def schedule_container(self, pool_id, cru=None, sru=None, mru=None, hru=None, ip_version=None):
    """Automatically pick a single node in *pool_id* matching the capacity query."""
    capacity_query = {"cru": cru, "sru": sru, "mru": mru, "hru": hru, "ip_version": ip_version}
    candidates = j.sals.reservation_chatflow.reservation_chatflow.get_nodes(1, pool_ids=[pool_id], **capacity_query)
    return candidates[0]
def ask_container_placement(
    self,
    bot,
    pool_id,
    cru=None,
    sru=None,
    mru=None,
    hru=None,
    ip_version=None,
    free_to_use=False,
    workload_name=None,
):
    """Let the user pick a deployment node, or return None for automatic selection.

    Returns:
        The chosen node object, or None when the user opted for automatic placement.

    Raises:
        StopChatFlow: when no eligible (non-blocked) node matches the query.
    """
    workload_name = workload_name or "your workload"
    automatic_choice = bot.single_choice(
        "Do you want to automatically select a node for deployment for" f" {workload_name}?",
        ["YES", "NO"],
        default="YES",
        required=True,
    )
    if automatic_choice == "YES":
        return None
    if j.config.get("OVER_PROVISIONING"):
        # over-provisioning ignores cpu/memory constraints
        cru = 0
        mru = 0
    candidates = j.sals.zos.get().nodes_finder.nodes_by_capacity(pool_id=pool_id, cru=cru, sru=sru, mru=mru, hru=hru)
    candidates = j.sals.reservation_chatflow.reservation_chatflow.filter_nodes(
        list(candidates), free_to_use, ip_version
    )
    blocked_nodes = j.sals.reservation_chatflow.reservation_chatflow.list_blocked_nodes()
    choices = {candidate.node_id: candidate for candidate in candidates if candidate.node_id not in blocked_nodes}
    if not choices:
        raise StopChatFlow("Failed to find resources for this reservation")
    node_id = bot.drop_down_choice(
        f"Please choose the node you want to deploy {workload_name} on", list(choices.keys()), required=True
    )
    return choices[node_id]
def calculate_capacity_units(self, cru=0, mru=0, sru=0, hru=0):
    """Convert raw resource units into grid billing units.

    Args:
        cru / mru / sru / hru: cores, memory (GB), SSD (GB), HDD (GB).

    Returns:
        tuple: (cu, su) compute and storage units, each clamped to >= 0.
    """
    compute_units = min((mru - 1) / 4, cru * 4 / 2)
    storage_units = (hru / 1000 + sru / 100 / 2) / 1.2
    return max(compute_units, 0), max(storage_units, 0)
def get_network_view(self, network_name, workloads=None, identity_name=None):
    """Build a NetworkView for *network_name*, optionally from pre-fetched workloads."""
    view = NetworkView(network_name, workloads, identity_name=identity_name)
    return view
def delegate_domain(self, pool_id, gateway_id, domain_name, **metadata):
    """Delegate *domain_name* to the gateway's nameservers and deploy the workload.

    Returns:
        The deployed workload id.
    """
    delegation = j.sals.zos.get().gateway.delegate_domain(gateway_id, domain_name, pool_id)
    if metadata:
        delegation.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(delegation)
def deploy_kubernetes_master(
    self, pool_id, node_id, network_name, cluster_secret, ssh_keys, ip_address, size=1, **metadata
):
    """Reserve a kubernetes master VM on *node_id* and deploy it.

    Returns:
        The deployed workload id.
    """
    workload = j.sals.zos.get().kubernetes.add_master(
        node_id, network_name, cluster_secret, ip_address, size, ssh_keys, pool_id
    )
    # role tag lets solution listings distinguish masters from workers
    workload.info.description = j.data.serializers.json.dumps({"role": "master"})
    if metadata:
        workload.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(workload)
def deploy_kubernetes_worker(
    self, pool_id, node_id, network_name, cluster_secret, ssh_keys, ip_address, master_ip, size=1, **metadata
):
    """Reserve a kubernetes worker VM joining the master at *master_ip* and deploy it.

    Returns:
        The deployed workload id.
    """
    workload = j.sals.zos.get().kubernetes.add_worker(
        node_id, network_name, cluster_secret, ip_address, size, master_ip, ssh_keys, pool_id
    )
    # role tag lets solution listings distinguish masters from workers
    workload.info.description = j.data.serializers.json.dumps({"role": "worker"})
    if metadata:
        workload.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(workload)
def deploy_kubernetes_cluster(
    self,
    pool_id,
    node_ids,
    network_name,
    cluster_secret,
    ssh_keys,
    size=1,
    ip_addresses=None,
    slave_pool_ids=None,
    **metadata,
):
    """
    deploy k8s cluster with the same number of nodes as specified in node_ids

    Args:
        pool_id: this one is always used for master.
        node_ids: list() of node ids to deploy on. first node_id is used for master reservation
        ip_addresses: if specified it will be mapped 1-1 with node_ids for workloads. if not specified it will choose any free_ip from the node
        slave_pool_ids: if specified, k8s workers will deployed on each of these pools respectively. if empty it will use the master pool_id

    Return:
        list: [{"node_id": "ip_address"}, ...] first dict is master's result
    """
    slave_pool_ids = slave_pool_ids or ([pool_id] * (len(node_ids) - 1))
    pool_ids = [pool_id] + slave_pool_ids
    result = []  # [{"node_id": id, "ip_address": ip, "reservation_id": 16}] first dict is master's result
    if ip_addresses and len(ip_addresses) != len(node_ids):
        raise StopChatFlow("length of ips != node_ids")
    if not ip_addresses:
        # get free_ips for the nodes
        ip_addresses = []
        for i in range(len(node_ids)):
            node_id = node_ids[i]
            pool_id = pool_ids[i]
            node = self._explorer.nodes.get(node_id)
            # attach the node to the network first so a free IP exists on it
            res = self.add_network_node(network_name, node, pool_id)
            if res:
                for wid in res["ids"]:
                    success = self.wait_workload(wid, breaking_node_id=node.node_id)
                    if not success:
                        raise StopChatFlow(f"Failed to add node {node.node_id} to network {wid}")
            # re-fetch the view so newly deployed network resources are visible
            network_view = NetworkView(network_name)
            address = network_view.get_free_ip(node)
            if not address:
                raise StopChatFlow(f"No free IPs for network {network_name} on the specifed node" f" {node_id}")
            ip_addresses.append(address)
    # deploy_master
    master_ip = ip_addresses[0]
    master_resv_id = self.deploy_kubernetes_master(
        pool_ids[0], node_ids[0], network_name, cluster_secret, ssh_keys, master_ip, size, **metadata
    )
    result.append({"node_id": node_ids[0], "ip_address": master_ip, "reservation_id": master_resv_id})
    # remaining nodes join the master as workers
    for i in range(1, len(node_ids)):
        node_id = node_ids[i]
        pool_id = pool_ids[i]
        ip_address = ip_addresses[i]
        resv_id = self.deploy_kubernetes_worker(
            pool_id, node_id, network_name, cluster_secret, ssh_keys, ip_address, master_ip, size, **metadata
        )
        result.append({"node_id": node_id, "ip_address": ip_address, "reservation_id": resv_id})
    return result
def ask_multi_pool_placement(
    self, bot, number_of_nodes, resource_query_list=None, pool_ids=None, workload_names=None, ip_version=None
):
    """
    Ask and schedule workloads across multiple pools

    Args:
        bot: chatflow object
        number_of_nodes: number of required nodes for deployment
        resource_query_list: list of query dicts {"cru": 1, "sru": 2, "mru": 1, "hru": 1}. if specified it must be same length as number_of_nodes
        pool_ids: if specified it will limit the pools shown in the chatflow to only these pools
        workload_names: if specified they will be shown when asking the user for node selection for each workload. if specified it must be same length as number_of_nodes
        ip_version: optional IP version filter forwarded to node selection

    Returns:
        ([], []): first list contains the selected node objects. second list contains selected pool ids
    """
    resource_query_list = resource_query_list or [dict()] * number_of_nodes
    workload_names = workload_names or [None] * number_of_nodes
    if len(resource_query_list) != number_of_nodes:
        raise StopChatFlow("resource query_list must be same length as number of nodes")
    if len(workload_names) != number_of_nodes:
        raise StopChatFlow("workload_names must be same length as number of nodes")
    pools = self.list_pools()
    if pool_ids:
        # restrict to the explicitly allowed pools
        filtered_pools = {}
        for pool_id in pools:
            if pool_id in pool_ids:
                filtered_pools[pool_id] = pools[pool_id]
        pools = filtered_pools
    selected_nodes = []
    selected_pool_ids = []
    for i in range(number_of_nodes):
        cu, su = self.calculate_capacity_units(**resource_query_list[i])
        pool_choices = {}
        for p in pools:
            # skip pools lacking enough compute/storage units for this workload
            if pools[p][0] < cu or pools[p][1] < su:
                continue
            nodes = j.sals.zos.get().nodes_finder.nodes_by_capacity(pool_id=p, **resource_query_list[i])
            # NOTE(review): if nodes_by_capacity returns a lazy iterator, this
            # emptiness check never triggers (iterators are always truthy) — confirm.
            if not nodes:
                continue
            pool_choices[p] = pools[p]
        pool_id = self.select_pool(bot, available_pools=pool_choices, workload_name=workload_names[i], cu=cu, su=su)
        node = self.ask_container_placement(
            bot, pool_id, workload_name=workload_names[i], ip_version=ip_version, **resource_query_list[i]
        )
        if not node:
            # user chose automatic placement
            node = self.schedule_container(pool_id, ip_version=ip_version, **resource_query_list[i])
        selected_nodes.append(node)
        selected_pool_ids.append(pool_id)
    return selected_nodes, selected_pool_ids
def list_pool_gateways(self, pool_id):
    """
    return dict of gateways where keys are descriptive string of each gateway

    Raises:
        StopChatFlow: when the pool's farm cannot be resolved or has no gateways.
    """
    pool = j.sals.zos.get().pools.get(pool_id)
    farm_id = self.get_pool_farm_id(pool_id)
    if farm_id < 0:
        raise StopChatFlow(f"no available gateways in pool {pool_id} farm: {farm_id}")
    gateways = self._explorer.gateway.list(farm_id=farm_id)
    if not gateways:
        raise StopChatFlow(f"no available gateways in pool {pool_id} farm: {farm_id}")
    result = {}
    for gateway in gateways:
        # only gateways with a nameserver and belonging to this pool are usable
        if not gateway.dns_nameserver or gateway.node_id not in pool.node_ids:
            continue
        description = (
            f"{gateway.dns_nameserver[0]} {gateway.location.continent} {gateway.location.country}"
            f" {gateway.node_id}"
        )
        result[description] = gateway
    return result
def list_all_gateways(self, pool_ids=None, identity_name=None):
    """
    Args:
        pool_ids: if specified it will only list gateways inside these pools
        identity_name: identity used for explorer/pool calls (None = default identity)

    Returns:
        dict: {"gateway_message": {"gateway": g, "pool": pool},}

    Raises:
        StopChatFlow: when no up gateway exists, or none falls inside the user's pools.
    """
    identity_name = identity_name or j.core.identity.me.instance_name
    # materialize the filter: a filter object is always truthy, so the
    # previous `if not all_gateways` emptiness check could never fire
    all_gateways = list(
        filter(j.sals.zos.get(identity_name).nodes_finder.filter_is_up, self._explorer.gateway.list())
    )
    if not all_gateways:
        raise StopChatFlow("no available gateways")
    all_pools = [p for p in j.sals.zos.get(identity_name).pools.list() if p.node_ids]
    available_node_ids = {}  # node_id: pool
    if pool_ids is not None:
        for pool in all_pools:
            if pool.pool_id in pool_ids:
                available_node_ids.update({node_id: pool for node_id in pool.node_ids})
    else:
        for pool in all_pools:
            available_node_ids.update({node_id: pool for node_id in pool.node_ids})
    result = {}
    for gateway in all_gateways:
        if gateway.node_id in available_node_ids:
            if not gateway.dns_nameserver:
                continue
            pool = available_node_ids[gateway.node_id]
            # per-pool local config may hide the pool or give it a display name
            hidden = False
            name = ""
            if f"pool_{pool.pool_id}" in pool_factory.list_all():
                local_config = pool_factory.get(f"pool_{pool.pool_id}")
                hidden = local_config.hidden
                name = local_config.name
            if hidden:
                continue
            if name:
                message = (
                    f"Pool: {pool.pool_id} Name: {name} {gateway.dns_nameserver[0]}"
                    f" {gateway.location.continent} {gateway.location.country}"
                    f" {gateway.node_id}"
                )
            else:
                message = (
                    f"Pool: {pool.pool_id} {gateway.dns_nameserver[0]}"
                    f" {gateway.location.continent} {gateway.location.country}"
                    f" {gateway.node_id}"
                )
            result[message] = {"gateway": gateway, "pool": pool}
    if not result:
        raise StopChatFlow("no gateways available in your pools")
    return result
def select_gateway(self, bot, pool_ids=None):
    """
    Ask the user to pick one of the available gateways.

    Args:
        pool_ids: if specified it will only list gateways inside these pools

    Returns:
        tuple: (gateway, pool) for the chosen entry.
    """
    gateways = self.list_all_gateways(pool_ids)
    choice = bot.single_choice("Please select a gateway", list(gateways.keys()), required=True)
    entry = gateways[choice]
    return entry["gateway"], entry["pool"]
def create_ipv6_gateway(self, gateway_id, pool_id, public_key, **metadata):
    """Create a 4-to-6 gateway workload for *public_key* and deploy it.

    Returns:
        The deployed workload id.
    """
    # the explorer expects the wireguard public key as text
    if isinstance(public_key, bytes):
        public_key = public_key.decode()
    gateway_workload = j.sals.zos.get().gateway.gateway_4to6(gateway_id, public_key, pool_id)
    if metadata:
        gateway_workload.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(gateway_workload)
def deploy_zdb(self, pool_id, node_id, size, mode, password, disk_type="SSD", public=False, **metadata):
    """Create a 0-db namespace workload on *node_id* and deploy it.

    Returns:
        The deployed workload id.
    """
    namespace = j.sals.zos.get().zdb.create(node_id, size, mode, password, pool_id, disk_type, public)
    if metadata:
        namespace.info.metadata = self.encrypt_metadata(metadata)
    return j.sals.zos.get().workloads.deploy(namespace)
def create_subdomain(self, pool_id, gateway_id, subdomain, addresses=None, identity_name=None, **metadata):
    """
    creates an A record pointing to the specified addresses
    if no addresses are specified, the record will point the gateway IP address (used for exposing solutions)

    Returns:
        The deployed workload id.
    """
    identity_name = identity_name or j.core.identity.me.instance_name
    if not addresses:
        # default to the gateway's own nameserver IPs
        gateway = self._explorer.gateway.get(gateway_id)
        addresses = [j.sals.nettools.get_host_by_name(ns) for ns in gateway.dns_nameserver]
    record = j.sals.zos.get(identity_name).gateway.sub_domain(gateway_id, subdomain, addresses, pool_id)
    if metadata:
        record.info.metadata = self.encrypt_metadata(metadata, identity_name)
    return j.sals.zos.get(identity_name).workloads.deploy(record)
def create_proxy(self, pool_id, gateway_id, domain_name, trc_secret, identity_name=None, **metadata):
    """
    creates a reverse tunnel on the gateway node

    Returns:
        The deployed workload id.
    """
    identity_name = identity_name or j.core.identity.me.instance_name
    proxy = j.sals.zos.get(identity_name).gateway.tcp_proxy_reverse(gateway_id, domain_name, trc_secret, pool_id)
    if metadata:
        proxy.info.metadata = self.encrypt_metadata(metadata, identity_name)
    return j.sals.zos.get(identity_name).workloads.deploy(proxy)
def expose_and_create_certificate(
    self,
    pool_id,
    gateway_id,
    network_name,
    trc_secret,
    domain,
    email,
    solution_ip,
    solution_port,
    enforce_https=False,
    node_id=None,
    proxy_pool_id=None,
    log_config=None,
    bot=None,
    public_key="",
    **metadata,
):
    """
    exposes the solution and enable ssl for its domain

    Args:
        pool_id: the pool used to create your solution
        gateway_id: Gateway id
        network_name: Name of the network selected while creating the solution
        trc_secret: Secret for tcp router
        domain: the domain we will issue certificate for
        email: used to issue certificate
        solution_ip: where your server is hosted (the actual server)
        solution_port: the port your application is listening on
        enforce_https: whether you want to only use https or not
        node_id: your node id
        solution_uuid: solution id
        public_key: your public key in case you want to have ssh access on the nginx container

    Returns:
        The workload id of the nginx/certbot proxy container.

    Raises:
        DeploymentFailed: when the reverse proxy or the network extension fails to deploy.
    """
    test_cert = j.config.get("TEST_CERT")
    proxy_pool_id = proxy_pool_id or pool_id
    gateway = self._explorer.gateway.get(gateway_id)
    # reverse tunnel on the gateway so traffic for `domain` reaches the container
    proxy_id = self.create_proxy(
        pool_id=proxy_pool_id, gateway_id=gateway_id, domain_name=domain, trc_secret=trc_secret, **metadata
    )
    success = self.wait_workload(proxy_id)
    if not success:
        raise DeploymentFailed(
            f"failed to create reverse proxy on gateway {gateway_id} workload {proxy_id}",
            wid=proxy_id,
            solution_uuid=metadata.get("solution_uuid"),
        )
    tf_gateway = f"{gateway.dns_nameserver[0]}:{gateway.tcp_router_port}"
    # configuration consumed by the nginx-certbot flist below
    secret_env = {
        "TRC_SECRET": trc_secret,
        "TFGATEWAY": tf_gateway,
        "EMAIL": email,
        "SOLUTION_IP": solution_ip,
        "SOLUTION_PORT": str(solution_port),
        "DOMAIN": domain,
        "ENFORCE_HTTPS": "true" if enforce_https else "false",
        "PUBKEY": public_key,
        "TEST_CERT": "true" if test_cert else "false",
    }
    if not node_id:
        node = self.schedule_container(pool_id=pool_id, cru=1, mru=1, hru=1)
        node_id = node.node_id
    else:
        node = self._explorer.nodes.get(node_id)
    # make sure the chosen node is part of the network before picking an IP
    res = self.add_network_node(network_name, node, pool_id, bot=bot)
    if res:
        for wid in res["ids"]:
            success = self.wait_workload(wid, bot, breaking_node_id=node.node_id)
            if not success:
                raise DeploymentFailed(
                    f"failed to add node {node.node_id} to network workload {wid}",
                    wid=wid,
                    solution_uuid=metadata.get("solution_uuid"),
                )
    network_view = NetworkView(network_name)
    network_view = network_view.copy()
    ip_address = network_view.get_free_ip(node)
    resv_id = self.deploy_container(
        pool_id=pool_id,
        node_id=node_id,
        network_name=network_name,
        ip_address=ip_address,
        flist="https://hub.grid.tf/omar0.3bot/omarelawady-nginx-certbot-zinit.flist",
        disk_type=DiskType.HDD,
        disk_size=512,
        secret_env=secret_env,
        public_ipv6=False,
        log_config=log_config,
        **metadata,
    )
    return resv_id
def expose_address(
    self,
    pool_id,
    gateway_id,
    network_name,
    local_ip,
    port,
    tls_port,
    trc_secret,
    node_id=None,
    reserve_proxy=False,
    proxy_pool_id=None,
    domain_name=None,
    bot=None,
    log_config=None,
    identity_name=None,
    **metadata,
):
    """Expose a local service through a gateway via a trc (tcp-router client) container.

    Deploys a container that forwards *local_ip*:*port*/*tls_port* to the gateway's
    tcp router; optionally reserves the reverse proxy on the gateway first.

    Args:
        pool_id: pool for the trc container (and default for the proxy).
        gateway_id: gateway to tunnel through.
        network_name: network the service lives in.
        local_ip / port / tls_port: address of the service to expose.
        trc_secret: shared secret between the trc container and the gateway.
        reserve_proxy: when True, also create the reverse proxy (requires domain_name).
        identity_name: identity to act as; defaults to the current identity.

    Returns:
        tuple: (trc container workload id, reverse-proxy workload id or None).

    Raises:
        StopChatFlow: when reserve_proxy is requested without a domain_name.
        DeploymentFailed: when the proxy or the network extension fails to deploy.
    """
    identity_name = identity_name or j.core.identity.me.instance_name
    proxy_pool_id = proxy_pool_id or pool_id
    gateway = self._explorer.gateway.get(gateway_id)
    reverse_id = None
    if reserve_proxy:
        if not domain_name:
            # fixed user-facing typo ("when you ise reserv_proxy")
            raise StopChatFlow("you must pass domain_name when you use reserve_proxy")
        resv_id = self.create_proxy(
            pool_id=proxy_pool_id,
            gateway_id=gateway_id,
            domain_name=domain_name,
            trc_secret=trc_secret,
            identity_name=identity_name,
            **metadata,
        )
        reverse_id = resv_id
        success = self.wait_workload(resv_id)
        if not success:
            raise DeploymentFailed(
                f"failed to create reverse proxy on gateway {gateway_id} to network workload {resv_id}",
                wid=resv_id,
                solution_uuid=metadata.get("solution_uuid"),
            )
    secret_env = {"TRC_SECRET": trc_secret}
    if not node_id:
        node = self.schedule_container(pool_id=pool_id, cru=1, mru=1, hru=1)
        node_id = node.node_id
    else:
        node = self._explorer.nodes.get(node_id)
    # make sure the chosen node is part of the network before picking an IP
    res = self.add_network_node(network_name, node, pool_id, identity_name=identity_name, bot=bot)
    if res:
        for wid in res["ids"]:
            success = self.wait_workload(wid, bot, breaking_node_id=node.node_id)
            if not success:
                if reserve_proxy:
                    # roll back the proxy we reserved above
                    j.sals.reservation_chatflow.solutions.cancel([resv_id])
                raise DeploymentFailed(
                    f"Failed to add node {node.node_id} to network {wid}",
                    wid=wid,
                    solution_uuid=metadata.get("solution_uuid"),
                )
    network_view = NetworkView(network_name, identity_name=identity_name)
    network_view = network_view.copy()
    # reserve the service's own IP so the trc container doesn't get it
    network_view.used_ips.append(local_ip)
    ip_address = network_view.get_free_ip(node)
    env = {
        "SOLUTION_IP": local_ip,
        "HTTP_PORT": str(port),
        "HTTPS_PORT": str(tls_port),
        "REMOTE_IP": gateway.dns_nameserver[0],
        "REMOTE_PORT": str(gateway.tcp_router_port),
    }
    # removed: stray debug print(log_config) and the unused `remote` local
    resv_id = self.deploy_container(
        pool_id=pool_id,
        node_id=node_id,
        network_name=network_name,
        ip_address=ip_address,
        flist="https://hub.grid.tf/omar0.3bot/omarelawady-trc-zinit.flist",
        disk_type=DiskType.HDD,
        secret_env=secret_env,
        env=env,
        public_ipv6=False,
        log_config=log_config,
        identity_name=identity_name,
        **metadata,
    )
    return resv_id, reverse_id
def deploy_minio_zdb(
    self,
    pool_id,
    password,
    node_ids=None,
    zdb_no=None,
    disk_type=DiskType.HDD,
    disk_size=10,
    pool_ids=None,
    **metadata,
):
    """
    deploy zdb workloads on the specified node_ids if specified or deploy workloads as specified by the zdb_no

    Args:
        pool_id: used to deploy all workloads in this pool (overridden when pool_ids is specified)
        password: zdb namespace password
        node_ids: if specified, it will be used for deployment of workloads.
        pool_ids: if specified, zdb workloads will be deployed on these pools respectively
        zdb_no: if specified and no node_ids, it will automatically schedule zdb workloads matching pool config
        disk_type: disk type backing each namespace
        disk_size: size of each namespace

    Returns:
        []: list of workload ids deployed

    Raises:
        StopChatFlow: when neither zdb_no nor node_ids is given, or when
            pool_ids and node_ids lengths mismatch.
    """
    node_ids = node_ids or []
    if not (zdb_no or node_ids):
        raise StopChatFlow("you must pass at least one of zdb_no or node_ids")
    if node_ids:
        pool_ids = pool_ids or [pool_id] * len(node_ids)
        # fixed: this check used to run unconditionally BEFORE node_ids was
        # auto-populated, so the zdb_no-only path always raised. It is only
        # meaningful when node_ids were given explicitly.
        if len(pool_ids) != len(node_ids):
            raise StopChatFlow("pool_ids must be same length as node_ids")
    else:
        pool_ids = pool_ids or [pool_id] * zdb_no
    if not node_ids and zdb_no:
        # auto-schedule one node per pool with enough ssd/hdd capacity
        query = {}
        if disk_type == DiskType.SSD:
            query["sru"] = disk_size
        else:
            query["hru"] = disk_size
        for pool_id in pool_ids:
            node = j.sals.reservation_chatflow.reservation_chatflow.nodes_get(
                pool_ids=[pool_id], number_of_nodes=1, **query
            )[0]
            node_ids.append(node.node_id)
    result = []
    for i in range(len(node_ids)):
        node_id = node_ids[i]
        pool_id = pool_ids[i]
        resv_id = self.deploy_zdb(
            pool_id=pool_id,
            node_id=node_id,
            size=disk_size,
            mode=ZDBMode.Seq,
            password=password,
            disk_type=disk_type,
            **metadata,
        )
        result.append(resv_id)
    return result
def deploy_minio_containers(
    self,
    pool_id,
    network_name,
    minio_nodes,
    minio_ip_addresses,
    zdb_configs,
    ak,
    sk,
    ssh_key,
    cpu,
    memory,
    data,
    parity,
    disk_type=DiskType.SSD,
    disk_size=10,
    log_config=None,
    mode="Single",
    bot=None,
    public_ipv6=False,
    secondary_pool_id=None,
    **metadata,
):
    """Deploy one or two minio containers backed by the given zdb namespaces.

    Args:
        pool_id: pool for the master container and its data volume.
        minio_nodes: node ids — [master] or [master, slave] in "Master/Slave" mode.
        minio_ip_addresses: container IPs matching minio_nodes one-to-one.
        zdb_configs: namespace connection strings; in "Master/Slave" mode the
            last entry is consumed as the TLOG namespace.
        ak / sk: minio access and secret keys.
        data / parity: erasure-coding shard counts.
        mode: "Single" or "Master/Slave".
        secondary_pool_id: pool for the slave container (defaults to pool_id).

    Returns:
        list: container workload ids ([master] or [master, slave]).

    Raises:
        DeploymentFailed: when a backing volume fails to deploy.
    """
    secondary_pool_id = secondary_pool_id or pool_id
    secret_env = {}
    if mode == "Master/Slave":
        # NOTE(review): pops the caller's zdb_configs list in place — confirm
        # callers do not reuse it afterwards.
        secret_env["TLOG"] = zdb_configs.pop(-1)
    shards = ",".join(zdb_configs)
    secret_env["SHARDS"] = shards
    secret_env["SECRET_KEY"] = sk
    env = {
        "DATA": str(data),
        "PARITY": str(parity),
        "ACCESS_KEY": ak,
        "SSH_KEY": ssh_key,
        "MINIO_PROMETHEUS_AUTH_TYPE": "public",
    }
    result = []
    # the master container needs its own /data volume deployed first
    master_volume_id = self.deploy_volume(pool_id, minio_nodes[0], disk_size, disk_type, **metadata)
    success = self.wait_workload(master_volume_id, bot)
    if not success:
        raise DeploymentFailed(
            f"Failed to create volume {master_volume_id} for minio container on" f" node {minio_nodes[0]}",
            wid=master_volume_id,
            solution_uuid=metadata.get("solution_uuid"),
        )
    master_cont_id = self.deploy_container(
        pool_id=pool_id,
        node_id=minio_nodes[0],
        network_name=network_name,
        ip_address=minio_ip_addresses[0],
        env=env,
        cpu=cpu,
        memory=memory,
        secret_env=secret_env,
        log_config=log_config,
        volumes={"/data": master_volume_id},
        public_ipv6=public_ipv6,
        flist="https://hub.grid.tf/tf-official-apps/minio:latest.flist",
        **metadata,
    )
    result.append(master_cont_id)
    if mode == "Master/Slave":
        # the slave points at the master through the namespace formerly used as TLOG
        secret_env["MASTER"] = secret_env.pop("TLOG")
        slave_volume_id = self.deploy_volume(pool_id, minio_nodes[1], disk_size, disk_type, **metadata)
        success = self.wait_workload(slave_volume_id, bot)
        if not success:
            raise DeploymentFailed(
                f"Failed to create volume {slave_volume_id} for minio container on" f" node {minio_nodes[1]}",
                solution_uuid=metadata.get("solution_uuid"),
                wid=slave_volume_id,
            )
        slave_cont_id = self.deploy_container(
            pool_id=secondary_pool_id,
            node_id=minio_nodes[1],
            network_name=network_name,
            ip_address=minio_ip_addresses[1],
            env=env,
            cpu=cpu,
            memory=memory,
            secret_env=secret_env,
            log_config=log_config,
            volumes={"/data": slave_volume_id},
            public_ipv6=public_ipv6,
            flist="https://hub.grid.tf/tf-official-apps/minio:latest.flist",
            **metadata,
        )
        result.append(slave_cont_id)
    return result
def deploy_etcd_containers(
    self,
    pool_id,
    node_id,
    network_name,
    ip_addresses,
    etcd_cluster,
    etcd_flist,
    cpu=1,
    memory=1024,
    disk_size=1024,
    disk_type=DiskType.SSD,
    entrypoint="etcd",
    public_ipv6=False,
    **metadata,
):
    """
    Deploy single and cluster etcd nodes

    Args:
        pool_id : Pool used to deploy etcd solution
        node_id : Node used to deploy etcd solution
        network_name : Network name used to deploy etcd solution
        ip_addresses (List): List of IP address for every etcd node
        etcd_cluster (str): Contains ETCD_INITIAL_CLUSTER value
        etcd_flist (str): ETCD flist image used
        cpu (int): CPU resource value. Defaults to 1.
        memory (int): Memory resource size in MB. Defaults to 1024.
        disk_size (int): Disk resource size in MB. Defaults to 1024.
        disk_type (DiskType): Disk resource type. Defaults to DiskType.SSD.
        entrypoint (str): Command that run at the start of the container. Defaults to "etcd".
        public_ipv6 (bool): Check for IPv6. Defaults to False.

    Returns:
        List: List of reservation ids

    Raises:
        KeyError: when metadata does not contain "solution_uuid".
    """
    etcd_cluster = etcd_cluster.rstrip(",")
    # NOTE(review): raises KeyError when "solution_uuid" is missing — confirm
    # all callers always supply it.
    solution_uuid = metadata["solution_uuid"]
    env_cluster = {
        "ETCD_INITIAL_CLUSTER_TOKEN": f"etcd_cluster_{solution_uuid}",
        "ETCD_INITIAL_CLUSTER_STATE": "new",
    }
    result = []
    for n, ip_address in enumerate(ip_addresses):
        env = {}
        # single-node deployments skip the cluster bootstrap variables
        if len(ip_addresses) > 1:
            env.update(env_cluster)
        env.update(
            {
                "ALLOW_NONE_AUTHENTICATION": "yes",
                "ETCD_NAME": f"etcd_{n+1}",
                "ETCD_INITIAL_ADVERTISE_PEER_URLS": f"http://{ip_address}:2380",
                "ETCD_LISTEN_PEER_URLS": "http://0.0.0.0:2380",
                "ETCD_ADVERTISE_CLIENT_URLS": f"http://{ip_address}:2379",
                "ETCD_LISTEN_CLIENT_URLS": "http://0.0.0.0:2379",
                "ETCD_INITIAL_CLUSTER": etcd_cluster,
            }
        )
        result.append(
            self.deploy_container(
                pool_id,
                node_id,
                network_name,
                ip_address,
                etcd_flist,
                env,
                cpu,
                memory,
                disk_size,
                disk_type,
                entrypoint=entrypoint,
                public_ipv6=public_ipv6,
                **metadata,
            )
        )
    return result
def get_zdb_url(self, zdb_id, password):
    """Compose the connection URL for a deployed 0-db namespace.

    Returns:
        str: "namespace:password@[ip]:port" built from the workload result.
    """
    workload = j.sals.zos.get().workloads.get(zdb_id)
    result_json = j.data.serializers.json.loads(workload.info.result.data_json)
    # newer results report a list under "IPs"; older ones a single "IP"
    ip = result_json["IPs"][0] if "IPs" in result_json else result_json["IP"]
    namespace = result_json["Namespace"]
    port = result_json["Port"]
    return f"{namespace}:{password}@[{ip}]:{port}"
def ask_multi_pool_distribution(
    self, bot, number_of_nodes, resource_query=None, pool_ids=None, workload_name=None, ip_version=None
):
    """
    Choose multiple pools to distribute workload automatically

    Args:
        bot: chatflow object
        number_of_nodes: how many nodes to schedule across the chosen pools
        resource_query: query dict {"cru": 1, "sru": 2, "mru": 1, "hru": 1}.
        pool_ids: if specified it will limit the pools shown in the chatflow to only these pools
        workload_name: name shown in the message
        ip_version: optional IP version filter forwarded to node selection

    Returns:
        ([], []): first list contains the selected node objects. second list contains selected pool ids
    """
    resource_query = resource_query or {}
    cu, su = self.calculate_capacity_units(**resource_query)
    # only pools with enough cu/su are listed
    pools = self.list_pools(cu, su)
    if pool_ids:
        filtered_pools = {}
        for pool_id in pools:
            if pool_id in pool_ids:
                filtered_pools[pool_id] = pools[pool_id]
        pools = filtered_pools
    workload_name = workload_name or "workloads"
    messages = {}
    pool_factory = StoredFactory(PoolConfig)
    for p in pools:
        # per-pool local config may hide the pool or give it a display name
        hidden = False
        name = ""
        if f"pool_{p}" in pool_factory.list_all():
            pool_config = pool_factory.get(f"pool_{p}")
            hidden = pool_config.hidden
            name = pool_config.name
        if hidden:
            continue
        if name:
            messages[f"Name: {name} Pool: {p} CU: {pools[p][0]} SU: {pools[p][1]}"] = p
        else:
            messages[f"Pool: {p} CU: {pools[p][0]} SU: {pools[p][1]}"] = p
    while True:
        pool_choices = bot.multi_list_choice(
            "Please select the pools you wish to distribute you" f" {workload_name} on",
            options=list(messages.keys()),
            required=True,
        )
        if not pool_choices:
            bot.md_show("You must select at least one pool. please click next to try again.")
        else:
            break
    pool_ids = {}
    node_to_pool = {}
    for p in pool_choices:
        # NOTE(review): dict.get evaluates its fallback eagerly, so pools.get()
        # is called for every choice even on cache hits — confirm acceptable.
        pool = pool_ids.get(messages[p], j.sals.zos.get().pools.get(messages[p]))
        pool_ids[messages[p]] = pool.pool_id
        for node_id in pool.node_ids:
            node_to_pool[node_id] = pool
    nodes = j.sals.reservation_chatflow.reservation_chatflow.get_nodes(
        number_of_nodes, pool_ids=list(pool_ids.values()), ip_version=ip_version, **resource_query
    )
    selected_nodes = []
    selected_pool_ids = []
    for node in nodes:
        selected_nodes.append(node)
        pool = node_to_pool[node.node_id]
        selected_pool_ids.append(pool.pool_id)
    return selected_nodes, selected_pool_ids
def chatflow_pools_check(self):
    """Abort the chatflow when the current user owns no capacity pools."""
    pools = self.list_pools()
    if not pools:
        raise StopChatFlow("You don't have any capacity pools. Please create one first.")
def chatflow_network_check(self, bot):
    """Abort when no networks exist; otherwise cache them on the chatflow session."""
    networks = self.list_networks()
    if not networks:
        raise StopChatFlow("You don't have any deployed networks. Please create one first.")
    # attribute name (including the historical misspelling) is read elsewhere
    bot.all_network_viewes = networks
def wait_demo_payment(self, bot, pool_id, exp=5, trigger_cus=0, trigger_sus=1, identity_name=None):
    """Poll *pool_id* until its cus/sus reach the trigger values or *exp* minutes pass.

    Returns:
        bool: True when the triggers were reached, False on timeout.
    """
    deadline = j.data.time.now().timestamp + exp * 60
    waiting_message = "<h2> Waiting for resources provisioning...</h2>"
    while j.data.time.get().timestamp < deadline:
        bot.md_show_update(waiting_message, html=True)
        pool = j.sals.zos.get(identity_name).pools.get(pool_id)
        if pool.cus >= trigger_cus and pool.sus >= trigger_sus:
            bot.md_show_update("Preparing app resources")
            return True
        gevent.sleep(2)
    return False
def wait_pool_payment(self, bot, pool_id, exp=5, qr_code=None, trigger_cus=0, trigger_sus=1, identity_name=None):
    """Poll *pool_id* until its cus/sus reach the trigger values or *exp* minutes pass.

    Args:
        bot: chatflow session used to render the waiting screen.
        pool_id: pool whose capacity units are watched.
        exp: timeout in minutes.
        qr_code: optional payment payload; rendered inline as a QR image when given.
        trigger_cus / trigger_sus: thresholds signalling the payment has landed.
        identity_name: identity used for pool lookups (None = default identity).

    Returns:
        bool: True when the triggers were reached, False on timeout.
    """
    expiration = j.data.time.now().timestamp + exp * 60
    msg = "<h2> Waiting for payment...</h2>"
    if qr_code:
        qr_encoded = j.tools.qrcode.base64_get(qr_code, scale=2)
        msg += f"Please scan the QR Code below for the payment details if you missed it from the previous screen"
        qr_code_msg = f"""
        <div class="text-center">
            <img style="border:1px dashed #85929E" src="data:image/png;base64,{qr_encoded}"/>
        </div>
        """
        # NOTE(review): this pool read is unused — the loop below re-fetches
        # the pool on every iteration. Confirm it can be dropped.
        pool = j.sals.zos.get(identity_name).pools.get(pool_id)
        msg = msg + self.msg_payment_info + qr_code_msg
    while j.data.time.get().timestamp < expiration:
        bot.md_show_update(msg, html=True)
        pool = j.sals.zos.get(identity_name).pools.get(pool_id)
        if pool.cus >= trigger_cus and pool.sus >= trigger_sus:
            bot.md_show_update("Preparing app resources")
            return True
        gevent.sleep(2)
    return False
def get_payment_info(self, pool):
    """Extract escrow payment details from *pool* into a plain dict.

    The total amount is reported by the explorer as an integer scaled by
    1e7; ``total_amount_dec`` carries the human-readable Decimal value.
    The currency code is the part of the asset string before the ":".
    """
    escrow = pool.escrow_information
    currency = escrow.asset.split(":")[0]
    amount_dec = Decimal(escrow.amount) / Decimal(1e7)
    return {
        "escrow_info": escrow,
        "resv_id": pool.reservation_id,
        "escrow_address": escrow.address,
        "escrow_asset": escrow.asset,
        "total_amount_dec": amount_dec,
        "thecurrency": currency,
        "total_amount": escrow.amount,
    }
def get_qr_code_payment_info(self, pool):
    """Build the QR-code payload and HTML payment summary for *pool*.

    Returns:
        (msg_text, qr_code): an HTML message describing the payment
        (address, currency, memo text, total amount) and the raw
        ``currency:address?amount=...`` payload to encode as a QR code.
    """
    info = self.get_payment_info(pool)
    # Render the Decimal amount in plain (non-scientific) notation.
    total_amount = "{0:f}".format(info["total_amount_dec"])
    qr_code = f"{info['thecurrency']}:{info['escrow_address']}?amount={total_amount}&message=p-{info['resv_id']}&sender=me"
    msg_text = f"""
<h4> Destination Wallet Address: </h4> {info['escrow_address']} \n
<h4> Currency: </h4> {info['thecurrency']} \n
<h4> Memo Text (Reservation ID): </h4> p-{info['resv_id']} \n
<h4> Total Amount: </h4> {total_amount} {info['thecurrency']} \n
<h5>Inserting the memo-text is an important way to identify a transaction recipient beyond a wallet address. Failure to do so will result in a failed payment. Please also keep in mind that an additional Transaction fee of 0.1 {info['thecurrency']} will automatically occurs per transaction.</h5>
"""
    return msg_text, qr_code
def test_managed_domain(self, gateway_id, managed_domain, pool_id, gateway=None, identity_name=None):
    """Probe *managed_domain* by deploying a throwaway subdomain and resolving it.

    Creates a random subdomain workload on the gateway, waits for it to
    deploy, tries a DNS lookup of the subdomain, then decommissions the
    probe workload.

    Returns:
        True when the subdomain deployed and resolved, False otherwise.
    """
    identity_name = identity_name or j.core.identity.me.instance_name
    gateway = gateway or self._explorer.gateway.get(gateway_id)
    # Random label so repeated probes never collide.
    subdomain = f"{uuid.uuid4().hex}.{managed_domain}"
    addresses = [j.sals.nettools.get_host_by_name(gateway.dns_nameserver[0])]
    subdomain_id = self.create_subdomain(pool_id, gateway_id, subdomain, addresses, identity_name=identity_name)
    success = self.wait_workload(subdomain_id)
    if not success:
        return False
    try:
        j.sals.nettools.get_host_by_name(subdomain)
    except Exception as e:
        j.logger.error(f"managed domain test failed for {managed_domain} due to error {str(e)}")
        # Clean up the probe workload before reporting failure.
        # ("decomission" spelling matches the zos API.)
        j.sals.zos.get(identity_name).workloads.decomission(subdomain_id)
        return False
    # Success path: also clean up the probe workload.
    j.sals.zos.get(identity_name).workloads.decomission(subdomain_id)
    return True
def block_managed_domain(self, managed_domain):
    """Temporarily disallow *managed_domain*, with a backoff that grows per failure.

    The block key's TTL (and the expiration timestamp stored in it) is the
    recorded failure count multiplied by DOMAINS_DISALLOW_EXPIRATION.
    """
    failures = j.core.db.hincrby(DOMAINS_COUNT_KEY, managed_domain)
    ttl = failures * DOMAINS_DISALLOW_EXPIRATION
    j.core.db.set(
        f"{DOMAINS_DISALLOW_PREFIX}:{managed_domain}",
        j.data.time.utcnow().timestamp + ttl,
        ex=ttl,
    )
def unblock_managed_domain(self, managed_domain, reset=True):
    """Remove the disallow marker for *managed_domain*.

    When *reset* is True the recorded failure count is cleared as well,
    so future blocks start over with the shortest backoff.
    """
    j.core.db.delete(f"{DOMAINS_DISALLOW_PREFIX}:{managed_domain}")
    if reset:
        j.core.db.hdel(DOMAINS_COUNT_KEY, managed_domain)
def list_blocked_managed_domains(self):
    """Return the currently blocked managed domains.

    Returns:
        dict mapping domain name -> {"expiration": <unix timestamp when
        the block ends>, "failure_count": <recorded failure count>}.
    """
    blocked_keys = j.core.db.keys(f"{DOMAINS_DISALLOW_PREFIX}:*")
    if not blocked_keys:
        # redis MGET rejects an empty key list, so bail out early.
        return {}
    failure_counts = j.core.db.hgetall(DOMAINS_COUNT_KEY)
    expirations = j.core.db.mget(blocked_keys)
    result = {}
    prefix_len = len(DOMAINS_DISALLOW_PREFIX) + 1  # prefix plus ":" separator
    for key, expiration in zip(blocked_keys, expirations):
        if expiration is None:
            # The key expired between the KEYS and MGET calls.
            continue
        domain_bytes = key[prefix_len:]
        domain = domain_bytes.decode()
        result[domain] = {
            "expiration": int(expiration),
            # The count may have been reset externally; default to 0
            # instead of raising KeyError.
            "failure_count": int(failure_counts.get(domain_bytes, 0)),
        }
    return result
# Module-level shared ChatflowDeployer instance.
deployer = ChatflowDeployer()
|
import os
import time
import datetime
import socket
import platform
import sys
from colorama import Fore, Back, Style
while True:
iplist=sys.argv[1]
passlist=sys.argv[2]
if os.path.exists('./logs'):
out=open('logs','a')
out.close()
else:
out=open('logs','w')
out.close()
choice = input ("Which exploit do yo want to run\n 0) All\n 1) Hikvision\n 2) Dahua\n 3) Net-Surveillance\n 4) Axis\n 5) Arecont\n 6) Samsung\n 7) Samsung multi-channel\n 8) Scw-admiral\n 9) Scw-admiral-line\n 10) Swann\n 11) Hipcam\n 12) Uniview\n 13) Exit\n \nChoose your option: ")
if choice == 0:
print Fore.CYAN + "\nHikvision Exploit Running\n"
os.system("python hik-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nDahua Exploit Running\n"
os.system("python dah-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nNet-Surveillance Exploit Running\n"
os.system("python net-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nAxis Exploit Running\n"
os.system("python axis-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nArecont Exploit Running\n"
os.system("python arecont-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nSamsung Exploit Running\n"
os.system("python sam-hanwa-techwin-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nSamsung2 Exploit Running\n"
os.system("python sam-hanwa-techwin-2-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nScw-admiral Exploit Running\n"
os.system("python scw-admiral-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nScw-admiral-line Exploit Running\n"
os.system("python scw-admiral-line-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nSwann Exploit Running\n"
os.system("python swann-brute.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nHipcam Exploit Running\n"
os.system("python hipcam.py %s %s" %(iplist,passlist))
print Fore.CYAN + "\nUniview Exploit Running\n"
os.system("python uniview.py %s %s" %(iplist,passlist))
elif choice == 1:
print Fore.CYAN + "\nHikvision Exploit Running\n"
os.system("python hik-brute.py %s %s" %(iplist,passlist))
elif choice == 2:
print Fore.CYAN + "\nDahua Exploit Running\n"
os.system("python dah-brute.py %s %s" %(iplist,passlist))
elif choice == 3:
print Fore.CYAN + "\nNet-Surveillance Exploit Running\n"
os.system("python net-brute.py %s %s" %(iplist,passlist))
elif choice == 4:
print Fore.CYAN + "\nAxis Exploit Running\n"
os.system("python axis-brute.py %s %s" %(iplist,passlist))
elif choice == 5:
print Fore.CYAN + "\nArecont Exploit Running\n"
os.system("python arecont-brute.py %s %s" %(iplist,passlist))
elif choice == 6:
print Fore.CYAN + "\nSamsung Exploit Running\n"
os.system("python sam-hanwa-techwin-brute.py %s %s" %(iplist,passlist))
elif choice == 7:
print Fore.CYAN + "\nSamsung2 Exploit Running\n"
os.system("python sam-hanwa-techwin-2-brute.py %s %s" %(iplist,passlist))
elif choice == 8:
print Fore.CYAN + "\nScw-admiral Exploit Running\n"
os.system("python scw-admiral-brute.py %s %s" %(iplist,passlist))
elif choice == 9:
print Fore.CYAN + "\nScw-admiral-line Exploit Running\n"
os.system("python scw-admiral-line-brute.py %s %s" %(iplist,passlist))
elif choice == 10:
print Fore.CYAN + "\nSwann Exploit Running\n"
os.system("python swann-brute.py %s %s" %(iplist,passlist))
elif choice == 11:
print Fore.CYAN + "\nHipcam Exploit Running\n"
os.system("python hipcam.py %s %s" %(iplist,passlist))
elif choice == 12:
print Fore.CYAN + "\nUniview Exploit Running\n"
os.system("python uniview.py %s %s" %(iplist,passlist))
elif choice == 13:
exit()
else :
print "\nThis is an incorrect option, please choose valid option\n "
|
<filename>src/pymyinstall/win_installer/win_setup_main.py
# -*- coding: utf-8 -*-
"""
@file
@brief Functions to prepare a setup on Windows
"""
from __future__ import print_function
import os
import shutil
import sys
import warnings
import datetime
from ..installhelper.install_cmd_helper import update_pip, run_cmd, python_version
from ..installhelper.module_dependencies import missing_dependencies
from .win_batch import create_win_batches
from .win_ipy_kernels import install_kernels
from .win_innosetup_helper import run_innosetup, innosetup_replacements
from .win_fix_compiler_c import switch_to_VS_compiler, switch_to_mingw_compiler
from .win_patch import win_patch_paths
from .win_setup_main_helper import dtnow, copy_icons, win_download, win_install, create_links_tools
from .win_setup_main_helper import win_download_notebooks, win_install_julia_step, win_install_r_step
from .win_packages import win_install_packages_other_python, get_modules_version
from .win_extract import clean_msi
from .win_ipython_helper import ipython_create_profile, ipython_update_profile, install_jupyter_extension
from .win_setup_r import get_package_description
from .win_exception import WinInstallMissingDependency
from .tutorial import copy_tutorial
from .win_setup_main_checkings import distribution_checkings
# Python 2 compatibility: codecs.open provides the `encoding` parameter used
# below, and FileNotFoundError does not exist on Python 2.
if sys.version_info[0] == 2:
    from codecs import open
    FileNotFoundError = Exception
license = """
Copyright (c) 2013-2016, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Notebooks downloaded into the workspace when win_python_setup is called
# with notebooks=None: list of (destination subfolder, [notebook urls]).
_default_notebooks = [
    ("docs/ensae", ["http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/_downloads/td1a_cenonce_session_12.ipynb",
                    "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/_downloads/td2a_cenonce_session_2A.ipynb",
                    "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/_downloads/td2a_cenonce_session_2B.ipynb",
                    "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/_downloads/td2a_cenonce_session_2C.ipynb",
                    ]),
    ("docs/actuariat", ["http://www.xavierdupre.fr/app/actuariat_python/helpsphinx/_downloads/population_recuperation_donnees.ipynb",
                        ]),
]
def architecture():
    """
    @return architecture of the current interpreter, either 32bit or 64bit
    """
    _, bits = python_version()
    return bits
def win_python_setup(folder="dist/win_python_setup_" + architecture(),
download_folder="build/win_python_setup_" + architecture(),
module_list=None, verbose=False, fLOG=print, download_only=False,
no_setup=False, notebooks=None, selection=None,
documentation=True, last_function=None, r_packages=True,
julia_packages=True, tutorial=None, source=None,
embed=True):
"""
Prepares a Windows distribution of Python based on InnoSetup,
inspired from WinPython but more easier to tweak I hope.
@param folder where to prepare the python version
@param module_list list of module to install (see @see fn small_installation = default options)
@param fLOG logging function
@param download_only only downloads
@param no_setup skip the building of the setup
@param verbose print more information
@param notebooks notebooks to copy to the workspace, list of ("subfolders", url)
@param selection selection of tools to install, example: ``{"R", "mingw", "tdm"}``,
empty by default
@param last_function function to execute just before running InnoSetup,
see `win_setup_helper.py
<https://github.com/sdpython/ensae_teaching_cs/blob/master/src/
ensae_teaching_cs/automation/win_setup_helper.py>`_
for an example
@param r_packages install R packages
@param julia_packages install Julia packages
@param documentation add documentation
@param tutorial list of folders to copy in ``workspace/tutorial``,
it can refer to internal tutorials (see folder ``win_installer/tutorial``)
@param source source of python packages (see @see cl ModuleInstall)
@param embed custom *Python* or embedded distribution (False)
@return list of completed operations
The available tools to install must be chose among:
* `R <http://www.r-project.org/>`_
* `Julia <http://julialang.org/>`_
* `MinGW <http://www.mingw.org/>`_
* `TDM-GCC <http://tdm-gcc.tdragon.net/>`_
* `VS <https://www.visualstudio.com/en-us/products/visual-studio-express-vs.aspx>`_
* `Java JDK <http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html>`_
By default, only R is included. Julia requires too much work.
The command line does not always end. The building of the package
is sometimes reluctant to work. And the Julia kernel is exclusive:
it cannot be setup with others kernel. Maybe the version 0.5 will fix those issues.
The signature of function ``last_function`` should be the following::
def last_function(inno_script, folders, verbose=False, fLOG=print):
# something specific to do
# before compiling the setup
# such adding new tools
# or replacing icons
# the inno setup script is located in folders["logs"]
The parameters *folders* is a dictionary which gives access to the main folders
of the distribution.
The setup will also contains `pandoc <http://pandoc.org/>`_,
`7z <http://www.7-zip.org/>`_,
`SQLiteSpy <https://www.yunqa.de/delphi/doku.php/products/sqlitespy/index>`_,
`Scite <http://www.scintilla.org/SciTE.html>`_,
`MinGW <http://www.mingw.org/>`_,
`Graphviz <http://www.graphviz.org/>`_.
The function first downloads everything.
It does not do it twice, so you can run the function again and directly go
to where it was interrupted. If there is no notebooks,
the setup will add some anyway.
It uses `InnoSetup <http://www.jrsoftware.org/isinfo.php>`_ to build the setup.
The distribution will contain the following subfolders:
* *tools*: subfolders for R, Julia, MinGW, Scite, pandoc, 7z, Putty...
* *python*: subfolder for python interpreter
* *workspace*: current folder for the notebooks
* *build*: location of downloaded modules and tools
Comments and remarks:
* 7z setup needs a last click to complete
* pandoc needs a last click to complete
* R must be manually installed in the right folder
* TDM-GCC is manually installed in the right folder
* Julia produces a file exe, for the time being it must be done manually
* MinGW is also installed manually, the command line is different from others tools,
once it is installed, you should run the command line::
mingw-get install binutils gcc g++ mingw32 fortran gdb mingw32 mingw w32api g77
* Python setups needs a last click to complete
* Julia command line sometimes gets stuck, the setup needs to be stopped
and restarted. It happens while installing the packages and
also while building IJulia (the package to use Julia in a notebook).
The Julia should be stopped instead of stopping the python script.
That trick shows the standard output of Julia.
* Julia kernel cannot be used with the others: it requires a different
configuration which prevents others kernel to be available at the same time.
We will skip for the time being.
* If the R kernel fails to start, you should manually run the script
`R_install.r <https://github.com/sdpython/pymyinstall/blob/master/src/pymyinstall/win_installer/R_install.r>`_.
With Julia, initialisation, installation or building takes time.
The function writes a file ``log.step.<julia_step>.txt``
to tell the step has completed once. You can remove this file
to do it again.
.. exref::
:title: Prepare a standalone distribution
The function downloads everything. The installation of tools
is still manual. Package installation is automated.
::
from pymyinstall import win_python_setup
from pymyinstall.packaged import ensae_fullset
list_modules = ensae_fullset()
win_python_setup(module_list=list_modules,
verbose=False,
download_only=False)
This works only for Windows.
@warning The Julia installation might be stuck after the installation or the build.
In that case, the script python should be stopped by *stopping the Julia
process* from the Task Manager
and started again. If it has completed, it will go to the
next step.
.. index:: issue
**Known issues while preparing the setup:**
* Some modules generates utf-8 encoding errors while being installed.
The python scripts stops. It should be started again, it will
detect the module was insalled and will go to the next one in the list.
* The setup are started by the Python script but the user needs to manually
click on the final ok button to proceed.
**Known issues after the setup is installed:**
* The first run of Spyder after the installation usually fails (failure of python.exe).
The second one succeeds. You should run Spyder from the installation setup before
compiling the setup.
.. index:: missing modules, vcomp110.dll, llvmlite, numba, issue, theano, xgboost
**Known extra steps needed by some modules**
* **llvmlite**, **numba**, on Windows, if the dll
*api-ms-win-crt-runtime-l1-1-0.dll* is missing, it is explained
in `api-ms-win-crt-runtime-l1-1-0.dll error <https://github.com/cmderdev/cmder/issues/490>`_,
`Visual C++ Redistributable for Visual Studio 2015 <https://www.microsoft.com/en-us/download/details.aspx?id=48145>`_
needs to be installed.
* **theano** requires `TDM-GCC <http://tdm-gcc.tdragon.net/>`_,
read `Installation of Theano on Windows <http://deeplearning.net/software/theano/install_windows.html>`_
* **xgboost** if DLL ``vcomp110.dll`` is missing, you should read blog :ref:`blog_xgboost_install`
to understand how to get it.
@todo Use chocolatey to process installation.
@todo Fix Julia installation.
"""
if selection is None:
selection = set()
if notebooks is None:
notebooks = _default_notebooks
if not isinstance(selection, set):
selection = set(selection)
selection.add("pandoc")
selection.add("7z")
selection.add("scite")
selection.add("putty")
selection.add("sqlitespy")
selection.add("graphviz")
selection.add("python")
selection.add("jenkins")
selection = set(_.lower() for _ in selection)
_allowed = {"pandoc", "7z", "scite", "putty",
"sqlitespy", "scite", "python", "tdm", "vs", "r", "graphviz",
"jenkins", "jdk"}
for s in selection:
s_ = s.split("==")[0]
if s_ not in _allowed:
raise ValueError("{0} unknown, should be in {1}".format(
s, ", ".join(sorted(_allowed))))
fLOG("[pymy] --- selection", selection)
#####
# we change for the version
#####
versions = {}
for sel in selection:
if "==" in sel:
spl = sel.split("==")
versions[spl[0].lower()] = spl[1]
else:
versions[sel.lower()] = None
selection = versions
######
# next
######
operations = []
operations.append(("time", dtnow()))
folder = os.path.abspath(folder)
download_folder = os.path.abspath(download_folder)
if not os.path.exists(folder):
os.makedirs(folder)
operations.append(("mkdir", folder))
if not os.path.exists(download_folder):
os.makedirs(download_folder)
operations.append(("mkdir", download_folder))
###################
# definition of folders
###################
operations.append(("time", dtnow()))
folders = dict(tools=os.path.join(folder, "tools"),
workspace=os.path.join(folder, "workspace"),
python=os.path.join(folder, "python"),
config=os.path.join(folder, "config"),
logs=os.path.join(folder, "logs"),
build=download_folder,
)
for k, v in folders.items():
if not os.path.exists(v):
os.mkdir(v)
###########################
# download the documentation
###########################
if documentation:
operations.append(("documentation", "-"))
op = win_download_notebooks(
notebooks, folders["workspace"], verbose=verbose, fLOG=fLOG)
operations.extend(op)
operations.append(("time", dtnow()))
######################
# download of everything
######################
for mod in module_list:
mod.fLOG = fLOG
operations.append(("download", "-"))
op = win_download(folder=download_folder,
module_list=module_list,
verbose=verbose, fLOG=fLOG,
selection=selection, source=source,
embed=embed)
operations.extend(op)
operations.append(("time", dtnow()))
########
# license
########
fLOG("[pymy] --- license")
with open(os.path.join(folder, "license.txt"), "w") as f:
f.write(license)
operations.append(("license", "license.txt"))
if not download_only:
########################
# copy icons in tools/icons
#######################
fLOG("[pymy] --- copy icons")
op = copy_icons(os.path.join(os.path.dirname(__file__), "icons"),
os.path.join(folders["tools"], "icons"))
operations.extend(op)
operations.append(("time", dtnow()))
#############
# install setups
#############
fLOG("[pymy] --- installation of python and tools")
fLOG("[pymy] --- you might have to it yourself for R, Julia")
op, installed = win_install(
folders=folders, download_folder=download_folder, verbose=verbose, fLOG=fLOG,
selection=selection)
operations.extend(op)
operations.append(("time", dtnow()))
if "pandoc" not in installed:
raise FileNotFoundError("pandoc was not installed")
if verbose:
for k, v in installed.items():
fLOG("[pymy] INSTALLED:", k, "-->", v)
##########
# clean msi
##########
fLOG("[pymy] --- clean msi")
op = clean_msi(folders["tools"], "*.msi", verbose=verbose, fLOG=fLOG)
operations.extend(op)
operations.append(("time", dtnow()))
################
# create links tools
################
fLOG("[pymy] --- create links")
op = create_links_tools(folder, installed, verbose=verbose, fLOG=fLOG)
operations.extend(op)
operations.append(("time", dtnow()))
#########################
# create batch command files
#########################
fLOG("[pymy] --- create batch command file")
op = create_win_batches(
folders, verbose=verbose, fLOG=fLOG, selection=selection, module_list=module_list)
operations.extend(op)
###########
# update pip
###########
fLOG("[pymy] --- update pip")
operations.append(("python pip", "update"))
op = update_pip(folders["python"])
operations.extend(op)
operations.append(("time", dtnow()))
if "julia" in selection and julia_packages:
operations.append(("julia", "-"))
ops = win_install_julia_step(folders, verbose=verbose, fLOG=fLOG)
operations.extend(ops)
operations.append(("time", dtnow()))
if "r" in selection and r_packages:
operations.append(("r", "-"))
ops = win_install_r_step(folders, verbose=verbose, fLOG=fLOG)
operations.extend(ops)
operations.append(("time", dtnow()))
######################
# installation of packages
######################
fLOG("[pymy] --- installation of python packages")
operations.append(("python packaes", "start"))
python_path = folders["python"]
win_install_packages_other_python(
python_path, download_folder, verbose=verbose, fLOG=fLOG,
module_list=module_list)
fLOG("[pymy] done")
operations.append(("time", dtnow()))
##########################
# mingw, add file distutils.cfg
##########################
if "mingw" in selection and "vs" not in selection:
fLOG("[pymy] --- switch_to_mingw_compiler")
op = switch_to_mingw_compiler(folders["python"])
for o in op:
operations.append(("modify", o))
##########################
# Visual Studio, VS 2015 for Python 3.5
##########################
if "vs" in selection:
fLOG("[pymy] --- switch_to_VS_compiler")
op = switch_to_VS_compiler(folders["python"])
for o in op:
operations.append(("modify", o))
######################
# create jupyter profile
######################
has_jupyter = False
for mod in module_list:
if mod.name == "jupyter":
has_jupyter = True
if has_jupyter:
fLOG("[pymy] --- create jupyter profile")
operations.append(("jupyter", "create profile"))
ipath = ipython_create_profile(
folders["config"], folders["python"], fLOG=fLOG)
operations.append(("profile", ipath))
operations.append(("time", dtnow()))
######################
# update ipython profile
######################
if has_jupyter:
fLOG("[pymy] --- update jupyter profile")
operations.append(("jupyter", "update profile"))
ipython_update_profile(ipath)
operations.append(("time", dtnow()))
######################
# update jupyter extension
######################
if has_jupyter:
fLOG("[pymy] --- install jupyter extension")
operations.append(("jupyter", "update install jupyter extension"))
install_jupyter_extension(folders["python"])
operations.append(("time", dtnow()))
######################
# copy pywin32 dll to main folders
######################
fLOG("[pymy] --- pywin32 dll to main folders")
operations.append(("pywin32", "dll"))
fdll = os.path.join(
python_path, "Lib", "site-packages", "pywin32_system32")
if not os.path.exists(fdll):
fdll = os.path.join(
python_path, "Lib", "site-packages", "pypiwin32_system32")
if not os.path.exists(fdll):
raise FileNotFoundError(fdll)
for dll in os.listdir(fdll):
full = os.path.join(fdll, dll)
if os.path.isdir(full):
continue
try:
shutil.copy(full, python_path)
except KeyError as a:
# it means it already exists
continue
operations.append(("pywin32", "copy " + dll))
operations.append(("time", dtnow()))
########
# kernels
########
if has_jupyter:
fLOG("[pymy] --- add kernels")
operations.append(("kernel", "add"))
res = install_kernels(folders["tools"], folders["python"])
for r in res:
fLOG("[pymy] ADD: kernel", r)
operations.append(("time", dtnow()))
#########
# checking
#########
distribution_checkings(folders["python"], folders[
"tools"], fLOG=fLOG, module_list=module_list)
########
# tutorial
########
if tutorial is not None:
fLOG("[pymy] --- copy tutorial")
operations.append(("tutorial", "begin"))
fold_tuto = os.path.join(folders["workspace"], "tutorial")
if not os.path.exists(fold_tuto):
fLOG("[pymy] --- create ", fold_tuto)
operations.append(("create", fold_tuto))
os.mkdir(fold_tuto)
for tuto in tutorial:
fLOG("[pymy] copy tutorial", tuto)
operations.append(("tutorial", tuto))
res = copy_tutorial(tuto, fold_tuto)
for a, b, c in res:
operations.append(("copy", c))
operations.append(("time", dtnow()))
################################
# prepare setup script for InnoSetup
###############################
fLOG("[pymy] --- prepare setup script for InnoSetup")
replacements = dict(__DISTPATH__=folder)
new_script = innosetup_replacements(replacements=replacements, fLOG=fLOG,
temp_folder=os.path.join(folders["logs"]))
fLOG("[pymy] done")
operations.append(("InnoSetup", "replacement"))
operations.append(("time", dtnow()))
if last_function is not None:
#################
# run last_function
#################
fLOG("[pymy] --- run last_function")
operations.append(("start", "last_function"))
last_function(new_script, folders, verbose=verbose, fLOG=fLOG)
operations.append(("time", dtnow()))
##################
# check there is no missing modules
##################
if not download_only:
scr = "from pymyinstall.installhelper import missing_dependencies;r=missing_dependencies();" + \
"print('\\n'.join('\'{0}\' misses \'{1}\''.format(k,v) for k,v in sorted(r.items())))"
cmd = '{0} -c "{1}"'.format(os.path.join(
folders["python"], "python.exe"), scr)
fLOG("[pymy] --- run dependencies")
fLOG("[pymy] CMD:", cmd)
out, err = run_cmd(cmd, wait=True)
if len(err) > 0:
raise WinInstallMissingDependency(err)
fLOG(out)
miss = missing_dependencies()
if len(miss) > 0:
mes = "\n".join("'{0}' misses '{1}'".format(k, ", ".join(v))
for k, v in sorted(miss.items()))
warnings.warn(mes)
##################
# patch exe in scripts
##################
if not download_only:
fLOG(
"--- patch paths, see http://www.clemens-sielaff.com/create-a-portable-python-with-pip-on-windows/")
op = win_patch_paths(
os.path.join(folders["python"], "Scripts"), "", fLOG=fLOG)
operations.extend(op)
operations.append(("time", dtnow()))
#################
# print the list of modules (python)
#################
if not download_only:
fLOG("[pymy] --- pip freeze")
mods = get_modules_version(folders["python"])
fLOG("[pymy] nb modules: {0}".format(len(mods)))
if len(mods) == 0:
raise ValueError(
"unable to get module list from folder " + folders["python"])
with open(os.path.join(folders["config"], "installed.python.packages.txt"), "w") as f:
mods = [(a.lower(), a, b) for a, b in mods.items()]
for la, a, b in sorted(mods):
f.write("{0}\t{1}\n".format(a, b))
#################
# print the list of modules (R)
#################
if not download_only:
r_path = os.path.join(folders["tools"], "R")
r_lib = os.path.join(r_path, "library")
if os.path.exists(r_lib):
fLOG("[pymy] --- list R packages")
packs = os.listdir(r_lib)
with open(os.path.join(folders["config"], "installed.R.packages.txt"), "w") as f:
packs_ = [(p.lower(), p) for p in packs]
for lp, pack in sorted(packs_):
desc = get_package_description(r_path, pack)
vers = desc.get("Version", "unknown")
f.write("{0}\t{1}\n".format(pack, vers))
if not no_setup:
if not os.path.exists(folders["workspace"]):
raise FileNotFoundError(folders["workspace"])
if len(os.listdir(folders["workspace"])) == 0:
raise FileNotFoundError(
"folder {0} is empty, it should not".format(folders["workspace"]))
# remove
fLOG("[pymy] --- remove setup")
dist = os.path.join(folders["logs"], "..", "dist", "setup")
if os.path.exists(dist):
exe = [_ for _ in os.listdir(dist) if ".exe" in _]
else:
exe = []
if len(exe) > 0:
for e in exe:
os.remove(os.path.join(dist, e))
################################
# prepare setup script for InnoSetup
###############################
fLOG("[pymy] --- building setup with InnoSetup")
out = run_innosetup(new_script, fLOG=fLOG,
temp_folder=os.path.join(folders["logs"]))
with open(os.path.join(folders["logs"], "out.install.innosetup.txt"), "w", encoding="utf8") as f:
f.write(out)
fLOG("[pymy] done")
operations.append(("InnoSetup", "done"))
operations.append(("time", dtnow()))
# copy
fLOG("[pymy] --- copy setup")
dist = os.path.join(folders["logs"], "..", "dist", "setup")
to = os.path.join(folders["logs"], "..", "..")
exe = [_ for _ in os.listdir(dist) if ".exe" in _]
if len(exe) > 0:
dt = datetime.datetime.now()
suffix = "%d%02d%02d.%d" % (dt.year, dt.month, dt.day, dt.hour)
for e in exe:
shutil.copy(os.path.join(dist, e), to)
operations.append(("copy", e))
final = os.path.join(to, e)
tof = final.replace(".exe", "_" + suffix + ".exe")
operations.append(("rename", tof))
os.rename(final, tof)
##########
# store logs
##########
with open(os.path.join(folders["logs"], "log.setup.txt"), "a", encoding="utf8") as f:
f.write("\n")
f.write("-------------------------------------------\n")
f.write("NEW RUN\n")
f.write("-------------------------------------------\n")
for ab in operations:
if isinstance(ab, tuple):
if len(ab) == 2:
a, b = ab
elif len(ab) == 1:
a, b = a, None
else:
a, b = ab[0], str(ab[1:])
if isinstance(b, str # unicode#
):
b = b.replace(folder, "")
b = b.replace(os.environ.get(
"USERNAME", os.environ["USER"]), "---")
f.write("{0}\t{1}\n".format(a, b))
|
<reponame>nixballs/ungoogled-chromium
# ungoogled-chromium: A Google Chromium variant for removing Google integration and
# enhancing privacy, control, and transparency
# Copyright (C) 2016 Eloston
#
# This file is part of ungoogled-chromium.
#
# ungoogled-chromium is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ungoogled-chromium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ungoogled-chromium. If not, see <http://www.gnu.org/licenses/>.
'''Code for Windows'''
import pathlib
import os
import zipfile
import subprocess
import shutil
from ._util import BuilderException
from .common import GNUPatchComponent, GNMetaBuildComponent, CPUArch
__all__ = ["WindowsBuilder"]
class WindowsBuilder(GNUPatchComponent, GNMetaBuildComponent):
'''Builder for Windows'''
# Windows-specific resources directory, relative to the repository root.
_resources = pathlib.Path("resources", "windows")
# Command used to invoke the Python 2 interpreter required by the build scripts.
python2_command = "python"
# When False, DEPOT_TOOLS_WIN_TOOLCHAIN=0 is set so depot_tools does not
# manage the Visual Studio toolchain (see generate_build_configuration).
use_depot_tools_toolchain = False
# Default target architecture.
target_cpu = CPUArch.x86
def __init__(self, *args, **kwargs):
    """Initialize the builder and record the path to Chromium's FILES.cfg."""
    super(WindowsBuilder, self).__init__(*args, **kwargs)
    files_cfg_relative = pathlib.Path("chrome", "tools", "build", "win", "FILES.cfg")
    self._files_cfg = self._sandbox_dir / files_cfg_relative
def _run_subprocess(self, *args, **kwargs):
    """Run a subprocess with shell=True forced, to work around Windows quirks."""
    # On Windows for some reason, subprocess.run(['python']) will use the current interpreter's
    # executable even though it is not in the PATH or cwd
    # Also, subprocess calls CreateProcess on Windows, which has limitations as shown by
    # https://bugs.python.org/issue17023
    # Adding shell=True solves all of these problems
    kwargs["shell"] = True
    return super(WindowsBuilder, self)._run_subprocess(*args, **kwargs)
def _write_path_override(self, name, value):
    """Unsupported on Windows; always raises BuilderException."""
    raise BuilderException("File-based PATH overrides are not supported on Windows")
def check_build_environment(self):
super(WindowsBuilder, self).check_build_environment()
self.logger.info("Checking bison command...")
result = self._run_subprocess(["bison", "--version"], stdout=subprocess.PIPE,
universal_newlines=True)
if not result.returncode is 0:
raise BuilderException("bison command returned non-zero exit code {}".format(
result.returncode))
result_which = shutil.which("bison")
if result_which:
if " " in result_which:
raise BuilderException("Spaces are not allowed in the path to bison: {}".format(
result_which))
else:
raise BuilderException("shutil.which returned unexpected value: {}".format(
result_which))
self.logger.debug("Using bison command '{!s}'".format(result.stdout.split("\n")[0]))
self.logger.info("Checking gperf command...")
result = self._run_subprocess(["gperf", "--version"], stdout=subprocess.PIPE,
universal_newlines=True)
if not result.returncode is 0:
raise BuilderException("gperf command returned non-zero exit code {}".format(
result.returncode))
result_which = shutil.which("gperf")
if result_which:
if " " in result_which:
raise BuilderException("Spaces are not allowed in the path to gperf: {}".format(
result_which))
else:
raise BuilderException("shutil.which returned unexpected value: {}".format(
result_which))
self.logger.debug("Using gperf command '{!s}'".format(result.stdout.split("\n")[0]))
def generate_build_configuration(self):
self.logger.info("Running gn command...")
if self.use_depot_tools_toolchain:
append_environ = None
else:
append_environ = {"DEPOT_TOOLS_WIN_TOOLCHAIN": "0"}
self._gn_generate_ninja(self._get_gn_flags(), append_environ)
def build(self):
# Try to make temporary directories so ninja won't fail
os.makedirs(os.environ["TEMP"], exist_ok=True)
os.makedirs(os.environ["TMP"], exist_ok=True)
super(WindowsBuilder, self).build()
def generate_package(self):
# Derived from chrome/tools/build/make_zip.py
# Hardcoded to only include files with buildtype "official"
if self.target_cpu is None:
cpu_arch = "defaultcpu"
else:
cpu_arch = str(self.target_cpu.value)
output_filename = str(self.build_dir / pathlib.Path(
"ungoogled-chromium_{}-{}_windows_{}.zip".format(self.chromium_version,
self.release_revision,
cpu_arch)))
self.logger.info("Creating build output archive {} ...".format(output_filename))
def file_list_generator():
'''Generator for files to be included in package'''
exec_globals = {"__builtins__": None}
with self._files_cfg.open() as cfg_file:
exec(cfg_file.read(), exec_globals) # pylint: disable=exec-used
for file_spec in exec_globals["FILES"]:
if "official" in file_spec["buildtype"]:
if "arch" in file_spec:
if self.target_cpu == CPUArch.x86 and not "32bit" in file_spec["arch"]:
continue
elif self.target_cpu == CPUArch.x64 and not "64bit" in file_spec["arch"]:
continue
for file_path in (self._sandbox_dir /
self.build_output).glob(file_spec["filename"]):
if not file_path.suffix.lower() == ".pdb":
yield (str(file_path.relative_to(self._sandbox_dir /
self.build_output)), file_path)
with zipfile.ZipFile(output_filename, mode="w",
compression=zipfile.ZIP_DEFLATED) as zip_file:
for arcname, real_path in file_list_generator():
zip_file.write(str(real_path), arcname)
|
import requests
from flask import request, jsonify
from .base import Base
from .json_validate import SCHEMA
class UserCoupons(Base):
    def get(self):
        """
        Return users' coupon holdings, enriched with full coupon documents
        and store details fetched from the accounts service.

        Response shape:
        {
            "userCoupons": [
                {
                    "userId": xxx,
                    "storeId": yyy,
                    "coupons": {}
                },
                {
                    "userId": aaa,
                    "storeId": bbb,
                    "coupons": {}
                }
            ]
        }
        """
        params = request.args.to_dict()
        is_valid, tag = self.validate_dict_with_schema(
            params, SCHEMA['user_coupons_get'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_query_params'], tag)
        flag, user_coupons = self.db.find_by_condition('userCoupons', params)
        if not flag:
            return '', 500
        if user_coupons:
            # Hoisted out of the per-record loop: importing on every
            # iteration was wasteful.
            from bson import ObjectId
            for user_coupon in user_coupons:
                coupon_id_list = [ObjectId(coupon_id) for coupon_id
                                  in user_coupon['coupons'].keys()]
                flag, coupons = self.db.find_by_condition(
                    'coupons', {'_id': {'$in': coupon_id_list}})
                if not flag:
                    return '', 500
                # Attach the held quantity to each coupon document.
                # NOTE(review): assumes the db layer exposes the document id
                # under 'id' (the query above uses '_id') — confirm against
                # self.db's result mapping.
                for coupon in coupons:
                    coupon_num = user_coupon['coupons'].get(coupon['id'])
                    coupon['number'] = coupon_num
                user_coupon['coupons'] = coupons
                store_id = user_coupon['storeId']
                api_resp = requests.get(
                    '{0}/accounts/stores/{1}'.format(
                        self.endpoint['accounts'], store_id))
                resp_status = api_resp.status_code
                if resp_status != 200:
                    if resp_status == 400:
                        return self.error_msg(api_resp.json())
                    return '', 500
                store = api_resp.json()
                user_coupon['storeName'] = store['storeName']
                user_coupon['address'] = store['address']
        return jsonify({'userCoupons': user_coupons})

    def put(self):
        """
        Grant the user the eligible coupon with the highest 'pay' threshold
        at the given store, incrementing its count in the user's holdings
        (or creating the holding record if none exists).
        """
        is_valid, data = self.get_params_from_request(
            request, SCHEMA['user_coupons_put'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_body_content'], data)
        amount = data['amount']
        user_id = data['userId']
        store_id = data['storeId']
        # All coupons whose pay threshold the purchase amount satisfies
        flag, coupons = self.db.find_by_condition(
            'coupons', {'storeId': store_id, 'pay': {'$lte': amount}})
        if not flag:
            return '', 500
        if not coupons:
            return jsonify({'id': user_id})
        # BUG FIX: the original loop never updated the running `pay` value,
        # so it selected the last coupon whose pay exceeded the *first*
        # coupon's, not the coupon with the maximum pay threshold.
        best_coupon = max(coupons, key=lambda coupon: coupon['pay'])
        coupon_id = best_coupon['id']
        flag, user_coupon = self.db.find_by_condition(
            'userCoupons', {'storeId': store_id, 'userId': user_id})
        if not flag:
            return '', 500
        if user_coupon:
            user_coupon_id = user_coupon[0]['id']
            flag, result = self.db.update('userCoupons', {
                'id': user_coupon_id}, {'$inc': {'coupons.' + coupon_id: 1}})
            if not flag:
                self.logger.error('update user coupon failed')
                return '', 500
            if not result:
                return self.error_msg(self.ERR['user_coupon_not_exist'])
            return jsonify(result)
        else:
            result = self.db.create('userCoupons', {'storeId': store_id,
                                                    'userId': user_id,
                                                    'coupons': {coupon_id: 1}})
            if not result:
                self.logger.error('create user coupon failed')
                return '', 500
            return jsonify(result)
class UserCouponRemover(Base):
    def post(self):
        """
        Consume one instance of a coupon from a user's holdings at a store.

        Decrements the coupon's count; when the last copy is consumed the
        key is removed from the holdings document entirely. Returns 500 on
        database failure, an error message when the holding does not exist,
        and the update result with status 201 on success.
        """
        is_valid, data = self.get_params_from_request(
            request, SCHEMA['user_coupon_remover_post'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_body_content'], data)
        user_id = data['userId']
        store_id = data['storeId']
        coupon_id = data['couponId']
        coupon_key = 'coupons.' + coupon_id
        # Only match holdings that actually contain this coupon
        query = {'userId': user_id, 'storeId': store_id,
                 coupon_key: {'$exists': True}}
        flag, user_coupon = self.db.find_by_condition('userCoupons', query)
        if not flag:
            return '', 500
        if not user_coupon:
            return self.error_msg(self.ERR['user_coupon_not_exist'])
        holding = user_coupon[0]
        coupon_num = holding['coupons'][coupon_id]
        if coupon_num < 1:
            return self.error_msg(self.ERR['user_coupon_not_exist'])
        # Last copy: drop the key entirely; otherwise decrement the counter.
        if coupon_num == 1:
            update_doc = {'$unset': {coupon_key: 1}}
        else:
            update_doc = {'$inc': {coupon_key: -1}}
        flag, result = self.db.update(
            'userCoupons', {'id': holding['id']}, update_doc)
        if not flag:
            return '', 500
        return jsonify(result), 201
|
<reponame>mohsenari/aws-lex-v2-cfn-cr
#!/usr/bin/env python3.8
################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed #
# on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing #
# permissions and limitations under the License. #
################################################################################
"""Amazon Lex CloudFormation Custom Resource Intent Manager"""
import logging
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
import boto3
from .slot import Slot
from .slot_type import SlotType
from .shared.api import get_api_parameters
from .shared.constants import (
CUSTOM_ATTRIBUTES,
)
if TYPE_CHECKING:
from mypy_boto3_lexv2_models import LexModelsV2Client
from mypy_boto3_lexv2_models.type_defs import (
CreateIntentResponseTypeDef,
CreateSlotResponseTypeDef,
UpdateIntentResponseTypeDef,
)
else:
LexModelsV2Client = object
CreateIntentResponseTypeDef = object
CreateSlotResponseTypeDef = object
UpdateIntentResponseTypeDef = object
class Intent:
    """Lex V2 CloudFormation Custom Resource Intent"""

    def __init__(
        self,
        client: Optional[LexModelsV2Client] = None,
        logger: Optional[logging.Logger] = None,
    ):
        self._client = client or boto3.client("lexv2-models")
        self._logger = logger or logging.getLogger(__name__)
        # Managers for the slot types and slots owned by this intent
        self._slot_type_manager = SlotType(
            client=self._client,
            logger=self._logger,
        )
        self._slot_manager = Slot(
            client=self._client,
            logger=self._logger,
        )

    def _create_intent(self, input_parameters: Dict[str, Any]) -> CreateIntentResponseTypeDef:
        """Call CreateIntent with only the parameters the API accepts."""
        operation = "CreateIntent"
        operation_parameters = get_api_parameters(
            operation=operation,
            input_parameters=input_parameters,
            client=self._client,
            logger=self._logger,
        )
        response = self._client.create_intent(**operation_parameters)
        self._logger.debug(response)
        return response

    def _create_or_update_existing_slots(
        self,
        bot_id: str,
        bot_version: str,
        intent_id: str,
        locale_id: str,
        # BUG FIX: was `slots=List[Dict[str, Any]]` — that assigned the typing
        # object as the *default value* instead of annotating the parameter.
        slots: List[Dict[str, Any]],
    ) -> None:
        """Partition slots into existing/new by looking up their ids, then
        update the existing ones and create the new ones."""
        slots_to_update: List[Dict[str, Any]] = []
        slots_to_create: List[Dict[str, Any]] = []
        for slot in slots:
            slot_name = slot["slotName"]
            slot_id = self._slot_manager.get_slot_id(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slot_name=slot_name,
            )
            if slot_id:
                slots_to_update.append(slot)
            else:
                slots_to_create.append(slot)
        if slots_to_update:
            self._update_existing_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slots=slots_to_update,
            )
        if slots_to_create:
            self._create_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slots=slots_to_create,
            )

    def _create_slots(
        self,
        bot_id: str,
        bot_version: str,
        locale_id: str,
        intent_id: str,
        # BUG FIX: annotation, not a typing-object default (see above)
        slots: List[Dict[str, Any]],
    ) -> List[CreateSlotResponseTypeDef]:
        """Create each slot, resolving its slot type id from the custom
        slot type name attribute. Raises ValueError when the name or id
        cannot be resolved."""
        create_slot_responses: List[CreateSlotResponseTypeDef] = []
        for slot in slots:
            slot_name = slot["slotName"]
            slot_type_name = slot.get(CUSTOM_ATTRIBUTES["slotTypeName"], "")
            if not slot_type_name:
                raise ValueError("unable to find slot type name attribute")
            slot_type_id = self._slot_type_manager.get_slot_type_id(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                slot_type_name=slot_type_name,
            )
            if not slot_type_id:
                raise ValueError(f"unable to find slot type id for slot name: {slot_name}")
            # Values in `slot` deliberately override the defaults built here
            input_parameters = {
                "botId": bot_id,
                "botVersion": bot_version,
                "intentId": intent_id,
                "localeId": locale_id,
                "slotTypeId": slot_type_id,
                **slot,
            }
            response = self._slot_manager.create_slot(input_parameters=input_parameters)
            create_slot_responses.append(response)
        return create_slot_responses

    def _delete_slots(
        self,
        bot_id: str,
        bot_version: str,
        locale_id: str,
        intent_id: str,
        # BUG FIX: annotation, not a typing-object default (see above)
        slots: List[Dict[str, Any]],
    ) -> None:
        """Delete each slot by name; missing slots are logged, not fatal."""
        for slot in slots:
            slot_name = slot["slotName"]
            slot_id = self._slot_manager.get_slot_id(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slot_name=slot_name,
            )
            input_parameters = {
                "botId": bot_id,
                "botVersion": bot_version,
                "localeId": locale_id,
                "intentId": intent_id,
                "slotId": slot_id,
            }
            if slot_id:
                self._slot_manager.delete_slot(input_parameters=input_parameters)
            else:
                self._logger.warning(
                    "unable to find slot with name: %s",
                    slot_name,
                )

    def _update_intent(self, input_parameters: Dict[str, Any]) -> UpdateIntentResponseTypeDef:
        """Call UpdateIntent with only the parameters the API accepts."""
        operation = "UpdateIntent"
        operation_parameters = get_api_parameters(
            operation=operation,
            input_parameters=input_parameters,
            client=self._client,
            logger=self._logger,
        )
        response = self._client.update_intent(**operation_parameters)
        self._logger.debug(response)
        return response

    def _update_existing_slots(
        self,
        bot_id: str,
        bot_version: str,
        intent_id: str,
        locale_id: str,
        # BUG FIX: annotation, not a typing-object default (see above)
        slots: List[Dict[str, Any]],
    ) -> None:
        """Update each existing slot, resolving its slot type id from the
        custom name attribute or from a built-in AMAZON.* slot type id."""
        for slot in slots:
            slot_name = slot["slotName"]
            slot_id = self._slot_manager.get_slot_id(
                bot_id=bot_id,
                bot_version=bot_version,
                intent_id=intent_id,
                locale_id=locale_id,
                slot_name=slot_name,
            )
            if not slot_id:
                raise ValueError(f"slot not found: {slot_name}")
            slot_type_id = ""
            if CUSTOM_ATTRIBUTES["slotTypeName"] in slot:
                slot_type_name = slot[CUSTOM_ATTRIBUTES["slotTypeName"]]
                slot_type_id = self._slot_type_manager.get_slot_type_id(
                    bot_id=bot_id,
                    bot_version=bot_version,
                    locale_id=locale_id,
                    slot_type_name=slot_type_name,
                )
            elif "slotTypeId" in slot and slot["slotTypeId"].startswith("AMAZON."):
                # built-in slot types carry their id directly
                slot_type_id = slot["slotTypeId"]
            if not slot_type_id:
                raise ValueError(
                    f"missing CR.slotTypeName or slotTypeId attribute for slot name: {slot_name}"
                )
            input_parameters = {
                "botId": bot_id,
                "botVersion": bot_version,
                "localeId": locale_id,
                "intentId": intent_id,
                "slotId": slot_id,
                "slotTypeId": slot_type_id,
                **slot,
            }
            self._slot_manager.update_slot(input_parameters=input_parameters)

    def _update_slots(
        self,
        bot_id: str,
        bot_version: str,
        locale_id: str,
        intent_id: str,
        new_slots: List[Dict[str, Any]],
        old_slots: List[Dict[str, Any]],
    ) -> List[Dict[str, Any]]:
        """Diff old vs. new slots by name: create added ones, delete removed
        ones, update changed ones. Returns the list of touched slots."""
        old_slot_names = {s_t["slotName"] for s_t in old_slots}
        new_slot_names = {s_t["slotName"] for s_t in new_slots}
        slots_to_create = [s_t for s_t in new_slots if s_t["slotName"] not in old_slot_names]
        slots_to_delete = [s_t for s_t in old_slots if s_t["slotName"] not in new_slot_names]
        slot_names_to_update = new_slot_names.intersection(old_slot_names)
        slots_to_update_new = {
            s_t["slotName"]: s_t for s_t in new_slots if s_t["slotName"] in slot_names_to_update
        }
        slots_to_update_old = {
            s_t["slotName"]: s_t for s_t in old_slots if s_t["slotName"] in slot_names_to_update
        }
        # only update slots whose definition actually changed
        slots_to_update = [
            slots_to_update_new[slot_name]
            for slot_name in slot_names_to_update
            if slots_to_update_new[slot_name] != slots_to_update_old[slot_name]
        ]
        if slots_to_create or slots_to_update:
            self._create_or_update_existing_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slots=[*slots_to_create, *slots_to_update],
            )
        if slots_to_delete:
            self._delete_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                locale_id=locale_id,
                intent_id=intent_id,
                slots=slots_to_delete,
            )
        return [*slots_to_create, *slots_to_delete, *slots_to_update]

    def get_intent_id(
        self,
        bot_id: str,
        bot_version: str,
        locale_id: str,
        intent_name: str,
    ) -> str:
        """Get Intent Id from Name.

        Paginates through ListIntents with an exact-name filter; returns an
        empty string (with a warning) when no intent matches.
        """
        list_intents_args: Dict[str, Any] = dict(
            botId=bot_id,
            botVersion=bot_version,
            localeId=locale_id,
            filters=[
                {
                    "name": "IntentName",
                    "values": [intent_name],
                    "operator": "EQ",
                }
            ],
            sortBy={
                "attribute": "IntentName",
                "order": "Ascending",
            },
        )
        while True:
            response = self._client.list_intents(**list_intents_args)
            self._logger.debug(response)
            intent_summaries = response["intentSummaries"]
            intent_id = intent_summaries[0]["intentId"] if intent_summaries else ""
            if intent_id:
                break
            next_token = response.get("nextToken")
            if next_token:
                list_intents_args["nextToken"] = next_token
            else:
                break
        if not intent_id:
            self._logger.warning("could not find intent named: %s", intent_name)
        return intent_id

    def create_intent(
        self, input_parameters: Dict[str, Any]
    ) -> Union[CreateIntentResponseTypeDef, UpdateIntentResponseTypeDef]:
        """Create Intent.

        Also creates the intent's slots (when provided via the custom slots
        attribute) and then updates the intent with slot priorities.
        """
        intent_name = input_parameters.get("intentName")
        # fallback intent is automatically created with the locale and can only
        # be updated. The intent has a fixed ID: "FALLBCKINT"
        # It does not contain slots
        if intent_name == "FallbackIntent":
            return self._update_intent(
                input_parameters={
                    "intentId": "FALLBCKINT",
                    "parentIntentSignature": "AMAZON.FallbackIntent",
                    **input_parameters,
                }
            )
        response = self._create_intent(input_parameters=input_parameters)
        bot_id = response["botId"]
        bot_version = response["botVersion"]
        locale_id = response["localeId"]
        intent_id = response["intentId"]
        if CUSTOM_ATTRIBUTES["slots"] in input_parameters:
            slots = self._create_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                intent_id=intent_id,
                locale_id=locale_id,
                slots=input_parameters[CUSTOM_ATTRIBUTES["slots"]],
            )
            # slot priorities follow the declaration order of the slots
            slot_priorities = [
                dict(priority=(i + 1), slotId=slot["slotId"]) for i, slot in enumerate(slots)
            ]
            update_intent_input_parameters = {
                "intentId": intent_id,
                "slotPriorities": slot_priorities,
                **input_parameters,
            }
            response = self._update_intent(input_parameters=update_intent_input_parameters)
        return response

    def delete_intent(self, input_parameters: Dict[str, Any]) -> None:
        """Delete Intent"""
        operation = "DeleteIntent"
        intent_id = input_parameters.get("intentId")
        # fallback intent is automatically created with the locale and can only
        # be updated - not deleted. The intent has a fixed ID: "FALLBCKINT"
        # ignoring deletes to avoid failures
        if intent_id == "FALLBCKINT":
            self._logger.warning("attempted to delete fallback intent - ignoring")
            return
        operation_parameters = get_api_parameters(
            operation=operation,
            input_parameters=input_parameters,
            client=self._client,
            logger=self._logger,
        )
        self._client.delete_intent(**operation_parameters)

    def update_intent(
        self,
        bot_id: str,
        bot_version: str,
        locale_id: str,
        intent_id: str,
        intent: Dict[str, Any],
        old_intent: Dict[str, Any],
    ) -> UpdateIntentResponseTypeDef:
        """Update Intent.

        Diffs the old and new slot lists, applies slot changes, recomputes
        slot priorities, and finally updates the intent itself.
        """
        input_parameters: Dict[str, Any] = {
            "botId": bot_id,
            "botVersion": bot_version,
            "localeId": locale_id,
            "intentId": intent_id,
            **intent,
        }
        intent_name = intent.get("intentName")
        # the fallback intent has no slots and is updated directly
        if intent_name == "FallbackIntent":
            return self._update_intent(
                input_parameters={
                    "parentIntentSignature": "AMAZON.FallbackIntent",
                    **input_parameters,
                }
            )
        old_slots = (
            old_intent[CUSTOM_ATTRIBUTES["slots"]]
            if CUSTOM_ATTRIBUTES["slots"] in old_intent
            else []
        )
        new_slots = (
            intent[CUSTOM_ATTRIBUTES["slots"]] if CUSTOM_ATTRIBUTES["slots"] in intent else []
        )
        if new_slots or old_slots:
            self._update_slots(
                bot_id=bot_id,
                bot_version=bot_version,
                intent_id=intent_id,
                locale_id=locale_id,
                new_slots=new_slots,
                old_slots=old_slots,
            )
        if new_slots:
            slot_priorities = []
            for i, slot in enumerate(new_slots):
                slot_name = slot["slotName"]
                slot_id = self._slot_manager.get_slot_id(
                    bot_id=bot_id,
                    bot_version=bot_version,
                    intent_id=intent_id,
                    locale_id=locale_id,
                    slot_name=slot_name,
                )
                if slot_id:
                    slot_priorities.append(dict(priority=(i + 1), slotId=slot_id))
                else:
                    self._logger.warning("slot id not found for slot name: %s", slot_name)
            if slot_priorities:
                input_parameters["slotPriorities"] = slot_priorities
        return self._update_intent(input_parameters=input_parameters)
|
<reponame>annehulsey/high-resolution_post-earthquake_recovery_simulation_of_safety_cordons
from .base import *
def assign_impeding_factors(community_damage, rc_triggers, if_idx, if_pool, max_rc, weeks):
    """Assign impeding-factor delays to the realizations that trigger them.

    For each (repair-class trigger, impeding-factor column) pair, the
    realizations whose max repair class reaches the trigger receive the
    simulated delay from that column of if_pool. Later pairs overwrite
    earlier ones where both apply. Returns delays in days (multiplying by
    7 when `weeks` is truthy, since the pool values are then in weeks).
    """
    n_rups, n_bldgs, _, n_sims = community_damage.shape
    delay = np.zeros([n_rups, n_bldgs, n_sims])
    for trigger_rc, pool_column in zip(rc_triggers, if_idx):
        # realizations whose repair class reaches this trigger
        triggered = np.where(max_rc >= trigger_rc)
        # simulated impeding-factor values for this trigger level
        factor_draws = if_pool[:, :, pool_column, :]
        delay[triggered] = factor_draws[triggered]
    # convert weeks to days when the pool parameters are in weeks
    return 7 * delay if weeks else delay
def inspection_time(community_damage, if_pool):
    """Days of delay due to post-earthquake inspection (per REDi v1).

    Inspection is only included when any component — structural or
    non-structural — reaches repair class RC=3.
    """
    # channel indices for the structural and non-structural repair classes
    structural_idx = 4
    nonstructural_idx = 5
    # largest RC per realization, across structural and non-structural
    max_rc = np.maximum(community_damage[:, :, structural_idx, :],
                        community_damage[:, :, nonstructural_idx, :])
    # RC=3 triggers impeding-factor column 0; pool values already in days (weeks=0)
    return assign_impeding_factors(community_damage, [3], [0], if_pool, max_rc, 0)
def eng_mob_time(community_damage, if_pool):
    """Days of delay due to engineering mobilization (per REDi v1).

    Determined by the maximum structural repair class OR whether the
    building required a complete redesign (replacement).
    """
    # get max structural RC
    idx = 4
    # BUG FIX: basic slicing returns a *view* of community_damage, so the
    # `max_rc[idx] = 4` replacement override below used to write back into
    # the caller's damage array, corrupting the structural-RC channel for
    # any later reader. Copy so the override stays local to this function.
    max_rc = community_damage[:, :, idx, :].copy()

    # set replacement trigger as RC = 4
    idx = 10  ## THIS IDX FOR REPLACEMENT INCLUDES CASES WHERE THE *FULL* REPAIR EXCEEDED REPLACEMENT TIME ##
    replacement = community_damage[:, :, idx, :]
    idx = np.where(replacement == 1)
    max_rc[idx] = 4

    # prep RC criteria for selecting the relevant impeding factor for FUNCTIONAL recovery
    rc_triggers = [3,
                   4]  ## only RC=3 and replacement is relevant for functional recovery, per REDi v1 (structural RCs skip RC=2)
    if_idx = [2, 3]

    # flag for parameters units in weeks
    weeks = 1

    # retrieve the relevant values
    days = assign_impeding_factors(community_damage, rc_triggers, if_idx, if_pool, max_rc, weeks)

    return days
def financing_time(community_damage, if_pool):
    """Days of delay due to arranging repair financing (per REDi v1).

    Based on the maximum repair class of any component, structural or
    non-structural.
    """
    # channel indices for the structural and non-structural repair classes
    structural_idx = 4
    nonstructural_idx = 5
    # largest RC per realization, across structural and non-structural
    max_rc = np.maximum(community_damage[:, :, structural_idx, :],
                        community_damage[:, :, nonstructural_idx, :])
    # RC=2 triggers impeding-factor column 4; pool parameters are in weeks
    return assign_impeding_factors(community_damage, [2], [4], if_pool, max_rc, 1)
def contr_mob_time(community_damage, if_pool):
    """Days of delay due to contractor mobilization (per REDi v1).

    Based on the maximum repair class of any component, structural or
    non-structural.
    """
    # channel indices for the structural and non-structural repair classes
    structural_idx = 4
    nonstructural_idx = 5
    # largest RC per realization, across structural and non-structural
    max_rc = np.maximum(community_damage[:, :, structural_idx, :],
                        community_damage[:, :, nonstructural_idx, :])
    # RC=2 triggers impeding-factor column 6; pool parameters are in weeks
    return assign_impeding_factors(community_damage, [2], [6], if_pool, max_rc, 1)
def permitting_time(community_damage, if_pool):
    """Days of delay due to permitting (per REDi v1).

    Based on the maximum structural repair class, with replacement
    (complete redesign) treated as RC=4.
    """
    # get max structural RC
    idx = 4
    # BUG FIX: basic slicing returns a *view* of community_damage, so the
    # `max_rc[idx] = 4` replacement override below used to write back into
    # the caller's damage array, corrupting the structural-RC channel for
    # any later reader. Copy so the override stays local to this function.
    max_rc = community_damage[:, :, idx, :].copy()

    # set replacement trigger as RC = 4
    idx = 10  ## THIS IDX FOR REPLACEMENT INCLUDES CASES WHERE THE *FULL* REPAIR EXCEEDED REPLACEMENT TIME ##
    replacement = community_damage[:, :, idx, :]
    idx = np.where(replacement == 1)
    max_rc[idx] = 4

    # prep RC criteria for selecting the relevant impeding factor for FUNCTIONAL recovery
    rc_triggers = [3]
    if_idx = [8]

    # flag for parameters units in weeks
    weeks = 1

    # retrieve the relevant values
    days = assign_impeding_factors(community_damage, rc_triggers, if_idx, if_pool, max_rc, weeks)

    return days
<filename>tests/common/test_run/ascend/fused_cast_conv_run.py<gh_stars>100-1000
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math import Cast
from akg.ops.nn.ascend import Conv
from akg.ops.nn.ascend.conv import conv_core
from akg import dim
from tests.common.test_run.ascend.conv_utils import conv_forward_naive
from tests.common.test_run.ascend.conv_utils import random_gaussian
from akg.utils import custom_tiling as ct_util
# Hand-tuned tiling configurations for known (ResNet-50) conv workloads.
# Key: str of ((fmap NCHW), (filter NCHW), pad, stride, dilation).
# Value: ([tile_hh, tile_coco, tile_mm, tile_kk, tile_nn(, tile_ww)], {"bypass": ...}).
cast_conv_set_dim_map = {
    # resnet50_wkl tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, tile_ww
    str(((1, 1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([14, 48, 64, 96, 128], {"bypass": 0}), # 01
    str(((1, 1024, 14, 14), (256, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([14, 32, 208, 64, 112], {"bypass": 0}), # 02
    str(((1, 1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([14, 48, 49, 32, 512], {"bypass": 0}), # 03
    str(((1, 128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): ([28, 32, 400, 32, 128], {"bypass": 0}), # 04
    str(((1, 128, 28, 28), (512, 128, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([28, 48, 784, 16, 32], {"bypass": 0}), # 05
    str(((1, 2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([7, 16, 49, 32, 512], {"bypass": 0}), # 06
    str(((1, 256, 14, 14), (1024, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([14, 48, 112, 32, 240], {"bypass": 0}), # 07
    str(((1, 256, 14, 14), (256, 256, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): ([14, 16, 196, 64, 256], {"bypass": 0}), # 08
    str(((1, 256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([7, 32, 252, 64, 128], {"bypass": 0}), # 09
    str(((1, 256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([16, 64, 280, 16, 64], {"bypass": 0}), # 10
    str(((1, 3, 224, 224), (64, 3, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))): ([65, 48, 448, 32, 64], {"bypass": 0}), # 11
    str(((1, 512, 28, 28), (128, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([14, 32, 448, 16, 64], {"bypass": 0}), # 12
    str(((1, 512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([11, 32, 98, 64, 256], {"bypass": 0}), # 13
    str(((1, 512, 7, 7), (2048, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([7, 48, 49, 16, 512], {"bypass": 0}), # 14
    str(((1, 512, 7, 7), (512, 512, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): ([7, 16, 49, 32, 512], {"bypass": 0}), # 15
    str(((1, 64, 56, 56), (256, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([56, 256, 784, 16, 32], {"bypass": 0}), # 16
    str(((1, 64, 56, 56), (64, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))): ([56, 64, 784, 16, 32], {"bypass": 0}), # 17
    str(((1, 64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))): ([56, 64, 336, 16, 64], {"bypass": 0}), # 18
    str(((1, 256, 56, 56), (512, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([7, 128, 196, 64, 256], {"bypass": 0}), # 19
    str(((1, 512, 28, 28), (1024, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))): ([13, 64, 112, 32, 512], {"bypass": 0}), # 20
}
def cast_conv_set_dim_func(data, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias=False, block_size=16, attrs=None):
    """Compute the tiling dim specification string for the cast+conv kernel.

    Normalizes pad/stride/dilation, looks up a hand-tuned tiling in
    cast_conv_set_dim_map (or derives a default), and returns the resulting
    akg dim.Dim info as a string.
    """
    # normalize stride to a 2-element [stride_h, stride_w] list
    if isinstance(stride_, int):
        stride_ = [stride_] * 2
    elif isinstance(stride_, (list, tuple)) and 1 == len(stride_):
        stride_ = list(stride_) * 2
    elif isinstance(stride_, (list, tuple)) and 2 == len(stride_):
        pass
    else:
        raise RuntimeError('stride para illegal !!!')

    # normalize pad to a 4-element list
    if isinstance(pad_, int):
        pad_ = [pad_] * 4
    elif isinstance(pad_, (list, tuple)) and 1 == len(pad_):
        pad_ = list(pad_) * 4
    elif isinstance(pad_, (list, tuple)) and 4 == len(pad_):
        pass
    else:
        raise RuntimeError('pad para illegal !!!')

    # normalize dilation to a 2-element [dilation_h, dilation_w] list
    if isinstance(dilation_, int):
        dilation_ = [dilation_] * 2
    elif isinstance(dilation_, (list, tuple)) and 1 == len(dilation_):
        dilation_ = list(dilation_) * 2
    elif isinstance(dilation_, (list, tuple)) and 2 == len(dilation_):
        pass
    else:
        raise RuntimeError('dilation para illegal !!!')

    # build the workload signature used as the tiling-map key
    key = []
    key.append(tuple(fmap_shape))
    key.append(tuple(filter_shape))
    key.append(tuple(pad_))
    key.append(tuple(stride_))
    key.append(tuple(dilation_))
    hash_key = str(tuple(key))

    # input shape (NCHW -> NC1HWC0); round channels up to a block multiple
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size

    # kernel shape (NCHW -> NC1HWC0 -> Fractal); round up to block multiples
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size

    # padding((padding_h, padding_w) -> (padding_top, padding_bottom, padding_left, padding_right))
    # NOTE(review): pad_ was normalized to 4 entries above, yet only
    # pad_[0]/pad_[1] are consumed here, so an asymmetric 4-entry pad would
    # be mishandled. All workloads in cast_conv_set_dim_map use symmetric
    # padding — confirm before relying on asymmetric pads.
    padding = (pad_[0], pad_[0], pad_[1], pad_[1])
    p_top, p_bottom, p_left, p_right = padding

    # stride (stride_h, stride_w)
    s_h, s_w = stride_

    # dilation (dilation_h, dilation_w)
    d_h, d_w = dilation_

    # effective (dilated) kernel width and resulting output width
    k_w_d = (k_w - 1) * d_w + 1
    out_w = (in_w + p_left + p_right - k_w_d) // (s_w) + 1

    bypass_list = [0, 1]
    bypass = 0
    # tiling selection priority: explicit attrs > tuned map entry > defaults
    if attrs is not None and 'conv_tile' in attrs and len(attrs['conv_tile']) >= 5:
        tile_hh = attrs['conv_tile'][0]
        tile_coco = attrs['conv_tile'][1]
        tile_mm = attrs['conv_tile'][2]
        tile_kk = attrs['conv_tile'][3]
        tile_nn = attrs['conv_tile'][4]
        if len(attrs['conv_tile']) > 5:
            tile_ww = attrs['conv_tile'][5]
        else:
            tile_ww = (out_w - 1) * s_w + k_w_d
        if 'bypass' in attrs:
            bypass = attrs['bypass']
    elif hash_key in cast_conv_set_dim_map:
        configs = cast_conv_set_dim_map[hash_key]
        if isinstance(configs, tuple):
            tiles = configs[0]
            if "bypass" in configs[1]:
                bypass = configs[1]["bypass"]
        else:
            tiles = configs
        if len(tiles) > 5:
            tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, tile_ww = tiles
        else:
            tile_hh, tile_coco, tile_mm, tile_kk, tile_nn = tiles
            tile_ww = (out_w - 1) * s_w + k_w_d
    else:
        # fallback defaults: one kernel window in H, full line in W, minimal tiles
        tile_hh = (k_h - 1) * d_h + 1 + p_top * s_h
        tile_ww = (out_w - 1) * s_w + k_w_d
        tile_coco = 16
        tile_mm = 16
        tile_kk = 16
        tile_nn = 16

    if not (bypass in bypass_list):
        raise RuntimeError("conv_cce ony supports %s while bypass is %d" % (",".join(str(bypass_list)), bypass))

    # when tiling covers the whole input height, include the padding rows too
    if (tile_hh == in_h):
        tile_hh += p_top + p_bottom
    # round every tile size up to a block multiple
    tile_coco = (tile_coco + block_size - 1) // block_size * block_size
    tile_mm = (tile_mm + block_size - 1) // block_size * block_size
    tile_kk = (tile_kk + block_size - 1) // block_size * block_size
    tile_nn = (tile_nn + block_size - 1) // block_size * block_size

    c0 = block_size
    c1_cut = tile_coco // c0
    # NOTE(review): h_window_cut is computed but never used below; c1_cut and
    # out_w are also recomputed further down, superseding these values.
    h_window_cut = (tile_hh - k_h) // s_h + 1

    out_w = (in_w + p_left + p_right - k_w) // (s_w) + 1

    # 5D layouts for input, kernel and output
    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    in_n, in_c1, in_h, in_w, in_c0 = input_shape_nc1hwc0

    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, k_c1, k_h, k_w, k_c0 = kernel_shape_nc1hwc0

    # dilated kernel extents and the per-tile output extents
    k_h_d = (k_h - 1) * d_h + 1
    k_w_d = (k_w - 1) * d_w + 1
    out_h = (in_h + p_top + p_bottom - k_h_d) // (s_h) + 1
    tile_out_h = (tile_hh - k_h_d) // s_h + 1
    out_w = (in_w + p_left + p_right - k_w_d) // (s_w) + 1
    tile_out_w = (tile_ww - k_w_d) // s_w + 1

    out_shape_nc1hwc0 = (in_n, k_n // block_size, out_h, out_w, block_size)
    out_n, out_c1, out_h, out_w, out_c0 = out_shape_nc1hwc0

    if (tile_coco > 0):
        c1_cut = tile_coco // block_size
    else:
        c1_cut = out_c1

    # set dim: register a tile size for every output axis larger than 1
    info = dim.Dim()
    if (out_n > 1):
        info.setdim(index=0, axis=0, tilel1=1, tilel0=0)  # n
    if (out_c1 > 1):
        info.setdim(index=0, axis=0, tilel1=c1_cut, tilel0=0)  # c1
    if (out_h > 1):
        info.setdim(index=0, axis="H", tilel1=tile_out_h, tilel0=0)  # h
    if (out_w > 1):
        info.setdim(index=0, axis="W", tilel1=tile_out_w, tilel0=0)  # w
    if (out_c0 > 1):
        info.setdim(index=0, axis=4, tilel1=out_c0, tilel0=0)  # c0

    # reduction axes: input channel blocks and kernel window
    if (in_c1 > 1):
        info.setdim(index=0, axis=5, tilel1=in_c1, tilel0=0)  # kc1
    if (k_h > 1):
        info.setdim(index=0, axis=5, tilel1=k_h, tilel0=0)  # kh
    if (k_w > 1):
        info.setdim(index=0, axis=5, tilel1=k_w, tilel0=0)  # kw

    return str(info)  # ct_util.set_dims_by_key(hash_key, conv_set_dim_map)
@ct_util.reg_set_dim_func(cast_conv_set_dim_func)
def cast_conv(data, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias=False, block_size=16, attrs=None):
    """Build a conv op whose weight input is first cast down to float16.

    data holds [feature_map, weight(, bias)]; returns the conv result and
    an empty attrs dict.
    """
    feature_map = data[0]
    # force the weight tensor's dtype before casting it down to float16
    data[1].dtype = 'float32'
    weight_fp16 = Cast(data[1], 'float16', target='cce')
    conv_inputs = [feature_map, weight_fp16, data[2]] if use_bias else [feature_map, weight_fp16]
    # mmad fp32 failed in post_fusing
    result, _ = conv_core(conv_inputs, fmap_shape, filter_shape, pad_, stride_, dilation_,
                          use_bias, block_size, attrs)
    return result, {}
def fused_cast_conv_run(fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias=False, dump_data=False, attrs=None):
    """Build, launch and validate the fused cast+conv kernel.

    Generates random inputs plus a NumPy reference result via ``gen_data``,
    optionally dumps the raw tensors to .bin files, compiles ``cast_conv``,
    launches it, and compares the device output against the reference with a
    tolerance scan that tracks the longest run of consecutive errors.

    Args:
        fmap_shape: feature-map shape (N, C, H, W).
        filter_shape: filter shape (N, C, H, W).
        pad_, stride_, dilation_: convolution configuration.
        use_bias: whether to generate and feed a bias tensor.
        dump_data: when True, write inputs and expected output to .bin files.
        attrs: extra build attributes.

    Returns:
        Tuple (inputs, out_data, expect, assert_res); ``inputs`` also contains
        the launch output buffer (see aliasing note below), ``assert_res`` is
        the pass/fail flag of the tolerance scan.
    """
    conv_dtype = 'float16'
    fmap_data, filter_data, bias_data, expect = gen_data(fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias)

    if dump_data:
        with open('input.bin', 'wb') as fo:
            fo.write(fmap_data.astype(np.float16, copy=False))
        with open('filter.bin', 'wb') as fo:
            fo.write(filter_data.astype(np.float16, copy=False))
        with open('bias.bin', 'wb') as fo:
            fo.write(bias_data.astype(np.float16, copy=False))
        with open('output.bin', 'wb') as fo:
            fo.write(expect.astype(np.float16, copy=False))

    out_data = np.full(expect.shape, 0, 'float16')
    if use_bias:
        inputs = [fmap_data, filter_data, bias_data]
        input_shape = [fmap_data.shape, filter_data.shape, bias_data.shape]
    else:
        inputs = [fmap_data, filter_data]
        input_shape = [fmap_data.shape, filter_data.shape]
    # NOTE: args aliases `inputs`, so the output buffer is appended to the
    # returned input list too — kept for backward compatibility with callers.
    args = inputs
    args.append(out_data)
    args = tuple(args)
    block_size = 16
    mod = utils.op_build_test(cast_conv, [input_shape], [conv_dtype],
                              op_attrs=[fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias, block_size, attrs],
                              kernel_name='cast_conv', attrs=attrs)
    out_data = utils.mod_launch(mod, args, expect=expect)
    data_len = expect.size
    # BUGFIX: initialize the pass flag up front. Previously, an exception
    # raised before the tolerance scan finished (e.g. a shape mismatch in the
    # loops below) left `assert_res` unbound and the final return raised
    # NameError instead of reporting a failure.
    assert_res = False
    try:
        actual = out_data
        N, C1, H, W, C0 = out_data.shape
        error = 0          # number of out-of-tolerance elements
        count = 0          # number of elements inspected
        lastErr = -2       # flat index of the previous error
        continueErr = 0    # length of the current consecutive-error run
        maxContinue = -1   # longest consecutive-error run seen
        maxEnd = 0         # flat index where the longest run ended
        partial_debug = 0  # when set, skip errors where the device value is 0
        for n in range(N):
            for c1 in range(C1):
                for h in range(H):
                    for w in range(W):
                        for c0 in range(C0):
                            a = actual[n, c1, h, w, c0]
                            b = expect[n, c1, h, w, c0]
                            if (abs(a - b) > abs(b) * 5e-03):
                                if (partial_debug and (a == 0.0)):
                                    continue
                                error += 1
                                if lastErr + 1 == count:
                                    continueErr += 1
                                else:
                                    if continueErr > maxContinue:
                                        maxContinue = continueErr
                                        maxEnd = lastErr
                                    continueErr = 1
                                lastErr = count
                            count += 1
        # Flush the final run in case the scan ended inside an error streak.
        if continueErr > maxContinue:
            maxContinue = continueErr
            maxEnd = lastErr
        print("error num: %d/%d (%.2f%%)" % (error, count, 100.0 * error / count))
        print("longest error range: [%d, %d]" % (maxEnd - maxContinue + 1, maxEnd))
        sys.stdout.flush()
        # A streak of >= 16 consecutive bad elements (one full C0 block) is
        # treated as a hard failure.
        if maxContinue >= 16:
            assert_res = False
        else:
            assert_res = True
        np.testing.assert_allclose(actual, expect, rtol=5e-02, atol=1e-2, equal_nan=True, verbose=True)
        print("\n\n******************** test ok *****************\n\n")
    except BaseException as e:
        # Best-effort diagnostics: dump both tensors for offline comparison
        # and fall through to return the (possibly failed) flag.
        np.savetxt("actual.txt", out_data.reshape(data_len))
        np.savetxt("expect.txt", expect.reshape(data_len))
        print(str(e))
    return inputs, out_data, expect, assert_res
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias):
    """Generate random conv inputs, the NumPy reference output, and the
    corresponding 5D hardware layouts.

    Args:
        fm_shape: feature-map shape (N, C, H, W).
        w_shape: filter shape (N, C, H, W).
        pad: int, 1-, or 4-element sequence (top, bottom, left, right).
        stride: int, 1-, or 2-element sequence (h, w).
        dilation: int, 1-, or 2-element sequence (h, w).
        bias: when truthy, generate a random bias; otherwise a zero bias.

    Returns:
        Tuple (feature, filt, bb, output):
        feature - fp16 feature map in NC1HWC0 layout,
        filt    - fp32 filter in fractal layout (C1*H*W, N1, N0, C0),
        bb      - fp16 bias reshaped to (1, N1, 1, 1, 16),
        output  - fp16 reference output in NC1HWC0 layout.

    Raises:
        RuntimeError: if stride/pad/dilation has an unsupported length.
    """
    def _expand(param, length, name):
        # Normalize an int / 1-element / length-element value to `length` items.
        if isinstance(param, int):
            return [param] * length
        if isinstance(param, (list, tuple)) and len(param) == 1:
            return list(param) * length
        if isinstance(param, (list, tuple)) and len(param) == length:
            return param
        raise RuntimeError('%s para illegal !!!' % name)

    stride = _expand(stride, 2, 'stride')
    pad = _expand(pad, 4, 'pad')
    dilation = _expand(dilation, 2, 'dilation')

    S_h, S_w = stride
    P_top, P_bottom, P_left, P_right = pad
    D_h, D_w = dilation

    IN, IC, IH, IW = fm_shape
    C0 = 16
    # Round every channel dimension up to a multiple of C0 so the 5D
    # reshapes below divide evenly.
    IC = ((IC + C0 - 1) // C0) * C0
    WN, WC, WH, WW = w_shape
    WN = ((WN + C0 - 1) // C0) * C0
    WC = ((WC + C0 - 1) // C0) * C0

    ON = IN
    OC = WN
    # Effective (dilated) kernel extents and the resulting output spatial size.
    WHD = (WH - 1) * D_h + 1
    WWD = (WW - 1) * D_w + 1
    OH = (IH + P_top + P_bottom - WHD) // S_h + 1
    OW = (IW + P_left + P_right - WWD) // S_w + 1

    x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
    # Keep an fp32 master copy of the weights: the kernel under test casts
    # fp32 -> fp16 on device, so the fractal filter layout is built from w1
    # while the fp16 copy w feeds the NumPy reference.
    w1 = random_gaussian((WN, WC, WH, WW), miu=0.5, sigma=0.01).astype(np.float32)
    w = w1.astype(np.float16)
    if bias:
        b = np.random.rand(WN).astype(np.float16, copy=False)
    else:
        b = (np.array(np.zeros(WN))).astype(np.float16, copy=False)

    conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
    out = conv_forward_naive(x, w, b, conv_param)

    # Feature map: NCHW -> 5D NC1HWC0.
    feature = x.reshape(IN, IC // C0, C0, IH, IW).transpose(0, 1, 3, 4, 2).copy()
    # Filter: NCHW -> C1HWNC0, then flattened to the fractal layout.
    filt = w1.reshape(WN, WC // C0, C0, WH, WW).transpose(1, 3, 4, 0, 2).copy()
    filt = filt.reshape(WC // C0 * WH * WW, WN // C0, C0, C0)
    bb = b.reshape(1, WN // C0, 1, 1, C0)
    # Reference output: NCHW -> 5D NC1HWC0.
    output = out.reshape(ON, OC // C0, C0, OH, OW).transpose(0, 1, 3, 4, 2).copy()
    return feature, filt, bb, output
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.