text
stringlengths 2
999k
|
|---|
################################################################################################################################
# *** Copyright Notice ***
#
# "Price Based Local Power Distribution Management System (Local Power Distribution Manager) v1.0"
# Copyright (c) 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory
# (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software, please contact
# Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
################################################################################################################################
from global_cache_controls import GlobalCacheBridge
class DVDController:
    """Controls a DVD player through a Global Cache IR bridge.

    :param bridge: object exposing ``sendir(connector, code)`` (e.g. a
        ``GlobalCacheBridge``) used to transmit raw IR codes.
    :param emitter: IR emitter/connector number on the bridge to transmit
        through (defaults to 1).
    """
    def __init__(self, bridge, emitter=1):
        self.bridge = bridge
        # Bug fix: the emitter argument was previously ignored and
        # hard-coded to 1, and never used when sending codes.
        self.emitter = emitter
    def power(self):
        """Sends the power function to dvd player."""
        return self.bridge.sendir(self.emitter, '1,1,38226,1,1,342,171,22,21,22,21,21,21,22,21,22,21,21,21,22,21,22,21,21,64,22,63,22,64,21,64,22,63,22,64,21,64,22,63,22,21,22,63,22,21,22,63,22,21,22,21,21,64,22,21,21,64,22,21,21,64,22,21,21,64,22,63,22,21,22,63,22,1523,341,85,22,3660,342,85,21,3660,342,85,22,3660,341,85,22,3800')
    # Need to fix signal for this
    def open(self):
        """Sends the open/eject function to dvd player."""
        return self.bridge.sendir(self.emitter, '1,6,38109,1,1,341,171,21,22,21,22,21,21,21,22,21,22,21,21,21,22,21,22,21,64,10,3800')
|
"""Yearly sunspots data 1700-2008"""
from sm2.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is public domain."""
TITLE = __doc__
SOURCE = """
http://www.ngdc.noaa.gov/stp/solar/solarda3.html
The original dataset contains monthly data on sunspot activity in the file
./src/sunspots_yearly.dat. There is also sunspots_monthly.dat.
"""
DESCRSHORT = """Yearly (1700-2008) data on sunspots from the National
Geophysical Data Center."""
DESCRLONG = DESCRSHORT
NOTE = """
Number of Observations - 309 (Annual 1700 - 2008)
Number of Variables - 1
Variable name definitions::
SUNACTIVITY - Number of sunspots for each year
The data file contains a 'YEAR' variable that is not returned by load.
"""
def load_pandas():
    """Load the yearly sunspot dataset as a pandas-backed Dataset.

    Returns
    -------
    du.Dataset
        ``endog`` is the SUNACTIVITY series indexed by YEAR; ``data``
        holds the full DataFrame (YEAR and SUNACTIVITY columns).
    """
    data = _get_data()
    # TODO: time series
    endog = data.set_index(data.YEAR).SUNACTIVITY
    # Bug fix: endog_name was 'volume', a copy/paste leftover from an
    # unrelated dataset that did not match the actual endog variable.
    dataset = du.Dataset(data=data, names=list(data.columns),
                         endog=endog, endog_name='SUNACTIVITY')
    return dataset
def load(as_pandas=None):
    """
    Load the yearly sunspot data and returns a data class.
    Parameters
    ----------
    as_pandas : bool
        Flag indicating whether to return pandas DataFrames and Series
        or numpy recarrays and arrays. If True, returns pandas.
    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    Notes
    -----
    This dataset only contains data for one variable, so the attributes
    data, raw_data, and endog are all the same variable. There is no exog
    attribute defined.
    """
    pandas_dataset = load_pandas()
    return du.as_numpy_dataset(pandas_dataset, as_pandas=as_pandas)
def _get_data():
    """Read the bundled sunspots.csv next to this module and cast to float."""
    raw = du.load_csv(__file__, 'sunspots.csv')
    return raw.astype(float)
|
__all__ = ["SNSTopic"]
from .api import SNSTopic # noqa
|
""" Implementation of the command line interface.
"""
import pprint
from argparse import ArgumentParser
from sanic.log import logger
from ci_hooks_app import __version__
from ci_hooks_app.config import config
def main(argv=None):
    """ Execute the application CLI.
    :param argv: argument list to parse (sys.argv by default)
    :return: exit status code (0 on success)
    """
    args = _args(argv)
    logger.debug(args.warn)
    logger.debug("starting execution")
    # Use a context manager so the config file handle is closed instead
    # of being leaked.
    with open(args.config, 'rt') as config_file:
        config.read_file(config_file)
    # do not move, needs to be imported after config is set up
    from ci_hooks_app import server
    server.app.run(host="0.0.0.0", port=8080, debug=True)
    logger.debug("successful completion")
    return 0
def _args(argv):
    """ Parse command line arguments.
    :param argv: argument list to parse
    :return: parsed namespace; ``config`` falls back to "config.ini"
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("-c", "--config", action="store",
                            help="config file [etc/config.ini]")
    arg_parser.add_argument("-v", "--version", action="version",
                            version="ci-hooks-app {:s}".format(__version__),
                            help="print version and exit")
    arg_parser.add_argument("-w", "--warn", default="WARN",
                            help="logger warning level [WARN]")
    parsed = arg_parser.parse_args(argv)
    # Don't specify this as an argument default or else it will always be
    # included in the list.
    parsed.config = parsed.config or "config.ini"
    return parsed
def interactive(argv=None):
    """ Execute the application CLI setup and return the app object for an
    external runner instead of serving it directly.
    :param argv: argument list to parse (sys.argv by default)
    :return: the ``github_app`` object from the server module
    """
    args = _args(argv)
    logger.debug(args.warn)
    logger.debug("starting execution")
    # Use a context manager so the config file handle is closed instead
    # of being leaked.
    with open(args.config, 'rt') as config_file:
        config.read_file(config_file)
    # do not move, needs to be imported after config is set up
    from ci_hooks_app import server
    return server.github_app
if __name__ == "__main__":
try:
status = main()
except:
logger.critical("shutting down due to fatal error")
raise # print stack trace
else:
raise SystemExit(status)
|
import pytest
from schemathesis import fixups
def test_global_fixup(testdir, fast_api_schema):
    """End-to-end check that globally installed fixups patch non-compliant
    Fast API schemas, and that ``uninstall`` removes the registered hook."""
    # When all fixups are enabled globally
    testdir.makepyfile(
        """
import schemathesis
from hypothesis import settings
schemathesis.fixups.install()
schema = schemathesis.from_dict({schema})
def teardown_module(module):
    schemathesis.fixups.uninstall()
    assert schemathesis.hooks.get_all_by_name("before_load_schema") == []
@schema.parametrize()
@settings(max_examples=1)
def test(case):
    assert 0 < case.query["value"] < 10
""".format(
            schema=fast_api_schema
        ),
    )
    # Then Fast API schemas that are not compliant should be processed
    result = testdir.runpytest("-s")
    result.assert_outcomes(passed=1)
@pytest.mark.parametrize(
    "value, expected",
    (
        # No-op case
        ({"exclusiveMinimum": True, "minimum": 5}, {"exclusiveMinimum": True, "minimum": 5}),
        # Draft 7 to Draft 4
        ({"exclusiveMinimum": 5}, {"exclusiveMinimum": True, "minimum": 5}),
        ({"exclusiveMaximum": 5}, {"exclusiveMaximum": True, "maximum": 5}),
        # Nested cases
        ({"schema": {"exclusiveMaximum": 5}}, {"schema": {"exclusiveMaximum": True, "maximum": 5}}),
        ([{"schema": {"exclusiveMaximum": 5}}], [{"schema": {"exclusiveMaximum": True, "maximum": 5}}]),
    ),
)
def test_fastapi_schema_conversion(value, expected):
    """The Fast API fixup converts Draft-7 numeric bounds to Draft-4 form
    in place, recursing into nested dicts and lists."""
    fixups.fast_api.before_load_schema(None, value)
    assert value == expected
|
#===============================================
# Written by enddl22@gmail.com on 7/Jun/2019
# Extracting images from a bag file
#===============================================
from __future__ import print_function
import os,sys
import argparse
from ros import rosbag
#import roslib
import rospy
from sensor_msgs.msg import Image
import cv2
from cv_bridge import CvBridge, CvBridgeError
from timeit import default_timer as timer
class Img_Extractor(object):
    """Extracts images from a rosbag topic and writes them to disk."""
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='extract images from a bag file')
        self.args = None
        self.args_parse()
        self.bag = None
        self.bridge = CvBridge()
        # Index of the last successfully converted image (None until run()).
        self.total_n_image = None
    def args_parse(self):
        """Declare the command-line options and parse sys.argv into self.args."""
        self.parser.add_argument('--img_topic', required=True,
                                 metavar="/image_raw",
                                 help='Name of image topic you want to extract')
        self.parser.add_argument('--bag', required=True,
                                 help='Path to the bag file and name, e.g. ./dataset/Big.bag')
        self.parser.add_argument('--file_name', required=False,
                                 help='Prefixed file name for stored images', default="frame")
        self.parser.add_argument('--output_format', required=False,
                                 help='output image format, e.g., jpg or png', default="jpg")
        self.parser.add_argument('--output_folder', required=False,
                                 help='Path to a output folder where extracted images will be stored.', default="./output")
        self.parser.add_argument('--encoding', required=False,
                                 help='encoding options, e.g., mono8, mono16, bgr8, rgb8, bgra8, rgba8', default="passthrough")
        self.args = self.parser.parse_args()
    def run(self):
        """Read the bag, convert each message on img_topic and write an image file."""
        start = timer()
        if not os.path.exists(self.args.output_folder):
            os.mkdir(self.args.output_folder)
        self.bag = rosbag.Bag(self.args.bag, "r")
        for i, msg in enumerate(self.bag.read_messages(topics=[self.args.img_topic])):
            try:
                cv2_img = self.bridge.imgmsg_to_cv2(msg.message, desired_encoding=self.args.encoding)
                outputFileName = os.path.join(self.args.output_folder, "{}_{:06d}.{}".format(self.args.file_name, i, self.args.output_format))
                print("{} saved".format(outputFileName))
                self.total_n_image = i
                cv2.imwrite(outputFileName, cv2_img)
            # Bug fix: 'except CvBridgeError, e' is Python-2-only syntax;
            # the 'as' form works on Python 2.6+ and 3.x.
            except CvBridgeError as e:
                print(e)
        self.bag.close()
        end = timer()
        # Bug fix: guard against no images extracted (total_n_image still None).
        n_images = 0 if self.total_n_image is None else self.total_n_image + 1
        print("=====================================================")
        print("Extraction took {:.03f}s for extracting {} images".format(end - start, n_images))
        print("=====================================================")
if __name__ == "__main__":
print(len( sys.argv ))
if len( sys.argv ) >= 3:
extractor=Img_Extractor()
extractor.run()
else:
print( "Usage: python bag2img --img_topic=/img_topic_name --bag=bag_filename --output=output_folder_name --output_format=jpg")
|
import torch
from torch import nn
import MinkowskiEngine as ME
from mmdet.core import BaseAssigner, reduce_mean, build_assigner
from mmdet.models.builder import HEADS, build_loss
from mmdet.core.bbox.builder import BBOX_ASSIGNERS
from mmcv.cnn import Scale, bias_init_with_prob
from mmdet3d.core.bbox.structures import rotation_3d_in_axis
from mmdet3d.ops.pcdet_nms import pcdet_nms_gpu, pcdet_nms_normal_gpu
@HEADS.register_module()
class Fcaf3DNeckWithHead(nn.Module):
    """FCAF3D joint neck and head over MinkowskiEngine sparse tensors.

    The neck is a top-down decoder across multi-scale sparse features with
    score-based voxel pruning; the head predicts per-voxel centerness,
    bbox regression and classification scores.

    Args:
        n_classes (int): Number of object classes.
        in_channels (list[int]): Channels of each input feature level.
        out_channels (int): Channels of the shared head features.
        n_reg_outs (int): Regression outputs per location (6 distances,
            plus yaw terms depending on ``yaw_parametrization``).
        voxel_size (float): Metric size of one voxel; converts sparse
            coordinates to points.
        pts_threshold (int): Max locations kept per scene when pruning;
            a negative value disables pruning.
        assigner (dict): Target assigner config.
        yaw_parametrization (str): 'naive', 'sin-cos' or 'fcaf3d'; controls
            yaw decoding in ``_bbox_pred_to_bbox``.
        loss_centerness, loss_bbox, loss_cls (dict): Loss configs.
        train_cfg, test_cfg (dict | None): mmdet-style train/test configs.
    """
    def __init__(self,
                 n_classes,
                 in_channels,
                 out_channels,
                 n_reg_outs,
                 voxel_size,
                 pts_threshold,
                 assigner,
                 yaw_parametrization='fcaf3d',
                 loss_centerness=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoU3DLoss', loss_weight=1.0),
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 train_cfg=None,
                 test_cfg=None):
        super(Fcaf3DNeckWithHead, self).__init__()
        self.voxel_size = voxel_size
        self.yaw_parametrization = yaw_parametrization
        self.assigner = build_assigner(assigner)
        self.loss_centerness = build_loss(loss_centerness)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_cls = build_loss(loss_cls)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.pts_threshold = pts_threshold
        self._init_layers(in_channels, out_channels, n_reg_outs, n_classes)
    @staticmethod
    def _make_block(in_channels, out_channels):
        """Sparse conv -> batchnorm -> ELU block."""
        return nn.Sequential(
            ME.MinkowskiConvolution(in_channels, out_channels, kernel_size=3, dimension=3),
            ME.MinkowskiBatchNorm(out_channels),
            ME.MinkowskiELU()
        )
    @staticmethod
    def _make_up_block(in_channels, out_channels):
        """Generative transposed-conv upsampling (stride 2) followed by a conv block."""
        return nn.Sequential(
            ME.MinkowskiGenerativeConvolutionTranspose(
                in_channels,
                out_channels,
                kernel_size=2,
                stride=2,
                dimension=3,
            ),
            ME.MinkowskiBatchNorm(out_channels),
            ME.MinkowskiELU(),
            ME.MinkowskiConvolution(out_channels, out_channels, kernel_size=3, dimension=3),
            ME.MinkowskiBatchNorm(out_channels),
            ME.MinkowskiELU()
        )
    def _init_layers(self, in_channels, out_channels, n_reg_outs, n_classes):
        """Build neck (up/out blocks per level) and the shared head convs."""
        # neck layers
        self.pruning = ME.MinkowskiPruning()
        for i in range(len(in_channels)):
            if i > 0:
                self.__setattr__(f'up_block_{i}', self._make_up_block(in_channels[i], in_channels[i - 1]))
            self.__setattr__(f'out_block_{i}', self._make_block(in_channels[i], out_channels))
        # head layers
        self.centerness_conv = ME.MinkowskiConvolution(out_channels, 1, kernel_size=1, dimension=3)
        self.reg_conv = ME.MinkowskiConvolution(out_channels, n_reg_outs, kernel_size=1, dimension=3)
        self.cls_conv = ME.MinkowskiConvolution(out_channels, n_classes, kernel_size=1, bias=True, dimension=3)
        self.scales = nn.ModuleList([Scale(1.) for _ in range(len(in_channels))])
    def init_weights(self):
        """Initialize head convs; cls bias follows the focal-loss prior."""
        nn.init.normal_(self.centerness_conv.kernel, std=.01)
        nn.init.normal_(self.reg_conv.kernel, std=.01)
        nn.init.normal_(self.cls_conv.kernel, std=.01)
        nn.init.constant_(self.cls_conv.bias, bias_init_with_prob(.01))
    def forward(self, x):
        """Run the neck top-down and the head at every level.

        ``scores`` produced by forward_single at a coarser level is used to
        prune the next (finer) level; on the first (coarsest) iteration the
        prune branch is skipped, so ``scores`` is always defined before use.
        Returns per-level (centernesses, bbox_preds, cls_scores, points),
        ordered fine-to-coarse.
        """
        outs = []
        inputs = x
        x = inputs[-1]
        for i in range(len(inputs) - 1, -1, -1):
            if i < len(inputs) - 1:
                x = self.__getattr__(f'up_block_{i + 1}')(x)
                x = inputs[i] + x
                x = self._prune(x, scores)
            out = self.__getattr__(f'out_block_{i}')(x)
            out = self.forward_single(out, self.scales[i])
            scores = out[-1]
            outs.append(out[:-1])
        return zip(*outs[::-1])
    def _prune(self, x, scores):
        """Keep at most ``pts_threshold`` highest-score voxels per scene in x.

        ``scores`` is a sparse tensor from the coarser level; its features
        are interpolated at x's coordinates to rank voxels.
        """
        if self.pts_threshold < 0:
            return x
        with torch.no_grad():
            coordinates = x.C.float()
            interpolated_scores = scores.features_at_coordinates(coordinates)
            prune_mask = interpolated_scores.new_zeros((len(interpolated_scores)), dtype=torch.bool)
            for permutation in x.decomposition_permutations:
                score = interpolated_scores[permutation]
                mask = score.new_zeros((len(score)), dtype=torch.bool)
                topk = min(len(score), self.pts_threshold)
                ids = torch.topk(score.squeeze(1), topk, sorted=False).indices
                mask[ids] = True
                prune_mask[permutation[mask]] = True
        x = self.pruning(x, prune_mask)
        return x
    def loss(self,
             centernesses,
             bbox_preds,
             cls_scores,
             points,
             gt_bboxes,
             gt_labels,
             img_metas):
        """Compute losses for a batch; averages the per-image losses."""
        assert len(centernesses[0]) == len(bbox_preds[0]) == len(cls_scores[0]) \
            == len(points[0]) == len(img_metas) == len(gt_bboxes) == len(gt_labels)
        loss_centerness, loss_bbox, loss_cls = [], [], []
        for i in range(len(img_metas)):
            img_loss_centerness, img_loss_bbox, img_loss_cls = self._loss_single(
                centernesses=[x[i] for x in centernesses],
                bbox_preds=[x[i] for x in bbox_preds],
                cls_scores=[x[i] for x in cls_scores],
                points=[x[i] for x in points],
                img_meta=img_metas[i],
                gt_bboxes=gt_bboxes[i],
                gt_labels=gt_labels[i]
            )
            loss_centerness.append(img_loss_centerness)
            loss_bbox.append(img_loss_bbox)
            loss_cls.append(img_loss_cls)
        return dict(
            loss_centerness=torch.mean(torch.stack(loss_centerness)),
            loss_bbox=torch.mean(torch.stack(loss_bbox)),
            loss_cls=torch.mean(torch.stack(loss_cls))
        )
    # per image
    def _loss_single(self,
                     centernesses,
                     bbox_preds,
                     cls_scores,
                     points,
                     gt_bboxes,
                     gt_labels,
                     img_meta):
        """Compute centerness / bbox / cls losses for one image.

        Labels of -1 mark background; positive locations get a
        centerness-weighted IoU bbox loss.
        """
        with torch.no_grad():
            centerness_targets, bbox_targets, labels = self.assigner.assign(points, gt_bboxes, gt_labels)
        centerness = torch.cat(centernesses)
        bbox_preds = torch.cat(bbox_preds)
        cls_scores = torch.cat(cls_scores)
        points = torch.cat(points)
        # skip background
        pos_inds = torch.nonzero(labels >= 0).squeeze(1)
        n_pos = torch.tensor(len(pos_inds), dtype=torch.float, device=centerness.device)
        # reduce_mean syncs the positive count across ranks for stable averaging
        n_pos = max(reduce_mean(n_pos), 1.)
        loss_cls = self.loss_cls(cls_scores, labels, avg_factor=n_pos)
        pos_centerness = centerness[pos_inds]
        pos_bbox_preds = bbox_preds[pos_inds]
        pos_centerness_targets = centerness_targets[pos_inds].unsqueeze(1)
        pos_bbox_targets = bbox_targets[pos_inds]
        # centerness weighted iou loss
        centerness_denorm = max(
            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)
        if len(pos_inds) > 0:
            pos_points = points[pos_inds]
            loss_centerness = self.loss_centerness(
                pos_centerness, pos_centerness_targets, avg_factor=n_pos
            )
            loss_bbox = self.loss_bbox(
                self._bbox_pred_to_bbox(pos_points, pos_bbox_preds),
                pos_bbox_targets,
                weight=pos_centerness_targets.squeeze(1),
                avg_factor=centerness_denorm
            )
        else:
            # no positives: zero-valued losses that keep the graph connected
            loss_centerness = pos_centerness.sum()
            loss_bbox = pos_bbox_preds.sum()
        return loss_centerness, loss_bbox, loss_cls
    def get_bboxes(self,
                   centernesses,
                   bbox_preds,
                   cls_scores,
                   points,
                   img_metas,
                   rescale=False):
        """Decode predictions into (bboxes, scores, labels) per image."""
        assert len(centernesses[0]) == len(bbox_preds[0]) == len(cls_scores[0]) \
            == len(points[0]) == len(img_metas)
        results = []
        for i in range(len(img_metas)):
            result = self._get_bboxes_single(
                centernesses=[x[i] for x in centernesses],
                bbox_preds=[x[i] for x in bbox_preds],
                cls_scores=[x[i] for x in cls_scores],
                points=[x[i] for x in points],
                img_meta=img_metas[i]
            )
            results.append(result)
        return results
    # per image
    def _get_bboxes_single(self,
                           centernesses,
                           bbox_preds,
                           cls_scores,
                           points,
                           img_meta):
        """Decode one image: score = sigmoid(cls) * sigmoid(centerness),
        keep top nms_pre per level, then class-wise NMS over all levels."""
        mlvl_bboxes, mlvl_scores = [], []
        for centerness, bbox_pred, cls_score, point in zip(
            centernesses, bbox_preds, cls_scores, points
        ):
            scores = cls_score.sigmoid() * centerness.sigmoid()
            max_scores, _ = scores.max(dim=1)
            if len(scores) > self.test_cfg.nms_pre > 0:
                _, ids = max_scores.topk(self.test_cfg.nms_pre)
                bbox_pred = bbox_pred[ids]
                scores = scores[ids]
                point = point[ids]
            bboxes = self._bbox_pred_to_bbox(point, bbox_pred)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        bboxes = torch.cat(mlvl_bboxes)
        scores = torch.cat(mlvl_scores)
        bboxes, scores, labels = self._nms(bboxes, scores, img_meta)
        return bboxes, scores, labels
    # per scale
    def forward_single(self, x, scale):
        """Apply the head at one level; also build the sparse max-class-score
        tensor used by ``_prune`` at the next (finer) level."""
        centerness = self.centerness_conv(x).features
        scores = self.cls_conv(x)
        cls_score = scores.features
        prune_scores = ME.SparseTensor(
            scores.features.max(dim=1, keepdim=True).values,
            coordinate_map_key=scores.coordinate_map_key,
            coordinate_manager=scores.coordinate_manager)
        reg_final = self.reg_conv(x).features
        # distances are exponentiated (always positive); yaw terms are raw
        reg_distance = torch.exp(scale(reg_final[:, :6]))
        reg_angle = reg_final[:, 6:]
        bbox_pred = torch.cat((reg_distance, reg_angle), dim=1)
        centernesses, bbox_preds, cls_scores, points = [], [], [], []
        for permutation in x.decomposition_permutations:
            centernesses.append(centerness[permutation])
            bbox_preds.append(bbox_pred[permutation])
            cls_scores.append(cls_score[permutation])
        points = x.decomposed_coordinates
        for i in range(len(points)):
            points[i] = points[i] * self.voxel_size
        return centernesses, bbox_preds, cls_scores, points, prune_scores
    def _bbox_pred_to_bbox(self, points, bbox_pred):
        """Decode per-point distance (and yaw) predictions into boxes.

        The first 6 channels are (dx_min, dx_max, dy_min, dy_max, dz_min,
        dz_max); yaw decoding depends on ``self.yaw_parametrization``.
        """
        if bbox_pred.shape[0] == 0:
            return bbox_pred
        x_center = points[:, 0] + (bbox_pred[:, 1] - bbox_pred[:, 0]) / 2
        y_center = points[:, 1] + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2
        z_center = points[:, 2] + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2
        # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max -> x, y, z, w, l, h
        base_bbox = torch.stack([
            x_center,
            y_center,
            z_center,
            bbox_pred[:, 0] + bbox_pred[:, 1],
            bbox_pred[:, 2] + bbox_pred[:, 3],
            bbox_pred[:, 4] + bbox_pred[:, 5],
        ], -1)
        if bbox_pred.shape[1] == 6:
            return base_bbox
        if self.yaw_parametrization == 'naive':
            # ..., alpha
            return torch.cat((
                base_bbox,
                bbox_pred[:, 6:7]
            ), -1)
        elif self.yaw_parametrization == 'sin-cos':
            # ..., sin(a), cos(a)
            norm = torch.pow(torch.pow(bbox_pred[:, 6:7], 2) + torch.pow(bbox_pred[:, 7:8], 2), 0.5)
            sin = bbox_pred[:, 6:7] / norm
            cos = bbox_pred[:, 7:8] / norm
            return torch.cat((
                base_bbox,
                torch.atan2(sin, cos)
            ), -1)
        else:  # self.yaw_parametrization == 'fcaf3d'
            # ..., sin(2a)ln(q), cos(2a)ln(q)
            scale = bbox_pred[:, 0] + bbox_pred[:, 1] + bbox_pred[:, 2] + bbox_pred[:, 3]
            q = torch.exp(torch.sqrt(torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2)))
            alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7])
            return torch.stack((
                x_center,
                y_center,
                z_center,
                scale / (1 + q),
                scale / (1 + q) * q,
                bbox_pred[:, 5] + bbox_pred[:, 4],
                alpha
            ), dim=-1)
    def _nms(self, bboxes, scores, img_meta):
        """Class-wise NMS. Boxes without yaw get a zero angle column appended
        so the normal (axis-aligned) pcdet kernel can run; the extra column
        is stripped before wrapping the result in ``box_type_3d``.
        """
        n_classes = scores.shape[1]
        yaw_flag = bboxes.shape[1] == 7
        nms_bboxes, nms_scores, nms_labels = [], [], []
        for i in range(n_classes):
            ids = scores[:, i] > self.test_cfg.score_thr
            if not ids.any():
                continue
            class_scores = scores[ids, i]
            class_bboxes = bboxes[ids]
            if yaw_flag:
                nms_function = pcdet_nms_gpu
            else:
                class_bboxes = torch.cat((
                    class_bboxes, torch.zeros_like(class_bboxes[:, :1])), dim=1)
                nms_function = pcdet_nms_normal_gpu
            nms_ids, _ = nms_function(class_bboxes, class_scores, self.test_cfg.iou_thr)
            nms_bboxes.append(class_bboxes[nms_ids])
            nms_scores.append(class_scores[nms_ids])
            nms_labels.append(bboxes.new_full(class_scores[nms_ids].shape, i, dtype=torch.long))
        if len(nms_bboxes):
            nms_bboxes = torch.cat(nms_bboxes, dim=0)
            nms_scores = torch.cat(nms_scores, dim=0)
            nms_labels = torch.cat(nms_labels, dim=0)
        else:
            nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1]))
            nms_scores = bboxes.new_zeros((0,))
            nms_labels = bboxes.new_zeros((0,))
        if yaw_flag:
            box_dim = 7
            with_yaw = True
        else:
            box_dim = 6
            with_yaw = False
            nms_bboxes = nms_bboxes[:, :6]
        nms_bboxes = img_meta['box_type_3d'](
            nms_bboxes, box_dim=box_dim, with_yaw=with_yaw, origin=(.5, .5, .5))
        return nms_bboxes, nms_scores, nms_labels
def compute_centerness(bbox_targets):
    """Compute FCOS-style 3d centerness from distance targets.

    ``bbox_targets[..., :6]`` holds (dx_min, dx_max, dy_min, dy_max,
    dz_min, dz_max). For each axis the ratio min(d-, d+) / max(d-, d+)
    is taken; the centerness is the square root of the product of the
    three per-axis ratios (1 at the box center, ->0 near a face).
    """
    axis_ratios = []
    for lo, hi in ((0, 1), (2, 3), (4, 5)):
        dims = bbox_targets[..., [lo, hi]]
        axis_ratios.append(dims.min(dim=-1)[0] / dims.max(dim=-1)[0])
    return torch.sqrt(axis_ratios[0] * axis_ratios[1] * axis_ratios[2])
@BBOX_ASSIGNERS.register_module()
class Fcaf3DAssigner(BaseAssigner):
    """FCAF3D target assigner.

    Assigns each multi-scale location a gt box (or background) using three
    conditions: the location is inside the box, the box's best scale matches
    the location's scale, and the location is among the box's top-k by
    centerness. Ties are broken by minimal box volume.

    Args:
        limit (int): Minimum positive locations a scale must contain for a
            box before a coarser scale is preferred.
        topk (int): Max locations assigned per box (by centerness).
        n_scales (int): Number of feature scales.
    """
    def __init__(self, limit, topk, n_scales):
        self.limit = limit
        self.topk = topk
        self.n_scales = n_scales
    def assign(self, points, gt_bboxes, gt_labels):
        """Return (centerness_targets, bbox_targets, labels) per location.

        ``points`` is a list of per-scale point tensors; labels are -1 for
        background locations.
        """
        float_max = 1e8
        # expand scales to align with points
        expanded_scales = [
            points[i].new_tensor(i).expand(len(points[i]))
            for i in range(len(points))
        ]
        points = torch.cat(points, dim=0)
        scales = torch.cat(expanded_scales, dim=0)
        # below is based on FCOSHead._get_target_single
        n_points = len(points)
        n_boxes = len(gt_bboxes)
        volumes = gt_bboxes.volume.to(points.device)
        volumes = volumes.expand(n_points, n_boxes).contiguous()
        gt_bboxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), dim=1)
        gt_bboxes = gt_bboxes.to(points.device).expand(n_points, n_boxes, 7)
        expanded_points = points.unsqueeze(1).expand(n_points, n_boxes, 3)
        # rotate point offsets into each box's yaw frame before measuring
        # distances to the box faces
        shift = torch.stack((
            expanded_points[..., 0] - gt_bboxes[..., 0],
            expanded_points[..., 1] - gt_bboxes[..., 1],
            expanded_points[..., 2] - gt_bboxes[..., 2]
        ), dim=-1).permute(1, 0, 2)
        shift = rotation_3d_in_axis(shift, -gt_bboxes[0, :, 6], axis=2).permute(1, 0, 2)
        centers = gt_bboxes[..., :3] + shift
        dx_min = centers[..., 0] - gt_bboxes[..., 0] + gt_bboxes[..., 3] / 2
        dx_max = gt_bboxes[..., 0] + gt_bboxes[..., 3] / 2 - centers[..., 0]
        dy_min = centers[..., 1] - gt_bboxes[..., 1] + gt_bboxes[..., 4] / 2
        dy_max = gt_bboxes[..., 1] + gt_bboxes[..., 4] / 2 - centers[..., 1]
        dz_min = centers[..., 2] - gt_bboxes[..., 2] + gt_bboxes[..., 5] / 2
        dz_max = gt_bboxes[..., 2] + gt_bboxes[..., 5] / 2 - centers[..., 2]
        bbox_targets = torch.stack((dx_min, dx_max, dy_min, dy_max, dz_min, dz_max, gt_bboxes[..., 6]), dim=-1)
        # condition1: inside a gt bbox
        inside_gt_bbox_mask = bbox_targets[..., :6].min(-1)[0] > 0  # skip angle
        # condition2: positive points per scale >= limit
        # calculate positive points per scale
        n_pos_points_per_scale = []
        for i in range(self.n_scales):
            n_pos_points_per_scale.append(torch.sum(inside_gt_bbox_mask[scales == i], dim=0))
        # find best scale
        n_pos_points_per_scale = torch.stack(n_pos_points_per_scale, dim=0)
        lower_limit_mask = n_pos_points_per_scale < self.limit
        # argmax finds the first under-populated scale; step back one scale
        lower_index = torch.argmax(lower_limit_mask.int(), dim=0) - 1
        lower_index = torch.where(lower_index < 0, 0, lower_index)
        all_upper_limit_mask = torch.all(torch.logical_not(lower_limit_mask), dim=0)
        best_scale = torch.where(all_upper_limit_mask, self.n_scales - 1, lower_index)
        # keep only points with best scale
        best_scale = torch.unsqueeze(best_scale, 0).expand(n_points, n_boxes)
        scales = torch.unsqueeze(scales, 1).expand(n_points, n_boxes)
        inside_best_scale_mask = best_scale == scales
        # condition3: limit topk locations per box by centerness
        centerness = compute_centerness(bbox_targets)
        centerness = torch.where(inside_gt_bbox_mask, centerness, torch.ones_like(centerness) * -1)
        centerness = torch.where(inside_best_scale_mask, centerness, torch.ones_like(centerness) * -1)
        top_centerness = torch.topk(centerness, min(self.topk + 1, len(centerness)), dim=0).values[-1]
        inside_top_centerness_mask = centerness > top_centerness.unsqueeze(0)
        # if there are still more than one objects for a location,
        # we choose the one with minimal area
        volumes = torch.where(inside_gt_bbox_mask, volumes, torch.ones_like(volumes) * float_max)
        volumes = torch.where(inside_best_scale_mask, volumes, torch.ones_like(volumes) * float_max)
        volumes = torch.where(inside_top_centerness_mask, volumes, torch.ones_like(volumes) * float_max)
        min_area, min_area_inds = volumes.min(dim=1)
        labels = gt_labels[min_area_inds]
        # locations matching no box keep the sentinel background label -1
        labels = torch.where(min_area == float_max, -1, labels)
        bbox_targets = bbox_targets[range(n_points), min_area_inds]
        centerness_targets = compute_centerness(bbox_targets)
        return centerness_targets, gt_bboxes[range(n_points), min_area_inds], labels
|
import maya.mel as mm
import maya.cmds as mc
import glTools.nrig.rig.mocap
import glTools.nrig.rig.bipedMocap
import glTools.utils.characterSet
import glTools.utils.clip
import glTools.utils.reference
import os
import os.path
def createMocapClipsFromFbxWip(sourceDir, targetDir, skipUpToDate=False, skipExistsing=False):
    '''
    Generate trax clips from a directory of mocap FBX files.
    @param sourceDir: Source directory to generate clips from.
    @type sourceDir: str
    @param targetDir: Target clip directory to export processed clips to.
    @type targetDir: str
    @param skipUpToDate: Skip files that are up to date. Checks last modification date of source and destination files.
    @type skipUpToDate: bool
    @param skipExistsing: Skip existing files
    @type skipExistsing: bool
    '''
    # ==========
    # - Checks -
    # ==========

    # Check Source Directory
    if not os.path.isdir(sourceDir):
        raise Exception('Source directory "'+sourceDir+'" does not exist!')

    # =====================
    # - Get FBX File List -
    # =====================

    # Check Source Dir
    if not sourceDir.endswith('/'): sourceDir += '/'

    # Get All FBX Files
    # Bug fix: the list was left as a None placeholder ("####!!!"), which
    # crashed on the .sort() call below.
    clipFileList = [sourceDir+f for f in os.listdir(sourceDir) if f.lower().endswith('.fbx')]
    clipFileList.sort()
    clipFileList.reverse()

    # =================
    # - Process Clips -
    # =================

    clipPathList = []
    for clipFile in clipFileList:

        # Clear Scene
        mc.file(newFile=True, force=True, prompt=False)

        # Skip Directories
        if not os.path.isfile(clipFile):
            print('Invalid path "'+clipFile+'"! Skipping...')
            continue

        # Get Clip Name
        clipName = os.path.splitext(os.path.basename(clipFile))[0].split('.')[0]

        # Build New Clip Path
        clipPath = targetDir+'/'+clipName+'.mb'

        # Check Clip Path
        if os.path.isfile(clipPath):

            # Check Up To Date
            if skipUpToDate:
                if os.stat(clipFile).st_mtime < os.stat(clipPath).st_mtime:
                    print('"'+clipName+'" is up to date! Skipping file...')
                    continue
                else:
                    print('"'+clipName+'" is out of date! Regenerating clip...')

            # Check Existing
            if skipExistsing:
                print(clipName+' already exists! Skipping file...')
                continue

        # Print Status
        print('Generating Clip "'+clipName+'" from '+clipFile+'...')

        # Import Clip File
        mc.file(clipFile, i=True, type="FBX", defaultNamespace=True)

        # Create Character Set
        mocap = glTools.nrig.rig.bipedMocap.BipedMocapRigRoll()
        try:
            charSet = mocap.createCharSet('char', '')
        except:
            print('ERROR: Problem creating characterSet for clip "'+clipName+'"!')
            continue

        # =========================
        # - Create Character Clip -
        # =========================

        keys = mc.keyframe('Hips', q=True, tc=True)
        if not keys:
            print('No animation on Hips! Skipping...')
            continue
        start = keys[0]
        end = keys[-1]
        clip = glTools.utils.clip.createClip(charSet, startTime=start, endTime=end, name=clipName)
        if not clip: continue

        # Export Clip
        # Bug fix: Python 2 print statement replaced with the function form
        # (valid on both Python 2 and 3).
        print('Exporting: '+clipName)
        export = glTools.utils.clip.exportClip(clip, clipPath, force=True)

        # Update Result
        clipPathList.append(clipPath)

    # =================
    # - Return Result -
    # =================
    return clipPathList
def createMocapClips(sourceDir, targetDir='', extList=['fbx'], skipUpToDate=False, skipExistsing=False):
    '''
    Generate trax clips from a directory of mocap anim files.
    @param sourceDir: Source directory to generate clips from.
    @type sourceDir: str
    @param targetDir: Target clip directory to export processed clips to.
    @type targetDir: str
    @param extList: List of file extensions to generate clips from
    @type extList: list
    @param skipUpToDate: Skip files that are up to date. Checks last modification date of source and destination files.
    @type skipUpToDate: bool
    @param skipExistsing: Skip existing files
    @type skipExistsing: bool
    '''
    # ==========
    # - Checks -
    # ==========

    # Check Source Directory
    if not os.path.isdir(sourceDir):
        raise Exception('Source directory "'+sourceDir+'" does not exist!')

    # Map file extensions to maya file types
    extTypeMap = {}
    extTypeMap['fbx'] = 'FBX'
    extTypeMap['ma'] = 'mayaAscii'
    extTypeMap['mb'] = 'mayaBinary'

    # =================
    # - Process Clips -
    # =================

    # New File
    mc.file(newFile=True, force=True, prompt=False)

    clipPathList = []
    clipFileList = os.listdir(sourceDir)
    clipFileList.sort()
    for clipFile in clipFileList:

        # Skip Directories
        if os.path.isdir(sourceDir+'/'+clipFile): continue

        # Get Clip Extension
        ext = os.path.splitext(clipFile)[1].lower()[1:]
        if not extList.count(ext): continue

        # Get Clip Name
        clipName = os.path.splitext(os.path.basename(clipFile))[0]

        # Build New Clip Path
        clipPath = targetDir+'/'+clipName+'.mb'

        # Check Clip Path
        if os.path.isfile(clipPath):

            # Check Up To Date
            if skipUpToDate:
                if os.stat(sourceDir+'/'+clipFile).st_mtime < os.stat(clipPath).st_mtime:
                    print('"'+clipName+'" is up to date! Skipping file...')
                    continue
                else:
                    print('"'+clipName+'" is out of date! Regenerating clip...')

            # Check Existing
            if skipExistsing:
                print(clipName+' already exists! Skipping file...')
                continue

        # Print Status
        print('Generating Clip "'+clipName+'"...')

        # Import Clip File
        # Bug fix: the file type was hard-coded to "FBX" even though
        # extTypeMap supports ma/mb as well.
        mc.file(sourceDir+'/'+clipFile, i=True, type=extTypeMap[ext], defaultNamespace=True)

        # Create Character Set
        mocap = glTools.nrig.rig.bipedMocap.BipedMocapRigRoll()
        try:
            charSet = mocap.createCharSet('char', '')
        except:
            # Bug fix: previously execution continued with charSet undefined,
            # raising a NameError below.
            print('ERROR: Problem creating characterSet for clip "'+clipName+'"!')
            continue

        # Create Character Clip
        # Bug fix: guard against missing Hips animation (keyframe() returns
        # None) instead of indexing it directly.
        keys = mc.keyframe('Hips', q=True, tc=True)
        if not keys:
            print('No animation on Hips! Skipping...')
            continue
        start = keys[0]
        end = keys[-1]
        clip = glTools.utils.clip.createClip(charSet, startTime=start, endTime=end, name=clipName)
        if not clip: continue

        # Export Clip
        print('Exporting: '+clipName)
        export = glTools.utils.clip.exportClip(clip, clipPath, force=True)

        # Update Result
        clipPathList.append(clipPath)

        # Clear Scene
        mc.file(newFile=True, force=True, prompt=False)

    # =================
    # - Return Result -
    # =================
    return clipPathList
def createSourceClipFile(sourceDir, setLatest=False):
    '''
    Build a versioned "sourceClips" maya binary scene from all clip files
    in the given directory.
    @param sourceDir: Directory containing the individual clip files.
    @type sourceDir: str
    @param setLatest: Also save the scene as "sourceClips.latest.mb".
    @type setLatest: bool
    '''
    # ==========================
    # - Check Source Directory -
    # ==========================

    if not os.path.isdir(sourceDir):
        raise Exception('Source directory "'+sourceDir+'" does not exist!')

    # =================
    # - Get File List -
    # =================

    sourceFileList = os.listdir(sourceDir)
    sourceClipList = [i for i in sourceFileList if i.startswith('sourceClips')]
    sourceFileList = [i for i in sourceFileList if not i.startswith('sourceClips')]

    # Get Existing Source Clip Versions
    if sourceClipList:
        sourceClipVersions = [int(i.split('.')[-2]) for i in sourceClipList]
        sourceClipVersions.sort()
        newSourceVersion = '%03d' % (sourceClipVersions[-1] + 1)
    else:
        newSourceVersion = '001'

    # ======================
    # - Build Source Scene -
    # ======================

    # Clear Scene
    mc.file(newFile=True, force=True, prompt=False)

    # Import Source Files
    for sourceFile in sourceFileList:

        # Get Clip Name
        clipName = os.path.splitext(os.path.basename(sourceFile))[0]
        # Bug fix: Python 2 print statement replaced with the function form.
        print(clipName)

        # Import Clip File
        mc.file(sourceDir+'/'+sourceFile, i=True, type="mayaAscii", defaultNamespace=True)

    # Save Scene
    mc.file(rename=sourceDir+'/sourceClips.'+newSourceVersion+'.mb')
    mc.file(save=True, type='mayaBinary')

    # Update Latest
    if setLatest:
        mc.file(rename=sourceDir+'/sourceClips.latest.mb')
        mc.file(save=True, type='mayaBinary')

    # =================
    # - Return Result -
    # =================
    return
|
# -*- coding: utf-8 -*-
import requests
from openprocurement.integrations.edr.timeout_handler import TimeoutHandler
from logging import getLogger
logger = getLogger(__name__)
class EdrClient(object):
    """Thin HTTP client for the EDR (Unified State Register) subjects API.

    Keeps one ``requests`` session and two independently adaptive timeouts:
    one for subject search requests, one for detail requests.
    """
    def __init__(self, host, token, port=443, timeout_min=1, timeout_max=300, timeout_step=2, timeout_mode='mult'):
        self.session = requests.Session()
        self.token = token
        self.url = '{host}:{port}/1.0/subjects'.format(host=host, port=port)
        self.headers = {
            "Accept": "application/json",
            "Authorization": "Token {token}".format(token=self.token),
        }
        # Separate adaptive timeouts for the two endpoint families.
        self.timeout_verify = TimeoutHandler(timeout_min, timeout_max, timeout_step, timeout_mode)
        self.timeout_details = TimeoutHandler(timeout_min, timeout_max, timeout_step, timeout_mode)
    def _do_request(self, url, timeout):
        """GET *url*; shrink/grow *timeout* on success/failure and re-raise
        timeout errors (logging fatally once the timeout is maxed out)."""
        try:
            response = self.session.get(url=url, headers=self.headers, timeout=timeout.value)
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectTimeout):
            if not timeout.update(False):
                logger.fatal('Timeout maxed out! Value: {0}'.format(timeout.value))
            raise
        timeout.update(True)
        return response
    def get_subject(self, param, code):
        """
        Send request to EDR using EDRPOU (physical entity-entrepreneur) code or passport.
        In response we except list of subjects with unique id in each subject.
        List mostly contains 1 subject, but occasionally includes 2 or none.
        """
        query_url = '{url}?{param}={code}'.format(url=self.url, param=param, code=code)
        return self._do_request(query_url, self.timeout_verify)
    def get_subject_details(self, edr_unique_id):
        """
        Send request to EDR using unique identifier to get subject's details.
        """
        details_url = '{url}/{id}'.format(url=self.url, id=edr_unique_id)
        return self._do_request(details_url, self.timeout_details)
|
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Simple PB Words client demo
This connects to a server (host/port specified by argv[1]/argv[2]),
authenticates with a username and password (given by argv[3] and argv[4]),
joins a group (argv[5]) sends a simple message, leaves the group, and quits
the server.
To run the script:
$ python pb_client.py <host> <port> <username> <password> <group>
"""
import sys
from twisted.python import log
from twisted.cred import credentials
from twisted.words import service
from twisted.spread import pb
from twisted.internet import reactor
class DemoMind(service.PBMind):
    """An utterly pointless PBMind subclass.

    Prints every message it receives to stdout.  Since the bot never
    stays in a channel very long, it is exceedingly unlikely this will
    ever do anything interesting.
    """
    def remote_receive(self, sender, recipient, message):
        # Same output as print("Woop", sender, recipient, message).
        fields = ("Woop", sender, recipient, message)
        print(*fields)
def quitServer(ignored):
    """The quit succeeded; bring the reactor (and the process) down."""
    reactor.stop()
def leftGroup(ignored, avatar):
    """Left the group successfully; now log out of the server."""
    deferred = avatar.quit()
    deferred.addCallback(quitServer)
    return deferred
def sentMessage(ignored, group, avatar):
    """The message was sent successfully; now leave the group."""
    deferred = group.leave()
    deferred.addCallback(leftGroup, avatar)
    return deferred
def joinedGroup(group, avatar):
    """Joined the group successfully; send a stupid message."""
    deferred = group.send({"text": "Hello, monkeys"})
    deferred.addCallback(sentMessage, group, avatar)
    return deferred
def loggedIn(avatar, group):
    """Logged in successfully; join the requested group."""
    deferred = avatar.join(group)
    deferred.addCallback(joinedGroup, avatar)
    return deferred
def errorOccurred(err):
    """Something went awry: log the failure and shut down if still running."""
    log.err(err)
    try:
        reactor.stop()
    except RuntimeError:
        # The reactor was already stopped (or never started); nothing to do.
        pass
def run(host, port, username, password, group):
    """Create a mind and factory and set things in motion."""
    mind = DemoMind()
    factory = pb.PBClientFactory()
    factory.unsafeTracebacks = True
    login = factory.login(credentials.UsernamePassword(username, password), mind)
    login.addCallback(loggedIn, group)
    login.addErrback(errorOccurred)
    reactor.connectTCP(host, int(port), factory)
def main():
    """
    Validate arguments, set up logging, schedule the real work, and
    start the reactor.
    """
    if len(sys.argv) != 6:
        raise SystemExit(
            "Usage: {} host port username password group".format(sys.argv[0])
        )
    log.startLogging(sys.stdout)
    host, port, username, password, group = sys.argv[1:]
    port = int(port)
    # Fixed: sys.argv entries are already text (str) on Python 3, so the
    # old username.decode(sys.stdin.encoding) / group.decode(...) calls
    # raised AttributeError ('str' object has no attribute 'decode').
    reactor.callWhenRunning(run, host, port, username, password, group)
    reactor.run()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 18 22:51:57 2016
@author: yxl
"""
# -*- coding: utf-8 -*
import scipy.ndimage as ndimg
import numpy as np
from imagepy.core.engine import Filter
from skimage.morphology import convex_hull_object
class Closing(Filter):
    """Binary morphological closing (dilation followed by erosion)."""
    # Fixed user-visible menu-title typo: 'Closeing' -> 'Closing'.
    title = 'Binary Closing'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    # Structuring-element width/height in pixels, set from the dialog.
    para = {'w':3, 'h':3}
    view = [(int, (1,15), 0, 'width', 'w', 'pix'),
            (int, (1,15), 0, 'height', 'h', 'pix')]
    def run(self, ips, snap, img, para = None):
        # Rectangular structuring element of the requested size.
        strc = np.ones((para['h'], para['w']), dtype=np.uint8)
        ndimg.binary_closing(snap, strc, output=img)
        # Promote the boolean result to a 0/255 8-bit mask.
        img *= 255
class Opening(Filter):
    """Binary morphological opening (erosion followed by dilation)."""
    title = 'Binary Opening'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    para = {'w':3, 'h':3}
    view = [(int, (1,15), 0, 'width', 'w', 'pix'),
            (int, (1,15), 0, 'height', 'h', 'pix')]
    def run(self, ips, snap, img, para = None):
        # Rectangular structuring element sized from the dialog values.
        kernel = np.ones((para['h'], para['w']), np.uint8)
        ndimg.binary_opening(snap, kernel, output=img)
        # Promote the boolean result to a 0/255 8-bit mask.
        img *= 255
class Dilation(Filter):
    """Binary morphological dilation (grow objects)."""
    title = 'Binary Dilation'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    para = {'w':3, 'h':3}
    view = [(int, (1,15), 0, 'width', 'w', 'pix'),
            (int, (1,15), 0, 'height', 'h', 'pix')]
    def run(self, ips, snap, img, para = None):
        # Rectangular structuring element sized from the dialog values.
        kernel = np.ones((para['h'], para['w']), np.uint8)
        ndimg.binary_dilation(snap, kernel, output=img)
        # Promote the boolean result to a 0/255 8-bit mask.
        img *= 255
class Erosion(Filter):
    """Binary morphological erosion (shrink objects)."""
    title = 'Binary Erosion'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    para = {'w':3, 'h':3}
    view = [(int, (1,15), 0, 'width', 'w', 'pix'),
            (int, (1,15), 0, 'height', 'h', 'pix')]
    def run(self, ips, snap, img, para = None):
        # Rectangular structuring element sized from the dialog values.
        kernel = np.ones((para['h'], para['w']), np.uint8)
        ndimg.binary_erosion(snap, kernel, output=img)
        # Promote the boolean result to a 0/255 8-bit mask.
        img *= 255
class Outline(Filter):
    """Outline: derived from imagepy.core.engine.Filter

    Draws the outer boundary of each binary object: dilate the mask,
    then subtract the original so only the grown rim remains set.
    """
    title = 'Binary Outline'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    def run(self, ips, snap, img, para = None):
        # Dilate with the default structuring element, writing the
        # boolean result (0/1) into img.
        ndimg.binary_dilation(snap, output=img)
        # Scale the mask to 0/255 before subtracting the snapshot.
        img *= 255
        # Subtract the original so only the dilated rim stays at 255.
        # NOTE(review): assumes snap holds strictly 0/255 values --
        # uint8 arithmetic wraps on underflow; confirm upstream.
        img -= snap
class FillHoles(Filter):
    """Fill enclosed background holes inside binary objects."""
    title = 'Fill Holes'
    note = ['8-bit', 'auto_msk', 'auto_snap','preview']
    def run(self, ips, snap, img, para = None):
        # scipy does the heavy lifting; the result lands directly in img.
        ndimg.binary_fill_holes(snap, output=img)
        # Promote the boolean result to a 0/255 8-bit mask.
        img *= 255
class Convex(Filter):
    """Fill every connected object with its convex hull (set to 255)."""
    title = 'Binary ConvexHull'
    note = ['8-bit', 'auto_msk', 'auto_snap']
    #process
    def run(self, ips, snap, img, para = None):
        # convex_hull_object returns a boolean mask covering the convex
        # hull of each connected component in the snapshot.
        img[convex_hull_object(snap)] = 255
# Menu layout for this plugin group; '-' entries render as separators.
plgs = [Dilation, Erosion, '-', Closing, Opening, '-', Outline, FillHoles, Convex]
|
from typing import List, Union
from ..utils import get, post
# Root of the bilibili live API.
BASE_URL = "https://api.live.bilibili.com"


async def get_rooms_info_by_uids(
    uids: List[Union[int, str]], *, auth=None, reqtype="both", **kwargs
):
    """Batch-fetch live-room status/info for the given user IDs (UIDs)."""
    endpoint = f"{BASE_URL}/room/v1/Room/get_status_info_by_uids"
    payload = {"uids": uids}
    return await post(endpoint, json=payload, auth=auth, reqtype=reqtype, **kwargs)
async def get_rooms_info_by_ids(
    room_ids: List[Union[int, str]], *, auth=None, reqtype="both", **kwargs
):
    """Batch-fetch live-room info for the given room numbers."""
    endpoint = f"{BASE_URL}/room/v1/Room/get_info_by_id"
    payload = {"ids": room_ids}
    return await post(endpoint, json=payload, auth=auth, reqtype=reqtype, **kwargs)
async def get_room_info_by_uid(
    uid: Union[int, str], *, auth=None, reqtype="both", **kwargs
):
    """Fetch the live-room info belonging to a single user ID (UID)."""
    endpoint = f"{BASE_URL}/room/v1/Room/getRoomInfoOld"
    query = {"mid": uid}
    return await get(endpoint, params=query, auth=auth, reqtype=reqtype, **kwargs)
async def get_room_info_by_id(
    room_id: Union[int, str], *, auth=None, reqtype="both", **kwargs
):
    """Fetch the info of a single live room by its room number."""
    endpoint = f"{BASE_URL}/room/v1/Room/get_info"
    query = {"id": room_id}
    return await get(endpoint, params=query, auth=auth, reqtype=reqtype, **kwargs)
|
"""
全局变量local_school就是一个ThreadLocal对象,每个Thread对它都可以读写student属性,但互不影响。
你可以把local_school看成全局变量,但每个属性如local_school.student都是线程的局部变量,
可以任意读写而互不干扰,也不用管理锁的问题,ThreadLocal内部会处理。
"""
# Solves the problem of threads interfering with each other's variables.
import threading
# Each thread reads/writes its own private copy of any attribute set on
# this object; threading.local handles the per-thread bookkeeping.
thread_local =threading.local()
class Student(object):
    """Simple value object holding a single student's name."""
    def __init__(self, name):
        self.name = name
def process_student():
    """Print a greeting using the current thread's private Student object."""
    # Fetch this thread's own Student instance from thread-local storage.
    current = thread_local.stu
    thread_name = threading.current_thread().name
    print("Hello %s in thread(%s)" % (current.name, thread_name))
def process_thread(name):
    """Bind a Student to the current thread's local storage, then greet."""
    # Each thread stores its own Student under the same attribute name.
    thread_local.stu = Student(name)
    process_student()
if __name__ == '__main__':
    # Two named worker threads, each with its own thread-local Student.
    workers = [
        threading.Thread(target=process_thread, args=("Szhua",), name="Thread_A"),
        threading.Thread(target=process_thread, args=("Leilei",), name="Thread_B"),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
"""
一个ThreadLocal变量虽然是全局变量,但每个线程都只能读写自己线程的独立副本,互不干扰。ThreadLocal解决了参数在一个线程中各个函数之间互相传递的问题。
"""
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
import json
from typing import List
from fim.slivers.base_sliver import BaseSliver
from fim.slivers.capacities_labels import JSONField, Capacities, Labels, CapacityHints
from fim.slivers.json import JSONSliver
from fim.slivers.network_node import NodeSliver
from fim.slivers.network_service import NetworkServiceSliver
from fabric_cf.orchestrator.elements.constants import Constants
class Reservation(JSONField):
    """
    Represents a single reservation record received from the orchestrator,
    including (optionally) the deserialized sliver it refers to.
    """
    def __init__(self):
        # All fields default to None and are populated by _set_fields()
        # from the orchestrator's JSON payload.
        self.name = None
        self.graph_node_id = None
        self.slice_id = None
        self.reservation_id = None
        self.join_state = None
        self.pending_state = None
        self.reservation_state = None
        self.notices = None
        self.lease_end = None
        self.sliver_type = None   # class name: NodeSliver / NetworkServiceSliver
        self.sliver = None        # deserialized BaseSliver, set by _set_fields()
    def get_sliver_type(self) -> str:
        return self.sliver_type
    def get_name(self) -> str:
        return self.name
    def get_graph_node_id(self) -> str:
        return self.graph_node_id
    def get_slice_id(self) -> str:
        return self.slice_id
    def get_join_state(self) -> str:
        return self.join_state
    def get_state(self) -> str:
        return self.reservation_state
    def get_pending_state(self) -> str:
        return self.pending_state
    def get_notices(self) -> str:
        return self.notices
    def get_sliver(self) -> BaseSliver:
        return self.sliver
    def _set_fields(self, **kwargs):
        """
        Universal setter for the fields present in the JSON payload.
        The sliver entry is skipped in the generic loop and deserialized
        separately below, based on sliver_type.  Throws a RuntimeError
        if you try to set a non-existent field.
        :param kwargs:
        :return: self to support call chaining
        """
        for k, v in kwargs.items():
            try:
                # will toss an exception if field is not defined
                self.__getattribute__(k)
                if k == Constants.PROP_SLIVER:
                    continue
                self.__setattr__(k, v)
            except AttributeError:
                raise RuntimeError(f"Unable to set field {k} of reservation, no such field available")
        # Deserialize the sliver JSON (if present) into the concrete
        # sliver class named by sliver_type.
        sliver_json = kwargs.get(Constants.PROP_SLIVER, None)
        if sliver_json is not None:
            if self.sliver_type == NodeSliver.__name__:
                self.sliver = JSONSliver.node_sliver_from_json(s=sliver_json)
            elif self.sliver_type == NetworkServiceSliver.__name__:
                self.sliver = JSONSliver.network_service_sliver_from_json(s=sliver_json)
        return self
    def to_json(self) -> str:
        """
        Dumps to JSON the __dict__ of the instance. Be careful as the fields in this
        class should only be those that can be present in JSON output.
        If there are no values in the object, returns empty string.
        :return:
        """
        d = self.__dict__.copy()
        for k in self.__dict__:
            # Drop unset fields; re-serialize the sliver via JSONSliver.
            if d[k] is None:
                d.pop(k)
            elif k == Constants.PROP_SLIVER:
                d[k] = JSONSliver.sliver_to_json(sliver=d[k])
        if len(d) == 0:
            return ''
        return json.dumps(d, skipkeys=True, sort_keys=True, indent=4)
class ReservationFactory:
    """
    Factory class to instantiate Reservation objects from JSON payloads.
    """
    @staticmethod
    def create_reservations(*, reservation_list: List[dict]) -> List[Reservation]:
        """
        Build Reservation objects from a list of JSON dictionaries.
        :param reservation_list: list of reservation dictionaries
        :return: list of reservations
        """
        return [ReservationFactory.create(reservation_dict=entry)
                for entry in reservation_list]
    @staticmethod
    def create(*, reservation_dict: dict) -> Reservation:
        """
        Build a single Reservation from its JSON dictionary.
        :param reservation_dict: reservation dictionary
        :return: reservation
        """
        as_json = json.dumps(reservation_dict)
        return Reservation().from_json(json_string=as_json)
|
{% block meta %}
name: StateMachine
description: SMACH template containing code common to all state templates.
language: Python
framework: SMACH
type: None
tags: [core]
includes: []
extends: []
variables: []
input_keys: []
output_keys: []
{% endblock meta %}
{% from "Utils.tpl.py" import render_input_keys, render_output_keys, render_outcomes, render_callbacks, render_transitions, render_remapping, render_def_lambda_callbacks, render_userdata %}
{% block base_header %}
{% endblock base_header %}
{% block imports %}
{% endblock imports %}
{% block defs %}
{% endblock defs %}
{% block class_defs %}
{% endblock class_defs %}
{% block cb_defs %}
{% if callbacks is defined %}
{% if input_keys is defined %}
{{ render_def_lambda_callbacks(defined_headers, class_name, name, uuid, input_keys, outcomes, callbacks) }}
{% else %}
{{ render_def_lambda_callbacks(defined_headers, class_name, name, uuid, [], outcomes, callbacks) }}
{% endif %}
{% endif %}
{% endblock cb_defs %}
{% block main_def %}
{% endblock main_def %}
{% block header %}
{% endblock header %}
{% block header_userdata %}
{% if userdata is defined %}{{ render_userdata(parent_sm_name, userdata) }}{% endif %}
{% endblock header_userdata %}
{% block body %}
smach.{{ parent_type }}.add('{{ name }}',
{{ '' | indent(23, true) }}{{ class_name }}({% if input_keys is defined %}{{ render_input_keys(input_keys, indent=0) }}{% endif %}{% if output_keys is defined %}{% if input_keys is defined %}, {% endif %}{{ render_output_keys(output_keys, indent=0) }}{% endif %}{% if callbacks is defined %}{% if input_keys is defined or output_keys is defined %}, {% endif %}{{ render_callbacks(name, uuid, callbacks, indent=0) }}{% endif %}{% if outcomes is defined %}{% if input_keys is defined or output_keys is defined or callbacks is defined %}, {% endif %}{{ render_outcomes(outcomes, indent=0) }}{% endif %}){% if transitions is defined %},
{{ render_transitions(transitions) }}{% endif %}{% if remapping is defined %},
{{ render_remapping(remapping) }}{% endif %})
{% endblock body %}
{% block footer %}
{% endblock footer %}
{% block introspection_server %}
{% endblock introspection_server %}
{% block execute %}
{% endblock execute %}
{% block spin %}
{% endblock spin %}
{% block base_footer %}
{% endblock base_footer %}
{% block main %}
{% endblock main %}
|
# -*- coding: utf-8 -*-
'''
Manage RabbitMQ Virtual Hosts
=============================
Example:
.. code-block:: yaml
virtual_host:
rabbitmq_vhost.present:
- user: rabbit_user
- conf: .*
- write: .*
- read: .*
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this state module when the rabbitmqctl binary is on PATH.
    '''
    rabbitmqctl = salt.utils.which('rabbitmqctl')
    return rabbitmqctl is not None
def present(name):
    '''
    Ensure the RabbitMQ VHost exists.

    name
        VHost name

    The historical ``user``, ``owner``, ``conf``, ``write``, ``read`` and
    ``runas`` parameters were deprecated in 2015.8.0 and are no longer
    accepted.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    vhost_exists = __salt__['rabbitmq.vhost_exists'](name)
    if __opts__['test']:
        # Dry run: only report what would happen.
        ret['result'] = None
        if vhost_exists:
            ret['comment'] = 'VHost {0} already exists'.format(name)
        else:
            ret['comment'] = 'Creating VHost {0}'.format(name)
        return ret
    if vhost_exists:
        ret['comment'] = 'VHost {0} already exists'.format(name)
        return ret
    result = __salt__['rabbitmq.add_vhost'](name)
    if 'Error' in result:
        ret['result'] = False
        ret['comment'] = result['Error']
    elif 'Added' in result:
        ret['comment'] = result['Added']
        ret['changes'] = {'old': '', 'new': name}
    return ret
def absent(name):
    '''
    Ensure the RabbitMQ Virtual Host is absent.

    name
        Name of the Virtual Host to remove

    The historical ``runas`` parameter was deprecated in 2015.8.0 and is
    no longer accepted.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    vhost_exists = __salt__['rabbitmq.vhost_exists'](name)
    if not vhost_exists:
        ret['comment'] = 'Virtual Host {0} is not present'.format(name)
        return ret
    if __opts__['test']:
        # Dry run: only report the pending removal.
        ret['result'] = None
        ret['comment'] = 'Removing Virtual Host {0}'.format(name)
        return ret
    result = __salt__['rabbitmq.delete_vhost'](name)
    if 'Error' in result:
        ret['result'] = False
        ret['comment'] = result['Error']
    elif 'Deleted' in result:
        ret['comment'] = result['Deleted']
        ret['changes'] = {'new': '', 'old': name}
    return ret
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import re
from builtins import open
from pants.backend.native.config.environment import Platform
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.process_handler import subprocess
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class PythonDistributionIntegrationTest(PantsRunIntegrationTest):
  """Integration tests for python_dist() targets.

  Covers building, running, and testing distributions that contain native
  (C/C++) sources, including setup_requires handling and platform limits.
  """
  # The paths to both a project containing a simple C extension (to be packaged into a
  # whl by setup.py) and an associated test to be consumed by the pants goals tested below.
  fasthello_project = 'examples/src/python/example/python_distribution/hello/fasthello'
  fasthello_tests = 'examples/tests/python/example/python_distribution/hello/test_fasthello'
  fasthello_install_requires_dir = 'testprojects/src/python/python_distribution/fasthello_with_install_requires'
  hello_setup_requires = 'examples/src/python/example/python_distribution/hello/setup_requires'

  def _assert_native_greeting(self, output):
    """Assert that both the C and C++ extension greetings appear in *output*."""
    self.assertIn('Hello from C!', output)
    self.assertIn('Hello from C++!', output)

  def test_pants_binary(self):
    with temporary_dir() as tmp_dir:
      pex = os.path.join(tmp_dir, 'main.pex')
      # The + is because we append the target's fingerprint to the version. We test this version
      # string in test_build_local_python_distributions.py.
      wheel_glob = os.path.join(tmp_dir, 'fasthello-1.0.0+*.whl')
      command=[
        '--pants-distdir={}'.format(tmp_dir), 'binary', '{}:main'.format(self.fasthello_project)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      # Check that the pex was built.
      self.assertTrue(os.path.isfile(pex))
      # Check that the pex runs.
      output = subprocess.check_output(pex)
      self._assert_native_greeting(output)
      # Check that we have exactly one wheel output.
      self.assertEqual(len(glob.glob(wheel_glob)), 1)

  def test_pants_run(self):
    with temporary_dir() as tmp_dir:
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'run',
        '{}:main'.format(self.fasthello_project)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      # Check that text was properly printed to stdout.
      self._assert_native_greeting(pants_run.stdout_data)

  def test_invalidation(self):
    """Test that the current version of a python_dist() is resolved after modifying its sources."""
    fasthello_run = '{}:main_with_no_conflict'.format(self.fasthello_install_requires_dir)
    with self.mock_buildroot(
        dirs_to_copy=[self.fasthello_install_requires_dir]) as buildroot, buildroot.pushd():
      run_target = lambda: self.run_pants_with_workdir(
        command=['-ldebug', 'run', fasthello_run],
        workdir=os.path.join(buildroot.new_buildroot, '.pants.d'),
        build_root=buildroot.new_buildroot,
        extra_env={'PEX_VERBOSE': '9'},
      )
      unmodified_pants_run = run_target()
      self.assert_success(unmodified_pants_run)
      self.assertIn('Hello from C!\n', unmodified_pants_run.stdout_data)
      # Modify one of the source files for this target so that the output is different.
      c_source_file = os.path.join(self.fasthello_install_requires_dir, 'c_greet.c')
      with open(c_source_file, 'r') as f:
        orig_contents = f.read()
      modified_contents = re.sub('"Hello from C!"', '"Hello from C?"', orig_contents)
      with open(c_source_file, 'w') as f:
        f.write(modified_contents)
      modified_pants_run = run_target()
      self.assert_success(modified_pants_run)
      self.assertIn('Hello from C?\n', modified_pants_run.stdout_data)

  def test_pants_test(self):
    with temporary_dir() as tmp_dir:
      wheel_glob = os.path.join(tmp_dir, '*.whl')
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'test',
        '{}:fasthello'.format(self.fasthello_tests)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      # Make sure that there is no wheel output when 'binary' goal is not invoked.
      self.assertEqual(len(glob.glob(wheel_glob)), 0)

  def test_with_install_requires(self):
    with temporary_dir() as tmp_dir:
      pex = os.path.join(tmp_dir, 'main_with_no_conflict.pex')
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'run',
        '{}:main_with_no_conflict'.format(self.fasthello_install_requires_dir)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      self.assertIn('United States', pants_run.stdout_data)
      command=['binary', '{}:main_with_no_conflict'.format(self.fasthello_install_requires_dir)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      output = subprocess.check_output(pex)
      self.assertIn('United States', output)

  def test_with_conflicting_transitive_deps(self):
    command=['run', '{}:main_with_conflicting_dep'.format(self.fasthello_install_requires_dir)]
    pants_run = self.run_pants(command=command)
    self.assert_failure(pants_run)
    self.assertIn('pycountry', pants_run.stderr_data)
    self.assertIn('fasthello', pants_run.stderr_data)
    command=['binary', '{}:main_with_conflicting_dep'.format(self.fasthello_install_requires_dir)]
    pants_run = self.run_pants(command=command)
    self.assert_failure(pants_run)
    self.assertIn('pycountry', pants_run.stderr_data)
    self.assertIn('fasthello', pants_run.stderr_data)

  def test_pants_binary_dep_isolation_with_multiple_targets(self):
    with temporary_dir() as tmp_dir:
      pex1 = os.path.join(tmp_dir, 'main_with_no_conflict.pex')
      pex2 = os.path.join(tmp_dir, 'main_with_no_pycountry.pex')
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'binary',
        '{}:main_with_no_conflict'.format(self.fasthello_install_requires_dir),
        '{}:main_with_no_pycountry'.format(self.fasthello_install_requires_dir)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      # Check that the pex was built.
      self.assertTrue(os.path.isfile(pex1))
      self.assertTrue(os.path.isfile(pex2))
      # Check that the pex 1 runs.
      output = subprocess.check_output(pex1)
      self._assert_native_greeting(output)
      # Check that the pex 2 fails due to no python_dists leaked into it.
      try:
        output = subprocess.check_output(pex2)
        # Fixed: a successful pex2 run previously slipped through silently.
        self.fail('pex2 unexpectedly ran successfully (python_dist leaked in)')
      except subprocess.CalledProcessError as e:
        self.assertNotEqual(0, e.returncode)

  def test_pants_resolves_local_dists_for_current_platform_only(self):
    # Test that pants will override pants.ini platforms config when building
    # or running a target that depends on native (c or cpp) sources.
    with temporary_dir() as tmp_dir:
      pex = os.path.join(tmp_dir, 'main.pex')
      pants_ini_config = {
        'python-setup': {
          'platforms': ['current', 'this-platform-does_not-exist'],
        },
      }
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'run',
        '{}:main'.format(self.fasthello_project)]
      pants_run = self.run_pants(command=command, config=pants_ini_config)
      self.assert_success(pants_run)
      command=['binary', '{}:main'.format(self.fasthello_project)]
      pants_run = self.run_pants(command=command, config=pants_ini_config)
      self.assert_success(pants_run)
      # Check that the pex was built.
      self.assertTrue(os.path.isfile(pex))
      # Check that the pex runs.
      output = subprocess.check_output(pex)
      self._assert_native_greeting(output)

  def _get_current_platform_string(self):
    """Return the pex platform string matching the machine running the test."""
    return Platform.create().resolve_platform_specific({
      'darwin': lambda: 'macosx-10.12-x86_64',
      'linux': lambda: 'linux-x86_64',
    })

  def test_pants_tests_local_dists_for_current_platform_only(self):
    platform_string = self._get_current_platform_string()
    # Use a platform-specific string for testing because the test goal
    # requires the coverage package and the pex resolver raises an Untranslatable error when
    # attempting to translate the coverage sdist for incompatible platforms.
    pants_ini_config = {'python-setup': {'platforms': [platform_string]}}
    # Clean all to rebuild requirements pex.
    with temporary_dir() as tmp_dir:
      command=[
        '--pants-distdir={}'.format(tmp_dir),
        'clean-all',
        'test',
        '{}:fasthello'.format(self.fasthello_tests)]
      pants_run = self.run_pants(command=command, config=pants_ini_config)
      self.assert_success(pants_run)

  def test_python_distribution_with_setup_requires(self):
    # Validate that setup_requires dependencies are present and functional.
    # PANTS_TEST_SETUP_REQUIRES triggers test functionality in this particular setup.py.
    with environment_as(PANTS_TEST_SETUP_REQUIRES='1'):
      command=['run', '{}:main'.format(self.hello_setup_requires)]
      pants_run = self.run_pants(command=command)
      # Fixed: the original called self.assertRaises(Exception) with no
      # callable or context manager, which asserts nothing at all; the
      # run is expected to succeed.
      self.assert_success(pants_run)
      # Indicates the pycountry package is available to setup.py script.
      self.assertIn('current/setup_requires_site/pycountry/__init__.py', pants_run.stderr_data)
      # Indicates that the pycountry wheel has been installed on PYTHONPATH correctly.
      self.assertIn('pycountry-18.5.20.dist-info', pants_run.stderr_data)
    # Validate the run task. Use clean-all to invalidate cached python_dist wheels from
    # previous test run. Use -ldebug to get debug info on setup_requires functionality.
    command=['-ldebug', 'clean-all', 'run', '{}:main'.format(self.hello_setup_requires)]
    pants_run = self.run_pants(command=command)
    # Fixed: the run's success was previously never asserted.
    self.assert_success(pants_run)
    self.assertIn("Installing setup requirements: ['pycountry']", pants_run.stdout_data)
    self.assertIn("Setting PYTHONPATH with setup_requires site directory", pants_run.stdout_data)
    # Validate that the binary can build and run properly. Use clean-all to invalidate cached
    # python_dist wheels from previous test run. Use -ldebug to get debug info on setup_requires
    # functionality.
    pex = os.path.join(get_buildroot(), 'dist', 'main.pex')
    try:
      command=['-ldebug', 'clean-all', 'binary', '{}:main'.format(self.hello_setup_requires)]
      pants_run = self.run_pants(command=command)
      self.assert_success(pants_run)
      self.assertIn("Installing setup requirements: ['pycountry']", pants_run.stdout_data)
      self.assertIn("Setting PYTHONPATH with setup_requires site directory", pants_run.stdout_data)
      # Check that the pex was built.
      self.assertTrue(os.path.isfile(pex))
      # Check that the pex runs.
      output = subprocess.check_output(pex)
      self.assertIn('Hello, world!', output)
    finally:
      if os.path.exists(pex):
        # Cleanup.
        os.remove(pex)
|
import things
import unittest
from queue import Queue
class TestPutOperator(unittest.TestCase):
    """Exercise the ``<<`` (put) sugar syntax on actors and buses."""

    def test_put(self):
        '''
        Send a message to an actor using the put sugar syntax.
        '''
        # Collect delivered messages through a queue.
        results = Queue()
        payload = "hello world"

        class Actor(things.Actor):
            def on_message(self, data):
                results.put(data)

        actor = Actor()
        actor << payload
        assert results.get(timeout=5) == payload

    def test_bus_put(self):
        '''
        Send a message to a bus using the put sugar syntax.
        '''
        results = Queue()
        payload = "hello world"

        class Bus(things.Bus):
            @things.subscriber
            def subscriber(self, data):
                results.put(data)

        bus = Bus()
        bus.subscriber << payload
        assert results.get(timeout=5) == payload
|
import os
import pickle
import time
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 13})
plt.rcParams['figure.figsize'] = 10, 8
def _ansi_print(code, prt):
    """Print *prt* wrapped in the ANSI color escape sequence for *code*."""
    print("\033[{0}m {1}\033[00m".format(code, prt))

def prRed(prt): _ansi_print(91, prt)
def prGreen(prt): _ansi_print(92, prt)
def prYellow(prt): _ansi_print(93, prt)
def prLightPurple(prt): _ansi_print(94, prt)
def prPurple(prt): _ansi_print(95, prt)
def prCyan(prt): _ansi_print(96, prt)
def prLightGray(prt): _ansi_print(97, prt)
def prBlack(prt): _ansi_print(98, prt)
def soft_update(target, source, tau=0.001):
    """
    Polyak-average the source network into the target network:
    target = tau * source + (1 - tau) * target

    :param target: Target network
    :param source: source network
    :param tau: interpolation factor, 0 < tau << 1
    :return: None (target parameters are updated in place)
    """
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        blended = tgt_param.data * (1.0 - tau) + src_param.data * tau
        tgt_param.data.copy_(blended)
def hard_update(target, source):
    """
    Copy every parameter of *source* into *target* (target = source).

    :param target: Target network
    :param source: source network
    :return: None (target parameters are updated in place)
    """
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        tgt_param.data.copy_(src_param.data)
def get_output_folder(parent_dir, env_name):
    """Return save folder.

    Assumes folders in the parent_dir have suffix -run{run
    number}. Finds the highest run number and sets the output folder
    to that number + 1. This is just convenient so that if you run the
    same script multiple times tensorboard can plot all of the results
    on the same plots with different names.

    Parameters
    ----------
    parent_dir: str
        Path of the directory containing all experiment runs.
    env_name: str
        Environment name used as the run-folder prefix.

    Returns
    -------
    parent_dir/run_dir
        Path to this run's save directory (created if missing).
    """
    os.makedirs(parent_dir, exist_ok=True)
    experiment_id = 0
    for folder_name in os.listdir(parent_dir):
        if not os.path.isdir(os.path.join(parent_dir, folder_name)):
            continue
        try:
            run_number = int(folder_name.split('-run')[-1])
        except ValueError:
            # Fixed: was a bare "except: pass", which also swallowed
            # KeyboardInterrupt/SystemExit.  Only non-"-run<N>" folder
            # names should be skipped.
            continue
        experiment_id = max(experiment_id, run_number)
    experiment_id += 1

    parent_dir = os.path.join(parent_dir, env_name)
    parent_dir = parent_dir + '-run{}'.format(experiment_id)
    os.makedirs(parent_dir, exist_ok=True)
    return parent_dir
def statistics_plot(x, y, x_name, y_name, title='', filename=None):
    """Plot *y* against *x* as a cyan line and optionally save the figure.

    :param x: x-axis values
    :param y: y-axis values
    :param x_name: x-axis label
    :param y_name: y-axis label
    :param title: figure title
    :param filename: if given, path the figure is saved to
    """
    fig, ax = plt.subplots()
    ax.plot(x, y, 'c')
    ax.tick_params(labelcolor='black', labelsize='medium', width=1)
    ax.set_xlabel(x_name)
    ax.set_ylabel(y_name)
    ax.set_title(title)
    # Fixed: save BEFORE show().  plt.show() consumes/clears the current
    # figure, so saving afterwards produced blank image files.
    if filename is not None:
        plt.savefig(filename)
    plt.show()
# matplot color
# 'b' blue
# 'g' green
# 'r' red
# 'c' cyan
# 'm' magenta
# 'y' yellow
# 'k' black
# 'w' white
def time_seq():
    """Return the current local time as a compact YYYYmmddHHMMSS string."""
    now = time.localtime()
    return time.strftime("%Y%m%d%H%M%S", now)
def get_class_attr(Cls) -> []:
    """
    List the data-attribute names defined directly on class *Cls*,
    excluding functions/methods and dunder names.
    :param Cls: class object to inspect
    :return: list of attribute names
    """
    import re
    function_repr = re.compile('<function.*?>')
    names = []
    for attr_name, attr_value in Cls.__dict__.items():
        if function_repr.match(str(attr_value)):
            continue
        if attr_name.startswith('__') and attr_name.endswith('__'):
            continue
        names.append(attr_name)
    return names


def get_class_attr_val(cls):
    """
    Map each class-level data attribute of *cls*'s class to its current
    value on the instance *cls*.
    :param cls: instance to inspect
    :return: dict of attribute name -> value
    """
    return {a: getattr(cls, a) for a in get_class_attr(type(cls))}
def load_obj(path):
    """Unpickle and return the object stored at *path*.

    Fixed: the original ``pickle.load(open(path, 'rb'))`` never closed the
    file handle; the ``with`` block closes it deterministically.
    NOTE: pickle.load executes arbitrary code -- only load trusted files.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)
|
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: MIT-0
import boto3
from botocore.client import Config
import os
import io
class AwsHelper:
    """Small factory for boto3 clients/resources with a shared retry policy."""

    @staticmethod
    def _buildConfig():
        # Shared botocore retry policy; previously duplicated verbatim in
        # getClient and getResource.
        return Config(
            retries = dict(
                max_attempts = 6
            )
        )

    def getClient(self, name, awsRegion=None):
        """Return a boto3 client for *name*, optionally pinned to *awsRegion*."""
        config = self._buildConfig()
        if(awsRegion):
            return boto3.client(name, region_name=awsRegion, config=config)
        else:
            return boto3.client(name, config=config)

    def getResource(self, name, awsRegion=None):
        """Return a boto3 resource for *name*, optionally pinned to *awsRegion*."""
        config = self._buildConfig()
        if(awsRegion):
            return boto3.resource(name, region_name=awsRegion, config=config)
        else:
            return boto3.resource(name, config=config)
class S3Helper:
    """Static helpers for common S3 operations (list, read, write, rename)."""

    @staticmethod
    def getS3BucketRegion(bucketName):
        """Return the region the bucket lives in.

        NOTE(review): get_bucket_location reports None as LocationConstraint
        for us-east-1 buckets — confirm callers treat None as acceptable.
        """
        client = boto3.client('s3')
        response = client.get_bucket_location(Bucket=bucketName)
        awsRegion = response['LocationConstraint']
        return awsRegion

    @staticmethod
    def getFileNames(bucketName, prefix, maxPages, allowedFileTypes, awsRegion=None):
        """List object keys under *prefix* whose file extension is allowed.

        Pages through list_objects_v2 up to *maxPages* pages.  Fixes from
        review: the page counter was never incremented (maxPages had no
        effect), and an empty listing raised KeyError on 'Contents'.
        """
        files = []
        currentPage = 1
        hasMoreContent = True
        continuationToken = None
        s3client = AwsHelper().getClient('s3', awsRegion)
        while(hasMoreContent and currentPage <= maxPages):
            if(continuationToken):
                listObjectsResponse = s3client.list_objects_v2(
                    Bucket=bucketName,
                    Prefix=prefix,
                    ContinuationToken=continuationToken)
            else:
                listObjectsResponse = s3client.list_objects_v2(
                    Bucket=bucketName,
                    Prefix=prefix)
            if(listObjectsResponse['IsTruncated']):
                continuationToken = listObjectsResponse['NextContinuationToken']
            else:
                hasMoreContent = False
            # 'Contents' is absent when the prefix matches nothing.
            for doc in listObjectsResponse.get('Contents', []):
                docName = doc['Key']
                docExt = FileHelper.getFileExtenstion(docName)
                docExtLower = docExt.lower()
                if(docExtLower in allowedFileTypes):
                    files.append(docName)
            currentPage += 1
        return files

    @staticmethod
    def getFilteredFileNames(bucketName, prefix, extension, awsRegion=None):
        """List object keys under *prefix* that end with *extension*."""
        files = []
        s3client = AwsHelper().getResource('s3', awsRegion)
        bucket = s3client.Bucket(bucketName)
        objs = bucket.objects.filter(Prefix=prefix)
        for obj in objs:
            docName = obj.key
            print("Key:{}".format(docName))
            if docName.endswith(extension):
                files.append(docName)
        return files

    @staticmethod
    def writeToS3(content, bucketName, s3FileName, awsRegion=None):
        """Upload *content* as the object *s3FileName* in *bucketName*."""
        s3 = AwsHelper().getResource('s3', awsRegion)
        # Renamed from `object` to stop shadowing the builtin.
        s3_object = s3.Object(bucketName, s3FileName)
        s3_object.put(Body=content)

    @staticmethod
    def readFromS3(bucketName, s3FileName, awsRegion=None):
        """Return the object's body decoded as UTF-8 text."""
        s3 = AwsHelper().getResource('s3', awsRegion)
        obj = s3.Object(bucketName, s3FileName)
        return obj.get()['Body'].read().decode('utf-8')

    @staticmethod
    def deleteObject(bucketName, s3FileName, awsRegion=None):
        """Delete the object and return the boto3 delete response."""
        s3 = AwsHelper().getResource('s3', awsRegion)
        obj = s3.Object(bucketName, s3FileName)
        return obj.delete()

    @staticmethod
    def renameObject(bucketName, current, newObject, awsRegion=None):
        """Copy *current* to *newObject* within the bucket, then delete the original."""
        print("Renaming object {} to {} in bucket {}".format(current,newObject,bucketName))
        s3 = AwsHelper().getResource('s3', awsRegion)
        s3.Object(bucketName, newObject).copy_from(CopySource=bucketName+"/"+current)
        s3.Object(bucketName,current).delete()
class FileHelper:
    """Static helpers for splitting file paths into name and extension."""

    @staticmethod
    def getFileNameAndExtension(filePath):
        """Return (name, extension-without-dot) of the path's basename."""
        basename = os.path.basename(filePath)
        dn, dext = os.path.splitext(basename)
        return (dn, dext[1:])

    @staticmethod
    def getFileName(fileName):
        """Return the basename without its final extension."""
        return FileHelper.getFileNameAndExtension(fileName)[0]

    @staticmethod
    def getFileExtenstion(fileName):
        """Return the final extension without the leading dot.

        The misspelled name is kept because callers (e.g. S3Helper) use it;
        prefer the correctly spelled alias below in new code.
        """
        return FileHelper.getFileNameAndExtension(fileName)[1]

    # Backward-compatible, correctly spelled alias.
    getFileExtension = getFileExtenstion
|
from django.db import models
from django.contrib.auth.models import User
import PIL.Image
from django.urls import reverse
from cloudinary.models import CloudinaryField
# Create your models here.
class Image(models.Model):
    ''' a model for Image posts.

    Stores the Cloudinary-hosted picture, its caption, the owning Profile,
    and the set of users who liked it.
    '''
    # Hosted on Cloudinary rather than local media storage.
    image = CloudinaryField('image')
    caption = models.TextField()
    # default='1' points at the profile with pk 1 when none is given.
    profile = models.ForeignKey('Profile', default='1', on_delete=models.CASCADE)
    likes = models.ManyToManyField(User, blank=True)
    created_on = models.DateTimeField(auto_now_add=True)
    def get_absolute_url(self):
        ''' Redirect target after create/update: always the home feed. '''
        return reverse('vinsta-home')
    def save_image(self):
        ''' method to save an image post instance '''
        self.save()
    def delete_image(self):
        '''method to delete an image post instance '''
        self.delete()
    def update_caption(self, new_caption):
        ''' method to update an image's caption and persist it '''
        self.caption = new_caption
        self.save()
    @classmethod
    def get_user_images(cls, user_id):
        ''' Return all images for the given profile id, oldest first. '''
        img = Image.objects.filter(profile=user_id).all()
        # Sorted in Python; newest-last ordering by creation time.
        sort = sorted(img, key=lambda t: t.created_on)
        return sort
class Profile(models.Model):
    ''' a model for a user profile (one per auth user).

    Holds the avatar photo and a short bio; the photo is downscaled to fit
    in 300x300 on every save.
    '''
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    photo = models.ImageField(upload_to = 'photos/',default='default.jpg')
    # f-prefix removed: the literal had no placeholders (same value).
    bio = models.TextField(max_length=500, blank=True, default='I love vinstagram!')
    def __str__(self):
        return f'{self.user.username}'
    def save(self, *args, **kwargs):
        ''' Persist the profile, then shrink the photo to at most 300x300. '''
        super().save(*args, **kwargs)
        # NOTE(review): relies on local file storage exposing .path; cloud
        # storages (Cloudinary is used for Image above) do not — confirm
        # profile photos always live on local media storage.
        img = PIL.Image.open(self.photo.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)  # in-place, keeps aspect ratio
            img.save(self.photo.path)
    def save_profile(self):
        ''' method to save a user's profile '''
        self.save()
    def delete_profile(self):
        '''method to delete a user's profile '''
        self.delete()
    def update_bio(self, new_bio):
        ''' method to update a users profile bio '''
        self.bio = new_bio
        self.save()
    def update_image(self, user_id, new_image):
        ''' method to update a users profile image.

        Raises User.DoesNotExist if *user_id* is invalid.
        '''
        # Existence check only — the fetched user object was never used,
        # so the unused binding is dropped while keeping the validation.
        User.objects.get(id=user_id)
        self.photo = new_image
        self.save()
class Comment(models.Model):
    ''' a model for comments on Image posts.

    Links a Profile (author) to an Image with the comment text; listed
    oldest-first via Meta.ordering.
    '''
    related_post = models.ForeignKey('Image', on_delete=models.CASCADE)
    # `name` is the authoring profile (kept for backwards compatibility).
    name = models.ForeignKey('Profile', on_delete=models.CASCADE)
    comment_body = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Oldest comments first.
        ordering = ['created_on']
    def save_comments(self):
        ''' method to save comment instance '''
        self.save()
    def delete_comment(self):
        '''method to delete comment instance '''
        self.delete()
    def edit_comment(self, new_comment):
        ''' method to edit a comment and persist the change '''
        self.comment_body = new_comment
        self.save()
    def __str__(self):
        return f'Comment by {self.name}'
|
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from conveyordashboard import dashboard
class Volumes(horizon.Panel):
    """Horizon panel listing volumes, registered on the Conveyor dashboard."""
    name = _("Volumes")
    slug = 'volumes'
# Make the panel visible under the Conveyor dashboard at import time.
dashboard.Conveyor.register(Volumes)
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from hierarkey.forms import HierarkeyForm
from i18nfield.forms import I18nFormMixin, I18nModelForm
from pretalx.common.mixins.forms import ReadOnlyFlag
from pretalx.submission.models import AnswerOption, CfP, Question, SubmissionType, Track
class CfPSettingsForm(ReadOnlyFlag, I18nFormMixin, HierarkeyForm):
    """Event-level CfP settings: which submission fields are shown/required,
    length limits, and organiser notification preferences."""

    use_tracks = forms.BooleanField(
        label=_('Use tracks'),
        required=False,
        help_text=_('Do you organise your talks by tracks?'),
    )
    present_multiple_times = forms.BooleanField(
        label=_('Slot Count'),
        required=False,
        help_text=_('Can talks be held multiple times?'),
    )
    cfp_show_deadline = forms.BooleanField(
        label=_('Display deadline publicly'),
        required=False,
        help_text=_('Show the time and date the CfP ends to potential speakers.'),
    )
    # cfp_request_*: whether the field appears on the submission form.
    cfp_request_abstract = forms.BooleanField(label='', required=False)
    cfp_request_description = forms.BooleanField(label='', required=False)
    cfp_request_notes = forms.BooleanField(label='', required=False)
    cfp_request_biography = forms.BooleanField(label='', required=False)
    cfp_request_availabilities = forms.BooleanField(label='', required=False)
    cfp_request_do_not_record = forms.BooleanField(label='', required=False)
    cfp_request_image = forms.BooleanField(label='', required=False)
    cfp_request_track = forms.BooleanField(label='', required=False)
    cfp_request_duration = forms.BooleanField(label='', required=False)
    # cfp_require_*: whether the field must be filled in.
    cfp_require_abstract = forms.BooleanField(label='', required=False)
    cfp_require_description = forms.BooleanField(label='', required=False)
    cfp_require_notes = forms.BooleanField(label='', required=False)
    cfp_require_biography = forms.BooleanField(label='', required=False)
    cfp_require_availabilities = forms.BooleanField(label='', required=False)
    cfp_require_image = forms.BooleanField(label='', required=False)
    cfp_require_track = forms.BooleanField(label='', required=False)
    cfp_require_duration = forms.BooleanField(label='', required=False)
    cfp_abstract_min_length = forms.IntegerField(
        label=_('Minimum length'), required=False, min_value=0
    )
    cfp_description_min_length = forms.IntegerField(
        label=_('Minimum length'), required=False, min_value=0
    )
    cfp_biography_min_length = forms.IntegerField(
        label=_('Minimum length'), required=False, min_value=0
    )
    cfp_abstract_max_length = forms.IntegerField(
        label=_('Maximum length'), required=False, min_value=0
    )
    cfp_description_max_length = forms.IntegerField(
        label=_('Maximum length'), required=False, min_value=0
    )
    cfp_biography_max_length = forms.IntegerField(
        label=_('Maximum length'), required=False, min_value=0
    )
    cfp_count_length_in = forms.ChoiceField(
        label=_('Count text length in'),
        choices=(('chars', _('Characters')), ('words', _('Words'))),
    )
    mail_on_new_submission = forms.BooleanField(
        label=_('Send mail on new submission'),
        help_text=_(
            'If this setting is checked, you will receive an email to the organiser address for every received submission.'
        ),
        required=False,
    )

    def __init__(self, obj, *args, **kwargs):
        """:param obj: the event whose settings are edited."""
        kwargs.pop(
            'read_only'
        )  # added in ActionFromUrl view mixin, but not needed here.
        super().__init__(*args, obj=obj, **kwargs)
        # Default of None prevents an AttributeError when *obj* has no
        # `email` attribute (getattr without a default was a latent crash).
        if getattr(obj, 'email', None):
            self.fields[
                'mail_on_new_submission'
            ].help_text += f' (<a href="mailto:{obj.email}">{obj.email}</a>)'
        for field in ['abstract', 'description', 'biography']:
            self.fields[f'cfp_{field}_min_length'].widget.attrs['placeholder'] = ''
            self.fields[f'cfp_{field}_max_length'].widget.attrs['placeholder'] = ''
class CfPForm(ReadOnlyFlag, I18nModelForm):
    """Edit form for an event's Call for Papers (headline, text, deadline)."""
    class Meta:
        model = CfP
        fields = ['headline', 'text', 'deadline']
        widgets = {
            # Rendered with the JS datetime picker used across the dashboard.
            'deadline': forms.DateTimeInput(attrs={'class': 'datetimepickerfield'})
        }
class QuestionForm(ReadOnlyFlag, I18nModelForm):
    """Create/edit form for CfP questions.

    The track selector is only shown when the event uses tracks; the
    visibility flag is locked once a non-public question has answers.
    """

    def __init__(self, *args, event=None, **kwargs):
        super().__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        # Guard `event` itself: it defaults to None, but the original
        # dereferenced event.settings unconditionally and crashed.
        if not (
            event
            and event.settings.use_tracks
            and event.tracks.all().count()
            and event.settings.cfp_request_track
        ):
            self.fields.pop('tracks')
        else:
            self.fields['tracks'].queryset = event.tracks.all()
        # NOTE(review): locks `is_public` for answered non-public questions —
        # presumably so collected answers stay private; confirm intent.
        if (
            instance
            and instance.pk
            and instance.answers.count()
            and not instance.is_public
        ):
            self.fields['is_public'].disabled = True

    class Meta:
        model = Question
        fields = [
            'target',
            'question',
            'help_text',
            'variant',
            'is_public',
            'required',
            'tracks',
            'contains_personal_data',
            'min_length',
            'max_length',
        ]
class AnswerOptionForm(ReadOnlyFlag, I18nModelForm):
    """Edit form for a single choice-question answer option."""
    class Meta:
        model = AnswerOption
        fields = ['answer']
class SubmissionTypeForm(ReadOnlyFlag, I18nModelForm):
    """Create/edit form for submission types."""

    def save(self, *args, **kwargs):
        """Save and return the submission type.

        The original dropped the instance (implicitly returned None),
        breaking the standard ModelForm.save() contract that callers
        rely on (``obj = form.save()``).
        """
        instance = super().save(*args, **kwargs)
        if instance.pk and 'duration' in self.changed_data:
            # Re-sync dependent durations when the default changed
            # (update_duration semantics are defined on the model).
            instance.update_duration()
        return instance

    class Meta:
        model = SubmissionType
        fields = ['name', 'default_duration', 'deadline']
        widgets = {
            'deadline': forms.DateTimeInput(attrs={'class': 'datetimepickerfield'})
        }
class TrackForm(ReadOnlyFlag, I18nModelForm):
    """Edit form for a track; renders the color field with a color picker."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hook the JS color-picker widget onto the color input.
        self.fields['color'].widget.attrs['class'] = 'colorpickerfield'
    class Meta:
        model = Track
        fields = ['name', 'color']
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class Net(Cell):
    """Elementwise Mul followed by SigmoidCrossEntropyWithLogits loss,
    with optional parallel shard strategies for each op."""
    def __init__(self, mul_weight, strategy1=None, strategy2=None):
        super().__init__()
        # strategy1/strategy2: shard strategies; None lets auto-parallel choose.
        self.mul = P.Mul().shard(strategy1)
        self.loss = P.SigmoidCrossEntropyWithLogits().shard(strategy2)
        self.mul_weight = Parameter(mul_weight, "w1")
    def construct(self, x, b):
        # out = x * w1, then sigmoid cross-entropy against labels b.
        out = self.mul(x, self.mul_weight)
        out = self.loss(out, b)
        return out
# Shared 128x64 fp32 fixtures: input, weight parameter, and target labels.
_x = Tensor(np.ones([128, 64]), dtype=ms.float32)
_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32)
_b = Tensor(np.ones([128, 64]), dtype=ms.float32)
def compile_net(net):
    """Wrap *net* in a one-step training cell, compile it under the current
    auto-parallel context, then reset that context for the next test."""
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    _executor.compile(train_net, _x, _b)
    # Reset so each test configures the parallel context from scratch.
    context.reset_auto_parallel_context()
def test_sigmoid_cross_entropy_with_logits_data_parallel():
    """Data parallel: shard only the batch dimension (16, 1) on 16 devices."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((16, 1), (16, 1))
    strategy2 = ((16, 1), (16, 1))
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)
def test_sigmoid_cross_entropy_with_logits_model_parallel():
    """Model parallel: shard only the feature dimension (1, 16)."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((1, 16), (1, 16))
    strategy2 = ((1, 16), (1, 16))
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)
def test_sigmoid_cross_entropy_with_logits_hybrid_parallel():
    """Hybrid parallel: shard both dimensions (2, 8) across 16 devices."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((2, 8), (2, 8))
    strategy2 = ((2, 8), (2, 8))
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)
def test_sigmoid_cross_entropy_with_logits_auto_parallel():
    """Auto parallel: no strategies given, the framework searches for them."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
    net = Net(_w1)
    compile_net(net)
def test_sigmoid_cross_entropy_with_logits_repeat_calc():
    """Repeated calculation: loss strategy (2, 2) uses only 4 of 16 devices,
    so computation repeats across the remaining device groups."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((2, 8), (2, 8))
    strategy2 = ((2, 2), (2, 2))
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)
|
# model settings
# GFL detector: ResNet-50 backbone + FPN neck + unshared GFL head (COCO, 80 classes).
model = dict(
    type='GFL',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='GFLHeadUnshare',
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)))
# training and testing settings
train_cfg = dict(
    assigner=dict(type='ATSSAssigner', topk=9),
    allowed_border=-1,
    pos_weight=-1,
    debug=False)
test_cfg = dict(
    nms_pre=1000,
    min_bbox_size=0,
    score_thr=0.05,
    nms=dict(type='nms', iou_threshold=0.6),
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/share2/public/xldetection/coco/'
# multi-scale training
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        # Short side sampled in [480, 800] at train time (multi-scale).
        img_scale=[(1333, 480), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# 2x schedule: decay at epochs 16 and 22 over 24 total epochs.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
total_epochs = 24
checkpoint_config = dict(interval=2)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
"""
Django settings for harmonization_project project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
SECRET_KEY = os.environ.get("SECRET_KEY")
# DEBUG is passed as "0"/"1" in the environment; int() makes it falsy/truthy.
DEBUG = int(os.environ.get("DEBUG", default=0))
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
# NOTE(review): raises AttributeError at startup if the env var is unset —
# presumably intentional fail-fast; confirm, or add a default.
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    'django.contrib.sites',
    # Third party
    'crispy_forms',
    'allauth',
    'allauth.account',
    # Local
    'users.apps.UsersConfig',
    'pages.apps.PagesConfig',
    'upload.apps.UploadConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'harmonization_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'harmonization_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Defaults to a local SQLite file; override via SQL_* env vars (e.g. Postgres).
DATABASES = {
    "default": {
        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
        "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
        "USER": os.environ.get("SQL_USER", "user"),
        "PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
        "HOST": os.environ.get("SQL_HOST", "localhost"),
        "PORT": os.environ.get("SQL_PORT", "5432"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/staticfiles/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# TODO
AUTH_USER_MODEL = 'users.CustomUser'
# django-crispy-forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-allauth config
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT = 'home'
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST=os.environ.get('EMAIL_HOST')
EMAIL_HOST_USER=os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD=os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_PORT=os.environ.get('EMAIL_PORT')
# NOTE(review): env vars are strings, so EMAIL_USE_TLS="False" is truthy —
# confirm deployments only ever set this to an empty/absent value to disable.
EMAIL_USE_TLS=os.environ.get('EMAIL_USE_TLS')
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
DEFAULT_FROM_EMAIL = 'admin@harmonization.com'
|
#!/usr/bin/python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import sys
from random import randint
class KLL300:
    """Fixed-memory KLL quantile sketch holding at most ~300 items.

    Items live in per-level Compactor buffers; level h items carry
    weight 2**h.  When the total size reaches maxSize, the first
    over-capacity level is compacted into the next level.
    """

    def __init__(self):
        self.maxSize = 300
        self.size = 0
        self.capacities = [2, 2, 4, 6, 10, 18, 28, 44, 70, 112]
        self.H = len(self.capacities)
        self.compactors = [Compactor() for _ in range(self.H)]

    def sizef(self):
        """Recount the total number of stored items across all levels."""
        return sum(len(level) for level in self.compactors)

    def update(self, item):
        """Insert *item*, compacting one level if the sketch is full."""
        self.compactors[0].append(item)
        self.size += 1
        if self.size < self.maxSize:
            return
        # Compact the first over-full level into the one above it.
        for h in range(self.H - 1):
            level = self.compactors[h]
            if len(level) >= self.capacities[h]:
                self.compactors[h + 1].extend(level.compact())
                break
        self.size = self.sizef()
        assert self.size < self.maxSize

    def cdf(self):
        """Return (sorted items, cumulative normalized weights)."""
        weighted = sorted(
            (item, 2 ** h)
            for h, level in enumerate(self.compactors)
            for item in level
        )
        items = [pair[0] for pair in weighted]
        running = 0
        cumulative = []
        for _, weight in weighted:
            running += weight
            cumulative.append(running)
        total = cumulative[-1]
        return items, [c / total for c in cumulative]
class Compactor(list):
    """A level buffer for the KLL sketch.

    compact() sorts the buffer and yields every other element (random
    starting phase), then empties the buffer once fully consumed.
    """

    def compact(self):
        self.sort()
        start = randint(0, 1)
        survivors = self[start::2]
        for survivor in survivors:
            yield survivor
        # Runs only once the generator is exhausted by the consumer.
        self.clear()
|
from json import JSONEncoder
class BaseContext(JSONEncoder):
    """JSONEncoder that delegates repr()/json() to an optional wrapped instance.

    With no instance attached, repr() is the empty string and json() is None.
    """

    def __init__(self):
        super(BaseContext, self).__init__()
        self.instance = None

    def __repr__(self):
        return self.instance.__repr__() if self.instance is not None else ""

    def json(self):
        """Return the wrapped instance's json() payload, or None if unset."""
        return self.instance.json() if self.instance is not None else None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-23 08:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10): replaces PhraseRelation with
    weighted bigram/trigram relations and per-phrase weighted personalities.

    Do not hand-edit the operations; create a new migration instead.
    """

    dependencies = [
        ('echobot', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='BiGramRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('freq', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='TriGramRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('freq', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='WeightedPersonality',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('weight', models.FloatField(default=0)),
                ('personality', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='echobot.Personality')),
            ],
        ),
        # Tear down the old PhraseRelation model and its FKs.
        migrations.RemoveField(
            model_name='phraserelation',
            name='post',
        ),
        migrations.RemoveField(
            model_name='phraserelation',
            name='prev',
        ),
        migrations.RemoveField(
            model_name='phrase',
            name='personality',
        ),
        migrations.DeleteModel(
            name='PhraseRelation',
        ),
        # Wire the new n-gram relations to Phrase.
        migrations.AddField(
            model_name='trigramrelation',
            name='first',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='first_phrase_in_trigram', to='echobot.Phrase'),
        ),
        migrations.AddField(
            model_name='trigramrelation',
            name='last',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='last_phrase_in_trigram', to='echobot.Phrase'),
        ),
        migrations.AddField(
            model_name='trigramrelation',
            name='mid',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mid_phrase_in_trigram', to='echobot.Phrase'),
        ),
        migrations.AddField(
            model_name='bigramrelation',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_phrase_in_bigram', to='echobot.Phrase'),
        ),
        migrations.AddField(
            model_name='bigramrelation',
            name='prev',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='prev_phrase_in_bigram', to='echobot.Phrase'),
        ),
        migrations.AddField(
            model_name='phrase',
            name='weighted_personality_list',
            field=models.ManyToManyField(to='echobot.WeightedPersonality'),
        ),
    ]
|
#!/usr/bin/env python3
import requests, argparse
from datetime import datetime
from AMPConfig import APIKey, ClientID
# Add the argument parsers and help menu
ap = argparse.ArgumentParser(add_help=False)
ap.add_argument("-h", "--help", action="help", default=argparse.SUPPRESS,
        help="Shows this help menu.")
ap.add_argument("-g", "--group", required=True,
        help="Group GUID to move the connectors to.")
ap.add_argument("-t", "--time", required=True,
        help="Time (in days) since the connector was last seen.")
args = ap.parse_args()

# connector GUID -> (raw last_seen string, days since last seen + 1)
ls = {}
now = datetime.now()
comps = [i['connector_guid'] for i in requests.get("https://api.amp.cisco.com/v1/computers", auth=(ClientID, APIKey)).json()['data']]
for i in comps:
    r = requests.get("https://api.amp.cisco.com/v1/computers/{}".format(i), auth=(ClientID, APIKey)).json()['data'].get('last_seen')
    # Skip connectors with no last_seen timestamp: the original computed
    # len(None) / None + 1 here and crashed on such records.
    if not r:
        continue
    s = datetime.strptime(r, "%Y-%m-%dT%H:%M:%SZ")
    # +1 rounds the floor-division of days up, biasing toward "stale".
    delta = (now-s).days
    ls[i] = (r, delta+1)
for i in ls:
    if ls[i][1] > int(args.time):
        data = {"group_guid": args.group}
        move = requests.patch("https://api.amp.cisco.com/v1/computers/{}".format(i), data=data, auth=(ClientID, APIKey))
        if move.status_code == 202:
            print("Connector GUID "+str(i)+" was successfully moved into "+str(args.group)+" group.")
        else:
            print("There was an issue moving connector GUID "+str(i)+" to the new group.\nError: "+move.json()['errors'][0]['details'][0])
|
'''
--- Day 6: Chronal Coordinates ---
The device on your wrist beeps several times, and once again you feel like you're falling.
"Situation critical," the device announces. "Destination indeterminate. Chronal interference detected. Please specify new target coordinates."
The device then produces a list of coordinates (your puzzle input). Are they places it thinks are safe or dangerous? It recommends you check manual page 729. The Elves did not give you a manual.
If they're dangerous, maybe you can minimize the danger by finding the coordinate that gives the largest distance from the other points.
Using only the Manhattan distance, determine the area around each coordinate by counting the number of integer X,Y locations that are closest to that coordinate (and aren't tied in distance to any other coordinate).
Your goal is to find the size of the largest area that isn't infinite. For example, consider the following list of coordinates:
1, 1
1, 6
8, 3
3, 4
5, 5
8, 9
If we name these coordinates A through F, we can draw them on a grid, putting 0,0 at the top left:
..........
.A........
..........
........C.
...D......
.....E....
.B........
..........
..........
........F.
This view is partial - the actual grid extends infinitely in all directions. Using the Manhattan distance, each location's closest coordinate can be determined, shown here in lowercase:
aaaaa.cccc
aAaaa.cccc
aaaddecccc
aadddeccCc
..dDdeeccc
bb.deEeecc
bBb.eeee..
bbb.eeefff
bbb.eeffff
bbb.ffffFf
Locations shown as . are equally far from two or more coordinates, and so they don't count as being closest to any.
In this example, the areas of coordinates A, B, C, and F are infinite - while not shown here, their areas extend forever outside the visible grid. However, the areas of coordinates D and E are finite: D is closest to 9 locations, and E is closest to 17 (both including the coordinate's location itself). Therefore, in this example, the size of the largest area is 17.
What is the size of the largest area that isn't infinite?
'''
from pathlib import Path
import operator
from dataclasses import dataclass, field
from typing import Set
from collections import defaultdict
@dataclass
class Point:
    """A 2-D grid point with Manhattan-distance helpers."""
    x: int = field(default_factory=int)
    y: int = field(default_factory=int)

    def _distance(self, other):
        """Manhattan distance to *other*."""
        return abs(self.x - other.x) + abs(self.y - other.y)

    def closest_point(self, points: Set['Point']):
        """Return (closest point in *points*, distance), or (None, 0) when
        two or more points tie for the minimum distance.

        BUG FIX: the original returned (None, 0) on the FIRST tie it saw,
        even if a strictly closer point appeared later in the iteration.
        A tie only disqualifies the result if it stands at the minimum.
        """
        min_distance = 0  # sentinel: 0 is impossible for a distinct point
        closest = self
        tied = False
        for point in points:
            if self == point:
                return point, 0
            distance = self._distance(point)
            if min_distance == 0 or distance < min_distance:
                min_distance = distance
                closest = point
                tied = False
            elif distance == min_distance:
                tied = True
        return (None, 0) if tied else (closest, min_distance)

    def __hash__(self):
        return hash((self.x, self.y))

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

    def __lt__(self, other):
        # Ordered by Manhattan distance from the origin.
        origin = Point(0, 0)
        return origin._distance(self) < origin._distance(other)

    def __gt__(self, other):
        origin = Point(0, 0)
        return origin._distance(self) > origin._distance(other)
def main(data):
    """Solve AoC 2018 day 6 part 1.

    :param data: iterable of "x, y" coordinate strings
    :return: (winning Point, size of the largest finite area)

    BUG FIX: grid locations tied between coordinates used to increment
    ``distance_dict[point]`` — keyed by the *grid* point — polluting the
    result dict; tied locations now count for nobody.
    """
    points_of_interest = set(
        Point(int(p.split(',')[0]), int(p.split(',')[1])) for p in data
    )
    min_x = min(p.x for p in points_of_interest)
    min_y = min(p.y for p in points_of_interest)
    max_x = max(p.x for p in points_of_interest)
    max_y = max(p.y for p in points_of_interest)
    area = defaultdict(int)
    for x in range(max_x + 1):
        for y in range(max_y + 1):
            closest, _ = Point(x, y).closest_point(points_of_interest)
            if not closest:
                # Tied between two or more coordinates: counts for nobody.
                continue
            if closest.x in (min_x, max_x) or closest.y in (min_y, max_y):
                # Coordinates touching the bounding box own infinite area.
                continue
            area[closest] += 1
    return max(area, key=area.get), max(area.values())
if __name__ == '__main__':
    # Read the puzzle input (one "x, y" pair per line) and print the answer.
    filepath = Path.cwd() / '2018/day_6.txt'
    with open(filepath) as f:
        data = [x.strip() for x in f.readlines()]
    result = main(data)
    print(f'Result: {result}')
|
from __future__ import unicode_literals
from java.util import EventListener
# Cache of generated wrapper classes, keyed by
# "<fully-qualified interface name>/<comma-joined handled event names>".
_wrapperClassMap = {} # event interface name -> wrapper class
def _noOp(self, event):
    # Stand-in for interface methods the caller did not subscribe to.
    pass
def _createListenerWrapper(eventInterface, eventNames, listener, args, kwargs,
                           removeMethod):
    """Create a wrapper instance that implements *eventInterface* and forwards
    the named events to *listener*.

    The generated wrapper class (one per interface/event-name combination) is
    cached in _wrapperClassMap so repeated registrations are cheap.

    :param eventInterface: Java EventListener subclass to implement
    :param eventNames: one event-method name, or an iterable of names
    :param listener: callable invoked as listener(event, *args, **kwargs)
    :param removeMethod: bound Java remove*Listener method used by unlisten()
    :return: an EventListenerWrapper subclass instance
    """
    # Normalize to a sorted tuple/list of names (Jython 2.x: ``basestring``
    # covers both str and unicode).
    eventNames = ((eventNames,) if isinstance(eventNames, basestring) else
                  sorted(eventNames))
    assert issubclass(eventInterface, EventListener), \
        'eventName class must be a subclass of EventListener'
    assert hasattr(listener, '__call__'), 'listener must be callable'
    for eventName in eventNames:
        assert hasattr(eventInterface, eventName), \
            '%s has no method named "%s"' % \
            (eventInterface.__name__, eventName)
    # Figure out the fully qualified name of the interface
    className = eventInterface.__name__
    if eventInterface.__module__:
        className = eventInterface.__module__ + '.' + className
    # Cache key: interface name plus the exact set of forwarded events.
    mapKey = '%s/%s' % (className, ','.join(eventNames))
    # Create a wrapper class for this eventName class if it's not there yet
    wrapperClass = _wrapperClassMap.get(mapKey)
    if not wrapperClass:
        # Gather all the interface methods (stop at EventListener itself).
        methodNames = set()
        for cls in eventInterface.__mro__:
            if cls is EventListener:
                break
            methodNames.update(m for m in cls.__dict__ if
                               not m.startswith('_'))
        # Redirect the requested methods to self.handleEvent(); every other
        # interface method becomes a no-op.
        methods = {m: EventListenerWrapper.handleEvent
                   if m in eventNames else _noOp for m in methodNames}
        wrapperClass = type('%sWrapper' % eventInterface.__name__,
                            (EventListenerWrapper, eventInterface), methods)
        _wrapperClassMap[mapKey] = wrapperClass
    return wrapperClass(listener, args, kwargs, removeMethod)
class EventListenerWrapper(object):
    """Adapts a plain callable to a Java event-listener interface.

    Stores the callable together with the extra arguments to pass to it,
    plus the remove method (and its arguments) needed to detach again.
    """

    def __init__(self, listener, args, kwargs, removeMethod):
        self.listener = listener
        self.args = args
        self.kwargs = kwargs
        self.removeMethod = removeMethod
        # Default removal arguments: just this wrapper itself.
        self.removeMethodArgs = (self,)

    def handleEvent(self, event):
        """Forward *event* plus the stored extra arguments to the callable."""
        self.listener(event, *self.args, **self.kwargs)

    def unlisten(self):
        """Detach this wrapper from the object it was listening to."""
        self.removeMethod(*self.removeMethodArgs)
def addEventListener(target, eventInterface, event, listener, *args, **kwargs):
    """
    Adds an event listener to `target`.

    :param target: an object that supports listening to the events of the given
                   type (the add*Listener methods must be inherited from a Java
                   class so that autodetection will work)
    :param eventInterface: the interface the wrapper has to implement
                           (e.g. :class:`java.awt.MouseListener`)
    :param event: name(s) of the event(s) to listen for
    :param listener: callable invoked as ``listener(event, *args, **kwargs)``
    :type eventInterface: Java interface
    :type event: string or an iterable of strings
    :type listener: callable
    :return: the wrapper; call :meth:`~EventListenerWrapper.unlisten` on it to
             stop listening
    """
    interfaceName = eventInterface.__name__
    # Autodetect the add/remove registration methods from the interface name.
    addMethod = getattr(target, 'add' + interfaceName)
    removeMethod = getattr(target, 'remove' + interfaceName)
    wrapper = _createListenerWrapper(eventInterface, event, listener, args,
                                     kwargs, removeMethod)
    addMethod(wrapper)
    return wrapper
def addPropertyListener(target, property, listener, *args, **kwargs):
    """
    Adds a callback fired when the given bean property changes.

    Pass ``None`` as the property name to listen to every property.
    The listener is called with ``(event, *args, **kwargs)``.

    :param target: the object whose property will be listened to
    :param property: name of the property, or None for all properties
    :type listener: function or any callable
    :return: the wrapper; stop listening via obj.removePropertyChangeListener()
    """
    from java.beans import PropertyChangeListener
    wrapper = _createListenerWrapper(
        PropertyChangeListener, 'propertyChange', listener, args, kwargs,
        target.removePropertyChangeListener)
    # Property-specific registration takes (property, wrapper).
    if property is None:
        add_args = (wrapper,)
    else:
        add_args = (property, wrapper)
    wrapper.removeMethodArgs = add_args
    target.addPropertyChangeListener(*add_args)
    return wrapper
#
# Shortcuts for java.awt.event
#
def addActionListener(target, listener, *args, **kwargs):
    """Listen for 'actionPerformed' events on *target* (see addEventListener)."""
    from java.awt.event import ActionListener
    return addEventListener(
        target, ActionListener, 'actionPerformed', listener, *args, **kwargs)
def addItemListener(target, listener, *args, **kwargs):
    """Listen for 'itemStateChanged' events on *target* (see addEventListener)."""
    from java.awt.event import ItemListener
    return addEventListener(
        target, ItemListener, 'itemStateChanged', listener, *args, **kwargs)
def addFocusLostListener(target, listener, *args, **kwargs):
    """Listen for 'focusLost' events on *target* (see addEventListener)."""
    from java.awt.event import FocusListener
    return addEventListener(
        target, FocusListener, 'focusLost', listener, *args, **kwargs)
def addMouseClickListener(target, listener, *args, **kwargs):
    """Listen for 'mouseClicked' events on *target* (see addEventListener)."""
    from java.awt.event import MouseListener
    return addEventListener(
        target, MouseListener, 'mouseClicked', listener, *args, **kwargs)
#
# Shortcuts for javax.swing.events
#
def addCaretListener(target, listener, *args, **kwargs):
    """Listen for 'caretUpdate' events on *target* (see addEventListener)."""
    from javax.swing.event import CaretListener
    return addEventListener(
        target, CaretListener, 'caretUpdate', listener, *args, **kwargs)
def addChangeListener(target, listener, *args, **kwargs):
    """Listen for 'stateChanged' events on *target* (see addEventListener)."""
    from javax.swing.event import ChangeListener
    return addEventListener(
        target, ChangeListener, 'stateChanged', listener, *args, **kwargs)
def addDocumentListener(target, listener, *args, **kwargs):
    """Listen for all DocumentListener events — insertUpdate, removeUpdate and
    changedUpdate — on *target* (see addEventListener)."""
    from javax.swing.event import DocumentListener
    events = ('insertUpdate', 'removeUpdate', 'changedUpdate')
    return addEventListener(
        target, DocumentListener, events, listener, *args, **kwargs)
def addListDataListener(target, listener, *args, **kwargs):
    """Listen for all ListDataListener events — contentsChanged, intervalAdded
    and intervalRemoved — on *target* (see addEventListener)."""
    from javax.swing.event import ListDataListener
    events = ('contentsChanged', 'intervalAdded', 'intervalRemoved')
    return addEventListener(
        target, ListDataListener, events, listener, *args, **kwargs)
def addListSelectionListener(target, listener, *args, **kwargs):
    """Listen for 'valueChanged' list-selection events on *target*
    (see addEventListener)."""
    from javax.swing.event import ListSelectionListener
    return addEventListener(
        target, ListSelectionListener, 'valueChanged', listener,
        *args, **kwargs)
def addRowSorterListener(target, listener, *args, **kwargs):
    """
    Shortcut for addEventListener(target, RowSorterListener,
    'sorterChanged', listener).
    """
    # Fixed: the docstring previously named addTreeSelectionListener,
    # but this helper registers a RowSorterListener.
    from javax.swing.event import RowSorterListener
    return addEventListener(target, RowSorterListener, 'sorterChanged',
                            listener, *args, **kwargs)
def addTableModelListener(target, listener, *args, **kwargs):
    """Listen for 'tableChanged' events on *target* (see addEventListener)."""
    from javax.swing.event import TableModelListener
    return addEventListener(
        target, TableModelListener, 'tableChanged', listener, *args, **kwargs)
def addTreeSelectionListener(target, listener, *args, **kwargs):
    """Listen for 'valueChanged' tree-selection events on *target*
    (see addEventListener)."""
    from javax.swing.event import TreeSelectionListener
    return addEventListener(
        target, TreeSelectionListener, 'valueChanged', listener,
        *args, **kwargs)
def addUndoableEditListener(target, listener, *args, **kwargs):
    """Listen for 'undoableEditHappened' events on *target*
    (see addEventListener)."""
    from javax.swing.event import UndoableEditListener
    return addEventListener(
        target, UndoableEditListener, 'undoableEditHappened', listener,
        *args, **kwargs)
|
#! /usr/bin/env python3
import os
import re
import sys
import sysconfig
import platform
import subprocess
from distutils.version import LooseVersion
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
from shutil import copyfile, copymode
class CMakeExtension(Extension):
    """setuptools Extension whose sources are built by CMake, not distutils.

    No sources are given to setuptools; *sourcedir* points at the directory
    containing the CMakeLists.txt.
    """

    def __init__(self, name, sourcedir=''):
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that delegates compilation of each extension to CMake."""

    def run(self):
        """Check that cmake is on PATH (and new enough on Windows), then build."""
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError(
                "CMake must be installed to build the following extensions: " +
                ", ".join(e.name for e in self.extensions))
        if platform.system() == "Windows":
            # Visual Studio generators need a reasonably recent CMake.
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
                                                   out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        """Configure and build one CMakeExtension in self.build_temp.

        :param ext: the CMakeExtension to build
        :raises subprocess.CalledProcessError: if either cmake step fails
        """
        extdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))
        # Drop the built library next to the package and pin CMake to the
        # interpreter running setup.py.
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]
        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]
        if platform.system() == "Windows":
            # Multi-config generators need the per-config output directory.
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
                cfg.upper(),
                extdir)]
            if sys.maxsize > 2**32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']
        env = os.environ.copy()
        # Embed the package version into the compiled module.
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get('CXXFLAGS', ''),
            self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
                              cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args,
                              cwd=self.build_temp)
        # Copy *_test file to tests directory
        #test_bin = os.path.join(self.build_temp, 'python_cpp_example_test')
        #self.copy_test_file(test_bin)
        print() # Add an empty line for cleaner output

    def copy_test_file(self, src_file):
        '''
        Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
        By default, message like `creating directory /path/to/package` and
        `copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
        '''
        # Create directory if needed
        dest_dir = os.path.join(os.path.dirname(
            os.path.abspath(__file__)), 'tests', 'bin')
        if dest_dir != "" and not os.path.exists(dest_dir):
            print("creating directory {}".format(dest_dir))
            os.makedirs(dest_dir)
        # Copy file
        dest_file = os.path.join(dest_dir, os.path.basename(src_file))
        print("copying {} -> {}".format(src_file, dest_file))
        copyfile(src_file, dest_file)
        copymode(src_file, dest_file)
# Build-time requirement only: CMake drives the native build.
requirements = [ 'cmake>=2.8.12', ]

# Package metadata; the single native extension is compiled via CMakeBuild.
setup(
    name='pyransac',
    version='0.1',
    author='Ondra Chum, Dmytro Mishkin',
    author_email='ducha.aiki@gmail.com',
    description='RANSAC with bells and whistles for H and F estimation',
    long_description='',
    packages=find_packages('src'),
    package_dir={'':'src'},
    ext_modules=[CMakeExtension('pyransac/pyransac')],
    cmdclass=dict(build_ext=CMakeBuild),
    #test_suite='tests',
    zip_safe=False,
    install_requires=requirements,
)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Identity(nn.Module):
    """Pass-through module.

    *channel* is accepted only for signature compatibility with SE-style
    layers and is ignored.
    """

    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        """Return the input tensor unchanged."""
        return x
class MobileBottleneck(nn.Module):
    """Reduced MobileNet-style bottleneck: only the optional SE stage is kept.

    NOTE(review): ``SEModule`` is neither defined nor imported in this file,
    so constructing with se=True raises NameError here — confirm the missing
    import at the call site.
    """
    def __init__(self, exp, se=False):
        # exp: channel count handed to the selected layer
        # se: select SEModule (True) or the pass-through Identity (False)
        super(MobileBottleneck, self).__init__()
        if se:
            SELayer = SEModule
        else:
            SELayer = Identity
        self.conv = nn.Sequential(
            SELayer(exp),
        )
    def forward(self, x):
        # With se=False this is a pure identity mapping.
        return self.conv(x)
# Smoke test: with se=False the bottleneck must behave as an identity map.
net = MobileBottleneck(2, False)
sample = torch.randn(1, 1, 2, 3)
print(sample)
out = net(sample)
print(out)
|
"""TensorFlow V1 API __init__.py files."""
# keep sorted
# Relative paths of the generated __init__.py files for the tf.compat.v1 API.
# The BEGIN/END markers delimit tool-generated content — edit via the
# generator, not by hand.
TENSORFLOW_API_INIT_FILES_V1 = [
    # BEGIN GENERATED FILES
    "__init__.py",
    "app/__init__.py",
    "audio/__init__.py",
    "autograph/__init__.py",
    "autograph/experimental/__init__.py",
    "bitwise/__init__.py",
    "compat/__init__.py",
    "config/__init__.py",
    "data/__init__.py",
    "data/experimental/__init__.py",
    "debugging/__init__.py",
    "distribute/__init__.py",
    "distribute/cluster_resolver/__init__.py",
    "distribute/experimental/__init__.py",
    "distributions/__init__.py",
    "dtypes/__init__.py",
    "errors/__init__.py",
    "experimental/__init__.py",
    "feature_column/__init__.py",
    "gfile/__init__.py",
    "io/gfile/__init__.py",
    "graph_util/__init__.py",
    "image/__init__.py",
    "io/__init__.py",
    "queue/__init__.py",
    "initializers/__init__.py",
    "layers/__init__.py",
    "layers/experimental/__init__.py",
    "linalg/__init__.py",
    "lite/__init__.py",
    "lite/constants/__init__.py",
    "lite/experimental/__init__.py",
    "lite/experimental/nn/__init__.py",
    "logging/__init__.py",
    "losses/__init__.py",
    "manip/__init__.py",
    "math/__init__.py",
    "metrics/__init__.py",
    "nest/__init__.py",
    "nn/__init__.py",
    "nn/rnn_cell/__init__.py",
    "profiler/__init__.py",
    "python_io/__init__.py",
    "quantization/__init__.py",
    "ragged/__init__.py",
    "random/__init__.py",
    "raw_ops/__init__.py",
    "resource_loader/__init__.py",
    "strings/__init__.py",
    "saved_model/__init__.py",
    "saved_model/builder/__init__.py",
    "saved_model/constants/__init__.py",
    "saved_model/experimental/__init__.py",
    "saved_model/loader/__init__.py",
    "saved_model/main_op/__init__.py",
    "saved_model/signature_constants/__init__.py",
    "saved_model/signature_def_utils/__init__.py",
    "saved_model/tag_constants/__init__.py",
    "saved_model/utils/__init__.py",
    "sets/__init__.py",
    "signal/__init__.py",
    "sparse/__init__.py",
    "spectral/__init__.py",
    "summary/__init__.py",
    "sysconfig/__init__.py",
    "test/__init__.py",
    "tpu/experimental/__init__.py",
    "tpu/__init__.py",
    "train/__init__.py",
    "train/experimental/__init__.py",
    "train/queue_runner/__init__.py",
    "user_ops/__init__.py",
    "version/__init__.py",
    # END GENERATED FILES
]
# Relative paths of the generated __init__.py files for the V1 Keras API.
KERAS_API_INIT_FILES_V1 = [
    "__init__.py",
    "keras/__init__.py",
    "keras/activations/__init__.py",
    "keras/applications/__init__.py",
    "keras/applications/densenet/__init__.py",
    "keras/applications/inception_resnet_v2/__init__.py",
    "keras/applications/inception_v3/__init__.py",
    "keras/applications/mobilenet/__init__.py",
    "keras/applications/mobilenet_v2/__init__.py",
    "keras/applications/nasnet/__init__.py",
    "keras/applications/resnet50/__init__.py",
    "keras/applications/vgg16/__init__.py",
    "keras/applications/vgg19/__init__.py",
    "keras/applications/xception/__init__.py",
    "keras/backend/__init__.py",
    "keras/callbacks/__init__.py",
    "keras/constraints/__init__.py",
    "keras/datasets/__init__.py",
    "keras/datasets/boston_housing/__init__.py",
    "keras/datasets/cifar10/__init__.py",
    "keras/datasets/cifar100/__init__.py",
    "keras/datasets/fashion_mnist/__init__.py",
    "keras/datasets/imdb/__init__.py",
    "keras/datasets/mnist/__init__.py",
    "keras/datasets/reuters/__init__.py",
    "keras/estimator/__init__.py",
    "keras/experimental/__init__.py",
    "keras/initializers/__init__.py",
    "keras/layers/__init__.py",
    "keras/layers/experimental/__init__.py",
    "keras/losses/__init__.py",
    "keras/metrics/__init__.py",
    "keras/models/__init__.py",
    "keras/optimizers/__init__.py",
    "keras/optimizers/schedules/__init__.py",
    "keras/preprocessing/__init__.py",
    "keras/preprocessing/image/__init__.py",
    "keras/preprocessing/sequence/__init__.py",
    "keras/preprocessing/text/__init__.py",
    "keras/regularizers/__init__.py",
    "keras/utils/__init__.py",
    "keras/wrappers/__init__.py",
    "keras/wrappers/scikit_learn/__init__.py",
]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 - 2020 Tuhinshubhra
import cmseekdb.basic as cmseek
import json
def start(version, ua):
    """Check the detected WordPress version against the wpvulns.com database.

    :param version: detected WordPress version string; "0" when unknown
    :param ua: user-agent string forwarded to cmseek.getsource
    :return: [wpvdbres, result, vfc] — wpvdbres is '1' when data was fetched,
             result is the parsed JSON (or ""), vfc is the version with the
             dots stripped (lookup key for callers)
    """
    if version == "0":
        cmseek.warning("Skipping version vulnerability scan as WordPress Version wasn't detected")
        wpvdbres = '0' # fix for issue #3
        result = ""
        vfc = ""
    else:  # We have a version, scan for vulnerabilities
        cmseek.info("Checking version vulnerabilities using wpvulns.com")
        # Version with all dots removed, used by callers as a lookup key.
        vfc = version.replace('.', '')
        ws = cmseek.getsource("https://wpvulns.com/version/{0}.json".format(version), ua)
        if ws[0] == "1":
            wpvdbres = '1'  # we have the vulnerability data
            result = json.loads(ws[1])
        else:
            wpvdbres = '0'
            result = ""
            # Fixed message: corrected "Retriving" typo and the service name
            # (the request above goes to wpvulns.com, not wpvulndb).
            cmseek.error('Error Retrieving data from wpvulns.com')
    return [wpvdbres, result, vfc]
|
# /usr/bin/env python
# -*- coding: utf-8 -*-
from loguru import logger
from typing import Union
from pathlib import Path
def repo_status_to_dict(
    repodir: Union[Path, str], reponame: str = None, diff: bool = False
):
    """Summarize the state of the git repository at *repodir* as a dict.

    :param repodir: path to (or inside) the repository
    :param reponame: label used in the dict keys; defaults to the directory stem
    :param diff: NOTE(review): currently unused — the documented "add diff
        string" behavior is not implemented; confirm intent.
    :return: dict with head sha, dirty flag, dirty files and `git describe`
        output when available; {} when GitPython is missing or *repodir* is
        not a repository.
    """
    try:
        import git
        from git import Repo
    except ImportError:
        return {}
    repodir = Path(repodir)
    if not is_git_repo(str(repodir)):
        return {}
    repo = Repo(str(repodir))
    if reponame is None:
        reponame = repodir.resolve().stem
    dirty = repo.is_dirty()
    retval = {
        f"repo {reponame} id": repo.head.object.hexsha,
        f"repo {reponame} is dirty": dirty,
    }
    if dirty:
        changed = ",".join(entry.a_path for entry in repo.index.diff(None))
        retval[f"repo {reponame} dirty files"] = changed
    # `git describe` fails when the repo has no tags; log and carry on.
    try:
        retval[f"repo {reponame} describe"] = repo.git.describe()
    except git.exc.GitCommandError as e:
        logger.info(f"git describe error: {str(e)}")
    return retval
def is_git_repo(path):
    """Return True when *path* lies inside a valid git repository."""
    import git
    try:
        # Accessing git_dir raises for non-repositories.
        git.Repo(path).git_dir
        return True
    except git.exc.InvalidGitRepositoryError:
        return False
|
import floto
from floto.decider import Decider
import json
class DynamicDecider(Decider):
    """DynamicDecider reads the execution logic defined by activity tasks from the workflow
    input."""

    def __init__(self, decider_spec, identity=None):
        super().__init__(decider_spec=decider_spec, identity=identity)

    def get_decisions(self):
        """Build decisions from the activity tasks embedded in the workflow input."""
        workflow_input = self.history.get_workflow_input()
        tasks = self.get_activity_tasks_from_input(workflow_input)
        builder = floto.decider.DecisionBuilder(
            activity_tasks=tasks,
            default_activity_task_list=self.default_activity_task_list)
        self.decision_builder = builder
        self.decisions = builder.get_decisions(self.history)
        self.terminate_workflow = builder.is_terminate_workflow()

    def get_activity_tasks_from_input(self, input_):
        """Recursively search *input_* for an 'activity_tasks' entry.

        Returns the deserialized task list, or None when nothing was found.
        """
        if not isinstance(input_, dict):
            return None
        for key, value in input_.items():
            if key == 'activity_tasks':
                found = [floto.specs.serializer.get_class(t['type']).deserialized(**t)
                         for t in value]
            else:
                found = self.get_activity_tasks_from_input(value)
            if found:
                return found
        return None
|
import sys
import time
import json
import asyncio
import requests
import urllib3
from PIL import Image
import websockets.legacy.client
from captcha.chaojiying import ChaoJiYing
from captcha.tujian import TuJian
from captcha.jd_captcha import JDcaptcha_base64
from captcha.jd_yolo_captcha import JDyolocaptcha
from utils.logger import Log
from utils.config import get_config
from utils.selenium_browser import get_browser
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
async def ws_conn(ws_conn_url):
    """Open a websocket to *ws_conn_url* and wait for a single message.

    :return: the received message, or "" when the configured timeout expires.
    """
    async with websockets.legacy.client.connect(ws_conn_url) as websocket:
        try:
            message = await asyncio.wait_for(
                websocket.recv(), get_config()["sms_captcha"]["ws_timeout"])
            return message
        except asyncio.TimeoutError:
            return ""
# Module-wide logger plus print-style convenience wrappers.
logger = Log().logger

def INFO(*args):
    """Log *args* space-joined at INFO level."""
    logger.info(" ".join(str(arg) for arg in args))

def WARN(*args):
    """Log *args* space-joined at WARNING level."""
    logger.warning(" ".join(str(arg) for arg in args))

def ERROR(*args):
    """Log *args* space-joined at ERROR level."""
    logger.error(" ".join(str(arg) for arg in args))
class JDMemberCloseAccount(object):
"""
京东退店铺会员
1. 全自动(超级鹰验证)
2. 半自动(手动点击图形验证码)
"""
def __init__(self):
INFO("欢迎执行JD全自动退会程序,如有使用问题请加TG群https://t.me/jdMemberCloseAccount进行讨论")
# 初始化基础配置
self.config = get_config()
self.selenium_cfg = get_config()["selenium"]
self.shop_cfg = get_config()["shop"]
self.sms_captcha_cfg = get_config()["sms_captcha"]
self.image_captcha_cfg = get_config()["image_captcha"]
self.ocr_cfg = self.sms_captcha_cfg["ocr"]
# 初始化selenium配置
self.browser = get_browser(self.config)
self.wait = WebDriverWait(self.browser, self.selenium_cfg["selenium_timeout"])
# 初始化短信验证码配置
if not self.sms_captcha_cfg["is_ocr"]:
if not self.sms_captcha_cfg["jd_wstool"]:
from utils.listener import SmsSocket
self.sms = SmsSocket()
elif self.sms_captcha_cfg["is_ocr"]:
if self.ocr_cfg["type"] == "":
WARN("当前已开启OCR模式,但是并未选择OCR类型,请在config.yaml补充ocr.type")
sys.exit(1)
if self.ocr_cfg["type"] == "baidu":
from captcha.baidu_ocr import BaiduOCR
self.baidu_ocr = BaiduOCR(self.ocr_cfg)
elif self.ocr_cfg["type"] == "aliyun":
from captcha.aliyun_ocr import AliYunOCR
self.aliyun_ocr = AliYunOCR(self.ocr_cfg)
elif self.ocr_cfg["type"] == "easyocr":
from captcha.easy_ocr import EasyOCR
self.easy_ocr = EasyOCR()
# 初始化图形验证码配置
if self.image_captcha_cfg["type"] == "cjy":
self.cjy = ChaoJiYing(self.image_captcha_cfg)
elif self.image_captcha_cfg["type"] == "tj":
self.tj = TuJian(self.image_captcha_cfg)
elif self.image_captcha_cfg["type"] == "local":
pass
elif self.image_captcha_cfg["type"] == "yolov4":
self.JDyolo = JDyolocaptcha(self.image_captcha_cfg)
else:
WARN("请在config.yaml中补充image_captcha.type")
sys.exit(1)
def get_code_pic(self, name='code_pic.png'):
"""
获取验证码图像
:param name:
:return:
"""
# 确定验证码的左上角和右下角坐标
code_img = self.wait.until(EC.presence_of_element_located((By.XPATH, "//div[@id='captcha_modal']//div")))
location = code_img.location
size = code_img.size
_range = (int(location['x']), int(location['y']), (int(location['x']) + int(size['width'])),
(int(location['y']) + int(size['height'])))
# 将整个页面截图
self.browser.save_screenshot(name)
# 获取浏览器大小
window_size = self.wait.until(EC.presence_of_element_located((By.XPATH, "//div[@id='root']")))
width, height = window_size.size['width'], window_size.size['height']
# 图片根据窗口大小resize,避免高分辨率影响坐标
i = Image.open(name)
new_picture = i.resize((width, height))
new_picture.save(name)
# 剪裁图形验证码区域
code_pic = new_picture.crop(_range)
code_pic.save(name)
time.sleep(2)
return code_img
def get_shop_cards(self):
"""
获取加入店铺列表
:return: 返回店铺列表
"""
url = "https://api.m.jd.com/client.action?functionId=getWalletReceivedCardList_New&clientVersion=9.5.2&build" \
"=87971&client=android&d_brand=Xiaomi&d_model=M2007J3SC&osVersion=11&screen=2266*1080&partner=xiaomi001" \
"&oaid=e02a70327f315862&openudid=3dab9a16bd95e38a&eid=eidA24e181233bsdmxzC3hIpQF2nJhWGGLb" \
"%2F1JscxFOzBjvkqrXbFQyAXZmstKs0K6bUwkQ0D3s1%2F7MzLZ7JDdhztfcdZur9xPTxU1ahqtHWYb54%2FyNK&sdkVersion=30" \
"&lang=zh_CN&uuid=3dab9a16bd95e38a&aid=3dab9a16bd95e38a&area=13_1000_40488_54442&networkType=wifi" \
"&wifiBssid=c609e931512437a8806ae06b86d3977b&uts=0f31TVRjBSsu47QjbY5aZUsO5LYa1B%2F3wqL7JjlFB60vmw6" \
"%2F8Xbj74d3sWoT4CTQgX7X0M07W75JvIfz5eu7NxdNJ73NSVbgTHkdsiVZ770PEn0MWPywxr4glUdddS6uxIQ5VfPG65uoUmlB6" \
"%2BBwwDqO1Nfxv8%2Bdm15xR%2BFG4fJWb6wCFO7DFMtnoOMo2CQ8mYoECYG3qT%2Bso7P%2FKLgQcg%3D%3D&uemps=0-0&st" \
"=1620105615175&sign=65996ece830b41aabdaba32c9d782d07&sv=100"
payload = "body=%7B%22v%22%3A%224.1%22%2C%22version%22%3A1580659200%7D&"
headers = {
'Host': 'api.m.jd.com',
'cookie': self.config["cookie"],
'charset': 'UTF-8',
'accept-encoding': 'br,gzip,deflate',
'user-agent': self.config["user-agent"][1],
'cache-control': 'no-cache',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'content-length': '60'
}
card_list = []
urllib3.disable_warnings()
resp = requests.request("POST", url, headers=headers, data=payload, verify=False)
if resp.content:
ret = json.loads(resp.text)
if ret["code"] == "0":
if ret["message"] == "用户未登录":
WARN("config.yaml中的cookie值有误,请确保pt_key和pt_pin都存在,如都存在请检查cookie是否失效")
sys.exit(1)
if "cardList" not in ret["result"]:
INFO("当前卡包中会员店铺为0个")
sys.exit(0)
card_list = (ret["result"]["cardList"])
else:
ERROR(ret)
return card_list
else:
ERROR("获取卡包列表接口返回None,请检查网络")
def refresh_cache(self):
"""
利用待领卡接口刷新卡包列表缓存
:return:
"""
url = "https://api.m.jd.com/client.action?functionId=getWalletUnreceivedCardList_New&clientVersion=10.0.2" \
"&build=88569&client=android&d_brand=Xiaomi&d_model=M2007J3SC&osVersion=11&screen=2266*1080&partner" \
"=xiaomi001&oaid=e02a70327f315862&openudid=3dab9a16bd95e38a&eid=eidA24e181233bsdmxzC3hIpQF2nJhWGGLb" \
"%2F1JscxFOzBjvkqrXbFQyAXZmstKs0K6bUwkQ0D3s1%2F7MzLZ7JDdhztfcdZur9xPTxU1ahqtHWYb54%2FyNK&sdkVersion=30" \
"&lang=zh_CN&uuid=3dab9a16bd95e38a&aid=3dab9a16bd95e38a&area=13_1000_40488_54442&networkType=wifi" \
"&wifiBssid=unknown&uts=0f31TVRjBSsa33%2BKCXYEGxOEcvF5WoCTLW6zy4ICUIZSJDN7StKCM709NzfQ4TH7UyK43CcV9m" \
"8NBxDef2fv9lr5dGonowgeJ4YODX5Jeb5TRw1PUE0YmmEdsQw4TlvNc5umf1j%2FKrR%2F3FAfMh%2Bs8nQ%2BG8trnDhaJW2kJKg" \
"Hzq7N3es4kOmO4MEmUYf2putd%2BK0ZMPqJ8MfHJCta74kmAA%3D%3D&uemps=0-0&st=1623387008796&sign=d8297b1521c" \
"0d56cdf290e2de658452e&sv=100"
payload = "body=%7B%22pageNum%22%3A1%2C%22pageSize%22%3A10%2C%22v%22%3A%224.3%22%2C%22version%22%3A1580659200" \
"%7D&"
headers = {
'Host': 'api.m.jd.com',
'cookie': self.config["cookie"],
'charset': 'UTF-8',
'accept-encoding': 'br,gzip,deflate',
'user-agent': self.config["user-agent"][1],
'cache-control': 'no-cache',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'content-length': '102'
}
urllib3.disable_warnings()
resp = requests.request("POST", url, headers=headers, data=payload, verify=False)
ret = json.loads(resp.text)
if ret["code"] == "0":
return True
else:
ERROR(ret)
return False
def main(self):
# 打开京东
self.browser.get("https://m.jd.com/")
if self.config["cookie"] == "":
WARN("请先在 config.yaml 里配置好cookie")
sys.exit(1)
# 写入 cookie
self.browser.delete_all_cookies()
for cookie in self.config['cookie'].split(";", 1):
self.browser.add_cookie(
{"name": cookie.split("=")[0].strip(" "), "value": cookie.split("=")[1].strip(";"), "domain": ".jd.com"}
)
self.browser.refresh()
cache_card_list, retried = [], 0
cnt, member_close_max_number = 0, self.shop_cfg["member_close_max_number"]
disgusting_shop, black_list = False, []
while True:
# 获取店铺列表
card_list = self.get_shop_cards()
if len(card_list) == 0:
INFO("本次运行获取到的店铺数为0个,判断为没有需要注销的店铺,即将退出程序")
sys.exit(0)
# 记录一下所有请求数据,防止第一轮做完之后缓存没有刷新导致获取的链接请求失败
if len(cache_card_list) == 0:
cache_card_list = [item['brandId'] for item in card_list]
else:
if retried >= 10:
INFO("连续%d次获取到相同的店铺列表,判断为%d分钟左右的缓存仍未刷新,即将退出程序" % (retried, retried / 2))
sys.exit(0)
if not disgusting_shop:
# 每次比较新一轮的数量对比上一轮,即新的列表集合是否是旧的子集
new_card_list = [item['brandId'] for item in card_list]
if set(new_card_list) <= set(cache_card_list) and len(new_card_list) == len(cache_card_list):
INFO("当前接口获取到的店铺列表和上一轮一致,认为接口缓存还未刷新,即将尝试刷新缓存")
if self.refresh_cache():
INFO("理论上缓存已经刷新成功,如页面未成功自动刷新请及时反馈")
disgusting_shop = True
continue
else:
INFO("当前接口获取到的店铺列表和上一轮一致,认为接口缓存还未刷新,30秒后会再次尝试")
time.sleep(30)
retried += 1
continue
else:
cache_card_list = new_card_list
else:
# 发现第二次缓存,多半是无法注销的店铺
try:
INFO("糟糕,这家店铺可能无法注销,该店铺名字为 %s,请先手动跳过" % card_list[len(black_list)]["brandName"])
disgusting_shop = False
if card_list[len(black_list)] in black_list:
black_list.append(card_list[len(black_list) + 1])
else:
black_list.append(card_list[len(black_list)])
except IndexError:
INFO("好了🙆,剩下的店铺应该都是无法注销的,请手动打开手机查看对应店铺,程序即将退出")
sys.exit(0)
# 跳过无法注销的店铺
shops = []
for item in black_list:
shops.append(item["brandName"])
# 加载配置文件中需要跳过的店铺
if self.shop_cfg['skip_shops'] != "":
shops = self.shop_cfg['skip_shops'].split(",")
INFO("本轮运行获取到", len(card_list), "家店铺会员信息")
for card in card_list:
# 判断本次运行数是否达到设置
if member_close_max_number != 0 and cnt >= member_close_max_number:
INFO("已注销店铺数达到配置中允许注销的最大次数,程序退出")
sys.exit(0)
# 判断该店铺是否要跳过
if card["brandName"] in shops:
INFO("发现需要跳过的店铺", card["brandName"])
continue
try:
# 打开注销页面
self.browser.get(
"https://shopmember.m.jd.com/member/memberCloseAccount?venderId=" + card["brandId"]
)
INFO("开始注销店铺", card["brandName"])
# 检查当前店铺退会链接是否失效
# noinspection PyBroadException
try:
WebDriverWait(self.browser, 1).until(EC.presence_of_element_located(
(By.XPATH, "//p[text()='网络请求失败']")
))
INFO("当前店铺退会链接已失效,暂判定为缓存导致,正在尝试清除卡包列表缓存...")
if self.refresh_cache():
INFO("理论上缓存已经刷新成功,如项目未继续执行请及时反馈")
break
else:
INFO("卡包列表缓存清除失败,即将跳过该店铺,失效店铺链接为:")
INFO("https://shopmember.m.jd.com/member/memberCloseAccount?venderId=" + card["brandId"])
continue
except Exception as _:
pass
# 检查手机尾号是否正确
if self.shop_cfg['phone_tail_number'] != "":
if self.wait.until(EC.presence_of_element_located(
(By.XPATH, "//div[@class='cm-ec']")
)).text[-4:] != self.shop_cfg['phone_tail_number']:
INFO("当前店铺手机尾号不是规定的尾号,已跳过")
continue
# 发送短信验证码
self.wait.until(EC.presence_of_element_located(
(By.XPATH, "//button[text()='发送验证码']")
), "发送短信验证码超时 " + card["brandName"]).click()
# 要连接的websocket地址
sms_code, ws_conn_url = "", self.sms_captcha_cfg["ws_conn_url"]
# ocr识别投屏验证码
if self.sms_captcha_cfg["is_ocr"]:
if len(self.ocr_cfg["ocr_range"]) != 4:
WARN("请在config.yaml中配置 ocr_range")
sys.exit(1)
else:
_range = (self.ocr_cfg["ocr_range"])
ocr_delay_time = self.ocr_cfg["ocr_delay_time"]
INFO("刚发短信,%d秒后识别验证码" % ocr_delay_time)
time.sleep(ocr_delay_time)
if self.ocr_cfg["type"] == "baidu":
INFO("开始调用百度OCR识别")
sms_code = self.baidu_ocr.baidu_ocr(_range, ocr_delay_time)
elif self.ocr_cfg["type"] == "aliyun":
INFO("开始调用阿里云OCR识别")
sms_code = self.aliyun_ocr.aliyun_ocr(_range, ocr_delay_time)
elif self.ocr_cfg["type"] == "easyocr":
INFO("开始调用EasyOCR识别")
sms_code = self.easy_ocr.easy_ocr(_range, ocr_delay_time)
else:
try:
if self.sms_captcha_cfg["jd_wstool"]:
recv = asyncio.get_event_loop().run_until_complete(ws_conn(ws_conn_url))
else:
recv = self.sms.listener()
if recv == "":
INFO("等待websocket推送短信验证码超时,即将跳过", card["brandName"])
continue
else:
sms_code = json.loads(recv)["sms_code"]
except Exception as e:
WARN("WebSocket监听时发生了问题", e.args)
sys.exit(1)
# 输入短信验证码
self.wait.until(EC.presence_of_element_located(
(By.XPATH, "//input[@type='number']")
), "输入短信验证码超时 " + card["brandName"]).send_keys(sms_code)
time.sleep(1)
# 点击注销按钮
self.wait.until(EC.presence_of_element_located(
(By.XPATH, "//div[text()='注销会员']")
), "点击注销按钮超时 " + card["brandName"]).click()
# 利用打码平台识别图形验证码并模拟点击
def auto_identify_captcha_click():
# 分割图形验证码
code_img = self.get_code_pic()
img = open('code_pic.png', 'rb').read()
pic_str, pic_id = "", ""
if self.image_captcha_cfg["type"] == "cjy":
# 调用超级鹰API接口识别点触验证码
INFO("开始调用超级鹰识别验证码")
resp = self.cjy.post_pic(img, self.image_captcha_cfg["cjy_kind"])
if "pic_str" in resp and resp["pic_str"] == "":
INFO("超级鹰验证失败,原因为:", resp["err_str"])
else:
pic_str = resp["pic_str"]
pic_id = resp["pic_id"]
elif self.image_captcha_cfg["type"] == "tj":
# 调用图鉴API接口识别点触验证码
INFO("开始调用图鉴识别验证码")
resp = self.tj.post_pic(img, self.image_captcha_cfg["tj_type_id"])
pic_str = resp["result"]
pic_id = resp["id"]
# 处理要点击的坐标
all_list = []
xy_list = []
x = int(pic_str.split(',')[0])
xy_list.append(x)
y = int(pic_str.split(',')[1])
xy_list.append(y)
all_list.append(xy_list)
# 循环遍历点击图片
for i in all_list:
x = i[0]
y = i[1]
ActionChains(self.browser).move_to_element_with_offset(code_img, x, y).click().perform()
time.sleep(1)
# 图形验证码坐标点击错误尝试重试
# noinspection PyBroadException
try:
WebDriverWait(self.browser, 3).until(EC.presence_of_element_located(
(By.XPATH, "//p[text()='验证失败,请重新验证']")
))
INFO("验证码坐标识别出错,将上报平台处理")
# 上报错误的图片到平台
if self.image_captcha_cfg["type"] == "cjy":
self.cjy.report_error(pic_id)
elif self.image_captcha_cfg["type"] == "tj":
self.tj.report_error(pic_id)
return False
except Exception as _:
return True
# 本地识别图形验证码并模拟点击
                    def local_auto_identify_captcha_click():
                        """Recognise the click captcha with a local engine (pure-python
                        matcher or the YOLO deep-learning model) and click the match.

                        Returns True when the click passed verification, False when it
                        failed or the engine found no match.
                        """
                        for _ in range(4):
                            time.sleep(1)
                            # Captcha background image element; height 170 is the
                            # reference size the recogniser works in, so derive a
                            # zoom factor to map model coordinates to page pixels.
                            cpc_img = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="cpc_img"]')))
                            zoom = cpc_img.size['height'] / 170
                            # Base64 payloads of the background image and the small
                            # "target" picture to locate inside it.
                            cpc_img_path_base64 = self.wait.until(
                                EC.presence_of_element_located((By.XPATH, '//*[@id="cpc_img"]'))).get_attribute(
                                'src').replace("data:image/jpeg;base64,", "")
                            pcp_show_picture_path_base64 = self.wait.until(EC.presence_of_element_located(
                                (By.XPATH, '//*[@class="pcp_showPicture"]'))).get_attribute('src')
                            # Run the configured recogniser.
                            if self.image_captcha_cfg["type"] == "local":
                                INFO("正在通过本地引擎识别")
                                res = JDcaptcha_base64(cpc_img_path_base64, pcp_show_picture_path_base64)
                            else:
                                INFO("正在通过深度学习引擎识别")
                                res = self.JDyolo.JDyolo(cpc_img_path_base64, pcp_show_picture_path_base64)
                            if res[0]:
                                # res[1] holds the (x, y) match; scale by zoom and click.
                                ActionChains(self.browser).move_to_element_with_offset(
                                    cpc_img, int(res[1][0] * zoom),
                                    int(res[1][1] * zoom)
                                ).click().perform()
                                # Treat the "verification failed" toast as a miss.
                                # noinspection PyBroadException
                                try:
                                    WebDriverWait(self.browser, 3).until(EC.presence_of_element_located(
                                        (By.XPATH, "//p[text()='验证失败,请重新验证']")
                                    ))
                                    return False
                                except Exception as _:
                                    return True
                            else:
                                INFO("识别未果")
                                # Refresh the captcha for another attempt.
                                self.wait.until(
                                    EC.presence_of_element_located((By.XPATH, '//*[@class="jcap_refresh"]'))).click()
                            # NOTE(review): this return sits inside the for body, so the
                            # range(4) loop only ever runs one iteration — confirm whether
                            # it was meant to be outside the loop to enable 4 retries.
                            return False
# 识别点击,如果有一次失败将再次尝试一次,再失败就跳过
if self.image_captcha_cfg["type"] in ["local", "yolov4"]:
if not local_auto_identify_captcha_click():
INFO("验证码位置点击错误,尝试再试一次")
local_auto_identify_captcha_click()
else:
if not auto_identify_captcha_click():
INFO("验证码位置点击错误,尝试再试一次")
auto_identify_captcha_click()
# 解绑成功页面
self.wait.until(EC.presence_of_element_located(
(By.XPATH, "//div[text()='解绑会员成功']")
), "图形验证码识别超时 " + card["brandName"])
time.sleep(1)
cnt += 1
INFO("本次运行已成功注销店铺会员数量为:", cnt)
except Exception as e:
ERROR("发生了一点小问题:", e.args)
if self.config["debug"]:
import traceback
traceback.print_exc()
INFO("本轮店铺已执行完,即将开始获取下一轮店铺")
if __name__ == '__main__':
    # Script entry point: run the JD shop-membership cancellation workflow.
    JDMemberCloseAccount().main()
|
from sqlalchemy.dialects import registry

# Make the BigQuery pyodbc dialect resolvable both via the bare "bigquery://"
# scheme and via the fully qualified "bigquery+pyodbc://" URL form.
for _scheme in ("bigquery", "bigquery.pyodbc"):
    registry.register(
        _scheme,
        "sqlalchemy_bigquery.pyodbc",
        "BigQueryDialect_pyodbc",
    )

# from sqlalchemy.testing.plugin.pytestplugin import *
|
import tempfile
import shutil
from yenerate import yenerate
from unittest import TestCase, main
class TestYenerate(TestCase):
    """Smoke tests for the yenerate image generator and its text wrapper."""

    def setUp(self):
        # Fresh scratch directory for each test case.
        self.root = tempfile.mkdtemp(prefix="yenerate_test")

    def tearDown(self):
        shutil.rmtree(self.root)

    def test_yenerate(self):
        # Generating an image for a simple phrase must yield a result object.
        self.assertIsNotNone(yenerate.create_image("sample"))

    def test_wrap_line(self):
        text = "this is a short text"
        # A generous width leaves the text untouched; a tight one wraps it.
        for max_width, expected in ((100, text), (10, "this is a\nshort text")):
            wrapped, _ = yenerate.wrap_text(text, max_width=max_width)
            self.assertEqual(wrapped, expected)
if __name__ == "__main__":
    # Allow running this test module directly with `python`.
    main()
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_streaming_problem.py: Checks correctness of azure.quantum.StreamingProblem.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import json
from typing import List, Optional
from azure.quantum.aio.optimization import (
Problem,
ProblemType,
StreamingProblem
)
from azure.quantum.optimization import Term
from common import QuantumTestBase
class TestStreamingProblem(QuantumTestBase):
    """Integration tests for StreamingProblem uploads.

    Builds a StreamingProblem and a plain Problem side by side, feeds both
    the same terms, then checks the streamed statistics and the uploaded
    payload against the locally assembled problem.
    """
    async def __test_upload_problem(
        self,
        count: int,
        terms_thresh: int,
        size_thresh: int,
        problem_type: ProblemType = ProblemType.ising,
        initial_terms: Optional[List[Term]] = None,
        **kwargs
    ):
        """Upload `count` generated terms (plus `initial_terms`) through a
        StreamingProblem using the given chunking thresholds, and compare the
        round-tripped payload with a locally built Problem.

        kwargs may override the expected avg/max/min coupling statistics.
        """
        # Fix: the original used a mutable default argument ([]) which is
        # shared across calls and could accumulate terms between tests if
        # Problem keeps and mutates the passed list. Use a None sentinel.
        if initial_terms is None:
            initial_terms = []
        if not (self.in_recording or self.is_live):
            # Temporarily disabling this test in playback mode
            # due to multiple calls to the storage API
            # that need to have a request id to distinguish
            # them while playing back
            print("Skipping this test in playback mode")
            return
        ws = self.create_async_workspace()
        sProblem = await StreamingProblem.create(
            ws, name="test", problem_type=problem_type, terms=initial_terms
        )
        rProblem = Problem(
            "test", problem_type=problem_type, terms=initial_terms
        )
        sProblem.upload_terms_threshold = terms_thresh
        sProblem.upload_size_threshold = size_thresh
        for i in range(count):
            await sProblem.add_term(c=i, indices=[i, i + 1])
            rProblem.add_term(c=i, indices=[i, i + 1])
        self.assertEqual(problem_type, sProblem.problem_type)
        self.assertEqual(problem_type.name, sProblem.stats["type"])
        self.assertEqual(
            count + len(initial_terms), sProblem.stats["num_terms"]
        )
        self.assertEqual(
            self.__kwarg_or_value(kwargs, "avg_coupling", 2),
            sProblem.stats["avg_coupling"],
        )
        self.assertEqual(
            self.__kwarg_or_value(kwargs, "max_coupling", 2),
            sProblem.stats["max_coupling"],
        )
        self.assertEqual(
            self.__kwarg_or_value(kwargs, "min_coupling", 2),
            sProblem.stats["min_coupling"],
        )
        # Round trip: upload, download, and compare with the local problem.
        uri = await sProblem.upload(ws)
        data = await sProblem.download()
        uploaded = json.loads(data.serialize())
        local = json.loads(rProblem.serialize())
        self.assertEqual(uploaded, local)
    def __kwarg_or_value(self, kwarg, name, default):
        """Return kwarg[name] when present, else default."""
        return kwarg.get(name, default)
    def test_streaming_problem_small_chunks(self):
        self.get_async_result(self.__test_upload_problem(4, 1, 1))
    def test_streaming_problem_large_chunks(self):
        self.get_async_result(self.__test_upload_problem(4, 1000, 10e6))
    def test_streaming_problem_pubo(self):
        self.get_async_result(self.__test_upload_problem(4, 1, 1, ProblemType.pubo))
    def test_streaming_problem_initial_terms(self):
        self.get_async_result(self.__test_upload_problem(
            4,
            1,
            1,
            initial_terms=[
                Term(w=10, indices=[0, 1, 2]),
                Term(w=20, indices=[1, 2, 3]),
            ],
            avg_coupling=(4 * 2 + 6) / 6,
            max_coupling=3,
        ))
    def check_all(self):
        """Convenience helper to run every test method in sequence."""
        self.test_streaming_problem_small_chunks()
        self.test_streaming_problem_large_chunks()
        self.test_streaming_problem_pubo()
        self.test_streaming_problem_initial_terms()
|
# Copyright 2018 Mathias Burger <mathias.burger@gmail.com>
#
# SPDX-License-Identifier: MIT
class Layer:
    """Plain data holder for a single map layer parsed from a properties dict."""

    def __init__(self, properties: dict) -> None:
        super().__init__()
        # All four keys are required; a missing key raises KeyError.
        name, visible, opacity, position = (
            properties['name'],
            properties['visible'],
            properties['opacity'],
            properties['position'],
        )
        self.name = name
        self.visible = visible
        # Opacity may arrive as a string; normalize it to a float.
        self.opacity = float(opacity)
        self.position = position
|
# Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import platform
# Make sure that any global generic setup is run
from . import lstools_base # noqa: F401
import logging
# Library-style logging: attach a NullHandler so the importing application
# decides whether and how mbedls log records are emitted.
logger = logging.getLogger("mbedls.main")
logger.addHandler(logging.NullHandler())
# Drop the module reference to keep the package namespace clean.
del logging
def create(**kwargs):
    """! Factory used to create host OS specific mbed-lstools object
    :param kwargs: keyword arguments to pass along to the constructors
    @return Returns MbedLsTools object or None if host OS is not supported
    """
    mbed_os = mbed_os_support()
    # Import lazily so only the module for the detected host OS is loaded.
    if mbed_os == "Windows7":
        from .windows import MbedLsToolsWin7
        return MbedLsToolsWin7(**kwargs)
    if mbed_os == "LinuxGeneric":
        from .linux import MbedLsToolsLinuxGeneric
        return MbedLsToolsLinuxGeneric(**kwargs)
    if mbed_os == "Darwin":
        from .darwin import MbedLsToolsDarwin
        return MbedLsToolsDarwin(**kwargs)
    # Unsupported host OS (mbed_os is None).
    return None
def mbed_os_support():
    """! Function used to determine if host OS is supported by mbed-lstools
    @return Returns None if host OS is not supported else return OS short name
    @details This function should be ported for new OS support
    """
    # Map (os.name, platform.system()) pairs onto the internal OS short names.
    supported = {
        ("nt", "Windows"): "Windows7",
        ("posix", "Linux"): "LinuxGeneric",
        ("posix", "Darwin"): "Darwin",
    }
    os_name, system = mbed_lstools_os_info()[:2]
    return supported.get((os_name, system))
def mbed_lstools_os_info():
    """! Returns information about host OS
    @return Tuple of (os.name, system, release, version, sys.platform)
    """
    return (
        os.name,
        platform.system(),
        platform.release(),
        platform.version(),
        sys.platform,
    )
def mock_platform(mbeds, args):
    """Apply the --mock command line tokens to the given mbeds object.

    Each comma-separated token is either "id:name" (optionally prefixed with
    '+' or '-' to select the operation) or a bare "-id" / "!id" form that
    removes a mapping without naming a platform.
    """
    for token in args.mock.split(","):
        if ":" in token:
            mid, platform_name = token.split(":")
            oper = "+"  # Default operation
            if mid and mid[0] in ("+", "-"):
                # Leading operation character; strip it from the id.
                oper, mid = mid[0], mid[1:]
            mbeds.mock_manufacture_id(mid, platform_name, oper=oper)
        elif token and token[0] in ("-", "!"):
            # Operations where do not specify data after colon: --mock=-1234,-7678
            mbeds.mock_manufacture_id(token[1:], "dummy", oper=token[0])
        else:
            logger.error("Could not parse mock from token: '%s'", token)
|
from constants import LDA_DIR, FEAT_DATA_DIR
from utils import series_to_str
import pickle
import pandas as pd
from time import time
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
#n_topics = [20, 50, 100]
#n_topics = [10, 30, 50, 70, 90]
# Topic counts to sweep; earlier sweeps kept above for reference.
n_topics = [10, 60, 110, 160, 210]
t0 = time()
# Load the pre-computed product term-frequency matrix.
with open(FEAT_DATA_DIR + 'product_tf_matrix', 'rb') as f:
    tf_matrix = pickle.load(f)
# Fit one LDA model per topic count and pickle each to LDA_DIR.
for n in n_topics:
    print("number of topics: %d"%n)
    # NOTE(review): scikit-learn renamed `n_topics` to `n_components` in
    # 0.21 — confirm the pinned sklearn version still accepts `n_topics`.
    lda = LatentDirichletAllocation(n_topics=n,
                                    evaluate_every = 5,
                                    max_iter = 1000,
                                    n_jobs = 1,
                                    verbose = 1)
    lda.fit(tf_matrix)
    with open(LDA_DIR + 'p_lda_%d.model'%n, 'wb') as f:
        pickle.dump(lda, f, pickle.HIGHEST_PROTOCOL)
    # Elapsed time is cumulative since script start, not per model.
    print("done in %0.3fs." % (time() - t0))
print("finished!")
|
from core.composer.chain import Chain
from core.composer.node import NodeGenerator
from core.composer.optimisers.gp_operators import nodes_from_height
from core.repository.model_types_repository import ModelTypesIdsEnum
def chain_example():
    """Build the three-level demo chain pictured below and return it.

    # XG
    # | \
    # XG KNN
    # | \ | \
    # LR LDA LR LDA
    """
    chain = Chain()
    # Create the three secondary nodes first (same creation order as before):
    # the XG root plus its two children, XG and KNN.
    secondary_nodes = [NodeGenerator.secondary_node(model)
                       for model in (ModelTypesIdsEnum.xgboost,
                                     ModelTypesIdsEnum.xgboost,
                                     ModelTypesIdsEnum.knn)]
    root_of_tree, first_child, second_child = secondary_nodes
    for child in (first_child, second_child):
        # Each mid-level node gets a LR and an LDA primary node as inputs.
        for leaf_model in (ModelTypesIdsEnum.logit, ModelTypesIdsEnum.lda):
            leaf = NodeGenerator.primary_node(leaf_model)
            child.nodes_from.append(leaf)
            chain.add_node(leaf)
        chain.add_node(child)
        root_of_tree.nodes_from.append(child)
    chain.add_node(root_of_tree)
    return chain
def test_nodes_from_height():
    """nodes_from_height at depth 1 must yield the root's direct parents."""
    chain = chain_example()
    found_nodes = nodes_from_height(chain.root_node, 1)
    expected_nodes = list(chain.root_node.nodes_from)
    assert all(expected == found
               for expected, found in zip(expected_nodes, found_nodes))
|
from typing import List
import re
from .types import Components
from .util import need_edition
version_regexp = re.compile(r"Release Version:\s*v?(\S+)", re.IGNORECASE)
hash_regexp = re.compile(r"Git Commit Hash:\s*(\w{40})", re.IGNORECASE)
edition_regexp = re.compile(r"Edition:\s*(Community|Enterprise)")
class Matcher:
    """Validates a component's version banner against expected values.

    Parses the captured `--version` output (input_string) and checks the
    release version, git commit hash and edition, accumulating human-readable
    mismatch messages in `result`.
    """
    component: str
    input_string: str
    version: str
    hash: str
    edition: str
    result: List

    def __init__(self, component, input, version, hash, edition) -> None:
        self.component = component
        self.input_string = input
        self.version = version
        self.hash = hash
        self.edition = edition
        self.result = []
        # Strip a leading "v" (e.g. "v5.0.0" -> "5.0.0"). startswith() is
        # safe on an empty version string, unlike the original version[0]
        # indexing, which raised IndexError.
        if self.version.startswith("v"):
            self.version = self.version[1:]
    def match(self) -> List[str]:
        """Run all checks and return the accumulated error messages
        (empty list when everything matches)."""
        self.match_version()
        self.match_edition()
        self.match_hash()
        return self.result
    def match_version(self):
        """Check the 'Release Version' line; importer has no version line."""
        if self.component == Components.importer:
            return
        match = version_regexp.search(self.input_string)
        if match is None:
            self.result.append("version not found")
        elif match.groups()[0] != self.version:
            self.result.append(f"invalid version: [{match.groups()[0]}]; want [{self.version}]")
    def match_edition(self):
        """Check the 'Edition' line for components that report one."""
        if self.component == Components.tiflash:
            return
        if not need_edition(self.component):
            return
        match = edition_regexp.search(self.input_string)
        if match is None:
            self.result.append("edition not found")
        elif match.groups()[0].lower() != self.edition:
            self.result.append(f"invalid edition: [{match.groups()[0]}]; want [{self.edition}]")
    def match_hash(self):
        """Check the 'Git Commit Hash' line; importer has no hash line."""
        if self.component == Components.importer:
            return
        match = hash_regexp.search(self.input_string)
        if match is None:
            self.result.append("hash not found")
        elif match.groups()[0] != self.hash:
            self.result.append(f"invalid hash: [{match.groups()[0]}]; want [{self.hash}]")
|
from __future__ import annotations
from typing import Type, Optional, List
from Exceptions.BranchHaveDiverged import BranchHaveDiverged
from Exceptions.BranchNotExist import BranchNotExist
from Exceptions.GitMergeConflictError import GitMergeConflictError
from Exceptions.NoBranchSelected import NoBranchSelected
from Exceptions.NotCleanWorkingTree import NotCleanWorkingTree
from Exceptions.RemoteDivergence import RemoteDivergence
from FlexioFlow.StateHandler import StateHandler
from Log.Log import Log
from Schemes.UpdateSchemeVersion import UpdateSchemeVersion
from Branches.Branches import Branches
from VersionControl.Git.Branches.GitFlowCmd import GitFlowCmd
from VersionControl.Git.GitCmd import GitCmd
from VersionControlProvider.Github.Message import Message
from VersionControlProvider.Issue import Issue
from VersionControlProvider.Topic import Topic
from ConsoleColors.Fg import Fg
from Core.ConfigHandler import ConfigHandler
class Finish:
    """Finishes a gitflow release branch.

    Orchestrates the end-of-release workflow: pull master and develop, merge
    the release branch into master and tag it, bump the next dev version and
    merge back into develop, then optionally delete the release branch and
    close the related issue.
    """
    def __init__(self,
                 state_handler: StateHandler,
                 config_handler: ConfigHandler,
                 issue: Optional[Type[Issue]],
                 topics: Optional[List[Topic]],
                 keep_branch: bool,
                 close_issue: bool
                 ):
        # Collaborators and flags driving the finish workflow.
        self.__state_handler: StateHandler = state_handler
        self.__config_handler: ConfigHandler = config_handler
        self.__issue: Optional[Type[Issue]] = issue
        self.__topics: Optional[List[Topic]] = topics
        self.__git: GitCmd = GitCmd(self.__state_handler)
        self.__gitflow: GitFlowCmd = GitFlowCmd(self.__state_handler,config_handler)
        self.__keep_branch: bool = keep_branch
        self.__close_issue: bool = close_issue
        # Release branch name and the version captured now; the version is
        # re-checked after merging to detect divergence during the merge.
        self.__name: str = self.__git.get_branch_name_from_git(config_handler.release())
        self.__version_check: str = self.__state_handler.version_as_str()
    def __init_gitflow(self) -> Finish:
        """Initialise the gitflow configuration; returns self for chaining."""
        self.__gitflow.init_config()
        return self
    def __checkout_current_release(self):
        """Checkout the release branch recorded at construction time."""
        self.__git.checkout_with_branch_name(self.__name)
    def __pull_develop(self) -> Finish:
        """Checkout develop and pull it (best effort) from the remote."""
        # if self.__git.has_remote() and not self.__git.is_local_remote_equal(Branches.DEVELOP.value):
        # raise RemoteDivergence(Branches.DEVELOP.value + 'should be merged with remote')
        self.__git.checkout(self.__config_handler.develop()).try_to_pull()
        return self
    def __pull_master(self) -> Finish:
        """Checkout master and pull it (best effort) from the remote."""
        # if self.__git.has_remote() and not self.__git.is_local_remote_equal(Branches.MASTER.value):
        # raise RemoteDivergence(Branches.MASTER.value + 'should be merged with remote')
        self.__git.checkout(self.__config_handler.master()).try_to_pull()
        return self
    def __merge_master(self) -> Finish:
        """Merge the release branch into master, tag and push the result,
        then merge the new tag back into the release branch.

        Raises GitMergeConflictError on conflicts or version divergence.
        """
        # NOTE(review): the message below is missing a space after __name
        # ("...into ..."); left untouched because it is a runtime string.
        message: Message = Message(
            message='Merge ' + self.__name + 'into ' + self.__config_handler.master(),
            issue=self.__issue
        )
        message_str: str = ''
        if self.__close_issue:
            message_str = message.with_close()
        else:
            message_str = message.message
        # Empty commit on the release branch carrying the merge message.
        self.__git.commit(
            message_str,
            ['--allow-empty']
        )
        # Merge release into master, preferring the release side on conflict.
        self.__git.checkout(self.__config_handler.master()).merge_with_version_message(
            branch=self.__config_handler.release(),
            message=message_str,
            options=['--no-ff', '--strategy-option', 'theirs']
        )
        if self.__git.has_conflict():
            raise GitMergeConflictError(self.__config_handler.master(), self.__git.get_conflict())
        tag: str = self.__state_handler.version_as_str()
        # Guard: the version file must not have changed during the merge.
        if tag != self.__version_check:
            Log.error('Version have diverged during merge : ' + tag + 'should be ' + self.__version_check)
            raise GitMergeConflictError(self.__config_handler.master())
        self.__git.tag(
            tag,
            ' '.join([
                "'From Finished release : ",
                self.__name,
                'tag : ',
                tag,
                "'"])
        ).try_to_push_tag(tag).try_to_push()
        # Align the release branch with the freshly created tag.
        self.__git.checkout(self.__config_handler.release()).merge_with_version_message_from_branch_name(
            branch=tag,
            message=Message(
                message='Merge ' + tag + ' tag into ' + self.__name,
                issue=self.__issue
            ).with_ref(),
            options=['--no-ff']
        )
        return self
    def __merge_develop(self) -> Finish:
        """Bump to the next dev version on the release branch, merge it into
        develop and push, then drop the temporary bump commit from release.

        Raises GitMergeConflictError when the merge into develop conflicts.
        """
        self.__checkout_current_release()
        # Prepare the next development iteration version and persist it.
        self.__state_handler.next_dev_minor()
        self.__state_handler.set_dev()
        self.__state_handler.write_file()
        UpdateSchemeVersion.from_state_handler(self.__state_handler)
        self.__git.commit(
            Message(
                message=''.join(["'Finish release ready for dev next release : ", self.__state_handler.version_as_str()]),
                issue=self.__issue
            ).with_ref()
        )
        self.__git.checkout(self.__config_handler.develop()).merge_with_version_message(
            branch=self.__config_handler.release(),
            message=Message(
                message='',
                issue=self.__issue
            ).with_ref()
        )
        if self.__git.has_conflict():
            Log.error("""
            {fg_fail}CONFLICT : resolve conflict, and remove your release branch manually{reset_fg}
            """.format(
                fg_fail=Fg.FAIL.value,
                reset_fg=Fg.RESET.value,
            ))
            raise GitMergeConflictError(self.__config_handler.develop(), self.__git.get_conflict())
        # The bump commit was only needed for the merge; remove it from release.
        self.__git.checkout(self.__config_handler.release()).undo_last_commit()
        self.__git.checkout(self.__config_handler.develop()).try_to_push()
        return self
    def __delete_release(self) -> Finish:
        """Delete the release branch locally and remotely unless keep_branch."""
        if not self.__keep_branch:
            self.__git.delete_local_branch_from_name(self.__name)
            self.__git.try_delete_remote_branch_from_name(self.__name)
        return self
    def __finish_release(self):
        """Run the merge sequence; raises BranchNotExist when no release exists."""
        if not self.__gitflow.has_release(False):
            raise BranchNotExist(self.__config_handler.release())
        self.__merge_master().__merge_develop()
        self.__delete_release()
    def process(self):
        """Entry point: validate preconditions, then finish the release.

        Raises NoBranchSelected, NotCleanWorkingTree or BranchHaveDiverged
        when the working copy is not in a finishable state.
        """
        if not self.__gitflow.is_release():
            raise NoBranchSelected('Checkout to release branch before')
        if not self.__git.is_clean_working_tree():
            raise NotCleanWorkingTree()
        self.__pull_master()
        # Abort when master holds commits missing from the release branch.
        if self.__git.is_branch_ahead(self.__config_handler.master(), self.__name):
            Log.error("""
            {fg_fail}{list}{reset_fg}
            """.format(
                fg_fail=Fg.FAIL.value,
                list=self.__git.list_commit_diff(self.__config_handler.master(), self.__name),
                reset_fg=Fg.RESET.value,
            ))
            self.__checkout_current_release()
            raise BranchHaveDiverged(
                """
                {fg_fail}{message}{reset_fg}
                """.format(
                    fg_fail=Fg.FAIL.value,
                    message='Oups !!! Master:'+self.__config_handler.master()+' have commit ahead ' + self.__name + ' merge before',
                    reset_fg=Fg.RESET.value,
                )
            )
        self.__pull_develop().__finish_release()
|
# Welcome to your Focus Day Project. Replace this comment with something that introduces the user to your project. Be sure to mention the Focus Day and your initials and graduation year. (ie This game is for Pool Volume Day and is written by ML '23.)
# Also, be sure to use comments throughout your program. Use good programming practices, including organization, documentation and citation. Yes, you need to cite your sources! (You can do so using comments at the bottom of your code.)
#random number generator
import random
#input list for starting the game
# Accepted affirmative answers for starting the game.
start_list = ["Yes", "yes", "y", "Y", "yep", "Yep", "Yeah", "yeah", "ok", "Ok", "OK", "Okay", "okay", "k", "K"]
#questions
# Twenty multiple-choice questions; the correct letters cycle a, b, c, d
# through the list (see the queans1..queans4 keys below and ask_question).
question1 = ("What is the difference between an epidemic and a pandemic?" + '\n' + "a. the size of the region the disease affects" + '\n' + "b. the mortality rate" + '\n' + "c. how quickly the disease spreads" + '\n' + "d. virus vs. bacteria")
question2 = ("Where was the first reported case of the Spanish Flu?" + '\n' + "a. Spain " + '\n' + "b. Kansas" + '\n' + "c. Venezuela" + '\n' + "d. Egypt")
question3 = ("When was the first confirmed case of coronavirus?" + '\n' + "a. January 21, 2020" + '\n' + "b. December 2, 2019" + '\n' + "c. December 31, 2019" + '\n' + "d. February 3, 2020")
question4 = ("What does CDC stand for?" + '\n' + "a. Center for the Doctors of California" + '\n' + "b. Coronavirus Death Count" + '\n' + "c. California Department of Care" + '\n' + "d. Center for Disease Control")
question5 = ("What does an epidemiologist mainly do?" + '\n' + "a. Study diseases and how to control them" + '\n' + "b. Help surgeons deliver the right amount of anesthetic" + '\n' + "c. Study groups of people within certain demographics" + '\n' + "d. Write newspaper and magazine articles about disease outbreaks")
question6 = ("Which century was the Black Plague in?" + '\n' + "a. 1200's" + '\n' + "b. 1300's" + '\n' + "c. 1400's" + '\n' + "d. 1500's")
question7 = ("How many people are estimated to have died from the Black Plague?" + '\n' + "a. 100,000-600,000" + '\n' + "b. 3 million-12 million" + '\n' + "c. 75 million-200million" + '\n' + "d. 250 million+")
question8 = ("What is the mortality rate of rabies?" + '\n' + "a. ~61%" + '\n' + "b. ~64%" + '\n' + "c. ~89%" + '\n' + "d. ~100%")
question9 = ("Which of the following is NOT a symptom of yellow fever?" + '\n' + "a. Swollen lymph nodes" + '\n' + "b. Yellowing skin and/or eyes" + '\n' + "c. Internal bleeding" + '\n' + "d. Organ failure")
question10 = ("What is Typhoid Mary's real name?" + '\n' + "a. Mary J. Blige" + '\n' + "b. Mary Mallon" + '\n' + "c. Mary Pope Osborn" + '\n' + "d. Mary Wollstonecraft")
question11 = ("Which outbreak started in 1918?" + '\n' + "a. Cocolizti Epidemic" + '\n' + "b. Yellow Fever Epidemic" + '\n' + "c. Spanish Influenza" + '\n' + "d. Asian Flu")
question12 = ("About how many people have died from AIDS?" + '\n' + "a. 700,000" + '\n' + "b. 18 million" + '\n' + "c. 98 million" + '\n' + "d. 35 million")
question13 = ("What is another name for smallpox?" + '\n' + "a. Variola Major/Minor" + '\n' + "b. Pharyngitis" + '\n' + "c. Anaplasmosis" + '\n' + "d. Valley Fever")
question14 = ("How is mononucleosis known for being transmitted?" + '\n' + "a. Air" + '\n' + "b. Kissing" + '\n' + "c. Skin contact" + '\n' + "d. It isn't contagious")
question15 = ("What is the annual leading cause of death?" + '\n' + "a. Respiratory diseases" + '\n' + "b. Alzheimer's" + '\n' + "c. Heart disease" + '\n' + "d. Cancer")
question16 = ("Approximately how many 'rare' diseases are there?" + '\n' + "a. 2,100" + '\n' + "b. 3,000" + '\n' + "c. 6,800" + '\n' + "d. 7,000")
question17 = ("Which of the following is NOT a common symptom of Lyme Disease?" + '\n' + "a. Shortness of breath" + '\n' + "b. Fever" + '\n' + "c. Chills" + '\n' + "d. Muscle aches")
question18 = ("How common is Alzheimer's in people over 65?" + '\n' + "a. 1 in 11" + '\n' + "b. 1 in 14" + '\n' + "c. 1 in 16" + '\n' + "d. 1 in 63")
question19 = ("How many adults in the US are considered obese?" + '\n' + "a. 480,000" + '\n' + "b. 36 million" + '\n' + "c. 70 million" + '\n' + "d. 99 million")
question20 = ("What is believed to be the oldest human disease?" + '\n' + "a. Smallpox" + '\n' + "b. Malaria" + '\n' + "c. Cholera" + '\n' + "d. Leprosy")
trivia_list = [question1, question2, question3, question4, question5, question6, question7, question8, question9, question10, question11, question12, question13, question14, question15, question16, question17, question18, question19, question20]
#question answers
# Accepted input letters for each answer choice (upper or lower case).
queans1 = ["A", "a"]
queans2 = ["B", "b"]
queans3 = ["C", "c"]
queans4 = ["D", "d"]
#function to ask question and bet
def ask_question(queans1, trivia_list, random_cases):
    """Ask one random trivia question, take a bet, and update the case count.

    The player first bets a number of cases, then answers with a letter.
    A correct answer subtracts the bet from the running case count; a wrong
    one adds it. Prints the result and returns the new case count.

    The correct letters cycle a, b, c, d through the question list, so the
    answer key for question i is determined by i % 4. This replaces the
    original twenty copy-pasted per-question branches with one data-driven
    path; the printed output and arithmetic are unchanged.

    queans1 is kept in the signature for backward compatibility with the
    existing call site; the answer keys are derived locally.
    """
    # Accepted letters per choice, in the cyclic order of the question list.
    answer_keys = [["A", "a"], ["B", "b"], ["C", "c"], ["D", "d"]]
    x = random.choice(trivia_list)
    print(x)
    correct_answers = answer_keys[trivia_list.index(x) % 4]
    bet = int(input("How many cases are you betting? "))
    answer = input("Which letter is the correct answer? (A, B, C, or D) ")
    if answer in correct_answers:
        print("Correct!")
        random_cases -= bet
    else:
        print("Incorrect")
        random_cases += bet
    print("There are now " + str(random_cases) + " cases")
    return random_cases
#print instructions for user/start game
start = input("Welcome to 'Cure That Disease!' To start the game, you will be given a number of cases between 1 and 5000. This is how many cases you have to begin with. You will then be given a series of questions. Before each question, you can 'bet' a number of cases. If you get the question right, that number is taken off your case count. If you get it wrong, it is added to your case count. Be careful, because if your case count goes over 5000, you lose the game. There are only 20 questions, so keep that in mind when making your bets. Your objective is to get down to 0 cases. Are you ready? " )
# Keep prompting until the player gives an accepted affirmative answer.
while start not in start_list:
    print("Sorry, I didn't understand that. Try typing in a different answer.")
    start = input("Are you ready? ")
#number of cases
# NOTE(review): randint(1,5001) can return 5001, and the loop condition below
# excludes 5000 — a 5000/5001 start skips the game entirely; confirm intent.
random_cases = random.randint(1,5001)
print ("We have just discovered a new disease! There are already " + str(random_cases) + " confirmed cases.")
#naming the disease
disease_name = input("What should we call this disease? ")
#trivia questions loop, goes until 0 or 5000
# Play rounds until the player cures the disease (0) or loses (>5000).
while 0<random_cases<5000:
    random_cases = ask_question(queans1, trivia_list, random_cases)
    if random_cases>5000:
        print("Oops! " + disease_name + " has spread to too many people. Better luck next time!")
        break
    elif random_cases == 0:
        print("Congrats! You have cured " + disease_name + "!")
        break
#references
#https://www.cdc.gov/diseasesconditions/az/a.html
#https://www.cdc.gov/nchs/fastats/deaths.htm
#https://www.dailymail.co.uk/sciencetech/article-2568579/Leprosy-oldest-disease-humans-Bacteria-existed-MILLIONS-years-infected-ancestors-claims-study.html
#https://en.wikipedia.org/wiki/Obesity_in_the_United_States
#https://www.nhs.uk/conditions/alzheimers-disease/
#https://www.cdc.gov/lyme/signs_symptoms/index.html
#https://www.findacure.org.uk/rare-diseases/
#https://www.cdc.gov/nchs/fastats/leading-causes-of-death.htm
#https://simple.wikipedia.org/wiki/Smallpox
#https://machinelearningmastery.com/how-to-generate-random-numbers-in-python/
#https://www.livescience.com/worst-epidemics-and-pandemics-in-history.html
#https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3959940/
#https://www.famousbirthdays.com/names/mary.html
#https://www.mayoclinic.org/diseases-conditions/plague/symptoms-causes/syc-20351291
#https://www.mayoclinic.org/diseases-conditions/yellow-fever/symptoms-causes/syc-20353045
#https://www.who.int/news-room/fact-sheets/detail/rabies
#https://www.history.com/topics/middle-ages/black-death
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._builds_operations import build_cancel_request_initial, build_get_log_link_request, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BuildsOperations:
    """BuildsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerregistry.v2018_02_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Keep references to the shared pipeline / (de)serialization machinery
        # that every operation below uses to build and execute requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip_token: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.BuildListResult"]:
        """Gets all the builds for a registry.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param filter: The builds filter to apply on the operation.
        :type filter: str
        :param top: $top is supported for get list of builds, which limits the maximum number of builds
         to return.
        :type top: int
        :param skip_token: $skipToken is supported on get list of builds, which provides the next page
         in the list of builds.
        :type skip_token: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BuildListResult or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    filter=filter,
                    top=top,
                    skip_token=skip_token,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: the service-provided next_link already
                # encodes the continuation; re-issue it as a plain GET.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    filter=filter,
                    top=top,
                    skip_token=skip_token,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize("BuildListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        **kwargs: Any
    ) -> "_models.Build":
        """Gets the detailed information for a given build.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param build_id: The build ID.
        :type build_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Build, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Build"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_id=build_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Build', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}'}  # type: ignore
    # Initial request of the update long-running operation. Application code
    # should call begin_update(), which wraps this in an AsyncLROPoller.
    async def _update_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        build_update_parameters: "_models.BuildUpdateParameters",
        **kwargs: Any
    ) -> "_models.Build":
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Build"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(build_update_parameters, 'BuildUpdateParameters')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_id=build_id,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 and 201 carry a Build payload.
        if response.status_code == 200:
            deserialized = self._deserialize('Build', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Build', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        build_update_parameters: "_models.BuildUpdateParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Build"]:
        """Patch the build properties.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param build_id: The build ID.
        :type build_id: str
        :param build_update_parameters: The build update properties.
        :type build_update_parameters:
         ~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Build or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Build"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial request.
            # cls=lambda x,y,z: x keeps the raw pipeline response for the poller.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                build_id=build_id,
                build_update_parameters=build_update_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Final-response deserializer invoked by the poller on completion.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Build', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}'}  # type: ignore
    @distributed_trace_async
    async def get_log_link(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        **kwargs: Any
    ) -> "_models.BuildGetLogResult":
        """Gets a link to download the build logs.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param build_id: The build ID.
        :type build_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BuildGetLogResult, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildGetLogResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildGetLogResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_log_link_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_id=build_id,
            template_url=self.get_log_link.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BuildGetLogResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_log_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}/getLogLink'}  # type: ignore
    # Initial request of the cancel long-running operation. Application code
    # should call begin_cancel(), which wraps this in an AsyncLROPoller.
    async def _cancel_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        **kwargs: Any
    ) -> None:
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_cancel_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_id=build_id,
            template_url=self._cancel_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}/cancel'}  # type: ignore
    @distributed_trace_async
    async def begin_cancel(
        self,
        resource_group_name: str,
        registry_name: str,
        build_id: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Cancel an existing build.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param build_id: The build ID.
        :type build_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial request.
            # cls=lambda x,y,z: x keeps the raw pipeline response for the poller.
            raw_result = await self._cancel_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                build_id=build_id,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Cancel returns no body; only the optional cls hook produces output.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}/cancel'}  # type: ignore
|
import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load raw Titanic-style training data from a CSV file.

    The raw dataframe is read eagerly on construction and stored on
    ``self.Xy_raw``, indexed by ``passengerid`` with normalised column names.
    """

    def __init__(self, filename: Union[str, Path], drop_columns=None):
        """Extract training data from a file or Path.

        Arguments:
            filename: CSV data file containing the data.
            drop_columns: Columns that a later cleaning step should drop;
                defaults to ``["age", "cabin", "name", "ticket"]``.
        """
        # None sentinel avoids the shared-mutable-default pitfall.
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.filename = filename
        self.drop_columns = drop_columns
        # Column bookkeeping for downstream consumers (label vs. feature split).
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        self.Xy_raw = None
        self.extract_raw()

    def extract_raw(self):
        """Read the CSV and normalise it into ``self.Xy_raw``.

        Normalisation: lower-cased, underscore-separated column names, the
        sparse ``age`` column renamed to ``age_known``, ``pclass`` as a
        categorical, and ``passengerid`` as the index.
        """
        Xy_raw = pd.read_csv(self.filename)
        # Normalise headers: lower-case and spaces -> underscores.
        Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
        # 'age' has missing values; keep the known values under an explicit name
        # so an estimated 'age' column can be derived later (see estimate_age).
        Xy_raw = Xy_raw.rename(columns={'age': 'age_known'})
        Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
        self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
    """Feature engineering on the raw frame produced by :class:`ExtractData`.

    Derives title, last name, cabin number/prefix and an ``is_child`` flag on
    a working copy stored in ``self.Xy``.
    """

    # Collapse rare / foreign honorifics onto the common English titles.
    title_translator = {
        "Mlle.": "Mrs.",
        "Mme.": "Mrs.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Mrs.",
        "the Countess. of": "Mrs.",
    }

    def __init__(self, raw_data, adult_age_threshold_min=13, drop_columns=None):
        """Build derived features from extracted raw data.

        Arguments:
            raw_data: An :class:`ExtractData` instance exposing ``Xy_raw``.
            adult_age_threshold_min: Ages strictly below this count as child.
            drop_columns: Columns removed by :meth:`clean`;
                defaults to ``["age", "cabin", "name", "ticket"]``.
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.raw = raw_data
        # BUG FIX: drop_columns was previously discarded, making clean() fail
        # with AttributeError on self.drop_columns.
        self.drop_columns = drop_columns
        self.adult_age_threshold_min = adult_age_threshold_min
        self.Xy = self.raw.Xy_raw.copy()
        self.extract_title()
        self.extract_last_name()
        self.extract_cabin_number()
        self.extract_cabin_prefix()
        self.calc_is_child()

    def calc_is_child(self):
        """Flag passengers younger than the adult threshold."""
        # BUG FIX: extraction renames 'age' to 'age_known', so self.Xy.age did
        # not exist here. Unknown ages compare as NaN < threshold -> False,
        # i.e. they are conservatively treated as adults.
        self.Xy['is_child'] = self.Xy.age_known < self.adult_age_threshold_min

    def extract_cabin_number(self):
        """Trailing digits of the ticket string (NaN when absent)."""
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract(r'(\d+)$')

    def extract_cabin_prefix(self):
        """Everything before the last space in the ticket string."""
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract(r'^(.+) ')

    def extract_title(self):
        """Parse the honorific out of the name and normalise it."""
        self.Xy["title"] = (
            self.Xy.name.apply(lambda x: HumanName(x).title)
            .replace(self.title_translator)
            .replace({r"\.": ""}, regex=True)
        )

    def extract_last_name(self):
        """Parse the family name out of the full name."""
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)

    def clean(self):
        """Drop unused columns from the working frame.

        Missing columns (e.g. 'age', which extraction renamed to 'age_known')
        are ignored rather than raising.
        """
        # BUG FIX: previously referenced the nonexistent self.Xy_raw.
        self.Xy = self.Xy.drop(columns=self.drop_columns, errors="ignore")
def estimate_age(in_df, groupby=None):
    """Fill missing ages with the rounded group-mean of known ages.

    Arguments:
        in_df: Frame with an ``age_known`` column plus the ``groupby`` key
            columns (as produced by ExtractData/TransformData).
        groupby: Key columns used to compute the group means; defaults to
            ``['sex', 'title']``.

    Returns:
        A new frame (index reset into an ``index`` column) with an
        ``age_estimate`` column (group mean rounded to 1 decimal) and an
        ``age`` column equal to ``age_known`` where present, otherwise the
        group estimate.
    """
    # None sentinel instead of a mutable default list.
    if groupby is None:
        groupby = ['sex', 'title']
    # BUG FIX: the groupby parameter was previously ignored -- both the
    # aggregation and the merge hard-coded ['sex', 'title'].
    age_estimate = in_df.groupby(groupby).age_known.mean().to_frame().round(1)
    age_estimate = age_estimate.rename(columns={'age_known': 'age_estimate'})
    out_df = in_df.reset_index().merge(age_estimate, on=groupby)
    out_df['age'] = out_df['age_known'].fillna(out_df['age_estimate'])
    return out_df
|
# Generated by Django 3.2.4 on 2021-06-28 03:42
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates a custom ``User`` model (mirroring Django's
    default auth user fields) and the ``Email`` model for the mail app."""
    initial = True
    dependencies = [
        # Needed for the Group/Permission many-to-many targets on User.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField(blank=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('read', models.BooleanField(default=False)),
                ('archived', models.BooleanField(default=False)),
                # 'user' is the mailbox owner; each message is stored once per
                # mailbox, so sender and recipients are separate relations.
                ('recipients', models.ManyToManyField(related_name='emails_received', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails_sent', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import sym_lis2
from sym_lis2 import GlobalEnv as Env
import pytest, mock
'''
@pytest.mark.parametrize("test_input,expected", [
("(+ 1 2)", 3),
("(+ (+ 11 28) 2)", 41),
("(+ (+ 11 28) (* 1 2))", 41),
])
def test_calc1(test_input, expected):
g = Env()
assert g.eval_str(test_input) == expected
'''
def test_defines():
    """String-named defines bind values retrievable by bare symbol lookup."""
    env = Env()
    env.eval_str('(define "foo" "bar")')
    assert env.eval_str('foo') == 'bar'
    env.eval_str('(define "bar" 5)')
    assert env.eval_str('bar') == 5
    env.eval_str('(define "baz" (nsp (quote ("a")) (quote ("b"))))')
    assert env.eval_str('baz') == {'a': 'b'}
'''
def test_procedure():
g = Env()
g.eval_str('(define "echo" (nsp (quote ("_proc")) (quote (((eval _args))))))')
assert g.eval_str('(echo foo bar)') == ['foo', 'bar']
'''
def test_integer_arithmetics():
    """Flat and nested '+' / '*' evaluate to the expected integers."""
    env = Env()
    assert env.eval_str('(+ 1 2)') == 3
    assert env.eval_str('(+ 33 (+ 1 2))') == 36
    assert env.eval_str('(* 1 2)') == 2
    assert env.eval_str('(* 33 (+ 1 2))') == 99
def test_string_manipulation():
    """'+' concatenates strings, and a computed name can be used in define."""
    env = Env()
    assert env.eval_str('(+ "foo_" "bar")') == "foo_bar"
    env.eval_str('(define (+ "foo_" "bar") 5)')
    assert env.eval_str('foo_bar') == 5
def test_index_n_eval():
    """An integer head indexes into a quoted list and evaluates the element."""
    env = Env()
    quoted = '(quote ((+ 1 2) (+ 3 4)))'
    print(env.eval_str(quoted))
    print(env.eval_str('(0 ' + quoted + ')'))
    assert env.eval_str('(0 ' + quoted + ')') == 3
    assert env.eval_str('(1 ' + quoted + ')') == 7
def test_dyn_scope():
    """Procedures see lexical bindings directly and caller bindings via _dyn."""
    g = Env()
    # Procedure reading its positional arguments through _args.
    g.eval_str('''(define "sum_typ"
    (nsp (list "_proc")
    (quote ((
    (print _args)
    (print (type _args))
    (+ (0 _args) (1 _args))
    )))))''')
    assert g.eval_str('(sum_typ 1 2)') == 3
    assert g.eval_str('(sum_typ 33 (sum_typ 1 2))') == 36
    # Procedure resolving free variables lexically (foo/bar from the global env).
    g.eval_str('''(define "sum_lex"
    (nsp (quote ("_proc")) (quote ((
    (+ foo bar)
    )))))''')
    g.eval_str("(define 'foo 1)")
    g.eval_str("(define 'bar 2)")
    assert g.eval_str('(sum_lex)') == 3
    # Procedure reading its caller's bindings explicitly through _dyn.
    g.eval_str('''(define 'sum_dyn
    (nsp (quote ("_proc")) (quote ((
    (print 'sum_dyn (nsp_keys _dyn))
    (+ (get _dyn "foo") (get _dyn "bar"))
    )))))''')
    g.eval_str('''(define 'sum_wrapper
    (nsp (quote ("_proc")) (quote ((
    (define 'foo 30)
    (define 'bar 6)
    (print 'sumwrapper 'NAMESPACE_dyn (nsp_keys _dyn))
    (sum_dyn)
    )))))''')
    assert g.eval_str('(sum_wrapper)') == 36
    '''
    ( dict_keys(['_args', '_dyn', 'foo', 'bar']),
    (dict_keys(['_proc']),
    dict_keys(['+', '-', '*', '/', '>', '<', '>=', '<=', '=', 'sum', 'equal?', 'length', 'print', 'type', 'eval', 'eval_explicit', 'define', 'map', 'list?', 'None', 'do', 'nsp_keys', 'sum_typ', 'sum_lex', 'foo', 'bar', 'sum_dyn', 'sum_wrapper']))
    )
    so, _dyn is the call nsp here!
    but not in the basic_func???
    '''
    # '.' resolves in the procedure's own (local) scope, not the caller's, so
    # the wrapper's foo=30/bar=6 are NOT seen: the globals foo=1/bar=2 are.
    g.eval_str('''(define 'sum_dyn2
    (nsp (quote ("_proc"))
    (quote ((
    (+ (get . "foo") (get . "bar"))
    )))
    ))''')
    g.eval_str('''(define 'sum_wrapper2
    (nsp (quote ("_proc")) (quote ((
    (define 'foo 30)
    (define 'bar 6)
    (print _dyn)
    (sum_dyn2)
    )))))''')
    assert g.eval_str('(sum_wrapper2)') == 3
def test_dyn_nested_scope():
    """The dynamic scope (_dyn) is one level deep: a procedure called through
    an intermediate wrapper does not see the outermost caller's bindings."""
    g = Env()
    g.eval_str('''(define 'sum_dyn
    (nsp (quote ("_proc")) (quote ((
    (print 'sum_dyn '_dyn (nsp_keys _dyn))
    (print 'sum_dyn '. (nsp_keys .))
    (+ (get _dyn "foo") (get _dyn "bar"))
    )))))''')
    g.eval_str('''(define 'sum_dyn_nested
    (nsp (quote ("_proc")) (quote ((
    (define 'a_var_in_sum_dyn_nested_call 6)
    (print 'sum_dyn_nested '_dyn (nsp_keys _dyn))
    (print 'sum_dyn_nested '. (nsp_keys .))
    (sum_dyn)
    )))))''')
    g.eval_str('''(define 'sum_wrapper
    (nsp (quote ("_proc")) (quote ((
    (define 'foo 30)
    (define 'bar 6)
    (print 'sumwrapper 'NAMESPACE_dyn (nsp_keys _dyn))
    (sum_dyn_nested)
    )))))''')
    with pytest.raises(NameError):
        # cannot find the variable, because the dynamic scope does not nest
        # BUG FIX: the trailing '== 36' comparison was dead code -- eval_str
        # raises before the comparison runs and its result was discarded anyway.
        g.eval_str('(sum_wrapper)')
def test_add_quotes():
    """'+' concatenates quoted lists, preserving any inner nesting."""
    env = Env()
    assert env.eval_str('(+ (quote (1 2)) (quote (3 4)))') == [1, 2, 3, 4]
    assert env.eval_str('(+ (quote ((1 2))) (quote (3 4)))') == [[1, 2], 3, 4]
def test_list():
    """list wraps its single evaluated argument in a one-element list."""
    env = Env()
    flat = env.eval_str('(list (+ (quote (1 2)) (quote (3 4))))')
    assert flat == [[1, 2, 3, 4]]
    nested = env.eval_str('(list (+ (quote ((1 2))) (quote ((3 4)))))')
    assert nested == [[[1, 2], [3, 4]]]
def test_nsp_proc_structure():
    """A procedure namespace is a plain dict: '_proc' maps to a list of body forms."""
    g = Env()
    # Body forms given literally inside one quote.
    assert g.eval_str('''(nsp
    (quote ("_proc"))
    (quote ((
    (foo bar)
    (baz 22)
    ))))''') == {'_proc': [['foo', 'bar'], ['baz', 22]]}
    # Equivalent body built by concatenating two quoted fragments.
    assert g.eval_str('''(nsp
    (quote ("_proc"))
    (list (+
    (quote (
    (foo bar)
    ))
    (quote (
    (baz 22)
    ))
    )))''') == {'_proc': [['foo', 'bar'], ['baz', 22]]}
def test_remote_define():
    """define with a third argument writes into that target namespace instead
    of the current one, leaving the current scope untouched."""
    env = Env()
    env.eval_str("(define 'foo (nsp (quote ()) (quote ())))")
    assert env.eval_str('foo') == {}
    env.eval_str("(define 'bar 5 foo)")
    env.eval_str("(define 'bar 111)")
    assert env.eval_str('foo') == {'bar': 5}
    assert env.eval_str('(eval . bar)') == 111
    assert env.eval_str('(eval foo bar)') == 5
def test_semibuild_list():
    """A list literal may mix an evaluated form, a quoted list and a symbol."""
    g = Env()
    assert g.eval_str('''(list
    (+ 1 2)
    (quote (foo bar))
    'baz
    )''') == [3, ['foo', 'bar'], 'baz']
def test_multiplus():
    """sum folds addition over a whole list of numbers."""
    env = Env()
    assert env.eval_str('(sum (list 1 2 3))') == 6
def test_multicall():
    """A procedure that returns itself can be applied repeatedly; each call
    increments foo, so three nested applications add 3."""
    g = Env()
    g.eval_str("(define 'foo 5)")
    g.eval_str('''(define 'foo_inc (nsp
    (quote ("_proc"))
    (quote ((
    (print foo)
    (set! 'foo (+ foo 1))
    foo_inc
    )))
    ))''')
    assert g.eval_str('foo') == 5
    # (((foo_inc))) applies the self-returning procedure three times.
    print(g.eval_str('(((foo_inc)))'))
    assert g.eval_str('foo') == 8
def test_quote_eval():
    """A quoted define form is inert data until applied as (q)."""
    g = Env()
    g.eval_str("(define 'q (quote (define 'nome (nsp (list 'foo 'bar) (list 2 3)))))")
    # The quoted form is stored as nested lists, not executed.
    assert g.eval_str('q') == \
    ['define', "'nome", ['nsp', ['list', "'foo", "'bar"], ['list', 2, 3]]]
    print('test eval q')
    # (eval (eval q)) does not make sense because eval is recursive already
    g.eval_str('(q)')
    #g.eval_str("(define 'nome (nsp (list 'foo 'bar) (list 2 3)))")
    # TODO (q) must be == (eval q)? or it doesn't make sense?
    # TODO: is this a special control form?
    print(g.eval_str('nome'))
    assert g.eval_str('nome') == {'foo': 2, 'bar': 3}
def test_map_eval():
    """map applies (eval .) to every element of a quoted list."""
    env = Env()
    result = env.eval_str('(map (eval .) (quote (1 (+ 1 10) (* 2 (1 (list 0 5 10))))))')
    print(result)
    assert result == [1, 11, 10]
'''
What if make
foo = "foo"
(foo) = lookup foo variable
'''
"""
def test_basic_procedure():
'''
(proc name (body a b))
->
g.eval_str('''(define name (nsp
(quote ("_proc"))
(quote (
(body a b)
))
))''')
'''
"""
def test_eval_explicit():
    """A lisp-level reimplementation of eval_explicit: only subtrees tagged
    with eval_explicit2 are evaluated; everything else is returned as data."""
    g = Env()
    """
    def proc_eval_explicit(expr, _dyn=None):
        # if the list starts with `eval` -- launch the usual eval
        # if not -- recurse into child lists
        if isinstance(expr, List) and len(expr) > 0:
            if expr[0] == 'eval_explicit':
                r = lisp_eval2(expr[1], in_namespace)
            else:
                r = list(map(lambda x: proc_eval_explicit(x, _dyn), expr))
        else:
            r = expr
    """
    # Recursive walker: evaluate (eval_explicit2 X) nodes, map over other
    # lists, and pass atoms through unchanged.
    g.eval_str('''(define "eval_explicit2"
    (nsp (list "_proc")
    (quote ((
    (define 'expr (index 0 _args))
    (if (list? expr)
    (if (equal? (index 0 expr) "eval_explicit2")
    (eval . ((index 1 expr)))
    (map (eval_explicit2) expr))
    expr)
    ))
    )
    )
    )''')
    g.eval_str('(print "eval_explicit2" eval_explicit2)')
    # Only the inner (eval_explicit2 (+ 1 2)) is evaluated (-> 3); the rest
    # of the tree is returned verbatim as data.
    assert g.eval_str('''(eval_explicit2
    (eval foo (bar (map (eval .) baz))
    (eval_explicit2 (+ 1 2))))''') == \
    ['eval', 'foo', ['bar', ['map', ['eval', '.'], 'baz']], 3]
    g.eval_str('(define "args" (quote (1 2 3)))')
    assert g.eval_str('''(eval_explicit2
    (eval bar (eval_explicit2 args)))''') == \
    ['eval', 'bar', [1, 2, 3]]
def test_exit():
    """`(exit 0)` raises SystemExit carrying exit code 0."""
    env = Env()
    with pytest.raises(SystemExit) as excinfo:
        env.eval_str('(exit 0)')
    assert excinfo.type == SystemExit
    assert excinfo.value.code == 0
|
from django import forms
from django.forms.models import modelform_factory
from django.utils.translation import ugettext as _
from wagtail.wagtailadmin import widgets
from wagtail.wagtailadmin.forms import (
BaseCollectionMemberForm, collection_member_permission_formset_factory)
from wagtail.wagtailimages.fields import WagtailImageField
from wagtail.wagtailimages.formats import get_image_formats
from wagtail.wagtailimages.models import Image
from wagtail.wagtailimages.permissions import permission_policy as images_permission_policy
# Callback passed to modelform_factory so the image file column gets a
# WagtailImageField instead of Django's default form field.
def formfield_for_dbfield(db_field, **kwargs):
    """Return the form field for ``db_field``; 'file' gets WagtailImageField."""
    if db_field.name != 'file':
        # every other field keeps whatever its formfield() method produces
        return db_field.formfield(**kwargs)
    return WagtailImageField(**kwargs)
class BaseImageForm(BaseCollectionMemberForm):
    """Base form for image models; collection handling uses the image policy."""
    # permission policy consulted by BaseCollectionMemberForm when limiting
    # the collection choices offered to the current user
    permission_policy = images_permission_policy
def get_image_form(model):
    """Build a ModelForm class for editing the given image model.

    Uses the model's ``admin_form_fields``, always including 'collection',
    and applies Wagtail-specific widgets and the image file form field.
    """
    fields = model.admin_form_fields
    if 'collection' not in fields:
        # Always expose the 'collection' field: omitting it gives dubious
        # results when several collections exist (e.g. saving into the root
        # collection without permission), and with a single collection the
        # field is hidden anyway.
        fields = list(fields) + ['collection']

    # Use a plain FileInput for 'file' instead of the default
    # ClearableFileInput, so editing doesn't show a pointless
    # 'currently: ...' banner.
    overridden_widgets = {
        'tags': widgets.AdminTagWidget,
        'file': forms.FileInput(),
        'focal_point_x': forms.HiddenInput(attrs={'class': 'focal_point_x'}),
        'focal_point_y': forms.HiddenInput(attrs={'class': 'focal_point_y'}),
        'focal_point_width': forms.HiddenInput(attrs={'class': 'focal_point_width'}),
        'focal_point_height': forms.HiddenInput(attrs={'class': 'focal_point_height'}),
    }

    return modelform_factory(
        model,
        form=BaseImageForm,
        fields=fields,
        formfield_callback=formfield_for_dbfield,
        widgets=overridden_widgets)
class ImageInsertionForm(forms.Form):
    """
    Form for selecting parameters of the image (e.g. format) prior to insertion
    into a rich text area
    """
    # one radio button per registered image format
    format = forms.ChoiceField(
        choices=[(format.name, format.label) for format in get_image_formats()],
        widget=forms.RadioSelect
    )
    # required free-text alternative text for the inserted image
    alt_text = forms.CharField()
class URLGeneratorForm(forms.Form):
    """Form for choosing a resize filter and its dimensions when generating
    an image URL."""
    filter_method = forms.ChoiceField(
        label=_("Filter"),
        choices=(
            ('original', _("Original size")),
            ('width', _("Resize to width")),
            ('height', _("Resize to height")),
            ('min', _("Resize to min")),
            ('max', _("Resize to max")),
            ('fill', _("Resize to fill")),
        ),
    )
    # BUG FIX: the labels were previously passed positionally, which for
    # IntegerField binds them to `max_value` (its first positional parameter)
    # rather than `label` — pass them as keyword arguments.
    width = forms.IntegerField(label=_("Width"), min_value=0)
    height = forms.IntegerField(label=_("Height"), min_value=0)
    closeness = forms.IntegerField(label=_("Closeness"), min_value=0, initial=0)
# Per-group permission formset for Image, built from
# (codename, short label, help text) triples and rendered with the
# template named below.
GroupImagePermissionFormSet = collection_member_permission_formset_factory(
    Image,
    [
        ('add_image', _("Add"), _("Add/edit images you own")),
        ('change_image', _("Edit"), _("Edit any image")),
    ],
    'wagtailimages/permissions/includes/image_permissions_formset.html'
)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNodeenv(PythonPackage):
    """Node.js virtual environment"""

    # upstream project and source-tarball location
    homepage = "https://github.com/ekalinin/nodeenv"
    url = "https://pypi.io/packages/source/n/nodeenv/nodeenv-1.3.3.tar.gz"

    version('1.3.3', sha256='ad8259494cf1c9034539f6cced78a1da4840a4b157e23640bc4a0c0546b0cb7a')

    # build-time only: needed to run setup.py
    depends_on('py-setuptools', type='build')
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, Normal, Uniform, HalfCauchy, StudentT
@bm.random_variable
def beta(n):
    # i.i.d. Beta(2, 2) priors, one per index n.
    return Beta(2.0, 2.0)
@bm.random_variable
def flip_beta():
    # Vectorized Bernoulli whose probabilities are the stochastic
    # tensor [beta(0), beta(1)].
    return Bernoulli(tensor([beta(0), beta(1)]))
@bm.random_variable
def beta_2_2():
    # Size-[2] Beta prior; the scalar 2.0 broadcasts against tensor([3., 4.]).
    return Beta(2.0, tensor([3.0, 4.0]))
@bm.random_variable
def flip_beta_2_2():
    # Bernoulli driven by the size-[2] beta sample above.
    return Bernoulli(beta_2_2())
@bm.random_variable
def uniform_2_2():
    # Size-[2] Uniform(0, 1); the scalar 0.0 broadcasts against tensor([1., 1.]).
    return Uniform(0.0, tensor([1.0, 1.0]))
@bm.random_variable
def flip_uniform_2_2():
    # Bernoulli driven by the size-[2] uniform sample above.
    return Bernoulli(uniform_2_2())
@bm.random_variable
def flip_logits():
    # Bernoulli parameterized by logits rather than probabilities.
    return Bernoulli(logits=tensor([beta(0), beta(1)]))
@bm.random_variable
def flip_const():
    # Bernoulli with a constant 2-element probability vector.
    return Bernoulli(tensor([0.25, 0.75]))
@bm.random_variable
def flip_const_4():
    # Bernoulli with a constant 4-element probability vector.
    return Bernoulli(tensor([0.25, 0.75, 0.5, 0.5]))
@bm.random_variable
def flip_const_2_3():
    # Bernoulli with a constant 2 x 3 matrix of probabilities.
    return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]]))
@bm.random_variable
def normal_2_3():
    # 2 x 3 Normal: stochastic means, constant per-column scales
    # (the size-[3] sigmas broadcast across both rows of mus).
    mus = flip_const_2_3()  # 2 x 3 tensor of 0 or 1
    sigmas = tensor([2.0, 3.0, 4.0])
    return Normal(mus, sigmas)
@bm.random_variable
def hc_3():
    # Size-[3] HalfCauchy prior with per-element scales.
    return HalfCauchy(tensor([1.0, 2.0, 3.0]))
@bm.random_variable
def studentt_2_3():
    # StudentT mixing a size-[3] df/scale (hc_3) with a 2 x 3 loc (normal_2_3);
    # the size-[3] inputs broadcast across the two rows.
    return StudentT(hc_3(), normal_2_3(), hc_3())
@bm.functional
def operators():
    # Note that we do NOT devectorize the multiplication; it gets
    # turned into a MatrixScale.
    return ((beta_2_2() + tensor([[5.0, 6.0], [7.0, 8.0]])) * 10.0).exp()
class FixVectorizedModelsTest(unittest.TestCase):
    """Tests that BMGInference's graph rewriter devectorizes tensor-valued
    model nodes: each case compares the DOT dump of the accumulated graph
    before (``after_transform=False``) and after (``after_transform=True``)
    the transformation."""

    def test_fix_vectorized_models_1(self) -> None:
        self.maxDiff = None
        observations = {flip_beta(): tensor([0.0, 1.0])}
        queries = [flip_beta(), flip_const()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite:
        expected = """
digraph "graph" {
  N00[label=2.0];
  N01[label=Beta];
  N02[label=Sample];
  N03[label=Sample];
  N04[label=Tensor];
  N05[label=Bernoulli];
  N06[label=Sample];
  N07[label="Observation tensor([0., 1.])"];
  N08[label=Query];
  N09[label="[0.25,0.75]"];
  N10[label=Bernoulli];
  N11[label=Sample];
  N12[label=Query];
  N00 -> N01;
  N00 -> N01;
  N01 -> N02;
  N01 -> N03;
  N02 -> N04;
  N03 -> N04;
  N04 -> N05;
  N05 -> N06;
  N06 -> N07;
  N06 -> N08;
  N09 -> N10;
  N10 -> N11;
  N11 -> N12;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=2.0];
  N01[label=Beta];
  N02[label=Sample];
  N03[label=Sample];
  N04[label=2];
  N05[label=1];
  N06[label=Bernoulli];
  N07[label=Sample];
  N08[label=Bernoulli];
  N09[label=Sample];
  N10[label=ToMatrix];
  N11[label=Query];
  N12[label=0.25];
  N13[label=Bernoulli];
  N14[label=Sample];
  N15[label=0.75];
  N16[label=Bernoulli];
  N17[label=Sample];
  N18[label=ToMatrix];
  N19[label=Query];
  N20[label="Observation False"];
  N21[label="Observation True"];
  N00 -> N01;
  N00 -> N01;
  N01 -> N02;
  N01 -> N03;
  N02 -> N06;
  N03 -> N08;
  N04 -> N10;
  N04 -> N18;
  N05 -> N10;
  N05 -> N18;
  N06 -> N07;
  N07 -> N10;
  N07 -> N20;
  N08 -> N09;
  N09 -> N10;
  N09 -> N21;
  N10 -> N11;
  N12 -> N13;
  N13 -> N14;
  N14 -> N18;
  N15 -> N16;
  N16 -> N17;
  N17 -> N18;
  N18 -> N19;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_2(self) -> None:
        self.maxDiff = None
        observations = {flip_const_4(): tensor([0.0, 1.0, 0.0, 1.0])}
        queries = [flip_const_4()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite:
        expected = """
digraph "graph" {
  N0[label="[0.25,0.75,0.5,0.5]"];
  N1[label=Bernoulli];
  N2[label=Sample];
  N3[label="Observation tensor([0., 1., 0., 1.])"];
  N4[label=Query];
  N0 -> N1;
  N1 -> N2;
  N2 -> N3;
  N2 -> N4;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        # Note that due to the order in which we do the rewriting we
        # end up with a not-deduplicated Bernoulli(0.5) node here, which
        # is slightly unfortunate but probably not worth fixing right now.
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=4];
  N01[label=1];
  N02[label=0.25];
  N03[label=Bernoulli];
  N04[label=Sample];
  N05[label=0.75];
  N06[label=Bernoulli];
  N07[label=Sample];
  N08[label=0.5];
  N09[label=Bernoulli];
  N10[label=Sample];
  N11[label=Bernoulli];
  N12[label=Sample];
  N13[label=ToMatrix];
  N14[label=Query];
  N15[label="Observation False"];
  N16[label="Observation True"];
  N17[label="Observation False"];
  N18[label="Observation True"];
  N00 -> N13;
  N01 -> N13;
  N02 -> N03;
  N03 -> N04;
  N04 -> N13;
  N04 -> N15;
  N05 -> N06;
  N06 -> N07;
  N07 -> N13;
  N07 -> N16;
  N08 -> N09;
  N08 -> N11;
  N09 -> N10;
  N10 -> N13;
  N10 -> N17;
  N11 -> N12;
  N12 -> N13;
  N12 -> N18;
  N13 -> N14;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_3(self) -> None:
        self.maxDiff = None
        observations = {flip_const_2_3(): tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}
        queries = [flip_const_2_3()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite:
        expected = """
digraph "graph" {
  N0[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
  N1[label=Bernoulli];
  N2[label=Sample];
  N3[label="Observation tensor([[0., 0., 0.],\\n [1., 1., 1.]])"];
  N4[label=Query];
  N0 -> N1;
  N1 -> N2;
  N2 -> N3;
  N2 -> N4;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=3];
  N01[label=2];
  N02[label=0.25];
  N03[label=Bernoulli];
  N04[label=Sample];
  N05[label=0.75];
  N06[label=Bernoulli];
  N07[label=Sample];
  N08[label=0.5];
  N09[label=Bernoulli];
  N10[label=Sample];
  N11[label=0.125];
  N12[label=Bernoulli];
  N13[label=Sample];
  N14[label=0.875];
  N15[label=Bernoulli];
  N16[label=Sample];
  N17[label=0.625];
  N18[label=Bernoulli];
  N19[label=Sample];
  N20[label=ToMatrix];
  N21[label=Query];
  N22[label="Observation False"];
  N23[label="Observation False"];
  N24[label="Observation False"];
  N25[label="Observation True"];
  N26[label="Observation True"];
  N27[label="Observation True"];
  N00 -> N20;
  N01 -> N20;
  N02 -> N03;
  N03 -> N04;
  N04 -> N20;
  N04 -> N22;
  N05 -> N06;
  N06 -> N07;
  N07 -> N20;
  N07 -> N23;
  N08 -> N09;
  N09 -> N10;
  N10 -> N20;
  N10 -> N24;
  N11 -> N12;
  N12 -> N13;
  N13 -> N20;
  N13 -> N25;
  N14 -> N15;
  N15 -> N16;
  N16 -> N20;
  N16 -> N26;
  N17 -> N18;
  N18 -> N19;
  N19 -> N20;
  N19 -> N27;
  N20 -> N21;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_4(self) -> None:
        # Demonstrate we can also do devectorizations on logits-style Bernoullis.
        # (A logits Bernoulli with a beta prior is a likely mistake in a real model,
        # but it is a convenient test case.)
        self.maxDiff = None
        observations = {}
        queries = [flip_logits()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite:
        expected = """
digraph "graph" {
  N0[label=2.0];
  N1[label=Beta];
  N2[label=Sample];
  N3[label=Sample];
  N4[label=Tensor];
  N5[label="Bernoulli(logits)"];
  N6[label=Sample];
  N7[label=Query];
  N0 -> N1;
  N0 -> N1;
  N1 -> N2;
  N1 -> N3;
  N2 -> N4;
  N3 -> N4;
  N4 -> N5;
  N5 -> N6;
  N6 -> N7;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=2.0];
  N01[label=Beta];
  N02[label=Sample];
  N03[label=Sample];
  N04[label=2];
  N05[label=1];
  N06[label=ToReal];
  N07[label="Bernoulli(logits)"];
  N08[label=Sample];
  N09[label=ToReal];
  N10[label="Bernoulli(logits)"];
  N11[label=Sample];
  N12[label=ToMatrix];
  N13[label=Query];
  N00 -> N01;
  N00 -> N01;
  N01 -> N02;
  N01 -> N03;
  N02 -> N06;
  N03 -> N09;
  N04 -> N12;
  N05 -> N12;
  N06 -> N07;
  N07 -> N08;
  N08 -> N12;
  N09 -> N10;
  N10 -> N11;
  N11 -> N12;
  N12 -> N13;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_5(self) -> None:
        self.maxDiff = None
        observations = {}
        queries = [studentt_2_3()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite. Note that we have a size[3] stochastic input and
        # a size[2, 3] stochastic input to the StudentT, and we broadcast the three
        # HalfCauchy samples correctly
        expected = """
digraph "graph" {
  N00[label="[1.0,2.0,3.0]"];
  N01[label=HalfCauchy];
  N02[label=Sample];
  N03[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
  N04[label=Bernoulli];
  N05[label=Sample];
  N06[label="[2.0,3.0,4.0]"];
  N07[label=Normal];
  N08[label=Sample];
  N09[label=StudentT];
  N10[label=Sample];
  N11[label=Query];
  N00 -> N01;
  N01 -> N02;
  N02 -> N09;
  N02 -> N09;
  N03 -> N04;
  N04 -> N05;
  N05 -> N07;
  N06 -> N07;
  N07 -> N08;
  N08 -> N09;
  N09 -> N10;
  N10 -> N11;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=3];
  N01[label=2];
  N02[label=1.0];
  N03[label=HalfCauchy];
  N04[label=Sample];
  N05[label=0.25];
  N06[label=Bernoulli];
  N07[label=Sample];
  N08[label=ToReal];
  N09[label=2.0];
  N10[label=Normal];
  N11[label=Sample];
  N12[label=StudentT];
  N13[label=Sample];
  N14[label=HalfCauchy];
  N15[label=Sample];
  N16[label=0.75];
  N17[label=Bernoulli];
  N18[label=Sample];
  N19[label=ToReal];
  N20[label=3.0];
  N21[label=Normal];
  N22[label=Sample];
  N23[label=StudentT];
  N24[label=Sample];
  N25[label=HalfCauchy];
  N26[label=Sample];
  N27[label=0.5];
  N28[label=Bernoulli];
  N29[label=Sample];
  N30[label=ToReal];
  N31[label=4.0];
  N32[label=Normal];
  N33[label=Sample];
  N34[label=StudentT];
  N35[label=Sample];
  N36[label=0.125];
  N37[label=Bernoulli];
  N38[label=Sample];
  N39[label=ToReal];
  N40[label=Normal];
  N41[label=Sample];
  N42[label=StudentT];
  N43[label=Sample];
  N44[label=0.875];
  N45[label=Bernoulli];
  N46[label=Sample];
  N47[label=ToReal];
  N48[label=Normal];
  N49[label=Sample];
  N50[label=StudentT];
  N51[label=Sample];
  N52[label=0.625];
  N53[label=Bernoulli];
  N54[label=Sample];
  N55[label=ToReal];
  N56[label=Normal];
  N57[label=Sample];
  N58[label=StudentT];
  N59[label=Sample];
  N60[label=ToMatrix];
  N61[label=Query];
  N00 -> N60;
  N01 -> N60;
  N02 -> N03;
  N03 -> N04;
  N04 -> N12;
  N04 -> N12;
  N04 -> N42;
  N04 -> N42;
  N05 -> N06;
  N06 -> N07;
  N07 -> N08;
  N08 -> N10;
  N09 -> N10;
  N09 -> N14;
  N09 -> N40;
  N10 -> N11;
  N11 -> N12;
  N12 -> N13;
  N13 -> N60;
  N14 -> N15;
  N15 -> N23;
  N15 -> N23;
  N15 -> N50;
  N15 -> N50;
  N16 -> N17;
  N17 -> N18;
  N18 -> N19;
  N19 -> N21;
  N20 -> N21;
  N20 -> N25;
  N20 -> N48;
  N21 -> N22;
  N22 -> N23;
  N23 -> N24;
  N24 -> N60;
  N25 -> N26;
  N26 -> N34;
  N26 -> N34;
  N26 -> N58;
  N26 -> N58;
  N27 -> N28;
  N28 -> N29;
  N29 -> N30;
  N30 -> N32;
  N31 -> N32;
  N31 -> N56;
  N32 -> N33;
  N33 -> N34;
  N34 -> N35;
  N35 -> N60;
  N36 -> N37;
  N37 -> N38;
  N38 -> N39;
  N39 -> N40;
  N40 -> N41;
  N41 -> N42;
  N42 -> N43;
  N43 -> N60;
  N44 -> N45;
  N45 -> N46;
  N46 -> N47;
  N47 -> N48;
  N48 -> N49;
  N49 -> N50;
  N50 -> N51;
  N51 -> N60;
  N52 -> N53;
  N53 -> N54;
  N54 -> N55;
  N55 -> N56;
  N56 -> N57;
  N57 -> N58;
  N58 -> N59;
  N59 -> N60;
  N60 -> N61;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_6(self) -> None:
        self.maxDiff = None
        observations = {}
        queries = [flip_beta_2_2(), flip_uniform_2_2()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite: notice that here torch automatically
        # broadcast the 2.0 to [2.0, 2.0] for us when the node was accumulated,
        # and similarly for 0.0.
        expected = """
digraph "graph" {
  N00[label="[2.0,2.0]"];
  N01[label="[3.0,4.0]"];
  N02[label=Beta];
  N03[label=Sample];
  N04[label=Bernoulli];
  N05[label=Sample];
  N06[label=Query];
  N07[label="[0.0,0.0]"];
  N08[label="[1.0,1.0]"];
  N09[label=Uniform];
  N10[label=Sample];
  N11[label=Bernoulli];
  N12[label=Sample];
  N13[label=Query];
  N00 -> N02;
  N01 -> N02;
  N02 -> N03;
  N03 -> N04;
  N04 -> N05;
  N05 -> N06;
  N07 -> N09;
  N08 -> N09;
  N09 -> N10;
  N10 -> N11;
  N11 -> N12;
  N12 -> N13;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After: notice that we correctly generate two samples from a Flat distribution
        # here.
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=2];
  N01[label=1];
  N02[label=2.0];
  N03[label=3.0];
  N04[label=Beta];
  N05[label=Sample];
  N06[label=Bernoulli];
  N07[label=Sample];
  N08[label=4.0];
  N09[label=Beta];
  N10[label=Sample];
  N11[label=Bernoulli];
  N12[label=Sample];
  N13[label=ToMatrix];
  N14[label=Query];
  N15[label=Flat];
  N16[label=Sample];
  N17[label=Bernoulli];
  N18[label=Sample];
  N19[label=Sample];
  N20[label=Bernoulli];
  N21[label=Sample];
  N22[label=ToMatrix];
  N23[label=Query];
  N00 -> N13;
  N00 -> N22;
  N01 -> N13;
  N01 -> N22;
  N02 -> N04;
  N02 -> N09;
  N03 -> N04;
  N04 -> N05;
  N05 -> N06;
  N06 -> N07;
  N07 -> N13;
  N08 -> N09;
  N09 -> N10;
  N10 -> N11;
  N11 -> N12;
  N12 -> N13;
  N13 -> N14;
  N15 -> N16;
  N15 -> N19;
  N16 -> N17;
  N17 -> N18;
  N18 -> N22;
  N19 -> N20;
  N20 -> N21;
  N21 -> N22;
  N22 -> N23;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_fix_vectorized_models_7(self) -> None:
        self.maxDiff = None
        observations = {}
        queries = [operators()]
        observed = BMGInference().to_dot(queries, observations, after_transform=False)
        # The model before the rewrite:
        expected = """
digraph "graph" {
  N0[label="[2.0,2.0]"];
  N1[label="[3.0,4.0]"];
  N2[label=Beta];
  N3[label=Sample];
  N4[label="[[5.0,6.0],\\\\n[7.0,8.0]]"];
  N5[label="+"];
  N6[label=10.0];
  N7[label="*"];
  N8[label=Exp];
  N9[label=Query];
  N0 -> N2;
  N1 -> N2;
  N2 -> N3;
  N3 -> N5;
  N4 -> N5;
  N5 -> N7;
  N6 -> N7;
  N7 -> N8;
  N8 -> N9;
}
"""
        self.assertEqual(expected.strip(), observed.strip())

        # After:
        observed = BMGInference().to_dot(queries, observations, after_transform=True)
        expected = """
digraph "graph" {
  N00[label=2];
  N01[label=10.0];
  N02[label=2.0];
  N03[label=3.0];
  N04[label=Beta];
  N05[label=Sample];
  N06[label=ToPosReal];
  N07[label=5.0];
  N08[label="+"];
  N09[label=4.0];
  N10[label=Beta];
  N11[label=Sample];
  N12[label=ToPosReal];
  N13[label=6.0];
  N14[label="+"];
  N15[label=7.0];
  N16[label="+"];
  N17[label=8.0];
  N18[label="+"];
  N19[label=ToMatrix];
  N20[label=MatrixScale];
  N21[label=0];
  N22[label=ColumnIndex];
  N23[label=index];
  N24[label=Exp];
  N25[label=1];
  N26[label=index];
  N27[label=Exp];
  N28[label=ColumnIndex];
  N29[label=index];
  N30[label=Exp];
  N31[label=index];
  N32[label=Exp];
  N33[label=ToMatrix];
  N34[label=Query];
  N00 -> N19;
  N00 -> N19;
  N00 -> N33;
  N00 -> N33;
  N01 -> N20;
  N02 -> N04;
  N02 -> N10;
  N03 -> N04;
  N04 -> N05;
  N05 -> N06;
  N06 -> N08;
  N06 -> N16;
  N07 -> N08;
  N08 -> N19;
  N09 -> N10;
  N10 -> N11;
  N11 -> N12;
  N12 -> N14;
  N12 -> N18;
  N13 -> N14;
  N14 -> N19;
  N15 -> N16;
  N16 -> N19;
  N17 -> N18;
  N18 -> N19;
  N19 -> N20;
  N20 -> N22;
  N20 -> N28;
  N21 -> N22;
  N21 -> N23;
  N21 -> N29;
  N22 -> N23;
  N22 -> N26;
  N23 -> N24;
  N24 -> N33;
  N25 -> N26;
  N25 -> N28;
  N25 -> N31;
  N26 -> N27;
  N27 -> N33;
  N28 -> N29;
  N28 -> N31;
  N29 -> N30;
  N30 -> N33;
  N31 -> N32;
  N32 -> N33;
  N33 -> N34;
}
"""
        self.assertEqual(expected.strip(), observed.strip())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes and functions for working with audio data.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = [
'AudioClip',
'load',
'save',
'AUDIO_SUPPORTED_CODECS',
'AUDIO_CHANNELS_MONO',
'AUDIO_CHANNELS_STEREO',
'AUDIO_CHANNEL_LEFT',
'AUDIO_EAR_LEFT',
'AUDIO_CHANNEL_RIGHT',
'AUDIO_EAR_RIGHT',
'AUDIO_CHANNEL_COUNT',
'AUDIO_EAR_COUNT'
]
import numpy as np
import soundfile as sf
from psychopy.tools.audiotools import *
from .exceptions import *
# supported formats for loading and saving audio samples to file
# (lower-cased identifiers as reported by libsoundfile)
AUDIO_SUPPORTED_CODECS = [s.lower() for s in sf.available_formats().keys()]

# constants for specifying the number of channels
AUDIO_CHANNELS_MONO = 1
AUDIO_CHANNELS_STEREO = 2

# constants for indexing channels; the AUDIO_EAR_* names are aliases
AUDIO_CHANNEL_LEFT = AUDIO_EAR_LEFT = 0
AUDIO_CHANNEL_RIGHT = AUDIO_EAR_RIGHT = 1
AUDIO_CHANNEL_COUNT = AUDIO_EAR_COUNT = 2
class AudioClip(object):
"""Class for storing audio clip data.
This class is used to store and handle raw audio data, such as those
obtained from microphone recordings or loaded from files. PsychoPy stores
audio samples in contiguous arrays of 32-bit floating-point values ranging
between -1 and 1.
The `AudioClip` class provides basic audio editing capabilities too. You can
use operators on `AudioClip` instances to combine audio clips together. For
instance, the ``+`` operator will return a new `AudioClip` instance whose
samples are the concatenation of the two operands::
sndCombined = sndClip1 + sndClip2
Note that audio clips must have the same sample rates in order to be joined
using the addition operator. For online compatibility, use the `append()`
method instead.
There are also numerous static methods available to generate various tones
(e.g., sine-, saw-, and square-waves). Audio samples can also be loaded and
saved to files in various formats (e.g., WAV, FLAC, OGG, etc.)
You can play `AudioClip` by directly passing instances of this object to
the :class:`~psychopy.sound.Sound` class::
import psychopy.core as core
import psychopy.sound as sound
myTone = AudioClip.sine(duration=5.0) # generate a tone
mySound = sound.Sound(myTone)
mySound.play()
core.wait(5.0) # wait for sound to finish playing
core.quit()
Parameters
----------
samples : ArrayLike
Nx1 or Nx2 array of audio samples for mono and stereo, respectively.
Values in the array representing the amplitude of the sound waveform
should vary between -1 and 1. If not, they will be clipped.
sampleRateHz : int
Sampling rate used to obtain `samples` in Hertz (Hz). The sample rate or
frequency is related to the quality of the audio, where higher sample
rates usually result in better sounding audio (albeit a larger memory
footprint and file size). The value specified should match the frequency
the clip was recorded at. If not, the audio may sound distorted when
played back. Usually, a sample rate of 48kHz is acceptable for most
applications (DVD audio quality). For convenience, module level
constants with form ``SAMPLE_RATE_*`` are provided to specify many
common samples rates.
userData : dict or None
Optional user data to associated with the audio clip.
"""
def __init__(self, samples, sampleRateHz=SAMPLE_RATE_48kHz, userData=None):
# samples should be a 2D array where columns represent channels
self._samples = np.atleast_2d(
np.asarray(samples, dtype=np.float32, order='C'))
self._samples.clip(-1, 1) # force values to be clipped
# set the sample rate of the clip
self._sampleRateHz = int(sampleRateHz)
# the duration of the audio clip
self._duration = len(self.samples) / float(self.sampleRateHz)
# user data
self._userData = userData if userData is not None else {}
assert isinstance(self._userData, dict)
# --------------------------------------------------------------------------
# Loading and saving
#
# These static methods are related to loading and saving audio clips from
# files. The file types supported are those that `libsoundfile` supports.
#
# Additional codecs such as `mp3` require the pydub package which is
# optional.
#
    @staticmethod
    def _checkCodecSupported(codec, raiseError=False):
        """Check if the audio format string corresponds to a supported codec.

        Used internally to check if the user specified a valid codec identifier.

        Parameters
        ----------
        codec: str
            Codec identifier (e.g., 'wav', 'mp3', etc.)
        raiseError : bool
            Raise an error (`AudioUnsupportedCodecError`) instead of returning
            a value. Default is `False`.

        Returns
        -------
        bool
            `True` if the format is supported.

        """
        if not isinstance(codec, str):
            raise ValueError('Codec identifier must be a string.')

        # membership test against libsoundfile's lower-cased format list
        hasCodec = codec.lower() in AUDIO_SUPPORTED_CODECS

        if raiseError and not hasCodec:
            fmtList = ["'{}'".format(s) for s in AUDIO_SUPPORTED_CODECS]
            raise AudioUnsupportedCodecError(
                "Unsupported audio codec specified, must be either: " +
                ", ".join(fmtList))

        return hasCodec
@staticmethod
def load(filename, codec=None):
"""Load audio samples from a file. Note that this is a static method!
Parameters
----------
filename : str
File name to load.
codec : str or None
Codec to use. If `None`, the format will be implied from the file
name.
Returns
-------
AudioClip
Audio clip containing samples loaded from the file.
"""
if codec is not None:
AudioClip._checkCodecSupported(codec, raiseError=True)
samples, sampleRateHz = sf.read(
filename,
dtype='float32',
always_2d=True,
format=codec)
return AudioClip(
samples=samples,
sampleRateHz=sampleRateHz)
def save(self, filename, codec=None):
"""Save an audio clip to file.
Parameters
----------
filename : str
File name to write audio clip to.
codec : str or None
Format to save audio clip data as. If `None`, the format will be
implied from the extension at the end of `filename`.
"""
if codec is not None:
AudioClip._checkCodecSupported(codec, raiseError=True)
sf.write(
filename,
data=self._samples,
samplerate=self._sampleRateHz,
format=codec)
# --------------------------------------------------------------------------
# Tone and noise generation methods
#
# These static methods are used to generate audio samples, such as random
# colored noise (e.g., white) and tones (e.g., sine, square, etc.)
#
# All of these methods return `AudioClip` objects containing the generated
# samples.
#
@staticmethod
def whiteNoise(duration=1.0, sampleRateHz=SAMPLE_RATE_48kHz, channels=2):
"""Generate gaussian white noise.
**New feature, use with caution.**
Parameters
----------
duration : float or int
Length of the sound in seconds.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
"""
samples = whiteNoise(duration, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
@staticmethod
def silence(duration=1.0, sampleRateHz=SAMPLE_RATE_48kHz, channels=2):
"""Generate audio samples for a silent period.
This is used to create silent periods of a very specific duration
between other audio clips.
Parameters
----------
duration : float or int
Length of the sound in seconds.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
Examples
--------
Generate 5 seconds of silence to enjoy::
import psychopy.sound as sound
silence = sound.AudioClip.silence(10.)
Use the silence as a break between two audio clips when concatenating
them::
fullClip = clip1 + sound.AudioClip.silence(10.) + clip2
"""
samples = np.zeros(
(int(duration * sampleRateHz), channels), dtype=np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
@staticmethod
def sine(duration=1.0, freqHz=440, gain=0.8, sampleRateHz=SAMPLE_RATE_48kHz,
channels=2):
"""Generate audio samples for a tone with a sine waveform.
Parameters
----------
duration : float or int
Length of the sound in seconds.
freqHz : float or int
Frequency of the tone in Hertz (Hz). Note that this differs from the
`sampleRateHz`.
gain : float
Gain factor ranging between 0.0 and 1.0. Default is 0.8.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
Examples
--------
Generate an audio clip of a tone 10 seconds long with a frequency of
400Hz::
import psychopy.sound as sound
tone400Hz = sound.AudioClip.sine(10., 400.)
Create a marker/cue tone and append it to pre-recorded instructions::
import psychopy.sound as sound
voiceInstr = sound.AudioClip.load('/path/to/instructions.wav')
markerTone = sound.AudioClip.sine(
1.0, 440., # duration and freq
sampleRateHz=voiceInstr.sampleRateHz) # must be the same!
fullInstr = voiceInstr + markerTone # create instructions with cue
fullInstr.save('/path/to/instructions_with_tone.wav') # save it
"""
samples = sinetone(duration, freqHz, gain, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
@staticmethod
def square(duration=1.0, freqHz=440, dutyCycle=0.5, gain=0.8,
sampleRateHz=SAMPLE_RATE_48kHz, channels=2):
"""Generate audio samples for a tone with a square waveform.
Parameters
----------
duration : float or int
Length of the sound in seconds.
freqHz : float or int
Frequency of the tone in Hertz (Hz). Note that this differs from the
`sampleRateHz`.
dutyCycle : float
Duty cycle between 0.0 and 1.0.
gain : float
Gain factor ranging between 0.0 and 1.0. Default is 0.8.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
"""
samples = squaretone(duration, freqHz, dutyCycle, gain, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
@staticmethod
def sawtooth(duration=1.0, freqHz=440, peak=1.0, gain=0.8,
sampleRateHz=SAMPLE_RATE_48kHz, channels=2):
"""Generate audio samples for a tone with a sawtooth waveform.
Parameters
----------
duration : float or int
Length of the sound in seconds.
freqHz : float or int
Frequency of the tone in Hertz (Hz). Note that this differs from the
`sampleRateHz`.
peak : float
Location of the peak between 0.0 and 1.0. If the peak is at 0.5, the
resulting wave will be triangular. A value of 1.0 will cause the
peak to be located at the very end of a cycle.
gain : float
Gain factor ranging between 0.0 and 1.0. Default is 0.8.
sampleRateHz : int
Samples rate of the audio for playback.
channels : int
Number of channels for the output.
Returns
-------
AudioClip
"""
samples = sawtone(duration, freqHz, peak, gain, sampleRateHz)
if channels > 1:
samples = np.tile(samples, (1, channels)).astype(np.float32)
return AudioClip(samples, sampleRateHz=sampleRateHz)
# --------------------------------------------------------------------------
# Audio editing methods
#
# Methods related to basic editing of audio samples (operations such as
# splicing clips and signal gain).
#
def __add__(self, other):
"""Concatenate two audio clips."""
assert other.sampleRateHz == self._sampleRateHz
assert other.channels == self.channels
newSamples = np.ascontiguousarray(
np.vstack((self._samples, other.samples)),
dtype=np.float32)
toReturn = AudioClip(
samples=newSamples,
sampleRateHz=self._sampleRateHz)
return toReturn
def __iadd__(self, other):
"""Concatenate two audio clips inplace."""
assert other.sampleRateHz == self._sampleRateHz
assert other.channels == self.channels
self._samples = np.ascontiguousarray(
np.vstack((self._samples, other.samples)),
dtype=np.float32)
return self
    def append(self, clip):
        """Append samples from another sound clip to the end of this one.

        The `AudioClip` object must have the same sample rate and channels as
        this object.

        Parameters
        ----------
        clip : AudioClip
            Audio clip to append.

        Returns
        -------
        AudioClip
            This object with samples from `clip` appended. NOTE: if this clip
            is empty, `clip` itself (not a copy) is returned instead of
            `self`; if `clip` is empty, `self` is returned unchanged.

        Examples
        --------
        Join two sound clips together::

            snd1.append(snd2)

        """
        # if either clip is empty, just replace it
        if len(self.samples) == 0:
            return clip
        if len(clip.samples) == 0:
            return self

        # clips must be compatible before concatenation
        assert self.channels == clip.channels
        assert self._sampleRateHz == clip.sampleRateHz

        self._samples = np.ascontiguousarray(
            np.vstack((self._samples, clip.samples)),
            dtype=np.float32)

        # recompute the duration of the new clip
        self._duration = len(self.samples) / float(self.sampleRateHz)

        return self
def copy(self):
"""Create an independent copy of this `AudioClip`.
Returns
-------
AudioClip
"""
return AudioClip(
samples=self._samples.copy(),
sampleRateHz=self._sampleRateHz)
def gain(self, factor, channel=None):
"""Apply gain the audio samples.
This will modify the internal store of samples inplace. Clipping is
automatically applied to samples after applying gain.
Parameters
----------
factor : float or int
Gain factor to multiply audio samples.
channel : int or None
Channel to apply gain to. If `None`, gain will be applied to all
channels.
"""
try:
arrview = self._samples[:, :] \
if channel is None else self._samples[:, channel]
except IndexError:
raise ValueError('Invalid value for `channel`.')
# multiply and clip range
arrview *= float(factor)
arrview.clip(-1, 1)
# --------------------------------------------------------------------------
# Audio analysis methods
#
# Methods related to basic analysis of audio samples, nothing too advanced
# but still useful.
#
def rms(self, channel=None):
    """Compute the root mean square (RMS) of the samples to determine the
    average signal level.

    Parameters
    ----------
    channel : int or None
        Channel to compute RMS (zero-indexed). If `None`, the RMS of all
        channels will be computed.

    Returns
    -------
    ndarray or float
        An array of per-channel RMS values if ``channel=None`` and the clip
        has multiple channels (a single scalar is returned for mono clips).
        If `channel` *was* specified, a `float` will be returned indicating
        the RMS of that single channel.

    """
    if channel is not None:
        # channels are zero-indexed, so 0 itself must be accepted
        # (previously `0 < channel` wrongly rejected the first channel)
        assert 0 <= channel < self.channels
        # a single-channel selection is 1-D; its RMS is a scalar, so it
        # cannot go through the `len(rms)` branch below
        arr = self._samples[:, channel]
        return float(np.sqrt(np.mean(np.square(arr))))

    rms = np.sqrt(np.mean(np.square(self._samples), axis=0))
    return rms if len(rms) > 1 else rms[0]
# --------------------------------------------------------------------------
# Properties
#
@property
def samples(self):
    """Nx1 or Nx2 array of audio samples (`~numpy.ndarray`).

    Values must range from -1 to 1. Values outside that range will be
    clipped, possibly resulting in distortion.

    """
    return self._samples

@samples.setter
def samples(self, value):
    self._samples = np.asarray(value, dtype=float)  # convert to array
    # `ndarray.clip` returns the clipped result rather than working
    # in-place, so the return value must be kept (the previous code
    # discarded it, leaving out-of-range samples unclipped)
    self._samples = self._samples.clip(-1., 1.)
    # recompute duration after updating samples
    self._duration = len(self._samples) / float(self._sampleRateHz)
@property
def sampleRateHz(self):
    """Sample rate of the audio clip in Hz (`int`). Should be the same
    value as the rate `samples` was captured at.

    """
    return self._sampleRateHz

@sampleRateHz.setter
def sampleRateHz(self, value):
    self._sampleRateHz = int(value)
    # the cached duration depends on the rate, so refresh it here
    nSamples = len(self._samples)
    self._duration = nSamples / float(self._sampleRateHz)
@property
def duration(self):
    """The duration of the audio in seconds (`float`).

    This value is derived from the sampling frequency and the number of
    samples, and is refreshed whenever either of them changes.

    """
    return self._duration
@property
def channels(self):
    """Number of audio channels in the clip (`int`).

    If `channels` > 1, the audio clip is in stereo.

    """
    # the second axis of the sample array indexes channels
    nChannels = self._samples.shape[1]
    return nChannels
@property
def isStereo(self):
    """`True` if there are two channels of audio samples.

    Usually one for each ear. The first channel is usually the left ear, and
    the second the right.

    """
    # stereo is simply "anything that is not mono"
    return not self.isMono
@property
def isMono(self):
    """`True` if there is only one channel of audio data.

    """
    nChannels = self._samples.shape[1]
    return nChannels == 1
@property
def userData(self):
    """User data associated with this clip (`dict`). Can be used for storing
    additional data related to the clip. Note that `userData` is not saved
    with audio files!

    Example
    -------
    Adding fields to `userData`. For instance, we want to associate the
    start time the clip was recorded at with it::

        myClip.userData['date_recorded'] = t_start

    We can access that field later by::

        thisRecordingStartTime = myClip.userData['date_recorded']

    """
    return self._userData

@userData.setter
def userData(self, value):
    # only a mapping may be attached as user data
    assert isinstance(value, dict)
    self._userData = value
def convertToWAV(self):
    """Get a copy of stored audio samples in WAV PCM format.

    Returns
    -------
    bytes
        Sample data scaled to 16-bit signed integers and serialized as raw
        bytes.

    """
    # scale floats in [-1, 1] onto the signed 16-bit integer range
    scale = (1 << 15) - 1
    pcm = np.asarray(self._samples * scale, dtype=np.int16)
    return pcm.tobytes()
def asMono(self, copy=True):
    """Convert the audio clip to mono (single channel audio).

    Parameters
    ----------
    copy : bool
        If `True` an :class:`~psychopy.sound.AudioClip` containing a copy
        of the samples will be returned. If `False`, channels will be
        mixed inplace resulting a the same object being returned. User data
        is not copied.

    Returns
    -------
    :class:`~psychopy.sound.AudioClip`
        Mono version of this object.

    """
    samples = np.atleast_2d(self._samples)  # enforce 2D
    if samples.shape[1] > 1:
        # mix down by summing the channels and halving the result
        mixed = np.atleast_2d(
            samples.sum(axis=1, dtype=np.float32) / np.float32(2.)).T
    else:
        # already mono, just duplicate the buffer
        mixed = samples.copy()

    if not copy:
        self._samples = mixed  # overwrite in place
        return self

    return AudioClip(mixed, self.sampleRateHz)
def transcribe(self, engine='sphinx', language='en-US', expectedWords=None,
               config=None):
    """Convert speech in audio to text.

    This feature passes the audio clip samples to a specified text-to-speech
    engine which will attempt to transcribe any speech within. The efficacy
    of the transcription depends on the engine selected, audio quality, and
    language support. By default, Pocket Sphinx is used which provides
    decent transcription capabilities offline for English and a few other
    languages. For more robust transcription capabilities with a greater
    range of language support, online providers such as Google may be used.

    Speech-to-text conversion blocks the main application thread when used
    on Python. Don't transcribe audio during time-sensitive parts of your
    experiment!

    Parameters
    ----------
    engine : str
        Speech-to-text engine to use. Can be one of 'sphinx' for CMU Pocket
        Sphinx or 'google' for Google Cloud.
    language : str
        BCP-47 language code (eg., 'en-US'). Note that supported languages
        vary between transcription engines.
    expectedWords : list or tuple
        List of strings representing expected words or phrases. This will
        constrain the possible output words to the ones specified. Note not
        all engines support this feature (only Sphinx and Google Cloud do
        at this time). With CMU PocketSphinx, a per-word sensitivity level
        between 0 and 100 may be given after a ``:`` (colon) following each
        word; higher values make the engine more conservative, raising the
        likelihood of false rejections. Defaults to 80 when unspecified.
    config : dict or None
        Additional configuration options for the specified engine. These
        are specified using a dictionary (ex. `config={'pfilter': 1}` will
        enable the profanity filter when using the `'google'` engine).

    Returns
    -------
    :class:`~psychopy.sound.transcribe.TranscriptionResult`
        Transcription result.

    Notes
    -----
    * Online transcription services (eg., Google) may require a paid
      subscription, a reliable broadband connection, and transmit
      participant audio to a third-party; transfers of large audio tracks
      can also be costly on metered connections.
    * If the audio clip has multiple channels, they will be combined prior
      to being passed to the transcription service if needed.

    """
    # imported at call time to avoid a circular import at module load
    from psychopy.sound.transcribe import transcribe
    return transcribe(self,
                      engine=engine,
                      language=language,
                      expectedWords=expectedWords,
                      config=config)
def load(filename, codec=None):
    """Load an audio clip from file.

    Parameters
    ----------
    filename : str
        File name to load.
    codec : str or None
        Codec to use. If `None`, the format will be implied from the file
        name.

    Returns
    -------
    AudioClip
        Audio clip containing samples loaded from the file.

    """
    # thin module-level wrapper around the class-level loader
    clip = AudioClip.load(filename, codec)
    return clip
def save(filename, clip, codec=None):
    """Save an audio clip to file.

    Parameters
    ----------
    filename : str
        File name to write audio clip to.
    clip : AudioClip
        The clip with audio samples to write.
    codec : str or None
        Format to save audio clip data as. If `None`, the format will be
        implied from the extension at the end of `filename`.

    """
    # thin module-level wrapper; the clip serializes itself
    clip.save(filename, codec)
if __name__ == "__main__":
    # no demo/CLI behavior when this module is run directly
    pass
|
"""TensorFlow V2 API __init__.py files."""
# keep sorted
TENSORFLOW_API_INIT_FILES = [
# BEGIN GENERATED FILES
"__init__.py",
"audio/__init__.py",
"autograph/__init__.py",
"autograph/experimental/__init__.py",
"autodiff/__init__.py",
"bitwise/__init__.py",
"compat/__init__.py",
"config/__init__.py",
"config/experimental/__init__.py",
"config/optimizer/__init__.py",
"config/threading/__init__.py",
"data/__init__.py",
"data/experimental/__init__.py",
"debugging/__init__.py",
"debugging/experimental/__init__.py",
"distribute/__init__.py",
"distribute/cluster_resolver/__init__.py",
"distribute/experimental/__init__.py",
"dtypes/__init__.py",
"errors/__init__.py",
"experimental/__init__.py",
"experimental/tensorrt/__init__.py",
"experimental/dlpack/__init__.py",
"feature_column/__init__.py",
"io/gfile/__init__.py",
"graph_util/__init__.py",
"image/__init__.py",
"io/__init__.py",
"queue/__init__.py",
"linalg/__init__.py",
"linalg/experimental/__init__.py",
"lite/__init__.py",
"lite/experimental/__init__.py",
"lite/experimental/microfrontend/__init__.py",
"lite/experimental/microfrontend/python/__init__.py",
"lite/experimental/microfrontend/python/ops/__init__.py",
"lookup/__init__.py",
"lookup/experimental/__init__.py",
"math/__init__.py",
"math/special/__init__.py",
"mixed_precision/__init__.py",
"mixed_precision/experimental/__init__.py",
"mlir/__init__.py",
"mlir/experimental/__init__.py",
"nest/__init__.py",
"nn/__init__.py",
"profiler/__init__.py",
"profiler/experimental/__init__.py",
"profiler/experimental/client/__init__.py",
"profiler/experimental/server/__init__.py",
"quantization/__init__.py",
"ragged/__init__.py",
"random/__init__.py",
"random/experimental/__init__.py",
"raw_ops/__init__.py",
"saved_model/__init__.py",
"sets/__init__.py",
"signal/__init__.py",
"sparse/__init__.py",
"strings/__init__.py",
"summary/__init__.py",
"summary/experimental/__init__.py",
"sysconfig/__init__.py",
"test/__init__.py",
"tpu/experimental/embedding/__init__.py",
"tpu/experimental/__init__.py",
"tpu/__init__.py",
"train/__init__.py",
"train/experimental/__init__.py",
"version/__init__.py",
"xla/__init__.py",
"xla/experimental/__init__.py",
# END GENERATED FILES
]
# Relative paths of the generated `__init__.py` files for the `tf.keras`
# portion of the public API namespace.
KERAS_API_INIT_FILES = [
    "__init__.py",
    "keras/__init__.py",
    "keras/activations/__init__.py",
    "keras/applications/__init__.py",
    "keras/applications/densenet/__init__.py",
    "keras/applications/efficientnet/__init__.py",
    "keras/applications/imagenet_utils/__init__.py",
    "keras/applications/inception_resnet_v2/__init__.py",
    "keras/applications/inception_v3/__init__.py",
    "keras/applications/mobilenet/__init__.py",
    "keras/applications/mobilenet_v2/__init__.py",
    "keras/applications/nasnet/__init__.py",
    "keras/applications/resnet/__init__.py",
    "keras/applications/resnet_v2/__init__.py",
    "keras/applications/resnet50/__init__.py",
    "keras/applications/vgg16/__init__.py",
    "keras/applications/vgg19/__init__.py",
    "keras/applications/xception/__init__.py",
    "keras/backend/__init__.py",
    "keras/callbacks/__init__.py",
    "keras/constraints/__init__.py",
    "keras/datasets/__init__.py",
    "keras/datasets/boston_housing/__init__.py",
    "keras/datasets/cifar10/__init__.py",
    "keras/datasets/cifar100/__init__.py",
    "keras/datasets/fashion_mnist/__init__.py",
    "keras/datasets/imdb/__init__.py",
    "keras/datasets/mnist/__init__.py",
    "keras/datasets/reuters/__init__.py",
    "keras/estimator/__init__.py",
    "keras/experimental/__init__.py",
    "keras/initializers/__init__.py",
    "keras/layers/__init__.py",
    "keras/layers/experimental/__init__.py",
    "keras/layers/experimental/preprocessing/__init__.py",
    "keras/losses/__init__.py",
    "keras/metrics/__init__.py",
    "keras/mixed_precision/__init__.py",
    "keras/mixed_precision/experimental/__init__.py",
    "keras/premade/__init__.py",
    "keras/models/__init__.py",
    "keras/optimizers/__init__.py",
    "keras/optimizers/schedules/__init__.py",
    "keras/preprocessing/__init__.py",
    "keras/preprocessing/image/__init__.py",
    "keras/preprocessing/sequence/__init__.py",
    "keras/preprocessing/text/__init__.py",
    "keras/regularizers/__init__.py",
    "keras/utils/__init__.py",
    "keras/wrappers/__init__.py",
    "keras/wrappers/scikit_learn/__init__.py",
]
|
# -*- coding: utf-8 -*-
# Copyright © 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from nixio.exceptions.exceptions import InvalidSlice
import os
import time
import unittest
from collections import OrderedDict
import numpy as np
import nixio as nix
from nixio.exceptions import DuplicateName, UnsupportedLinkType
from .tmp import TempDir
class TestMultiTags(unittest.TestCase):
def setUp(self):
    """Create a temporary NIX file with the blocks, arrays and multi-tags
    shared by all tests in this case.
    """
    interval = 1.0
    ticks = [1.2, 2.3, 3.4, 4.5, 6.7]
    unit = "ms"
    self.tmpdir = TempDir("mtagtest")
    self.testfilename = os.path.join(self.tmpdir.path, "mtagtest.nix")
    self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)
    self.block = self.file.create_block("test block", "recordingsession")
    # two empty array/tag pairs used by the simple attribute tests
    self.my_array = self.block.create_data_array("my array", "test", nix.DataType.Int16, (0, 0))
    self.my_tag = self.block.create_multi_tag("my tag", "tag", self.my_array)
    self.your_array = self.block.create_data_array("your array", "test", nix.DataType.Int16, (0, 0))
    self.your_tag = self.block.create_multi_tag("your tag", "tag", self.your_array)
    # 3-D array filled with an increasing counter, used by feature tests
    self.data_array = self.block.create_data_array("featureTest", "test", nix.DataType.Double, (2, 10, 5))
    data = np.zeros((2, 10, 5))
    value = 0.
    for i in range(2):
        value = 0  # counter restarts on every page of the first axis
        for j in range(10):
            for k in range(5):
                value += 1
                data[i, j, k] = value
    self.data_array[:, :, :] = data
    # dimensions: set (labels), sampled (interval) and range (ticks)
    set_dim = self.data_array.append_set_dimension()
    set_dim.labels = ["label_a", "label_b"]
    sampled_dim = self.data_array.append_sampled_dimension(interval)
    sampled_dim.unit = unit
    range_dim = self.data_array.append_range_dimension(ticks)
    range_dim.unit = unit
    # two events, one position/extent value per dimension of data_array
    event_positions = np.zeros((2, 3))
    event_positions[0, 0] = 0.0
    event_positions[0, 1] = 3.0
    event_positions[0, 2] = 3.4
    event_positions[1, 0] = 0.0
    event_positions[1, 1] = 8.0
    event_positions[1, 2] = 2.3
    event_extents = np.zeros((2, 3))
    event_extents[0, 0] = 1.0
    event_extents[0, 1] = 6.0
    event_extents[0, 2] = 2.3
    event_extents[1, 0] = 1.0
    event_extents[1, 1] = 3.0
    event_extents[1, 2] = 2.0
    event_labels = ["event 1", "event 2"]
    dim_labels = ["dim 0", "dim 1", "dim 2"]
    self.event_array = self.block.create_data_array("positions", "test",
                                                    data=event_positions)
    self.extent_array = self.block.create_data_array("extents", "test",
                                                     data=event_extents)
    extent_set_dim = self.extent_array.append_set_dimension()
    extent_set_dim.labels = event_labels
    extent_set_dim = self.extent_array.append_set_dimension()
    extent_set_dim.labels = dim_labels
    # the tag exercised by the feature_data tests
    self.feature_tag = self.block.create_multi_tag("feature_tag", "events",
                                                   self.event_array)
    self.feature_tag.extents = self.extent_array
    self.feature_tag.references.append(self.data_array)
def tearDown(self):
    """Dispose of the file and temporary directory created in ``setUp``."""
    # drop the test block before closing the file
    del self.file.blocks[self.block.id]
    self.file.close()
    self.tmpdir.cleanup()
def test_multi_tag_new_constructor(self):
    """Creating a multi-tag from raw position/extent data should build the
    backing arrays implicitly, and creation errors must not leak them.
    """
    pos = np.random.random_sample((2, 3))
    ext = np.random.random_sample((2, 3))
    mt = self.block.create_multi_tag("conv_test", "test", pos, ext)
    np.testing.assert_almost_equal(pos, mt.positions[:])
    np.testing.assert_almost_equal(ext, mt.extents[:])
    # try reset positions and ext
    # implicitly created arrays follow the "<name>-positions/-extents"
    # naming scheme
    assert mt.positions.name == "conv_test-positions"
    assert mt.positions.type == "test-positions"
    assert mt.extents.name == "conv_test-extents"
    assert mt.extents.type == "test-extents"
    # test positions extents deleted if multitag creation failed
    pos = None
    ext = np.random.random_sample((2, 3))
    self.assertRaises(ValueError, self.block.create_multi_tag,
                      "err_test", "test", pos, ext)
    # a pre-existing array with the derived name must trigger DuplicateName
    self.block.create_data_array("dup_test-"
                                 "positions", "test", data=[0])
    pos = np.random.random_sample((2, 3))
    ext = np.random.random_sample((2, 3))
    self.assertRaises(DuplicateName, self.block.create_multi_tag,
                      "dup_test", "test", pos, ext)
    del self.block.data_arrays["dup_test-positions"]
    self.block.create_data_array("dup_test2-"
                                 "extents", "test", data=[0])
    pos = np.random.random_sample((2, 3))
    ext = np.random.random_sample((2, 3))
    self.assertRaises(DuplicateName, self.block.create_multi_tag,
                      "dup_test2", "test", pos, ext)
    # non-numeric extent data is rejected with TypeError
    pos = np.random.random_sample((2, 3))
    ext = [None, None]
    self.assertRaises(TypeError, self.block.create_multi_tag,
                      "dup_test3", "test", pos, ext)
def test_multi_tag_flex(self):
    """Tag 1-D, 2-D and 3-D references with position arrays of varying
    dimensionality and check the tagged slices that come back.
    """
    pos1d = self.block.create_data_array("pos1", "pos", data=[[0], [1]])
    pos1d1d = self.block.create_data_array("pos1d1d", "pos", data=[0, 1])
    pos2d = self.block.create_data_array("pos2", "pos", data=[[0, 0], [1, 1]])
    pos3d = self.block.create_data_array("pos3", "pos", data=[[0, 1, 2], [1, 2, 3]])
    ext1d = self.block.create_data_array('ext1', 'ext', data=[[1], [1]])
    ext1d1d = self.block.create_data_array('ext1d1d', 'ext', data=[1, 1])
    ext2d = self.block.create_data_array('ext2', 'ext', data=[[1, 2], [0, 2]])
    ext3d = self.block.create_data_array('ext3', 'ext', data=[[1, 1, 1], [1, 1, 1]])
    mt1d = self.block.create_multi_tag("mt1d", "mt", pos1d)
    mt1d.extents = ext1d
    mt1d1d = self.block.create_multi_tag("mt1d1d", "mt", pos1d1d)
    mt1d1d.extents = ext1d1d
    mt2d = self.block.create_multi_tag("mt2d", "mt", pos2d)
    mt2d.extents = ext2d
    mt3d = self.block.create_multi_tag("mt3d", "mt", pos3d)
    mt3d.extents = ext3d
    # create some references
    da1d = self.block.create_data_array('ref1d', 'ref', data=np.arange(10))
    da1d.append_sampled_dimension(1., label="time", unit="s")
    da2d = self.block.create_data_array('ref2d', 'ref', data=np.arange(100).reshape((10, 10)))
    da2d.append_sampled_dimension(1., label="time", unit="s")
    da2d.append_set_dimension()
    da3d = self.block.create_data_array('ref3d', 'ref', data=np.arange(1000).reshape((10, 10, 10)))
    da3d.append_sampled_dimension(1., label="time", unit="s")
    da3d.append_set_dimension()
    da3d.append_set_dimension()
    mt1d.references.extend([da1d, da2d, da3d])
    mt1d1d.references.extend([da1d, da2d, da3d])
    mt2d.references.extend([da1d, da2d, da3d])
    mt3d.references.extend([da1d, da2d, da3d])
    # each tagged slice should match the equivalent direct numpy slice
    np.testing.assert_almost_equal(mt1d.tagged_data(0, 0)[:], da1d[0:1])
    np.testing.assert_almost_equal(mt1d.tagged_data(0, 1)[:], da2d[0:1, :])
    np.testing.assert_almost_equal(mt1d.tagged_data(0, 2)[:], da3d[0:1, :, :])
    np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 0)[:], da1d[0:1])
    np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 1)[:], da2d[0:1, :])
    np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 2)[:], da3d[0:1, :, :])
    np.testing.assert_almost_equal(mt2d.tagged_data(0, 0)[:], da1d[0:1])
    np.testing.assert_almost_equal(mt2d.tagged_data(0, 1)[:], da2d[0:1, 0:2])
    np.testing.assert_almost_equal(mt2d.tagged_data(0, 2)[:], da3d[0:1, 0:2, :])
    np.testing.assert_almost_equal(mt3d.tagged_data(1, 0)[:], da1d[1:2])
    np.testing.assert_almost_equal(mt3d.tagged_data(1, 1)[:], da2d[1:2, 2:3])
    np.testing.assert_almost_equal(mt3d.tagged_data(1, 2)[:], da3d[1:2, 2:3, 3:4])
def test_multi_tag_eq(self):
    """A tag equals itself and differs from other tags (and None)."""
    self.assertTrue(self.my_tag == self.my_tag)
    self.assertFalse(self.my_tag == self.your_tag)
    self.assertTrue(self.my_tag is not None)
def test_multi_tag_id(self):
    """A freshly created multi-tag gets an id assigned."""
    self.assertIsNotNone(self.my_tag.id)
def test_multi_tag_name(self):
    """A freshly created multi-tag carries a name."""
    self.assertIsNotNone(self.my_tag.name)
def test_multi_tag_type(self):
    """The type attribute is mandatory: never None, but changeable."""
    self.assertIsNotNone(self.my_tag.type)
    # clearing the type must be rejected
    with self.assertRaises(Exception):
        self.my_tag.type = None
    self.my_tag.type = "foo type"
    self.assertEqual(self.my_tag.type, "foo type")
def test_multi_tag_definition(self):
    """The definition is optional: settable and clearable."""
    self.assertIsNone(self.my_tag.definition)
    self.my_tag.definition = "definition"
    self.assertEqual(self.my_tag.definition, "definition")
    self.my_tag.definition = None
    self.assertIsNone(self.my_tag.definition)
def test_multi_tag_timestamps(self):
    """created_at/updated_at are populated; created_at can be forced."""
    self.assertGreater(self.my_tag.created_at, 0)
    self.assertGreater(self.my_tag.updated_at, 0)
    self.my_tag.force_created_at(1403530068)
    self.assertEqual(self.my_tag.created_at, 1403530068)
def test_multi_tag_units(self):
    """Units start empty, accept a list, and can be reset to empty."""
    self.assertEqual(self.my_tag.units, ())
    self.my_tag.units = ["mV", "ms"]
    self.assertEqual(self.my_tag.units, ("mV", "ms"))
    self.my_tag.units = []  # a tuple would work just as well
    self.assertEqual(self.my_tag.units, ())
def test_multi_tag_positions(self):
    """Positions are mandatory but may be swapped for another array."""
    self.assertIsNotNone(self.my_tag.positions)
    original = self.my_tag.positions
    replacement = self.block.create_data_array("pos", "position",
                                               nix.DataType.Int16,
                                               (0, 0))
    self.my_tag.positions = replacement
    self.assertEqual(self.my_tag.positions, replacement)
    # clearing positions entirely is not allowed
    with self.assertRaises(TypeError):
        self.my_tag.positions = None
    self.my_tag.positions = original
    self.assertEqual(self.my_tag.positions, original)
def test_multi_tag_extents(self):
    """Extents are optional: settable to an array and clearable."""
    self.assertIsNone(self.my_tag.extents)
    ext_array = self.block.create_data_array("ext", "extent",
                                             nix.DataType.Int16, (0, 0))
    self.my_tag.extents = ext_array
    self.assertEqual(self.my_tag.extents, ext_array)
    self.my_tag.extents = None
    self.assertIsNone(self.my_tag.extents)
def test_multi_tag_references(self):
    """References accept data arrays and support lookup by index, name
    and id as well as deletion.
    """
    assert len(self.my_tag.references) == 0
    # only data entities may be appended
    self.assertRaises(TypeError, self.my_tag.references.append, 100)
    reference1 = self.block.create_data_array("reference1", "stimuli",
                                              nix.DataType.Int16, (0,))
    reference2 = self.block.create_data_array("reference2", "stimuli",
                                              nix.DataType.Int16, (0,))
    self.my_tag.references.append(reference1)
    self.my_tag.references.append(reference2)
    assert len(self.my_tag.references) == 2
    assert reference1 in self.my_tag.references
    assert reference2 in self.my_tag.references
    # id and name access
    assert reference1 == self.my_tag.references[reference1.name]
    assert reference1 == self.my_tag.references[reference1.id]
    assert reference2 == self.my_tag.references[reference2.name]
    assert reference2 == self.my_tag.references[reference2.id]
    assert reference1.name in self.my_tag.references
    assert reference2.name in self.my_tag.references
    assert reference1.id in self.my_tag.references
    assert reference2.id in self.my_tag.references
    # deletion works by object as well
    del self.my_tag.references[reference2]
    assert self.my_tag.references[0] == reference1
    del self.my_tag.references[reference1]
    assert len(self.my_tag.references) == 0
def test_multi_tag_features(self):
    """Features can wrap both DataArray and DataFrame entities and are
    addressable by index, feature id, and the data entity's name/id.
    """
    assert len(self.my_tag.features) == 0
    data_array = self.block.create_data_array("feature", "stimuli",
                                              nix.DataType.Int16, (0,))
    feature = self.my_tag.create_feature(data_array,
                                         nix.LinkType.Untagged)
    assert len(self.my_tag.features) == 1
    assert feature in self.my_tag.features
    assert feature.id in self.my_tag.features
    assert "notexist" not in self.my_tag.features
    assert feature.id == self.my_tag.features[0].id
    assert feature.id == self.my_tag.features[-1].id
    # id and name access
    assert feature.id == self.my_tag.features[feature.id].id
    assert feature.id == self.my_tag.features[data_array.id].id
    assert feature.id == self.my_tag.features[data_array.name].id
    assert data_array == self.my_tag.features[data_array.id].data
    assert data_array == self.my_tag.features[data_array.name].data
    assert data_array.id in self.my_tag.features
    assert data_array.name in self.my_tag.features
    # a DataFrame may also serve as feature data
    data_frame = self.block.create_data_frame(
        "dataframe feature", "test",
        col_dict=OrderedDict([("number", nix.DataType.Float)]),
        data=[(10.,)]
    )
    df_feature = self.my_tag.create_feature(data_frame, nix.LinkType.Untagged)
    assert len(self.my_tag.features) == 2
    assert df_feature in self.my_tag.features
    assert df_feature.id in self.my_tag.features
    assert df_feature.id == self.my_tag.features[1].id
    assert df_feature.id == self.my_tag.features[-1].id
    # id and name access
    assert df_feature.id == self.my_tag.features[df_feature.id].id
    assert df_feature.id == self.my_tag.features[data_frame.id].id
    assert df_feature.id == self.my_tag.features[data_frame.name].id
    assert data_frame == self.my_tag.features[data_frame.id].data
    assert data_frame == self.my_tag.features[data_frame.name].data
    assert data_frame.id in self.my_tag.features
    assert data_frame.name in self.my_tag.features
    assert isinstance(self.my_tag.features[0].data, nix.DataArray)
    assert isinstance(self.my_tag.features[1].data, nix.DataFrame)
    del self.my_tag.features[0]
    assert len(self.my_tag.features) == 1
    del self.my_tag.features[0]
    assert len(self.my_tag.features) == 0
def test_multi_tag_tagged_data(self):
    """Exercise `tagged_data` on 1-D and 3-D references, including
    inclusive stop rules, invalid positions, and out-of-range errors.
    """
    sample_iv = 0.001
    x_data = np.arange(0, 10, sample_iv)
    y_data = np.sin(2 * np.pi * x_data)
    block = self.block
    da = block.create_data_array("sin", "data", data=y_data)
    da.unit = 'dB'
    dim = da.append_sampled_dimension(sample_iv)
    dim.unit = 's'
    # one event at t=0 ms with a 2000 ms extent (tag units are ms,
    # dimension unit is s)
    pos = block.create_data_array('pos1', 'positions', data=np.array([0.]).reshape(1, 1))
    pos.append_set_dimension()
    pos.append_set_dimension()
    pos.unit = 'ms'
    ext = block.create_data_array('ext1', 'extents', data=np.array([2000.]).reshape(1, 1))
    ext.append_set_dimension()
    ext.append_set_dimension()
    ext.unit = 'ms'
    mtag = block.create_multi_tag("sin1", "tag", pos)
    mtag.extents = ext
    mtag.units = ['ms']
    mtag.references.append(da)
    assert mtag.tagged_data(0, 0).shape == (2000,)
    assert np.array_equal(y_data[:2000], mtag.tagged_data(0, 0)[:])
    # the inclusive stop rule takes one extra sample
    assert mtag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive).shape == (2001,)
    assert np.array_equal(y_data[:2001], mtag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)[:])
    # get by name
    data = mtag.tagged_data(0, da.name)
    assert data.shape == (2000,)
    assert np.array_equal(y_data[:2000], data[:])
    # get by id
    data = mtag.tagged_data(0, da.id)
    assert data.shape == (2000,)
    assert np.array_equal(y_data[:2000], data[:])
    # multi dimensional data
    # position 1 should fail since the position in the third dimension does not point to a valid point
    # positon 2 and 3 should deliver valid DataViews
    # same for segment 0 should again return an invalid DataView because of dimension 3
    sample_iv = 1.0
    ticks = [1.2, 2.3, 3.4, 4.5, 6.7]
    unit = "ms"
    pos = self.block.create_data_array("pos", "test", data=[[1, 1, 1], [1, 1, 1.2], [1, 1, 1.2]])
    pos.append_set_dimension()
    pos.append_set_dimension()
    ext = self.block.create_data_array("ext", "test", data=[[1, 5, 2], [1, 5, 2], [0, 4, 1]])
    ext.append_set_dimension()
    ext.append_set_dimension()
    units = ["none", "ms", "ms"]
    data = np.random.random_sample((3, 10, 5))
    da = self.block.create_data_array("dimtest", "test", data=data)
    setdim = da.append_set_dimension()
    setdim.labels = ["Label A", "Label B", "Label D"]
    samdim = da.append_sampled_dimension(sample_iv)
    samdim.unit = unit
    randim = da.append_range_dimension(ticks)
    randim.unit = unit
    postag = self.block.create_multi_tag("postag", "event", pos)
    postag.references.append(da)
    postag.units = units
    segtag = self.block.create_multi_tag("region", "segment", pos)
    segtag.references.append(da)
    segtag.extents = ext
    segtag.units = units
    # an invalid position yields an invalid, empty DataView
    posdata = postag.tagged_data(0, 0)
    assert not posdata.valid
    assert "InvalidSlice error" in posdata.debug_message
    assert posdata.data_extent is None
    assert posdata.shape is None
    with self.assertRaises(InvalidSlice):
        posdata._write_data(np.random.randn(1))
    assert sum(posdata[:].shape) == 0
    posdata = postag.tagged_data(1, 0)
    assert posdata.valid
    assert posdata.debug_message == ""
    assert len(posdata.shape) == 3
    assert posdata.shape == (1, 1, 1)
    assert np.isclose(posdata[0, 0, 0], data[1, 1, 0])
    posdata = postag.tagged_data(2, 0)
    assert len(posdata.shape) == 3
    assert posdata.shape == (1, 1, 1)
    assert np.isclose(posdata[0, 0, 0], data[1, 1, 0])
    segdata = segtag.tagged_data(1, 0)
    assert len(segdata.shape) == 3
    assert segdata.shape == (1, 5, 2)
    segdata = segtag.tagged_data(2, 0)
    assert len(segdata.shape) == 3
    assert segdata.shape == (1, 4, 1)
    # retrieve all positions for all references
    for ridx, _ in enumerate(mtag.references):
        for pidx, _ in enumerate(mtag.positions):
            mtag.tagged_data(pidx, ridx)
    # positions beyond the data raise IndexError
    wrong_pos = self.block.create_data_array("incorpos", "test", data=[[1, 1, 1], [100, 1, 1]])
    wrong_pos.append_set_dimension()
    wrong_pos.append_set_dimension()
    postag.positions = wrong_pos
    self.assertRaises(IndexError, postag.tagged_data, 1, 1)
    # extents beyond the data raise IndexError too
    wrong_ext = self.block.create_data_array("incorext", "test", data=[[1, 500, 2], [0, 4, 1]])
    wrong_ext.append_set_dimension()
    wrong_ext.append_set_dimension()
    segtag.extents = wrong_ext
    self.assertRaises(IndexError, segtag.tagged_data, 0, 1)
def test_multi_tag_data_coefficients(self):
    """Tagged data must reflect the array's polynomial calibration
    (polynom_coefficients / expansion_origin).
    """
    sample_iv = 0.001
    x_data = np.arange(0, 10, sample_iv)
    y_data = np.sin(2 * np.pi * x_data)
    block = self.block
    da = block.create_data_array("sin", "data", data=y_data)
    da.unit = 'V'
    # calibration applied on read: 10 + 0.3 * raw
    da.polynom_coefficients = (10, 0.3)
    dim = da.append_sampled_dimension(sample_iv)
    dim.unit = 's'
    pos = block.create_data_array('pos1', 'positions', data=np.array([0.]).reshape(1, 1))
    pos.append_set_dimension()
    pos.append_set_dimension()
    pos.unit = 'ms'
    ext = block.create_data_array('ext1', 'extents', data=np.array([2000.]).reshape(1, 1))
    ext.append_set_dimension()
    ext.append_set_dimension()
    ext.unit = 'ms'
    mtag = block.create_multi_tag("sin1", "tag", pos)
    mtag.extents = ext
    mtag.units = ['ms']
    mtag.references.append(da)
    # tagged data and direct reads must agree, both before and after
    # changing the expansion origin
    assert np.array_equal(da[:2000], mtag.tagged_data(0, 0)[:])
    da.expansion_origin = 0.89
    assert np.array_equal(da[:2000], mtag.tagged_data(0, 0)[:])
def test_multi_tag_tagged_data_1d(self):
    # MultiTags to vectors behave a bit differently
    # Testing separately
    oneddata = self.block.create_data_array("1dda", "data",
                                            data=list(range(100)))
    oneddata.append_sampled_dimension(0.1)
    # scalar positions (no extents) into the 1-D array
    onedpos = self.block.create_data_array("1dpos", "positions",
                                           data=[1, 9, 9.5])
    onedmtag = self.block.create_multi_tag("2dmt", "mtag",
                                           positions=onedpos)
    onedmtag.references.append(oneddata)
    # every position must be retrievable without raising
    for pidx, _ in enumerate(onedmtag.positions):
        onedmtag.tagged_data(pidx, 0)
def test_multi_tag_feature_data(self):
    """Retrieve feature data for Indexed, Tagged and Untagged link types,
    both by feature index and by data entity name.
    """
    index_data = self.block.create_data_array("indexed feature data", "test",
                                              dtype=nix.DataType.Double, shape=(10, 10))
    dim1 = index_data.append_sampled_dimension(1.0)
    dim1.unit = "ms"
    dim2 = index_data.append_sampled_dimension(1.0)
    dim2.unit = "ms"
    # row i holds 100*i + 1 .. 100*i + 10
    data1 = np.zeros((10, 10))
    value = 0.0
    total = 0.0
    for i in range(10):
        value = 100 * i
        for j in range(10):
            value += 1
            data1[i, j] = value
            total += data1[i, j]
    index_data[:, :] = data1
    tagged_data = self.block.create_data_array("tagged feature data", "test",
                                               dtype=nix.DataType.Double, shape=(10, 20, 10))
    dim1 = tagged_data.append_sampled_dimension(1.0)
    dim1.unit = "ms"
    dim2 = tagged_data.append_sampled_dimension(1.0)
    dim2.unit = "ms"
    dim3 = tagged_data.append_sampled_dimension(1.0)
    dim3.unit = "ms"
    data2 = np.zeros((10, 20, 10))
    for i in range(10):
        value = 100 * i
        for j in range(20):
            for k in range(10):
                value += 1
                data2[i, j, k] = value
    tagged_data[:, :, :] = data2
    self.feature_tag.create_feature(index_data, nix.LinkType.Indexed)
    self.feature_tag.create_feature(tagged_data, nix.LinkType.Tagged)
    self.feature_tag.create_feature(index_data, nix.LinkType.Untagged)
    # preparations done, actually test
    assert len(self.feature_tag.features) == 3
    # indexed feature
    feat_data = self.feature_tag.feature_data(0, 0)
    assert len(feat_data.shape) == 2
    assert feat_data.size == 10
    assert np.sum(feat_data) == 55
    # disabled, don't understand how it could ever have worked,
    # there are only 3 positions
    # NOTE(review): despite the comment above, the call below is active
    # and expects row 9 of the indexed feature — confirm intent
    data_view = self.feature_tag.feature_data(9, 0)
    assert np.sum(data_view[:, :]) == 9055
    # untagged feature
    data_view = self.feature_tag.feature_data(0, 2)
    assert data_view.size == 100
    data_view = self.feature_tag.feature_data(0, 2)
    assert data_view.size == 100
    assert np.sum(data_view) == total
    # tagged feature
    data_view = self.feature_tag.feature_data(0, 1)
    assert len(data_view.shape) == 3
    data_view = self.feature_tag.feature_data(1, 1)
    assert len(data_view.shape) == 3
    # === retrieve by name ===
    # indexed feature
    feat_data = self.feature_tag.feature_data(0, index_data.name)
    assert len(feat_data.shape) == 2
    assert feat_data.size == 10
    assert np.sum(feat_data) == 55
    # disabled, there are only 3 positions
    data_view = self.feature_tag.feature_data(9, index_data.name)
    assert np.sum(data_view[:, :]) == 9055
    # tagged feature
    data_view = self.feature_tag.feature_data(0, tagged_data.name)
    assert len(data_view.shape) == 3
    data_view = self.feature_tag.feature_data(1, tagged_data.name)
    assert len(data_view.shape) == 3

    def out_of_bounds():
        self.feature_tag.feature_data(2, 1)
    # only two positions have valid tagged feature slices
    self.assertRaises(IndexError, out_of_bounds)
def test_timestamp_autoupdate(self):
    """Replacing positions/extents must bump the multi tag's updated_at."""
    positions = self.block.create_data_array("positions.time", "test.time",
                                             nix.DataType.Int16, (0, 0))
    mtag = self.block.create_multi_tag("mtag.time", "test.time", positions)
    before = mtag.updated_at
    time.sleep(1)  # ensure the clock advances between updates
    mtag.positions = self.block.create_data_array("pos2.time", "test.time",
                                                  nix.DataType.Int8, (0,))
    self.assertNotEqual(before, mtag.updated_at)
    before = mtag.updated_at
    time.sleep(1)  # ensure the clock advances between updates
    mtag.extents = self.block.create_data_array("extents.time", "test.time",
                                                nix.DataType.Int8, (0,))
    self.assertNotEqual(before, mtag.updated_at)
def test_timestamp_noautoupdate(self):
    """With auto-update disabled, replacing positions/extents must leave
    the multi tag's updated_at untouched."""
    # disable automatic timestamp maintenance for the whole file
    self.file.auto_update_timestamps = False
    positions = self.block.create_data_array("positions.time", "test.time",
                                             nix.DataType.Int16, (0, 0))
    mtag = self.block.create_multi_tag("mtag.time", "test.time", positions)
    before = mtag.updated_at
    time.sleep(1)  # the clock advances, but the timestamp should not
    mtag.positions = self.block.create_data_array("pos2.time", "test.time",
                                                  nix.DataType.Int8, (0,))
    self.assertEqual(before, mtag.updated_at)
    before = mtag.updated_at
    time.sleep(1)  # the clock advances, but the timestamp should not
    mtag.extents = self.block.create_data_array("extents.time", "test.time",
                                                nix.DataType.Int8, (0,))
    self.assertEqual(before, mtag.updated_at)
def test_multi_tag_feature_dataframe(self):
    """DataFrame features on a MultiTag: Tagged links are rejected,
    Untagged returns the whole frame, Indexed returns one row."""
    numbers = np.random.random(20)
    number_feat = self.block.create_data_frame(
        "number feature", "test",
        col_dict=OrderedDict([("number", nix.DataType.Float)]),
        data=[(n,) for n in numbers]
    )
    ramp_cols = OrderedDict([("name", nix.DataType.String),
                             ("duration", nix.DataType.Double)])
    ramp_rows = [("One", 0.1), ("Two", 0.2), ("Three", 0.3), ("Four", 0.4),
                 ("Five", 0.5), ("Six", 0.6), ("Seven", 0.7), ("Eight", 0.8),
                 ("Nine", 0.9), ("Ten", 1.0)]
    ramp_feat = self.block.create_data_frame("ramp feature", "test",
                                             col_dict=ramp_cols,
                                             data=ramp_rows)
    ramp_feat.label = "voltage"
    ramp_feat.units = (None, "s")
    pos_tag = self.block.create_multi_tag("feature test", "test", [4, 7, 8])
    # feature order: number Untagged(0), number Indexed(1),
    #                ramp   Untagged(2), ramp   Indexed(3)
    for frame in (number_feat, ramp_feat):
        # Tagged links are not supported for data frame features
        with self.assertRaises(UnsupportedLinkType):
            pos_tag.create_feature(frame, nix.LinkType.Tagged)
        pos_tag.create_feature(frame, nix.LinkType.Untagged)
        pos_tag.create_feature(frame, nix.LinkType.Indexed)
    assert len(pos_tag.features) == 4

    def verify():
        for idx, _ in enumerate(pos_tag.positions):
            assert np.all(pos_tag.feature_data(idx, 0)[:] == number_feat[:])
            assert np.all(pos_tag.feature_data(idx, 1)[:] == number_feat[idx])
            assert np.all(pos_tag.feature_data(idx, 2)[:] == ramp_feat[:])
            assert np.all(pos_tag.feature_data(idx, 3)[:] == ramp_feat[idx])

    verify()
    # extents are irrelevant for data frame features; results must not change
    pos_tag.extents = self.block.create_data_array("feature test.extents",
                                                   "test", data=[2, 2, 5])
    verify()
def test_multi_tag_tagged_data_slice_mode(self):
    """Check SliceMode behaviour of MultiTag.tagged_data.

    A 3D array (set x sampled x range dimension) is tagged twice:
    ``exact_tag`` has position+extent landing exactly on dimension
    ticks, ``midpoint_tag`` has them landing between ticks.  The
    default stop rule is exclusive; Inclusive extends the slice by one
    entry only when the end point hits a tick exactly.
    """
    data = np.random.random_sample((3, 100, 10))
    da = self.block.create_data_array("signals", "test.signals", data=data)
    da.unit = "mV"
    # dim1: set dimension with three labelled signals
    da.append_set_dimension(labels=["A", "B", "C"])
    # dim2: sampled at 1 ms
    sample_iv = 0.001
    timedim = da.append_sampled_dimension(sampling_interval=sample_iv)
    timedim.unit = "s"
    # dim3: range dimension with explicit ticks
    posdim = da.append_range_dimension([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])
    posdim.unit = "mm"
    # exact_tag has a pos+ext that is exactly equal to a dimension tick
    exact_tag = self.block.create_multi_tag("tickpoint", "test.tag",
                                            positions=[(0, 0.03, 0.0011), (1, 0.05, 0.0015)],
                                            extents=[(1, 0.02, 0.0005), (1, 0.04, 0.0003)])
    exact_tag.units = ["none", "s", "m"]
    exact_tag.references.append(da)
    # FIRST TAG
    # dim2: [0.001, 0.002, ..., 0.03, 0.031, ..., 0.049, 0.05, 0.051, ...]
    # ^ pos [30] ^ pos+ext [50]
    # Inclusive mode includes index 50, exclusive does not
    #
    # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]
    # ^ pos [1] ^ pos+ext [6]
    # Inclusive mode includes index 6, exclusive does not
    slice_default = exact_tag.tagged_data(0, 0)
    assert slice_default.shape == (1, 20, 5)
    np.testing.assert_array_equal(slice_default, da[0:1, 30:50, 1:6])  # default exclusive
    slice_inclusive = exact_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)
    assert slice_inclusive.shape == (2, 21, 6)
    np.testing.assert_array_equal(slice_inclusive, da[0:2, 30:51, 1:7])
    slice_exclusive = exact_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Exclusive)
    assert slice_exclusive.shape == (1, 20, 5)
    np.testing.assert_array_equal(slice_exclusive, da[0:1, 30:50, 1:6])
    # SECOND TAG
    # dim2: [0.001, 0.002, ..., 0.05, 0.051, ..., 0.089, 0.09, 0.091, ...]
    # ^ pos [50] ^ pos+ext [90]
    # Inclusive mode includes index 90, exclusive does not
    #
    # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]
    # ^ pos [5] ^ pos+ext [8]
    # Inclusive mode includes index 8, exclusive does not
    slice_default = exact_tag.tagged_data(1, 0)
    assert slice_default.shape == (1, 40, 3)
    np.testing.assert_array_equal(slice_default, da[1:2, 50:90, 5:8])  # default exclusive
    slice_inclusive = exact_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Inclusive)
    assert slice_inclusive.shape == (2, 41, 4)
    np.testing.assert_array_equal(slice_inclusive, da[1:3, 50:91, 5:9])
    slice_exclusive = exact_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Exclusive)
    assert slice_exclusive.shape == (1, 40, 3)
    np.testing.assert_array_equal(slice_exclusive, da[1:2, 50:90, 5:8])
    # midpoint_tag has a pos+ext that falls between dimension ticks
    midpoint_tag = self.block.create_multi_tag("midpoint", "test.tag",
                                               positions=([0, 0.03, 0.0011], [1, 0.05, 0.0015]),
                                               extents=([1, 0.0301, 0.00051], [1, 0.0401, 0.00031]))  # .1 offset
    midpoint_tag.units = ["none", "s", "m"]
    # FIRST TAG
    # dim2: [0.001, 0.002, ..., 0.03, 0.031, ..., 0.059, 0.06,| 0.061, ...]
    # ^ pos [30] ^ pos+ext [60] + 0.1
    # Both inclusive and exclusive include index 60
    #
    # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,| 1.7, 1.8, 1.9]
    # ^ pos [1] ^ pos+ext [6] + 0.1
    # Both inclusive and exclusive include index 6
    midpoint_tag.references.append(da)
    # all slicing is inclusive since the pos+ext points are between ticks
    slice_default = midpoint_tag.tagged_data(0, 0)
    assert slice_default.shape == (1, 31, 6)
    np.testing.assert_array_equal(slice_default, da[0:1, 30:61, 1:7])
    slice_inclusive = midpoint_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)
    assert slice_inclusive.shape == (2, 31, 6)
    np.testing.assert_array_equal(slice_inclusive, da[0:2, 30:61, 1:7])
    slice_exclusive = midpoint_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Exclusive)
    assert slice_exclusive.shape == (1, 31, 6)
    np.testing.assert_array_equal(slice_exclusive, da[0:1, 30:61, 1:7])
    # SECOND TAG
    # dim2: [0.001, 0.002, ..., 0.05, 0.051, ..., 0.089, 0.09,| 0.091, ...]
    # ^ pos [50] ^ pos+ext [90] + 0.1
    # Both inclusive and exclusive include index 90
    #
    # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,| 1.9]
    # ^ pos [5] ^ pos+ext [8] + 0.1
    # Both inclusive and exclusive include index 8
    # NOTE(review): da was already appended to midpoint_tag.references
    # above -- presumably redundant; confirm against the nixio API.
    midpoint_tag.references.append(da)
    # all slicing is inclusive since the pos+ext points are between ticks
    slice_default = midpoint_tag.tagged_data(1, 0)
    assert slice_default.shape == (1, 41, 4)
    np.testing.assert_array_equal(slice_default, da[1:2, 50:91, 5:9])
    slice_inclusive = midpoint_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Inclusive)
    assert slice_inclusive.shape == (2, 41, 4)
    np.testing.assert_array_equal(slice_inclusive, da[1:3, 50:91, 5:9])
    slice_exclusive = midpoint_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Exclusive)
    assert slice_exclusive.shape == (1, 41, 4)
    np.testing.assert_array_equal(slice_exclusive, da[1:2, 50:91, 5:9])
def test_tagged_set_dim(self):
    """Slice along a set dimension and compare against direct indexing
    of the original data, for every valid position/extent pair."""
    nsignals = 10
    signals = np.random.random_sample((nsignals, 100))
    da = self.block.create_data_array("data", "data", data=signals)
    da.append_set_dimension()
    da.append_sampled_dimension(sampling_interval=1).unit = "s"
    positions = self.block.create_data_array("mtag.positions", "test.positions",
                                             dtype=float, shape=(1,))
    extents = self.block.create_data_array("mtag.extents", "test.extents",
                                           dtype=float, shape=(1,))
    mtag = self.block.create_multi_tag("mtag", "simple", positions=positions)
    mtag.extents = extents
    mtag.references.append(da)

    def check(expected_excl, expected_incl):
        # the default stop rule is exclusive
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), expected_incl)

    for start in range(nsignals):
        for length in range(2, nsignals - start):
            mtag.positions[:] = [start]
            mtag.extents[:] = [length]
            check(da[start:start + length], da[start:start + length + 1])
            # a +0.1 offset rounds the start up (ceil) and the stop down
            # (floor); inclusive and exclusive then agree
            mtag.positions[:] = [start + 0.1]
            mtag.extents[:] = [length + 0.1]
            check(da[start + 1:start + length + 1],
                  da[start + 1:start + length + 1])
            if start + length + 2 < len(da):
                # a +0.9 offset rounds the start up (ceil) and the stop
                # down (floor); inclusive and exclusive agree here too
                mtag.positions[:] = [start + 0.9]
                mtag.extents[:] = [length + 0.9]
                check(da[start + 1:start + length + 2],
                      da[start + 1:start + length + 2])
def test_tagged_range_dim(self):
    """Slice along a range dimension and compare against direct indexing
    of the original data, for every valid position/extent pair."""
    nticks = 10
    signals = np.random.random_sample((nticks, 100))
    da = self.block.create_data_array("data", "data", data=signals)
    da.append_range_dimension(ticks=range(nticks))
    da.append_sampled_dimension(sampling_interval=1).unit = "s"
    positions = self.block.create_data_array("mtag.positions", "test.positions",
                                             dtype=float, shape=(1,))
    extents = self.block.create_data_array("mtag.extents", "test.extents",
                                           dtype=float, shape=(1,))
    mtag = self.block.create_multi_tag("mtag", "simple", positions=positions)
    mtag.extents = extents
    mtag.references.append(da)

    def check(expected_excl, expected_incl):
        # the default stop rule is exclusive
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), expected_incl)

    for start in range(nticks):
        for length in range(2, nticks - start):
            mtag.positions[:] = [start]
            mtag.extents[:] = [length]
            check(da[start:start + length], da[start:start + length + 1])
            # a +0.1 offset rounds the start up (ceil) and the stop down
            # (floor); inclusive and exclusive then agree
            mtag.positions[:] = [start + 0.1]
            mtag.extents[:] = [length + 0.1]
            check(da[start + 1:start + length + 1],
                  da[start + 1:start + length + 1])
            if start + length + 2 < len(da):
                # a +0.9 offset rounds the start up (ceil) and the stop
                # down (floor); inclusive and exclusive agree here too
                mtag.positions[:] = [start + 0.9]
                mtag.extents[:] = [length + 0.9]
                check(da[start + 1:start + length + 2],
                      da[start + 1:start + length + 2])
def test_tagged_sampled_dim(self):
    """Slice along a sampled dimension and compare against direct indexing
    of the original data, for every valid position/extent pair."""
    nticks = 10
    signals = np.random.random_sample((nticks, 100))
    da = self.block.create_data_array("data", "data", data=signals)
    da.append_sampled_dimension(sampling_interval=1).unit = "V"
    da.append_sampled_dimension(sampling_interval=1).unit = "s"
    positions = self.block.create_data_array("mtag.positions", "test.positions",
                                             dtype=float, shape=(1,))
    extents = self.block.create_data_array("mtag.extents", "test.extents",
                                           dtype=float, shape=(1,))
    mtag = self.block.create_multi_tag("mtag", "simple", positions=positions)
    mtag.extents = extents
    mtag.units = ["V", "s"]
    mtag.references.append(da)

    def check(expected_excl, expected_incl):
        # the default stop rule is exclusive
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), expected_excl)
        np.testing.assert_array_almost_equal(
            mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), expected_incl)

    for start in range(nticks):
        for length in range(2, nticks - start):
            mtag.positions[:] = [start]
            mtag.extents[:] = [length]
            check(da[start:start + length], da[start:start + length + 1])
            # a +0.1 offset rounds the start up (ceil) and the stop down
            # (floor); inclusive and exclusive then agree
            mtag.positions[:] = [start + 0.1]
            mtag.extents[:] = [length + 0.1]
            check(da[start + 1:start + length + 1],
                  da[start + 1:start + length + 1])
            if start + length + 2 < len(da):
                # a +0.9 offset rounds the start up (ceil) and the stop
                # down (floor); inclusive and exclusive agree here too
                mtag.positions[:] = [start + 0.9]
                mtag.extents[:] = [length + 0.9]
                check(da[start + 1:start + length + 2],
                      da[start + 1:start + length + 2])
|
import os
import shutil
def getTestFontPath(fileName='TestFont.ufo'):
    """Return the path of *fileName* inside the shared 'testData' directory,
    which lives four directory levels above this module."""
    root = __file__
    for _ in range(4):  # climb four levels up from this module's location
        root = os.path.dirname(root)
    return os.path.join(root, 'testData', fileName)
def getTestFontCopyPath(testFontPath=None):
    """Return the sibling 'Copy' path for a test font, e.g.
    '/x/TestFont.ufo' -> '/x/TestFontCopy.ufo'.  Defaults to the
    standard test font path."""
    if testFontPath is None:
        testFontPath = getTestFontPath()
    directory, name = os.path.split(testFontPath)
    base = os.path.splitext(name)[0]
    return os.path.join(directory, base + 'Copy.ufo')
def makeTestFontCopy(testFontPath=None):
    """Copy the test font tree to its 'Copy' location and return the new
    path.  Defaults to copying the standard test font."""
    if testFontPath is None:
        testFontPath = getTestFontPath()
    destination = getTestFontCopyPath(testFontPath)
    shutil.copytree(testFontPath, destination)
    return destination
def tearDownTestFontCopy(testFontPath=None):
    """Delete the font copy created by makeTestFontCopy().  Defaults to
    the standard copy location."""
    target = getTestFontCopyPath() if testFontPath is None else testFontPath
    shutil.rmtree(target)
class NotificationTestObject(object):

    """Minimal observer used in notification tests; it simply echoes the
    notifications it receives."""

    def testCallback(self, notification):
        """Print the notification's name and data.

        Fixed: the original used the Python 2 ``print`` statement, which
        is a SyntaxError under Python 3.
        """
        print(notification.name, notification.data)
|
import unittest
import importlib.util

# The module file name ("train.5m.py") is not a valid Python identifier,
# so it cannot be imported with a plain ``import``; load it from its path.
# Fixed: the previous ``imp.load_source`` call used the ``imp`` module,
# which is deprecated since Python 3.4 and removed in Python 3.12.
_spec = importlib.util.spec_from_file_location('train.5m', 'train.5m.py')
train = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(train)
class TestTrain(unittest.TestCase):
    """Unit tests for the status helpers in train.5m.py."""

    def test_can_get_ontime(self):
        # an on-time service is shortened to plain '0'
        self.assertEqual(train.shorten('On time'), '0')

    def test_can_get_delay(self):
        # late services get a '+' prefix
        self.assertEqual(train.shorten('1m late'), '+1')

    def test_can_get_early(self):
        # early services get a '-' prefix
        self.assertEqual(train.shorten('1m early'), '-1')

    def test_can_shorten_tens_of_minutes(self):
        # multi-digit delays are kept intact
        self.assertEqual(train.shorten('11m late'), '+11')

    def test_will_use_last_item_in_array(self):
        # get_status reports the most recent entry, not any earlier one
        history = ['On time', '2m late', 'On time', 'On time', 'On time']
        self.assertEqual(train.get_status(history), '0')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.views.generic import (CreateView, DetailView,
ListView, TemplateView, View)
from accounts.models import Account, Tags
from common.models import User, Comment, Attachments
from common.utils import STAGES, SOURCES, CURRENCY_CODES, STATUS_CHOICE
from contacts.models import Contact
from opportunity.forms import (OpportunityForm, OpportunityCommentForm,
OpportunityAttachmentForm)
from opportunity.models import Opportunity
from django.urls import reverse
from django.db.models import Q
from django.core.exceptions import PermissionDenied
from common.tasks import send_email_user_mentions
from opportunity.tasks import send_email_to_assigned_user
from common.access_decorators_mixins import (
sales_access_required, marketing_access_required, SalesAccessRequiredMixin, MarketingAccessRequiredMixin)
from teams.models import Teams
from leads.models import Lead
from listings.models import Listing
class OpportunityListView(SalesAccessRequiredMixin, LoginRequiredMixin, TemplateView):
    """List opportunities, honouring per-user visibility and the search
    filters submitted with the list form."""

    model = Opportunity
    context_object_name = "opportunity_list"
    template_name = "opportunity.html"

    def get_queryset(self):
        """Return the visible opportunities, narrowed by any GET/POST filters."""
        qs = self.model.objects.all().prefetch_related("contacts", "account")
        user = self.request.user
        if user.role != "ADMIN" and not user.is_superuser:
            # non-admins only see records assigned to them or created by them
            qs = qs.filter(Q(assigned_to__in=[user]) | Q(created_by=user.id))
        if self.request.GET.get('tag', None):
            qs = qs.filter(tags__in=self.request.GET.getlist('tag'))
        posted = self.request.POST
        if posted:
            if posted.get('name'):
                qs = qs.filter(name__icontains=posted.get('name'))
            if posted.get('stage'):
                qs = qs.filter(stage=posted.get('stage'))
            if posted.get('status'):
                qs = qs.filter(status=posted.get('status'))
            if posted.get('lead_source'):
                qs = qs.filter(lead_source=posted.get('lead_source'))
            if posted.get('account'):
                qs = qs.filter(account_id=posted.get('account'))
            if posted.get('contacts'):
                qs = qs.filter(contacts=posted.get('contacts'))
            if posted.get('tag'):
                qs = qs.filter(tags__in=posted.getlist('tag'))
        # joins above can duplicate rows; collapse them
        return qs.distinct()

    def get_context_data(self, **kwargs):
        context = super(OpportunityListView, self).get_context_data(**kwargs)
        context["opportunity_list"] = self.get_queryset()
        context["accounts"] = Account.objects.filter(status="open")
        context["contacts"] = Contact.objects.all()
        context["stages"] = STAGES
        context["status"] = STATUS_CHOICE
        context["sources"] = SOURCES
        context["per_page"] = self.request.POST.get('per_page')
        # only offer tags that are actually attached to some opportunity
        tag_ids = list(set(Opportunity.objects.values_list('tags', flat=True)))
        context["tags"] = Tags.objects.filter(id__in=tag_ids)
        if self.request.POST.get('tag', None):
            context["request_tags"] = self.request.POST.getlist('tag')
        elif self.request.GET.get('tag', None):
            context["request_tags"] = self.request.GET.getlist('tag')
        else:
            context["request_tags"] = None
        # the template shows a "search" state when any of these is filled
        search_fields = ('name', 'stage', 'lead_source', 'account', 'contacts')
        context["search"] = any(
            self.request.POST.get(field) for field in search_fields)
        return context

    def post(self, request, *args, **kwargs):
        # the list form posts its filters back to this same view
        return self.render_to_response(self.get_context_data(**kwargs))
@login_required
@sales_access_required
def create_opportunity(request):
    """Create an Opportunity from a (typically AJAX) form submission.

    GET renders the creation form; POST validates ``OpportunityForm`` and
    returns JSON with either ``success_url`` or the form errors.  All
    assigned users (including those added via teams) are notified through
    the async ``send_email_to_assigned_user`` task.
    """
    accounts = Account.objects.filter(status="open")
    contacts = Contact.objects.all()
    listings = Listing.objects.all()
    leads = Lead.objects.all()
    if request.user.role != "ADMIN" and not request.user.is_superuser:
        # non-admins may only pick from records they created / are assigned to
        accounts = Account.objects.filter(
            created_by=request.user)
        contacts = Contact.objects.filter(
            Q(assigned_to__in=[request.user]) | Q(created_by=request.user))
    users = []
    if request.user.role == 'ADMIN' or request.user.is_superuser:
        users = User.objects.filter(is_active=True).order_by('email')
    elif request.user.google.all():
        # Google-authenticated non-admins get no assignment choices
        users = []
    else:
        users = User.objects.filter(role='ADMIN').order_by('email')
    kwargs_data = {
        "assigned_to": users,
        "account": accounts, "contacts": contacts,
        "listings": listings, "leads": leads}
    # BUGFIX: test the request method instead of truthiness of request.POST;
    # a POST with an empty body previously fell through to the GET branch
    # and re-rendered the page instead of returning the form errors.
    if request.method == 'POST':
        form = OpportunityForm(request.POST, request.FILES, **kwargs_data)
        if form.is_valid():
            opportunity_obj = form.save(commit=False)
            opportunity_obj.created_by = request.user
            if request.POST.get('stage') in ['CLOSED WON', 'CLOSED LOST']:
                opportunity_obj.closed_by = request.user
            opportunity_obj.save()
            if request.POST.getlist('assigned_to', []):
                opportunity_obj.assigned_to.add(
                    *request.POST.getlist('assigned_to'))
            if request.POST.getlist('teams', []):
                # expand the selected teams into their member users
                user_ids = Teams.objects.filter(
                    id__in=request.POST.getlist('teams')).values_list(
                    'users', flat=True)
                assinged_to_users_ids = opportunity_obj.assigned_to.all(
                    ).values_list('id', flat=True)
                for user_id in user_ids:
                    if user_id not in assinged_to_users_ids:
                        opportunity_obj.assigned_to.add(user_id)
            # notify every assigned user asynchronously
            current_site = get_current_site(request)
            recipients = list(opportunity_obj.assigned_to.all().values_list(
                'id', flat=True))
            send_email_to_assigned_user.delay(
                recipients, opportunity_obj.id, domain=current_site.domain,
                protocol=request.scheme)
            if request.POST.getlist('listings', []):
                opportunity_obj.listings.add(
                    *request.POST.getlist('listings'))
            if request.POST.getlist('leads', []):
                opportunity_obj.leads.add(
                    *request.POST.getlist('leads'))
            if request.POST.getlist('contacts', []):
                opportunity_obj.contacts.add(
                    *request.POST.getlist('contacts'))
            if request.POST.get('tags', ''):
                # tags arrive as one comma separated, case-insensitive string
                for t in request.POST.get("tags").split(","):
                    tag = Tags.objects.filter(name=t.lower())
                    if tag:
                        tag = tag[0]
                    else:
                        tag = Tags.objects.create(name=t.lower())
                    opportunity_obj.tags.add(tag)
            if request.FILES.get('oppurtunity_attachment'):
                # NOTE: 'oppurtunity_attachment' (sic) is the field name the
                # templates post; do not "fix" the spelling here alone.
                attachment = Attachments()
                attachment.created_by = request.user
                attachment.file_name = request.FILES.get(
                    'oppurtunity_attachment').name
                attachment.opportunity = opportunity_obj
                attachment.attachment = request.FILES.get(
                    'oppurtunity_attachment')
                attachment.save()
            success_url = reverse('opportunities:list')
            if request.POST.get("savenewform"):
                success_url = reverse("opportunities:save")
            if request.POST.get('from_account'):
                # return to the account page that opened the form
                success_url = reverse(
                    "accounts:view_account",
                    kwargs={'pk': request.POST.get('from_account')})
            return JsonResponse({'error': False, 'success_url': success_url})
        return JsonResponse({'error': True, 'errors': form.errors})
    context = {}
    context["opportunity_form"] = OpportunityForm(**kwargs_data)
    context["accounts"] = accounts
    if request.GET.get('view_account'):
        context['account'] = get_object_or_404(
            Account, id=request.GET.get('view_account'))
    context["leads"] = leads
    if request.GET.get('view_lead'):
        context['lead'] = get_object_or_404(
            Lead, id=request.GET.get('view_lead'))
    context["contacts"] = contacts
    context["listings"] = listings
    context["users"] = kwargs_data['assigned_to']
    context["currencies"] = CURRENCY_CODES
    context["stages"] = STAGES
    context["status"] = STATUS_CHOICE
    context["sources"] = SOURCES
    context["teams"] = Teams.objects.all()
    context["assignedto_list"] = [
        int(i) for i in request.POST.getlist('assigned_to', []) if i]
    context["listings_list"] = [
        int(i) for i in request.POST.getlist('listings', []) if i]
    context["contacts_list"] = [
        int(i) for i in request.POST.getlist('contacts', []) if i]
    context["leads_list"] = [
        int(i) for i in request.POST.getlist('leads', []) if i]
    return render(request, "create_opportunity.html", context)
class OpportunityDetailView(SalesAccessRequiredMixin, LoginRequiredMixin, DetailView):
    """Read-only detail page for a single opportunity."""

    model = Opportunity
    context_object_name = "opportunity_record"
    template_name = "view_opportunity.html"

    def get_queryset(self):
        # prefetch the relations the template iterates over
        queryset = super(OpportunityDetailView, self).get_queryset()
        queryset = queryset.prefetch_related("contacts", "account", "listings", "leads")
        return queryset

    def get_context_data(self, **kwargs):
        """Build the template context and enforce per-user access.

        Raises PermissionDenied for non-admin users who are neither
        assigned to the opportunity, its creator, nor assigned to the
        opportunity's account.
        """
        context = super(OpportunityDetailView, self).get_context_data(**kwargs)
        # ids of users allowed to view this record
        user_assgn_list = [
            assigned_to.id for assigned_to in
            context['object'].assigned_to.all()]
        user_assigned_accounts = set(self.request.user.account_assigned_users.values_list('id', flat=True))
        if context['object'].account:
            opportunity_account = set([context['object'].account.id])
        else:
            opportunity_account = set()
        if user_assigned_accounts.intersection(opportunity_account):
            # viewer is assigned to the opportunity's account
            user_assgn_list.append(self.request.user.id)
        if self.request.user == context['object'].created_by:
            user_assgn_list.append(self.request.user.id)
        if self.request.user.role != "ADMIN" and not \
                self.request.user.is_superuser:
            if self.request.user.id not in user_assgn_list:
                raise PermissionDenied
        # assignees serialized for the template's JS widget
        assigned_data = []
        for each in context['opportunity_record'].assigned_to.all():
            assigned_dict = {}
            assigned_dict['id'] = each.id
            assigned_dict['name'] = each.email
            assigned_data.append(assigned_dict)
        comments = context["opportunity_record"].opportunity_comments.all()
        # users that may be @-mentioned in comments
        if self.request.user.is_superuser or self.request.user.role == 'ADMIN':
            users_mention = list(User.objects.filter(is_active=True).values('username'))
        elif self.request.user != context['object'].created_by:
            # NOTE(review): non-creators can only mention the creator here,
            # while the creator can mention all assignees -- confirm this
            # elif ordering is intentional.
            users_mention = [{'username': context['object'].created_by.username}]
        else:
            users_mention = list(context['object'].assigned_to.all().values('username'))
        context.update({
            "comments": comments,
            'attachments': context[
                "opportunity_record"].opportunity_attachment.all(),
            "users_mention": users_mention,
            "assigned_data": json.dumps(assigned_data)})
        return context
@login_required
@sales_access_required
def update_opportunity(request, pk):
    """Edit an existing Opportunity.

    GET renders the edit form (after a per-user permission check); POST
    validates ``OpportunityForm`` and returns JSON with either
    ``success_url`` or the form errors.  Users newly assigned by this
    edit are notified through the async task.

    Raises Http404 when no opportunity with *pk* exists.  (The original
    ``filter(pk=pk).first()`` returned ``None`` and later crashed with
    an AttributeError, i.e. a 500.)
    """
    opportunity_object = get_object_or_404(Opportunity, pk=pk)
    accounts = Account.objects.filter(status="open")
    contacts = Contact.objects.all()
    listings = Listing.objects.all()
    leads = Lead.objects.all()
    if request.user.role != "ADMIN" and not request.user.is_superuser:
        # non-admins may only pick from records they created / are assigned to
        accounts = Account.objects.filter(
            created_by=request.user)
        contacts = Contact.objects.filter(
            Q(assigned_to__in=[request.user]) | Q(created_by=request.user))
    users = []
    if request.user.role == 'ADMIN' or request.user.is_superuser:
        users = User.objects.filter(is_active=True).order_by('email')
    elif request.user.google.all():
        # Google-authenticated non-admins get no assignment choices
        users = []
    else:
        users = User.objects.filter(role='ADMIN').order_by('email')
    kwargs_data = {
        "assigned_to": users,
        "account": accounts,
        "listings": listings,
        "contacts": contacts,
        "leads": leads}
    form = OpportunityForm(instance=opportunity_object, **kwargs_data)
    # BUGFIX: test the request method instead of truthiness of request.POST
    # (an empty POST body previously fell through to the GET branch), and
    # drop the duplicated "form = form = ..." assignment.
    if request.method == 'POST':
        form = OpportunityForm(
            request.POST, request.FILES,
            instance=opportunity_object, **kwargs_data)
        if form.is_valid():
            opportunity_obj = form.save(commit=False)
            if request.POST.get('stage') in ['CLOSED WON', 'CLOSED LOST']:
                opportunity_obj.closed_by = request.user
            # remember who was assigned before the edit, so only the users
            # added by this edit are emailed below
            previous_assigned_to_users = list(
                opportunity_obj.assigned_to.all().values_list('id', flat=True))
            opportunity_obj.save()
            # m2m relations are rebuilt from the submitted lists
            opportunity_obj.contacts.clear()
            opportunity_obj.listings.clear()
            opportunity_obj.leads.clear()
            if request.POST.getlist('assigned_to', []):
                opportunity_obj.assigned_to.clear()
                opportunity_obj.assigned_to.add(
                    *request.POST.getlist('assigned_to'))
            else:
                opportunity_obj.assigned_to.clear()
            if request.POST.getlist('teams', []):
                # expand the selected teams into their member users
                user_ids = Teams.objects.filter(
                    id__in=request.POST.getlist('teams')).values_list(
                    'users', flat=True)
                assinged_to_users_ids = opportunity_obj.assigned_to.all(
                    ).values_list('id', flat=True)
                for user_id in user_ids:
                    if user_id not in assinged_to_users_ids:
                        opportunity_obj.assigned_to.add(user_id)
            if request.POST.getlist('teams', []):
                opportunity_obj.teams.clear()
                opportunity_obj.teams.add(*request.POST.getlist('teams'))
            else:
                opportunity_obj.teams.clear()
            # notify only the users that this edit added
            current_site = get_current_site(request)
            assigned_to_list = list(
                opportunity_obj.assigned_to.all().values_list('id', flat=True))
            recipients = list(
                set(assigned_to_list) - set(previous_assigned_to_users))
            send_email_to_assigned_user.delay(
                recipients, opportunity_obj.id, domain=current_site.domain,
                protocol=request.scheme)
            if request.POST.getlist('contacts', []):
                opportunity_obj.contacts.add(
                    *request.POST.getlist('contacts'))
            if request.POST.getlist('leads', []):
                opportunity_obj.leads.add(
                    *request.POST.getlist('leads'))
            if request.POST.getlist('listings', []):
                opportunity_obj.listings.add(
                    *request.POST.getlist('listings'))
            opportunity_obj.tags.clear()
            if request.POST.get('tags', ''):
                # tags arrive as one comma separated, case-insensitive string
                for t in request.POST.get("tags").split(","):
                    tag = Tags.objects.filter(name=t.lower())
                    if tag:
                        tag = tag[0]
                    else:
                        tag = Tags.objects.create(name=t.lower())
                    opportunity_obj.tags.add(tag)
            if request.FILES.get('oppurtunity_attachment'):
                # NOTE: 'oppurtunity_attachment' (sic) is the field name the
                # templates post; do not "fix" the spelling here alone.
                attachment = Attachments()
                attachment.created_by = request.user
                attachment.file_name = request.FILES.get(
                    'oppurtunity_attachment').name
                attachment.opportunity = opportunity_obj
                attachment.attachment = request.FILES.get(
                    'oppurtunity_attachment')
                attachment.save()
            success_url = reverse('opportunities:list')
            if request.POST.get('from_account'):
                # return to the account page that opened the form
                success_url = reverse(
                    "accounts:view_account",
                    kwargs={'pk': request.POST.get('from_account')})
            return JsonResponse({'error': False, 'success_url': success_url})
        return JsonResponse({'error': True, 'errors': form.errors})
    context = {}
    context["opportunity_obj"] = opportunity_object
    # non-admins may only open records they created or are assigned to
    user_assgn_list = [
        assigned_to.id for assigned_to in
        context["opportunity_obj"].assigned_to.all()]
    if request.user == context['opportunity_obj'].created_by:
        user_assgn_list.append(request.user.id)
    if request.user.role != "ADMIN" and not request.user.is_superuser:
        if request.user.id not in user_assgn_list:
            raise PermissionDenied
    context["opportunity_form"] = form
    context["accounts"] = accounts
    if request.GET.get('view_account'):
        context['account'] = get_object_or_404(
            Account, id=request.GET.get('view_account'))
    context["contacts"] = contacts
    context["leads"] = leads
    if request.GET.get('view_lead'):
        context['lead'] = get_object_or_404(
            Lead, id=request.GET.get('view_lead'))
    context["listings"] = listings
    context["users"] = kwargs_data['assigned_to']
    context["currencies"] = CURRENCY_CODES
    context["stages"] = STAGES
    context["status"] = STATUS_CHOICE
    context["sources"] = SOURCES
    context["teams"] = Teams.objects.all()
    context["assignedto_list"] = [
        int(i) for i in request.POST.getlist('assigned_to', []) if i]
    context["contacts_list"] = [
        int(i) for i in request.POST.getlist('contacts', []) if i]
    context["listings_list"] = [
        int(i) for i in request.POST.getlist('listings', []) if i]
    context["leads_list"] = [
        int(i) for i in request.POST.getlist('leads', []) if i]
    return render(request, "create_opportunity.html", context)
class DeleteOpportunityView(SalesAccessRequiredMixin, LoginRequiredMixin, View):
    """Delete an opportunity; only admins, superusers, or the creator may."""

    def get(self, request, *args, **kwargs):
        # GET is treated exactly like POST for this endpoint.
        return self.post(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = get_object_or_404(Opportunity, id=kwargs.get("pk"))
        user = self.request.user
        allowed = (
            user.role == "ADMIN"
            or user.is_superuser
            or user == self.object.created_by
        )
        if not allowed:
            raise PermissionDenied
        self.object.delete()
        if request.is_ajax():
            return JsonResponse({'error': False})
        account_pk = request.GET.get('view_account')
        if account_pk:
            return redirect("accounts:view_account", pk=account_pk)
        return redirect("opportunities:list")
class GetListingView(LoginRequiredMixin, View):
    """Return a {pk: name} JSON mapping of listings."""

    def get(self, request, *args, **kwargs):
        account_id = request.GET.get("account")
        # NOTE(review): the per-account filter is currently disabled — the
        # original code returned every Listing whether or not ?account= was
        # supplied, so account_id is read but unused.
        listings = Listing.objects.all()
        payload = {item.pk: item.name for item in listings.distinct()}
        return JsonResponse(payload)
class GetLeadView(LoginRequiredMixin, View):
    """Return a {pk: first_name} JSON mapping of leads, optionally scoped
    to a single account via ?account=<id>."""

    def get(self, request, *args, **kwargs):
        account_id = request.GET.get("account")
        if account_id:
            account = get_object_or_404(Account, id=account_id)
            queryset = account.leads.all()
        else:
            queryset = Lead.objects.all()
        payload = {lead.pk: lead.first_name for lead in queryset.distinct()}
        return JsonResponse(payload)
class GetContactView(LoginRequiredMixin, View):
    """Return a {pk: first_name} JSON mapping of contacts, optionally
    scoped to a single account via ?account=<id>."""

    def get(self, request, *args, **kwargs):
        account_id = request.GET.get("account")
        if account_id:
            account = get_object_or_404(Account, id=account_id)
            queryset = account.contacts.all()
        else:
            queryset = Contact.objects.all()
        payload = {c.pk: c.first_name for c in queryset.distinct()}
        return JsonResponse(payload)
class AddCommentView(LoginRequiredMixin, CreateView):
    """Create a comment on an opportunity and return it as JSON."""

    model = Comment
    form_class = OpportunityCommentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.opportunity = get_object_or_404(
            Opportunity, id=request.POST.get('opportunityid'))
        user = request.user
        can_comment = (
            user in self.opportunity.assigned_to.all()
            or user == self.opportunity.created_by
            or user.is_superuser
            or user.role == 'ADMIN'
        )
        if not can_comment:
            return JsonResponse(
                {'error': "You don't have permission to comment."})
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def form_valid(self, form):
        """Persist the comment, then notify @-mentioned users by email."""
        comment = form.save(commit=False)
        comment.commented_by = self.request.user
        comment.opportunity = self.opportunity
        comment.save()
        site = get_current_site(self.request)
        # Mention notifications are sent asynchronously via celery.
        send_email_user_mentions.delay(
            comment.id, 'opportunity', domain=site.domain,
            protocol=self.request.scheme)
        return JsonResponse({
            "comment_id": comment.id, "comment": comment.comment,
            "commented_on": comment.commented_on,
            "commented_on_arrow": comment.commented_on_arrow,
            "commented_by": comment.commented_by.email
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
    """Edit an existing opportunity comment; only its author may do so."""

    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.comment_obj = get_object_or_404(
            Comment, id=request.POST.get("commentid"))
        if request.user != self.comment_obj.commented_by:
            return JsonResponse(
                {'error': "You don't have permission to edit this comment."})
        form = OpportunityCommentForm(
            request.POST, instance=self.comment_obj)
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def form_valid(self, form):
        """Save the new comment text and re-notify mentioned users."""
        self.comment_obj.comment = form.cleaned_data.get("comment")
        self.comment_obj.save(update_fields=["comment"])
        site = get_current_site(self.request)
        send_email_user_mentions.delay(
            self.comment_obj.id, 'opportunity', domain=site.domain,
            protocol=self.request.scheme)
        return JsonResponse({
            "commentid": self.comment_obj.id,
            "comment": self.comment_obj.comment,
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
    """Delete a comment; only its author may do so."""

    def post(self, request, *args, **kwargs):
        self.object = get_object_or_404(
            Comment, id=request.POST.get("comment_id"))
        if request.user != self.object.commented_by:
            return JsonResponse(
                {'error': "You don't have permission to delete this comment."})
        self.object.delete()
        return JsonResponse({"cid": request.POST.get("comment_id")})
class GetOpportunitiesView(LoginRequiredMixin, ListView):
    """Read-only listing of every Opportunity, rendered with the
    opportunities_list.html template."""
    model = Opportunity
    context_object_name = "opportunities"
    template_name = "opportunities_list.html"
class AddAttachmentsView(LoginRequiredMixin, CreateView):
    """Attach an uploaded file to an opportunity; returns its metadata as JSON."""

    model = Attachments
    form_class = OpportunityAttachmentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.opportunity = get_object_or_404(
            Opportunity, id=request.POST.get('opportunityid'))
        user = request.user
        allowed = (
            user in self.opportunity.assigned_to.all()
            or user == self.opportunity.created_by
            or user.is_superuser
            or user.role == 'ADMIN'
        )
        if not allowed:
            return JsonResponse(
                {'error': "You don't have permission to add attachment."})
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    def form_valid(self, form):
        """Persist the attachment and echo back its display metadata."""
        attachment = form.save(commit=False)
        attachment.created_by = self.request.user
        attachment.file_name = attachment.attachment.name
        attachment.opportunity = self.opportunity
        attachment.save()
        return JsonResponse({
            "attachment_id": attachment.id,
            "attachment": attachment.file_name,
            "attachment_url": attachment.attachment.url,
            "created_on": attachment.created_on,
            "created_on_arrow": attachment.created_on_arrow,
            "created_by": attachment.created_by.email,
            "download_url": reverse('common:download_attachment',
                                    kwargs={'pk': attachment.id}),
            "attachment_display": attachment.get_file_type_display(),
            "file_type": attachment.file_type()
        })

    def form_invalid(self, form):
        return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
    """Delete an attachment; creator, superuser, or admin only."""

    def post(self, request, *args, **kwargs):
        self.object = get_object_or_404(
            Attachments, id=request.POST.get("attachment_id"))
        user = request.user
        if (user == self.object.created_by or user.is_superuser
                or user.role == 'ADMIN'):
            self.object.delete()
            return JsonResponse({"aid": request.POST.get("attachment_id")})
        return JsonResponse(
            {'error': "You don't have permission to delete this attachment."})
|
import os
import sys
import pickle
import argparse
import numpy as np
from numpy.lib.format import open_memmap
# Subject IDs assigned to the training split of the cross-subject (xsub)
# benchmark; all other subjects go to validation.
training_subjects = [
    1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38
]
# Camera IDs assigned to the training split of the cross-view (xview) benchmark.
training_cameras = [2, 3]
max_body = 2        # bodies kept per frame
num_joint = 25      # joints per skeleton
max_frame = 300     # output frame dimension; shorter clips are zero-padded
toolbar_width = 30  # progress-bar width in characters
def read_skeleton(file):
    """Parse an NTU RGB+D .skeleton text file into a nested dict.

    Returns {'numFrame': int, 'frameInfo': [...]}, where each frame carries
    'numBody'/'bodyInfo' and each body carries per-body fields plus
    'numJoint'/'jointInfo' records of float values.
    """
    body_keys = [
        'bodyID', 'clipedEdges', 'handLeftConfidence',
        'handLeftState', 'handRightConfidence', 'handRightState',
        'isResticted', 'leanX', 'leanY', 'trackingState'
    ]
    joint_keys = [
        'x', 'y', 'z', 'depthX', 'depthY', 'colorX', 'colorY',
        'orientationW', 'orientationX', 'orientationY',
        'orientationZ', 'trackingState'
    ]
    with open(file, 'r') as f:
        def parse_record(keys):
            # One whitespace-separated record per line, all values floats.
            return {k: float(v) for k, v in zip(keys, f.readline().split())}

        sequence = {'numFrame': int(f.readline()), 'frameInfo': []}
        for _ in range(sequence['numFrame']):
            frame = {'numBody': int(f.readline()), 'bodyInfo': []}
            for _ in range(frame['numBody']):
                body = parse_record(body_keys)
                body['numJoint'] = int(f.readline())
                body['jointInfo'] = [
                    parse_record(joint_keys) for _ in range(body['numJoint'])
                ]
                frame['bodyInfo'].append(body)
            sequence['frameInfo'].append(frame)
    return sequence
def read_xyz(file, max_body=2, num_joint=25):
    """Load one skeleton file as an array of shape (3, frames, joints, bodies).

    Bodies beyond `max_body` and joints beyond `num_joint` are dropped;
    missing slots remain zero.
    """
    seq = read_skeleton(file)
    data = np.zeros((3, seq['numFrame'], num_joint, max_body))
    for t, frame in enumerate(seq['frameInfo']):
        for b, body in enumerate(frame['bodyInfo']):
            if b >= max_body:
                continue
            for j, joint in enumerate(body['jointInfo'][:num_joint]):
                data[:, t, j, b] = [joint['x'], joint['y'], joint['z']]
    return data
def print_toolbar(rate, annotation=''):
    """Redraw a simple in-place text progress bar; `rate` is in [0, 1]."""
    sys.stdout.write("{}[".format(annotation))
    for step in range(toolbar_width):
        filled = step * 1.0 / toolbar_width <= rate
        sys.stdout.write('-' if filled else ' ')
        sys.stdout.flush()
    # Carriage return so the next call overwrites this bar.
    sys.stdout.write(']\r')
def end_toolbar():
    """Finish the progress bar drawn by print_toolbar with a newline."""
    sys.stdout.write("\n")
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Convert raw NTU .skeleton files into a memmapped array plus labels.

    Writes '{part}_data.npy' (float32, shape
    (N, 3, max_frame, num_joint, max_body), zero-padded past each clip's
    length) and '{part}_label.pkl' into `out_path`.

    Args:
        data_path: directory containing raw .skeleton files.
        out_path: output directory (must exist).
        ignored_sample_path: optional text file listing sample names to skip,
            one per line, without the .skeleton extension.
        benchmark: 'xview' (split by camera) or 'xsub' (split by subject).
        part: 'train' or 'val'.

    Raises:
        ValueError: on an unknown `benchmark` or `part`.
    """
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [line.strip() + '.skeleton' for line in f]
    else:
        ignored_samples = []

    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Names look like S001C001P001R001A001.skeleton; the 3-digit codes
        # after the A/P/C tags are action class, performer, and camera.
        action_class = int(
            filename[filename.find('A') + 1:filename.find('A') + 4])
        subject_id = int(
            filename[filename.find('P') + 1:filename.find('P') + 4])
        camera_id = int(
            filename[filename.find('C') + 1:filename.find('C') + 4])

        if benchmark == 'xview':
            istraining = camera_id in training_cameras
        elif benchmark == 'xsub':
            istraining = subject_id in training_subjects
        else:
            raise ValueError('unknown benchmark: {}'.format(benchmark))

        if part == 'train':
            issample = istraining
        elif part == 'val':
            issample = not istraining
        else:
            raise ValueError('unknown part: {}'.format(part))

        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels are 0-based

    with open(os.path.join(out_path, '{}_label.pkl'.format(part)), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # Stream frame data straight to disk via a memmap; clips shorter than
    # max_frame remain zero-padded at the end.
    fp = open_memmap(os.path.join(out_path, '{}_data.npy'.format(part)),
                     dtype='float32',
                     mode='w+',
                     shape=(len(sample_label), 3, max_frame, num_joint,
                            max_body))
    for i, s in enumerate(sample_name):
        print_toolbar(
            i * 1.0 / len(sample_label),
            '({:>5}/{:<5}) Processing {:>5}-{:<5} data: '.format(
                i + 1, len(sample_name), benchmark, part))
        data = read_xyz(os.path.join(data_path, s),
                        max_body=max_body,
                        num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data
    end_toolbar()
if __name__ == '__main__':
    # Generate every benchmark/part combination in one run.
    parser = argparse.ArgumentParser(description='NTU-RGB-D Data Converter.')
    parser.add_argument('--data_path',
                        default='data/NTU-RGB-D/nturgb+d_skeletons')
    parser.add_argument(
        '--ignored_sample_path',
        default='tools/data_processing/nturgbd_samples_with_missing_skeletons.txt')
    parser.add_argument('--out_folder', default='data/NTU-RGB-D')
    arg = parser.parse_args()

    for b in ('xsub', 'xview'):
        for p in ('train', 'val'):
            out_path = os.path.join(arg.out_folder, b)
            if not os.path.exists(out_path):
                os.makedirs(out_path)
            gendata(arg.data_path,
                    out_path,
                    arg.ignored_sample_path,
                    benchmark=b,
                    part=p)
|
from prelude import *
#! the mean image having a black background is what gives black cats? or maybe its what the encoder learned
#! try to truncate towards the initial cat instead of towards the mean
#! the left half looks good initially. what happens to it?
# todo generic method for fading a mask (gaussian blur?) => only if refinement remains
# todo try on weird mask patterns
@dataclass(eq=False)
class Inpaint(Edit):
    """Edit that keeps the right half of an image and zero-masks the left,
    optionally fading the seam between the halves."""

    encoding_weight: ClassVar[float] = 0.00  # 10.0
    truncation_weight: ClassVar[float] = 0.0  # 0.2
    faded_mask_fraction: float = 0.1

    def select_right_half(self, x):
        # Keep only the right half along the width (last) dimension.
        half = x.shape[-1] // 2
        return x[:, :, :, half:]

    def pad_left_half(self, x):
        # Reconstruct full width by zero-padding on the left; optionally
        # ramp the first faded_mask_fraction of the right half from 0 to 1
        # so the seam is not a hard edge.
        x = torch.cat((torch.zeros_like(x), x), dim=3)
        if self.faded_mask_fraction > 0.0:
            left = x.shape[-1] // 2
            right = round(left * (1 + self.faded_mask_fraction))
            ramp = torch.arange(0.0, 1.0, 1 / (right - left), device=x.device)
            x[:, :, :, left:right] *= ramp
        return x

    def f(self, pred):
        return self.pad_left_half(self.select_right_half(pred))
if __name__ == "__main__":
    # Run the inpainting edit over the bundled example images.
    run_edit_on_examples(Inpaint())
|
from cereal import car
from selfdrive.car.volkswagen.values import CAR, BUTTON_STATES, CANBUS, NetworkLocation, TransmissionType, GearShifter
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint, get_safety_config
from selfdrive.car.interfaces import CarInterfaceBase
EventName = car.CarEvent.EventName
class CarInterface(CarInterfaceBase):
  """openpilot car interface for Volkswagen MQB-platform vehicles
  (VW, Audi, SEAT, Skoda variants listed in get_params)."""

  def __init__(self, CP, CarController, CarState):
    super().__init__(CP, CarController, CarState)

    # Previous-frame values, used by update() to detect state changes.
    self.displayMetricUnitsPrev = None
    self.buttonStatesPrev = BUTTON_STATES.copy()

    # Choose which bus carries our outgoing messages and which parser sees
    # the extended signals, based on where openpilot is connected.
    if CP.networkLocation == NetworkLocation.fwdCamera:
      self.ext_bus = CANBUS.pt
      self.cp_ext = self.cp
    else:
      self.ext_bus = CANBUS.cam
      self.cp_ext = self.cp_cam

  @staticmethod
  def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
    """Build the CarParams for the fingerprinted vehicle `candidate`.

    Raises:
      ValueError: if `candidate` is not one of the supported cars.
    """
    ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
    ret.carName = "volkswagen"
    ret.radarOffCan = True

    if True:  # pylint: disable=using-constant-test
      # Set global MQB parameters
      ret.safetyConfigs = [get_safety_config(car.CarParams.SafetyModel.volkswagen)]
      ret.enableBsm = 0x30F in fingerprint[0]  # SWA_01

      # Infer transmission type from which gearbox message is present.
      if 0xAD in fingerprint[0]:  # Getriebe_11
        ret.transmissionType = TransmissionType.automatic
      elif 0x187 in fingerprint[0]:  # EV_Gearshift
        ret.transmissionType = TransmissionType.direct
      else:
        ret.transmissionType = TransmissionType.manual

      # Chassis messages visible on bus 1 imply a gateway connection;
      # otherwise assume we sit behind the forward camera.
      if any(msg in fingerprint[1] for msg in (0x40, 0x86, 0xB2, 0xFD)):  # Airbag_01, LWI_01, ESP_19, ESP_21
        ret.networkLocation = NetworkLocation.gateway
      else:
        ret.networkLocation = NetworkLocation.fwdCamera

    # Global lateral tuning defaults, can be overridden per-vehicle

    ret.steerActuatorDelay = 0.1
    ret.steerRateCost = 1.0
    ret.steerLimitTimer = 0.4
    ret.steerRatio = 15.6  # Let the params learner figure this out
    tire_stiffness_factor = 1.0  # Let the params learner figure this out
    ret.lateralTuning.pid.kpBP = [0.]
    ret.lateralTuning.pid.kiBP = [0.]
    ret.lateralTuning.pid.kf = 0.00006
    ret.lateralTuning.pid.kpV = [0.6]
    ret.lateralTuning.pid.kiV = [0.2]

    # Per-chassis tuning values, override tuning defaults here if desired
    # (mass in kg excluding cargo, wheelbase in meters).

    if candidate == CAR.ARTEON_MK1:
      ret.mass = 1733 + STD_CARGO_KG
      ret.wheelbase = 2.84
    elif candidate == CAR.ATLAS_MK1:
      ret.mass = 2011 + STD_CARGO_KG
      ret.wheelbase = 2.98
    elif candidate == CAR.GOLF_MK7:
      ret.mass = 1397 + STD_CARGO_KG
      ret.wheelbase = 2.62
    elif candidate == CAR.JETTA_MK7:
      ret.mass = 1328 + STD_CARGO_KG
      ret.wheelbase = 2.71
    elif candidate == CAR.PASSAT_MK8:
      ret.mass = 1551 + STD_CARGO_KG
      ret.wheelbase = 2.79
    elif candidate == CAR.POLO_MK6:
      ret.mass = 1230 + STD_CARGO_KG
      ret.wheelbase = 2.55
    elif candidate == CAR.TAOS_MK1:
      ret.mass = 1498 + STD_CARGO_KG
      ret.wheelbase = 2.69
    elif candidate == CAR.TCROSS_MK1:
      ret.mass = 1150 + STD_CARGO_KG
      ret.wheelbase = 2.60
    elif candidate == CAR.TIGUAN_MK2:
      ret.mass = 1715 + STD_CARGO_KG
      ret.wheelbase = 2.74
    elif candidate == CAR.TOURAN_MK2:
      ret.mass = 1516 + STD_CARGO_KG
      ret.wheelbase = 2.79
    elif candidate == CAR.TRANSPORTER_T61:
      ret.mass = 1926 + STD_CARGO_KG
      ret.wheelbase = 3.00  # SWB, LWB is 3.40, TBD how to detect difference
      ret.minSteerSpeed = 14.0
    elif candidate == CAR.TROC_MK1:
      ret.mass = 1413 + STD_CARGO_KG
      ret.wheelbase = 2.63
    elif candidate == CAR.AUDI_A3_MK3:
      ret.mass = 1335 + STD_CARGO_KG
      ret.wheelbase = 2.61
    elif candidate == CAR.AUDI_Q2_MK1:
      ret.mass = 1205 + STD_CARGO_KG
      ret.wheelbase = 2.61
    elif candidate == CAR.AUDI_Q3_MK2:
      ret.mass = 1623 + STD_CARGO_KG
      ret.wheelbase = 2.68
    elif candidate == CAR.SEAT_ATECA_MK1:
      ret.mass = 1900 + STD_CARGO_KG
      ret.wheelbase = 2.64
    elif candidate == CAR.SEAT_LEON_MK3:
      ret.mass = 1227 + STD_CARGO_KG
      ret.wheelbase = 2.64
    elif candidate == CAR.SKODA_KAMIQ_MK1:
      ret.mass = 1265 + STD_CARGO_KG
      ret.wheelbase = 2.66
    elif candidate == CAR.SKODA_KAROQ_MK1:
      ret.mass = 1278 + STD_CARGO_KG
      ret.wheelbase = 2.66
    elif candidate == CAR.SKODA_KODIAQ_MK1:
      ret.mass = 1569 + STD_CARGO_KG
      ret.wheelbase = 2.79
    elif candidate == CAR.SKODA_OCTAVIA_MK3:
      ret.mass = 1388 + STD_CARGO_KG
      ret.wheelbase = 2.68
    elif candidate == CAR.SKODA_SCALA_MK1:
      ret.mass = 1192 + STD_CARGO_KG
      ret.wheelbase = 2.65
    elif candidate == CAR.SKODA_SUPERB_MK3:
      ret.mass = 1505 + STD_CARGO_KG
      ret.wheelbase = 2.84
    else:
      raise ValueError(f"unsupported car {candidate}")

    # Derived physical parameters from mass/wheelbase.
    ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
    ret.centerToFront = ret.wheelbase * 0.45
    ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
                                                                         tire_stiffness_factor=tire_stiffness_factor)
    return ret

  # returns a car.CarState
  def update(self, c, can_strings):
    """Parse the latest CAN traffic and return the resulting CarState."""
    buttonEvents = []

    # Process the most recent CAN message traffic, and check for validity
    # The camera CAN has no signals we use at this time, but we process it
    # anyway so we can test connectivity with can_valid
    self.cp.update_strings(can_strings)
    self.cp_cam.update_strings(can_strings)

    ret = self.CS.update(self.cp, self.cp_cam, self.cp_ext, self.CP.transmissionType)
    ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
    ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False

    # TODO: add a field for this to carState, car interface code shouldn't write params
    # Update the device metric configuration to match the car at first startup,
    # or if there's been a change.
    #if self.CS.displayMetricUnits != self.displayMetricUnitsPrev:
    #  put_nonblocking("IsMetric", "1" if self.CS.displayMetricUnits else "0")

    # Check for and process state-change events (button press or release) from
    # the turn stalk switch or ACC steering wheel/control stalk buttons.
    for button in self.CS.buttonStates:
      if self.CS.buttonStates[button] != self.buttonStatesPrev[button]:
        be = car.CarState.ButtonEvent.new_message()
        be.type = button
        be.pressed = self.CS.buttonStates[button]
        buttonEvents.append(be)

    events = self.create_common_events(ret, extra_gears=[GearShifter.eco, GearShifter.sport, GearShifter.manumatic])

    # Vehicle health and operation safety checks
    if self.CS.parkingBrakeSet:
      events.add(EventName.parkBrake)
    # NOTE(review): tsk_status values 6/7 are treated as ACC fault states —
    # confirm against the TSK signal definition in the DBC.
    if self.CS.tsk_status in (6, 7):
      events.add(EventName.accFaulted)

    # Low speed steer alert hysteresis logic: latch on below
    # minSteerSpeed + 1, release above minSteerSpeed + 2.
    if self.CP.minSteerSpeed > 0. and ret.vEgo < (self.CP.minSteerSpeed + 1.):
      self.low_speed_alert = True
    elif ret.vEgo > (self.CP.minSteerSpeed + 2.):
      self.low_speed_alert = False
    if self.low_speed_alert:
      events.add(EventName.belowSteerSpeed)

    ret.events = events.to_msg()
    ret.buttonEvents = buttonEvents

    # update previous car states
    self.displayMetricUnitsPrev = self.CS.displayMetricUnits
    self.buttonStatesPrev = self.CS.buttonStates.copy()

    self.CS.out = ret.as_reader()
    return self.CS.out

  def apply(self, c):
    """Forward one frame of control commands to the car controller."""
    hud_control = c.hudControl
    ret = self.CC.update(c, c.enabled, self.CS, self.frame, self.ext_bus, c.actuators,
                         hud_control.visualAlert,
                         hud_control.leftLaneVisible,
                         hud_control.rightLaneVisible,
                         hud_control.leftLaneDepart,
                         hud_control.rightLaneDepart)
    self.frame += 1
    return ret
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/Prog.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
# global, set by --debug=findlibs
print_find_libs = None
def ProgramScanner(**kw):
    """Return a prototype Scanner instance for scanning executable
    files for static-lib dependencies"""
    kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
    return SCons.Scanner.Base(scan, "ProgramScanner", **kw)
def scan(node, env, libpath=()):
    """
    Scan a program node for static-library dependencies.

    Each name in env['LIBS'] is combined with every LIBPREFIXES/LIBSUFFIXES
    pair and looked up along `libpath`; the files found are returned as
    dependencies.  Non-string entries (already Nodes) pass through as-is.
    """
    try:
        libs = env['LIBS']
    except KeyError:
        # There are no LIBS in this environment, so just return a null list:
        return []
    if SCons.Util.is_String(libs):
        libs = libs.split()
    else:
        libs = SCons.Util.flatten(libs)

    def _expanded(var):
        # Fetch a prefix/suffix construction variable as a substituted list,
        # defaulting to [''] when it is unset.
        try:
            value = env[var]
            if not SCons.Util.is_List(value):
                value = [value]
        except KeyError:
            value = ['']
        return list(map(env.subst, value))

    prefixes = _expanded('LIBPREFIXES')
    suffixes = _expanded('LIBSUFFIXES')

    if callable(libpath):
        libpath = libpath()

    find_file = SCons.Node.FS.find_file
    adjustixes = SCons.Util.adjustixes
    result = []
    for lib in libs:
        if not SCons.Util.is_String(lib):
            # Already a Node (or similar); keep it unchanged.
            result.append(lib)
            continue
        lib = env.subst(lib)
        for suf in suffixes:
            for pref in prefixes:
                candidate = find_file(adjustixes(lib, pref, suf),
                                      libpath, verbose=print_find_libs)
                if candidate:
                    result.append(candidate)
    return result
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads files to Google Storage content addressed."""
import hashlib
import optparse
import os
import Queue
import re
import stat
import sys
import tarfile
import threading
import time
from download_from_google_storage import get_sha1
from download_from_google_storage import Gsutil
from download_from_google_storage import PrinterThread
from download_from_google_storage import GSUTIL_DEFAULT_PATH
USAGE_STRING = """%prog [options] target [target2 ...].
Target is the file intended to be uploaded to Google Storage.
If target is "-", then a list of files will be taken from standard input
This script will generate a file (original filename).sha1 containing the
sha1 sum of the uploaded file.
It is recommended that the .sha1 file is checked into the repository,
the original file removed from the repository, and a hook added to the
DEPS file to call download_from_google_storage.py.
Example usages
--------------
Scan the current directory and upload all files larger than 1MB:
find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -b bkt -
(Replace "bkt" with the name of a writable bucket.)
"""
def get_md5(filename):
  """Return the hex MD5 digest of a file, read in 1 MiB chunks."""
  digest = hashlib.md5()
  with open(filename, 'rb') as f:
    chunk = f.read(1024 * 1024)
    while chunk:
      digest.update(chunk)
      chunk = f.read(1024 * 1024)
  return digest.hexdigest()
def get_md5_cached(filename):
  """Return the file's MD5, using a cached <filename>.md5 file when valid.

  If the cache file is missing OR does not contain a well-formed 32-char
  hex digest, the hash is recomputed and the cache file (re)written.
  The original code returned None when the cache file existed but was
  malformed, which poisoned callers; that path now falls through to a
  fresh computation.
  """
  # See if we can find an existing MD5 sum stored in a file.
  if os.path.exists('%s.md5' % filename):
    with open('%s.md5' % filename, 'rb') as f:
      md5_match = re.search('([a-z0-9]{32})', f.read())
      if md5_match:
        return md5_match.group(1)
  # No cache file, or it was malformed: compute and cache the digest.
  md5_hash = get_md5(filename)
  with open('%s.md5' % filename, 'wb') as f:
    f.write(md5_hash)
  return md5_hash
def _upload_worker(
    thread_num, upload_queue, base_url, gsutil, md5_lock, force,
    use_md5, stdout_queue, ret_codes, gzip):
  """Upload files pulled from upload_queue to Google Storage.

  Runs until a (None, None) sentinel is dequeued.  Progress messages go to
  stdout_queue; (exit_code, message) failures go to ret_codes.
  """
  while True:
    filename, sha1_sum = upload_queue.get()
    if not filename:
      break  # Sentinel: no more work.
    file_url = '%s/%s' % (base_url, sha1_sum)
    if gsutil.check_call('ls', file_url)[0] == 0 and not force:
      # File exists, check MD5 hash.
      _, out, _ = gsutil.check_call_with_retries('ls', '-L', file_url)
      etag_match = re.search('ETag:\s+([a-z0-9]{32})', out)
      if etag_match:
        remote_md5 = etag_match.group(1)
        # Calculate the MD5 checksum to match it to Google Storage's ETag.
        with md5_lock:
          if use_md5:
            local_md5 = get_md5_cached(filename)
          else:
            local_md5 = get_md5(filename)
        if local_md5 == remote_md5:
          stdout_queue.put(
              '%d> File %s already exists and MD5 matches, upload skipped' %
              (thread_num, filename))
          continue
    stdout_queue.put('%d> Uploading %s...' % (
        thread_num, filename))
    gsutil_args = ['cp']
    if gzip:
      gsutil_args.extend(['-z', gzip])
    gsutil_args.extend([filename, file_url])
    code, _, err = gsutil.check_call_with_retries(*gsutil_args)
    if code != 0:
      ret_codes.put(
          (code,
           'Encountered error on uploading %s to %s\n%s' %
           (filename, file_url, err)))
      continue
    # Mark executable files with the header "x-goog-meta-executable: 1" which
    # the download script will check for to preserve the executable bit.
    if not sys.platform.startswith('win'):
      if os.stat(filename).st_mode & stat.S_IEXEC:
        code, _, err = gsutil.check_call_with_retries(
            'setmeta', '-h', 'x-goog-meta-executable:1', file_url)
        # BUG FIX: was `if not code:`, which recorded an "error" only when
        # setmeta SUCCEEDED (code == 0) and silently dropped real failures.
        if code:
          ret_codes.put(
              (code,
               'Encountered error on setting metadata on %s\n%s' %
               (file_url, err)))
def get_targets(args, parser, use_null_terminator):
  """Resolve the upload target list; a single '-' means read from stdin."""
  if not args:
    parser.error('Missing target.')
  if args != ['-']:
    return args
  # Take stdin as a newline or null separated list of files.
  raw = sys.stdin.read()
  return raw.split('\0') if use_null_terminator else raw.splitlines()
def upload_to_google_storage(
    input_filenames, base_url, gsutil, force,
    use_md5, num_threads, skip_hashing, gzip):
  """Hash `input_filenames` and upload them content-addressed to `base_url`.

  Spawns `num_threads` uploader threads plus one printer thread; SHA-1
  hashing stays in the main thread (disk-bound).  Writes a <name>.sha1
  file next to each input.  Returns the highest exit code any upload
  reported (0 on success), or 1 for an invalid existing .sha1 file.
  """
  # We only want one MD5 calculation happening at a time to avoid HD thrashing.
  md5_lock = threading.Lock()

  # Start up all the worker threads plus the printer thread.
  all_threads = []
  ret_codes = Queue.Queue()
  ret_codes.put((0, None))  # Seed so the final scan always sees one entry.
  upload_queue = Queue.Queue()
  upload_timer = time.time()
  stdout_queue = Queue.Queue()
  printer_thread = PrinterThread(stdout_queue)
  printer_thread.daemon = True
  printer_thread.start()
  for thread_num in range(num_threads):
    t = threading.Thread(
        target=_upload_worker,
        args=[thread_num, upload_queue, base_url, gsutil, md5_lock,
              force, use_md5, stdout_queue, ret_codes, gzip])
    t.daemon = True
    t.start()
    all_threads.append(t)

  # We want to hash everything in a single thread since its faster.
  # The bottleneck is in disk IO, not CPU.
  hashing_start = time.time()
  for filename in input_filenames:
    if not os.path.exists(filename):
      stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
      continue
    if os.path.exists('%s.sha1' % filename) and skip_hashing:
      # Trust the pre-existing .sha1 file instead of re-hashing.
      stdout_queue.put(
          'Main> Found hash for %s, sha1 calculation skipped.' % filename)
      with open(filename + '.sha1', 'rb') as f:
        sha1_file = f.read(1024)
      if not re.match('^([a-z0-9]{40})$', sha1_file):
        print >> sys.stderr, 'Invalid sha1 hash file %s.sha1' % filename
        return 1
      upload_queue.put((filename, sha1_file))
      continue
    stdout_queue.put('Main> Calculating hash for %s...' % filename)
    sha1_sum = get_sha1(filename)
    with open(filename + '.sha1', 'wb') as f:
      f.write(sha1_sum)
    stdout_queue.put('Main> Done calculating hash for %s.' % filename)
    upload_queue.put((filename, sha1_sum))
  hashing_duration = time.time() - hashing_start

  # Wait for everything to finish.
  for _ in all_threads:
    upload_queue.put((None, None))  # To mark the end of the work queue.
  for t in all_threads:
    t.join()
  stdout_queue.put(None)
  printer_thread.join()

  # Print timing information.
  print 'Hashing %s files took %1f seconds' % (
      len(input_filenames), hashing_duration)
  print 'Uploading took %1f seconds' % (time.time() - upload_timer)

  # See if we ran into any errors.
  max_ret_code = 0
  for ret_code, message in ret_codes.queue:
    max_ret_code = max(ret_code, max_ret_code)
    if message:
      print >> sys.stderr, message
  if not max_ret_code:
    print 'Success!'
  return max_ret_code
def create_archives(dirs):
  """Tar+gzip each directory in `dirs`; return the new archive names."""
  archives = []
  for directory in dirs:
    archive = directory + '.tar.gz'
    with tarfile.open(archive, 'w:gz') as tar:
      tar.add(directory)
    archives.append(archive)
  return archives
def validate_archive_dirs(dirs):
  """Check that every entry is a plain subdirectory directly below cwd.

  Rejects any entry containing '..', anything that is not a directory,
  symlinks, and anything that is not an immediate child of the current
  working directory.
  """
  immediate_subdirs = next(os.walk('.'))[1]
  for entry in dirs:
    if '..' in entry:
      return False
    if not os.path.isdir(entry):
      return False
    if os.path.islink(entry):
      return False
    if entry not in immediate_subdirs:
      return False
  return True
def main():
  """Parse command-line options, locate gsutil, and run the upload."""
  parser = optparse.OptionParser(USAGE_STRING)
  parser.add_option('-b', '--bucket',
                    help='Google Storage bucket to upload to.')
  parser.add_option('-e', '--boto', help='Specify a custom boto file.')
  parser.add_option('-a', '--archive', action='store_true',
                    help='Archive directory as a tar.gz file')
  parser.add_option('-f', '--force', action='store_true',
                    help='Force upload even if remote file exists.')
  parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
                    help='Path to the gsutil script.')
  parser.add_option('-m', '--use_md5', action='store_true',
                    help='Generate MD5 files when scanning, and don\'t check '
                         'the MD5 checksum if a .md5 file is found.')
  parser.add_option('-t', '--num_threads', default=1, type='int',
                    help='Number of uploader threads to run.')
  parser.add_option('-s', '--skip_hashing', action='store_true',
                    help='Skip hashing if .sha1 file exists.')
  parser.add_option('-0', '--use_null_terminator', action='store_true',
                    help='Use \\0 instead of \\n when parsing '
                         'the file list from stdin. This is useful if the input '
                         'is coming from "find ... -print0".')
  parser.add_option('-z', '--gzip', metavar='ext',
                    help='Gzip files which end in ext. '
                         'ext is a comma-separated list')
  options, args = parser.parse_args()

  # Enumerate our inputs.
  input_filenames = get_targets(args, parser, options.use_null_terminator)

  if options.archive:
    if not validate_archive_dirs(input_filenames):
      parser.error('Only directories just below cwd are valid entries when '
                   'using the --archive argument. Entries can not contain .. '
                   ' and entries can not be symlinks. Entries was %s' %
                   input_filenames)
      return 1
    input_filenames = create_archives(input_filenames)

  # Make sure we can find a working instance of gsutil.
  gsutil = None
  if os.path.exists(GSUTIL_DEFAULT_PATH):
    gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
  else:
    # Fall back to searching PATH for a gsutil binary.
    for path in os.environ["PATH"].split(os.pathsep):
      if os.path.exists(path) and 'gsutil' in os.listdir(path):
        gsutil = Gsutil(os.path.join(path, 'gsutil'), boto_path=options.boto)
  if not gsutil:
    parser.error('gsutil not found in %s, bad depot_tools checkout?' %
                 GSUTIL_DEFAULT_PATH)

  base_url = 'gs://%s' % options.bucket
  return upload_to_google_storage(
      input_filenames, base_url, gsutil, options.force, options.use_md5,
      options.num_threads, options.skip_hashing, options.gzip)
if __name__ == '__main__':
  try:
    sys.exit(main())
  except KeyboardInterrupt:
    # Exit with a conventional non-zero status on Ctrl-C.
    sys.stderr.write('interrupted\n')
    sys.exit(1)
|
from contextlib import contextmanager
import time
@contextmanager
def timeit(name="code-block"):
    """
    Execute the codeblock and measure the time.

    >> with timeit('name') as f:
    >>     # Your code block

    The elapsed wall-clock time is printed when the block exits, even if
    it raises.
    """
    start = time.time()
    try:
        yield
    finally:
        # Execution is over.
        elapsed = time.time() - start
        print(f"Execution block: {name} finishes in : {elapsed} sec.")
|
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# foss@huawei.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy
from src.compress import compress
import pandas as pd
import re
from src.setting import setting
import time
import math
from src.logger_setting.my_logger import get_logger
logger = get_logger()
# Pre-compiled patterns used when parsing raw CPE metric strings.
PAT_DATA = re.compile(r"\d+.+\d+")  # number...number (NOTE(review): '.' is unescaped so it matches any char -- confirm intended)
PAT_NUM = re.compile(r"\d+")  # plain unsigned integer
PAT_DATA_LOW = re.compile(r"-?\d+")  # optionally negative integer (RSRP/RSRQ/RSSI/SINR)
PAT_CQI0 = re.compile(r"CQI0.+?(\d+)")  # value following a 'CQI0' tag
PAT_CQI1 = re.compile(r"CQI1.+?(\d+)")  # value following a 'CQI1' tag
PAT_ECGI = re.compile(r"\d+-\d+")  # ECGI shaped as '<enodebid>-<cellid>'
def get_extract_data():
    """Empty the extractData output folder and list every CPE csv to process."""
    target_dir = os.path.join(setting.data_path, 'extractData')
    compress.empty_folder(target_dir)
    return compress.get_all_csv_file(compress.cpe_unzip_path)
def extract_data_thread(file):
    """Extract one raw CPE csv file into a normalized extractData csv.

    Files that are zero bytes or contain no data rows are skipped: there is
    nothing to extract and no collect date to name the output file with.
    """
    if not os.stat(file).st_size > 0:
        return
    df = pd.DataFrame(columns=setting.parameter_json["extract_data_columns"]).rename(
        columns=setting.parameter_json["extract_data_columns_rename"])
    # NOTE(review): error_bad_lines is deprecated in newer pandas (use
    # on_bad_lines); left as-is to match the pandas version in use.
    file_df = pd.read_csv(file, error_bad_lines=False, index_col=False, engine='python').rename(
        columns=setting.parameter_json["extract_data_columns_rename"])
    if file_df.empty:
        # Bug fix: previously 'date' was left undefined for empty frames,
        # raising a NameError when the output filename was built below.
        return
    file_df['collectTime'] = pd.to_datetime(file_df['collectTime'])
    file_df['collectTime'] = file_df.collectTime.dt.strftime('%Y-%m-%d %H:%M:%S')
    # Output files are named after the collect date of the first row.
    date = str(file_df['collectTime'].values[0]).split(" ")[0].replace("-", "").replace("/", "")
    # NOTE(review): DataFrame.append was removed in pandas 2.0 (use concat);
    # kept to match the pinned pandas version.
    df = df.append(file_df)
    df = day_data_operate(df)
    now_time = str(int(round(time.time() * 10000)))
    df.to_csv(os.path.join(setting.data_path, 'extractData', date + '_' + now_time + r".csv"), index=False)
def day_data_operate(df):
    """Normalize the metric columns of one day's raw dataframe.

    Traffic counters become bytes, throughputs become Mbps, radio metrics
    become plain floats, CQI is sanitised, and ECGI is rebuilt from
    ENODEBID/CELLID when missing.
    """
    col_names = df.columns
    # Keep only the columns whitelisted by setting's extract_filter_columns.
    col_i_need = list(filter(lambda c: get_column(c), col_names))
    df = df[col_i_need]
    df['TotalDownload'] = df['TotalDownload'].apply(lambda x: get_traffic(x, PAT_DATA))
    df['TotalUpload'] = df['TotalUpload'].apply(lambda x: get_traffic(x, PAT_DATA))
    df['MaxDLThroughput'] = df['MaxDLThroughput'].apply(lambda x: get_throughput(x, PAT_DATA))
    df['MaxULThroughput'] = df['MaxULThroughput'].apply(lambda x: get_throughput(x, PAT_DATA))
    df['RSRP'] = df['RSRP'].apply(lambda x: get_number(x, PAT_DATA_LOW))
    df['RSRQ'] = df['RSRQ'].apply(lambda x: get_number(x, PAT_DATA_LOW))
    df['RSSI'] = df['RSSI'].apply(lambda x: get_number(x, PAT_DATA_LOW))
    df['SINR'] = df['SINR'].apply(lambda x: get_number(x, PAT_DATA_LOW))
    df['CQI'] = df['CQI'].apply(get_cqi)
    # ECGI needs row-wise access to ENODEBID/CELLID as fallbacks, hence the
    # axis=1 apply with a .loc assignment.
    df.loc[:, 'ECGI'] = df.apply(lambda x: get_ecgi(x['ECGI'], PAT_ECGI, x['ENODEBID'], x['CELLID']), axis=1)
    return df
def get_data(value):
    """Return *value* unchanged, mapping a plain float NaN to the empty string."""
    is_plain_float_nan = type(value) == float and math.isnan(value)
    return '' if is_plain_float_nan else value
def get_column(column):
    """Return True if *column* matches any configured extract filter pattern."""
    patterns = setting.parameter_json.get("extract_filter_columns")
    return any(re.match(pattern, column) for pattern in patterns)
def get_number(data, pat, invalid_value=setting.INVALID_VALUE):
    """Extract the first numeric match of *pat* from *data* as a float.

    Returns *invalid_value* when nothing matches.
    """
    matches = pat.findall(str(data))
    return float(matches[0]) if matches else invalid_value
# Convert a traffic counter string into Bytes.
def get_traffic(data, pat):
    """Parse *data* with *pat* and scale KB/MB/GB suffixed values to bytes."""
    text = str(data)
    value = get_number(data, pat, 0)
    if value == 0:
        return value
    for unit, factor in (("KB", 1024), ("MB", 1024 ** 2), ("GB", 1024 ** 3)):
        if unit in text:
            return value * factor
    # No recognised unit: apply the configured default multiplier, unless the
    # parsed value is the sentinel for "invalid".
    return value * setting.parameter_json["to_Byte"] if value != setting.INVALID_VALUE else value
# Convert a throughput string into Mbps.
def get_throughput(data, pat):
    """Parse *data* with *pat* and convert the value to Mbps.

    Returns setting.INVALID_VALUE unchanged when no number can be parsed.

    Bug fix: the specific units ("KB/s", "MB/s", "GB/s") must be tested
    before the generic "B/s", because e.g. "KB/s" also contains the
    substring "B/s" and was previously mis-scaled as plain bytes/second.
    """
    str_data = str(data)
    result = get_number(data, pat)
    if result == setting.INVALID_VALUE:
        return result
    if "KB/s" in str_data:
        return result * 8 / 1024
    elif "MB/s" in str_data:
        return result * 8
    elif "GB/s" in str_data:
        return result * 8 * 1024
    elif "Byte/s" in str_data or "B/s" in str_data:
        # Plain bytes per second.
        return result * 8 / 1024 / 1024
    elif "Kbps" in str_data:
        # "Kbps" is checked before "bps" for the same substring reason.
        return result / 1024
    elif "bps" in str_data:
        return result / 1024 / 1024
    else:
        return result
def get_ecgi(ecgi, pat, enodebid, cellid):
    """Return the ECGI as '<enodebid>-<cellid>'.

    Preference order: a well-formed token in *ecgi*, then one embedded in
    *cellid*, then the (enodebid, cellid) pair; setting.INVALID_STRING when
    either of those two is NaN.
    """
    # Look for an 'a-b' token in ecgi first, then in cellid (previously the
    # cellid pattern was evaluated twice).
    result = pat.findall(str(ecgi)) or pat.findall(str(cellid))
    if result:
        rel_ecgi = PAT_NUM.findall(result[0])
        return str(int(rel_ecgi[0])) + "-" + str(int(rel_ecgi[1]))
    if (type(enodebid) == float and math.isnan(enodebid)) or (type(cellid) == float and math.isnan(cellid)):
        return setting.INVALID_STRING
    return str(int(enodebid)) + "-" + str(int(cellid))
def get_cqi(data):
    """Extract a CQI value from *data*.

    A reported value of 127 appears to be treated as "invalid" here (TODO
    confirm against the device spec), as are negative values; both fall back
    to the CQI1 reading or ultimately setting.INVALID_VALUE.
    """
    if 'CQI' in str(data):
        result = get_number(data, PAT_CQI0)
        # Bug fix: get_number returns a float, so the previous comparison
        # against the string '127' could never be true.
        if result == 127 or int(result) < 0:
            result = get_number(data, PAT_CQI1)
            if result == 127 or int(result) < 0:
                return setting.INVALID_VALUE
        return result
    else:
        result = get_number(data, PAT_DATA)
        if result == 127 or int(result) < 0:
            return setting.INVALID_VALUE
        else:
            return result
def extract_data():
    """Run the extraction for every csv file found in the unzip folder."""
    for csv_file in get_extract_data():
        extract_data_thread(csv_file)
if __name__ == '__main__':
    # Print wall-clock start/end so a manual run shows the total duration.
    print(time.localtime(time.time()))
    extract_data()
    print(time.localtime(time.time()))
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The UFO Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import UFOTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
    """P2P connection that tallies how many times each block is received."""
    def __init__(self):
        super().__init__()
        # block sha256 -> number of times that block was delivered to us
        self.block_receive_map = defaultdict(int)
    def on_inv(self, message):
        # Ignore inv announcements; this test only requests blocks explicitly.
        pass
    def on_block(self, message):
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(UFOTestFramework):
    """Functional test of -maxuploadtarget byte accounting and disconnects."""
    def set_test_params(self):
        # Single node, fresh chain, 800MB/day upload target.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[
            "-maxuploadtarget=800",
            "-acceptnonstdtxn=1",
            "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
        ]]
        self.supports_cli = False
        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Exercise old-block throttling, the new-block exemption, the 24h
        counter reset, and the 'download' permission override."""
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)
        # Generate some old blocks
        self.nodes[0].generate(130)
        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []
        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)
        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)
        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)
        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)
        # p2p_conns[0] will test what happens if we just keep requesting the
        # the same big old block too many times (expect: disconnect)
        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size
        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_and_ping(getdata_request)
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for _ in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")
        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_and_ping(getdata_request)
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
        self.log.info("Peer 1 able to repeatedly download new block")
        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)
        self.log.info("Peer 1 disconnected after trying to download old block")
        self.log.info("Advancing system time on node to clear counters...")
        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_and_ping(getdata_request)
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
        self.log.info("Peer 2 able to download old block")
        self.nodes[0].disconnect_p2ps()
        self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
        self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1"])
        # Reconnect to self.nodes[0]
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(20):
            peer.send_and_ping(getdata_request)
            assert_equal(peer.block_receive_map[big_new_block], i+1)
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        peer.send_and_ping(getdata_request)
        self.log.info("Peer still connected after trying to download old block (download permission)")
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 1) # node is still connected
        assert_equal(peer_info[0]['permissions'], ['download'])
# Standard functional-test entry point.
if __name__ == '__main__':
    MaxUploadTest().main()
|
from typing import Type, Dict
from easyagents.backends import core as bcore
import easyagents.backends.tfagents
#import easyagents.backends.kerasrl
class BackendAgentFactory(bcore.BackendAgentFactory):
    """Backend which redirects all calls to some default implementation."""
    backend_name = 'default'
    def get_algorithms(self) -> Dict[Type, Type[bcore.BackendAgent]]:
        """Yields a mapping of EasyAgent types to the implementations provided by this backend.

        Note: the annotation uses the `bcore` alias this module already
        imports, instead of spelling out `easyagents.backends.core`, which is
        only reachable as an attribute because of that same import.
        """
        return {
            # easyagents.agents.CemAgent: easyagents.backends.kerasrl.KerasRlCemAgent,
            easyagents.agents.DqnAgent: easyagents.backends.tfagents.TfDqnAgent,
            # easyagents.agents.DoubleDqnAgent: easyagents.backends.kerasrl.KerasRlDoubleDqnAgent,
            # easyagents.agents.DuelingDqnAgent: easyagents.backends.kerasrl.KerasRlDuelingDqnAgent,
            easyagents.agents.PpoAgent: easyagents.backends.tfagents.TfPpoAgent,
            easyagents.agents.RandomAgent: easyagents.backends.tfagents.TfRandomAgent,
            easyagents.agents.ReinforceAgent: easyagents.backends.tfagents.TfReinforceAgent,
            easyagents.agents.SacAgent: easyagents.backends.tfagents.TfSacAgent}
|
# Deprecation shim: this config was renamed; it simply inherits the new file
# and records the rename so the config loader can point users at it.
_base_ = 'shufflenet-v1-1x_16xb64_in1k.py'
_deprecation_ = dict(
    expected='shufflenet-v1-1x_16xb64_in1k.py',
    reference='https://github.com/open-mmlab/mmclassification/pull/508',
)
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: models.py
# Project: core
# Author: Brian Cherinka
# Created: Saturday, 12th September 2020 12:55:22 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Saturday, 12th September 2020 12:55:22 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
import re
from marshmallow.fields import Field
import six
import orjson
from marshmallow import Schema, fields, post_load
from fuzzy_types.fuzzy import FuzzyList
# core classes
class BaseClass(object):
    # NOTE(review): __new__ falls through and returns None, so instantiating
    # this class yields None rather than an instance -- presumably a
    # deliberate placeholder base; confirm before relying on it.
    def __new__(cls, *args, **kwargs):
        pass
class BaseSchema(Schema):
    ''' Base class to use for all new Schema objects '''
    # The model class this schema deserializes into; set by create_schema.
    _class = None
    class Meta:
        ordered = True
        # orjson handles the (de)serialization of rendered payloads.
        render_module = orjson
    @post_load
    def make_object(self, data, **kwargs):
        ''' this function deserializes a schema to a class object '''
        return self._class(**data)
class ObjectField(fields.Field):
    ''' custom marshmallow object field
    This is a custom marshmallow Field class used to indicate that an attribute
    should be represented by a custom model object type, rather than a string or integer. It
    contains special methods for custom serialization and deserialization of model datatypes.
    For example, the yaml string representation 'LOG' for a log-linear wavelength will get
    deserialized into an instance Wavelength('LOG'). Custom fields are described at
    https://marshmallow.readthedocs.io/en/3.0/custom_fields.html.
    '''
    def _serialize(self, value, attr, obj, **kwargs):
        # Serialize a model object back to a string: its release, name, or
        # title, whichever exists first; None serializes to ''.
        if value is None:
            return ''
        return (value.release if hasattr(value, 'release') else value.name if hasattr(value, 'name')
                else value.title if hasattr(value, 'title') else '')
    def _deserialize(self, value, attr, data, **kwargs):
        # NOTE(review): relies on self.default and self.models, neither of
        # which is set in this class -- presumably attached to the field
        # elsewhere; confirm before reuse.
        name = self.default
        assert isinstance(value, six.string_types), f'{value} must be a string'
        data = self.models.get(name, None)
        # Fall back to the raw string when no model lookup matches.
        return data[value] if data and value in data else value
# main/helper functions
def _get_attr(obj: object, name: str):
    ''' Get an attribute from a class object

    Returns the attribute called *name* from *obj*, or None when the
    attribute does not exist.

    Parameters
    ----------
    obj : object
        A class object to access
    name : str
        The attribute name to access

    Returns
    -------
    a class attribute
    '''
    if not hasattr(obj, name):
        return None
    return obj.__getattribute__(name)
def create_class(data: dict, mixin: object = None) -> object:
    ''' creates a new datamodel object class

    Constructs a Python class object based on a model "schema" dictionary.
    Converts a model yaml file, 'versions.yaml' into a Python Version class object,
    which is used for instantiating the designated "objects" in the yaml section.

    Parameters
    ----------
    data : dict
        The schema dictonary section of a yaml file
    mixin : object
        A custom model class to mixin with base model

    Returns
    -------
    A new Python class object
    '''
    name = data.get('name', None) or data.get('title', None)
    # define custom repr
    def new_rep(self):
        reprstr = f'<{name}({self._repr_fields})>'
        return reprstr
    # define custom str
    def new_str(self):
        name = (_get_attr(self, 'name') or _get_attr(self, 'title') or
                _get_attr(self, 'release') or '')
        return name
    # get the attributes to add to the repr
    # Bug fix: added_fields must always be defined, since new_init closes
    # over it; previously a schema without attributes/properties raised a
    # NameError the first time the class was instantiated with kwargs.
    added_fields = []
    props = data.get('attributes', None) or data.get('properties', None)
    if props:
        added_fields = [a for a, vals in props.items()
                        if vals.get('add_to_repr', None)]
    # define a new init
    def new_init(self, **kwargs):
        repr_fields = ''
        # loop for attributes
        for key, value in list(kwargs.items()):
            self.__setattr__(key, value)
            # create a repr field string
            if key in added_fields:
                repr_fields += f', {key}={value}'
        # create a string of the repr fields
        name = (_get_attr(self, 'name') or _get_attr(self, 'title') or
                _get_attr(self, 'release') or '')
        self._repr_fields = f'{name}' + repr_fields
    # create the new class and add the new methods
    bases = (mixin, object,) if mixin else (object,)
    obj = type(name, bases, {})
    obj.__init__ = new_init
    obj.__repr__ = new_rep
    obj.__str__ = new_str
    return obj
def parse_kind(value: str) -> tuple:
    ''' parse the kind value into a kind and subkind

    Parses the schema "kind" attribute into a kind and subkind if
    kind contain paranetheses, i.e. kind(subkind). For example,
    list(objects) return kind=list, subkind=objects.

    Parameters
    ----------
    value : str
        The type of field

    Returns
    -------
    A tuple of the field type and any sub-type
    '''
    subkind = re.search(r'\((.+?)\)', value)
    if subkind:
        kind = value.split('(', 1)[0]
        subkind = subkind.group(1)
    else:
        kind = value
    # Bug fix: only default a *missing* list/tuple subfield to string; an
    # explicit subkind such as list(objects) was previously overwritten,
    # contradicting the documented behavior above.
    if subkind is None and kind.lower() in ('list', 'tuple'):
        subkind = 'string'
    return kind, subkind
def get_field(value: str, key: str = None) -> Field:
    ''' Get a Marshmallow Fields type

    Resolves the model schema "kind" value to the matching marshmallow
    field class; the special value "Objects" maps to an instance of the
    custom ObjectField.

    Parameters
    ----------
    value : str
        The kind of field to retrieve, e.g. string
    key : str
        The name of the attribute for the field

    Returns
    -------
    a marshmallow field class
    '''
    if hasattr(fields, value):
        return getattr(fields, value)
    if value == 'Objects':
        return ObjectField(data_key=key)
    raise ValueError(f'Marshmallow Fields does not have {value}')
def create_field(data: dict, key: str = None, required: bool = None,
                 nodefault: bool = None) -> Field:
    ''' creates a marshmallow.fields object
    Parameters
    ----------
    data : dict
        A values dictionary for a given model attribute
    key : str
        The name of the attribute
    required : bool
        If True, sets the field as a required one. Default is False.
    nodefault : bool
        If True, turns off any defaults specified for fields. Default is False.
    Returns
    -------
    A marshmallow field instance to attach to a schema
    '''
    # parse the kind of input
    kind = data.get('kind', None) or data.get('type', None)
    kind = kind.title() if kind else kind
    kind, subkind = parse_kind(kind)
    # get the marshmallow field
    field = get_field(kind)
    # create a parameters dictionary to pass into the fields object
    params = {}
    params['required'] = data.get('required', False) if required is None else required
    if 'default' in data and not nodefault:
        # NOTE(review): 'missing'/'default' are the pre-3.13 marshmallow
        # names (now 'load_default'/'dump_default') -- matches the pinned
        # marshmallow version; confirm on upgrade.
        params['missing'] = data.get('default', None)
        params['default'] = data.get('default', None)
    # set key to use the model indicated if use_model is set
    key = data['use_model'] if 'use_model' in data else key
    # create any arguments for sub-fields
    args = []
    if subkind:
        skinds = subkind.split(',')
        subfields = [get_field(i.title(), key=key) for i in skinds]
        # differentiate args for lists and tuples
        if kind == 'List':
            assert len(subfields) == 1, 'List can only accept one subfield type.'
            args.extend(subfields)
        elif kind == 'Tuple':
            args.append(subfields)
    # instantiate the fields object with the relevant args and parameters
    return field(*args, **params)
def create_schema(data: dict, mixin: object = None) -> Schema:
    ''' creates a new class for schema validation
    Constructs a marshmallow schema class object used to validate
    the creation of new Python objects for this class. Takes a
    model "schema" dictionary and builds new Python classes to represent
    the model Object and an Object Schema for purposes of validation.
    See https://marshmallow.readthedocs.io/en/3.0/quickstart.html for a guide on
    deserializing data using marshmallow schema validation.
    Parameters
    ----------
    data : dict
        The schema dictonary section of a yaml file
    mixin : object
        A custom model class to mixin with base model
    Returns
    -------
    A marshmallow schema class object
    '''
    # create a dictionary of class attributes from the schema
    # (the class name may come from either the 'name' or the 'title' key)
    name = data.get('name') or data.get('title')
    attrs = {}
    props = data.get('attributes', None) or data.get('properties', None)
    if props:
        # create marshmallow schema fields for each attribute
        for attr, values in props.items():
            attrs[attr] = create_field(values, key=attr)
    # create the base object class
    class_obj = create_class(data, mixin=mixin)
    # add the object class to the schema attributes to allow
    # for object deserialization from yaml representation. See BaseSchema for use.
    attrs['_class'] = class_obj
    # create the new schema class object
    objSchema = type(name + 'Schema', (BaseSchema,), attrs)
    # add the schema class instance to the object class for accessibility
    class_obj._schema = objSchema()
    return objSchema
def generate_models(data: dict, make_fuzzy: bool = True, mixin: object = None) -> list:
    ''' Generate a list of datamodel types
    Converts a models yaml file, e.g. manga/versions.yaml, into a list of Python instances.
    A model Schema class is created using the "schema" section of the yaml file. The schema
    class is used to validate and instantiate the list of objects defined in the "objects"
    section.
    Parameters
    ----------
    data : dict
        A yaml loaded data structure
    make_fuzzy : bool
        If True, returns a Fuzzy list of models
    mixin : object
        A custom model class to mixin with base model
    Returns
    -------
    A list of instantiated models
    '''
    # create the schema class object
    schema = create_schema(data['schema'], mixin=mixin)
    # validate and deserialize the model data in Python objects
    # (marshmallow's load raises ValidationError if any entry fails validation)
    models = schema(many=True).load(data['objects'], many=True)
    # optionally make the model list fuzzy
    if make_fuzzy:
        models = FuzzyList(models)
    return models
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DetectLanguageEntitiesResult(object):
    """
    Result of entities detect call.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new DetectLanguageEntitiesResult object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param entities:
            The value to assign to the entities property of this DetectLanguageEntitiesResult.
        :type entities: list[oci.ai_language.models.Entity]
        """
        # Maps attribute names to their swagger (API) types and wire names;
        # used by the generated (de)serialization machinery.
        self.swagger_types = {
            'entities': 'list[Entity]'
        }
        self.attribute_map = {
            'entities': 'entities'
        }
        self._entities = None
    @property
    def entities(self):
        """
        **[Required]** Gets the entities of this DetectLanguageEntitiesResult.
        List of entities.
        :return: The entities of this DetectLanguageEntitiesResult.
        :rtype: list[oci.ai_language.models.Entity]
        """
        return self._entities
    @entities.setter
    def entities(self, entities):
        """
        Sets the entities of this DetectLanguageEntitiesResult.
        List of entities.
        :param entities: The entities of this DetectLanguageEntitiesResult.
        :type: list[oci.ai_language.models.Entity]
        """
        self._entities = entities
    def __repr__(self):
        # Render the model as a flat dict for debugging.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Generated models compare by their full attribute dictionaries.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
def render_graph_testGaussianBlur():
    """Build the 'Gaussian Blur' render graph: depth + skybox + forward
    lighting feeding a Gaussian blur pass, whose output is the graph output."""
    graph = RenderGraph("Gaussian Blur")
    graph.addPass(RenderPass("DepthPass", {'depthFormat': ResourceFormat.D32Float}), "DepthPass")
    graph.addPass(RenderPass("SkyBox"), "SkyBox")
    graph.addPass(RenderPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False}), "ForwardLightingPass")
    graph.addPass(RenderPass("GaussianBlurPass"), "GaussianBlur")
    graph.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
    graph.addEdge("DepthPass.depth", "SkyBox.depth")
    graph.addEdge("SkyBox.target", "ForwardLightingPass.color")
    graph.addEdge("ForwardLightingPass.color", "GaussianBlur.src")
    graph.markOutput("GaussianBlur.dst")
    return graph
test_Gaussian_Blur = render_graph_testGaussianBlur()
# When run inside the host app, 'm' is predefined and the graph is registered;
# when run standalone the NameError is deliberately ignored.
# (Idiom fix: the no-op was previously the bare expression 'None'.)
try: m.addGraph(test_Gaussian_Blur)
except NameError: pass
|
from django.urls import path
from .views import DepositMoneyView, WithdrawMoneyView, TransactionRepostView, TransactionForMFB
# URL namespace for reversing, e.g. 'transactions:deposit_money'.
app_name = 'transactions'
urlpatterns = [
    # Per-account routes keyed by the account slug.
    path("<slug:slug>/withdraw-deposit-transactions/", DepositMoneyView.as_view(), name="deposit_money"),
    path("<slug:slug>/list/", TransactionForMFB.as_view(), name="transaction_list"),
    path("<slug:slug>/report/", TransactionRepostView.as_view(), name="transaction_report"),
    # Withdrawal route without a slug.
    path("withdraw/", WithdrawMoneyView.as_view(), name="withdraw_money"),
]
|
"""Test printing ObjC objects that use unbacked properties - so that the static ivar offsets are incorrect."""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestObjCIvarOffsets(TestBase):
    """Check ivar values of ObjC objects whose properties are unbacked, so
    that static ivar offsets would be incorrect."""
    mydir = TestBase.compute_mydir(__file__)
    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line numbers to break inside main().
        self.main_source = "main.m"
        self.stop_line = line_number(
            self.main_source, '// Set breakpoint here.')
    @skipUnlessDarwin
    @add_test_categories(['pyapi'])
    def test_with_python_api(self):
        """Test printing ObjC objects that use unbacked properties"""
        self.build()
        exe = os.path.join(os.getcwd(), "a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        breakpoint = target.BreakpointCreateByLocation(
            self.main_source, self.stop_line)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, "Created a process.")
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(
            process.GetState(), lldb.eStateStopped,
            "Stopped it too.")
        thread_list = lldbutil.get_threads_stopped_at_breakpoint(
            process, breakpoint)
        self.assertEqual(len(thread_list), 1)
        thread = thread_list[0]
        frame = thread.GetFrameAtIndex(0)
        self.assertTrue(frame, "frame 0 is valid")
        mine = thread.GetFrameAtIndex(0).FindVariable("mine")
        self.assertTrue(mine, "Found local variable mine.")
        # Test the value object value for BaseClass->_backed_int
        error = lldb.SBError()
        mine_backed_int = mine.GetChildMemberWithName("_backed_int")
        self.assertTrue(
            mine_backed_int,
            "Found mine->backed_int local variable.")
        backed_value = mine_backed_int.GetValueAsSigned(error)
        self.assertTrue(error.Success())
        self.assertEqual(backed_value, 1111)
        # Test the value object value for DerivedClass->_derived_backed_int
        mine_derived_backed_int = mine.GetChildMemberWithName(
            "_derived_backed_int")
        self.assertTrue(mine_derived_backed_int,
                        "Found mine->derived_backed_int local variable.")
        derived_backed_value = mine_derived_backed_int.GetValueAsSigned(error)
        self.assertTrue(error.Success())
        self.assertEqual(derived_backed_value, 3333)
        # Make sure we also get bit-field offsets correct:
        mine_flag2 = mine.GetChildMemberWithName("flag2")
        self.assertTrue(mine_flag2, "Found mine->flag2 local variable.")
        flag2_value = mine_flag2.GetValueAsUnsigned(error)
        self.assertTrue(error.Success())
        self.assertEqual(flag2_value, 7)
|
from django.contrib import admin
from .models import *
# Register every pharmacy model with the default admin site.
for model in (
    Drug,
    DrugForm,
    Manufacturer,
    Category,
    Buying,
    Selling,
    SiteConfig,
    GlobalChecker,
    Order,
    NotSentMail,
    Notification,
):
    admin.site.register(model)
|
import math
import types
import matplotlib.pyplot as P
import numpy as N
def plotres(psr, deleted=False, group=None, **kwargs):
    """Plot residuals, compute unweighted rms residual.

    psr is a pulsar object exposing residuals()/toas()/toaerrs; when *group*
    is given, one errorbar series is drawn per distinct value of that flag.
    """
    res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
    # Unless asked otherwise, drop deleted TOAs from all three arrays.
    if (not deleted) and N.any(psr.deleted != 0):
        res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
        print("Plotting {0}/{1} nondeleted points.".format(len(res), psr.nobs))
    # Unweighted rms residual, reported in microseconds.
    meanres = math.sqrt(N.mean(res**2)) / 1e-6
    if group is None:
        i = N.argsort(t)
        P.errorbar(t[i], res[i] / 1e-6, yerr=errs[i], fmt="x", **kwargs)
    else:
        if (not deleted) and N.any(psr.deleted):
            flagmask = psr.flagvals(group)[~psr.deleted]
        else:
            flagmask = psr.flagvals(group)
        unique = list(set(flagmask))
        # One series per distinct flag value.
        for flagval in unique:
            f = flagmask == flagval
            flagres, flagt, flagerrs = res[f], t[f], errs[f]
            i = N.argsort(flagt)
            P.errorbar(flagt[i], flagres[i] / 1e-6, yerr=flagerrs[i], fmt="x", **kwargs)
        P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))
    P.xlabel("MJD")
    P.ylabel("res [us]")
    P.title("{0} - rms res = {1:.2f} us".format(psr.name, meanres))
# select parameters by name or number, omit non-existing
# select parameters by name or number, omit non-existing
def _select(p, pars, select):
    """Resolve *select* entries (names or indices) against *pars*, keeping
    only names present in *pars* and indices below *p*; returns (count, indices)."""
    chosen = []
    for item in select:
        if isinstance(item, str):
            if item in pars:
                chosen.append(pars.index(item))
        elif isinstance(item, int) and item < p:
            chosen.append(item)
    return len(chosen), chosen
def plothist(
    data,
    pars=[],
    offsets=[],
    norms=[],
    select=[],
    weights={},
    ranges={},
    labels={},
    skip=[],
    append=False,
    bins=50,
    color="k",
    linestyle=None,
    linewidth=1,
    title=None,
):
    """Plot 1-D marginal histograms of sample columns, one subplot per kept
    parameter.

    NOTE(review): the mutable defaults ([], {}) are shared across calls --
    safe only while they are never mutated here; confirm before changing.
    NOTE(review): P.hold and the hist 'normed' keyword were removed in
    matplotlib 3.x -- this code targets an older matplotlib; confirm the
    pinned version before upgrading.
    """
    if hasattr(data, "data") and not isinstance(data, N.ndarray):
        # parse a multinestdata structure
        if not pars and hasattr(data, "parnames"):
            pars = data.parnames
        data = data.data
    p = data.shape[-1]
    if not pars:
        pars = map("p{0}".format, range(p))
    if offsets:
        # Shift columns by per-parameter (dict) or positional (list) offsets.
        data = data.copy()
        if isinstance(offsets, dict):
            for i, par in enumerate(pars):
                if par in offsets:
                    data[:, i] = data[:, i] - offsets[par]
        else:
            if len(offsets) < p:
                offsets = offsets + [0.0] * (p - len(offsets))
            data = data - N.array(offsets)
    if norms:
        if len(norms) < p:
            norms = norms + [1.0] * (p - len(norms))
        data = data / norms
    if select:
        p, sel = _select(p, pars, select)
        data, pars = data[:, sel], [pars[s] for s in sel]
    if weights:
        # Combine per-parameter weights (callables or arrays) into one vector.
        weight = 1
        for i, par in enumerate(pars):
            if par in weights:
                if isinstance(weights[par], types.FunctionType):
                    weight = weight * N.vectorize(weights[par])(data[:, i])
                else:
                    weight = weight * weights[par]
    else:
        weight = None
    # only need lines for multiple plots
    # lines = ['dotted','dashdot','dashed','solid']
    if not append:
        P.figure(figsize=(16 * (min(p, 4) / 4.0), 3 * (int((p - 1) / 4) + 1)))
    for i in range(p):
        # figure out how big the multiplot needs to be
        if type(append) == int: # need this since isinstance(False,int) == True
            q = append
        elif isinstance(append, (list, tuple)):
            q = len(append)
        else:
            q = p
        # increment subplot index if we're skipping
        sp = i + 1
        for s in skip:
            if i >= s:
                sp = sp + 1
        # if we're given the actual parnames of an existing plot, figure out where we fall
        if isinstance(append, (list, tuple)):
            try:
                sp = append.index(pars[i]) + 1
            except ValueError:
                continue
        P.subplot(int((q - 1) / 4) + 1, min(q, 4), sp)
        if append:
            P.hold(True)
        if pars[i] in ranges:
            dx = ranges[pars[i]]
            P.hist(
                data[:, i],
                bins=int(bins * (N.max(data[:, i]) - N.min(data[:, i])) / (dx[1] - dx[0])),
                weights=weight,
                normed=True,
                histtype="step",
                color=color,
                linestyle=linestyle,
                linewidth=linewidth,
            )
            P.xlim(dx)
        else:
            P.hist(
                data[:, i],
                bins=bins,
                weights=weight,
                normed=True,
                histtype="step",
                color=color,
                linestyle=linestyle,
                linewidth=linewidth,
            )
        P.xlabel(labels[pars[i]] if pars[i] in labels else pars[i])
        # P.ticklabel_format(style='sci',axis='both',scilimits=(-3,4),useoffset='True')
        P.locator_params(axis="both", nbins=6)
        P.minorticks_on()
        fx = P.ScalarFormatter(useOffset=True, useMathText=True)
        fx.set_powerlimits((-3, 4))
        fx.set_scientific(True)
        fy = P.ScalarFormatter(useOffset=True, useMathText=True)
        fy.set_powerlimits((-3, 4))
        fy.set_scientific(True)
        P.gca().xaxis.set_major_formatter(fx)
        P.gca().yaxis.set_major_formatter(fy)
    P.hold(False)
    if title and not append:
        P.suptitle(title)
    P.tight_layout()
# to do: should fix this histogram so that the contours are correct
# even for restricted ranges...
def _plotonehist2(
    x,
    y,
    parx,
    pary,
    smooth=False,
    colormap=True,
    ranges={},
    labels={},
    bins=50,
    levels=3,
    weights=None,
    color="k",
    linewidth=1,
):
    """Draw one 2-D joint-distribution panel: an optional filled colormap plus
    contours enclosing ~68/95/99.7% of the probability mass.

    NOTE(review): P.ishold/P.hold and histogram2d's 'normed' keyword are
    removed in modern matplotlib/numpy -- this code targets older versions.
    """
    hold = P.ishold()
    hrange = [
        ranges[parx] if parx in ranges else [N.min(x), N.max(x)],
        ranges[pary] if pary in ranges else [N.min(y), N.max(y)],
    ]
    [h, xs, ys] = N.histogram2d(x, y, bins=bins, normed=True, range=hrange, weights=weights)
    if colormap:
        P.contourf(0.5 * (xs[1:] + xs[:-1]), 0.5 * (ys[1:] + ys[:-1]), h.T, cmap=P.get_cmap("YlOrBr"))
        P.hold(True)
    H, tmp1, tmp2 = N.histogram2d(x, y, bins=bins, range=hrange, weights=weights)
    if smooth:
        # only need scipy if we're smoothing
        import scipy.ndimage.filters as SNF
        H = SNF.gaussian_filter(H, sigma=1.5 if smooth is True else smooth)
    # Normalize counts into probabilities.
    if weights is None:
        H = H / len(x)
    else:
        H = H / N.sum(H) # I think this is right...
    Hflat = -N.sort(-H.flatten()) # sort highest to lowest
    cumprob = N.cumsum(Hflat) # sum cumulative probability
    # Contour levels at the densities enclosing 1/2/3-sigma probability mass.
    levels = [N.interp(level, cumprob, Hflat) for level in [0.6826, 0.9547, 0.9973][:levels]]
    xs = N.linspace(hrange[0][0], hrange[0][1], bins)
    ys = N.linspace(hrange[1][0], hrange[1][1], bins)
    P.contour(xs, ys, H.T, levels, colors=color, linestyles=["-", "--", "-."][: len(levels)], linewidths=linewidth)
    P.hold(hold)
    if parx in ranges:
        P.xlim(ranges[parx])
    if pary in ranges:
        P.ylim(ranges[pary])
    P.xlabel(labels[parx] if parx in labels else parx)
    P.ylabel(labels[pary] if pary in labels else pary)
    P.locator_params(axis="both", nbins=6)
    P.minorticks_on()
    fx = P.ScalarFormatter(useOffset=True, useMathText=True)
    fx.set_powerlimits((-3, 4))
    fx.set_scientific(True)
    fy = P.ScalarFormatter(useOffset=True, useMathText=True)
    fy.set_powerlimits((-3, 4))
    fy.set_scientific(True)
    P.gca().xaxis.set_major_formatter(fx)
    P.gca().yaxis.set_major_formatter(fy)
def plothist2(
    data,
    pars=[],
    offsets=[],
    smooth=False,
    colormap=True,
    select=[],
    ranges={},
    labels={},
    bins=50,
    levels=3,
    weights=None,
    cuts=None,
    diagonal=True,
    title=None,
    color="k",
    linewidth=1,
    append=False,
):
    """Make a triangle ("corner") plot of the 1-D and 2-D marginals of data.

    :param data: (nsamples, npars) array, or an object exposing .data (and
        optionally .parnames), e.g. a multinest result structure.
    :param pars: parameter names; autogenerated as p0, p1, ... when empty.
    :param offsets: per-parameter values subtracted from the samples first.
    :param smooth, colormap, bins, levels, color, linewidth: passed through to
        _plotonehist2 for the off-diagonal panels.
    :param select: optional parameter subset, resolved via _select.
    :param ranges: dict of parameter -> (min, max) plot range.
    :param labels: dict of parameter -> axis label.
    :param weights: dict of parameter -> weight value or callable applied to
        that parameter's column.
    :param cuts: dict of parameter -> (lo, hi); samples outside are dropped.
    :param diagonal: if True, draw the 1-D histograms on the diagonal.
    :param title: optional figure title.
    :param append: if True, draw into the current figure instead of a new one.

    NOTE(review): the mutable default arguments ([], {}) are never mutated
    here, but None sentinels would be safer.
    """
    if hasattr(data, "data") and not isinstance(data, N.ndarray):
        # parse a multinestdata structure
        if not pars and hasattr(data, "parnames"):
            pars = data.parnames
        data = data.data
    m = data.shape[-1]
    if not pars:
        # NOTE(review): under Python 3 this leaves `pars` as a map object and
        # the later pars[i] indexing would fail; this code appears to target
        # Python 2 — confirm before porting.
        pars = map("p{0}".format, range(m))
    if offsets:
        # pad with zeros so every column has an offset, then shift
        if len(offsets) < m:
            offsets = offsets + [0.0] * (m - len(offsets))
        data = data - N.array(offsets)
    if cuts:
        # drop samples outside the (lo, hi) window of each cut parameter
        for i, par in enumerate(pars):
            if par in cuts:
                data = data[data[:, i] > cuts[par][0], :]
                data = data[data[:, i] < cuts[par][1], :]
    if weights:
        # accumulate a combined per-sample weight across all weighted columns
        weight = 1
        for i, par in enumerate(pars):
            if par in weights:
                if isinstance(weights[par], types.FunctionType):
                    weight = weight * N.vectorize(weights[par])(data[:, i])
                else:
                    weight = weight * weights[par]
    else:
        weight = None
    if select:
        m, sel = _select(m, pars, select)
        data, pars = data[:, sel], [pars[s] for s in sel]
    if not append:
        # square figure, capped at 16 inches
        fs = min((m if diagonal else m - 1) * 4, 16)
        P.figure(figsize=(fs, fs))
    data = data.T
    if diagonal:
        # m x m grid: 1-D histograms on the diagonal, 2-D panels below it
        for i in range(m):
            if not append:
                P.subplot(m, m, i * (m + 1) + 1)
            if pars[i] in ranges:
                dx = ranges[pars[i]]
                # scale the bin count so bin width matches the restricted range
                P.hist(
                    data[i],
                    bins=int(50 * (N.max(data[i]) - N.min(data[i])) / (dx[1] - dx[0])),
                    weights=weight,
                    normed=True,
                    histtype="step",
                    color="k",
                )
                P.xlim(dx)
            else:
                P.hist(data[i], bins=50, weights=weight, normed=True, histtype="step", color="k")
            P.xlabel(labels[pars[i]] if pars[i] in labels else pars[i])
            P.ticklabel_format(style="sci", axis="both", scilimits=(-2, 2), useoffset="True")
            # P.tick_params(labelsize=12)
            for j in range(0, i):
                if not append:
                    P.subplot(m, m, i * m + j + 1)
                _plotonehist2(
                    data[j],
                    data[i],
                    pars[j],
                    pars[i],
                    smooth,
                    colormap,
                    ranges,
                    labels,
                    bins,
                    levels,
                    weights=weight,
                    color=color,
                    linewidth=linewidth,
                )
    else:
        # (m-1) x (m-1) grid of 2-D panels only
        for i in range(m - 1):
            for j in range(i + 1, m):
                if not append:
                    P.subplot(m - 1, m - 1, (m - 1) * i + j)
                _plotonehist2(
                    data[j],
                    data[i],
                    pars[j],
                    pars[i],
                    smooth,
                    colormap,
                    ranges,
                    labels,
                    bins,
                    levels,
                    weights=weight,
                    color=color,
                    linewidth=linewidth,
                )
    P.tight_layout()
    if title and not append:
        P.suptitle(title)
    elif title:
        P.title(title)
    # if save:
    #     P.savefig('figs/{0}-{1}-2.png'.format(psr,flms[0]))
def plotgwsrc(gwb):
    """
    Plot a GWB source population as a mollweide projection.
    """
    theta, phi, omega, polarization = gwb.gw_dist()
    # Convert (colatitude, azimuth) to the (longitude, latitude) convention
    # that the mollweide projection expects.
    lon = phi - N.pi
    lat = 0.5 * N.pi - theta
    # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
    # /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
    # RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
    # old_settings = N.seterr(invalid='ignore')
    P.title("GWB source population")
    _ = P.axes(projection="mollweide")
    scatter_handle = P.scatter(lon, lat, marker=".", s=1)
    # bar = N.seterr(**old_settings)
    return scatter_handle
|
"""
Xena Robot Framework library.
This module should contain ONLY wrapper methods. All logic should be implemented inside xenamanager package.
Wrappers should follow:
- short and meaningful name
- minimal number of parameters and with simple order
- it's better to create a new wrapper than to complicate parameters or add logic
- try to avoid default values
Limitations:
- no multi chassis support
- no multi-line support
@author yoram@ignissoft.com
"""
from __future__ import unicode_literals
import sys
import os
import getpass
import logging
import re
from importlib import import_module
from collections import OrderedDict
import site
from trafficgenerator.tgn_utils import ApiType
from xenavalkyrie.xena_app import init_xena
from xenavalkyrie.xena_port import XenaCaptureBufferType
from xenavalkyrie.xena_stream import XenaModifierType, XenaModifierAction
from xenavalkyrie.xena_statistics_view import XenaPortsStats, XenaStreamsStats, XenaTpldsStats
from xenavalkyrie.xena_tshark import Tshark, TsharkAnalyzer
__version__ = '0.4.0'
ROBOT_LIBRARY_DOC_FORMAT = 'reST'
class XenaRobot:
    """Robot Framework keyword library wrapping the xenavalkyrie package.

    Thin wrappers only - all real logic lives in xenavalkyrie. Ports and
    streams can be addressed either by zero-based index or by the location /
    name used when they were reserved or created.

    Limitations: no multi chassis support, no multi-line support.
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    #
    # Session management.
    #

    def __init__(self, api='socket', user=None, ip=None, port=57911):
        """ Create Xena Valkyrie app object.

        :param api: API type - socket or rest
        :param user: user name for session and login
        :param ip: optional REST server IP address
        :param port: optional REST server TCP port
        """
        user = user if user else getpass.getuser()
        self.logger = logging.getLogger('log')
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(logging.StreamHandler(sys.stdout))
        self.xm = init_xena(ApiType[api], self.logger, user, ip, port)

    def add_chassis(self, chassis='None', port=22611, password='xena'):
        """ Add chassis.

        :param chassis: chassis IP address.
            NOTE(review): the default is the *string* 'None', not the None
            object - looks unintentional, but callers may rely on it, so it is
            left unchanged; confirm before fixing.
        :param port: chassis port number
        :param password: chassis password
        """
        self.xm.session.add_chassis(chassis, port, password)

    def reserve_ports(self, *locations):
        """ Reserve ports only if ports are released.

        If one of the ports is reserved by another user the operation will fail.

        :param locations: list <ip/module/port> of port locations.
        """
        self.ports = self.xm.session.reserve_ports(locations, force=False)

    def reserve_ports_by_force(self, *locations):
        """ Reserve ports forcefully even if ports are reserved by other user.

        :param locations: list <ip/module/port> of port locations.
        """
        self.ports = self.xm.session.reserve_ports(locations, force=True)

    def release_ports(self, *ports):
        """ Release list of ports.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - release
            all ports.
        """
        for port in self._port_names_or_indices_to_objects(*ports):
            port.release()

    def load_config(self, port, config_file_name):
        """ Load configuration file onto port.

        :param port: port index (zero based) or port location as used in reserve command.
        :param config_file_name: full path to configuration file name (xpc).
        """
        self._port_name_or_index_to_object(port).load_config(config_file_name)

    def save_config(self, port, config_file_name):
        """ Save configuration file from port.

        :param port: port index (zero based) or port location as used in reserve command.
        :param config_file_name: full path to configuration file name (xpc).
        """
        self._port_name_or_index_to_object(port).save_config(config_file_name)

    def clear_statistics(self, *ports):
        """ Clear statistics for list of ports.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - clear stats
            for all ports.
        """
        self.xm.session.clear_stats(*self._port_names_or_indices_to_objects(*ports))

    def start_traffic(self, *ports):
        """ Start traffic on list of ports and return immediately.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - start
            traffic on all ports.
        """
        self.xm.session.start_traffic(False, *self._port_names_or_indices_to_objects(*ports))

    def run_traffic_blocking(self, *ports):
        """ Start traffic on list of ports and wait until all traffic is finished.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - start
            traffic on all ports.
        """
        self.xm.session.start_traffic(True, *self._port_names_or_indices_to_objects(*ports))

    def stop_traffic(self, *ports):
        """ Stop traffic on list of ports.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - stop
            traffic on all ports.
        """
        self.xm.session.stop_traffic(*self._port_names_or_indices_to_objects(*ports))

    def start_capture(self, *ports):
        """ Start capture on list of ports.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - start
            capture on all ports.
        """
        self.xm.session.start_capture(*self._port_names_or_indices_to_objects(*ports))

    def stop_capture(self, *ports):
        """ Stop capture on list of ports.

        :param ports: ports indices (zero based) or ports locations as used in reserve command. If empty - stop
            capture on all ports.
        """
        self.xm.session.stop_capture(*self._port_names_or_indices_to_objects(*ports))

    def get_statistics(self, view='port'):
        """ Get statistics for all ports/streams/TPLDs.

        :param view: port/stream/tpld.
        :return: dictionary of requested statistics.
        """
        stats = _view_name_2_object[view.lower()](self.xm.session).read_stats()
        return {k.name: v for k, v in stats.items()}

    #
    # Ports
    #

    def reset_port(self, port):
        """ Reset port-level parameters to standard values, and delete all streams, filters, capture, and dataset
        definitions.

        :param port: port index (zero based) or port location as used in reserve command.
        """
        self._port_name_or_index_to_object(port).reset()

    def get_port_attribute(self, port, attribute):
        """ Get port attribute.

        :param port: port index (zero based) or port location as used in reserve command.
        :param attribute: attribute name.
        :return: attribute value.
        :rtype: str
        """
        return self._port_name_or_index_to_object(port).get_attribute(attribute)

    def set_port_attributes(self, port, **attributes):
        """ Set port attribute.

        :param port: port index (zero based) or port location as used in reserve command.
        :param attributes: dictionary of {attribute: value} to set
        """
        return self._port_name_or_index_to_object(port).set_attributes(**attributes)

    def exec_port_command(self, port, command, *arguments):
        """ Execute any port command and return the returned value.

        :param port: port index (zero based) or port location as used in reserve command.
        :param command: command to execute.
        :param arguments: optional list of command arguments.
        """
        return self._port_name_or_index_to_object(port).send_command_return(command, *arguments)

    #
    # Streams.
    #

    def add_stream(self, port, name=None):
        """ Add stream. The newly created stream will have unique TPLD.

        :param port: port index (zero based) or port location as used in reserve command.
        :param name: stream name.
        :return: stream index.
        """
        stream = self._port_name_or_index_to_object(port).add_stream(name)
        return stream.id

    def remove_stream(self, port, stream):
        """ Remove stream.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        """
        index = self._stream_name_or_index_to_object(port, stream).id
        self._port_name_or_index_to_object(port).remove_stream(index)

    def get_stream_attribute(self, port, stream, attribute):
        """ Get stream attribute.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param attribute: attribute name.
        :return: attribute value.
        :rtype: str
        """
        return self._stream_name_or_index_to_object(port, stream).get_attribute(attribute)

    def set_stream_attributes(self, port, stream, **attributes):
        """ Set stream attribute.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param attributes: dictionary of {attribute: value} to set
        """
        return self._stream_name_or_index_to_object(port, stream).set_attributes(**attributes)

    def exec_stream_command(self, port, stream, command, *arguments):
        """ Execute any stream command and return the returned value.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param command: command to execute.
        :param arguments: optional list of command arguments.
        """
        return self._stream_name_or_index_to_object(port, stream).send_command_return(command, *arguments)

    #
    # Packet headers.
    #

    def get_packet(self, port, stream):
        """ Get packet as a printable string.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :return: string representation of stream packet.
        """
        return self._stream_name_or_index_to_object(port, stream).get_packet_headers()

    def get_packet_header(self, port, stream, header):
        """ Get packet header.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param header: requested packet header.
        :return: dictionary of <field: value>.
        :rtype: dict of (str, str)
        """
        header_body = self._get_packet_header(port, stream, header)
        # Bug fix: re.sub's 4th/5th positional parameters are count and flags,
        # so the original call passed re.MULTILINE (== 8) as a replacement
        # count. Pass the flags by keyword instead.
        fields_str = re.sub('vlan.*----------', '', header_body._summarize(),
                            flags=re.MULTILINE | re.DOTALL)
        fields = OrderedDict()
        # Each "name=value" line of the pypacker summary becomes a dict entry.
        for field in fields_str.split("\n"):
            key_values = field.strip().split('=')
            if len(key_values) > 1:
                key = key_values[0].split(" ")[0].strip()
                fields[key.strip()] = key_values[-1].strip()
        return fields

    def add_packet_headers(self, port, stream, *headers):
        """ Add packet headers.

        All headers will be added with some default values (not the same as Xena Manager default) and it is the test
        responsibility to set them.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param headers: list of header names to add (vlan, ip, ip6, tcp, etc.).
        """
        packet_headers = self._stream_name_or_index_to_object(port, stream).get_packet_headers()
        for header in headers:
            # Map the header name to the pypacker module file and class name.
            if header.lower() == 'vlan':
                header_py = 'ethernet.py'
                header_class = 'Dot1Q'
            else:
                header_py = header.lower() + '.py'
                header_class = header.upper()
            # Locate the module inside the installed pypacker package and
            # instantiate the header class dynamically.
            for site_packages_path in site.getsitepackages():
                pypacker_path = os.path.join(site_packages_path, 'pypacker')
                for dirpath, _, filenames in os.walk(pypacker_path):
                    if header_py in filenames:
                        module_name = dirpath[len(site_packages_path) + 1:]
                        header_module = import_module(module_name.replace(os.path.sep, '.') + '.' + header_py[:-3])
                        header_object = getattr(header_module, header_class)()
                        if header.lower() == 'vlan':
                            packet_headers.vlan.append(header_object)
                        else:
                            packet_headers += header_object
        self._stream_name_or_index_to_object(port, stream).set_packet_headers(packet_headers)

    def set_packet_header_fields(self, port, stream, header, **fields):
        """ Set packet header fields.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param header: packet header.
        :param fields: dictionary of <field, value> to set.
        :type fields: dict of (str, str)
        """
        # (removed a stray debug print of the header name)
        headers = self._stream_name_or_index_to_object(port, stream).get_packet_headers()
        header_body = self._get_packet_header(header=header, headers=headers)
        for field, value in fields.items():
            # pypacker expects ints for numeric fields; digit strings are converted.
            setattr(header_body, field, int(value) if str(value).isdigit() else value)
        self._stream_name_or_index_to_object(port, stream).set_packet_headers(headers)

    #
    # Modifiers.
    #

    def add_modifier(self, port, stream, position, modifier_type='standard'):
        """ Add packet modifier.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param position: requested packet modifier position.
        :param modifier_type: standard/extended
        """
        self._stream_name_or_index_to_object(port, stream).add_modifier(XenaModifierType[modifier_type.lower()],
                                                                        position=int(position))

    def remove_modifier(self, port, stream, modifier):
        """ Remove packet modifier.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param modifier: modifier index (zero based).
        """
        self._stream_name_or_index_to_object(port, stream).remove_modifier(int(modifier))

    def get_modifier(self, port, stream, modifier):
        """ Get packet modifier attributes.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param modifier: modifier index (zero based).
        :return: dictionary of <field: value>.
        :rtype: dict of (str, str)
        """
        modifier_object = self._stream_name_or_index_to_object(port, stream).modifiers[int(modifier)]
        return {'mask': modifier_object.mask,
                'action': modifier_object.action,
                'repeat': modifier_object.repeat,
                'min_val': modifier_object.min_val,
                'step': modifier_object.step,
                'max_val': modifier_object.max_val}

    def set_modifier_attributes(self, port, stream, modifier, **attributes):
        """ Set packet modifier attributes.

        :param port: port index (zero based) or port location as used in reserve command.
        :param stream: stream index (zero based) or stream name.
        :param modifier: modifier index (zero based).
        :param attributes: dictionary of {attribute: value} to set.
        """
        modifier = self._stream_name_or_index_to_object(port, stream).modifiers[int(modifier)]
        for attribute, value in attributes.items():
            if attribute.lower() == 'action':
                # actions are enum members, so translate the name first
                setattr(modifier, attribute.lower(), XenaModifierAction[value.lower()])
            else:
                setattr(modifier, attribute.lower(), value.lower())

    #
    # Capture.
    #

    def create_tshark(self, wireshark_path):
        """ Create a Tshark helper used by capture-related keywords.

        :param wireshark_path: path to the wireshark/tshark installation.
        """
        self.tshark = Tshark(wireshark_path)

    def save_capture_to_file(self, port, cap_file, cap_type='text'):
        """ Save the port capture buffer to file.

        :param port: port index (zero based) or port location as used in reserve command.
        :param cap_file: destination file name.
        :param cap_type: capture buffer format (see XenaCaptureBufferType).
        """
        self._port_name_or_index_to_object(port).capture.get_packets(cap_type=XenaCaptureBufferType[cap_type],
                                                                     file_name=cap_file, tshark=self.tshark)

    def analyze_packets(self, pcap_file, *read_filters):
        """ Count the packets in a pcap file that match all given tshark read filters.

        :param pcap_file: pcap file to analyze.
        :param read_filters: tshark read filter expressions.
        :return: number of matching packets.
        """
        analyser = TsharkAnalyzer()
        for read_filter in read_filters:
            analyser.set_read_filter(read_filter)
        return len(self.tshark.analyze(pcap_file, analyser))

    #
    # Basic 'back-door' commands.
    #

    def send_command(self, chassis, command):
        """ Send command with no output. """
        self.xm.session.chassis_list[chassis].send_command(command)

    def send_command_return(self, chassis, command):
        """ Send command and wait for single line output. """
        return self.xm.session.chassis_list[chassis].send_command_return(command)

    def send_command_return_multilines(self, chassis, command):
        """ Send command and wait for multiple lines output. """
        return self.xm.session.chassis_list[chassis].send_command_return_multilines(command)

    #
    # Private methods.
    #

    def _port_names_or_indices_to_objects(self, *names_or_indices):
        """ Resolve a mixed list of port indices/locations to port objects.

        :rtype: list of (xenavalkyrie.xena_port.XenaPort)
        """
        return [self._port_name_or_index_to_object(n) for n in names_or_indices]

    def _port_name_or_index_to_object(self, name_or_index):
        """ Resolve a single port index (decimal string) or location to a port object.

        :rtype: xenavalkyrie.xena_port.XenaPort
        """
        return (list(self.ports.values())[int(name_or_index)] if name_or_index.isdecimal()
                else self.ports[name_or_index])

    def _stream_name_or_index_to_object(self, port, name_or_index):
        """ Resolve a stream index (decimal string) or stream name to a stream object.

        Returns None implicitly when no stream with the given name exists.

        :rtype: xenavalkyrie.xena_stream.XenaStream
        """
        if name_or_index.isdecimal():
            return self._port_name_or_index_to_object(port).streams[int(name_or_index)]
        else:
            for stream in self._port_name_or_index_to_object(port).streams.values():
                if stream.name == name_or_index:
                    return stream

    def _get_packet_header(self, port=None, stream=None, header=None, headers=None):
        """ Resolve a header name ('ethernet', 'vlan[n]' or an upper layer) to its pypacker body. """
        headers = headers if headers else self._stream_name_or_index_to_object(port, stream).get_packet_headers()
        if header.lower() == 'ethernet':
            header_body = headers
        elif header.lower().startswith('vlan'):
            # raw string so the regex escapes are not interpreted by Python
            header_body = headers.vlan[int(re.findall(r'vlan\[([\d])\]', header.lower())[0])]
        else:
            header_body = headers.upper_layer
        return header_body
# Maps the `view` argument of XenaRobot.get_statistics to the statistics
# reader class that produces that view.
_view_name_2_object = {'port': XenaPortsStats,
                       'stream': XenaStreamsStats,
                       'tpld': XenaTpldsStats}
|
import asyncio
import io
import logging
import pathlib
from typing import Awaitable, List
import click
from click.exceptions import ClickException
from pandablocks._control import interactive_control
from pandablocks.asyncio import AsyncioClient
from pandablocks.commands import GetState, SetState, T
# Default prompt
PROMPT = "< "
TUTORIAL = pathlib.Path(__file__).parent / "saves" / "tutorial.sav"
def asyncio_run(coro: Awaitable[T]) -> T:
    """Run *coro* on the current event loop, then cancel any leftover tasks.

    Used instead of asyncio.run to work around a Python 3.7 shutdown bug.
    """
    loop = asyncio.get_event_loop()
    try:
        return loop.run_until_complete(coro)
    finally:
        # Cancel whatever is still pending, then let the tasks observe the
        # cancellation before the loop is abandoned.
        pending = asyncio.tasks.all_tasks(loop)
        for pending_task in pending:
            pending_task.cancel()
        loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
@click.group(invoke_without_command=True)
@click.option(
    "--log-level",
    default="INFO",
    type=click.Choice(
        ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"], case_sensitive=False
    ),
)
@click.version_option()
@click.pass_context
def cli(ctx, log_level: str):
    """PandaBlocks client library command line interface."""
    logging.basicConfig(
        format="%(levelname)s:%(message)s",
        level=getattr(logging, log_level.upper(), None),
    )
    # With no subcommand we would otherwise exit silently - show usage instead.
    if ctx.invoked_subcommand is None:
        click.echo(cli.get_help(ctx))
@cli.command()
@click.option(
    "--num",
    help="Number of collections to capture",
    default=1,
    show_default=True,
)
@click.option(
    "--arm",
    help="Arm PCAP at the start, and after each successful acquisition",
    is_flag=True,
)
@click.argument("host")
@click.argument("scheme")
def hdf(host: str, scheme: str, num: int, arm: bool):
    """
    Write an HDF file for each PCAP acquisition for HOST
    Uses the filename pattern specified by SCHEME, including %d for scan number
    starting from 1
    """

    async def _hdf_writer() -> None:
        # Local import as we might not have h5py installed and want other
        # commands to work
        from pandablocks.hdf import write_hdf_files

        async with AsyncioClient(host) as client:
            await write_hdf_files(client, scheme=scheme, num=num, arm=arm)

    # Don't use asyncio.run to workaround Python3.7 bug
    # https://bugs.python.org/issue38013
    asyncio_run(_hdf_writer())
@cli.command()
@click.option("--prompt", help="Prompt character", default=PROMPT, show_default=True)
@click.option(
    "--no-readline",
    help="Disable readline history and completion",
    is_flag=True,
)
@click.argument("host", type=str)
def control(host: str, prompt: str, no_readline: bool):
    """Open an interactive control console to HOST"""
    # The flag disables readline, so the library wants the inverse.
    use_readline = not no_readline
    interactive_control(host, prompt, use_readline)
@cli.command()
@click.argument("host")
@click.argument("outfile", type=click.File("w"))
def save(host: str, outfile: io.TextIOWrapper):
    """
    Save the current blocks configuration of HOST to OUTFILE
    """

    async def _fetch_state() -> List[str]:
        async with AsyncioClient(host) as client:
            return await client.send(GetState())

    # One line of output per state entry, newline-terminated.
    state_lines = asyncio_run(_fetch_state())
    outfile.write("\n".join(state_lines) + "\n")
@cli.command()
@click.argument("host")
@click.argument("infile", type=click.File("r"), required=False)
@click.option("--tutorial", help="load the tutorial settings", is_flag=True)
def load(host: str, infile: io.TextIOWrapper, tutorial: bool):
    """
    Load a blocks configuration into HOST using the commands in INFILE
    """
    # Resolve the configuration source: tutorial file, user INFILE, or error.
    if tutorial:
        with TUTORIAL.open("r") as stream:
            state = stream.read().splitlines()
    elif infile is None:
        raise ClickException("INFILE not specified")
    else:
        state = infile.read().splitlines()

    async def _push_state() -> None:
        async with AsyncioClient(host) as client:
            await client.send(SetState(state))

    asyncio_run(_push_state())
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from neutron_lbaas.tests.tempest.lib.common import accounts
from neutron_lbaas.tests.tempest.lib.common import cred_provider
from neutron_lbaas.tests.tempest.lib.common import isolated_creds
from neutron_lbaas.tests.tempest.lib import config
from neutron_lbaas.tests.tempest.lib import exceptions
CONF = config.CONF
# Return the right implementation of CredentialProvider based on config
# Dropping interface and password, as they are never used anyways
# TODO(andreaf) Drop them from the CredentialsProvider interface completely
def get_isolated_credentials(name, network_resources=None,
                             force_tenant_isolation=False,
                             identity_version=None):
    """Return the right CredentialProvider implementation based on config.

    If a test requires a new account to work, it can have it via forcing
    tenant isolation. A new account will be produced only for that test.
    In case admin credentials are not available for the account creation,
    the test should be skipped else it would fail.
    """
    if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
        return isolated_creds.IsolatedCreds(
            name=name,
            network_resources=network_resources,
            identity_version=identity_version)
    # Most params are not relevant for pre-created accounts.
    accounts_file = CONF.auth.test_accounts_file
    if accounts_file and os.path.isfile(accounts_file):
        return accounts.Accounts(name=name,
                                 identity_version=identity_version)
    return accounts.NotLockingAccounts(
        name=name, identity_version=identity_version)
# We want a helper function here to check and see if admin credentials
# are available so we can do a single call from skip_checks if admin
# creds are available.
def is_admin_available():
    """Return True if admin credentials can be obtained."""
    # With tenant isolation enabled, admin is always available.
    if CONF.auth.allow_tenant_isolation:
        return True
    # Otherwise check whether the test accounts file has an admin entry.
    if (CONF.auth.test_accounts_file and
            os.path.isfile(CONF.auth.test_accounts_file)):
        return bool(accounts.Accounts(name='check_admin').admin_available())
    # Finally, fall back to the statically configured admin credentials.
    try:
        cred_provider.get_configured_credentials('identity_admin',
                                                 fill_in=False)
    except exceptions.InvalidConfiguration:
        return False
    return True
# We want a helper function here to check and see if alt credentials
# are available so we can do a single call from skip_checks if alt
# creds are available.
def is_alt_available():
    """Return True if a second (alt) set of credentials can be obtained."""
    # With tenant isolation enabled, alt creds are always available.
    if CONF.auth.allow_tenant_isolation:
        return True
    # Pick the accounts implementation matching the configuration.
    if (CONF.auth.test_accounts_file and
            os.path.isfile(CONF.auth.test_accounts_file)):
        check_accounts = accounts.Accounts(name='check_alt')
    else:
        check_accounts = accounts.NotLockingAccounts(name='check_alt')
    try:
        return bool(check_accounts.is_multi_user())
    except exceptions.InvalidConfiguration:
        return False
|
import json
from pathlib import Path
from blspy import AugSchemeMPL, PublicKeyMPL, SignatureMPL
from chia.util.byte_types import hexstr_to_bytes
from chia.util.hash import std_hash
def validate_alert_file(file_path: Path, pubkey: str) -> bool:
    """Return True if the alert file at *file_path* carries a valid signature for *pubkey*."""
    return validate_alert(file_path.read_text(), pubkey)
def validate_alert(text: str, pubkey: str) -> bool:
    """Verify the BLS signature embedded in the alert JSON *text* against *pubkey*."""
    payload = json.loads(text)
    # The signed message is the raw "data" string, UTF-8 encoded.
    message = bytes(payload["data"], "UTF-8")
    bls_signature = SignatureMPL.from_bytes(hexstr_to_bytes(payload["signature"]))
    bls_pubkey = PublicKeyMPL.from_bytes(hexstr_to_bytes(pubkey))
    return AugSchemeMPL.verify(bls_pubkey, message, bls_signature)
def create_alert_file(alert_file_path: Path, key, genesis_challenge_preimage: str):
    """Write a signed "ready" alert file carrying the genesis challenge."""
    challenge = std_hash(bytes(genesis_challenge_preimage, "UTF-8"))
    data: str = json.dumps(
        {
            "ready": True,
            "genesis_challenge": challenge.hex(),
            "genesis_challenge_preimage": genesis_challenge_preimage,
        }
    )
    # Sign the serialized payload and wrap it together with the signature.
    signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
    alert_file_path.write_text(json.dumps({"data": data, "signature": f"{signature}"}))
def create_not_ready_alert_file(alert_file_path: Path, key):
    """Write a signed alert file flagging the network as not ready."""
    data: str = json.dumps({
        "ready": False,
    })
    # Sign the serialized payload and wrap it together with the signature.
    signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
    alert_file_path.write_text(json.dumps({"data": data, "signature": f"{signature}"}))
|
from os import system, getcwd, listdir
from os.path import exists
from sys import argv
import datetime
from colorama import Fore, Back
# Working directory captured at import time and a timestamp string,
# available to the rest of the script (e.g. for log entries).
pwd=getcwd()
currentTime = str(datetime.datetime.now())
def move(src:str, dest:str)->int:
    """Move *src* to *dest* via the shell `mv` command.

    :param src: source path.
    :param dest: destination path.
    :return: the `os.system` exit status (0 on success).
    """
    # Fix: quote both paths so names containing spaces or shell
    # metacharacters are passed to mv as single arguments instead of being
    # word-split or interpreted by the shell.
    from shlex import quote
    return system(f"mv {quote(src)} {quote(dest)}")
def checkArgs(minNo:int=-1, maxNo:int=-1)-> tuple:
    """Validate the number of command-line arguments.

    :param minNo: minimum number of arguments accepted (-1 means 0).
    :param maxNo: maximum number of arguments accepted (-1 means same as minNo).
    :return: (True, argc, args) when the count is in range, otherwise
        (False, argc, error message).
    """
    if minNo == -1: minNo = 0
    if maxNo == -1: maxNo = minNo
    args = argv[1:]
    argc = len(args)
    if minNo <= argc <= maxNo:
        return True, argc, args
    # Bug fix: the failure tuples used to carry a True flag and the two error
    # messages were swapped relative to their conditions.
    if argc > maxNo:
        return False, argc, "Too many Arguments"
    return False, argc, "Too less Arguments"
def parseArguments() -> dict:
    """Split command-line arguments into dash-prefixed options and plain parameters."""
    parsed = {
        'options': [],
        'parameters': [],
    }
    for token in argv[1:]:
        bucket = 'options' if token[0] == '-' else 'parameters'
        parsed[bucket].append(token)
    return parsed
def writeToFile(fileName:str=None, data:str=None, mode:str = 'a') -> bool:
    """Write *data* to *fileName* (append by default).

    :param fileName: target file path.
    :param data: text to write.
    :param mode: file open mode (default 'a' to append).
    :return: True on success, False when fileName or data is missing.
    """
    # Fix: the return annotation said None although the function returns a bool.
    if not fileName or not data: return False
    with open(fileName, mode) as file:
        file.write(data)
    return True
class Message:
    """Colored console output helpers built on colorama."""

    # Lookup tables from a color name to its colorama escape code.
    Color = {
        'fore' : {
            'red'    :Fore.RED,
            'yellow' :Fore.YELLOW,
            'green'  :Fore.GREEN,
            'blue'   :Fore.BLUE,
            'white'  :Fore.WHITE,
            'black'  :Fore.BLACK,
        },
        'back' : {
            'red'    :Back.RED,
            'yellow' :Back.YELLOW,
            'green'  :Back.GREEN,
            'blue'   :Back.BLUE,
            'white'  :Back.WHITE,
            'black'  :Back.BLACK,
        }
    }

    @staticmethod
    def colored(message:str, fontColor:str = None, backColor:str = None)-> None:
        """Print *message* in the named colors, falling back to RESET for unknown names."""
        fg = Message.Color['fore'].get(fontColor) or Fore.RESET
        bg = Message.Color['back'].get(backColor) or Back.RESET
        print(fg, bg, message, Back.RESET, Fore.RESET)

    @staticmethod
    def warning(message:str):
        """Print a highlighted WARNING line."""
        print(Fore.RED, Back.LIGHTYELLOW_EX,"WARNING :"+Fore.LIGHTYELLOW_EX +Back.RESET,message,Fore.RESET)

    @staticmethod
    def error(message:str):
        """Print a highlighted ERROR line."""
        print(Fore.LIGHTYELLOW_EX, Back.RED,"ERROR :"+Fore.LIGHTRED_EX + Back.RESET,message,Fore.RESET)

    @staticmethod
    def success(message:str):
        """Print a green SUCCESS line."""
        print(Fore.LIGHTGREEN_EX+"SUCCESS :", Fore.RESET, message, )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditSupplychainInventoryOutApplyResponse(AlipayResponse):
    """Response for mybank.credit.supplychain.inventory.out.apply.

    Exposes the monetary fields returned by the API as plain properties.
    """

    # Response keys handled by parse_response_content; each is mirrored by a
    # property of the same name below.
    _FIELDS = (
        'normal_int_amt',
        'ovd_int_amt',
        'ovd_int_pen_int_amt',
        'ovd_prin_pen_int_amt',
        'prin_amt',
        'repay_amt',
    )

    def __init__(self):
        super(MybankCreditSupplychainInventoryOutApplyResponse, self).__init__()
        self._normal_int_amt = None
        self._ovd_int_amt = None
        self._ovd_int_pen_int_amt = None
        self._ovd_prin_pen_int_amt = None
        self._prin_amt = None
        self._repay_amt = None

    @property
    def normal_int_amt(self):
        return self._normal_int_amt

    @normal_int_amt.setter
    def normal_int_amt(self, value):
        self._normal_int_amt = value

    @property
    def ovd_int_amt(self):
        return self._ovd_int_amt

    @ovd_int_amt.setter
    def ovd_int_amt(self, value):
        self._ovd_int_amt = value

    @property
    def ovd_int_pen_int_amt(self):
        return self._ovd_int_pen_int_amt

    @ovd_int_pen_int_amt.setter
    def ovd_int_pen_int_amt(self, value):
        self._ovd_int_pen_int_amt = value

    @property
    def ovd_prin_pen_int_amt(self):
        return self._ovd_prin_pen_int_amt

    @ovd_prin_pen_int_amt.setter
    def ovd_prin_pen_int_amt(self, value):
        self._ovd_prin_pen_int_amt = value

    @property
    def prin_amt(self):
        return self._prin_amt

    @prin_amt.setter
    def prin_amt(self, value):
        self._prin_amt = value

    @property
    def repay_amt(self):
        return self._repay_amt

    @repay_amt.setter
    def repay_amt(self, value):
        self._repay_amt = value

    def parse_response_content(self, response_content):
        """Populate the known fields from the parsed response dict."""
        response = super(MybankCreditSupplychainInventoryOutApplyResponse, self).parse_response_content(response_content)
        # Idiom: one loop replaces six copy-pasted `if key in response` blocks;
        # setattr goes through the property setters, same as the original.
        for field in self._FIELDS:
            if field in response:
                setattr(self, field, response[field])
|
#!/usr/bin/env python
import os
import sys
# Django's standard command-line entry point (manage.py).
if __name__ == "__main__":
    # Use this project's settings module unless the environment already names one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "faceRecog.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is not installed" from an ImportError raised
        # while importing Django itself: if `import django` also fails, raise
        # the friendlier message; otherwise re-raise the original error
        # unchanged via the bare `raise`.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
"""Emoji
Available Commands:
.support
"""
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd("wolfuserbot"))
async def _(event):
    """Animated promo: edits the reply through two alternating frames that
    point at the userbot deploy guide on Telegraph.

    Ignores forwarded messages. Each frame is shown for 0.1 s, 36 edits total.
    """
    if event.fwd_from:
        return
    animation_interval = 0.1
    animation_ttl = range(0, 36)
    await event.edit("Thanks")
    animation_chars = [
        "Click here to Go to Telegraph",
        "[Click Here For Guide](https://telegra.ph/Easy-userbot-deploy-06-27)"
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # BUG FIX: `i % 18` indexed far past the 2-element frame list and
        # raised IndexError on the third iteration; cycle over the actual
        # number of frames instead.
        await event.edit(animation_chars[i % len(animation_chars)])
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsagesOperations(object):
    """UsagesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2015-06-15".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent as a query parameter on every request.
        self.api_version = "2015-06-15"
        self.config = config
    def list(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Lists compute usages for a subscription.

        NOTE(review): the summary says "compute" but the return types live in
        azure.mgmt.network.v2015_06_15 — likely a generator copy-paste; confirm.

        :param location: The location where resource usage is queried.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`Usage
         <azure.mgmt.network.v2015_06_15.models.Usage>`
        :rtype: :class:`UsagePaged
         <azure.mgmt.network.v2015_06_15.models.UsagePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the URL from the route template; later pages
            # follow the service-provided next_link verbatim (no parameters).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            # Anything other than 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response: UsagePaged drives internal_paging lazily.
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
|
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680383(v=vs.85).aspx
class MINIDUMP_LOCATION_DESCRIPTOR:
    """32-bit minidump location descriptor: a byte count plus a file offset (RVA)."""

    def __init__(self):
        self.DataSize = None  # number of bytes the descriptor points at
        self.Rva = None       # absolute file offset of that data

    @staticmethod
    def parse(buff):
        """Read two consecutive little-endian uint32 fields from *buff*."""
        desc = MINIDUMP_LOCATION_DESCRIPTOR()
        desc.DataSize = int.from_bytes(buff.read(4), 'little')
        desc.Rva = int.from_bytes(buff.read(4), 'little')
        return desc

    def __str__(self):
        return 'Size: %s File offset: %s' % (hex(self.DataSize), hex(self.Rva))
class MINIDUMP_LOCATION_DESCRIPTOR64:
    """64-bit variant of MINIDUMP_LOCATION_DESCRIPTOR (uint64 size and RVA)."""

    def __init__(self):
        self.DataSize = None  # number of bytes the descriptor points at
        self.Rva = None       # absolute file offset of that data

    @staticmethod
    def parse(buff):
        """Read two consecutive little-endian uint64 fields from *buff*."""
        desc = MINIDUMP_LOCATION_DESCRIPTOR64()
        desc.DataSize = int.from_bytes(buff.read(8), 'little')
        desc.Rva = int.from_bytes(buff.read(8), 'little')
        return desc

    def __str__(self):
        return 'Size: %s File offset: %s' % (hex(self.DataSize), hex(self.Rva))
class MINIDUMP_STRING:
    """Length-prefixed UTF-16-LE string as stored in a minidump file."""

    def __init__(self):
        self.Length = None  # byte length of Buffer (uint32)
        self.Buffer = None  # raw UTF-16-LE payload

    @staticmethod
    def parse(buff):
        """Read the 4-byte length prefix, then that many raw bytes."""
        ms = MINIDUMP_STRING()
        ms.Length = int.from_bytes(buff.read(4), 'little')
        ms.Buffer = buff.read(ms.Length)
        return ms

    @staticmethod
    def get_from_rva(rva, buff):
        """Decode the string located at file offset *rva*, restoring the
        stream position before returning."""
        saved_pos = buff.tell()
        buff.seek(rva, 0)
        ms = MINIDUMP_STRING.parse(buff)
        buff.seek(saved_pos, 0)
        return ms.Buffer.decode('utf-16-le')
class MinidumpMemorySegment:
    """One mapped memory range of the dumped process: a virtual address span
    plus the file offset where its bytes live inside the minidump."""

    def __init__(self):
        self.start_virtual_address = None  # first VA covered by the segment
        self.size = None                   # length in bytes
        self.end_virtual_address = None    # start_virtual_address + size (exclusive)
        self.start_file_address = None     # file offset of the segment's bytes

    @staticmethod
    def parse_mini(memory_decriptor, buff):
        """
        Build a segment from a MINIDUMP_MEMORY_DESCRIPTOR (32-bit stream).
        memory_descriptor: MINIDUMP_MEMORY_DESCRIPTOR
        buff: file_handle (unused; kept for interface symmetry with parse_full)
        """
        mms = MinidumpMemorySegment()
        mms.start_virtual_address = memory_decriptor.StartOfMemoryRange
        mms.size = memory_decriptor.Memory.DataSize
        mms.start_file_address = memory_decriptor.Memory.Rva
        mms.end_virtual_address = mms.start_virtual_address + mms.size
        return mms

    @staticmethod
    def parse_full(memory_decriptor, buff, rva):
        """Build a segment from a full-memory descriptor, whose file offset
        (*rva*) is tracked by the caller rather than stored in the struct."""
        mms = MinidumpMemorySegment()
        mms.start_virtual_address = memory_decriptor.StartOfMemoryRange
        mms.size = memory_decriptor.DataSize
        mms.start_file_address = rva
        mms.end_virtual_address = mms.start_virtual_address + mms.size
        return mms

    def inrange(self, virt_addr):
        """Return True when *virt_addr* lies inside [start, end)."""
        if virt_addr >= self.start_virtual_address and virt_addr < self.end_virtual_address:
            return True
        return False

    def read(self, virtual_address, size, file_handler):
        """Read *size* bytes at *virtual_address* from the backing file,
        preserving the file handle's position.

        Raises Exception when the address or the requested span falls
        outside this segment.
        """
        if virtual_address > self.end_virtual_address or virtual_address < self.start_virtual_address:
            raise Exception('Reading from wrong segment!')
        if virtual_address + size > self.end_virtual_address:
            raise Exception('Read would cross boundaries!')
        pos = file_handler.tell()
        offset = virtual_address - self.start_virtual_address
        file_handler.seek(self.start_file_address + offset, 0)
        data = file_handler.read(size)
        file_handler.seek(pos, 0)
        return data

    def search(self, pattern, file_handler):
        """Return the virtual addresses of every occurrence of *pattern*
        inside this segment (overlapping matches included)."""
        if len(pattern) > self.size:
            return []
        pos = file_handler.tell()
        file_handler.seek(self.start_file_address, 0)
        data = file_handler.read(self.size)
        file_handler.seek(pos, 0)
        fl = []
        offset = 0
        # BUG FIX: the running offset used to be overwritten (offset = marker+1)
        # instead of accumulated, so the third and later hits reported wrong
        # addresses; the loop also used '>' and therefore missed a match that
        # ended exactly at the end of the buffer.
        while len(data) >= len(pattern):
            marker = data.find(pattern)
            if marker == -1:
                break
            fl.append(marker + offset + self.start_virtual_address)
            data = data[marker + 1:]
            offset += marker + 1
        return fl

    @staticmethod
    def get_header():
        """Column headers matching to_row()."""
        return [
            'VA Start',
            'RVA',
            'Size',
        ]

    def to_row(self):
        """Hex-formatted cells for tabular display."""
        return [
            hex(self.start_virtual_address),
            hex(self.start_file_address),
            hex(self.size)
        ]

    def __str__(self):
        return 'VA Start: %s, RVA: %s, Size: %s' % (hex(self.start_virtual_address), hex(self.start_file_address), hex(self.size))
def hexdump(src, length=16, sep='.', start=0):
    '''
    Render *src* (bytes or str) as a classic hex dump and return it as a string.

    length: bytes per row; an extra gap is inserted after length/2 bytes.
    sep:    placeholder character used for non-printable bytes in the text column.
    start:  when non-zero, each row is labelled "absolute(+relative)" instead
            of just the relative offset.
    '''
    pad_width = length * 3 + 1
    rows = []
    for base in range(0, len(src), length):
        chunk = src[base:base + length]
        hex_col = ''
        for col in range(len(chunk)):
            # Float comparison kept on purpose: the mid-row gap only appears
            # when `length` is even, exactly as before.
            if col == length / 2:
                hex_col += ' '
            byte = chunk[col]
            if not isinstance(byte, int):
                byte = ord(byte)
            hex_col += '%02x ' % byte
        hex_col = hex_col.strip(' ')
        text_col = ''
        for ch in chunk:
            if not isinstance(ch, int):
                ch = ord(ch)
            text_col += chr(ch) if 0x20 <= ch < 0x7F else sep
        if start == 0:
            rows.append(('%08x: %-' + str(pad_width) + 's |%s|') % (base, hex_col, text_col))
        else:
            rows.append(('%08x(+%04x): %-' + str(pad_width) + 's |%s|') % (start + base, base, hex_col, text_col))
    return '\n'.join(rows)
def construct_table(lines, separate_head=True):
    """Format a 2-D array of strings as an aligned, pipe-separated table.

    Returns the rendered table as a single string (despite the historical
    name, nothing is printed), or None when *lines* is empty. When
    *separate_head* is true a dashed rule follows the first row.
    """
    # The widest cell in each column fixes that column's width.
    widths = []
    for row in lines:
        for col, cell_width in enumerate(len(cell) for cell in row):
            while col >= len(widths):
                widths.append(0)
            widths[col] = max(widths[col], cell_width)
    row_format = ' | '.join('{%d:%d}' % (col, width) for col, width in enumerate(widths))
    if not row_format:
        return
    rendered = ''
    for row_index, row in enumerate(lines):
        rendered += row_format.format(*row) + '\n'
        if row_index == 0 and separate_head:
            rendered += '-' * (sum(widths) + 3 * (len(widths) - 1)) + '\n'
    return rendered
|
"""
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Two-stage YOLOv3 training driver: first train with the body frozen,
    then unfreeze everything and fine-tune at a lower learning rate."""
    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (416, 416)  # multiple of 32, hw
    # 6 anchors => tiny-YOLO configuration, 9 anchors => full YOLOv3.
    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
                                  freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
                             freeze_body=2,
                             weights_path='model_data/yolo_weights.h5')  # make sure you know what you freeze
    logging = TensorBoard(log_dir=log_dir)
    # NOTE(review): the filename template ends in the literal 'val_loss' —
    # likely meant 'val_loss{val_loss:.3f}'; confirm before relying on names.
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed => reproducible train/val split; reset afterwards.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        # The real loss is produced by the yolo_loss Lambda layer inside the
        # graph, so the Keras "loss" just passes y_pred through.
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred}, metrics=['accuracy'])
        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors,
                                                                   num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=1,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')
    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred},
                      metrics=['accuracy'])  # recompile to apply the change
        print('Unfreeze all of the layers.')
        batch_size = 32  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors,
                                                                   num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=2,
                            initial_epoch=1,
                            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
    # Further training if needed.
def get_classes(classes_path):
    '''Load class names from *classes_path*, one name per line, with
    surrounding whitespace stripped.'''
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    '''Load anchor box sizes from the first line of *anchors_path*
    (comma-separated floats) as an (N, 2) numpy array.'''
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model

    Builds the YOLOv3 body plus an in-graph yolo_loss Lambda layer; the
    returned Model maps [image, y_true_scale1..3] -> scalar loss.
    freeze_body: 1 freezes the first 185 layers (darknet body), 2 freezes
    everything but the last 3 output layers; only applies with pretrained
    weights loaded.
    '''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One y_true input per detection scale (strides 32/16/8); each cell holds
    # num_anchors//3 anchors with 4 box coords + objectness + class scores.
    y_true = [Input(shape=(h // {0: 32, 1: 16, 2: 8}[l], w // {0: 32, 1: 16, 2: 8}[l], \
                           num_anchors // 3, num_classes + 5)) for l in range(3)]
    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                      weights_path='model_data/tiny_yolo_weights.h5'):
    '''create the training model, for Tiny YOLOv3

    Same layout as create_model but with two detection scales (strides 32/16),
    num_anchors//2 anchors per scale, a 0.7 ignore threshold, and 20 frozen
    body layers for freeze_body=1.
    '''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One y_true input per detection scale (strides 32/16).
    y_true = [Input(shape=(h // {0: 32, 1: 16}[l], w // {0: 32, 1: 16}[l], \
                           num_anchors // 2, num_classes + 5)) for l in range(2)]
    model_body = tiny_yolo_body(image_input, num_anchors // 2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers) - 2)[freeze_body - 1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator

    Yields ([image_batch, *y_true_batch], zeros) forever. The zeros stand in
    for Keras labels because the real loss is computed inside the graph by
    the yolo_loss layer.
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                # Reshuffle in place at the start of every pass over the data.
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Guarded front-end for data_generator: returns None instead of a
    generator when there are no annotations or the batch size is not positive."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# Script entry point: run the two-stage training pipeline when executed directly.
if __name__ == '__main__':
    _main()
|
import numpy as np
import copy
from mygrid.grid import Section,TransformerModel, Auto_TransformerModel
import pandas as pd
"""
This scripts allows the user to calculate the unbalanced short-circuits on radial distribution
systems modeled on mygrid.
"""
def biphasic(distgrid, node_name, fs='Higher',Df=False, zc=0+0j):
    """
    Calculates the two-phase short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    fs: str
        Designates which phases participate in the short circuit
        Options: 'Fab', 'Fac', 'Fbc' and 'Higher'.
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # zz maps nodes/sections to their impedances, used later to split the
    # fault current into per-branch contributions.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds,zpds=downstream_area(distgrid, node_name)
    # One 3x1 solution vector per candidate phase pair.
    Xab=np.zeros((3,1), dtype=complex)
    Xac=np.zeros((3,1), dtype=complex)
    Xbc=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    zz.update(zpus)
    zz.update(zpds)
    # Parallel-combine the upstream and downstream equivalents (skipping a
    # missing side), then add the contact impedance.
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    l=np.linalg.inv(l)+zc
    C=calc_c(l)
    # Specialize the constraint matrix for each phase-pair fault.
    Cab=copy.copy(C)
    Cac=copy.copy(C)
    Cbc=copy.copy(C)
    Cab[3,3]=Cab[4,4]=1
    Cab[6,0]=Cab[6,1]=Cab[5,2]=1
    Cac[3,3]=Cac[5,5]=1
    Cac[6,0]=Cac[6,2]=Cac[4,1]=1
    Cbc[4,4]=Cbc[5,5]=1
    Cbc[6,1]=Cbc[6,2]=Cbc[3,0]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    Xab +=np.linalg.inv(Cab).dot(IPS)[0:3]
    Xac +=np.linalg.inv(Cac).dot(IPS)[0:3]
    Xbc +=np.linalg.inv(Cbc).dot(IPS)[0:3]
    If={'Fab': {'Ifa':Xab[0,0],'Ifb':Xab[1,0],'Ifc':Xab[2,0]},
        'Fac': {'Ifa':Xac[0,0],'Ifb':Xac[1,0],'Ifc':Xac[2,0]},
        'Fbc': {'Ifa':Xbc[0,0],'Ifb':Xbc[1,0],'Ifc':Xbc[2,0]}}
    If=pd.DataFrame(If)
    erase=None
    if fs =='Higher':
        # Pick the phase pair yielding the largest current magnitude.
        fs=If.abs().max().idxmax()
    # NOTE(review): an unrecognized fs leaves `ict` unassigned and the
    # return below raises UnboundLocalError — confirm callers only pass
    # the documented options.
    if fs=='Fac':
        Iz=If['Fac']
        erase=[1]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs=='Fab':
        Iz=If['Fab']
        erase=[2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs == 'Fbc':
        Iz=If['Fbc']
        erase=[0]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def biphasic_to_ground(distgrid,node_name, fs='Higher',Df=False, zc=0+0j):
    """
    Calculates the two-phase-grounded short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    fs: str
        Designates which phases participate in the short circuit
        Options: 'Fabg', 'Facg', 'Fbcg' and 'Higher'.
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # Node/section impedance map used for the contribution split below.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds,zpds=downstream_area(distgrid, node_name)
    zz.update(zpus)
    zz.update(zpds)
    Xab=np.zeros((3,1), dtype=complex)
    Xac=np.zeros((3,1), dtype=complex)
    Xbc=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    # Parallel-combine upstream/downstream equivalents, then add zc.
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    l=np.linalg.inv(l)+zc
    C=calc_c(l)
    # Grounded variants: note the [6,6] entry is set instead of the row-6
    # current-sum constraints used in the ungrounded biphasic case.
    Cab=copy.copy(C)
    Cac=copy.copy(C)
    Cbc=copy.copy(C)
    Cab[3,3]=Cab[4,4]=Cab[6,6]=1
    Cab[5,2]=1
    Cac[3,3]=Cac[5,5]=Cac[6,6]=1
    Cac[4,1]=1
    Cbc[4,4]=Cbc[5,5]=Cbc[6,6]=1
    Cbc[3,0]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    Xab +=np.linalg.inv(Cab).dot(IPS)[0:3]
    Xac +=np.linalg.inv(Cac).dot(IPS)[0:3]
    Xbc +=np.linalg.inv(Cbc).dot(IPS)[0:3]
    If={'Fabg': {'Ifa':Xab[0,0],'Ifb':Xab[1,0],'Ifc':Xab[2,0]},
        'Facg': {'Ifa':Xac[0,0],'Ifb':Xac[1,0],'Ifc':Xac[2,0]},
        'Fbcg': {'Ifa':Xbc[0,0],'Ifb':Xbc[1,0],'Ifc':Xbc[2,0]}}
    If=pd.DataFrame(If)
    if fs =='Higher':
        # Pick the phase pair yielding the largest current magnitude.
        fs=If.abs().max().idxmax()
    # NOTE(review): an unrecognized fs leaves `ict` unassigned and the
    # return below raises UnboundLocalError — confirm inputs are validated
    # upstream.
    if fs=='Facg':
        Iz=If['Facg']
        erase=[1]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs=='Fabg':
        Iz=If['Fabg']
        erase=[2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs == 'Fbcg':
        Iz=If['Fbcg']
        erase=[0]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def three_phase_to_ground(distgrid,node_name,Df=False, zc=0+0j):
    """
    Calculates the three-phase-grounded short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # Node/section impedance map used for the contribution split below.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds, zpds=downstream_area(distgrid, node_name)
    zz.update(zpus)
    zz.update(zpds)
    X=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    # Parallel-combine upstream/downstream equivalents, then add zc.
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    l=np.linalg.inv(l)+zc
    C=calc_c(l)
    # All three phases faulted to ground: diagonal entries 3..6 set to 1.
    C[3,3]=C[4,4]=C[5,5]=C[6,6]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    X +=np.linalg.inv(C).dot(IPS)[0:3]
    If={'Fabcg': {'Ifa':X[0,0],'Ifb':X[1,0],'Ifc':X[2,0]}}
    If=pd.DataFrame(If)
    ict=calc_contributions(zz,np.array(If).reshape(3,1),node_name,distgrid, ep=[])
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def three_phase(distgrid,node_name,Df=False, zc=0+0j):
    """
    Calculates the three-phase short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # Node/section impedance map used for the contribution split below.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds,zpds=downstream_area(distgrid, node_name)
    zz.update(zpus)
    zz.update(zpds)
    X=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    # Parallel-combine upstream/downstream equivalents, then add zc.
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    l=np.linalg.inv(l)+zc
    C=calc_c(l)
    # Ungrounded three-phase fault: row 6 enforces the zero-sum of the
    # three fault currents instead of a ground path.
    C[3,3]=C[4,4]=C[5,5]=1
    C[6,0]=C[6,1]=C[6,2]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    X +=np.linalg.inv(C).dot(IPS)[0:3]
    If={'Fabc': {'Ifa':X[0,0],'Ifb':X[1,0],'Ifc':X[2,0]}}
    If=pd.DataFrame(If)
    ict=calc_contributions(zz,np.array(If).reshape(3,1),node_name,distgrid,ep=[])
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def mono_phase(distgrid,node_name,zf=0, fs='Higher',Df=False, zc=0+0j):
    """
    Calculates the mono-phase short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    zf: complex
        Fault impedance (currently unused by this routine — confirm intent).
    fs: str
        Designates which phases participate in the short circuit
        Options: 'Fag', 'Fbg', 'Fcg' and 'Higher'.
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # Node/section impedance map used for the contribution split below.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds,zpds=downstream_area(distgrid, node_name)
    zz.update(zpus)
    zz.update(zpds)
    Xa=np.zeros((3,1), dtype=complex)
    Xb=np.zeros((3,1), dtype=complex)
    Xc=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    # Parallel-combine upstream/downstream equivalents, then add zc.
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    l=np.linalg.inv(l)+zc
    C=calc_c(l)
    # One constraint matrix per faulted phase (a-g, b-g, c-g).
    Ca=copy.copy(C)
    Cb=copy.copy(C)
    Cc=copy.copy(C)
    Ca[3,3]=Ca[6,6]=1
    Ca[4,1]=Ca[5,2]=1
    Cb[4,4]=Cb[6,6]=1
    Cb[3,0]=Cb[5,2]=1
    Cc[5,5]=Cc[6,6]=1
    Cc[3,0]=Cc[4,1]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    Xa+=np.linalg.inv(Ca).dot(IPS)[0:3]
    Xb+=np.linalg.inv(Cb).dot(IPS)[0:3]
    Xc+=np.linalg.inv(Cc).dot(IPS)[0:3]
    If={'Fag': {'Ifa':Xa[0,0],'Ifb':Xa[1,0],'Ifc':Xa[2,0]},
        'Fbg': {'Ifa':Xb[0,0],'Ifb':Xb[1,0],'Ifc':Xb[2,0]},
        'Fcg': {'Ifa':Xc[0,0],'Ifb':Xc[1,0],'Ifc':Xc[2,0]}}
    If=pd.DataFrame(If)
    if fs =='Higher':
        # Pick the phase yielding the largest current magnitude.
        fs=If.abs().max().idxmax()
    # NOTE(review): an unrecognized fs leaves `ict` unassigned and the
    # return below raises UnboundLocalError.
    if fs=='Fag':
        Iz=If['Fag']
        erase=[1,2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs=='Fbg':
        Iz=If['Fbg']
        erase=[0,2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    elif fs == 'Fcg':
        Iz=If['Fcg']
        erase=[0,1]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid,ep=erase)
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def min_mono_phase(distgrid,node_name,zf=0, zt=40, fs='Higher',Df=False, zc=0+0j):
    """
    Calculates the min-mono-phase short circuit
    Parameters
    ----------
    distgrid: mygrid.grid.DistGrid
    node_name: str
        The name of node fault
    zt: float or complex
        Additional fault impedance applied so the minimum fault current is
        obtained (default 40).
    fs: str
        Designates which phases participate in the short circuit
        Options: 'Fag_min', 'Fbg_min', 'Fcg_min' and 'Higher'.
    Df: bool
        Indicates whether the function returns a dataframe or a dictionary.
        If true the function returns a DataFrame.
    zc: complex
        Contact Impedance
    Returns:
        Dict or a DataFrame
    """
    # Node/section impedance map used for the contribution split below.
    zz=dict()
    zus,zpus=upstream_area(distgrid, node_name)
    zds,zpds=downstream_area(distgrid, node_name)
    zz.update(zpus)
    zz.update(zpds)
    Xa=np.zeros((3,1), dtype=complex)
    Xb=np.zeros((3,1), dtype=complex)
    Xc=np.zeros((3,1), dtype=complex)
    voltage_source=voltage(distgrid,node_name)
    l=0+0j
    for i in [zds, zus]:
        if type(i) != type(None):
            l += np.linalg.inv(i)
    # NOTE(review): the sibling routines compute np.linalg.inv(l)+zc here,
    # while this one inverts (l+zc). Confirm the different order is intended
    # for the minimum-fault derivation.
    l=np.linalg.inv(l+zc)
    C=calc_c(l+zt)
    Ca=copy.copy(C)
    Cb=copy.copy(C)
    Cc=copy.copy(C)
    Ca[3,3]=Ca[6,6]=1
    Ca[4,1]=Ca[5,2]=1
    Cb[4,4]=Cb[6,6]=1
    Cb[3,0]=Cb[5,2]=1
    Cc[5,5]=Cc[6,6]=1
    Cc[3,0]=Cc[4,1]=1
    IPS=np.zeros((7,1),dtype=complex)
    IPS[0:3,0:1]=np.linalg.inv(l).dot(voltage_source)
    Xa+=np.linalg.inv(Ca).dot(IPS)[0:3]
    Xb+=np.linalg.inv(Cb).dot(IPS)[0:3]
    Xc+=np.linalg.inv(Cc).dot(IPS)[0:3]
    If={'Fag_min': {'Ifa':Xa[0,0],'Ifb':Xa[1,0],'Ifc':Xa[2,0]},
        'Fbg_min': {'Ifa':Xb[0,0],'Ifb':Xb[1,0],'Ifc':Xb[2,0]},
        'Fcg_min': {'Ifa':Xc[0,0],'Ifb':Xc[1,0],'Ifc':Xc[2,0]}}
    If=pd.DataFrame(If)
    if fs =='Higher':
        fs=If.abs().max().idxmax()
    # BUG FIX: these branches were chained with `elif` onto the 'Higher'
    # resolver above, so whenever fs was resolved from its default value no
    # branch ran and `ict` was unbound at return (UnboundLocalError). Use an
    # independent if-chain, mirroring mono_phase().
    if fs=='Fag_min':
        Iz=If['Fag_min']
        erase=[1,2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid ,ep=erase)
    elif fs=='Fbg_min':
        Iz=If['Fbg_min']
        erase=[0,2]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid ,ep=erase)
    elif fs == 'Fcg_min':
        Iz=If['Fcg_min']
        erase=[1,0]
        ict=calc_contributions(zz,np.array(Iz).reshape(3,1),node_name,distgrid ,ep=erase)
    if Df:
        ict=dict_to_DataFrame(ict)
    return ict
def calc_c(l):
    """Assemble the 7x7 base constraint matrix used by the fault solvers.

    The top-left 3x3 is identity, columns 3..5 of the first three rows hold
    the admittance matrix inv(l), and column 6 holds the row sums of that
    admittance; rows 3..6 are left zero for the caller to specialize.
    """
    admittance = np.linalg.inv(l)
    C = np.zeros((7, 7), dtype=complex)
    C[0, 0] = C[1, 1] = C[2, 2] = 1
    C[0:3, 3:6] = admittance
    for row in range(3):
        C[row, 6] = admittance[row, 0:3].sum()
    return C
def voltage(distgrid,node_name):
    """Pre-fault voltage phasor at *node_name*.

    Starts from the root node's voltage (vp) and walks the root-to-node
    path, applying each section's A matrix whenever the section contains a
    TransformerModel, so the source voltage is referred to the faulted
    node's voltage level.
    """
    loads=distgrid.load_nodes
    loads_path=distgrid.load_nodes_tree.node_to_root_path(node_name)
    voltage_source=loads[distgrid.load_nodes_tree.root].vp
    # The path is ordered node->root; iterate it backwards (root first).
    i=len(loads_path[0,0:])-1
    while i >=1 :
        n1=loads[loads_path[1,i]]
        n2=loads[loads_path[1,i-1]]
        section=distgrid.sections_by_nodes[(n1,n2)]
        if isinstance(section.transformer, TransformerModel):
            voltage_source=section.A.dot(voltage_source)
        i-=1
    return voltage_source
def downstream_area(distgrid, node_name):
    """Equivalent impedance looking downstream of *node_name*.

    Returns (zeq, zp): the combined downstream impedance (or None) and a
    map of the individual node/section impedances encountered.
    """
    nodes_tree = distgrid.load_nodes_tree
    return resolve_downstream_area(distgrid, node_name, nodes_tree.tree, nodes_tree.rnp.tolist())
def upstream_area(distgrid, node_name):
    """Equivalent impedance looking upstream of *node_name*.

    Returns (zeq, zp): the combined upstream impedance (or None) and a
    map of the individual node/section impedances encountered.
    """
    nodes_tree = distgrid.load_nodes_tree
    return resolve_upstream_area(distgrid, node_name, nodes_tree.tree, nodes_tree.rnp.tolist())
def resolve_upstream_area(distgrid, n1, tree, rnp, nf=False):
    """Recursively reduce everything downstream of node *n1* to a single
    equivalent impedance.

    Returns (zeq, zp): zeq is the parallel combination of the node's local
    generation and all downstream branches (None when nothing contributes),
    and zp records the individual node/section impedances found on the way.
    """
    zpll=list()   # parallel branches to combine at this node
    zp=dict()
    ds_neighbors=list()
    load_nodes=distgrid.load_nodes
    # Depth of n1 in the root-node path matrix (rnp): row 0 = depth, row 1 = name.
    n1_depth=int(rnp[:][0][rnp[:][1].index(n1)])
    n1=load_nodes[n1]
    for i in distgrid.load_nodes_tree.tree[n1.name]:
        if int(rnp[:][0][rnp[:][1].index(i)]) > n1_depth:
            ds_neighbors.append(load_nodes[i])
    if len(ds_neighbors)!=0:
        if n1.generation != None:
            if type(n1.generation) == type(list()):
                # BUG FIX: this loop appended n1.generation.Z (a list has no
                # .Z attribute, raising AttributeError) instead of each
                # generator's impedance, and then clobbered the branch list
                # by assigning inv_Z's matrix back to zpll, which broke the
                # zpll.append calls below. Record each generator impedance
                # as its own parallel branch, matching the leaf-node case.
                gen_branches = [gen.Z for gen in n1.generation]
                zpll.extend(gen_branches)
                zp[n1] = inv_Z(gen_branches)
            else:
                zpll.append(n1.generation.Z)
                zp[n1]=n1.generation.Z
        for i in ds_neighbors:
            a, pp=resolve_upstream_area(distgrid, i.name, tree, rnp, nf=True)
            zp.update(pp)
            if type(a) == type(None):
                continue
            else:
                zeq=0
                if (n1, i) in distgrid.sections_by_nodes.keys():
                    zeq = distgrid.sections_by_nodes[(n1, i)]
                # Refer the downstream equivalent across a transformer when
                # the section contains one; otherwise just add the series Z.
                if isinstance(zeq.transformer, TransformerModel):
                    a= zeq.a.dot(a+zeq.transformer.z).dot(zeq.d)
                    zpll.append(a)
                elif isinstance(zeq.transformer, Auto_TransformerModel):
                    a= zeq.a.dot(a).dot(zeq.d)
                    zpll.append(a)
                else:
                    a=zeq.Z + a
                    zpll.append(a)
                zp[n1,i] = a
        if len(zpll) == 0:
            return None, zp
        elif len(zpll) == 1 and n1.generation != None:
            zp[n1]=inv_Z(zpll)
            return zp[n1], zp
        else:
            return inv_Z(zpll), zp
    else:
        # Leaf node: only its own generation (if any) contributes.
        if n1.generation !=None:
            if type(n1.generation) == type(list()):
                for i in n1.generation:
                    zpll.append(i.Z)
                zpll=inv_Z(zpll)
                zp[n1]=zpll
                return zpll, zp
            else:
                zp[n1]=n1.generation.Z
                return n1.generation.Z, zp
        else:
            return None, zp
def inv_Z(zpll):
    """Parallel-combine the impedance matrices in *zpll*.

    A single-element list is returned as-is. If any branch is an all-zero
    matrix the combination short-circuits to 0; otherwise the result is
    inv(sum(inv(Z_i))).
    """
    if len(zpll) == 1:
        return zpll[0]
    acc = 0
    for branch in zpll:
        if np.all(branch == 0):
            # A zero-impedance branch dominates the parallel combination.
            acc = 0
            break
        acc += np.linalg.inv(branch)
    if np.all(acc == 0):
        return acc
    return np.linalg.inv(acc)
def resolve_downstream_area(distgrid, n1, tree, rnp, n2=None, nf=False):
    """Recursively reduce the network upstream of node *n1* (i.e. everything
    except the subtree hanging off *n2*) to a single equivalent impedance.

    Returns (zeq, zp): zeq is the combined impedance (None when nothing
    contributes) and zp records the individual node/section impedances
    found on the way. *nf* marks recursive calls, enabling the sibling-
    branch and local-generation terms.
    """
    zpll=list()
    zp=dict()
    up_neighbor=None
    ds_neighbors=list()
    load_nodes=distgrid.load_nodes
    # Depth of n1 in the root-node path matrix (rnp): row 0 = depth, row 1 = name.
    n1_depth=int(rnp[:][0][rnp[:][1].index(n1)])
    n1=load_nodes[n1]
    if n1_depth !=0:
        # Find the single neighbor closer to the root and recurse towards it.
        for i in distgrid.load_nodes_tree.tree[n1.name]:
            if int(rnp[:][0][rnp[:][1].index(i)]) < n1_depth:
                up_neighbor=load_nodes[i]
                break
        a, pp=resolve_downstream_area(distgrid, up_neighbor.name, tree, rnp, n2=n1.name, nf=True)
        zp.update(pp)
        if type(a) != type(None):
            zeq=0
            if (up_neighbor, n1) in distgrid.sections_by_nodes.keys():
                zeq = distgrid.sections_by_nodes[(up_neighbor, n1)]
            # Refer the upstream equivalent through the connecting section
            # (transformer-aware), then record it as a parallel branch.
            if isinstance(zeq.transformer, TransformerModel):
                a = zeq.A.dot(a).dot(zeq.d) + zeq.transformer.z
                zpll.append(a)
            elif isinstance(zeq.transformer, Auto_TransformerModel):
                a = zeq.A.dot(a).dot(zeq.d) + zeq.transformer.zz
                zpll.append(a)
            else:
                a=zeq.Z + a
                zpll.append(a)
            zp[up_neighbor, n1] = a
    if n1.generation != None and nf:
        if type(n1.generation) == type(list()):
            for i in n1.generation:
                zpll.append(i.Z)
            zp[n1]=zpll
        else:
            zpll.append(n1.generation.Z)
            zp[n1]=zpll
    if n1.external_grid != None:
        zpll.append(n1.external_grid.Z)
        zp[n1]=zpll
    if nf:
        # Sibling subtrees (every downstream neighbor except the branch we
        # came from, n2) contribute through the upstream-area reduction.
        for i in distgrid.load_nodes_tree.tree[n1.name]:
            if int(rnp[:][0][rnp[:][1].index(i)]) > n1_depth and (i != n2):
                ds_neighbors.append(load_nodes[i])
        if len(ds_neighbors) !=0:
            for i in ds_neighbors:
                a,pp=resolve_upstream_area(distgrid, i.name, tree, rnp)
                zp.update(pp)
                if type(a) == type(None):
                    continue
                else:
                    zeq=0
                    if (n1, i) in distgrid.sections_by_nodes.keys():
                        zeq = distgrid.sections_by_nodes[(n1, i)]
                    if isinstance(zeq.transformer, TransformerModel):
                        a= zeq.a.dot(a+zeq.transformer.z).dot(zeq.d)
                        zpll.append(a)
                    elif isinstance(zeq.transformer, Auto_TransformerModel):
                        a= zeq.a.dot(a + zeq.transformer.zz).dot(zeq.d)
                        zpll.append(a)
                    else:
                        a=zeq.Z + a
                        zpll.append(a)
                    zp[n1,i] = a
    if len(zpll) == 0:
        return None, zp
    if len(zpll) == 1:
        return zpll[0], zp
    else:
        zeq=0
        for i in zpll:
            # i.any()==0 is True only when i has no nonzero entries
            # (ndarray.any() returns False, which equals 0): a zero branch
            # short-circuits the parallel combination.
            if i.any()==0:
                zeq=i
                return zeq, zp
            zeq +=np.linalg.inv(i)
        zeq = np.linalg.inv(zeq)
        return zeq, zp
def calc_contributions(zz,Iz,nodes,distgrid,ep):
    """Propagate the injected current *Iz* from a starting node through the
    grid tree (breadth-first), splitting it at every node among the
    parallel impedance paths in *zz* via the current-divider rule.

    Parameters (types inferred from usage):
        zz       : dict mapping (node, node) pairs to equivalent impedance matrices
        Iz       : 3x1 complex current vector injected at the start node
        nodes    : name (key into distgrid.load_nodes) of the starting node
        distgrid : grid model with load_nodes, load_nodes_tree, sections_by_nodes
        ep       : list of phase indices to erase (see vectorize_zz)

    Returns a dict mapping node / generator / external-grid names to their
    3x1 current-contribution vectors.
    """
    tree=distgrid.load_nodes_tree.tree
    ln=distgrid.load_nodes
    nodes=[ln[nodes]]          # BFS frontier, seeded with the start node
    ict=dict()                 # name -> current contribution (the result)
    visit_nodes=list()         # nodes already processed
    iz_nodes=dict()            # name -> [current entering node, phase-erase list]
    iz_nodes[nodes[0].name]=[Iz,ep]
    ict[nodes[0].name]=Iz
    root_name=distgrid.load_nodes_tree.root
    while len(nodes) !=0:
        next_nodes=list()
        for i in nodes:
            stop=False
            adjacent_nodes=[ln[x] for x in tree[i.name] if ln[x] not in visit_nodes]
            isl=dict()         # branch name -> [admittance, optional transform matrix]
            inv=np.zeros((3,3), dtype=complex)   # sum of branch admittances
            p=0
            if i.generation !=None:
                if type(i.generation)==type(list()):
                    ep_n=iz_nodes[i.name][1]
                    for j in i. generation:
                        p=np.linalg.inv(vectorize_zz(j.Z,iz_nodes[i.name][1]))
                        isl[j.name]=[p,None]
                        inv +=p
                else:
                    ep_n=iz_nodes[i.name][1]
                    p=np.linalg.inv(i.generation.Z)
                    isl[i.generation.name]=[p,None]
                    inv +=p
            if i.name==root_name:
                # A zero external-grid impedance is an ideal source: all
                # remaining current sinks there, so stop splitting here.
                if i.external_grid.Z.all()==0:
                    stop=True
                else:
                    ep_n=iz_nodes[i.name][1]
                    p=np.linalg.inv(i.external_grid.Z)
                    isl[i.external_grid.name]=[p,None]
                    inv +=p
            if not(stop):
                for j in adjacent_nodes:
                    p=0
                    if (i, j) in zz.keys():
                        section=distgrid.sections_by_nodes[(i,j)]
                        next_nodes.append(j)
                        p=np.linalg.inv(vectorize_zz(zz[(i,j)].round(6), iz_nodes[i.name][1]))
                        if isinstance(section.transformer,TransformerModel):
                            ep_n = new_phase_erase(section.transformer.connection,iz_nodes[i.name][1])
                            isl[j.name] = [p, section.a]
                        elif isinstance(section.transformer, Auto_TransformerModel):
                            ep_n = new_phase_erase(section.transformer.connection,iz_nodes[i.name][1])
                            isl[j.name] = [p, section.a]
                        else:
                            ep_n=iz_nodes[i.name][1]
                            isl[j.name] = [p, None]
                        inv += p
                    elif (j, i) in zz.keys():
                        section=distgrid.sections_by_nodes[(j,i)]
                        next_nodes.append(j)
                        p=np.linalg.inv(vectorize_zz(zz[(j,i)].round(6), ep))
                        if isinstance(section.transformer,TransformerModel):
                            ep_n = new_phase_erase(section.transformer.connection,iz_nodes[i.name][1])
                            isl[j.name] = [p, section.d]
                        elif isinstance(section.transformer, Auto_TransformerModel):
                            ep_n = new_phase_erase(section.transformer.connection,iz_nodes[i.name][1])
                            isl[j.name] = [p, section.d]
                        else:
                            ep_n=iz_nodes[i.name][1]
                            isl[j.name] = [p, None]
                        inv += p
                if len(isl) !=0:
                    z=np.linalg.inv(inv)   # parallel-equivalent impedance at node i
                    for y in isl.keys():
                        # Current divider: branch admittance * Z_parallel * incoming current.
                        izz=isl[y][0].dot(z).dot(iz_nodes[i.name][0])
                        if type(isl[y][1]) != type(None):
                            izz=isl[y][1].dot(izz)
                        # NOTE(review): y[0] is one character, so it can never
                        # equal "GD" — both branches are identical; this looks
                        # like a leftover distinction. Confirm intent.
                        if y[0]=="GD":
                            iz_nodes[y] = [izz,ep_n]
                            ict[y] = izz
                        else:
                            iz_nodes[y] = [izz, ep_n]
                            ict[y] = izz
        visit_nodes.extend(nodes)
        nodes=next_nodes
    return ict
def vectorize_zz(value,phase_erase=None):
    """Erase the given phases from a 3x3 impedance matrix in place.

    For each phase index in *phase_erase* the corresponding row and column
    are zeroed and the diagonal entry is set to a huge impedance (10e9),
    so that phase carries (practically) no current after inversion.

    Parameters
    ----------
    value : np.ndarray
        3x3 (complex) impedance matrix; modified in place.
    phase_erase : iterable of int or None
        Phase indices to erase. None (the default) erases nothing.

    Returns
    -------
    np.ndarray
        The same matrix object, for call chaining.
    """
    # Fix: the None default used to be iterated directly, raising
    # "TypeError: 'NoneType' object is not iterable" whenever the argument
    # was omitted (or when new_phase_erase returned None for a non-"Dyn"
    # transformer connection). Treat None as "erase nothing".
    if phase_erase is None:
        return value
    for i in phase_erase:
        value[:,i]=0
        value[i,:]=0
        value[i,i]=10e9
    return value
def dict_to_DataFrame(ict):
    """Flatten each 3-element contribution vector in *ict* to a plain list
    (mutating *ict* in place) and return a DataFrame with one column per
    node/generator name.
    """
    for name in ict.keys():
        flat = ict[name].reshape(1, 3,).tolist()[0]
        ict[name] = flat
    return pd.DataFrame(ict)
def new_phase_erase(tf_type,old_phase_erase):
    """Map the upstream phase-erase list across a transformer.

    For a "Dyn" connection: two erased phases upstream leave exactly one
    phase downstream (the remaining index is returned as a 1-element
    list); any other count maps to no erased phases. For every other
    connection type nothing is returned (legacy behaviour: None).
    """
    if tf_type == "Dyn":
        if len(old_phase_erase) == 2:
            remaining = [0, 1, 2]
            remaining.remove(old_phase_erase[0])
            remaining.remove(old_phase_erase[1])
            return remaining
        return []
    return None
|
"""iWear URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table: Django admin site, the main app mounted at the site
# root, and the user-account app mounted under /users/.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('main.urls')),
    path('users/', include('users.urls')),
]
|
"""
File: annotation.py
Author: Nrupatunga
Email: nrupatunga.s@byjus.com
Github: https://github.com/nrupatunga
Description: Bounding box annotations
"""
import sys
from loguru import logger
try:
from goturn.helper.BoundingBox import BoundingBox
except ImportError:
logger.error('Please run $source settings.sh from root directory')
sys.exit(1)
class annotation:
    """Holds one ground-truth annotation: a bounding box, the path of the
    image it belongs to, and the display size of that image.
    """

    def __init__(self):
        """Annotation class stores bounding box, image path"""
        self.bbox = BoundingBox(0, 0, 0, 0)  # current bounding box
        self.image_path = []                 # path of the annotated image
        self.disp_width = 0                  # display width of the image
        self.disp_height = 0                 # display height of the image

    def setbbox(self, x1, x2, y1, y2):
        """Store the bounding-box corner coordinates."""
        box = self.bbox
        box.x1, box.x2 = x1, x2
        box.y1, box.y2 = y1, y2

    def setImagePath(self, img_path):
        """Remember which image this annotation belongs to."""
        self.image_path = img_path

    def setWidthHeight(self, disp_width, disp_height):
        """Record the display size of the annotated image."""
        self.disp_width = disp_width
        self.disp_height = disp_height

    def __repr__(self):
        state = {'bbox': self.bbox, 'image_path': self.image_path,
                 'w': self.disp_width, 'h': self.disp_height}
        return str(state)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
  - This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command should operate on.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with C(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(ipv6-icmp) or C(icmpv6),
C(esp), C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
representing one of these protocols or a different one.
- A protocol name from I(/etc/protocols) is also allowed.
- A C(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- C(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
elements: str
flags_set:
description:
- Flags to be set.
type: list
elements: str
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
- The set of matches make up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
elements: str
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
- This specifies the IP address of host to send the cloned packets.
- This option is only valid when C(jump) is set to C(TEE).
type: str
version_added: "2.8"
log_prefix:
description:
- Specifies a log text for the rule. Only make sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- This parameter is only applicable if C(jump) is set to C(LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
- This specifies that the processing should continue in a user specified chain.
- Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- Since there is no way to tell the source or destination ports of such
a packet (or ICMP type), such a packet will not match any rules which specify them.
- When the "!" argument precedes fragment argument, the rule will only match head fragments,
or unfragmented packets.
type: str
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
type: str
source_port:
description:
- Source port or port range specification.
- This can either be a service name or a port number.
- An inclusive range can also be specified, using the format C(first:last).
- If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
- If the first port is greater than the second one they will be swapped.
type: str
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped.
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
type: str
destination_ports:
description:
- This specifies multiple destination port numbers or port ranges to match in the multiport module.
- It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
type: list
elements: str
version_added: "2.11"
to_ports:
description:
- This specifies a destination port or range of ports to use, without
this, the destination port is never altered.
- This is only valid if the rule also specifies one of the protocol
C(tcp), C(udp), C(dccp) or C(sctp).
type: str
to_destination:
description:
- This specifies a destination address to use with C(DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with C(SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
type: str
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- Mutually exclusive with C(set_dscp_mark_class).
type: str
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- Mutually exclusive with C(set_dscp_mark).
type: str
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
type: str
ctstate:
description:
- A list of the connection states to match in the conntrack module.
- Possible values are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
type: list
elements: str
default: []
src_range:
description:
- Specifies the source IP range to match in the iprange module.
type: str
version_added: "2.8"
dst_range:
description:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
match_set:
description:
- Specifies a set name which can be defined by ipset.
- Must be used together with the match_set_flags parameter.
- When the C(!) argument is prepended then it inverts the rule.
- Uses the iptables set extension.
type: str
version_added: "2.11"
match_set_flags:
description:
- Specifies the necessary flags for the match_set parameter.
- Must be used together with the match_set parameter.
- Uses the iptables set extension.
type: str
choices: [ "src", "dst", "src,dst", "dst,src" ]
version_added: "2.11"
limit:
description:
- Specifies the maximum average number of matches to allow per second.
- The number can specify units explicitly, using `/second', `/minute',
`/hour' or `/day', or parts of them (so `5/second' is the same as
`5/s').
type: str
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
type: str
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in match by owner rule.
      - From Ansible 2.6 when the C(!) argument is prepended then it inverts
the rule to apply instead to all users except that one specified.
type: str
version_added: "2.1"
gid_owner:
description:
- Specifies the GID or group to use in match by owner rule.
type: str
version_added: "2.9"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
"jump: REJECT".'
type: str
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command 'iptables -p icmp -h'
type: str
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
type: bool
default: false
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- If you specify this parameter, all other parameters will be ignored.
- This parameter is used to set default policy for the given C(chain).
Do not confuse this with C(jump) parameter.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
wait:
description:
- Wait N seconds for the xtables lock to prevent multiple instances of
the program from running concurrently.
type: str
version_added: "2.10"
'''
EXAMPLES = r'''
- name: Block specific IP
ansible.builtin.iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
- name: Forward port 80 to 8600
ansible.builtin.iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
- name: Allow related and established connections
ansible.builtin.iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
- name: Allow new incoming SYN packets on TCP port 22 (SSH)
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
- name: Match on IP ranges
ansible.builtin.iptables:
chain: FORWARD
src_range: 192.168.1.100-192.168.1.199
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
- name: Allow source IPs defined in ipset "admin_hosts" on port 22
ansible.builtin.iptables:
chain: INPUT
match_set: admin_hosts
match_set_flags: src
destination_port: 22
    jump: ACCEPT
- name: Tag all outbound tcp packets with DSCP mark 8
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
- name: Insert a rule on line 5
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
action: insert
rule_num: 5
# Think twice before running following task as this may lock target system
- name: Set the policy for the INPUT chain to DROP
ansible.builtin.iptables:
chain: INPUT
policy: DROP
- name: Reject tcp with tcp-reset
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
- name: Set tcp flags
ansible.builtin.iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
- name: Iptables flush filter
ansible.builtin.iptables:
chain: "{{ item }}"
flush: yes
with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
- name: Iptables flush nat
ansible.builtin.iptables:
table: nat
chain: '{{ item }}'
flush: yes
with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
- name: Log packets arriving into an user-defined chain
ansible.builtin.iptables:
chain: LOGGING
action: append
state: present
limit: 2/second
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
- name: Allow connections on multiple ports
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_ports:
- "80"
- "443"
- "8081:8083"
jump: ACCEPT
'''
import re
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
# Minimum iptables version supporting the -w (xtables lock wait) flag,
# and the version from which -w accepts a seconds argument.
IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
# Which binary to invoke for each IP protocol version.
BINS = dict(
    ipv4='iptables',
    ipv6='ip6tables',
)
# ICMP type CLI option for each IP protocol version.
ICMP_TYPE_OPTIONS = dict(
    ipv4='--icmp-type',
    ipv6='--icmpv6-type',
)
def append_param(rule, param, flag, is_list):
    """Append *flag* and its value to the *rule* argument list.

    A list value emits one flag/value pair per element; a value whose
    first character is '!' is emitted as iptables negation:
    '!', flag, value-without-bang. None values are ignored.
    """
    if is_list:
        for element in param:
            append_param(rule, element, flag, False)
        return
    if param is None:
        return
    if param[0] == '!':
        rule += ['!', flag, param[1:]]
    else:
        rule += [flag, param]
def append_tcp_flags(rule, param, flag):
    """Append the TCP-flags option built from the 'flags' (mask) and
    'flags_set' (comparison) lists; silently skip unless both are present.
    """
    if not param:
        return
    if 'flags' not in param or 'flags_set' not in param:
        return
    mask = ','.join(param['flags'])
    comp = ','.join(param['flags_set'])
    rule.extend([flag, mask, comp])
def append_match_flag(rule, param, flag, negatable):
    """Append *flag* when *param* is 'match', or the negated form
    ('!', flag) when *param* is 'negate' and negation is allowed.
    Any other value appends nothing.
    """
    if param == 'match':
        rule.append(flag)
        return
    if negatable and param == 'negate':
        rule += ['!', flag]
def append_csv(rule, param, flag):
    """Append *flag* followed by the comma-joined values of *param*,
    skipping entirely when *param* is empty/falsy."""
    if not param:
        return
    rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
    """Request the iptables match extension *match* (-m) when *param* is truthy."""
    if not param:
        return
    rule += ['-m', match]
def append_jump(rule, param, jump):
    """Add the jump target (-j *jump*) when *param* is truthy."""
    if not param:
        return
    rule += ['-j', jump]
def append_wait(rule, param, flag):
    """Add the xtables lock wait flag and its value when *param* is truthy."""
    if not param:
        return
    rule += [flag, param]
def construct_rule(params):
    """Translate the module parameters into the list of iptables CLI
    arguments describing the rule (everything after the chain name).

    Argument order is significant to iptables: match extensions (-m) must
    precede the options they provide, which is why the appends below are
    carefully ordered.
    """
    rule = []
    append_wait(rule, params['wait'], '-w')
    append_param(rule, params['protocol'], '-p', False)
    append_param(rule, params['source'], '-s', False)
    append_param(rule, params['destination'], '-d', False)
    append_param(rule, params['match'], '-m', True)
    append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
    append_param(rule, params['jump'], '-j', False)
    # --gateway is only meaningful for the TEE target.
    if params.get('jump') and params['jump'].lower() == 'tee':
        append_param(rule, params['gateway'], '--gateway', False)
    append_param(rule, params['log_prefix'], '--log-prefix', False)
    append_param(rule, params['log_level'], '--log-level', False)
    append_param(rule, params['to_destination'], '--to-destination', False)
    append_match(rule, params['destination_ports'], 'multiport')
    append_csv(rule, params['destination_ports'], '--dports')
    append_param(rule, params['to_source'], '--to-source', False)
    append_param(rule, params['goto'], '-g', False)
    append_param(rule, params['in_interface'], '-i', False)
    append_param(rule, params['out_interface'], '-o', False)
    append_param(rule, params['fragment'], '-f', False)
    append_param(rule, params['set_counters'], '-c', False)
    append_param(rule, params['source_port'], '--source-port', False)
    append_param(rule, params['destination_port'], '--destination-port', False)
    append_param(rule, params['to_ports'], '--to-ports', False)
    append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
    append_param(
        rule,
        params['set_dscp_mark_class'],
        '--set-dscp-class',
        False)
    append_match_flag(rule, params['syn'], '--syn', True)
    # Use whichever conntrack/state match module the user listed; add the
    # conntrack module ourselves when ctstate was given without one.
    if 'conntrack' in params['match']:
        append_csv(rule, params['ctstate'], '--ctstate')
    elif 'state' in params['match']:
        append_csv(rule, params['ctstate'], '--state')
    elif params['ctstate']:
        append_match(rule, params['ctstate'], 'conntrack')
        append_csv(rule, params['ctstate'], '--ctstate')
    if 'iprange' in params['match']:
        append_param(rule, params['src_range'], '--src-range', False)
        append_param(rule, params['dst_range'], '--dst-range', False)
    elif params['src_range'] or params['dst_range']:
        append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
        append_param(rule, params['src_range'], '--src-range', False)
        append_param(rule, params['dst_range'], '--dst-range', False)
    if 'set' in params['match']:
        append_param(rule, params['match_set'], '--match-set', False)
        # NOTE: the literal 'match' is passed as param so that the flags
        # value itself is appended as a bare word, matching the iptables
        # set-extension syntax: --match-set NAME src.
        append_match_flag(rule, 'match', params['match_set_flags'], False)
    elif params['match_set']:
        append_match(rule, params['match_set'], 'set')
        append_param(rule, params['match_set'], '--match-set', False)
        append_match_flag(rule, 'match', params['match_set_flags'], False)
    append_match(rule, params['limit'] or params['limit_burst'], 'limit')
    append_param(rule, params['limit'], '--limit', False)
    append_param(rule, params['limit_burst'], '--limit-burst', False)
    append_match(rule, params['uid_owner'], 'owner')
    append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
    append_param(rule, params['uid_owner'], '--uid-owner', False)
    append_match(rule, params['gid_owner'], 'owner')
    append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
    append_param(rule, params['gid_owner'], '--gid-owner', False)
    # reject_with implies the REJECT target when no explicit jump was given.
    if params['jump'] is None:
        append_jump(rule, params['reject_with'], 'REJECT')
    append_param(rule, params['reject_with'], '--reject-with', False)
    append_param(
        rule,
        params['icmp_type'],
        ICMP_TYPE_OPTIONS[params['ip_version']],
        False)
    append_match(rule, params['comment'], 'comment')
    append_param(rule, params['comment'], '--comment', False)
    return rule
def push_arguments(iptables_path, action, params, make_rule=True):
    """Build the full command line: binary, table, action and chain, an
    optional rule number (inserts only) and — unless *make_rule* is
    False — the rule specification from construct_rule()."""
    cmd = [iptables_path, '-t', params['table'], action, params['chain']]
    if action == '-I' and params['rule_num']:
        cmd.append(params['rule_num'])
    if make_rule:
        cmd.extend(construct_rule(params))
    return cmd
def check_present(iptables_path, module, params):
    """Return True if the rule described by *params* already exists
    (iptables -C exits 0 when the rule matches)."""
    cmd = push_arguments(iptables_path, '-C', params)
    rc, _, __ = module.run_command(cmd, check_rc=False)
    return (rc == 0)
def append_rule(iptables_path, module, params):
    """Append the rule to the end of the chain (iptables -A)."""
    cmd = push_arguments(iptables_path, '-A', params)
    module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
    """Insert the rule into the chain (iptables -I), honoring rule_num."""
    cmd = push_arguments(iptables_path, '-I', params)
    module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
    """Delete the matching rule from the chain (iptables -D)."""
    cmd = push_arguments(iptables_path, '-D', params)
    module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
    """Flush the chain/table (iptables -F); no rule spec is appended."""
    cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
    module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
    """Set the default policy of a built-in chain (iptables -P CHAIN POLICY)."""
    cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
    cmd.append(params['policy'])
    module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
    """Return the chain's current default policy, or None when it cannot
    be parsed from the `iptables -L` header line."""
    cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
    rc, out, _ = module.run_command(cmd, check_rc=True)
    chain_header = out.split("\n")[0]
    # Header looks like: "Chain INPUT (policy ACCEPT)".
    result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
    if result:
        return result.group(1)
    return None
def get_iptables_version(iptables_path, module):
    """Return the version number reported by `iptables --version`
    (everything after the first 'v', trailing newline stripped)."""
    rc, out, _ = module.run_command([iptables_path, '--version'], check_rc=True)
    return out.split('v')[1].rstrip('\n')
def main():
    """Module entry point.

    Parses the module arguments, then performs exactly one of: flush a
    chain/table, set a chain's default policy, or ensure a rule is
    present/absent (append, insert or delete), honoring check mode.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            action=dict(type='str', default='append', choices=['append', 'insert']),
            ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
            chain=dict(type='str'),
            rule_num=dict(type='str'),
            protocol=dict(type='str'),
            wait=dict(type='str'),
            source=dict(type='str'),
            to_source=dict(type='str'),
            destination=dict(type='str'),
            to_destination=dict(type='str'),
            match=dict(type='list', elements='str', default=[]),
            tcp_flags=dict(type='dict',
                           options=dict(
                               flags=dict(type='list', elements='str'),
                               flags_set=dict(type='list', elements='str'))
                           ),
            jump=dict(type='str'),
            gateway=dict(type='str'),
            log_prefix=dict(type='str'),
            log_level=dict(type='str',
                           choices=['0', '1', '2', '3', '4', '5', '6', '7',
                                    'emerg', 'alert', 'crit', 'error',
                                    'warning', 'notice', 'info', 'debug'],
                           default=None,
                           ),
            goto=dict(type='str'),
            in_interface=dict(type='str'),
            out_interface=dict(type='str'),
            fragment=dict(type='str'),
            set_counters=dict(type='str'),
            source_port=dict(type='str'),
            destination_port=dict(type='str'),
            destination_ports=dict(type='list', elements='str', default=[]),
            to_ports=dict(type='str'),
            set_dscp_mark=dict(type='str'),
            set_dscp_mark_class=dict(type='str'),
            comment=dict(type='str'),
            ctstate=dict(type='list', elements='str', default=[]),
            src_range=dict(type='str'),
            dst_range=dict(type='str'),
            match_set=dict(type='str'),
            match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
            limit=dict(type='str'),
            limit_burst=dict(type='str'),
            uid_owner=dict(type='str'),
            gid_owner=dict(type='str'),
            reject_with=dict(type='str'),
            icmp_type=dict(type='str'),
            syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
            flush=dict(type='bool', default=False),
            policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
        ),
        mutually_exclusive=(
            ['set_dscp_mark', 'set_dscp_mark_class'],
            ['flush', 'policy'],
        ),
        required_if=[
            ['jump', 'TEE', ['gateway']],
            ['jump', 'tee', ['gateway']],
        ]
    )
    # Result skeleton returned via exit_json below.
    args = dict(
        changed=False,
        failed=False,
        ip_version=module.params['ip_version'],
        table=module.params['table'],
        chain=module.params['chain'],
        flush=module.params['flush'],
        rule=' '.join(construct_rule(module.params)),
        state=module.params['state'],
    )
    ip_version = module.params['ip_version']
    iptables_path = module.get_bin_path(BINS[ip_version], True)
    # Check if chain option is required
    if args['flush'] is False and args['chain'] is None:
        module.fail_json(msg="Either chain or flush parameter must be specified.")
    # Log options imply (and require) the LOG jump target.
    if module.params.get('log_prefix', None) or module.params.get('log_level', None):
        if module.params['jump'] is None:
            module.params['jump'] = 'LOG'
        elif module.params['jump'] != 'LOG':
            module.fail_json(msg="Logging options can only be used with the LOG jump target.")
    # Check if wait option is supported
    iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
    if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
        if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
            # -w is supported but takes no seconds argument on this version.
            module.params['wait'] = ''
    else:
        # -w is not supported at all: drop it.
        module.params['wait'] = None
    # Flush the table
    if args['flush'] is True:
        args['changed'] = True
        if not module.check_mode:
            flush_table(iptables_path, module, module.params)
    # Set the policy
    elif module.params['policy']:
        current_policy = get_chain_policy(iptables_path, module, module.params)
        if not current_policy:
            module.fail_json(msg='Can\'t detect current policy')
        changed = current_policy != module.params['policy']
        args['changed'] = changed
        if changed and not module.check_mode:
            set_chain_policy(iptables_path, module, module.params)
    else:
        insert = (module.params['action'] == 'insert')
        rule_is_present = check_present(iptables_path, module, module.params)
        should_be_present = (args['state'] == 'present')
        # Check if target is up to date
        args['changed'] = (rule_is_present != should_be_present)
        if args['changed'] is False:
            # Target is already up to date
            module.exit_json(**args)
        # Check only; don't modify
        if not module.check_mode:
            if should_be_present:
                if insert:
                    insert_rule(iptables_path, module, module.params)
                else:
                    append_rule(iptables_path, module, module.params)
            else:
                remove_rule(iptables_path, module, module.params)
        module.exit_json(**args)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
# Copyright 2020 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
import sys
import logging
import importlib
from abc import abstractmethod
from datetime import datetime
from typing import Union, Tuple
from collections import OrderedDict
import numpy as np
import pandas as pd
from IPython.display import display, clear_output
from pandas.plotting import register_matplotlib_converters
from tensortrade.oms.orders import TradeSide
from tensortrade.env.generic import Renderer, TradingEnv
if importlib.util.find_spec("matplotlib"):
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
register_matplotlib_converters()
if importlib.util.find_spec("plotly"):
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def _create_auto_file_name(filename_prefix: str,
ext: str,
timestamp_format: str = '%Y%m%d_%H%M%S') -> str:
timestamp = datetime.now().strftime(timestamp_format)
filename = filename_prefix + timestamp + '.' + ext
return filename
def _check_path(path: str, auto_create: bool = True) -> None:
if not path or os.path.exists(path):
return
if auto_create:
os.mkdir(path)
else:
raise OSError(f"Path '{path}' not found.")
def _check_valid_format(valid_formats: list, save_format: str) -> None:
if save_format not in valid_formats:
raise ValueError("Acceptable formats are '{}'. Found '{}'".format("', '".join(valid_formats), save_format))
class BaseRenderer(Renderer):
    """The abstract base renderer to be subclassed when making a renderer
    that incorporates a `Portfolio`.
    """
    def __init__(self):
        super().__init__()
        # Episode/step upper bounds used only when formatting log entries;
        # they stay None unless a subclass or caller sets them.
        self._max_episodes = None
        self._max_steps = None
    @staticmethod
    def _create_log_entry(episode: int = None,
                          max_episodes: int = None,
                          step: int = None,
                          max_steps: int = None,
                          date_format: str = "%Y-%m-%d %H:%M:%S %p") -> str:
        """
        Creates a log entry to be used by a renderer.
        Parameters
        ----------
        episode : int
            The current episode.
        max_episodes : int
            The maximum number of episodes that can occur.
        step : int
            The current step of the current episode.
        max_steps : int
            The maximum number of steps within an episode that can occur.
        date_format : str
            The format for logging the date.
        Returns
        -------
        str
            a log entry
        """
        log_entry = f"[{datetime.now().strftime(date_format)}]"
        # Episode is displayed 1-based; the denominator is blank when no
        # maximum is known (falsy max_episodes).
        if episode is not None:
            log_entry += f" Episode: {episode + 1}/{max_episodes if max_episodes else ''}"
        if step is not None:
            log_entry += f" Step: {step}/{max_steps if max_steps else ''}"
        return log_entry
    def render(self, env: 'TradingEnv', **kwargs):
        """Collect price/performance/trade data from *env* and delegate the
        actual drawing to `render_env`.

        Recognized kwargs: ``episode``, ``max_episodes``, ``max_steps``.
        """
        price_history = None
        if len(env.observer.renderer_history) > 0:
            price_history = pd.DataFrame(env.observer.renderer_history)
        # Performance is a dict keyed by step; orient='index' makes the steps
        # the DataFrame rows.
        performance = pd.DataFrame.from_dict(env.action_scheme.portfolio.performance, orient='index')
        self.render_env(
            episode=kwargs.get("episode", None),
            max_episodes=kwargs.get("max_episodes", None),
            step=env.clock.step,
            max_steps=kwargs.get("max_steps", None),
            price_history=price_history,
            net_worth=performance.net_worth,
            performance=performance.drop(columns=['base_symbol']),
            trades=env.action_scheme.broker.trades
        )
    @abstractmethod
    def render_env(self,
                   episode: int = None,
                   max_episodes: int = None,
                   step: int = None,
                   max_steps: int = None,
                   price_history: 'pd.DataFrame' = None,
                   net_worth: 'pd.Series' = None,
                   performance: 'pd.DataFrame' = None,
                   trades: 'OrderedDict' = None) -> None:
        """Renderers the current state of the environment.
        Parameters
        ----------
        episode : int
            The episode that the environment is being rendered for.
        max_episodes : int
            The maximum number of episodes that will occur.
        step : int
            The step of the current episode that is happening.
        max_steps : int
            The maximum number of steps that will occur in an episode.
        price_history : `pd.DataFrame`
            The history of instrument involved with the environment. The
            required columns are: date, open, high, low, close, and volume.
        net_worth : `pd.Series`
            The history of the net worth of the `portfolio`.
        performance : `pd.Series`
            The history of performance of the `portfolio`.
        trades : `OrderedDict`
            The history of trades for the current episode.
        """
        raise NotImplementedError()
    def save(self) -> None:
        """Saves the rendering of the `TradingEnv`.

        No-op by default; subclasses that persist output override this.
        """
        pass
    def reset(self) -> None:
        """Resets the renderer.

        No-op by default; subclasses holding per-episode state override this.
        """
        pass
class EmptyRenderer(Renderer):
    """A renderer that renders nothing.
    Needed to make sure that environment can function without requiring a
    renderer.
    """
    def render(self, env, **kwargs):
        # Intentionally does nothing.
        pass
class ScreenLogger(BaseRenderer):
    """Prints a timestamped log entry for each rendered step to stdout.

    Parameters
    ----------
    date_format : str
        The format for logging the date.
    """
    DEFAULT_FORMAT: str = "[%(asctime)-15s] %(message)s"
    def __init__(self, date_format: str = "%Y-%m-%d %-I:%M:%S %p"):
        super().__init__()
        self._date_format = date_format
    def render_env(self,
                   episode: int = None,
                   max_episodes: int = None,
                   step: int = None,
                   max_steps: int = None,
                   price_history: pd.DataFrame = None,
                   net_worth: pd.Series = None,
                   performance: pd.DataFrame = None,
                   trades: 'OrderedDict' = None):
        # Only episode/step counters are logged; the data frames are ignored.
        entry = self._create_log_entry(episode, max_episodes, step, max_steps,
                                       date_format=self._date_format)
        print(entry)
class FileLogger(BaseRenderer):
    """Logs information to a file.
    Parameters
    ----------
    filename : str
        The file name of the log file. If omitted, a file name will be
        created automatically.
    path : str
        The path to save the log files to. None to save to same script directory.
    log_format : str
        The log entry format as per Python logging. None for default. For
        more details, refer to https://docs.python.org/3/library/logging.html
    timestamp_format : str
        The format of the timestamp of the log entry. None for default.
    """
    DEFAULT_LOG_FORMAT: str = '[%(asctime)-15s] %(message)s'
    DEFAULT_TIMESTAMP_FORMAT: str = '%Y-%m-%d %H:%M:%S'
    def __init__(self,
                 filename: str = None,
                 path: str = 'log',
                 log_format: str = None,
                 timestamp_format: str = None) -> None:
        super().__init__()
        # Creates `path` if missing (see _check_path's auto_create default).
        _check_path(path)
        if not filename:
            filename = _create_auto_file_name('log_', 'log')
        # NOTE(review): `self.id` is presumably provided by the Renderer base
        # class — confirm; it is used as the logger name here.
        self._logger = logging.getLogger(self.id)
        self._logger.setLevel(logging.INFO)
        if path:
            filename = os.path.join(path, filename)
        # Each instance attaches its own FileHandler to its own logger.
        handler = logging.FileHandler(filename)
        handler.setFormatter(
            logging.Formatter(
                log_format if log_format is not None else self.DEFAULT_LOG_FORMAT,
                datefmt=timestamp_format if timestamp_format is not None else self.DEFAULT_TIMESTAMP_FORMAT
            )
        )
        self._logger.addHandler(handler)
    @property
    def log_file(self) -> str:
        """The filename information is being logged to. (str, read-only)
        """
        return self._logger.handlers[0].baseFilename
    def render_env(self,
                   episode: int = None,
                   max_episodes: int = None,
                   step: int = None,
                   max_steps: int = None,
                   price_history: pd.DataFrame = None,
                   net_worth: pd.Series = None,
                   performance: pd.DataFrame = None,
                   trades: 'OrderedDict' = None) -> None:
        """Write one log line with the episode/step counters and the
        `performance` DataFrame's string representation."""
        log_entry = self._create_log_entry(episode, max_episodes, step, max_steps)
        self._logger.info(f"{log_entry} - Performance:\n{performance}")
class PlotlyTradingChart(BaseRenderer):
    """Trading visualization for TensorTrade using Plotly.
    Parameters
    ----------
    display : bool
        True to display the chart on the screen, False for not.
    height : int
        Chart height in pixels. Affects both display and saved file
        charts. Set to None for 100% height. Default is None.
    save_format : str
        A format to save the chart to. Acceptable formats are
        html, png, jpeg, webp, svg, pdf, eps. All the formats except for
        'html' require Orca. Default is None for no saving.
    path : str
        The path to save the chart to if save_format is not None. The folder
        will be created if not found.
    filename_prefix : str
        A string that precedes automatically-created file name
        when charts are saved. Default 'chart_'.
    timestamp_format : str
        The format of the date shown in the chart title.
    auto_open_html : bool
        Works for save_format='html' only. True to automatically
        open the saved chart HTML file in the default browser, False otherwise.
    include_plotlyjs : Union[bool, str]
        Whether to include/load the plotly.js library in the saved
        file. 'cdn' results in a smaller file by loading the library online but
        requires an Internet connect while True includes the library resulting
        in much larger file sizes. False to not include the library. For more
        details, refer to https://plot.ly/python-api-reference/generated/plotly.graph_objects.Figure.html
    Notes
    -----
    Possible Future Enhancements:
    - Saving images without using Orca.
    - Limit displayed step range for the case of a large number of steps and let
      the shown part of the chart slide after filling that range to keep showing
      recent data as it's being added.
    References
    ----------
    .. [1] https://plot.ly/python-api-reference/generated/plotly.graph_objects.Figure.html
    .. [2] https://plot.ly/python/figurewidget/
    .. [3] https://plot.ly/python/subplots/
    .. [4] https://plot.ly/python/reference/#candlestick
    .. [5] https://plot.ly/python/#chart-events
    """
    def __init__(self,
                 display: bool = True,
                 height: int = None,
                 timestamp_format: str = '%Y-%m-%d %H:%M:%S',
                 save_format: str = None,
                 path: str = 'charts',
                 filename_prefix: str = 'chart_',
                 auto_open_html: bool = False,
                 include_plotlyjs: Union[bool, str] = 'cdn') -> None:
        super().__init__()
        self._height = height
        self._timestamp_format = timestamp_format
        self._save_format = save_format
        self._path = path
        self._filename_prefix = filename_prefix
        self._include_plotlyjs = include_plotlyjs
        self._auto_open_html = auto_open_html
        # Create the output folder eagerly so save() cannot fail on a missing path.
        if self._save_format and self._path and not os.path.exists(path):
            os.mkdir(path)
        self.fig = None
        self._price_chart = None
        self._volume_chart = None
        self._performance_chart = None
        self._net_worth_chart = None
        self._base_annotations = None
        # Step of the newest trade already annotated; avoids re-annotating.
        self._last_trade_step = 0
        self._show_chart = display
    def _create_figure(self, performance_keys: dict) -> None:
        """Build the 4-row subplot figure (price, volume, performance, net worth)."""
        fig = make_subplots(
            rows=4, cols=1, shared_xaxes=True, vertical_spacing=0.03,
            row_heights=[0.55, 0.15, 0.15, 0.15],
        )
        fig.add_trace(go.Candlestick(name='Price', xaxis='x1', yaxis='y1',
                                     showlegend=False), row=1, col=1)
        fig.update_layout(xaxis_rangeslider_visible=False)
        fig.add_trace(go.Bar(name='Volume', showlegend=False,
                             marker={'color': 'DodgerBlue'}),
                      row=2, col=1)
        # One scatter trace per performance series, all in row 3.
        for k in performance_keys:
            fig.add_trace(go.Scatter(mode='lines', name=k), row=3, col=1)
        fig.add_trace(go.Scatter(mode='lines', name='Net Worth', marker={'color': 'DarkGreen'}),
                      row=4, col=1)
        fig.update_xaxes(linecolor='Grey', gridcolor='Gainsboro')
        fig.update_yaxes(linecolor='Grey', gridcolor='Gainsboro')
        fig.update_xaxes(title_text='Price', row=1)
        fig.update_xaxes(title_text='Volume', row=2)
        fig.update_xaxes(title_text='Performance', row=3)
        fig.update_xaxes(title_text='Net Worth', row=4)
        fig.update_xaxes(title_standoff=7, title_font=dict(size=12))
        self.fig = go.FigureWidget(fig)
        self._price_chart = self.fig.data[0]
        self._volume_chart = self.fig.data[1]
        self._performance_chart = self.fig.data[2]
        self._net_worth_chart = self.fig.data[-1]
        self.fig.update_annotations({'font': {'size': 12}})
        self.fig.update_layout(template='plotly_white', height=self._height, margin=dict(t=50))
        # Remember the pristine annotations so reset() can restore them.
        self._base_annotations = self.fig.layout.annotations
    def _create_trade_annotations(self,
                                  trades: 'OrderedDict',
                                  price_history: 'pd.DataFrame') -> 'Tuple[go.layout.Annotation]':
        """Creates annotations of the new trades after the last one in the chart.
        Parameters
        ----------
        trades : `OrderedDict`
            The history of trades for the current episode.
        price_history : `pd.DataFrame`
            The price history of the current episode.
        Returns
        -------
        `Tuple[go.layout.Annotation]`
            A tuple of annotations used in the renderering process.
        """
        annotations = []
        # Walk newest-to-oldest and stop at the first already-annotated trade.
        for trade in reversed(trades.values()):
            trade = trade[0]
            tp = float(trade.price)
            ts = float(trade.size)
            if trade.step <= self._last_trade_step:
                break
            if trade.side.value == 'buy':
                color = 'DarkGreen'
                ay = 15
                qty = round(ts / tp, trade.quote_instrument.precision)
                text_info = dict(
                    step=trade.step,
                    datetime=price_history.iloc[trade.step - 1]['date'],
                    side=trade.side.value.upper(),
                    qty=qty,
                    size=ts,
                    quote_instrument=trade.quote_instrument,
                    price=tp,
                    base_instrument=trade.base_instrument,
                    type=trade.type.value.upper(),
                    commission=trade.commission
                )
            elif trade.side.value == 'sell':
                color = 'FireBrick'
                ay = -15
                # qty = round(ts * tp, trade.quote_instrument.precision)
                text_info = dict(
                    step=trade.step,
                    datetime=price_history.iloc[trade.step - 1]['date'],
                    side=trade.side.value.upper(),
                    qty=ts,
                    size=round(ts * tp, trade.base_instrument.precision),
                    quote_instrument=trade.quote_instrument,
                    price=tp,
                    base_instrument=trade.base_instrument,
                    type=trade.type.value.upper(),
                    commission=trade.commission
                )
            else:
                raise ValueError(f"Valid trade side values are 'buy' and 'sell'. Found '{trade.side.value}'.")
            hovertext = 'Step {step} [{datetime}]<br>' \
                        '{side} {qty} {quote_instrument} @ {price} {base_instrument} {type}<br>' \
                        'Total: {size} {base_instrument} - Comm.: {commission}'.format(**text_info)
            annotations += [go.layout.Annotation(
                x=trade.step - 1, y=tp,
                ax=0, ay=ay, xref='x1', yref='y1', showarrow=True,
                arrowhead=2, arrowcolor=color, arrowwidth=4,
                arrowsize=0.8, hovertext=hovertext, opacity=0.6,
                hoverlabel=dict(bgcolor=color)
            )]
        if trades:
            self._last_trade_step = trades[list(trades)[-1]][0].step
        return tuple(annotations)
    def render_env(self,
                   episode: int = None,
                   max_episodes: int = None,
                   step: int = None,
                   max_steps: int = None,
                   price_history: pd.DataFrame = None,
                   net_worth: pd.Series = None,
                   performance: pd.DataFrame = None,
                   trades: 'OrderedDict' = None) -> None:
        """Update (and lazily create) the figure from the latest episode data."""
        if price_history is None:
            raise ValueError("renderers() is missing required positional argument 'price_history'.")
        if net_worth is None:
            raise ValueError("renderers() is missing required positional argument 'net_worth'.")
        if performance is None:
            raise ValueError("renderers() is missing required positional argument 'performance'.")
        if trades is None:
            raise ValueError("renderers() is missing required positional argument 'trades'.")
        if not self.fig:
            self._create_figure(performance.keys())
        if self._show_chart:  # ensure chart visibility through notebook cell reruns
            display(self.fig)
        self.fig.layout.title = self._create_log_entry(episode, max_episodes, step, max_steps)
        self._price_chart.update(dict(
            open=price_history['open'],
            high=price_history['high'],
            low=price_history['low'],
            close=price_history['close']
        ))
        self.fig.layout.annotations += self._create_trade_annotations(trades, price_history)
        self._volume_chart.update({'y': price_history['volume']})
        for trace in self.fig.select_traces(row=3):
            trace.update({'y': performance[trace.name]})
        self._net_worth_chart.update({'y': net_worth})
        if self._show_chart:
            self.fig.show()
    def save(self) -> None:
        """Saves the current chart to a file.
        Notes
        -----
        All formats other than HTML require Orca installed and server running.
        """
        if not self._save_format:
            return
        else:
            valid_formats = ['html', 'png', 'jpeg', 'webp', 'svg', 'pdf', 'eps']
            _check_valid_format(valid_formats, self._save_format)
        _check_path(self._path)
        filename = _create_auto_file_name(self._filename_prefix, self._save_format)
        filename = os.path.join(self._path, filename)
        if self._save_format == 'html':
            # BUGFIX: honor the include_plotlyjs constructor parameter instead
            # of a hard-coded 'cdn'.
            self.fig.write_html(file=filename, include_plotlyjs=self._include_plotlyjs, auto_open=self._auto_open_html)
        else:
            self.fig.write_image(filename)
    def reset(self) -> None:
        """Drop per-episode state (trade marker + annotations) for a new episode."""
        self._last_trade_step = 0
        if self.fig is None:
            return
        self.fig.layout.annotations = self._base_annotations
        clear_output(wait=True)
class MatplotlibTradingChart(BaseRenderer):
    """ Trading visualization for TensorTrade using Matplotlib
    Parameters
    ---------
    display : bool
        True to display the chart on the screen, False for not.
    save_format : str
        A format to save the chart to. Acceptable formats are
        png, jpeg, svg, pdf.
    path : str
        The path to save the chart to if save_format is not None. The folder
        will be created if not found.
    filename_prefix : str
        A string that precedes automatically-created file name
        when charts are saved. Default 'chart_'.
    """
    def __init__(self,
                 display: bool = True,
                 save_format: str = None,
                 path: str = 'charts',
                 filename_prefix: str = 'chart_') -> None:
        super().__init__()
        # Fraction of the price axis reserved for the volume overlay.
        self._volume_chart_height = 0.33
        self._df = None
        self.fig = None
        # NOTE(review): these underscored attrs are never read — the axes
        # actually used are self.price_ax / self.volume_ax set in
        # _create_figure. Looks like a naming inconsistency; confirm before
        # relying on them.
        self._price_ax = None
        self._volume_ax = None
        self.net_worth_ax = None
        self._show_chart = display
        self._save_format = save_format
        self._path = path
        self._filename_prefix = filename_prefix
        if self._save_format and self._path and not os.path.exists(path):
            os.mkdir(path)
    def _create_figure(self) -> None:
        # Net worth on top (2 rows), price below sharing the x-axis, and
        # volume on a twin y-axis of the price chart.
        self.fig = plt.figure()
        self.net_worth_ax = plt.subplot2grid((6, 1), (0, 0), rowspan=2, colspan=1)
        self.price_ax = plt.subplot2grid((6, 1), (2, 0), rowspan=8,
                                         colspan=1, sharex=self.net_worth_ax)
        self.volume_ax = self.price_ax.twinx()
        plt.subplots_adjust(left=0.11, bottom=0.24, right=0.90, top=0.90, wspace=0.2, hspace=0)
    def _render_trades(self, step_range, trades) -> None:
        # Flatten the OrderedDict of per-step trade lists into one list.
        trades = [trade for sublist in trades.values() for trade in sublist]
        for trade in trades:
            # range(sys.maxsize)[step_range] materializes the slice bounds so
            # membership can be tested against the visible window.
            if trade.step in range(sys.maxsize)[step_range]:
                date = self._df.index.values[trade.step]
                close = self._df['close'].values[trade.step]
                color = 'green'
                if trade.side is TradeSide.SELL:
                    color = 'red'
                self.price_ax.annotate(' ', (date, close),
                                       xytext=(date, close),
                                       size="large",
                                       arrowprops=dict(arrowstyle='simple', facecolor=color))
    def _render_volume(self, step_range, times) -> None:
        self.volume_ax.clear()
        volume = np.array(self._df['volume'].values[step_range])
        self.volume_ax.plot(times, volume, color='blue')
        self.volume_ax.fill_between(times, volume, color='blue', alpha=0.5)
        # Scale so the volume occupies only the bottom fraction of the axis.
        self.volume_ax.set_ylim(0, max(volume) / self._volume_chart_height)
        self.volume_ax.yaxis.set_ticks([])
    def _render_price(self, step_range, times, current_step) -> None:
        self.price_ax.clear()
        self.price_ax.plot(times, self._df['close'].values[step_range], color="black")
        last_time = self._df.index.values[current_step]
        last_close = self._df['close'].values[current_step]
        last_high = self._df['high'].values[current_step]
        # Label the latest close just above the latest high.
        self.price_ax.annotate('{0:.2f}'.format(last_close), (last_time, last_close),
                               xytext=(last_time, last_high),
                               bbox=dict(boxstyle='round',
                                         fc='w', ec='k', lw=1),
                               color="black",
                               fontsize="small")
        # Leave headroom at the bottom for the volume overlay.
        ylim = self.price_ax.get_ylim()
        self.price_ax.set_ylim(ylim[0] - (ylim[1] - ylim[0]) * self._volume_chart_height, ylim[1])
    # def _render_net_worth(self, step_range, times, current_step, net_worths, benchmarks):
    def _render_net_worth(self, step_range, times, current_step, net_worths) -> None:
        self.net_worth_ax.clear()
        self.net_worth_ax.plot(times, net_worths[step_range], label='Net Worth', color="g")
        self.net_worth_ax.legend()
        legend = self.net_worth_ax.legend(loc=2, ncol=2, prop={'size': 8})
        legend.get_frame().set_alpha(0.4)
        last_time = times[-1]
        last_net_worth = list(net_worths[step_range])[-1]
        self.net_worth_ax.annotate('{0:.2f}'.format(last_net_worth), (last_time, last_net_worth),
                                   xytext=(last_time, last_net_worth),
                                   bbox=dict(boxstyle='round',
                                             fc='w', ec='k', lw=1),
                                   color="black",
                                   fontsize="small")
        self.net_worth_ax.set_ylim(min(net_worths) / 1.25, max(net_worths) * 1.25)
    def render_env(self,
                   episode: int = None,
                   max_episodes: int = None,
                   step: int = None,
                   max_steps: int = None,
                   price_history: 'pd.DataFrame' = None,
                   net_worth: 'pd.Series' = None,
                   performance: 'pd.DataFrame' = None,
                   trades: 'OrderedDict' = None) -> None:
        """Draw net worth, price, volume and trade markers for the latest window."""
        if price_history is None:
            raise ValueError("renderers() is missing required positional argument 'price_history'.")
        if net_worth is None:
            raise ValueError("renderers() is missing required positional argument 'net_worth'.")
        if performance is None:
            raise ValueError("renderers() is missing required positional argument 'performance'.")
        if trades is None:
            raise ValueError("renderers() is missing required positional argument 'trades'.")
        if not self.fig:
            self._create_figure()
        if self._show_chart:
            plt.show(block=False)
        # Steps are 1-based; convert to a 0-based index into the history.
        current_step = step -1
        self._df = price_history
        if max_steps:
            window_size=max_steps
        else:
            window_size=20
        current_net_worth = round(net_worth[len(net_worth)-1], 1)
        initial_net_worth = round(net_worth[0], 1)
        profit_percent = round((current_net_worth - initial_net_worth) / initial_net_worth * 100, 2)
        self.fig.suptitle('Net worth: $' + str(current_net_worth) +
                          ' | Profit: ' + str(profit_percent) + '%')
        window_start = max(current_step - window_size, 0)
        step_range = slice(window_start, current_step)
        times = self._df.index.values[step_range]
        if len(times) > 0:
            # self._render_net_worth(step_range, times, current_step, net_worths, benchmarks)
            self._render_net_worth(step_range, times, current_step, net_worth)
            self._render_price(step_range, times, current_step)
            self._render_volume(step_range, times)
            self._render_trades(step_range, trades)
        self.price_ax.set_xticklabels(times, rotation=45, horizontalalignment='right')
        plt.setp(self.net_worth_ax.get_xticklabels(), visible=False)
        # Brief pause lets matplotlib flush the draw events.
        plt.pause(0.001)
    def save(self) -> None:
        """Saves the rendering of the `TradingEnv`.
        """
        if not self._save_format:
            return
        else:
            valid_formats = ['png', 'jpeg', 'svg', 'pdf']
            _check_valid_format(valid_formats, self._save_format)
        _check_path(self._path)
        filename = _create_auto_file_name(self._filename_prefix, self._save_format)
        filename = os.path.join(self._path, filename)
        self.fig.savefig(filename, format=self._save_format)
    def reset(self) -> None:
        """Resets the renderer.
        """
        self.fig = None
        # NOTE(review): resets the unused underscored attrs; the live
        # self.price_ax / self.volume_ax are recreated by _create_figure on
        # the next render.
        self._price_ax = None
        self._volume_ax = None
        self.net_worth_ax = None
        self._df = None
# Maps string identifiers to their renderer classes.
_registry = {
    "screen-log": ScreenLogger,
    "file-log": FileLogger,
    "plotly": PlotlyTradingChart,
    "matplot": MatplotlibTradingChart
}
def get(identifier: str) -> 'BaseRenderer':
    """Instantiate the `BaseRenderer` registered under *identifier*.

    Parameters
    ----------
    identifier : str
        The identifier for the `BaseRenderer`

    Returns
    -------
    `BaseRenderer`
        The renderer associated with the `identifier`.

    Raises
    ------
    KeyError:
        Raised if identifier is not associated with any `BaseRenderer`
    """
    if identifier not in _registry:
        raise KeyError(f"Identifier {identifier} is not associated with any `BaseRenderer`.")
    return _registry[identifier]()
|
import railrl.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import *
import railrl.torch.vae.vae_schedules as vae_schedules
from railrl.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers
from railrl.launchers.arglauncher import run_variants
from railrl.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment, grill_her_twin_sac_full_experiment
from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv
from railrl.torch.vae.conditional_conv_vae import ACE
from railrl.torch.vae.vae_trainer import ACETrainer
if __name__ == "__main__":
    # Base hyperparameter configuration; entries are overridden per-run by
    # the search_space sweep below.
    variant = dict(
        double_algo=False,
        online_vae_exploration=False,
        imsize=48,
        init_camera=sawyer_init_camera_zoomed_in,
        # env_id='SawyerPushNIPSEasy-v0',
        env_class=Multiobj2DEnv,
        env_kwargs=dict(
            render_onscreen=False,
            ball_radius=1.5,
            images_are_rgb=True,
            show_goal=False,
            change_background=False,
            fixed_colors=False,
        ),
        # RL (goal-conditioned SAC) side of the experiment.
        grill_variant=dict(
            save_video=True,
            custom_goal_sampler='replay_buffer',
            # NOTE(review): lr=0 together with vae_training_schedule=never_train
            # below — the pretrained VAE (loaded from vae_path) stays frozen.
            online_vae_trainer_kwargs=dict(
                beta=20,
                lr=0,
            ),
            save_video_period=50,
            qf_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            policy_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            vf_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            max_path_length=50,
            algo_kwargs=dict(
                batch_size=128,
                num_epochs=1000,
                num_eval_steps_per_epoch=500,
                num_expl_steps_per_train_loop=500,
                num_trains_per_train_loop=5,
                min_num_steps_before_training=1000,
                vae_training_schedule=vae_schedules.never_train,
                oracle_data=False,
                vae_save_period=25,
                parallel_vae_train=False,
            ),
            twin_sac_trainer_kwargs=dict(
                discount=0.98,
                reward_scale=1,
                soft_target_tau=1e-3,
                target_update_period=1,  # 1
                use_automatic_entropy_tuning=True,
            ),
            replay_buffer_kwargs=dict(
                start_skew_epoch=10,
                max_size=int(100000),
                fraction_goals_rollout_goals=0.2,
                fraction_goals_env_goals=0.5,
                exploration_rewards_type='None',
                vae_priority_type='vae_prob',
                priority_function_kwargs=dict(
                    sampling_method='importance_sampling',
                    decoder_distribution='gaussian_identity_variance',
                    # decoder_distribution='bernoulli',
                    num_latents_to_sample=10,
                ),
                power=-1,
                relabeling_goal_sampling_mode='vae_prior',
            ),
            exploration_goal_sampling_mode='vae_prior',
            evaluation_goal_sampling_mode='reset_of_env',
            normalize=False,
            render=False,
            exploration_noise=0.2,
            exploration_type='ou',
            training_mode='train',
            testing_mode='test',
            reward_params=dict(
                type='latent_distance',
            ),
            observation_key='latent_observation',
            desired_goal_key='latent_desired_goal',
            vae_wrapped_env_kwargs=dict(
                sample_from_true_prior=True,
            ),
            algorithm='ONLINE-VAE-SAC-BERNOULLI',
            # Pretrained ACE VAE checkpoint (overridden by the sweep below).
            vae_path="/home/khazatsky/rail/data/rail-khazatsky/sasha/PCVAE/ACE/run1/id0/itr_800.pkl",
        ),
        # VAE pretraining side; num_epochs=0 means no training is done here.
        train_vae_variant=dict(
            representation_size=4,
            beta=10,
            num_epochs=0,
            dump_skew_debug_plots=False,
            # decoder_activation='gaussian',
            decoder_activation='sigmoid',
            use_linear_dynamics=True,
            generate_vae_dataset_kwargs=dict(
                N=0,
                n_random_steps=5000,
                test_p=.9,
                use_cached=False,
                show=False,
                oracle_dataset=False,
                oracle_dataset_using_set_to_goal=False,
                non_presampled_goal_img_is_garbage=False,
                random_rollout_data=True,
                conditional_vae_dataset=True,
                save_trajectories=False,
                enviorment_dataset=False,
            ),
            vae_trainer_class=ACETrainer,
            vae_class=ACE,
            vae_kwargs=dict(
                input_channels=3,
                architecture=imsize48_default_architecture_with_more_hidden_layers,
                decoder_distribution='gaussian_identity_variance',
            ),
            # TODO: why the redundancy?
            algo_kwargs=dict(
                start_skew_epoch=5000,
                is_auto_encoder=False,
                batch_size=32,
                lr=1e-3,
                skew_config=dict(
                    method='vae_prob',
                    power=0,
                ),
                skew_dataset=False,
                linearity_weight=50,
                distance_weight=10,
                priority_function_kwargs=dict(
                    decoder_distribution='gaussian_identity_variance',
                    sampling_method='importance_sampling',
                    # sampling_method='true_prior_sampling',
                    num_latents_to_sample=10,
                ),
                use_parallel_dataloading=False,
            ),
            save_period=25,
        ),
    )
    # Cross-product sweep: dotted keys override nested entries of `variant`.
    search_space = {
        'seedid': range(2),
        'grill_variant.reward_params.type':['latent_distance'],
        'train_vae_variant.representation_size': [(4, 4),], #(3 * objects, 3 * colors)
        'train_vae_variant.beta': [50],
        'train_vae_variant.generate_vae_dataset_kwargs.n_random_steps': [100],
        'grill_variant.vae_path': ["/home/khazatsky/rail/data/rail-khazatsky/sasha/PCVAE/ACE/run1000/id1/vae.pkl", "/home/khazatsky/rail/data/rail-khazatsky/sasha/PCVAE/ACE/run1/id0/itr_800.pkl"]
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    variants = []
    for variant in sweeper.iterate_hyperparameters():
        variants.append(variant)
    # Launch one run per swept variant.
    run_variants(grill_her_twin_sac_online_vae_full_experiment, variants, run_id=10)
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import spearmanr as sr
from scipy.cluster import hierarchy as hc
from typing import List, Any, Union, Tuple, Optional, Dict
import random, math
# TODO: Remove Dependencies, starting with Sklearn
from sklearn.metrics import roc_curve, \
precision_recall_curve, roc_auc_score, \
confusion_matrix
import itertools
import logging
# TODO: Make categorical_cols optional argument (None) to
# avoid ambiguity when there are no categorical cols
def normalize_numeric(
        df: pd.DataFrame,
        numerical_cols: Optional[List[str]] = None) -> pd.DataFrame:
    """
    Normalizes numeric columns by substracting the mean and dividing
    by standard deviation. If the parameter numerical_cols is not
    provided, it will take all the columns of dtype np.number.
    :Example:
    norm_df = xai.normalize_numeric(
        df,
        numerical_cols=["age", "other_numeric_attribute"])
    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pd.DataFrame
    :param numerical_cols: List of strings containing numercial cols
    :type numerical_cols: List[str]
    :returns: Dataframe with normalized numerical values.
    :rtype: pandas.DataFrame
    """
    tmp_df = df.copy()
    # None (the new default, replacing a mutable-default []) or an empty list
    # both mean "auto-detect all numeric columns".
    if not numerical_cols:
        numerical_cols = df.select_dtypes(include=[np.number]).columns
    for k in numerical_cols:
        tmp_df[k] = tmp_df[k].astype(np.float32)
        tmp_df[k] -= tmp_df[k].mean()
        tmp_df[k] /= tmp_df[k].std()  # pandas sample std (ddof=1)
    return tmp_df
def convert_categories(
        df: pd.DataFrame,
        categorical_cols: Optional[List[str]] = None) -> pd.DataFrame:
    """
    Converts columns to numeric categories. If the categorical_cols
    parameter is passed as a list then those columns are converted.
    Otherwise, all object/bool columns are converted.
    :Example:
    import xai
    cat_df = xai.convert_categories(df)
    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param categorical_cols: List of strings containing categorical cols
    :type categorical_cols: List[str]
    :returns: Dataframe with categorical numerical values.
    :rtype: pandas.DataFrame
    """
    tmp_df = df.copy()
    # BUGFIX: np.object / np.bool were removed in NumPy 1.24; string dtype
    # selectors are the stable equivalent. None (new default, replacing a
    # mutable-default []) means "auto-detect".
    if not categorical_cols:
        categorical_cols = df.select_dtypes(include=['object', 'bool']).columns
    tmp_df[categorical_cols] = tmp_df[categorical_cols].astype('category')
    # cat.codes enumerates categories in sorted order, starting at 0.
    tmp_df[categorical_cols] = tmp_df[categorical_cols].apply(lambda x: x.cat.codes)
    tmp_df[categorical_cols] = tmp_df[categorical_cols].astype('int8')
    return tmp_df
def group_by_columns(
        df: pd.DataFrame,
        columns: List[str],
        bins: int = 6,
        categorical_cols: List[str] = []):
    """Group *df* by the categories (or bucketized values) of *columns*.

    Categorical columns group directly on their values; numeric columns are
    bucketized into ``bins`` equal-width intervals first. With more than one
    column, the grouping is the cross product — e.g. age buckets crossed
    with each label value.
    :Example:
    columns=["loan", "gender"]
    df_groups = xai.group_by_columns(
        df,
        columns=columns,
        bins=10,
        categorical_cols=["gender", "loan"])
    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param bins: [Default: 6] Number of bins to be used for numerical cols
    :type bins: int
    :param categorical_cols: [Default: []] Columns treated as categorical;
        anything not listed is treated as numeric and binned.
    :type categorical_cols: List[str]
    :returns: grouped dataframe
    :rtype: pandas.core.groupby.groupby.DataFrameGroupBy
    """
    if not len(categorical_cols):
        categorical_cols = _infer_categorical(df)
    group_list = []
    for column_name in columns:
        series = df[column_name]
        if column_name in categorical_cols or not bins:
            # Categorical (or un-binned) columns group on their raw values.
            group_list.append(column_name)
            continue
        # TODO: Use the original bins for display purposes as they may come normalised
        edges = list(np.linspace(series.min(), series.max(), bins))
        group_list.append(pd.cut(series, edges))
    return df.groupby(group_list)
def imbalance_plot(
        df: pd.DataFrame,
        *cross_cols: str,
        categorical_cols: List[str] = [],
        bins: int = 6,
        threshold: float = 0.5):
    """
    Shows the number of examples provided for each of the values across the
    product tuples in the columns provided. If you would like to do processing
    with the sub-groups created by this class please see the
    group_by_columns function.
    :Example:
    import xai
    class_counts = xai.imbalance_plot(
        df,
        "gender", "loan",
        bins=10,
        threshold=0.8)
    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param *cross_cols: One or more positional arguments (passed as *args) that
        are used to split the data into the cross product of their values
    :type cross_cols: List[str]
    :param categorical_cols: [Default: []] Columns within dataframe that are
        categorical. Columns that are not np.objects and are not part explicitly
        provided here will be treated as numeric, and bins will be used.
    :type categorical_cols: List[str]
    :param bins: [Default: 6] Number of bins to be used for numerical cols
    :type bins: int
    :param threshold: [Default: 0.5] Threshold to display in the chart.
    :type threshold: float
    :returns: Null
    :rtype: None
    """
    if not cross_cols:
        raise TypeError("imbalance_plot requires at least 1 string column name")
    grouped = group_by_columns(
        df,
        list(cross_cols),
        bins=bins,
        categorical_cols=categorical_cols)
    # Count rows per group using the first cross column as the counted series.
    grouped_col = grouped[cross_cols[0]]
    count_grp = grouped_col.count()
    count_max = count_grp.values.max()
    # Each group's size as a fraction of the largest group.
    ratios = round(count_grp/count_max,4)
    # TODO: Make threshold a minimum number of examples per class
    imbalances = ratios < threshold
    # NOTE(review): plt.cm.get_cmap is deprecated in newer matplotlib
    # (use matplotlib.colormaps) — confirm the supported matplotlib range.
    cm = plt.cm.get_cmap('RdYlBu_r')
    # Groups below the threshold get a ratio-scaled warning color; the rest
    # get the colormap's base color.
    colors = [cm(1-r/threshold/2) if t else cm(0) \
              for r,t in zip(ratios, imbalances)]
    ax = count_grp.plot.bar(color=colors)
    # Horizontal line marking the absolute-count threshold.
    lp = plt.axhline(threshold*count_max, color='r')
    lp.set_label(f"Threshold: {threshold*count_max:.2f} ({threshold*100:.2f}%)")
    plt.legend()
    plt.show()
def balance(
        df: pd.DataFrame,
        *cross_cols: str,
        upsample: float = 0.5,
        downsample: int = 1,
        bins: int = 6,
        categorical_cols: List[str] = [],
        plot: bool = True):
    """
    Balances a dataframe based on the columns and cross columns provided.
    The results can be upsampled or downsampled. By default, there is no
    downsample, and the upsample is towards a minimum of 50% of the
    frequency of the highest class.

    :Example:
        cat_df = xai.balance(
            df,
            "gender", "loan",
            upsample=0.8,
            downsample=0.8)

    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param *cross_cols: One or more positional arguments (passed as *args)
        that are used to split the data into the cross product of their values
    :type cross_cols: List[str]
    :param upsample: [Default: 0.5] Target upsample for groups smaller
        than this fraction of the largest group
    :type upsample: float
    :param downsample: [Default: 1] Target downsample for groups larger
        than this fraction of the largest group
    :type downsample: float
    :param bins: [Default: 6] Number of bins to be used for numerical cols
    :type bins: int
    :param categorical_cols: [Default: []] Columns within dataframe that are
        categorical. Columns that are not objects and are not explicitly
        provided here will be treated as numeric, and bins will be used.
    :type categorical_cols: List[str]
    :param plot: [Default: True] If True, displays an imbalance plot of the
        resampled dataframe.
    :type plot: bool
    :returns: Resampled dataframe.
    :rtype: pandas.DataFrame
    """
    if not len(categorical_cols):
        # BUG FIX: np.object / np.bool were deprecated in NumPy 1.20 and
        # removed in 1.24; select by dtype name instead.
        categorical_cols = df.select_dtypes(include=["object", "bool"]).columns
    grouped = group_by_columns(
        df,
        list(cross_cols),
        bins=bins,
        categorical_cols=categorical_cols)
    count_grp = grouped.count()
    count_max = count_grp.values.max()
    count_upsample = int(upsample * count_max)
    count_downsample = int(downsample * count_max)

    def norm(x):
        # Upsample (with replacement) groups below the floor, downsample
        # groups above the ceiling, leave everything in between untouched.
        if x.shape[0] < count_upsample:
            return x.sample(count_upsample, replace=True)
        elif x.shape[0] > count_downsample:
            return x.sample(count_downsample)
        else:
            return x

    tmp_df = grouped.apply(norm) \
                .reset_index(drop=True)
    if plot:
        imbalance_plot(
            tmp_df,
            *cross_cols,
            bins=bins,
            categorical_cols=categorical_cols)
    return tmp_df
def _plot_correlation_dendogram(
        corr: pd.DataFrame,
        cols: List[str],
        plt_kwargs={}):
    """
    Plot a hierarchical-clustering dendrogram of a correlation matrix, using
    the columns provided. Variables that are most correlated are joined by
    connecting trees; the closer to the right the connection is, the more
    correlated the features are.

    :Example:
        columns_to_include=["age", "loan", "gender"]
        xai._plot_correlation_dendogram(df, cols=columns_to_include)

    :returns: Null
    :rtype: None
    """
    rounded = np.round(corr, 4)
    # 1 - correlation is used as the pairwise distance for clustering.
    condensed = hc.distance.squareform(1 - rounded)
    linkage = hc.linkage(condensed, method="average")
    plt.figure(**plt_kwargs)
    hc.dendrogram(linkage, labels=cols, orientation="left", leaf_font_size=16)
    plt.show()
def _plot_correlation_matrix(
        corr,
        cols: List[str],
        plt_kwargs={}):
    """
    Plot the correlation matrix as a colour-coded grid, with the provided
    columns on both axes. Each cell shows the correlation between its row
    and column variable; the closer to 1, the more correlated the features.
    For a tree view of the same information see _plot_correlation_dendogram.

    :Example:
        columns_to_include=["age", "loan", "gender"]
        xai._plot_correlation_matrix(df, cols=columns_to_include)

    :returns: Null
    :rtype: None
    """
    fig = plt.figure(**plt_kwargs)
    axes = fig.add_subplot(111)
    image = axes.matshow(corr, cmap='coolwarm', vmin=-1, vmax=1)
    fig.colorbar(image)
    tick_positions = np.arange(0, len(cols), 1)
    axes.set_xticks(tick_positions)
    plt.xticks(rotation=90)
    axes.set_yticks(tick_positions)
    axes.set_xticklabels(cols)
    axes.set_yticklabels(cols)
    plt.show()
def correlations(
        df: pd.DataFrame,
        include_categorical: bool = False,
        plot_type: str = "dendogram",
        plt_kwargs={},
        categorical_cols: List[str] = []):
    """
    Computes the correlation matrix for the dataframe provided and plots it
    either as a dendrogram or as a matrix.

    :Example:
        corr = xai.correlations(df, plot_type="matrix")

    :param df: Pandas Dataframe containing data (inputs and target)
    :type df: pandas.DataFrame
    :param include_categorical: [Default: False] If True, compute rank
        correlations over all columns via ``sr`` (presumably
        scipy.stats.spearmanr — confirm against module imports); otherwise
        categorical columns are dropped and Pearson correlations are
        computed on the remaining numeric columns.
    :type include_categorical: bool
    :param plot_type: [Default: "dendogram"] Either "dendogram" or "matrix".
    :type plot_type: str
    :param plt_kwargs: [Default: {}] Keyword args forwarded to the figure.
    :type plt_kwargs: dict
    :param categorical_cols: [Default: []] Columns within dataframe that are
        categorical; if empty, they are inferred from object/bool dtypes.
    :type categorical_cols: List[str]
    :returns: Dataframe containing the correlation values for the features
    :rtype: pandas.DataFrame
    """
    corr = None
    cols: List = []
    if include_categorical:
        corr = sr(df).correlation
        cols = df.columns
    else:
        if not len(categorical_cols):
            # BUG FIX: np.object / np.bool were removed in NumPy 1.24;
            # select by dtype name instead.
            categorical_cols = df.select_dtypes(include=["object", "bool"]).columns
        cols = [c for c in df.columns if c not in categorical_cols]
        corr = df[cols].corr()
        cols = corr.columns
    if plot_type == "dendogram":
        _plot_correlation_dendogram(corr, cols, plt_kwargs=plt_kwargs)
    elif plot_type == "matrix":
        _plot_correlation_matrix(corr, cols, plt_kwargs=plt_kwargs)
    else:
        raise ValueError(f"Variable plot_type not valid. Provided: {plot_type}")
    return corr
def confusion_matrix_plot(
        y_test,
        pred,
        scaled=True,
        label_x_neg="PREDICTED NEGATIVE",
        label_x_pos="PREDICTED POSITIVE",
        label_y_neg="ACTUAL NEGATIVE",
        label_y_pos="ACTUAL POSITIVE"):
    """
    Plots a confusion matrix for a binary classifier with the expected and
    predicted values provided.

    :Example:
        xai.confusion_matrix_plot(
            actual_labels,
            predicted_labels,
            scaled=True)

    :param y_test: Array containing binary "actual" labels for data
    :type y_test: Union[np.array, list]
    :param pred: Array containing binary "predicted" labels for data
    :type pred: Union[np.array, list]
    :param scaled: [Default: True] Whether the values are scaled to 0-1 or
        displayed as total number of instances
    :type scaled: bool
    :param label_x_neg: [Default: "PREDICTED NEGATIVE"] Plot label for
        the predicted negative values
    :type label_x_neg: str
    :param label_x_pos: [Default: "PREDICTED POSITIVE"] Plot label for
        the predicted positive values
    :type label_x_pos: str
    :param label_y_neg: [Default: "ACTUAL NEGATIVE"] Plot label for
        the actual negative values
    :type label_y_neg: str
    :param label_y_pos: [Default: "ACTUAL POSITIVE"] Plot label for
        the actual positive values
    :type label_y_pos: str
    :returns: Null
    :rtype: None
    """
    confusion = confusion_matrix(y_test, pred)
    # NOTE(review): `columns` carries the ACTUAL labels (used on the x axis)
    # and `index` the PREDICTED labels (used on the y axis), while sklearn's
    # confusion_matrix puts actual classes on rows and predictions on
    # columns — the axis labelling looks swapped; confirm intended
    # orientation before relying on the plot.
    columns = [label_y_neg, label_y_pos]
    index = [label_x_neg, label_x_pos]
    if scaled:
        # Row-normalise so each row of the matrix sums to 1.
        confusion_scaled = (confusion.astype("float") /
                            confusion.sum(axis=1)[:, np.newaxis])
        confusion = pd.DataFrame(
            confusion_scaled,
            index=index,
            columns=columns)
    else:
        confusion = pd.DataFrame(
            confusion,
            index=index,
            columns=columns)
    cmap = plt.get_cmap("Blues")
    plt.figure()
    plt.imshow(confusion, interpolation="nearest", cmap=cmap)
    plt.title("Confusion matrix")
    plt.colorbar()
    plt.xticks(np.arange(2), columns, rotation=45)
    plt.yticks(np.arange(2), index, rotation=45)
    # Cell text switches to white above this value so it stays readable on
    # dark cells.
    threshold = 0.5 if scaled else confusion.max().max() / 2
    for i, j in itertools.product(
            range(confusion.shape[0]),
            range(confusion.shape[1])):
        # Thousands-separated counts, or 4-decimal ratios when scaled.
        txt = "{:,}".format(confusion.iloc[i,j])
        if scaled: txt = "{:0.4f}".format(confusion.iloc[i,j])
        plt.text(j, i, txt,
                 horizontalalignment="center",
                 color=("white" if confusion.iloc[i,j] > threshold else "black"))
    plt.tight_layout()
    plt.show()
def balanced_train_test_split(
        x: pd.DataFrame,
        y: Union[np.ndarray, list],
        *cross_cols: str,
        categorical_cols: List[str] = [],
        min_per_group: int = 20,
        max_per_group: Optional[int] = None,
        fallback_type: str = "upsample",
        bins: int = 6,
        random_state: int = None
        ) -> Tuple[
            pd.DataFrame,
            np.ndarray,
            pd.DataFrame,
            np.ndarray,
            np.ndarray,
            np.ndarray]:
    """
    Splits the "x" DataFrame and "y" Array into train/test splits with a
    balanced number of test examples for each of the categories of the
    columns provided. For example, if the columns provided are "gender" and
    "loan", the resulting test split contains an equal number of examples for
    Male/Loan-Approved, Male/Loan-Rejected, Female/Loan-Approved and
    Female/Loan-Rejected. The "fallback_type" parameter controls what happens
    when a sub-group has fewer than min_per_group examples.

    Example
    -------
    .. code-block:: python
        x_train, y_train, x_test, y_test, train_idx, test_idx = \\
            xai.balanced_train_test_split(
                x, y, "gender",
                categorical_cols=categorical_cols, min_per_group=300,
                fallback_type="upsample")

    Args
    -----
    x :
        Pandas dataframe containing all the features in dataset
    y :
        Array containing "actual" labels for the dataset
    *cross_cols :
        One or more positional arguments (passed as *args) that are used to
        split the data into the cross product of their values
    categorical_cols :
        [Default: []] Columns within dataframe that are categorical; if
        empty they are inferred from dtypes
    min_per_group :
        [Default: 20] Number of test examples taken from each group
    max_per_group :
        [Default: None] Maximum number of test examples per group
    fallback_type :
        [Default: upsample] Behaviour when a group has fewer than
        min_per_group elements: "upsample" (sample with replacement),
        "ignore" (take all available) or "error" (raise)
    bins :
        [Default: 6] Number of bins to be used for numerical cols
    random_state:
        [Default: None] Random seed for the internal sampling

    Returns
    -------
    x_train : pd.DataFrame
        DataFrame containing training datapoints
    y_train : np.ndarray
        Array containing labels for training datapoints
    x_test : pd.DataFrame
        DataFrame containing test datapoints
    y_test : np.ndarray
        Array containing labels for test datapoints
    train_idx : np.ndarray
        Boolean array with True on Training indexes
    test_idx : np.ndarray
        Boolean array with True on Testing indexes
    """
    if not cross_cols:
        # BUG FIX: error message previously named imbalance_plot.
        raise TypeError("balanced_train_test_split requires at least 1 string column name")
    if min_per_group < 1:
        raise TypeError("min_per_group must be at least 1")
    if max_per_group and max_per_group < min_per_group:
        raise TypeError(f"min_per_group ({min_per_group}) must be less or equal than "
                        f"max_per_group ({max_per_group}) if max_per_group is provided.")
    if random_state is not None:
        # BUG FIX: random.setstate expects a state tuple from getstate(),
        # not a seed (TypeError), and pandas' .sample draws from NumPy's
        # global RNG anyway — seed that for reproducible splits.
        np.random.seed(random_state)
    tmp_df = x.copy()
    tmp_df["target"] = y
    cross = ["target"] + list(cross_cols)
    if not categorical_cols:
        categorical_cols = _infer_categorical(tmp_df)
    # TODO: Enable for non-categorical targets
    # BUG FIX: _infer_categorical returns a pandas Index; coerce to list so
    # the concatenation below cannot fail.
    categorical_cols = ["target"] + list(categorical_cols)
    grouped = group_by_columns(
        tmp_df,
        cross,
        bins=bins,
        categorical_cols=categorical_cols)

    def resample(x):
        # Per-group test sampling: cap at max_per_group, aim for
        # min_per_group, and apply the configured fallback otherwise.
        group_size = x.shape[0]
        if max_per_group:
            if group_size > max_per_group:
                return x.sample(max_per_group)
        if group_size > min_per_group:
            return x.sample(min_per_group)
        if fallback_type == "upsample":
            return x.sample(min_per_group, replace=True)
        elif fallback_type == "ignore":
            return x
        elif fallback_type == "error":
            raise ValueError("Number of samples for group are not enough,"
                             " and fallback_type provided was 'error'")
        else:
            # BUG FIX: previously raised a plain string (itself a TypeError)
            # and listed the wrong valid options.
            raise ValueError(f"Sampling type provided not found: given {fallback_type}, "
                             "expected: 'upsample', 'ignore' or 'error'")

    group = grouped.apply(resample)
    # The last level of the resulting MultiIndex holds the original row
    # positions of the sampled (test) rows.
    selected_idx = [g[-1] for g in group.index.values]
    train_idx = np.full(tmp_df.shape[0], True, dtype=bool)
    train_idx[selected_idx] = False
    test_idx = np.full(tmp_df.shape[0], False, dtype=bool)
    test_idx[selected_idx] = True
    df_train = tmp_df.iloc[train_idx]
    df_test = tmp_df.iloc[test_idx]
    x_train = df_train.drop("target", axis=1)
    y_train = df_train["target"].values
    x_test = df_test.drop("target", axis=1)
    y_test = df_test["target"].values
    return x_train, y_train, x_test, y_test, train_idx, test_idx
def convert_probs(
        probs: np.ndarray,
        threshold: float = 0.5
        ) -> np.ndarray:
    """
    Converts all the probabilities in the array provided into binary labels:
    1 where the probability is at or above the threshold, 0 otherwise.

    Example
    ---------
    .. code-block:: python
        probs = np.array([0.1, 0.2, 0.7, 0.8, 0.6])
        labels = xai.convert_probs(probs, threshold=0.65)
        print(labels)
        > [0, 0, 1, 1, 0]

    Args
    -------
    probs :
        Numpy array containing floats between 0 and 1
    threshold :
        Probabilities greater than or equal to this value map to 1

    Returns
    ----------
    : np.ndarray
        Numpy array containing the binary labels
    """
    above_threshold = probs >= threshold
    return above_threshold.astype(int)
def evaluation_metrics(
        y_valid,
        y_pred
        ) -> Dict[str, float]:
    """
    Calculates model performance metrics (accuracy, precision, recall, etc)
    from the actual and predicted labels provided.

    Example
    ---------
    .. code-block:: python
        metrics = xai.evaluation_metrics(y_actual, y_predicted)
        for k,v in metrics.items():
            print(f"{k}: {v}")
        > precision: 0.8,
        > recall: 0.9,
        > specificity: 0.7,
        > accuracy: 0.8,
        > auc: 0.7,
        > f1: 0.8

    Args
    -------
    y_valid :
        Numpy array with the actual binary labels for the datapoints
    y_pred :
        Numpy array with the predicted binary labels for the datapoints

    Returns
    ----------
    : Dict[str, float]
        Dictionary with keys precision, recall, specificity, accuracy,
        auc and f1
    """
    actual_pos = y_valid == 1
    actual_neg = y_valid == 0
    TP = np.sum(y_pred[actual_pos])
    TN = np.sum(y_pred[actual_neg] == 0)
    FP = np.sum(y_pred[actual_neg])
    FN = np.sum(y_pred[actual_pos] == 0)

    def safe_ratio(num, den):
        # `or 0.001` guards against division by zero (as in the original).
        return num / (den or 0.001)

    precision = safe_ratio(TP, TP + FP)
    recall = safe_ratio(TP, TP + FN)
    specificity = safe_ratio(TN, TN + FP)
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    f1 = safe_ratio(2 * (precision * recall), precision + recall)
    try:
        auc = roc_auc_score(y_valid, y_pred)
    except ValueError:
        # AUC is undefined when only one class is present.
        auc = 0
    return {
        "precision": precision,
        "recall": recall,
        "specificity": specificity,
        "accuracy": accuracy,
        "auc": auc,
        "f1": f1
    }
def metrics_plot(
        target: np.ndarray,
        predicted: np.ndarray,
        df: pd.DataFrame = pd.DataFrame(),
        cross_cols: List[str] = [],
        categorical_cols: List[str] = [],
        bins: int = 6,
        plot: bool = True,
        exclude_metrics: List[str] = [],
        plot_threshold: float = 0.5
        ) -> pd.DataFrame:
    """
    Creates a plot that displays statistical metrics including precision,
    recall, accuracy, auc, f1 and specificity for each of the groups created
    for the columns provided by cross_cols. For example, if the columns passed
    are "gender" and "age", the resulting plot will show the statistical metrics
    for Male and Female for each binned group.

    Example
    ---------
    .. code-block:: python
        df_metrics = xai.metrics_plot(
            target,
            predicted,
            df=df_data,
            cross_cols=["gender", "age"],
            bins=3)

    Args
    -------
    target:
        Numpy array containing the target labels for the datapoints
    predicted :
        Numpy array containing the predicted labels for the datapoints
    df :
        Pandas dataframe containing all the features for the datapoints.
        It can be empty if only looking to calculate global metrics, but
        if you would like to compute for categories across columns, the
        columns you are grouping by need to be provided
    cross_cols :
        Contains the columns that you would like to use to cross the values
    bins :
        [Default: 6] The number of bins in which you'd like
        numerical columns to be split
    plot :
        [Default: True] If True a plot will be drawn with the results
    exclude_metrics :
        These are the metrics that you can choose to exclude if you only
        want specific ones (for example, excluding "f1", "specificity", etc)
    plot_threshold:
        The percentage that will be used to draw the threshold line in the plot
        which would provide guidance on what is the ideal metrics to achieve.

    Returns
    ----------
    : pd.DataFrame
        Pandas Dataframe containing all the metrics for the groups provided
    """
    # Split the data into one sub-dataframe per cross-column group (or a
    # single "target" group when no cross_cols are given).
    grouped = _group_metrics(
        target,
        predicted,
        df,
        cross_cols,
        categorical_cols,
        bins,
        target_threshold=plot_threshold)
    prfs = []     # one list of metric values per group
    classes = []  # group labels, used as dataframe columns
    for group, group_df in grouped:
        group_valid = group_df['target'].values
        group_pred = group_df["predicted"].values
        metrics_dict = \
            evaluation_metrics(group_valid, group_pred)
        # Remove metrics as specified by params
        [metrics_dict.pop(k, None) for k in exclude_metrics]
        prfs.append(list(metrics_dict.values()))
        classes.append(str(group))
    # NOTE(review): metrics_dict is read after the loop; if `grouped` were
    # ever empty this would raise NameError — confirm callers always
    # produce at least one group.
    prfs_cols = metrics_dict.keys()
    prfs_df = pd.DataFrame(
        np.array(prfs).transpose(),
        columns=classes,
        index=prfs_cols)
    if plot:
        prfs_df.plot.bar(figsize=(20,5))
        # Red guide line at the threshold, green line at the perfect score.
        lp = plt.axhline(0.5, color='r')
        lp = plt.axhline(1, color='g')
    return prfs_df
def roc_plot(
        target,
        predicted,
        df=pd.DataFrame(),
        cross_cols=[],
        categorical_cols=[],
        bins=6,
        plot=True):
    """Plot one ROC curve per cross-column group.

    Thin wrapper that delegates to _curve with curve_type="roc"; see _curve
    for the parameter semantics and the (r1s, r2s) return value.
    """
    return _curve(
        target,
        predicted,
        curve_type="roc",
        df=df,
        cross_cols=cross_cols,
        categorical_cols=categorical_cols,
        bins=bins,
        plot=plot)
def pr_plot(
        target,
        predicted,
        df=pd.DataFrame(),
        cross_cols=[],
        categorical_cols=[],
        bins=6,
        plot=True):
    """Plot one precision-recall curve per cross-column group.

    Thin wrapper that delegates to _curve with curve_type="pr"; see _curve
    for the parameter semantics and the (r1s, r2s) return value.
    """
    return _curve(
        target,
        predicted,
        curve_type="pr",
        df=df,
        cross_cols=cross_cols,
        categorical_cols=categorical_cols,
        bins=bins,
        plot=plot)
def _curve(
        target,
        predicted,
        curve_type="roc",
        df=pd.DataFrame(),
        cross_cols=[],
        categorical_cols=[],
        bins=6,
        plot=True):
    """Shared implementation behind roc_plot and pr_plot.

    Computes (and optionally plots) one curve per cross-column group.

    :param target: actual labels for the datapoints
    :param predicted: predicted scores/labels for the datapoints
    :param curve_type: "roc" or "pr"
    :param df: dataframe with the grouping columns (may be empty)
    :param cross_cols: columns used to split the data into groups
    :param categorical_cols: columns to treat as categorical when grouping
    :param bins: number of bins for numerical grouping columns
    :param plot: if True, draw the curves
    :returns: tuple (r1s, r2s) of per-group curve coordinate arrays
    """
    if curve_type == "roc":
        curve_func = roc_curve
        y_label = 'False Positive Rate'
        x_label = 'True Positive Rate'
        p1 = [0,1]
        p2 = [0,1]
        y_lim = [0, 1.05]
        legend_loc = "lower right"
    elif curve_type == "pr":
        curve_func = precision_recall_curve
        y_label = "Recall"
        x_label = "Precision"
        p1 = [1,0]
        p2 = [0.5,0.5]
        y_lim = [0.25, 1.05]
        legend_loc = "lower left"
    else:
        # BUG FIX: the previous message interpolated curve_func, which is
        # undefined on this branch and raised NameError instead.
        raise ValueError("Curve function provided not valid. "
                         f" curve_type provided: {curve_type}")
    grouped = _group_metrics(
        target,
        predicted,
        df,
        cross_cols,
        categorical_cols,
        bins)
    if plot:
        plt.figure()
    # BUG FIX: `r1s = r2s = []` bound both names to the SAME list, so the
    # returned pair contained interleaved duplicates of all coordinates.
    r1s = []
    r2s = []
    for group, group_df in grouped:
        group_valid = group_df["target"]
        group_pred = group_df["predicted"]
        r1, r2, _ = curve_func(group_valid, group_pred)
        r1s.append(r1)
        r2s.append(r2)
        if plot:
            if curve_type == "pr": r1,r2 = r2,r1
            plt.plot(r1, r2, label=group)
            plt.plot(p1, p2, 'k--')
    if plot:
        plt.xlim([0.0, 1.0])
        plt.ylim(y_lim)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.legend(loc=legend_loc)
        plt.show()
    return r1s, r2s
def _infer_categorical(df):
categorical_cols = df.select_dtypes(
include=[np.object, np.bool, np.int8]).columns
logging.warn("No categorical_cols passed so inferred using np.object, "
f"np.int8 and np.bool: {categorical_cols}. If you see an error"
" these are not "
"correct, please provide them as a string array as: "
"categorical_cols=['col1', 'col2', ...]")
return categorical_cols
def _group_metrics(
        target,
        predicted,
        df,
        cross_cols,
        categorical_cols,
        bins,
        target_threshold=None):
    """Attach target/predicted columns to a copy of `df` and return an
    iterable of (group_key, sub_dataframe) pairs.

    When no cross_cols are given, a single pseudo-group keyed "target" is
    returned. Float predictions are binarised with convert_probs when a
    target_threshold is provided.
    """
    unknown = [c for c in cross_cols if c not in df.columns]
    if unknown:
        raise KeyError("Cross columns don't match columns in dataframe provided.")
    frame = df.copy()
    frame["target"] = target
    frame["predicted"] = predicted
    # Convert probability scores into hard classes before grouping.
    if target_threshold and frame["predicted"].dtype.kind == 'f':
        frame["predicted"] = convert_probs(
            frame["predicted"], threshold=target_threshold)
    if not categorical_cols and cross_cols:
        categorical_cols = _infer_categorical(frame)
    if not cross_cols:
        return [("target", frame),]
    return group_by_columns(
        frame,
        cross_cols,
        bins=bins,
        categorical_cols=categorical_cols)
def smile_imbalance(
        y_test,
        probs,
        threshold=0.5,
        manual_review=None,
        display_breakdown=False,
        bins=10):
    """Plot a stacked bar histogram of predicted probabilities split into
    correct/incorrect predictions (or TP/TN/FP/FN when display_breakdown is
    True), with an optional "manual review" band between manual_review and
    threshold, and return the per-datapoint breakdown dataframe.

    :param y_test: binary actual labels
    :param probs: predicted probabilities
    :param threshold: [Default: 0.5] probability cut-off for class 1; also
        drawn as a vertical line
    :param manual_review: [Default: None] lower bound of the probability
        band flagged for manual review (must be below threshold to have
        an effect, given the band test below)
    :param display_breakdown: [Default: False] show TP/TN/FP/FN instead of
        correct/incorrect
    :param bins: [Default: 10] number of probability bins on the x axis
    :returns: dataframe with per-datapoint flags (as 0/1 ints)
    """
    # TODO: Change function so it only iterates once
    preds = convert_probs(probs, threshold).flatten()
    d = pd.DataFrame(probs)
    d.columns = ["probs"]
    d["preds"] = preds
    d["target"] = y_test
    # NOTE(review): `tps` is assigned but never read afterwards.
    tps = np.full(y_test.shape, False, bool)
    d["true-positives"] = np.full(y_test.shape[0], False, bool)
    d["true-negatives"] = np.full(y_test.shape[0], False, bool)
    d["false-positives"] = np.full(y_test.shape[0], False, bool)
    d["false-negatives"] = np.full(y_test.shape[0], False, bool)
    d["manual-review"] = np.full(y_test.shape[0], False, bool)
    # NOTE(review): chained indexing (d[col].loc[mask] = ...) relies on
    # pandas writing through the intermediate Series; behaviour kept as-is,
    # but confirm on recent pandas (SettingWithCopy territory).
    d["true-positives"].loc[y_test == 1] = preds[y_test == 1] == 1
    d["true-negatives"].loc[y_test == 0] = preds[y_test == 0] == 0
    d["false-positives"].loc[y_test == 0] = preds[y_test == 0] == 1
    d["false-negatives"].loc[y_test == 1] = preds[y_test == 1] == 0
    d["correct"] = d["true-positives"].values
    d["correct"].loc[d["true-negatives"] == 1] = True
    d["incorrect"] = d["false-positives"].values
    d["incorrect"].loc[d["false-negatives"] == 1] = True
    if display_breakdown:
        disp_cols = ["true-positives",
                     "true-negatives",
                     "false-positives",
                     "false-negatives"]
    else:
        disp_cols = ["correct", "incorrect"]
    if manual_review:
        # Flag the (manual_review, threshold) probability band and exclude
        # those rows from the correct/incorrect tallies.
        gt = probs > manual_review
        lt = probs < threshold
        d["manual-review"] = gt * lt > 0
        if display_breakdown:
            d["true-positives"].loc[d["manual-review"]] = False
            d["true-negatives"].loc[d["manual-review"]] = False
            d["false-positives"].loc[d["manual-review"]] = False
            d["false-negatives"].loc[d["manual-review"]] = False
        else:
            d["correct"].loc[d["manual-review"]] = False
            d["incorrect"].loc[d["manual-review"]] = False
        disp_cols.append("manual-review")
    # Cast boolean flags to ints so the grouped sum below counts them.
    d["true-positives"] = d["true-positives"].astype(int)
    d["true-negatives"] = d["true-negatives"].astype(int)
    d["false-positives"] = d["false-positives"].astype(int)
    d["false-negatives"] = d["false-negatives"].astype(int)
    d["correct"] = d["correct"].astype(int)
    d["incorrect"] = d["incorrect"].astype(int)
    grouped = group_by_columns(d, ["probs"], bins=bins)
    ax = grouped[disp_cols].sum().plot.bar(stacked=True, figsize=(15,5))
    # Map the probability thresholds onto the categorical bar-axis range.
    lim = ax.get_xlim()
    ran = lim[1] - lim[0]
    thre = ran*threshold + lim[0]
    plt.axvline(thre)
    if manual_review:
        manr = ran*manual_review + lim[0]
        plt.axvline(manr)
    # TODO: Need to fix this hack and use the index
    ax_xticks = [label.get_text().split()[1][:-1] for label in ax.get_xticklabels()]
    ax.set_xticklabels(ax_xticks)
    return d
def feature_importance(x, y, func, repeat=10, plot=True):
    """Permutation feature importance: for each column, shuffle its values,
    re-score the model with ``func(x, y)`` and record the drop from the
    baseline score, averaged over ``repeat`` rounds.

    :param x: DataFrame of input features (temporarily mutated in place)
    :param y: labels passed through to ``func``
    :param func: scoring callable ``func(x, y) -> float``
    :param repeat: [Default: 10] number of shuffling rounds to average over
    :param plot: [Default: True] if True, draw a sorted horizontal bar chart
    :returns: single-row DataFrame with mean importance per column
    """
    base_score = func(x, y)
    imp = [0] * len(x.columns)
    for i in range(repeat):
        for j, c in enumerate(x.columns):
            # Shuffle the column in place, score, then restore the original.
            # NOTE(review): shuffling through `x[c].values` assumes the
            # Series exposes a writable view of the underlying data —
            # confirm for mixed-dtype / non-contiguous frames.
            tmp = x[c].values.copy()
            np.random.shuffle(x[c].values)
            score = func(x, y)
            x[c] = tmp
            # A larger drop in score means the feature mattered more.
            imp[j] += base_score - score
    imp = [a/repeat for a in imp]
    imp_df = pd.DataFrame(data=[imp], columns=x.columns)
    if plot:
        imp_df.sum().sort_values().plot.barh()
    return imp_df
|
/home/runner/.cache/pip/pool/6f/79/0f/8599f6c360fdc1e9e096d5f99c621943711150e43953e2b9bc728f75e8
|
import aiohttp
import os
OOTR_BASE_URL = os.environ.get('OOTR_BASE_URL', 'https://ootrandomizer.com')
OOTR_API_KEY = os.environ.get('OOTR_API_KEY')
async def roll_ootr(settings, version='6.1.0', encrypt=True):
    """Request a new seed from the OoT Randomizer API and return the parsed
    JSON response.

    :param settings: randomizer settings payload, sent as the JSON body
    :param version: [Default: '6.1.0'] randomizer version query parameter
    :param encrypt: [Default: True] sent as lowercase "true"/"false"
    :raises aiohttp.ClientResponseError: on non-2xx responses
        (raise_for_status=True)
    """
    query = {
        "key": OOTR_API_KEY,
        "version": version,
        "encrypt": str(encrypt).lower()
    }
    async with aiohttp.request(
        method='post',
        url=f"{OOTR_BASE_URL}/api/sglive/seed/create",
        raise_for_status=True,
        json=settings,
        params=query
    ) as resp:
        return await resp.json()
|
import pickle, pprint, math
import sys
import pdb
from collections import defaultdict as ddict
import operator
import numpy
import operator
import time
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('EVAL-EMBED')
def incoming_neighbours(entity, graph):
    """Return every head entity that links into `entity`.

    For each relation under which `entity` appears as a tail, collect all
    head entities recorded for that relation (duplicates preserved).

    :param entity: entity id to look up
    :param graph: graph dict as produced by make_graph
    :returns: list of neighbouring entity ids
    """
    neighbours = []
    for relation in graph['incoming'][entity].keys():
        neighbours.extend(graph['relations_tail'][relation].keys())
    return neighbours
def outgoing_neighbours(entity, graph):
    """Return every tail entity that `entity` links out to.

    For each relation under which `entity` appears as a head, collect all
    tail entities recorded for that relation (duplicates preserved).

    :param entity: entity id to look up
    :param graph: graph dict as produced by make_graph
    :returns: list of neighbouring entity ids
    """
    neighbours = []
    for relation in graph['outgoing'][entity].keys():
        neighbours.extend(graph['relations_head'][relation].keys())
    return neighbours
def make_graph(triples, N, M):
    """Build four adjacency indexes from (head, tail, relation) triples.

    :param triples: iterable of triples, indexed as t[0]=head, t[1]=tail,
        t[2]=relation
    :param N: number of entities (size of the per-entity indexes)
    :param M: number of relations (size of the per-relation indexes)
    :returns: dict with keys 'outgoing' (entity -> relation -> tails),
        'incoming' (entity -> relation -> heads),
        'relations_head' (relation -> head -> tails) and
        'relations_tail' (relation -> tail -> heads)
    """
    outgoing = [ddict(list) for _ in range(N)]
    incoming = [ddict(list) for _ in range(N)]
    by_head = [ddict(list) for _ in range(M)]
    by_tail = [ddict(list) for _ in range(M)]
    for t in triples:
        head, tail, relation = t[0], t[1], t[2]
        outgoing[head][relation].append(tail)
        incoming[tail][relation].append(head)
        by_head[relation][head].append(tail)
        by_tail[relation][tail].append(head)
    return {'outgoing': outgoing,
            'incoming': incoming,
            'relations_head': by_head,
            'relations_tail': by_tail}
def processFile(datafile):
    """Parse a text embeddings file into a numpy array.

    Expected record format is ``<id>,[v1 v2 v3 ...]`` with records separated
    by the closing ``]``; whatever follows the final ``]`` (a trailing
    newline) is discarded.

    :param datafile: path to the embeddings text file
    :returns: numpy array of shape (num_records, dim)
    """
    with open(datafile, 'r') as fin:
        raw = fin.read()
    records = raw.split(']')
    # Drop the remainder after the last ']' (extra newline).
    records.pop()
    vectors = []
    for record in records:
        values = record.split(',[')[1].split()
        vectors.append([float(v) for v in values])
    return numpy.array(vectors)
def processPickleFile(datafile):
    """Load and return the pickled object stored in `datafile`."""
    with open(datafile, 'rb') as handle:
        return pickle.load(handle)
def l1Distance(em1, em2):
    """Return per-row L1 distances between two paired embedding sequences.

    :param em1: sequence of numpy vectors
    :param em2: sequence of numpy vectors, paired element-wise with em1
    :returns: list of (index, distance) tuples
    """
    distances = []
    for idx, (v1, v2) in enumerate(zip(em1, em2)):
        # Accumulate sequentially (matches the original summation order).
        total = 0
        for diff in numpy.abs(v1 - v2):
            total += diff
        distances.append((idx, total))
    return distances
# cosine similarity function
# http://stackoverflow.com/questions/1746501/can-someone-give-an-example-of-cosine-similarity-in-a-very-simple-graphical-wa
def cosTheta(v1, v2):
    """Return the cosine similarity between vectors v1 and v2.

    Raises ZeroDivisionError if either vector has zero magnitude.
    """
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1 = math.sqrt(sum(a * a for a in v1))
    norm2 = math.sqrt(sum(b * b for b in v2))
    return dot / (norm1 * norm2)
def cosineMatrix(em):
    """Return the symmetric N x N matrix of pairwise cosine similarities.

    The diagonal is set to 1 directly; each off-diagonal pair is computed
    once and mirrored.

    :param em: sequence of embedding vectors
    :returns: numpy float array of shape (N, N)
    """
    N = len(em)
    # BUG FIX: numpy.full((N, N), 2) created an *integer* matrix, so every
    # cosine value assigned below was silently truncated to 0 or 1. Use a
    # float fill (2.0 is a sentinel outside the valid cosine range [-1, 1]).
    mat = numpy.full((N, N), 2.0)
    for i in range(N):
        mat[i][i] = 1
    for i in range(N):
        for j in range(i + 1, N):
            mat[i][j] = mat[j][i] = cosTheta(em[i], em[j])
    return mat
def similarity(em, mat, TOPK, training_graph, test_graph, flog):
    """For each entity, rank all other entities by cosine similarity and
    label each of the TOPK most similar candidates with whether it is a
    neighbour in the training graph ("TRAIN"), the test graph ("TEST"), or
    unknown.

    :param em: sequence of entity embeddings (only its length is used here)
    :param mat: precomputed similarity matrix, indexed mat[i][j]
    :param TOPK: number of top-ranked candidates kept per entity
    :param training_graph: graph dict as returned by make_graph
    :param test_graph: graph dict as returned by make_graph
    :param flog: open writable file handle for progress logging
    :returns: per-entity list of ((entity, cosine), known[, split]) tuples
    """
    out = [[] for _ in range(len(em))]
    for i in range(len(em)):
        flog.write("Entity (%d): " % (i))
        log.info("Entity (%d): " % (i))
        incoming_train = incoming_neighbours(i, training_graph)
        outgoing_train = outgoing_neighbours(i, training_graph)
        incoming_test = incoming_neighbours(i, test_graph)
        outgoing_test = outgoing_neighbours(i, test_graph)
        cos_dict = {j: mat[i][j] for j in range(len(em)) if j != i}
        sorted_dict = sorted(cos_dict.items(), key=operator.itemgetter(1), reverse=True)
        flog.write("cosine results collected and sorted, ")
        log.info("cosine results collected and sorted, ")
        # Add one more column for training/test/Unknown
        for rank, candidate in enumerate(sorted_dict):
            if rank == TOPK:
                break
            # BUG FIX: membership was previously tested with the rank
            # counter instead of the candidate entity id, so the
            # TRAIN/TEST annotations were essentially random.
            entity = candidate[0]
            if entity in incoming_train or entity in outgoing_train:
                out[i].append((candidate, True, "TRAIN"))
            elif entity in incoming_test or entity in outgoing_test:
                out[i].append((candidate, True, "TEST"))
            else:
                out[i].append((candidate, False))
        flog.write(" Table of evaluation built\n")
        log.info(" Table of evaluation built\n")
    return out
if __name__ == '__main__':
    # CLI driver: load embeddings + KB, build graphs, compute the cosine
    # matrix, rank per-entity neighbours and write the evaluation output.
    if len(sys.argv) != 4:
        # BUG FIX: sys.arg -> sys.argv (the typo raised NameError on the
        # usage/error path).
        print("Usage: python %s <embeddings.txt> <kb.bin> <TOPK>" % (sys.argv[0]))
        sys.exit()
    logfile = sys.argv[1] + ".log"
    flog = open(logfile, 'w')
    start = time.time()
    embeddings = processFile(sys.argv[1])
    kb = processPickleFile(sys.argv[2])
    flog.write("Time to process files = %ds" % (time.time() - start))
    log.info("Time to process files = %ds" % (time.time() - start))
    TOPK = int(sys.argv[3])
    N = len(kb['entities'])
    M = len(kb['relations'])
    training = kb['train_subs']
    valid = kb['valid_subs']
    test = kb['test_subs']
    # this is unfiltered evaluation (because the triples from the training
    # sets are supposed to be guessed correctly)
    #dataset = training + valid + test
    start = time.time()
    training_graph = make_graph(training, N, M)
    test_graph = make_graph(test, N, M)
    flog.write("Time to build graphs from triples = %ds\n" % (time.time() - start))
    log.info("Time to build graphs from triples = %ds\n" % (time.time() - start))
    if N != len(embeddings):
        print("Number of entities don't match (embeddings file and database)")
        sys.exit()
    start = time.time()
    mat = cosineMatrix(embeddings)
    flog.write("Time to make cosine distance matrix = %ds\n" % (time.time() - start))
    log.info("Time to make cosine distance matrix = %ds\n" % (time.time() - start))
    start = time.time()
    cosines = similarity(embeddings, mat, TOPK, training_graph, test_graph, flog)
    flog.write("Time to sort and rank best matching objects = %ds\n" % (time.time() - start))
    log.info("Time to sort and rank best matching objects = %ds\n" % (time.time() - start))
    start = time.time()
    outFile = sys.argv[2] + "-" + "TOP-" + str(TOPK) + ".eval.out"
    data = "{"
    for i, pairs in enumerate(cosines):
        data += str(i) + ": {"
        for p in pairs:
            data += str(p) + "\n"
        data += "}"
    data += "}"
    with open(outFile, 'w') as fout:
        fout.write(data)
    flog.write("Time to write out file = %ds" % (time.time() - start))
    log.info("Time to write out file = %ds" % (time.time() - start))
    # Close the log handle explicitly (previously leaked).
    flog.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.