hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a309bbed8b8d838c5cf768de6a3518266a5473ad | 13,539 | py | Python | mmdet/models/dense_heads/anchor_free_head.py | DongshuoYin/garbage_dump_detection | 1087f2cd35f781c5d973db5a59ecf8eb3bfa2537 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/anchor_free_head.py | DongshuoYin/garbage_dump_detection | 1087f2cd35f781c5d973db5a59ecf8eb3bfa2537 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/anchor_free_head.py | DongshuoYin/garbage_dump_detection | 1087f2cd35f781c5d973db5a59ecf8eb3bfa2537 | [
"Apache-2.0"
] | null | null | null | from abc import abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

    # Checkpoint format version; ``_load_from_state_dict`` renames keys of
    # checkpoints saved before any version tag was written.
    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None):
        super(AnchorFreeHead, self).__init__()
        self.num_classes = num_classes
        # One output channel per foreground class (no background channel).
        self.cls_out_channels = num_classes
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()

    def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            # Optionally replace the last tower conv with deformable conv v2.
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            # Optionally replace the last tower conv with deformable conv v2.
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_predictor(self):
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        # 4 channels: one box offset per side (e.g. l, t, r, b in FCOS-style heads).
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        # Bias the classification logits so initial foreground prob is ~0.01,
        # which stabilizes focal-loss training.
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)
        normal_init(self.conv_reg, std=0.01)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Hack some keys of the model state dict so that can load checkpoints
        of previous version."""
        version = local_metadata.get('version', None)
        if version is None:
            # the key is different in early versions
            # for example, 'fcos_cls' become 'conv_cls' now
            bbox_head_keys = [
                k for k in state_dict.keys() if k.startswith(prefix)
            ]
            ori_predictor_keys = []
            new_predictor_keys = []
            # e.g. 'fcos_cls' or 'fcos_reg'
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    # NOTE(review): ``assert NotImplementedError`` is always
                    # truthy, so this is a no-op; unmatched keys simply fall
                    # through and are dropped from ``ori_predictor_keys``
                    # below. Probably intended to be ``raise`` — confirm.
                    assert NotImplementedError
                if conv_name is not None:
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    # Key did not match any predictor; leave it untouched.
                    ori_predictor_keys.pop(-1)
            # Rename matched keys in place so the parent loader finds them.
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(
                    ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually contain classification scores and bbox predictions.
                cls_scores (list[Tensor]): Box scores for each scale level,
                    each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * 4.
        """
        # forward_single also returns cls_feat/reg_feat; keep only the first
        # two result lists here.
        return multi_apply(self.forward_single, feats)[:2]

    def forward_single(self, x):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.

        Returns:
            tuple: Scores for each class, bbox predictions, features
                after classification and regression conv layers, some
                models needs these features like FCOS.
        """
        cls_feat = x
        reg_feat = x
        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)
        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return cls_score, bbox_pred, cls_feat, reg_feat

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """
        raise NotImplementedError

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   img_metas,
                   cfg=None,
                   rescale=None):
        """Transform network output for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_points * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_points * 4, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space
        """
        raise NotImplementedError

    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerss targets for points
        in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
        """
        raise NotImplementedError

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points of a single scale level."""
        h, w = featmap_size
        x_range = torch.arange(w, dtype=dtype, device=device)
        y_range = torch.arange(h, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return y, x

    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.

        Returns:
            tuple: points of each image.
        """
        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self._get_points_single(featmap_sizes[i], self.strides[i],
                                        dtype, device, flatten))
        return mlvl_points

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
| 39.703812 | 82 | 0.566216 |
5112299e9268ddcdd966f52bff221591d2417c87 | 4,743 | py | Python | anydrive_typing_aid/src/anydrive_typing_aid/controllers/base_controller.py | ethz-asl/typing_aid | ee44575b5198c34162b6f38dda798d97ae10f7d7 | [
"BSD-3-Clause"
] | null | null | null | anydrive_typing_aid/src/anydrive_typing_aid/controllers/base_controller.py | ethz-asl/typing_aid | ee44575b5198c34162b6f38dda798d97ae10f7d7 | [
"BSD-3-Clause"
] | null | null | null | anydrive_typing_aid/src/anydrive_typing_aid/controllers/base_controller.py | ethz-asl/typing_aid | ee44575b5198c34162b6f38dda798d97ae10f7d7 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import rospy
from std_msgs.msg import String
import anydrive_typing_aid.utils.utilities as utilities
class BaseController:
    """Base class for ANYdrive typing-aid controllers.

    Subclasses implement :meth:`lifting_callback` (reacting to ``lift_arm``
    messages) and :meth:`individual_step` (producing per-tick drive commands).
    """

    def __init__(self, drv_interface, parameters, save_dir):
        # Hardware/driver abstraction used to read state and send commands.
        self.drv_interface = drv_interface
        # parameters: dict; must contain at least "rate_hz" and the torque
        # limits used in limit_checking.
        self.parameters = parameters
        self.save_dir = save_dir
        self.log_time_str = utilities.get_time_string()
        self.rate_hz = parameters["rate_hz"]
        self.sampling_time = 1.0 / self.rate_hz
        self.controller_start_time = rospy.get_time()
        # Set True (via the "q" message) to make step() report shutdown.
        self.quit = False
        log_strings = [
            "t",
            "p_cmd",
            "v_cmd",
            "tau_cmd",
            "lift_running",
            "p_meas",
            "v_meas",
            "tau_meas",
            "i_meas",
        ]
        # One time-series list per logged quantity, filled in step().
        self.log = {log_string: list() for log_string in log_strings}

    def subscribe(self):
        """Subscribe to the ``lift_arm`` command topic."""
        rospy.Subscriber("lift_arm", String, self.lifting_callback_base)

    def lifting_callback(self, msg):
        """Handle a non-quit ``lift_arm`` message. Implemented by subclasses."""
        raise NotImplementedError

    def lifting_callback_base(self, msg):
        # "q" requests shutdown; anything else is forwarded to the subclass.
        if msg.data == "q":
            self.quit = True
        else:
            self.lifting_callback(msg)

    def stop(self):
        """Stop the drive and persist parameters and the collected log."""
        self.drv_interface.stop_drive()
        utilities.save_parameters(self)
        utilities.save_log(self)

    def limit_checking(self, state):
        """Return False (and warn) if measured torque exceeds configured limits.

        ``state[2]`` is the measured torque (see step()).
        """
        if (
            state[2] < self.parameters["tau_lower_limit"]
            or state[2] > self.parameters["tau_upper_limit"]
        ):
            rospy.logwarn("Torque limit exceeded")
            return False
        return True

    def compute_trajectory(
        self,
        lower_y,
        upper_y,
        duration_up,
        steepness_up,
        duration_down,
        steepness_down,
        duration_const,
        use_depression=False,
        depression_y=None,
        depression_proportion=0.5,
    ):
        """Build a ramp-up / hold / ramp-down trajectory.

        The trajectory rises sigmoidally from ``lower_y`` to ``upper_y``,
        holds for ``duration_const``, then descends back to ``lower_y``.
        With ``use_depression`` the descent passes through ``depression_y``,
        splitting the down-ramp at ``depression_proportion`` of its duration.

        Returns:
            tuple: (t, y, dy) sample times, values and numerical derivative.
        """
        if use_depression:
            assert depression_y is not None
        t_ramp_up, y_ramp_up = utilities.sigmoid(
            0.0, duration_up, lower_y, upper_y, steepness_up, self.sampling_time,
        )
        t_const_start = t_ramp_up[-1] + self.sampling_time
        t_const, y_const = utilities.const(
            upper_y, t_const_start, t_const_start + duration_const, self.sampling_time,
        )
        # The constant segment may be empty when duration_const < sampling_time.
        if len(t_const) > 0:
            t_ramp_down_start = t_const[-1] + self.sampling_time
        else:
            t_ramp_down_start = t_const_start
        if not use_depression:
            t_ramp_down, y_ramp_down = utilities.sigmoid(
                t_ramp_down_start,
                t_ramp_down_start + duration_down,
                upper_y,
                lower_y,
                steepness_down,
                self.sampling_time,
            )
        else:
            assert depression_proportion >= 0.0 and depression_proportion <= 1.0
            # First part of the descent: upper_y -> depression_y.
            t_ramp_down_1, y_ramp_down_1 = utilities.sigmoid(
                t_ramp_down_start,
                t_ramp_down_start + depression_proportion * duration_down,
                upper_y,
                depression_y,
                steepness_down,
                self.sampling_time,
            )
            t_ramp_down_2_start = t_ramp_down_1[-1] + self.sampling_time
            # Second part: depression_y -> lower_y.
            t_ramp_down_2, y_ramp_down_2 = utilities.sigmoid(
                t_ramp_down_2_start,
                t_ramp_down_2_start + (1 - depression_proportion) * duration_down,
                depression_y,
                lower_y,
                steepness_down,
                self.sampling_time,
            )
            t_ramp_down = np.concatenate((t_ramp_down_1, t_ramp_down_2))
            y_ramp_down = np.concatenate((y_ramp_down_1, y_ramp_down_2))
        t = np.concatenate((t_ramp_up, t_const, t_ramp_down))
        y = np.concatenate((y_ramp_up, y_const, y_ramp_down))
        dy = utilities.compute_derivative(y, self.sampling_time)
        return t, y, dy

    def step(self):
        """Run one control tick: read state, check limits, command, and log.

        Returns:
            bool: False if a limit was exceeded or quit was requested,
            True otherwise.
        """
        # state layout (by usage below): [position, velocity, torque, current].
        state = self.drv_interface.get_state()
        res = self.limit_checking(state)
        if not res or self.quit:
            return False
        current_time = rospy.get_time()
        p_cmd, v_cmd, tau_cmd, lift_running = self.individual_step(current_time, state)
        self.log["t"].append(current_time - self.controller_start_time)
        self.log["p_cmd"].append(p_cmd)
        self.log["v_cmd"].append(v_cmd)
        self.log["tau_cmd"].append(tau_cmd)
        self.log["lift_running"].append(lift_running)
        self.log["p_meas"].append(state[0])
        self.log["v_meas"].append(state[1])
        self.log["tau_meas"].append(state[2])
        self.log["i_meas"].append(state[3])
        return True

    def individual_step(self, current_time, state):
        """Compute (p_cmd, v_cmd, tau_cmd, lift_running) for one tick.

        Implemented by subclasses.
        """
        raise NotImplementedError
| 32.9375 | 87 | 0.591187 |
f38bee993a3763eeb3c67716a717c9c98e9d919a | 878 | py | Python | setup.py | v-morello/sffix | 5d27d729524519f3f7399018c3e5c149cc493647 | [
"MIT"
] | null | null | null | setup.py | v-morello/sffix | 5d27d729524519f3f7399018c3e5c149cc493647 | [
"MIT"
] | null | null | null | setup.py | v-morello/sffix | 5d27d729524519f3f7399018c3e5c149cc493647 | [
"MIT"
] | null | null | null | import os
import setuptools
from setuptools import setup
def parse_version():
    """Return ``__version__`` as defined in ``sffix/_version.py``.

    The version module is executed in an isolated namespace so that the
    package itself does not need to be importable at setup time.
    """
    version_path = os.path.join(
        os.path.dirname(__file__), 'sffix', '_version.py')
    namespace = {}
    with open(version_path, 'r') as handle:
        exec(handle.read(), None, namespace)
    return namespace['__version__']
# Resolve the package version from sffix/_version.py (see parse_version).
version = parse_version()

# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name='sffix',
    url='https://github.com/v-morello/sffix',
    author='Vincent Morello',
    author_email='vmorello@gmail.com',
    description='Modify multi-chunk PSRFITS headers so that dspsr considers them time-contiguous',
    long_description=long_description,
    long_description_content_type='text/markdown',
    version=version,
    packages=setuptools.find_packages(),
    install_requires=['astropy'],
    license='MIT License',
)
043aa277d765f7c8418c1d9ee5e006446719c955 | 5,037 | py | Python | flexget/plugins/output/dump.py | jacaru/flexget | 2ac1799f76b6b1572c0b088299abf9683d4afba6 | [
"MIT"
] | null | null | null | flexget/plugins/output/dump.py | jacaru/flexget | 2ac1799f76b6b1572c0b088299abf9683d4afba6 | [
"MIT"
] | null | null | null | flexget/plugins/output/dump.py | jacaru/flexget | 2ac1799f76b6b1572c0b088299abf9683d4afba6 | [
"MIT"
] | null | null | null | from loguru import logger
from flexget import options, plugin
from flexget.event import event
from flexget.terminal import console
logger = logger.bind(name='dump')
def dump(entries, debug=False, eval_lazy=False, trace=False, title_only=False):
    """
    Dump *entries* to stdout

    :param list entries: Entries to be dumped.
    :param bool debug: Print non printable fields as well.
    :param bool eval_lazy: Evaluate lazy fields.
    :param bool trace: Display trace information.
    :param bool title_only: Display only title field
    """

    def sort_key(field):
        # Sort certain fields above the rest
        if field == 'title':
            return (0,)
        if field == 'url':
            return (1,)
        if field == 'original_url':
            return (2,)
        return 3, field

    for entry in entries:
        for field in sorted(entry, key=sort_key):
            # Underscore-prefixed fields are internal; only shown with debug.
            if field.startswith('_') and not debug:
                continue
            if title_only and field != 'title':
                continue
            if entry.is_lazy(field) and not eval_lazy:
                value = '<LazyField - value will be determined when it is accessed>'
            else:
                try:
                    value = entry[field]
                except KeyError:
                    value = '<LazyField - lazy lookup failed>'
            if isinstance(value, str):
                try:
                    # Strip newlines so every field stays on one console line.
                    console('%-17s: %s' % (field, value.replace('\r', '').replace('\n', '')))
                except Exception:
                    console('%-17s: %r (warning: unable to print)' % (field, value))
            elif isinstance(value, list):
                console('%-17s: %s' % (field, '[%s]' % ', '.join(str(v) for v in value)))
            elif isinstance(value, (int, float, dict)):
                console('%-17s: %s' % (field, value))
            elif value is None:
                console('%-17s: %s' % (field, value))
            else:
                # Fallback for arbitrary objects: stringify, hide failures
                # unless debug is on.
                try:
                    value = str(entry[field])
                    console('%-17s: %s' % (field, value.replace('\r', '').replace('\n', '')))
                except Exception:
                    if debug:
                        console('%-17s: [not printable] (%r)' % (field, value))
        if trace:
            console('-- Processing trace:')
            for item in entry.traces:
                console('%-10s %-7s %s' % (item[0], '' if item[1] is None else item[1], item[2]))
        if not title_only:
            # Blank line between entries.
            console('')
class OutputDump:
    """
    Outputs all entries to console
    """

    schema = {'type': 'boolean'}

    @plugin.priority(0)
    def on_task_output(self, task, config):
        # Only active when the plugin is enabled AND --dump was passed.
        if not config or task.options.dump_entries is None:
            return
        # Flags carried in the --dump argument list.
        eval_lazy = 'eval' in task.options.dump_entries
        trace = 'trace' in task.options.dump_entries
        title = 'title' in task.options.dump_entries
        states = ['accepted', 'rejected', 'failed', 'undecided']
        dumpstates = [s for s in states if s in task.options.dump_entries]
        # Remember whether the user explicitly restricted states; if not,
        # dump all of them but stay quiet about empty groups.
        specificstates = dumpstates
        if not dumpstates:
            dumpstates = states
        undecided = [entry for entry in task.all_entries if entry.undecided]
        if 'undecided' in dumpstates:
            if undecided:
                console('-- Undecided: --------------------------')
                dump(undecided, task.options.debug, eval_lazy, trace, title)
            elif specificstates:
                console('No undecided entries')
        if 'accepted' in dumpstates:
            if task.accepted:
                console('-- Accepted: ---------------------------')
                dump(task.accepted, task.options.debug, eval_lazy, trace, title)
            elif specificstates:
                console('No accepted entries')
        if 'rejected' in dumpstates:
            if task.rejected:
                console('-- Rejected: ---------------------------')
                dump(task.rejected, task.options.debug, eval_lazy, trace, title)
            elif specificstates:
                console('No rejected entries')
        if 'failed' in dumpstates:
            if task.failed:
                console('-- Failed: -----------------------------')
                dump(task.failed, task.options.debug, eval_lazy, trace, title)
            elif specificstates:
                console('No failed entries')
@event('plugin.register')
def register_plugin():
    # builtin=True makes the plugin available on every task without
    # explicit configuration.
    plugin.register(OutputDump, 'dump', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
    # Add the --dump CLI flag to the `execute` sub-command; its values are
    # consumed by OutputDump.on_task_output via task.options.dump_entries.
    options.get_parser('execute').add_argument(
        '--dump',
        nargs='*',
        choices=['eval', 'trace', 'accepted', 'rejected', 'undecided', 'title'],
        dest='dump_entries',
        help=(
            'display all entries in task with fields they contain, '
            'use `--dump eval` to evaluate all lazy fields. Specify an entry '
            'state/states to only dump matching entries.'
        ),
    )
| 37.311111 | 97 | 0.532658 |
54cb996072028b165811e4bc7d2114b89cd252cd | 3,309 | py | Python | research/audioset/vggish_input.py | xychu/models | 0344c5503ee55e24f0de7f37336a6e08f10976fd | [
"Apache-2.0"
] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | research/audioset/vggish_input.py | wzy1510300a28/models | 42a3da72313b8814ef0ced8f425af90b57313b9f | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | research/audioset/vggish_input.py | wzy1510300a28/models | 42a3da72313b8814ef0ced8f425af90b57313b9f | [
"Apache-2.0"
] | 1,474 | 2018-02-01T04:33:18.000Z | 2022-03-08T07:02:20.000Z | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
import resampy
from scipy.io import wavfile
import mel_features
import vggish_params
def waveform_to_examples(data, sample_rate):
  """Converts audio waveform into an array of examples for VGGish.

  Args:
    data: np.array of either one dimension (mono) or two dimensions
      (multi-channel, with the outer dimension representing channels).
      Each sample is generally expected to lie in the range [-1.0, +1.0],
      although this is not required.
    sample_rate: Sample rate of data.

  Returns:
    3-D np.array of shape [num_examples, num_frames, num_bands] which represents
    a sequence of examples, each of which contains a patch of log mel
    spectrogram, covering num_frames frames of audio and num_bands mel frequency
    bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
  """
  # Convert to mono.
  if len(data.shape) > 1:
    data = np.mean(data, axis=1)
  # Resample to the rate assumed by VGGish.
  if sample_rate != vggish_params.SAMPLE_RATE:
    data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)

  # Compute log mel spectrogram features.
  log_mel = mel_features.log_mel_spectrogram(
      data,
      audio_sample_rate=vggish_params.SAMPLE_RATE,
      log_offset=vggish_params.LOG_OFFSET,
      window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
      hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
      num_mel_bins=vggish_params.NUM_MEL_BINS,
      lower_edge_hertz=vggish_params.MEL_MIN_HZ,
      upper_edge_hertz=vggish_params.MEL_MAX_HZ)

  # Frame features into examples.
  # Feature frames are produced at 1/hop_length per second; convert the
  # example window/hop from seconds into feature-frame counts.
  features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
  example_window_length = int(round(
      vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
  example_hop_length = int(round(
      vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
  log_mel_examples = mel_features.frame(
      log_mel,
      window_length=example_window_length,
      hop_length=example_hop_length)
  return log_mel_examples
def wavfile_to_examples(wav_file):
  """Convenience wrapper around waveform_to_examples() for a common WAV format.

  Args:
    wav_file: String path to a file, or a file-like object. The file
      is assumed to contain WAV audio data with signed 16-bit PCM samples.

  Returns:
    See waveform_to_examples.
  """
  sr, wav_data = wavfile.read(wav_file)
  assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
  # Scale int16 PCM into [-1.0, +1.0] floats as expected by VGGish.
  samples = wav_data / 32768.0  # Convert to [-1.0, +1.0]
  return waveform_to_examples(samples, sr)
| 38.034483 | 80 | 0.737987 |
02f9297e4576269d3456d25895fc6eef000ad0b6 | 2,655 | py | Python | longitude/core/data_sources/postgres/default.py | Rovaro/Longitude | b17b40a7b19edb10c62238ea20d136a3a8147f13 | [
"MIT"
] | 1 | 2020-11-06T11:12:42.000Z | 2020-11-06T11:12:42.000Z | longitude/core/data_sources/postgres/default.py | Rovaro/Longitude | b17b40a7b19edb10c62238ea20d136a3a8147f13 | [
"MIT"
] | 22 | 2017-11-20T21:18:55.000Z | 2021-07-06T10:22:14.000Z | longitude/core/data_sources/postgres/default.py | Rovaro/Longitude | b17b40a7b19edb10c62238ea20d136a3a8147f13 | [
"MIT"
] | 4 | 2018-03-22T08:38:03.000Z | 2020-06-14T04:29:15.000Z | import psycopg2
import psycopg2.extensions
from ...common.query_response import LongitudeQueryResponse
from ..base import DataSource
from .common import psycopg2_type_as_string
class PostgresDataSource(DataSource):
    """Longitude data source backed by a raw psycopg2 connection.

    Connection parameters are read from ``options`` with sensible defaults
    (localhost:5432, user ``postgres``). When ``auto_commit`` is truthy,
    every query/copy is committed immediately.
    """

    def __init__(self, options=None):
        # FIX: was ``options={}`` — a mutable default argument. Use None as
        # the default and substitute a fresh dict per call.
        if options is None:
            options = {}
        super().__init__(options)
        self._conn = psycopg2.connect(
            host=options.get('host', 'localhost'),
            port=options.get('port', 5432),
            database=options.get('db', ''),
            user=options.get('user', 'postgres'),
            password=options.get('password', '')
        )
        self._auto_commit = options.get('auto_commit', False)
        self._cursor = self._conn.cursor()

    def __del__(self):
        # FIX: guard with getattr — if __init__ raised before setting these
        # attributes (e.g. the connection failed), __del__ would otherwise
        # raise AttributeError during garbage collection.
        cursor = getattr(self, '_cursor', None)
        if cursor:
            cursor.close()
        conn = getattr(self, '_conn', None)
        if conn:
            conn.close()

    def execute_query(self, query_template, params, **opts):
        """Run a parameterized query and return raw fields/rows.

        Returns a dict with ``fields`` (cursor description, empty for
        statements that return no rows) and ``rows``.
        """
        data = {
            'fields': [],
            'rows': []
        }
        self._cursor.execute(query_template, params)
        # cursor.description is None for statements without a result set.
        if self._cursor.description:
            data['fields'] = self._cursor.description
            data['rows'] = self._cursor.fetchall()
        if self._auto_commit:
            self.commit()
        return data

    def commit(self):
        """Commit the current transaction."""
        self._conn.commit()

    def parse_response(self, response):
        """Convert an execute_query() result into a LongitudeQueryResponse.

        Returns None when ``response`` is falsy.
        """
        if response:
            raw_fields = response['fields']
            fields_names = {n.name: {'type': psycopg2_type_as_string(n.type_code)} for n in raw_fields}
            rows = [dict(zip(fields_names.keys(), row)) for row in response['rows']]
            return LongitudeQueryResponse(rows=rows, fields=fields_names)
        return None

    def copy_from(self, data, filepath, to_table):
        """Bulk-load a CSV stream into ``to_table``.

        The first line of ``data`` is consumed as the comma-separated column
        header; the remainder is streamed via COPY.
        """
        headers = data.readline().decode('utf-8').split(',')
        self._cursor.copy_from(data, to_table, columns=headers, sep=',')
        if self._auto_commit:
            self.commit()

    def write_dataframe(self, *args, **kwargs):
        raise NotImplementedError('Use the SQLAlchemy data source if you need dataframes!')

    def read_dataframe(self, *args, **kwargs):
        # TODO: It is possible to read dataframes using psycopg2, but we do not support it for now to encourage
        #  the use of SQLAlchemy for such tasks
        raise NotImplementedError('Use the SQLAlchemy data source if you need dataframes!')

    def query_dataframe(self, *args, **kwargs):
        # TODO: It is possible to read dataframes using psycopg2, but we do not support it for now to encourage
        #  the use of SQLAlchemy for such tasks
        raise NotImplementedError('Use the SQLAlchemy data source if you need dataframes!')
| 34.934211 | 111 | 0.629755 |
abb21caede1458937ad3bd9ec2f1a9f9ae434a88 | 4,077 | py | Python | boards/tests/test_postEdit_view.py | Xerrex/django_boards_project | 60be478867afa4c9dbc6eb9494dfc046f438b75a | [
"MIT"
] | null | null | null | boards/tests/test_postEdit_view.py | Xerrex/django_boards_project | 60be478867afa4c9dbc6eb9494dfc046f438b75a | [
"MIT"
] | 4 | 2020-06-05T23:12:01.000Z | 2021-06-29T07:06:30.000Z | boards/tests/test_postEdit_view.py | Xerrex/django_boards_project | 60be478867afa4c9dbc6eb9494dfc046f438b75a | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.forms import ModelForm
from boards.models import Board, Topic, Post
from boards.views import PostEditView
class PostEditViewTestCase(TestCase):
    '''
    Base test case to be used in all `PostEditView` view tests
    '''

    def setUp(self):
        # Fixture: one board/topic/post owned by user "john"; subclasses log
        # in as this user (or another) and hit the post_edit URL.
        self.board = Board.objects.create(name='Django', description='Django board.')
        self.username = 'john'
        self.password = '123'
        user = User.objects.create_user(username=self.username, email='john@doe.com', password=self.password)
        self.topic = Topic.objects.create(subject='Hello, world', board=self.board, starter=user)
        self.post = Post.objects.create(message='Lorem ipsum dolor sit amet', topic=self.topic, created_by=user)
        self.url = reverse('post_edit', kwargs={
            'pk': self.board.pk,
            'topic_pk': self.topic.pk,
            'post_pk': self.post.pk
        })
class LoginRequiredPostEditViewTests(PostEditViewTestCase):
    '''Anonymous users must be redirected to the login page.'''

    def test_redirection(self):
        login_url = reverse('login')
        response = self.client.get(self.url)
        # Django appends the original URL as ?next= so login can bounce back.
        self.assertRedirects(response, f'{login_url}?next={self.url}')
class UnauthorizedPostEditViewTests(PostEditViewTestCase):
    '''Logged-in users who do not own the post must not be able to edit it.'''

    def setUp(self):
        super().setUp()
        # Log in as a different user than the post owner created in the base
        # fixture.
        username = 'jane'
        password = '321'
        user = User.objects.create_user(username=username, email='jane@doe.com', password=password)
        self.client.login(username=username, password=password)
        self.response = self.client.get(self.url)

    def test_status_code(self):
        '''
        A topic should be edited only by the owner.
        Unauthorized users should get a 404 response (Page Not Found)
        '''
        self.assertEquals(self.response.status_code, 404)
class PostEditViewTests(PostEditViewTestCase):
    '''GET requests by the post owner render the edit form correctly.'''

    def setUp(self):
        super().setUp()
        # Log in as the post owner from the base fixture.
        self.client.login(username=self.username, password=self.password)
        self.response = self.client.get(self.url)

    def test_status_code(self):
        self.assertEquals(self.response.status_code, 200)

    def test_view_class(self):
        # The URL pattern must resolve to the class-based PostEditView.
        view = resolve('/boards/1/topics/1/posts/1/edit/')
        self.assertEquals(view.func.view_class, PostEditView)

    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')

    def test_contains_form(self):
        form = self.response.context.get('form')
        self.assertIsInstance(form, ModelForm)

    def test_form_inputs(self):
        '''
        The view must contain two inputs: csrf, message textarea
        '''
        self.assertContains(self.response, '<input', 1)
        self.assertContains(self.response, '<textarea', 1)
class SuccessfulPostEditViewTests(PostEditViewTestCase):
    '''Valid POST submissions update the post and redirect.'''

    def setUp(self):
        super().setUp()
        self.client.login(username=self.username, password=self.password)
        self.response = self.client.post(self.url, {'message': 'edited message'})

    def test_redirection(self):
        '''
        A valid form submission should redirect the user
        '''
        topic_posts_url = reverse('topic_posts', kwargs={'pk': self.board.pk, 'topic_pk': self.topic.pk})
        self.assertRedirects(self.response, topic_posts_url)

    def test_post_changed(self):
        # Re-read from the database to see the persisted change.
        self.post.refresh_from_db()
        self.assertEquals(self.post.message, 'edited message')
class InvalidPostEditViewTests(PostEditViewTestCase):
    '''Invalid POST submissions re-render the form with errors.'''

    def setUp(self):
        '''
        Submit an empty dictionary to the `reply_topic` view
        '''
        super().setUp()
        self.client.login(username=self.username, password=self.password)
        self.response = self.client.post(self.url, {})

    def test_status_code(self):
        '''
        An invalid form submission should return to the same page
        '''
        self.assertEquals(self.response.status_code, 200)

    def test_form_errors(self):
        form = self.response.context.get('form')
        self.assertTrue(form.errors)
b80ccdaa7aa49960f4deb9d2feed6994a22a2c52 | 270 | py | Python | acmicpc/1764/1764.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/1764/1764.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/1764/1764.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | N, M = map(int, input().split())
# Read the two name lists, intersect them, and print the names that
# appear in both ("never heard AND never seen"), sorted, preceded by
# their count.  N and M are read on the preceding line.
heard_not = {input() for _ in range(N)}
seen_not = {input() for _ in range(M)}
both = sorted(heard_not & seen_not)
print(len(both))
for name in both:
    print(name)
| 30 | 62 | 0.740741 |
96030cea2ef77f3fbc003808007135e08e52281a | 4,973 | py | Python | python/model/Actor_Critic.py | maswin/DeepRTS | 9922078951810d65fc212a0c2ee32f75765d3cd0 | [
"MIT"
] | null | null | null | python/model/Actor_Critic.py | maswin/DeepRTS | 9922078951810d65fc212a0c2ee32f75765d3cd0 | [
"MIT"
] | 1 | 2019-11-25T06:40:34.000Z | 2019-12-03T03:59:13.000Z | python/model/Actor_Critic.py | maswin/DeepRTS | 9922078951810d65fc212a0c2ee32f75765d3cd0 | [
"MIT"
] | null | null | null | from keras.layers import Dense, Input, Activation, Dropout
from keras.optimizers import Adam
from keras.layers.core import Dense
import random
import keras
import keras.backend as K
from keras.models import Sequential, Model, load_model
import numpy as np
from keras.callbacks import TensorBoard, History
import tensorflow as tf
from time import time
from collections import deque
class Actor_Critic():
    """One-step actor-critic agent built on Keras.

    The actor is kept as two Keras models sharing weights: ``actor_policy``
    (used for training; takes the state plus the advantage as a second
    input so the custom loss can weight the log-likelihood) and
    ``actor_predict`` (used for inference; state input only).  The critic
    is a separate state-value regressor.  Training losses are appended to
    CSV files under ./logs_ac/.
    """

    def __init__(self, load_network = False, load_weight = False, load_file = None):
        # Hyperparameters.
        self.actor_learning_rate = 0.0001
        self.critic_learning_rate = 0.0001
        self.discount_factor = 0.99
        self.num_actions = 4
        # actor_policy is None when a saved network/weights file is loaded
        # (inference-only mode); see create_actor_network.
        self.actor_policy, self.actor_predict = self.create_actor_network(load_network, load_weight, load_file)
        self.critic = self.create_critic_network()
        # NOTE(review): these memories are never written to in this class;
        # presumably kept for an experience-replay variant.
        self.state_memory = []
        self.reward_memory = []
        self.action_memory = []
        self.value_memory = []
        self.next_state_memory = []
        # History callbacks used to read back the per-fit loss in learn().
        self.actor_history = History()
        self.critic_history = History()

    def create_actor_network(self, load_network = False, load_weight = False, load_file = None):
        """Build (or load) the actor.

        Returns a ``(policy_model, predict_model)`` pair; the policy model
        is None when loading from file, i.e. the agent cannot be trained
        further in that mode.
        """
        if load_network is True:
            model = load_model(load_file)
            return (None, model)
        # NOTE: 'input' shadows the builtin of the same name (local scope only).
        input = Input(shape = (363,))
        # Advantage enters as a second input so custom_loss can close over it.
        advantage = Input(shape = [1])
        dense1 = Dense(128, activation = 'relu')(input)
        dense2 = Dense(64, activation = 'relu')(dense1)
        prob_output = Dense(self.num_actions, activation = 'softmax')(dense2)
        opt = Adam(self.actor_learning_rate)
        def custom_loss(y_true, y_pred):
            # Advantage-weighted negative log-likelihood; probabilities are
            # clipped away from 0/1 to keep the log finite.
            clip_out = K.clip(y_pred,1e-8, 1-1e-8)
            log_lik = y_true * K.log(clip_out)
            return K.sum(-log_lik * advantage)
        policy_n = Model(inputs = [input, advantage], outputs = [prob_output])
        policy_n.compile(loss = custom_loss, optimizer=opt)
        # Inference model shares layers (and therefore weights) with policy_n.
        predict_n = Model(inputs= [input], outputs = [prob_output])
        if load_weight is True:
            predict_n.load_weights(load_file)
            return (None, predict_n)
        return policy_n, predict_n

    def create_critic_network(self):
        """Build the state-value network (363-dim state -> scalar value)."""
        model = Sequential()
        model.add(Dense(128, activation ='relu', input_dim = 363))
        # model.add(Dropout(0.15))
        model.add(Dense(64, activation ='relu'))
        # model.add(Dropout(0.15))
        model.add(Dense(1, activation = 'linear'))
        opt = Adam(self.critic_learning_rate)
        model.compile(loss = 'mean_squared_error', optimizer=opt, metrics = ['accuracy'])
        return model

    def actor_prediction(self, state):
        """Sample an action index from the actor's softmax distribution."""
        predicted_probs = self.actor_predict.predict(state)[0]
        pred_action = np.random.choice(range(self.num_actions), p = predicted_probs)
        return pred_action

    def critic_prediction(self, state):
        """Return the critic's value estimate for *state*."""
        return (self.critic.predict(state))

    def predict_action(self, state):
        """Public entry point: choose an action for *state*."""
        action = self.actor_prediction(state)
        return action

    def learn(self, state, action, reward, next_state, done):
        """One actor-critic update from a single transition.

        TD target = r + gamma * V(s') (zeroed on terminal states);
        advantage = target - V(s).  Both fit() losses are appended to
        CSV logs under ./logs_ac/.
        """
        state_v = self.critic_prediction(state)
        next_state_v = self.critic_prediction(next_state)
        target = reward + self.discount_factor * next_state_v * (1- int(done))
        advantage = target - state_v
        # One-hot encode the taken action for the custom loss.
        actions = np.zeros((1,self.num_actions))
        actions[np.arange(1), action] = 1
        self.actor_policy.fit([state, advantage], actions, verbose=0, callbacks=[self.actor_history])
        loss = self.actor_history.history['loss'][0]
        f = open("./logs_ac/model_metrics_actor.csv",'a+')
        f.write(str(loss)+ "\n")
        f.close()
        self.critic.fit(state, target, verbose=0, callbacks=[self.critic_history])
        loss = self.critic_history.history['loss'][0]
        f = open("./logs_ac/model_metrics_critic.csv",'a+')
        f.write(str(loss)+ "\n")
        f.close()

    def save_model(self, iteration='1'):
        """Persist the actor's weights and full model, tagged by iteration."""
        self.actor_predict.save_weights("./weight_store"+"/ac_weight_"+iteration+".h5")
        self.actor_predict.save("./model_store"+"/ac_model_"+iteration+".h5")
# def get_random_state():
# return(np.random.rand(1,300))
# network_obj = Actor_Critic()
# s= get_random_state()
# print(network_obj.predict_action(s))
# s1 = get_random_state()
# r = 0.6
# a = 1
# done = False
# network_obj.learn(s,a,r,s1,done)
# # network_obj.remember(s,network_obj.predict_action(s),1)
# # s= get_random_state()
# # network_obj.remember(s,network_obj.predict_action(s),1)
# # s= get_random_state()
# # network_obj.remember(s,network_obj.predict_action(s),1)
# # s= get_random_state()
# # network_obj.remember(s,network_obj.predict_action(s),1)
# # s= get_random_state()
# # network_obj.remember(s,network_obj.predict_action(s),1)
# # s= get_random_state()
# # network_obj.remember(s,network_obj.predict_action(s),1)
# Referred to : https://github.com/philtabor/Deep-Q-Learning-Paper-To-Code
| 36.566176 | 111 | 0.65373 |
ff579d5e53d3feff6c6afbcc75def3210e64b664 | 776 | py | Python | codedigger/codeforces/tests/test_views.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 17 | 2020-10-07T22:40:37.000Z | 2022-01-20T07:19:09.000Z | codedigger/codeforces/tests/test_views.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 42 | 2021-06-03T01:58:04.000Z | 2022-01-31T14:49:22.000Z | codedigger/codeforces/tests/test_views.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 25 | 2020-10-06T17:55:19.000Z | 2021-12-09T07:56:50.000Z | from rest_framework import response
from .test_setup import TestSetUp
from user.exception import ValidationException
from codeforces.api import (user_info, user_rating, contest_list,
contest_standings, contest_ratingChanges,
user_status)
from django.urls import reverse
class TestViews(TestSetUp):
    """Tests for the user-search endpoint."""

    def test_search_user(self):
        # Query for the 'tou' prefix; at most 5 results are expected and
        # every returned handle must start with the queried prefix.
        url = reverse('search-user') + '?q=tou'
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, 200)
        users = response.data['result']
        self.assertEqual(max(len(users), 5), 5)
        for user in users:
            self.assertEqual('tou', user['handle'].lower()[:3])
| 35.272727 | 69 | 0.639175 |
083dc606cf8076db883cb4e9f3760fdfa852c2e4 | 313 | py | Python | DQMServices/Components/python/DQMMessageLogger_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQMServices/Components/python/DQMMessageLogger_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQMServices/Components/python/DQMMessageLogger_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQMEDAnalyzer instance for the 'DQMMessageLogger' plugin.  Categories is
# left empty here; its histograms are booked under the "MessageLogger"
# DQM directory.
DQMMessageLogger = DQMEDAnalyzer('DQMMessageLogger',
    Categories = cms.vstring(),
    Directory = cms.string("MessageLogger")
)
| 26.083333 | 68 | 0.58147 |
b7e0dce2097d475e43cc63bb5de10d78fdff55ac | 11,702 | py | Python | scenario_runner/srunner/scenariomanager/result_writer.py | barasm-hita/carla-vehicle-testing | 9fdfbb9af5bccaa66f510e7db214740975cc1613 | [
"MIT"
] | 302 | 2018-11-21T20:13:03.000Z | 2022-03-30T05:40:40.000Z | scenario_runner/srunner/scenariomanager/result_writer.py | barasm-hita/carla-vehicle-testing | 9fdfbb9af5bccaa66f510e7db214740975cc1613 | [
"MIT"
] | 466 | 2018-11-30T13:49:04.000Z | 2022-03-30T09:54:04.000Z | scenario_runner/srunner/scenariomanager/result_writer.py | barasm-hita/carla-vehicle-testing | 9fdfbb9af5bccaa66f510e7db214740975cc1613 | [
"MIT"
] | 291 | 2018-11-21T10:12:26.000Z | 2022-03-28T10:36:55.000Z | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module contains the result gatherer and write for CARLA scenarios.
It shall be used from the ScenarioManager only.
"""
from __future__ import print_function
import time
import json
from tabulate import tabulate
class ResultOutputProvider(object):

    """
    Gathers the results of a CARLA scenario run and writes them to the
    configured sinks: stdout, a tabular text file, a JUnit XML file and/or
    a machine-readable JSON report.
    It shall be used from the ScenarioManager only.
    """

    def __init__(self, data, result, stdout=True, filename=None, junitfile=None, jsonfile=None):
        """
        Setup all parameters
        - _data contains all scenario-related information
        - _result is overall pass/fail info
        - _stdout (True/False) is used to (de)activate terminal output
        - _filename is used to (de)activate file output in tabular form
        - _junit is used to (de)activate file output in junit form
        - _json is used to (de)activate file output in json form
        """
        self._data = data
        self._result = result
        self._stdout = stdout
        self._filename = filename
        self._junit = junitfile
        self._json = jsonfile

        # Human-readable wall-clock timestamps of scenario start/end.
        self._start_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                         time.localtime(self._data.start_system_time))
        self._end_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                       time.localtime(self._data.end_system_time))

    def write(self):
        """
        Public write function: emits the report to every configured sink.
        """
        if self._junit is not None:
            self._write_to_junit()
        if self._json is not None:
            self._write_to_reportjson()

        output = self.create_output_text()
        if self._filename is not None:
            with open(self._filename, 'w', encoding='utf-8') as fd:
                fd.write(output)
        if self._stdout:
            print(output)

    def create_output_text(self):
        """
        Creates the human-readable output message (header, actor lists,
        simulation timing table and criteria table) and returns it.
        """
        output = "\n"
        output += " ======= Results of Scenario: {} ---- {} =======\n".format(
            self._data.scenario_tree.name, self._result)
        # Width of the closing '=' ruler, matching the header line above.
        end_line_length = len(output) - 3
        output += "\n"

        # List of all the actors
        output += " > Ego vehicles:\n"
        for ego_vehicle in self._data.ego_vehicles:
            output += "{}; ".format(ego_vehicle)
        output += "\n\n"
        output += " > Other actors:\n"
        for actor in self._data.other_actors:
            output += "{}; ".format(actor)
        output += "\n\n"

        # Simulation part
        output += " > Simulation Information\n"
        system_time = round(self._data.scenario_duration_system, 2)
        game_time = round(self._data.scenario_duration_game, 2)
        ratio = round(self._data.scenario_duration_game / self._data.scenario_duration_system, 3)
        list_statistics = [["Start Time", "{}".format(self._start_time)]]
        list_statistics.extend([["End Time", "{}".format(self._end_time)]])
        list_statistics.extend([["Duration (System Time)", "{}s".format(system_time)]])
        list_statistics.extend([["Duration (Game Time)", "{}s".format(game_time)]])
        list_statistics.extend([["Ratio (System Time / Game Time)", "{}s".format(ratio)]])
        output += tabulate(list_statistics, tablefmt='fancy_grid')
        output += "\n\n"

        # Criteria part
        output += " > Criteria Information\n"
        header = ['Actor', 'Criterion', 'Result', 'Actual Value', 'Expected Value']
        list_statistics = [header]

        for criterion in self._data.scenario.get_criteria():
            name_string = criterion.name
            if criterion.optional:
                name_string += " (Opt.)"
            else:
                name_string += " (Req.)"

            actor = "{} (id={})".format(criterion.actor.type_id[8:], criterion.actor.id)
            criteria = name_string
            # A criterion still RUNNING at shutdown counts as a failure.
            result = "FAILURE" if criterion.test_status == "RUNNING" else criterion.test_status
            actual_value = criterion.actual_value
            expected_value = criterion.expected_value_success

            list_statistics.extend([[actor, criteria, result, actual_value, expected_value]])

        # Timeout is reported as an extra, implicit criterion row.
        actor = ""
        criteria = "Timeout (Req.)"
        result = "SUCCESS" if self._data.scenario_duration_game < self._data.scenario.timeout else "FAILURE"
        actual_value = round(self._data.scenario_duration_game, 2)
        expected_value = round(self._data.scenario.timeout, 2)

        list_statistics.extend([[actor, criteria, result, actual_value, expected_value]])

        # Global and final output message
        list_statistics.extend([['', 'GLOBAL RESULT', self._result, '', '']])

        output += tabulate(list_statistics, tablefmt='fancy_grid')
        output += "\n"
        output += " " + "=" * end_line_length + "\n"

        return output

    def _write_to_reportjson(self):
        """
        Write a machine-readable report to JSON

        The resulting report has the following format:
        {
            criteria: [
                {
                    name: "CheckCollisions",
                    expected: "0",
                    actual: "2",
                    optional: false,
                    success: false
                }, ...
            ]
        }
        """
        json_list = []

        def result_dict(name, actor, optional, expected, actual, success):
            """
            Convenience function to convert its arguments into a JSON-ready dict
            :param name: Name of the test criterion
            :param actor: Actor ID as string
            :param optional: If the criterion is optional
            :param expected: The expected value of the criterion (eg 0 for collisions)
            :param actual: The actual value
            :param success: If the test was passed
            :return: A dict data structure that will be written to JSON
            """
            return {
                "name": name,
                "actor": actor,
                "optional": optional,
                "expected": expected,
                "actual": actual,
                "success": success,
            }

        for criterion in self._data.scenario.get_criteria():
            json_list.append(
                result_dict(
                    criterion.name,
                    "{}-{}".format(criterion.actor.type_id[8:], criterion.actor.id),
                    criterion.optional,
                    criterion.expected_value_success,
                    criterion.actual_value,
                    criterion.test_status in ["SUCCESS", "ACCEPTABLE"]
                )
            )

        # add one entry for duration
        timeout = self._data.scenario.timeout
        duration = self._data.scenario_duration_game
        json_list.append(
            result_dict(
                "Duration", "all", False, timeout, duration, duration <= timeout
            )
        )

        result_object = {
            "scenario": self._data.scenario_tree.name,
            "success": self._result in ["SUCCESS", "ACCEPTABLE"],
            "criteria": json_list
        }

        with open(self._json, "w", encoding='utf-8') as fp:
            json.dump(result_object, fp, indent=4)

    def _write_to_junit(self):
        """
        Writing to Junit XML: one <testcase> per criterion plus one for the
        scenario duration/timeout, wrapped in a single <testsuite>.
        """
        test_count = 0
        failure_count = 0
        for criterion in self._data.scenario.get_criteria():
            test_count += 1
            if criterion.test_status != "SUCCESS":
                failure_count += 1

        # handle timeout
        test_count += 1
        if self._data.scenario_duration_game >= self._data.scenario.timeout:
            failure_count += 1

        with open(self._junit, "w", encoding='utf-8') as junit_file:
            junit_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")

            test_suites_string = ("<testsuites tests=\"%d\" failures=\"%d\" disabled=\"0\" "
                                  "errors=\"0\" timestamp=\"%s\" time=\"%5.2f\" "
                                  "name=\"Simulation\" package=\"Scenarios\">\n" %
                                  (test_count,
                                   failure_count,
                                   self._start_time,
                                   self._data.scenario_duration_system))
            junit_file.write(test_suites_string)

            test_suite_string = ("  <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
                                 "disabled=\"0\" errors=\"0\" time=\"%5.2f\">\n" %
                                 (self._data.scenario_tree.name,
                                  test_count,
                                  failure_count,
                                  self._data.scenario_duration_system))
            junit_file.write(test_suite_string)

            for criterion in self._data.scenario.get_criteria():
                testcase_name = criterion.name + "_" + \
                    criterion.actor.type_id[8:] + "_" + str(criterion.actor.id)
                result_string = ("    <testcase name=\"{}\" status=\"run\" "
                                 "time=\"0\" classname=\"Scenarios.{}\">\n".format(
                                     testcase_name, self._data.scenario_tree.name))
                if criterion.test_status != "SUCCESS":
                    result_string += "      <failure message=\"{}\"  type=\"\"><![CDATA[\n".format(
                        criterion.name)
                    result_string += "  Actual:   {}\n".format(
                        criterion.actual_value)
                    result_string += "  Expected: {}\n".format(
                        criterion.expected_value_success)
                    result_string += "\n"
                    result_string += "  Exact Value: {} = {}]]></failure>\n".format(
                        criterion.name, criterion.actual_value)
                else:
                    result_string += "  Exact Value: {} = {}\n".format(
                        criterion.name, criterion.actual_value)
                result_string += "    </testcase>\n"
                junit_file.write(result_string)

            # Handle timeout separately
            result_string = ("    <testcase name=\"Duration\" status=\"run\" time=\"{}\" "
                             "classname=\"Scenarios.{}\">\n".format(
                                 self._data.scenario_duration_system,
                                 self._data.scenario_tree.name))
            if self._data.scenario_duration_game >= self._data.scenario.timeout:
                result_string += "      <failure message=\"{}\"  type=\"\"><![CDATA[\n".format(
                    "Duration")
                result_string += "  Actual:   {}\n".format(
                    self._data.scenario_duration_game)
                result_string += "  Expected: {}\n".format(
                    self._data.scenario.timeout)
                result_string += "\n"
                result_string += "  Exact Value: {} = {}]]></failure>\n".format(
                    "Duration", self._data.scenario_duration_game)
            else:
                result_string += "  Exact Value: {} = {}\n".format(
                    "Duration", self._data.scenario_duration_game)
            result_string += "    </testcase>\n"
            junit_file.write(result_string)

            junit_file.write("  </testsuite>\n")
            junit_file.write("</testsuites>\n")
| 40.213058 | 108 | 0.532559 |
b21045f7e474be5d6b08b7868378686f08f907b4 | 4,744 | py | Python | api/portfolio_api.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | api/portfolio_api.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | api/portfolio_api.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | from __future__ import annotations
from ..api_client import ApiClient
from .. import models
class PortfolioApi(object):
    """Client wrapper for the broker portfolio REST endpoints.

    Both endpoints are authenticated GET requests that differ only in
    their path and response model, so the request building is shared in
    :meth:`_portfolio_request`.  Public method signatures are unchanged.
    """

    # Keyword arguments accepted by the *_with_http_info methods.
    _ALLOWED_PARAMS = ('broker_account_id', '_return_http_data_only',
                       '_preload_content', '_request_timeout')

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _portfolio_request(self, resource_path, response_type, method_name, kwargs):
        """Validate *kwargs* and issue an authenticated GET request.

        :param resource_path: endpoint path, e.g. '/portfolio'
        :param response_type: name of the deserialization target model
        :param method_name: public method name, used in error messages
        :param kwargs: keyword arguments forwarded by the public method
        :raises TypeError: if *kwargs* contains an unsupported keyword
        """
        for key in kwargs:
            if key not in self._ALLOWED_PARAMS:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    ' to method %s' % (key, method_name)
                )

        query_params = []
        if 'broker_account_id' in kwargs:
            query_params.append(
                ('brokerAccountId', kwargs['broker_account_id'])
            )

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json'])
        }

        return self.api_client.call_api(
            resource_path,
            'GET',
            {},  # no path parameters for these endpoints
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=['sso_auth'],
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={},
        )

    def portfolio_currencies_get(self, **kwargs):
        """Получение валютных активов клиента

        :param str broker_account_id: Номер счета (по умолчанию - Тинькофф)
        :rtype: models.PortfolioCurrenciesResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.portfolio_currencies_get_with_http_info(**kwargs)

    def portfolio_currencies_get_with_http_info(self, **kwargs):
        """Получение валютных активов клиента

        :param str broker_account_id: Номер счета (по умолчанию - Тинькофф)
        :rtype: models.PortfolioCurrenciesResponse
        """
        return self._portfolio_request(
            '/portfolio/currencies',
            'PortfolioCurrenciesResponse',
            'portfolio_currencies_get',
            kwargs,
        )

    def portfolio_get(self, **kwargs):
        """Получение портфеля клиента

        :param str broker_account_id: Номер счета (по умолчанию - Тинькофф)
        :rtype: models.PortfolioResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.portfolio_get_with_http_info(**kwargs)

    def portfolio_get_with_http_info(self, **kwargs):
        """Получение портфеля клиента

        :param str broker_account_id: Номер счета (по умолчанию - Тинькофф)
        :rtype: models.PortfolioResponse
        """
        return self._portfolio_request(
            '/portfolio',
            'PortfolioResponse',
            'portfolio_get',
            kwargs,
        )
| 31.210526 | 75 | 0.596332 |
98dd0a04dbbddadc483dbe6273676ab806fed36e | 2,907 | py | Python | OriginalRetroZoom/logger.py | letmaik/InnovationChallenge2021 | 4c32b450bcdd941ac32354bf20d426736b3bcbda | [
"MIT"
] | null | null | null | OriginalRetroZoom/logger.py | letmaik/InnovationChallenge2021 | 4c32b450bcdd941ac32354bf20d426736b3bcbda | [
"MIT"
] | null | null | null | OriginalRetroZoom/logger.py | letmaik/InnovationChallenge2021 | 4c32b450bcdd941ac32354bf20d426736b3bcbda | [
"MIT"
] | 1 | 2021-03-07T12:30:34.000Z | 2021-03-07T12:30:34.000Z | import datetime
import json
import os
import time
import math
ISO_8601 = '%Y-%m-%d %H:%M:%S.%f'
class Logger:
    """Tracks session screen-time and emotion occurrences.

    All state is mirrored into a per-day JSON file
    ``productivity_data_<YYYY-MM-DD>.json`` in the current directory with
    the layout ``{'Time_Data': ..., 'Emotion_Data': ...}``.
    """

    # Timestamp format produced by str(datetime.datetime.now()).
    _ISO_8601 = '%Y-%m-%d %H:%M:%S.%f'

    def __init__(self, limt=.25):
        # 'limt' is the daily limit in hours, persisted in minutes.  The
        # misspelled parameter and 'Daily_limt' key are kept for backward
        # compatibility with callers and existing data files.
        self.time_data = {'Daily_limt': limt * 60, 'Start_Date_Time': None,
                          'End_Date_Time': None, 'Time_Spent': 0}
        self.emotion_data = {
            'Average_Emotion': None, 'Emotion_Occurrences': []}

    @staticmethod
    def _data_file():
        """Path of today's data file."""
        return f'productivity_data_{datetime.date.today()}.json'

    def startTimer(self):
        """Record the session start time and (re)create today's data file."""
        self.time_data['Start_Date_Time'] = str(datetime.datetime.now())
        current_data = {'Time_Data': self.time_data,
                        'Emotion_Data': self.emotion_data}
        with open(self._data_file(), 'w') as f:
            json.dump(current_data, f)

    def endTimer(self):
        """Record the session end time and accumulate the elapsed seconds."""
        # The previous version also opened and parsed the data file here,
        # discarding the result -- that dead read has been removed;
        # updateTimeSpent() reads the file itself.
        self.time_data['End_Date_Time'] = str(datetime.datetime.now())
        self.updateTimeSpent()

    def updateTimeSpent(self):
        """Add the elapsed seconds of this session to the persisted total."""
        start = datetime.datetime.strptime(
            self.time_data['Start_Date_Time'], self._ISO_8601)
        end = datetime.datetime.strptime(
            self.time_data['End_Date_Time'], self._ISO_8601)
        elapsed = math.ceil((end - start).total_seconds())
        # Fold this session into the total already stored on disk.
        with open(self._data_file(), 'r+') as f:
            current_data = json.load(f)
        self.time_data['Time_Spent'] = (
            current_data['Time_Data']['Time_Spent'] + elapsed)
        self.update_json()

    def log_emotion(self, emotion):
        """Record a non-empty *emotion* label with a timestamp."""
        if emotion != '':
            self.emotion_data['Emotion_Occurrences'].append(
                (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), emotion))
            self.update_average_emotion()

    def update_average_emotion(self):
        """Set Average_Emotion to the most frequent label seen so far."""
        freq = {}
        for _, label in self.emotion_data['Emotion_Occurrences']:
            freq[label] = freq.get(label, 0) + 1
        self.emotion_data['Average_Emotion'] = max(freq, key=freq.get)
        self.update_json()

    def update_json(self):
        """Rewrite today's data file in place (file must already exist)."""
        current_data = {'Time_Data': self.time_data,
                        'Emotion_Data': self.emotion_data}
        with open(self._data_file(), 'r+') as f:
            f.seek(0)
            f.truncate()
            json.dump(current_data, f)
# Manual smoke test of the Logger workflow: start a session and log
# a few emotions five seconds apart.
if __name__ == "__main__":
    log = Logger()
    log.startTimer()
    time.sleep(5)
    log.log_emotion('Happy')
    time.sleep(5)
    log.log_emotion('Sad')
    time.sleep(5)
    log.log_emotion('Happy')
log.endTimer() | 36.797468 | 86 | 0.585139 |
378764d87f4d3e638f2ba2cd0d389a8e1d5bd3d6 | 2,172 | py | Python | docs/tutorials/v/symplot.py | seshasaibehara/multishifter | a2f533d22886973ae0bb85201e36c5e691c627fc | [
"MIT"
] | 19 | 2019-01-31T21:38:01.000Z | 2021-11-23T09:47:17.000Z | docs/tutorials/v/symplot.py | seshasaibehara/multishifter | a2f533d22886973ae0bb85201e36c5e691c627fc | [
"MIT"
] | 2 | 2019-07-27T01:38:04.000Z | 2020-12-04T02:54:58.000Z | docs/tutorials/v/symplot.py | seshasaibehara/multishifter | a2f533d22886973ae0bb85201e36c5e691c627fc | [
"MIT"
] | 3 | 2020-10-17T23:06:01.000Z | 2022-01-18T14:31:44.000Z | import json
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
font = {'family' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
def load_record(record_file):
    """Parse *record_file* as JSON and return the decoded object."""
    with open(record_file) as handle:
        return json.load(handle)
def write_json(data, target_file):
    """Serialize *data* as JSON into *target_file*, overwriting it."""
    with open(target_file, 'w') as out:
        json.dump(data, out)
def tabulize_record(record):
    """Return the per-id entries of *record* as a DataFrame, one row per id."""
    # pandas deprecated passing a literal JSON string to read_json;
    # wrap the serialized dict in StringIO instead (same result).
    from io import StringIO
    df = pd.read_json(StringIO(json.dumps(record["ids"])), orient="index")
    return df
def in_plane_vectors(record):
    """Scale the unit shift vectors by the grid dimensions.

    Returns the pair (avec, bvec) as numpy arrays.
    """
    a_count, b_count = record["grid"]
    a_unit, b_unit = record["shift_units"]
    return np.array(a_unit) * a_count, np.array(b_unit) * b_count
def scatter_equivalent_shifts(ax,record,**kwargs):
    """Scatter the shift grid points on *ax*, colored by symmetry orbit.

    Extra **kwargs are forwarded to ax.scatter (e.g. s, cmap).
    """
    unwinded=tabulize_record(record)
    # Find shifts for any cleave entry (if you just ran "shift" you don't need this step)
    nocleave=unwinded.loc[unwinded["cleavage"]==unwinded["cleavage"].iloc[0]]
    # Get in plane shift vectors
    xy=np.array(nocleave["shift"].to_list())
    ab=np.array(nocleave["grid_point"].to_list())
    # We'll use their equivalent group (orbit) value to color the plot
    c=nocleave["orbit"].to_numpy()
    # This is all we need, we save it to a single array:
    # columns are [a, b, x, y, orbit]
    points=np.hstack((ab,xy,c[:,np.newaxis]))
    # We'll duplicate the "walls" along a and b onto the next periodic image
    avec,bvec=in_plane_vectors(record)
    a,b=record["grid"]
    # Get the walls and repeat them on the next boundary
    aarg=points[:,1].argsort()[0:a]
    awall=points[aarg]
    awall[:,[2,3]]+=bvec
    points=np.vstack((points,awall))
    barg=points[:,0].argsort()[0:b+1]
    bwall=points[barg]
    bwall[:,[2,3]]+=avec
    points=np.vstack((points,bwall))
    # Scatter the grid points, coloring symmetrically equivalent ones with the same value
    ax.scatter(points[:,2],points[:,3],c=points[:,4],**kwargs)
    ax.set_aspect('equal')
    ax.set_xticks([])
    ax.set_yticks([])
def main():
    """Load ./record.json and show the symmetry-colored shift scatter plot."""
    record=load_record("./record.json")
    fig=plt.figure()
    ax=fig.add_subplot(111)
    scatter_equivalent_shifts(ax,record,s=85,cmap=matplotlib.cm.get_cmap("twilight"))
    plt.show()
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| 26.487805 | 88 | 0.669429 |
37bd5421e615732f0a48dbc5432fed39c925cff3 | 10,298 | py | Python | bdssnmpadaptor/mapping_modules/fwdd_global_interface_physical_statistics.py | NidoriNet/bdsSnmpAdaptor | 0b3be71fe31dd05f1a049114a0782190bae5ee48 | [
"BSD-2-Clause"
] | 1 | 2019-07-09T12:33:29.000Z | 2019-07-09T12:33:29.000Z | bdssnmpadaptor/mapping_modules/fwdd_global_interface_physical_statistics.py | NidoriNet/bdsSnmpAdaptor | 0b3be71fe31dd05f1a049114a0782190bae5ee48 | [
"BSD-2-Clause"
] | 27 | 2019-03-14T21:50:56.000Z | 2019-07-09T13:38:29.000Z | bdssnmpadaptor/mapping_modules/fwdd_global_interface_physical_statistics.py | NidoriNet/bdsSnmpAdaptor | 0b3be71fe31dd05f1a049114a0782190bae5ee48 | [
"BSD-2-Clause"
] | 1 | 2019-07-09T12:28:08.000Z | 2019-07-09T12:28:08.000Z | # -*- coding: future_fstrings -*-
#
# This file is part of bdsSnmpAdaptor software.
#
# Copyright (C) 2017-2019, RtBrick Inc
# License: BSD License 2.0
#
import binascii
import struct
import time
from bdssnmpadaptor import if_tools
# Parse a hexadecimal string into an int (e.g. 'ff' -> 255).
# def instead of lambda assignment (PEP 8 E731); names unchanged for callers.
def HEX_STRING_LAMBDA(x):
    return int(x, 16)


# Pre-compiled little-endian signed 64-bit layout, hoisted so it is not
# rebuilt on every call.
_INT64_LE = struct.Struct('<q')


# Decode a 16-char hex string as a little-endian signed 64-bit integer.
def LELL_LAMBDA(x):
    return int(_INT64_LE.unpack(binascii.unhexlify(x))[0])
class FwddGlobalInterfacePhysicalStatistics(object):
"""Implement SNMP IF-MIB for physical BDS interfaces.
Populates SNMP managed objects of SNMP `IF-MIB` module from
`global.interface.physical.statistics` BDS table.
Notes
-----
Expected input:
.. code-block:: json
{
"objects": [
{
"attribute": {
"bcm_stat_tx_double_vlan_tag_frame": "0000000000000000",
"bcm_stat_rx_double_vlan_tag_frame": "0000000000000000",
"bcm_stat_tx_vlan_tag_frame": "0000000000000000",
"bcm_stat_rx_vlan_tag_frame": "0000000000000000",
"port_stat_ether_stats_pkts_4096_to_9216_octets": "0000000000000000",
"port_stat_ether_stats_pkts_2048_to_4095_octets": "0000000000000000",
"port_stat_ether_stats_pkts_1522_to_2047_octets": "0000000000000000",
"port_stat_ieee_8021_pfc_indications": "0000000000000000",
"port_stat_ieee_8021_pfc_requests": "0000000000000000",
"port_stat_if_out_multicast_pkts": "0000000000000000",
"port_stat_if_out_broadcast_pkts": "0000000000000000",
"port_stat_if_in_multicast_pkts": "0000000000000000",
"port_stat_if_in_broadcast_pkts": "0000000000000000",
"port_stat_ipv6_if_stats_out_mcast_pkts": "0000000000000000",
"port_stat_ipv6_if_stats_in_mcast_pkts": "0000000000000000",
"port_stat_ipv6_if_stats_out_discards": "0000000000000000",
"port_stat_ipv6_if_stats_out_forw_datagrams": "0000000000000000",
"port_stat_ipv6_if_stats_in_discards": "0000000000000000",
"port_stat_ipv6_if_stats_in_addr_errors": "0000000000000000",
"port_stat_ipv6_if_stats_in_hdr_errors": "0000000000000000",
"port_stat_ipv6_if_stats_in_receives": "0000000000000000",
"port_stat_if_hc_out_broadcast_pkts": "0000000000000000",
"port_stat_if_hc_out_multicast_pkts": "0000000000000000",
"port_stat_if_hc_out_ucast_pkts": "0000000000000000",
"port_stat_ether_stats_pkts_512_to_1023_octets": "0000000000000000",
"port_stat_ether_stats_pkts_256_to_511_octets": "0000000000000000",
"port_stat_ether_stats_pkts_128_to_255_octets": "0000000000000000",
"port_stat_ether_stats_pkts_65_to_127_octets": "0000000000000000",
"port_stat_ether_stats_pkts_64_octets": "0000000000000000",
"port_stat_ether_stats_fragments": "0000000000000000",
"port_stat_ether_stats_undersize_pkts": "0000000000000000",
"port_stat_ether_stats_broadcast_pkts": "0000000000000000",
"port_stat_ether_stats_multicast_pkts": "0000000000000000",
"port_stat_ether_stats_drop_events": "0000000000000000",
"port_stat_dot1d_port_in_discards": "0000000000000000",
"port_stat_dot1d_tp_port_out_frames": "0000000000000000",
"port_stat_dot1d_tp_port_in_frames": "0000000000000000",
"port_stat_dot1d_base_port_mtu_exceeded_discards": "0000000000000000",
"port_stat_dot1d_base_port_delay_exceeded_drops": "0000000000000000",
"port_stat_ip_in_discards": "0000000000000000",
"port_stat_if_out_octets": "0000000000000000",
"port_stat_if_in_unknown_protos": "0000000000000000",
"port_stat_if_in_errors": "0000000000000000",
"port_stat_if_in_discards": "0000000000000000",
"port_stat_if_in_non_ucast_pkts": "0000000000000000",
"port_stat_if_in_ucast_pkts": "0000000000000000",
"port_stat_if_in_octets": "0000000000000000",
"interface_name": "ifp-0/1/1",
"port_stat_if_out_ucast_pkts": "0000000000000000",
"port_stat_if_out_non_ucast_pkts": "0000000000000000",
"port_stat_if_out_discards": "0000000000000000",
"port_stat_if_out_errors": "0000000000000000",
"port_stat_if_out_qlen": "0000000000000000",
"port_stat_ip_in_receives": "0000000000000000",
"port_stat_ip_in_hdr_errors": "0000000000000000",
"port_stat_ip_forw_datagrams": "0000000000000000",
"port_stat_ether_stats_pkts_1024_to_1518_octets": "0000000000000000",
"port_stat_ether_stats_oversize_pkts": "0000000000000000",
"port_stat_ether_rx_oversize_pkts": "0000000000000000",
"port_stat_ether_tx_oversize_pkts": "0000000000000000",
"port_stat_ether_stats_jabbers": "0000000000000000",
"port_stat_ether_stats_octets": "0000000000000000",
"port_stat_ether_stats_pkts": "0000000000000000",
"port_stat_ether_stats_collisions": "0000000000000000",
"port_stat_ether_stats_crc_align_errors": "0000000000000000",
"port_stat_ether_stats_tx_no_errors": "0000000000000000",
"port_stat_ether_stats_rx_no_errors": "0000000000000000",
"port_stat_dot3_stats_alignment_errors": "0000000000000000",
"port_stat_dot3_stats_fcs_errors": "0000000000000000",
"port_stat_dot3_stats_single_collision_frames": "0000000000000000",
"port_stat_dot3_stats_multiple_collision_frames": "0000000000000000",
"port_stat_dot3_stats_sqet_test_errors": "0000000000000000",
"port_stat_dot3_stats_deferred_transmissions": "0000000000000000",
"port_stat_dot3_stats_late_collisions": "0000000000000000",
"port_stat_dot3_stats_excessive_collisions": "0000000000000000",
"port_stat_dot3_stats_internal_mac_xmit_errors": "0000000000000000",
"port_stat_dot3_stats_carrier_sense_errors": "0000000000000000",
"port_stat_dot3_stats_frame_too_longs": "0000000000000000",
"port_stat_dot3_stats_internal_mac_receive_errors": "0000000000000000",
"port_stat_dot3_stats_symbol_errors": "0000000000000000",
"port_stat_dot3_stat_sol_in_unknown_opcodes": "0000000000000000",
"port_stat_dot3_in_pause_frames": "0000000000000000",
"port_stat_dot3_out_pause_frames": "0000000000000000",
"port_stat_if_hc_in_octets": "0000000000000000",
"port_stat_if_hc_in_ucast_pkts": "0000000000000000",
"port_stat_if_hc_in_multicast_pkts": "0000000000000000",
"port_stat_if_hc_in_broadcast_pkts": "0000000000000000",
"port_stat_if_hc_out_octets": "0000000000000000"
},
"update": true,
"sequence": 1
}
]
}
"""
@classmethod
def setOids(cls, oidDb, bdsData, bdsIds, birthday):
"""Populates OID DB with BDS information.
Takes known objects from JSON document, puts them into
the OID DB as specific MIB managed objects.
Args:
oidDb (OidDb): OID DB instance to work on
bdsData (dict): BDS information to put into OID DB
bdsIds (list): list of last known BDS record sequence IDs
birthday (float): timestamp of system initialization
Raises:
BdsError: on OID DB population error
"""
newBdsIds = [obj['sequence'] for obj in bdsData['objects']]
if newBdsIds == bdsIds:
return
currentSysTime = int((time.time() - birthday) * 100)
add = oidDb.add
for i, bdsJsonObject in enumerate(bdsData['objects']):
attribute = bdsJsonObject['attribute']
ifName = attribute['interface_name']
index = if_tools.ifIndexFromIfName(ifName)
add('IF-MIB', 'ifInOctets', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_octets']))
add('IF-MIB', 'ifInUcastPkts', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_ucast_pkts']))
add('IF-MIB', 'ifInNUcastPkts', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_non_ucast_pkts']))
add('IF-MIB', 'ifInDiscards', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_discards']))
add('IF-MIB', 'ifInErrors', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_errors']))
add('IF-MIB', 'ifInUnknownProtos', index,
value=LELL_LAMBDA(attribute['port_stat_if_in_unknown_protos']))
add('IF-MIB', 'ifOutOctets', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_octets']))
add('IF-MIB', 'ifOutUcastPkts', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_ucast_pkts']))
add('IF-MIB', 'ifOutNUcastPkts', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_non_ucast_pkts']))
add('IF-MIB', 'ifOutDiscards', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_discards']))
add('IF-MIB', 'ifOutErrors', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_errors']))
add('IF-MIB', 'ifOutQLen', index,
value=LELL_LAMBDA(attribute['port_stat_if_out_qlen']))
if i < len(bdsIds):
# possible table entry change
ifLastChange = None if newBdsIds[i] == bdsIds[i] else currentSysTime
else:
# initial run or table size change
ifLastChange = currentSysTime if bdsIds else 0
add('IF-MIB', 'ifLastChange', index, value=ifLastChange)
# count *all* IF-MIB interfaces we currently have - some
# may be contributed by other modules
ifNumber = len(oidDb.getObjectsByName('IF-MIB', 'ifIndex'))
add('IF-MIB', 'ifNumber', 0, value=ifNumber)
add('IF-MIB', 'ifStackLastChange', 0,
value=currentSysTime if bdsIds else 0)
add('IF-MIB', 'ifTableLastChange', 0,
value=currentSysTime if bdsIds else 0)
bdsIds[:] = newBdsIds
| 46.179372 | 84 | 0.667508 |
8fc0af0e3ca210af1ea8f2653767df4817cc2d04 | 13,899 | py | Python | nnabla_rl/model_trainers/policy/trpo_policy_trainer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 75 | 2021-06-14T02:35:19.000Z | 2022-03-23T04:30:24.000Z | nnabla_rl/model_trainers/policy/trpo_policy_trainer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 2 | 2021-12-17T08:46:54.000Z | 2022-03-15T02:04:53.000Z | nnabla_rl/model_trainers/policy/trpo_policy_trainer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 3 | 2021-06-15T13:32:57.000Z | 2022-03-25T16:53:14.000Z | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Sequence, cast
import numpy as np
import nnabla as nn
import nnabla.functions as NF
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.logger import logger
from nnabla_rl.model_trainers.model_trainer import ModelTrainer, TrainerConfig, TrainingBatch, TrainingVariables
from nnabla_rl.models import Model, StochasticPolicy
from nnabla_rl.utils.data import set_data_to_variable
from nnabla_rl.utils.misc import copy_network_parameters, create_variable
from nnabla_rl.utils.optimization import conjugate_gradient
def _hessian_vector_product(flat_grads, params, vector):
    ''' Compute the product of the Hessian of the parameters with a vector
    using the Fisher-vector product trick: differentiate (grad . vector).

    Args:
        flat_grads (nn.Variable): gradient of parameters, should be flattened
        params (list[nn.Variable]): list of parameters
        vector (numpy.ndarray): input vector, shape is the same as flat_grads

    Returns:
        hessian_vector (numpy.ndarray): product of the Hessian of the
            parameters with the given vector

    See:
        https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/
    '''
    assert flat_grads.shape[0] == len(vector)
    if isinstance(vector, np.ndarray):
        # Wrap the plain ndarray so it can participate in the graph.
        vector = nn.Variable.from_numpy_array(vector)
    # Scalar (grad . v); backprop through it yields H . v in the param grads.
    hessian_multiplied_vector_loss = NF.sum(flat_grads * vector)
    hessian_multiplied_vector_loss.forward()
    # Zero stale gradients first -- backward() accumulates into param.g.
    for param in params:
        param.grad.zero()
    hessian_multiplied_vector_loss.backward()
    hessian_multiplied_vector = [param.g.copy().flatten() for param in params]
    return np.concatenate(hessian_multiplied_vector)
def _concat_network_params_in_ndarray(params):
''' Concatenate network parameters in numpy.ndarray,
this function returns copied parameters
Args:
params (OrderedDict): parameters
Returns:
flat_params (numpy.ndarray): flatten parameters in numpy.ndarray type
'''
flat_params = []
for param in params.values():
flat_param = param.d.copy().flatten()
flat_params.append(flat_param)
return np.concatenate(flat_params)
def _update_network_params_by_flat_params(params, new_flat_params):
''' Update Network parameters by hand
Args:
params (OrderedDict): parameteres
new_flat_params (numpy.ndarray): flattened new parameters
'''
if not isinstance(new_flat_params, np.ndarray):
raise ValueError("Invalid new_flat_params")
total_param_numbers = 0
for param in params.values():
param_shape = param.shape
param_numbers = len(param.d.flatten())
new_param = new_flat_params[total_param_numbers:total_param_numbers +
param_numbers].reshape(param_shape)
param.d = new_param
total_param_numbers += param_numbers
assert total_param_numbers == len(new_flat_params)
@dataclass
class TRPOPolicyTrainerConfig(TrainerConfig):
    """Configuration for TRPOPolicyTrainer.

    Positive-valued fields are validated in ``__post_init__`` via the
    ``_assert_positive`` helper inherited from ``TrainerConfig``.
    """

    # Sub-batch size for a single GPU forward pass; None means process the
    # whole training batch at once.
    gpu_batch_size: Optional[int] = None
    # Trust-region size: upper bound on the KL divergence between old and
    # updated policy accepted by the line search.
    sigma_kl_divergence_constraint: float = 0.01
    # Maximum number of step-size shrinkages in the backtracking line search.
    maximum_backtrack_numbers: int = 10
    # Damping added to the Fisher-vector product for numerical stability.
    conjugate_gradient_damping: float = 0.1
    # Iterations of conjugate gradient used to find the step direction.
    conjugate_gradient_iterations: int = 20
    # Multiplicative step-size decay per backtrack.
    # NOTE(review): this field is not validated in __post_init__.
    backtrack_coefficient: float = 0.5

    def __post_init__(self):
        # Validate the positive-valued hyperparameters.
        self._assert_positive(self.sigma_kl_divergence_constraint, 'sigma_kl_divergence_constraint')
        self._assert_positive(self.maximum_backtrack_numbers, 'maximum_backtrack_numbers')
        self._assert_positive(self.conjugate_gradient_damping, 'conjugate_gradient_damping')
        self._assert_positive(self.conjugate_gradient_iterations, 'conjugate_gradient_iterations')
class TRPOPolicyTrainer(ModelTrainer):
    """Trust Region Policy Optimization (TRPO) policy trainer.

    Maximizes the surrogate (approximate) return under a KL-divergence
    trust-region constraint: the step direction comes from conjugate
    gradient on Fisher-vector products, the step size from a backtracking
    line search. Parameters are updated by hand (no solver is used).
    """
    # type declarations to type check with mypy
    # NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
    # See https://mypy.readthedocs.io/en/stable/class_basics.html for details
    _config: TRPOPolicyTrainerConfig
    _approximate_return: nn.Variable
    _approximate_return_flat_grads: nn.Variable
    _kl_divergence: nn.Variable
    _kl_divergence_flat_grads: nn.Variable
    _old_policy: StochasticPolicy

    def __init__(self,
                 model: StochasticPolicy,
                 env_info: EnvironmentInfo,
                 config: TRPOPolicyTrainerConfig = TRPOPolicyTrainerConfig()):
        # No solvers are passed ({}): TRPO writes parameters directly.
        super(TRPOPolicyTrainer, self).__init__(model, {}, env_info, config)

    def _update_model(self,
                      models: Sequence[Model],
                      solvers: Dict[str, nn.solver.Solver],
                      batch: TrainingBatch,
                      training_variables: TrainingVariables,
                      **kwargs) -> Dict[str, np.ndarray]:
        """Run one TRPO update: conjugate-gradient step direction,
        backtracking line search, then sync the old policy.

        Note: ``solvers`` is unused -- parameters are updated manually.
        """
        s = batch.s_current
        a = batch.a_current
        advantage = batch.extra['advantage']

        policy = models[0]
        old_policy = self._old_policy

        full_step_params_update = self._compute_full_step_params_update(
            policy, s, a, advantage, training_variables)

        self._linesearch_and_update_params(policy, s, a, advantage,
                                           full_step_params_update, training_variables)

        # Make the "old" policy match the freshly updated policy.
        copy_network_parameters(policy.get_parameters(), old_policy.get_parameters(), tau=1.0)

        trainer_state: Dict[str, Any] = {}
        return trainer_state

    def _gpu_batch_size(self, batch_size):
        # We use gpu_batch_size to reduce one forward gpu calculation memory
        # Usually, gpu_batch_size is the same as batch_size
        if self._config.gpu_batch_size is None:
            return batch_size
        if self._config.gpu_batch_size < batch_size:
            return self._config.gpu_batch_size
        else:
            return batch_size

    def _build_training_graph(self, models: Sequence[Model], training_variables: TrainingVariables):
        """Build the KL-divergence and surrogate-return graphs plus their
        flattened gradients with respect to the policy parameters."""
        if len(models) != 1:
            raise RuntimeError('TRPO training only support 1 model for training')
        models = cast(Sequence[StochasticPolicy], models)
        policy = models[0]
        # Lazily create the frozen copy used as the "old" policy.
        if not hasattr(self, '_old_policy'):
            self._old_policy = cast(StochasticPolicy, policy.deepcopy('old_policy'))
        old_policy = self._old_policy

        # policy learning
        distribution = policy.pi(training_variables.s_current)
        old_distribution = old_policy.pi(training_variables.s_current)

        self._kl_divergence = NF.mean(old_distribution.kl_divergence(distribution))

        _kl_divergence_grads = nn.grad([self._kl_divergence], policy.get_parameters().values())

        self._kl_divergence_flat_grads = NF.concatenate(*[grad.reshape((-1,)) for grad in _kl_divergence_grads])
        self._kl_divergence_flat_grads.need_grad = True

        log_prob = distribution.log_prob(training_variables.a_current)
        old_log_prob = old_distribution.log_prob(training_variables.a_current)

        # Importance-sampling ratio pi/pi_old times advantage = surrogate return.
        prob_ratio = NF.exp(log_prob - old_log_prob)
        advantage = training_variables.extra['advantage']
        self._approximate_return = NF.mean(prob_ratio*advantage)

        _approximate_return_grads = nn.grad([self._approximate_return], policy.get_parameters().values())

        self._approximate_return_flat_grads = NF.concatenate(
            *[grad.reshape((-1,)) for grad in _approximate_return_grads])
        self._approximate_return_flat_grads.need_grad = True

        # Start with old policy identical to the current policy.
        copy_network_parameters(policy.get_parameters(), old_policy.get_parameters(), tau=1.0)

    def _compute_full_step_params_update(self, policy, s_batch, a_batch, adv_batch, training_variables):
        """Return the full (un-backtracked) parameter update vector
        beta * step_direction scaled to the KL trust region."""
        _, _, approximate_return_flat_grads = self._forward_all_variables(
            s_batch, a_batch, adv_batch, training_variables)

        def fisher_vector_product_wrapper(step_direction):
            return self._fisher_vector_product(policy, s_batch, a_batch,
                                               step_direction, training_variables)

        # Solve F x = g approximately with conjugate gradient.
        step_direction = conjugate_gradient(
            fisher_vector_product_wrapper, approximate_return_flat_grads,
            max_iterations=self._config.conjugate_gradient_iterations)

        fisher_vector_product = self._fisher_vector_product(
            policy, s_batch, a_batch, step_direction, training_variables)
        sAs = float(np.dot(step_direction, fisher_vector_product))

        # adding 1e-8 to avoid computational error
        beta = (2.0 * self._config.sigma_kl_divergence_constraint / (sAs + 1e-8)) ** 0.5
        full_step_params_update = beta * step_direction

        return full_step_params_update

    def _fisher_vector_product(self, policy, s_batch, a_batch, vector, training_variables):
        """Average damped Fisher-vector products over GPU-sized sub-batches."""
        sum_hessian_multiplied_vector = 0
        gpu_batch_size = self._gpu_batch_size(len(s_batch))
        total_blocks = len(s_batch) // gpu_batch_size
        for block_index in range(total_blocks):
            start_idx = block_index * gpu_batch_size
            set_data_to_variable(training_variables.s_current, s_batch[start_idx:start_idx+gpu_batch_size])
            set_data_to_variable(training_variables.a_current, a_batch[start_idx:start_idx+gpu_batch_size])
            for param in policy.get_parameters().values():
                param.grad.zero()
            self._kl_divergence_flat_grads.forward()
            hessian_vector_product = _hessian_vector_product(self._kl_divergence_flat_grads,
                                                             policy.get_parameters().values(),
                                                             vector)
            # Damping keeps the product positive definite / well conditioned.
            hessian_multiplied_vector = hessian_vector_product + self._config.conjugate_gradient_damping * vector
            sum_hessian_multiplied_vector += hessian_multiplied_vector
        return sum_hessian_multiplied_vector / total_blocks

    def _linesearch_and_update_params(
            self, policy, s_batch, a_batch, adv_batch, full_step_params_update, training_variables):
        """Backtracking line search: shrink the step until the surrogate
        return improves AND the KL constraint holds; restore the original
        parameters if no acceptable step is found."""
        current_flat_params = _concat_network_params_in_ndarray(policy.get_parameters())
        current_approximate_return, _, _ = self._forward_all_variables(
            s_batch, a_batch, adv_batch, training_variables)

        # Step sizes: 1, c, c^2, ... c^(max_backtracks-1)
        for step_size in self._config.backtrack_coefficient**np.arange(self._config.maximum_backtrack_numbers):
            new_flat_params = current_flat_params + step_size * full_step_params_update
            _update_network_params_by_flat_params(policy.get_parameters(), new_flat_params)
            approximate_return, kl_divergence, _ = self._forward_all_variables(
                s_batch, a_batch, adv_batch, training_variables)

            improved = approximate_return - current_approximate_return > 0.
            is_in_kl_divergence_constraint = kl_divergence < self._config.sigma_kl_divergence_constraint

            if improved and is_in_kl_divergence_constraint:
                # Accepted: parameters are already set to the new values.
                return
            elif not improved:
                logger.debug("TRPO LineSearch: Not improved, Shrink step size and Retry")
            elif not is_in_kl_divergence_constraint:
                logger.debug("TRPO LineSearch: Not fullfill constraints, Shrink step size and Retry")
            else:
                raise RuntimeError("Should not reach here")

        logger.debug("TRPO LineSearch: Reach max iteration so Recover current parmeteres...")
        _update_network_params_by_flat_params(policy.get_parameters(), current_flat_params)

    def _forward_all_variables(self, s_batch, a_batch, adv_batch, training_variables):
        """Evaluate surrogate return, KL divergence and return gradients,
        averaged over GPU-sized sub-batches of the full batch."""
        sum_approximate_return = 0.0
        sum_kl_divergence = 0.0
        sum_approximate_return_flat_grad = 0.0
        gpu_batch_size = self._gpu_batch_size(len(s_batch))
        total_blocks = len(s_batch) // gpu_batch_size
        for block_index in range(total_blocks):
            start_idx = block_index * gpu_batch_size
            set_data_to_variable(training_variables.s_current, s_batch[start_idx:start_idx+gpu_batch_size])
            set_data_to_variable(training_variables.a_current, a_batch[start_idx:start_idx+gpu_batch_size])
            training_variables.extra['advantage'].d = adv_batch[start_idx:start_idx+gpu_batch_size]

            nn.forward_all([self._approximate_return,
                            self._kl_divergence,
                            self._approximate_return_flat_grads])

            sum_approximate_return += float(self._approximate_return.d)
            sum_kl_divergence += float(self._kl_divergence.d)
            sum_approximate_return_flat_grad += self._approximate_return_flat_grads.d
        approximate_return = sum_approximate_return / total_blocks
        approximate_return_flat_grads = sum_approximate_return_flat_grad / total_blocks
        kl_divergence = sum_kl_divergence / total_blocks
        return approximate_return, kl_divergence, approximate_return_flat_grads

    def _setup_training_variables(self, batch_size: int) -> TrainingVariables:
        """Create state/action/advantage variables sized to one GPU sub-batch."""
        gpu_batch_size = self._gpu_batch_size(batch_size)
        s_current_var = create_variable(gpu_batch_size, self._env_info.state_shape)
        a_current_var = create_variable(gpu_batch_size, self._env_info.action_shape)
        advantage_var = create_variable(gpu_batch_size, 1)
        extra = {}
        extra['advantage'] = advantage_var
        return TrainingVariables(gpu_batch_size, s_current_var, a_current_var, extra=extra)
| 46.48495 | 113 | 0.711706 |
b4289aa0f12e1bb4d0b95ba9002649583ed6bd68 | 751 | py | Python | src/my_rshine/urls.py | zainab66/rshine-django | af02f26a4ad3a578db126ea7ae5bc45f288da302 | [
"bzip2-1.0.6"
] | null | null | null | src/my_rshine/urls.py | zainab66/rshine-django | af02f26a4ad3a578db126ea7ae5bc45f288da302 | [
"bzip2-1.0.6"
] | null | null | null | src/my_rshine/urls.py | zainab66/rshine-django | af02f26a4ad3a578db126ea7ae5bc45f288da302 | [
"bzip2-1.0.6"
] | null | null | null | """my_rshine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Project-level URL routing table: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.136364 | 77 | 0.70972 |
1d8b87a25bcffa02a596aae5b84439715c00f584 | 4,701 | py | Python | SVM/svm.py | Sh-Zh-7/AI-Term-Project | b0b396a2673721b943f05ae562af480bce4edf2e | [
"MIT"
] | 1 | 2021-12-06T09:12:06.000Z | 2021-12-06T09:12:06.000Z | SVM/svm.py | Sh-Zh-7/hands-on-digits-recognizer | b0b396a2673721b943f05ae562af480bce4edf2e | [
"MIT"
] | null | null | null | SVM/svm.py | Sh-Zh-7/hands-on-digits-recognizer | b0b396a2673721b943f05ae562af480bce4edf2e | [
"MIT"
] | null | null | null | from svmlib import *
from os import listdir
def Img2Vector(filename):
    """Read a 32x32 text image of '0'/'1' characters and flatten it into a
    (1, 1024) numpy row vector.

    Args:
        filename (str): path of the digit text file (32 lines, 32 chars each).

    Returns:
        numpy.ndarray: shape (1, 1024) array of the parsed digits.
    """
    ret_val = np.zeros((1, 1024))
    # Context manager guarantees the handle is closed even on error
    # (the original implementation leaked the file handle).
    with open(filename) as file:
        content = file.readlines()
    for i in range(32):
        line = content[i]
        for j in range(32):
            ret_val[0, 32 * i + j] = int(line[j])
    return ret_val
def GetDataSetByDir(dirname):
    """Load every digit file under ``dirname`` into one data matrix.

    The integer before the first '_' of each file name is taken as the
    sample's label.

    Args:
        dirname (str): directory path (must end with a path separator,
            since it is concatenated directly with each file name).

    Returns:
        tuple: (data_set ndarray of shape (m, 1024), labels list of ints)
    """
    file_names = listdir(dirname)
    sample_count = len(file_names)
    labels = []
    data_set = np.zeros((sample_count, 1024))
    for row, file_name in enumerate(file_names):
        labels.append(int(file_name.split('_')[0]))
        data_set[row, :] = Img2Vector(dirname + file_name)
    return data_set, labels
def GetClassSet(labels):
    """Build ten one-vs-rest label lists, one per digit 0..9.

    For digit d, entry i is 1 when labels[i] == d and -1 otherwise.

    Args:
        labels (list[int]): true digit label per sample.

    Returns:
        list[list[int]]: ten binary (+1/-1) label lists.
    """
    return [[1 if label == digit else -1 for label in labels]
            for digit in range(10)]
def OVR(data_set, labels, k_tup):
    """Train ten one-vs-rest binary SVM classifiers with SMO.

    Args:
        data_set (ndarray): training samples.
        labels (list[int]): true digit label per sample.
        k_tup (tuple): kernel description passed through to SMO.

    Returns:
        tuple: (bs, alphas, class_sets) -- the bias, Lagrange multipliers
        and binary label list for each of the ten classifiers.
    """
    bs = []
    alphas = []
    class_sets = []
    for digit in range(10):
        # Binary labels: +1 for this digit, -1 for every other digit.
        binary_labels = [1 if label == digit else -1 for label in labels]
        b, alpha = SMO(data_set, binary_labels, 200, 0.0001, 10000, k_tup)
        print("Current number:%d" % digit)
        bs.append(b)
        alphas.append(alpha)
        class_sets.append(binary_labels)
    return bs, alphas, class_sets
def GetSupportVectors(data_set, labels, alphas):
    """Extract, for each one-vs-rest classifier, the rows whose Lagrange
    multiplier is strictly positive (the support vectors).

    Args:
        data_set (np.matrix): training samples, one per row.
        labels (np.matrix): column matrix of binary labels.
        alphas (list[np.matrix]): per-classifier multiplier columns.

    Returns:
        tuple: (sv_indexes, svs_all_num, labels_svs_all_num) -- parallel
        lists holding, per classifier, the support-vector row indices,
        the support-vector rows and their labels.
    """
    sv_indexes, svs_all_num, labels_svs_all_num = [], [], []
    for alpha in alphas:
        support_idx = np.nonzero(alpha.A > 0)[0]
        sv_indexes.append(support_idx)
        svs_all_num.append(data_set[support_idx])
        labels_svs_all_num.append(labels[support_idx])
    return sv_indexes, svs_all_num, labels_svs_all_num
def StoreParams(alphas, bs, dirname="./SvmParams"):
    """Persist the trained SVM parameters with pickle.

    Args:
        alphas: per-classifier Lagrange multipliers.
        bs: per-classifier bias terms.
        dirname (str): target directory; created if missing. Defaults to
            the original hard-coded "./SvmParams" for backward compatibility.
    """
    import os
    import pickle
    # Creating the directory fixes the crash the original code hit when
    # ./SvmParams did not exist yet.
    os.makedirs(dirname, exist_ok=True)
    with open(os.path.join(dirname, "alphas.txt"), "wb") as f1, \
            open(os.path.join(dirname, "bs.txt"), "wb") as f2:
        pickle.dump(alphas, f1)
        pickle.dump(bs, f2)
def LoadParams(dirname="./SvmParams"):
    """Load pickled SVM parameters previously saved by StoreParams.

    Args:
        dirname (str): directory holding alphas.txt / bs.txt. Defaults to
            the original hard-coded "./SvmParams" for backward compatibility.

    Returns:
        tuple: (alphas, bs) as stored by StoreParams.
    """
    import os
    import pickle
    with open(os.path.join(dirname, "alphas.txt"), "rb") as f1, \
            open(os.path.join(dirname, "bs.txt"), "rb") as f2:
        alphas = pickle.load(f1)
        bs = pickle.load(f2)
    return alphas, bs
def _EvalErrorRate(data_set, labels, class_sets, alphas, bs,
                   sv_indexes, svs_all_num, labels_svs_all_num, k_tup):
    """Classify every row of data_set with the ten one-vs-rest classifiers
    and return the error rate. Support vectors always come from training."""
    m, n = np.shape(data_set)
    error_count = 0
    for i in range(m):
        result = -1
        for num in range(10):
            kernel_eval = KernelTrans(svs_all_num[num], data_set[i, :], k_tup)
            predict = kernel_eval.T * np.multiply(labels_svs_all_num[num], alphas[num][sv_indexes[num]]) + bs[num]
            # NOTE(review): the acceptance rule compares against the sign of
            # the *true* class label, so ground truth leaks into prediction.
            # Kept as-is to preserve the original behavior.
            if np.sign(predict) == np.sign(class_sets[num][i]):
                result = num
                break
        print("The real answer is %d, and the classifier came back with %d" %
              (labels[i], result))
        if result != labels[i]:
            error_count += 1
    return error_count / m


def TestRBF(k_tup=("rbf", 10)):
    """Evaluate the stored one-vs-rest RBF-kernel SVMs on both the training
    and test digit sets, printing per-sample results and both error rates.

    Args:
        k_tup (tuple): kernel name and parameter for KernelTrans.
    """
    data_set, labels = GetDataSetByDir("./DataSet/TrainingDigits/")
    # Get b and alphas
    # bs, alphas, class_sets = OVR(data_set, labels, k_tup)
    # StoreParams(alphas, bs)
    # Parameters are loaded from the files written by a previous training run.
    alphas, bs = LoadParams()
    class_sets = GetClassSet(labels)
    data_set = np.mat(data_set)
    labels = np.mat(labels).transpose()
    # Support vectors are taken from the training set only.
    sv_indexes, svs_all_num, labels_svs_all_num = GetSupportVectors(data_set, labels, alphas)
    train_error = _EvalErrorRate(data_set, labels, class_sets, alphas, bs,
                                 sv_indexes, svs_all_num, labels_svs_all_num, k_tup)
    print("The training set error is %f" % train_error)
    # Same evaluation on the held-out test digits.
    data_set, labels = GetDataSetByDir("./DataSet/TestDigits/")
    class_sets = GetClassSet(labels)
    data_set = np.mat(data_set)
    labels = np.mat(labels).transpose()
    test_error = _EvalErrorRate(data_set, labels, class_sets, alphas, bs,
                                sv_indexes, svs_all_num, labels_svs_all_num, k_tup)
    print("The test set error is %f" % test_error)
# Script entry point: run the RBF-kernel SVM evaluation end to end.
if __name__ == "__main__":
    TestRBF()
| 34.566176 | 114 | 0.616677 |
e78c32a0fdb34d8a128914f74f085e064add3058 | 3,397 | py | Python | vmraid/patches/v4_1/file_manager_fix.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v4_1/file_manager_fix.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v4_1/file_manager_fix.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import vmraid
import os
from vmraid.core.doctype.file.file import get_content_hash, get_file_name
from vmraid.utils import get_files_path, get_site_path
# The files missed by the previous patch might have been replaced with new files
# with the same filename
#
# This patch does the following,
# * Detect which files were replaced and rename them with name{hash:5}.extn and
# update filedata record for the new file
#
# * make missing_files.txt in site dir with files that should be recovered from
# a backup from a time before version 3 migration
#
# * Patch remaining unpatched File records.
from six import iteritems
def execute():
    """Patch entry point: recover File records missed by the v3 migration.

    First renames files that were replaced by same-named uploads, then
    normalizes file_name/file_url and fills in content_hash for every
    File record that still lacks one.
    """
    vmraid.db.auto_commit_on_many_writes = True

    rename_replacing_files()

    for name, file_name, file_url in vmraid.db.sql(
            """select name, file_name, file_url from `tabFile`
            where ifnull(file_name, '')!='' and ifnull(content_hash, '')=''"""):
        b = vmraid.get_doc('File', name)
        old_file_name = b.file_name
        # Store only the base name; rebuild the URL from the old path.
        b.file_name = os.path.basename(old_file_name)
        if old_file_name.startswith('files/') or old_file_name.startswith('/files/'):
            b.file_url = os.path.normpath('/' + old_file_name)
        else:
            b.file_url = os.path.normpath('/files/' + old_file_name)
        try:
            # NOTE(review): this filters on file_name == the record's docname,
            # which looks suspicious -- confirm it should not be b.file_name.
            _file = vmraid.get_doc("File", {"file_name": name})
            content = _file.get_content()
            b.content_hash = get_content_hash(content)
        except IOError:
            # File content is missing on disk; leave the hash unset.
            print('Warning: Error processing ', name)
            b.content_hash = None
        b.flags.ignore_duplicate_entry_error = True
        b.save()

    vmraid.db.auto_commit_on_many_writes = False
def get_replaced_files():
    """Find file names that exist both as a new-style File record and as an
    unmigrated (content-hash-less) old-style 'files/...' record.

    Returns:
        list: (file_name, [File docnames]) pairs, one per clashing name.
    """
    new_files = dict(vmraid.db.sql("select name, file_name from `tabFile` where file_name not like 'files/%'"))
    old_files = dict(vmraid.db.sql("select name, file_name from `tabFile` where ifnull(content_hash, '')=''"))
    invfiles = invert_dict(new_files)

    old_names = old_files.values()
    replaced = []
    for nname, nfilename in iteritems(new_files):
        if 'files/' + nfilename in old_names:
            replaced.append((nfilename, invfiles[nfilename]))
    return replaced
def rename_replacing_files():
    """Rename on-disk files that clash with re-uploaded replacements.

    For every (file_name, docnames) pair from get_replaced_files():
    * record the clashing names in <site>/missing_files.txt so the old
      contents can be recovered from a pre-migration backup,
    * rename the file to its content-hash-suffixed name, and
    * point every matching File document at the new name.
    """
    replaced_files = get_replaced_files()
    if len(replaced_files):
        missing_files = [v[0] for v in replaced_files]
        # Binary mode: the payload is utf-8 encoded bytes. Writing bytes to
        # a text-mode handle raised TypeError on Python 3.
        with open(get_site_path('missing_files.txt'), 'wb') as f:
            f.write(('\n'.join(missing_files) + '\n').encode('utf-8'))

    for file_name, file_datas in replaced_files:
        print('processing ' + file_name)
        content_hash = vmraid.db.get_value('File', file_datas[0], 'content_hash')
        if not content_hash:
            continue
        new_file_name = get_file_name(file_name, content_hash)
        if os.path.exists(get_files_path(new_file_name)):
            # Target already exists: nothing to do for this file.
            # (Fix: the original printed 'skipping' on the non-skipped
            # path, after the `continue`.)
            print('skipping ' + file_name)
            continue
        try:
            os.rename(get_files_path(file_name), get_files_path(new_file_name))
        except OSError:
            print('Error renaming ', file_name)
        for name in file_datas:
            f = vmraid.get_doc('File', name)
            f.file_name = new_file_name
            f.file_url = '/files/' + new_file_name
            f.save()
def invert_dict(ddict):
    """Invert a mapping, grouping keys that share a value.

    Args:
        ddict (dict): mapping to invert.

    Returns:
        dict: value -> list of the keys that mapped to it, in iteration order.
    """
    ret = {}
    # setdefault replaces the manual "create list on first hit" branch, and
    # plain .items() works on both Python 2 and 3, so six.iteritems is not
    # needed here.
    for k, v in ddict.items():
        ret.setdefault(v, []).append(k)
    return ret
def get_file_name(fname, hash):
    """Insert the first five characters of *hash* before the extension.

    Note: when *fname* has no extension the result keeps a trailing dot
    ("name<hash5>."). This definition shadows the get_file_name imported
    at the top of the module.
    """
    stem, dot, extn = fname.rpartition('.')
    if not dot:
        stem, extn = fname, ''
    return '{partial}{suffix}.{extn}'.format(
        partial=stem, extn=extn, suffix=hash[:5])
| 33.303922 | 108 | 0.723874 |
d8aadb78183785051a07792b02c758b4895ee861 | 380 | py | Python | tests/test_openmc_integration.py | open-radiation-sources/parametric-plasma-source | f72ed2d5929fa88e45a4124289355955947f6ba0 | [
"MIT"
] | 6 | 2020-11-26T11:54:15.000Z | 2022-03-31T14:52:52.000Z | tests/test_openmc_integration.py | open-radiation-sources/parametric-plasma-source | f72ed2d5929fa88e45a4124289355955947f6ba0 | [
"MIT"
] | 27 | 2020-10-21T08:30:02.000Z | 2022-01-14T22:17:10.000Z | tests/test_openmc_integration.py | open-radiation-sources/parametric-plasma-source | f72ed2d5929fa88e45a4124289355955947f6ba0 | [
"MIT"
] | 3 | 2020-10-21T08:07:54.000Z | 2020-11-10T13:40:58.000Z | """Test sampling via OpenMC."""
import pytest
from parametric_plasma_source import sample_source_openmc
pytest.importorskip("openmc")
class TestOpenMCIntegration:
    """Smoke-test that a plasma source can be sampled through OpenMC."""

    def test_openmc_integration(self, plasma_source):
        # `plasma_source` is a pytest fixture defined elsewhere (conftest),
        # presumably a parametric_plasma_source PlasmaSource -- confirm there.
        out = sample_source_openmc(plasma_source)
        # A clean run emits nothing on stderr and reports completion on stdout.
        assert out.stderr is None
        assert "Source sampling completed." in out.stdout.decode("utf-8")
| 25.333333 | 73 | 0.755263 |
c7307dd4449e4a46386287c92da608e2421fce4f | 693 | py | Python | lib/inputstreamhelper/unicodes.py | tmihai20/script.module.inputstreamhelper | 25171444b48d85a1559ea5d2a8337f5ae8e66836 | [
"MIT"
] | null | null | null | lib/inputstreamhelper/unicodes.py | tmihai20/script.module.inputstreamhelper | 25171444b48d85a1559ea5d2a8337f5ae8e66836 | [
"MIT"
] | null | null | null | lib/inputstreamhelper/unicodes.py | tmihai20/script.module.inputstreamhelper | 25171444b48d85a1559ea5d2a8337f5ae8e66836 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT License (see LICENSE.txt or https://opensource.org/licenses/MIT)
"""Implements Unicode Helper functions"""
from __future__ import absolute_import, division, unicode_literals
def to_unicode(text, encoding='utf-8', errors='strict'):
    """Return *text* as a unicode string, decoding it when it is bytes;
    any non-bytes value is returned unchanged."""
    if not isinstance(text, bytes):
        return text
    return text.decode(encoding, errors)
def from_unicode(text, encoding='utf-8', errors='strict'):
    """Force unicode text back to the native str type on Python 2.

    On Python 3 (and for non-unicode input) the value is returned
    unchanged; the version check must run first so the py2-only
    ``unicode`` name is never evaluated on Python 3.
    """
    import sys
    if sys.version_info.major != 2:
        return text
    if isinstance(text, unicode):  # noqa: F821; pylint: disable=undefined-variable,useless-suppression
        return text.encode(encoding, errors)
    return text
| 34.65 | 135 | 0.704185 |
93da49526ae89f6c8eb4a0d7258d43ebf444fc45 | 3,356 | py | Python | spikey/snn/input/rbf.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 4 | 2021-02-25T20:53:41.000Z | 2022-01-18T15:27:07.000Z | spikey/snn/input/rbf.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 5 | 2021-03-06T05:35:10.000Z | 2021-03-31T09:27:57.000Z | spikey/snn/input/rbf.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | null | null | null | """
Network input dynamics.
"""
import numpy as np
from spikey.snn.input.template import Input
class RBF(Input):
    """
    Radial basis function neurons to simulate place cells.

    Parameters
    ----------
    kwargs: dict
        Dictionary with values for each key in NECESSARY_KEYS.

    Examples
    --------

    .. code-block:: python

        processing_time = 10
        config = {
            "n_inputs": 10,
            "magnitude": 2,
            "input_firing_steps": -1,
            "input_pct_inhibitory": 0.2,
        }
        input = RBF(**config)
        input.reset()
        env = Cartpole(preset='FREMAUX')

        state = env.reset()
        for step in range(10):
            input.update(state)

            for _ in range(processing_time)
                in_fires = input.__call__()

            state, _, done, __ = env.update(0)

            if done:
                break

    .. code-block:: python

        class network_template(Network):
            keys = {
                "n_inputs": 10,
                "magnitude": 2,
                "input_firing_steps": -1,
                "input_pct_inhibitory": 0.2,
            }
            parts = {
                "inputs": RBF
            }
    """

    def __init__(self, **kwargs):
        # NOTE(review): the parent Input.__init__ is never invoked here, so
        # the attributes read below (values, polarities, _magnitude,
        # network_time) must be set elsewhere -- confirm against the Input
        # template before relying on this class.
        print(
            "WARNING: input.RBF is not fully implemented!"
        )  # TODO add to unit tests when done

    def __call__(self) -> np.ndarray:  # annotation fixed: np.bool was removed from NumPy
        """
        Spikes output from each input neuron, called once per network step.

        Returns
        -------
        ndarray[n_inputs, dtype=bool] Spike output for each neuron.
        """ ## Does not respect polarities.
        if not self.values.size:
            return []

        # Independent Bernoulli draw per neuron: fire with probability
        # self.values[i], scaled by the configured magnitude.
        spikes = (
            np.int_(np.random.uniform(0, 1, size=self.values.size) <= self.values)
            * self._magnitude
        )
        self.network_time += 1
        return spikes * self.polarities

    def update(self, state: object):
        """
        Update input generator, called once per game step.

        Computes per-neuron firing probabilities from a Gaussian (RBF)
        response over a (cart position x pole angle x pole angular
        velocity) grid of receptive-field centers.

        Parameters
        ----------
        state: object
            Enviornment state, unpacked as (x, xdot, theta, thetadot)
            -- presumably a CartPole observation; confirm with caller.
        """
        self.network_time = 0

        # Angle difference; wrap-around (% 2*pi) is currently disabled.
        alpha = lambda a1, a2: (a1 - a2)  # % (2 * np.pi)

        x, xdot, theta, thetadot = state

        # Squash angular velocity into a bounded range before matching.
        lambda_thetadot = np.arctan(thetadot / 4)

        # Receptive-field centers along each state dimension.
        n_x = [5 / 4 * m for m in range(-3, 4)]  # 5/4 * m, m in {-3..3}
        n_xdot = [5 / 4 * n for n in range(-3, 4)]  # 5/4 * n, n in {-3..3}
        # NOTE(review): the original comments here ("2pi/3 * p - pi" etc.)
        # do not match the code, which uses 2*pi/180 and 2*pi/30 factors
        # over p, q in {-7..7}.
        n_theta = [
            2 * np.pi / 180 * p for p in range(-7, 8)
        ]  # 2*pi/180 * p, p in {-7..7}
        n_thetadot = [
            2 * np.pi / 30 * q for q in range(-7, 8)
        ]  # 2*pi/30 * q, q in {-7..7}

        # Per-dimension Gaussian variances; var_2 (xdot) is currently unused.
        var_1, var_2, var_3, var_4 = 5 / 4, 5 / 4, 1 * np.pi / 1200, 2 * np.pi / 60

        pcm = 0.4  # 400hz

        # RBF response for center (a, c, d); the xdot term (index b) is
        # commented out, so b is accepted but ignored.
        p_t = lambda a, b, c, d: pcm * np.exp(
            -((x - n_x[a]) ** 2)
            / (2 * var_1)  # -(xdot-n_xdot[b])**2 / (2 * var_2) \
            - alpha(theta, n_theta[c]) ** 2 / (2 * var_3)
            - (lambda_thetadot - n_thetadot[d]) ** 2 / (2 * var_4)
        )

        # 7 x-centers x 15 theta-centers x 15 thetadot-centers = 1575 cells.
        self.values = np.zeros((7, 15, 15))
        for (m, p, q), _ in np.ndenumerate(self.values):
            n = 0
            self.values[m, p, q] = p_t(m, n, p, q)

        self.values = np.ravel(self.values)
| 26.634921 | 83 | 0.480036 |
5a39b9c92f988a32ee04e23f11eb844e44ef822d | 4,427 | py | Python | vistrails/core/data_structures/bijectivedict.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 83 | 2015-01-05T14:50:50.000Z | 2021-09-17T19:45:26.000Z | vistrails/core/data_structures/bijectivedict.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 254 | 2015-01-02T20:39:19.000Z | 2018-11-28T17:16:44.000Z | vistrails/core/data_structures/bijectivedict.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 40 | 2015-04-17T16:46:36.000Z | 2021-09-28T22:43:24.000Z | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
class Bidict(dict):
    """Subclass of mapping that automatically keeps track of the inverse
    mapping. Note: self.inverse is a simple dict, so it won't keep
    track of deletions directly to self.inverse and things like
    that. Use this for lookups ONLY!. Also, if mapping is not
    bijective, there's no guarantee the inverse mapping will be
    consistent (particularly in the presence of deletions.)
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.inverse = {}
        # items() works on both Python 2 and 3; the previous iteritems()
        # call was Python-2-only and raised AttributeError on Python 3.
        for (k, v) in self.items():
            self.inverse[v] = k
    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self.inverse[value] = key
    def __delitem__(self, key):
        v = self[key]
        dict.__delitem__(self, key)
        # Might not be true if mapping was not bijective
        if v in self.inverse:
            del self.inverse[v]
    def __copy__(self):
        # Copy the inverse explicitly since dict copying would not carry it.
        r = Bidict()
        r.inverse = copy.copy(self.inverse)
        r.update(self)
        return r
    def update(self, other):
        # Accept either a mapping or an iterable of (key, value) pairs.
        # Detect mappings via 'keys' (present on py2 and py3 dicts) instead
        # of the Python-2-only 'iterkeys', which made plain-dict updates
        # fall into the pair-iteration branch on Python 3 and crash.
        if hasattr(other, 'keys'):
            for i in other.keys():
                self[i] = other[i]
        else:
            for (k, v) in other:
                self[k] = v
##############################################################################
import unittest
class TestBidict(unittest.TestCase):
    """Unit tests for Bidict; updated to Python-3-compatible APIs
    (range instead of the removed xrange, assertEqual instead of the
    deprecated assertEquals)."""
    def test1(self):
        x = Bidict()
        for i in range(10):
            x[i] = 9-i
        for i in range(10):
            self.assertEqual(x[i], 9-i)
            self.assertEqual(x.inverse[i], 9-i)
        del x[1]
        self.assertRaises(KeyError, x.__getitem__, 1)
        self.assertRaises(KeyError, x.inverse.__getitem__, 8)
    def test_non_bijective(self):
        """Tests resilience (not correctness!) under non-bijectiveness."""
        x = Bidict()
        x[1] = 2
        x[3] = 2
        del x[1]
        del x[3]
    def test_copy(self):
        """Tests copying a Bidict."""
        x = Bidict()
        x[1] = 2
        x[3] = 4
        y = copy.copy(x)
        assert y.inverse[4] == x.inverse[4]
        assert y.inverse[2] == x.inverse[2]
    def test_update(self):
        """Tests if updating a bidict with a dict works"""
        x = {1:2, 3:4}
        y = Bidict()
        y.update(x)
        assert y.inverse[4] == 3
        assert y.inverse[2] == 1
if __name__ == '__main__':
    # Run the unit tests above when this module is executed directly.
    unittest.main()
| 34.317829 | 79 | 0.607635 |
ad4e19e3cf59e1d124f90db050e14c4ee92cb1f4 | 2,206 | py | Python | mods/sky/satellites.py | healeycodes/pi | 7288186c8529b2192eda0da8e1699c59702cb230 | [
"MIT"
] | 26 | 2021-05-09T17:11:16.000Z | 2021-12-05T20:49:49.000Z | mods/sky/satellites.py | healeycodes/pi | 7288186c8529b2192eda0da8e1699c59702cb230 | [
"MIT"
] | null | null | null | mods/sky/satellites.py | healeycodes/pi | 7288186c8529b2192eda0da8e1699c59702cb230 | [
"MIT"
] | 1 | 2021-06-25T12:51:03.000Z | 2021-06-25T12:51:03.000Z | from server.db import get_db, FORMAT_STRING as F
from datetime import datetime
from dataclasses import dataclass
# https://en.wikipedia.org/wiki/List_of_GPS_satellites
PRN_DESCRIPTIONS = {
"13": "USA-132",
"20": "USA-150",
"28": "USA-151",
"16": "USA-166",
"21": "USA-168",
"22": "USA-175",
"19": "USA-177",
"02": "USA-180",
"17": "USA-183",
"31": "USA-190",
"12": "USA-192",
"15": "USA-196",
"29": "USA-199",
"07": "USA-201",
"05": "USA-206",
"25": "USA-213",
"01": "USA-232",
"24": "USA-239",
"27": "USA-242",
"30": "USA-248",
"06": "USA-251",
"09": "USA-256",
"03": "USA-258",
"26": "USA-260",
"08": "USA-262",
"10": "USA-265",
"32": "USA-266",
"04": "USA-289 Vespucci",
"18": "USA-293 Magellan",
"23": "USA-304 Matthew Henson",
"14": "USA-309 Sacagawea",
}
@dataclass
class Satellite:
    """One row of the `satellites` table (see save_sats/get_sats)."""
    # Pseudo-random-noise code identifying the satellite (key of PRN_DESCRIPTIONS).
    prn: str
    # Human-readable name; "Unknown" when the PRN is not in PRN_DESCRIPTIONS.
    description: str
    # 1 = present in the latest save_sats() update, 0 = not seen then.
    status: int
    # Time of the last status change for this row.
    timestamp: str
def save_sats(sats):
    """Persist the set of currently visible satellites.

    First marks every active row inactive, then re-activates (or inserts)
    one row per PRN in *sats*, stamping each change with the current time.

    Args:
        sats: iterable of PRN strings for the satellites seen right now.
    """
    db = get_db()
    cursor = db.connection.cursor()
    # TODO: optimize this SQL once we've settled on a SQL language
    # Deactivate everything first; PRNs still present in `sats` are
    # switched back on in the loop below.
    cursor.execute(
        f"UPDATE satellites SET status=0, timestamp={F} WHERE status=1",
        (datetime.now(),),
    )
    for prn in sats:
        cursor.execute(f"SELECT prn FROM satellites WHERE prn={F} LIMIT 1", (prn,))
        if not cursor.fetchone():
            # First sighting of this PRN: insert a new active row.
            desc = PRN_DESCRIPTIONS[prn] if prn in PRN_DESCRIPTIONS else "Unknown"
            cursor.execute(
                f"INSERT INTO satellites (prn, status, description, timestamp) VALUES ({F}, {F}, {F}, {F})",
                (prn, 1, desc, datetime.now(),),
            )
        else:
            # Known PRN: flip it back to active (the status=0 filter makes
            # this a no-op for rows that were somehow left active).
            cursor.execute(
                f"UPDATE satellites SET status=1, timestamp={F} WHERE prn={F} and status=0",
                (datetime.now(), prn,),
            )
    db.close()
def get_sats():
    """Load every row of the `satellites` table as Satellite objects."""
    db = get_db()
    cur = db.connection.cursor()
    cur.execute("SELECT prn, description, status, timestamp FROM satellites")
    records = cur.fetchall()
    db.close()
    satellites = []
    for prn, description, status, timestamp in records:
        satellites.append(
            Satellite(
                prn=prn,
                description=description,
                status=status,
                timestamp=timestamp,
            )
        )
    return satellites
| 25.952941 | 108 | 0.547144 |
1894f3f128bb8c15c8cf4789dd1245a09df01275 | 165 | py | Python | vrft/__init__.py | emersonboeira/pyvrft | d61d2789169435600833111a9fb0ad5573e26922 | [
"MIT"
] | 8 | 2019-09-08T17:22:03.000Z | 2021-07-02T09:05:28.000Z | vrft/__init__.py | ElsevierSoftwareX/SOFTX_2019_285 | f731541b2d6b6270c7987b08ee6c90d662a53a4c | [
"MIT"
] | null | null | null | vrft/__init__.py | ElsevierSoftwareX/SOFTX_2019_285 | f731541b2d6b6270c7987b08ee6c90d662a53a4c | [
"MIT"
] | 2 | 2019-09-08T17:38:50.000Z | 2019-12-13T15:41:50.000Z | from vrft.control import design
from vrft.control import filter
from vrft.invfunc import stbinv
from vrft.invfunc import mtf2ss
from vrft.csvfunc import datafromcsv
| 27.5 | 36 | 0.848485 |
43a547f1594bc523a4375a4e06759250ea5a9725 | 2,021 | py | Python | tests/test_utils.py | kruupos/http_monitor | a56bec016db68f1a76835aff6b8f57975715c28e | [
"MIT"
] | null | null | null | tests/test_utils.py | kruupos/http_monitor | a56bec016db68f1a76835aff6b8f57975715c28e | [
"MIT"
] | 7 | 2018-10-15T14:15:26.000Z | 2018-10-17T18:08:42.000Z | tests/test_utils.py | kruupos/sniwi | a56bec016db68f1a76835aff6b8f57975715c28e | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Test utils functions
"""
import pytest
from collections import defaultdict
from sniwi.utils import top_three, get_section
class TestUtils(object):
    """
    Test utils functions
    """
    # Each parametrize case is (input dict, expected "key : value" list, label).
    @pytest.mark.parametrize('in_dict,out_list,explanation', [
        (
            {'max': 20, 'audrey': 50, 'sandra': 25, 'felix': 10},
            ['audrey : 50', 'sandra : 25', 'max : 20'],
            'with normal dict'
        ),
        (
            defaultdict(int, {'max': 20, 'audrey': 50, 'sandra': 25, 'felix': 10}),
            ['audrey : 50', 'sandra : 25', 'max : 20'],
            'with defaultdict'
        ),
        ({}, [], 'with empty dict'),
        ({'max': 20, 'audrey': 50}, ['audrey : 50', 'max : 20'], 'with small dict')
    ])
    def test_top_three(self, in_dict, out_list, explanation):
        """
        test top_three utils method
        params:
            in_dict: input dict or defaultdict to test
            out_list: expected 'key : value' list returned by top_three
            explanation: (str), additional information shown on assert failure
        """
        assert top_three(in_dict) == out_list, explanation
    # Each parametrize case is (input URL, expected section or None, label).
    @pytest.mark.parametrize('url,section,explanation', [
        (
            'http://my.site.com/pages/ohers',
            '/pages',
            'with full url'
        ),
        (
            'http://my.site.com/pages?query',
            '/pages',
            'with defaultdict'
        ),
        (
            '/pages/others',
            '/pages',
            'without domain name'
        ),
        ('/', '/', 'minimalist'),
        ('NotanUrl', None, 'without an url')
    ])
    def test_get_section(self, url, section, explanation):
        """
        test get_section utils method
        params:
            url: (str) URL to extract the section from
            section: (str or None) expected result
            explanation: (str), additional information shown on assert failure
        """
        assert get_section(url) == section, explanation
e324343fbbccca7322d0749b008bb5bcb6b0d790 | 89 | py | Python | junn/io/__init__.py | modsim/junn | a40423b98c6a3739dd0b2ba02d546a5db91f9215 | [
"BSD-2-Clause"
] | null | null | null | junn/io/__init__.py | modsim/junn | a40423b98c6a3739dd0b2ba02d546a5db91f9215 | [
"BSD-2-Clause"
] | null | null | null | junn/io/__init__.py | modsim/junn | a40423b98c6a3739dd0b2ba02d546a5db91f9215 | [
"BSD-2-Clause"
] | null | null | null | """Input/output module."""
REGION_BACKGROUND = 0
REGION_FOREGROUND = 1
REGION_BORDER = 2
| 17.8 | 26 | 0.752809 |
d1be58d3479ac89b721c20c6a65233650de205a1 | 1,163 | py | Python | meiduo_mall/meiduo_mall/apps/oauth/migrations/0001_initial.py | TCYhupo/MeiduoMall | ac810a2c95058cc052057722088a209363940331 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/oauth/migrations/0001_initial.py | TCYhupo/MeiduoMall | ac810a2c95058cc052057722088a209363940331 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/oauth/migrations/0001_initial.py | TCYhupo/MeiduoMall | ac810a2c95058cc052057722088a209363940331 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2020-07-30 04:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the oauth app: creates the QQ-login binding table."""
    initial = True
    dependencies = [
        # The FK below targets the (swappable) user model, so it must exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='OAuthQQUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                # NOTE(review): 'uodate_time' is a typo for 'update_time'; fixing
                # it now would require a follow-up rename migration, so the
                # applied name is left as-is here.
                ('uodate_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('openid', models.CharField(db_index=True, max_length=64, verbose_name='openid')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
            ],
            options={
                'verbose_name': 'QQ登录用户数据',
                'verbose_name_plural': 'QQ登录用户数据',
                'db_table': 'tb_oauth_app',
            },
        ),
    ]
| 35.242424 | 137 | 0.614789 |
ae1f68b61b69ae6b1bf484600b3be4cc8bbd235c | 7,322 | py | Python | 基础教程/A2-神经网络基本原理/第6步 - 模型部署/src/ch13-ModelInference/ONNXConverter/transfer.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 11,094 | 2019-05-07T02:48:50.000Z | 2022-03-31T08:49:42.000Z | 基础教程/A2-神经网络基本原理/第6步 - 模型部署/src/ch13-ModelInference/ONNXConverter/transfer.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 157 | 2019-05-13T15:07:19.000Z | 2022-03-23T08:52:32.000Z | 基础教程/A2-神经网络基本原理/第6步 - 模型部署/src/ch13-ModelInference/ONNXConverter/transfer.py | microsoft/ai-edu | 2f59fa4d3cf19f14e0b291e907d89664bcdc8df3 | [
"Apache-2.0"
] | 2,412 | 2019-05-07T02:55:15.000Z | 2022-03-30T06:56:52.000Z | import onnx
from onnx import helper, numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
import argparse
import json
import numpy as np
# 将numpy生成的model转换为onnx格式
def ModelTransfer(model_path, output_path):
    """Convert a numpy-trained model (described by a JSON file) into ONNX.

    The JSON maps layer index -> layer description (type, tensor names,
    shapes, and paths to the .npy parameter files).  Each supported layer
    type is translated into one or more ONNX nodes below, and the result
    is checked and saved to *output_path*.
    """
    # Read the model definition file; it stores each layer's type and
    # tensor names plus where its parameters were saved.
    with open(model_path, "r") as f:
        model_define = json.load(f)
    node_list = []
    input_list = []
    output_list = []
    size = model_define["0"]["input_size"]
    if len(size) > 2:
        # Reorder NHWC -> NCHW, the layout ONNX expects for image tensors.
        size = [size[0], size[3], size[1], size[2]]
    input_list.append(
        helper.make_tensor_value_info(
            model_define["0"]["input_name"], TensorProto.FLOAT, size
        ))
    # Decide which ONNX nodes to emit based on each layer's type.
    for index in range(len(model_define)):
        node = model_define[str(index)]
        # Fully-connected layer: weights/bias become Constant nodes feeding
        # a MatMul followed by an Add.
        if node["type"] == "FC":
            s = np.load(node["weights_path"]).astype(np.float32)
            node_list.append(
                helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[node["weights_name"]],
                    value=helper.make_tensor(
                        name=node["weights_name"],
                        data_type=TensorProto.FLOAT,
                        dims=s.shape,
                        vals=s.flatten().astype(float),
                    ),
                )
            )
            s = np.load(node["bias_path"]).astype(np.float32)
            node_list.append(
                helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[node["bias_name"]],
                    value=helper.make_tensor(
                        name=node["bias_name"],
                        data_type=TensorProto.FLOAT,
                        dims=s.shape,
                        vals=s.flatten().astype(float),
                    ),
                )
            )
            node_list.append(
                helper.make_node("MatMul", [node["input_name"], node["weights_name"]], [node["output_name"] + "Temp"])
            )
            node_list.append(
                helper.make_node("Add", [node["output_name"] + "Temp", node["bias_name"]], [node["output_name"]])
            )
        # Convolution layer: swap the numpy kernel axes into ONNX order,
        # then emit Conv followed by an explicit bias Add.
        elif node["type"] == "Conv":
            s = np.load(node["weights_path"]).astype(np.float32)
            # Axis swaps convert the stored kernel layout to ONNX's
            # (out_channels, in_channels, kH, kW) — TODO confirm against
            # the producer of the .npy files.
            s = np.swapaxes(s, 0, 3)
            s = np.swapaxes(s, 1, 2)
            # s = np.swapaxes(s, 2, 3)
            node_list.append(
                helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[node["weights_name"]],
                    value=helper.make_tensor(
                        name=node["weights_name"],
                        data_type=TensorProto.FLOAT,
                        dims=s.shape,
                        vals=s.flatten().astype(float),
                    ),
                )
            )
            # should use broadcast, but I didn't find how to use that attribute
            # Workaround: tile the per-channel bias across the full output
            # H x W so the Add needs no broadcasting.
            s = np.load(node["bias_path"]).astype(np.float32)
            s = np.tile(s, node["output_size"][1:3]+[1])
            s = np.swapaxes(s, 0, 2)
            s = np.swapaxes(s, 1, 2)
            node_list.append(
                helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[node["bias_name"]],
                    value=helper.make_tensor(
                        name=node["bias_name"],
                        data_type=TensorProto.FLOAT,
                        dims=s.shape,
                        vals=s.flatten().astype(float),
                    ),
                )
            )
            node_list.append(
                helper.make_node(
                    node["type"],
                    [node["input_name"], node["weights_name"]],
                    [node["output_name"] + "Temp"],
                    kernel_shape=node["kernel_shape"],
                    strides=node["strides"],
                    pads=node["pads"]
                )
            )
            node_list.append(
                helper.make_node("Add", inputs=[node["output_name"] + "Temp", node["bias_name"]], outputs=[node["output_name"]])
            )
        # Element-wise activations map 1:1 onto ONNX ops of the same name.
        elif node["type"] == "Relu" or node["type"] == "Softmax" or node["type"] == "Sigmoid" or node["type"] == "Tanh":
            node_list.append(
                helper.make_node(
                    node["type"],
                    [node["input_name"]],
                    [node["output_name"]]
                )
            )
        # Max pooling (note: strides is taken from kernel_shape, i.e.
        # non-overlapping windows).
        elif node["type"] == "MaxPool":
            node_list.append(
                helper.make_node(
                    node["type"],
                    [node["input_name"]],
                    [node["output_name"]],
                    kernel_shape=node["kernel_shape"],
                    strides=node["kernel_shape"],
                    pads=node["pads"]
                )
            )
        # Reshape: insert a Transpose (NCHW -> NHWC) first so the flattened
        # order matches what the original numpy code produced.
        elif node["type"] == "Reshape":
            shape = np.array(node["shape"], dtype=np.int64)
            node_list.append(
                helper.make_node(
                    'Transpose',
                    inputs=[node["input_name"]],
                    outputs=[node["input_name"] + "T"],
                    perm=[0, 2, 3, 1]
                )
            )
            # ONNX Reshape takes the target shape as a second (int64) input.
            node_list.append(
                helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[node["output_name"] + "shape"],
                    value=helper.make_tensor(
                        name=node["output_name"] + "shape",
                        data_type=TensorProto.INT64,
                        dims=shape.shape,
                        vals=shape.flatten(),
                    ),
                )
            )
            node_list.append(
                helper.make_node(
                    node["type"],
                    [node["input_name"] + "T", node["output_name"] + "shape"],
                    [node["output_name"]],
                )
            )
    # `index` is still bound to the last layer after the loop: the graph
    # output is that layer's output tensor.
    size = model_define[str(index)]["output_size"]
    if len(size) > 2:
        size = [size[0], size[3], size[1], size[2]]
    output_list.append(
        helper.make_tensor_value_info(
            model_define[str(index)]["output_name"], TensorProto.FLOAT, size
        ))
    graph_proto = helper.make_graph(
        node_list,
        "test",
        input_list,
        output_list,
    )
    # NOTE(review): only node_list[1] is spot-checked here; check_graph /
    # check_model below validate the full graph anyway.
    onnx.checker.check_node(node_list[1])
    onnx.checker.check_graph(graph_proto)
    model_def = helper.make_model(graph_proto, producer_name="test_onnx")
    onnx.checker.check_model(model_def)
    onnx.save(model_def, output_path)
if __name__ == "__main__":
    # CLI arguments are parsed but currently unused: the call below is
    # hard-wired to fixed paths (see the commented-out invocation).
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model_path", help="the path to the model file")
    parser.add_argument("-o", "--output_path", help="the path to store the output model")
    args = parser.parse_args()
    # test()
    # ModelTransfer(args.model_path, args.output_path)
    ModelTransfer("./output/model.json", "./my_minst.onnx")
| 32.39823 | 128 | 0.452745 |
21be8b0bda40ef6ccea3620e777ae999b40e191d | 3,129 | py | Python | tests/wallet/rl_wallet/test_rl_wallet.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | tests/wallet/rl_wallet/test_rl_wallet.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | tests/wallet/rl_wallet/test_rl_wallet.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | import asyncio
import pytest
from greenberry.simulator.simulator_protocol import FarmNewBlockProtocol
from greenberry.types.peer_info import PeerInfo
from greenberry.util.ints import uint16, uint64
from greenberry.wallet.rl_wallet.rl_wallet import RLWallet
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
    # Module-scoped loop so every async test in this module shares one
    # asyncio event loop instead of pytest-asyncio's per-test default.
    loop = asyncio.get_event_loop()
    yield loop
class TestCCWallet:
    """End-to-end test of rate-limited (RL) wallet creation and spending.

    NOTE(review): the class name says "CC" but these are RL-wallet tests —
    presumably copied from the CC test module; confirm before renaming.
    """
    @pytest.fixture(scope="function")
    async def two_wallet_nodes(self):
        # One simulator full node and two wallet nodes (admin + user).
        async for _ in setup_simulators_and_wallets(1, 2, {}):
            yield _
    @pytest.mark.asyncio
    async def test_create_rl_coin(self, two_wallet_nodes):
        num_blocks = 4
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_1, wallet_server_1 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        # Connect both wallet nodes to the simulator full node.
        await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        # Farm blocks to our puzzle hash so the admin wallet has funds.
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_node.wallet_state_manager)
        rl_user: RLWallet = await RLWallet.create_rl_user(wallet_node_1.wallet_state_manager)
        # Rate limit: at most `limit` mojos per `interval` blocks.
        interval = uint64(2)
        limit = uint64(1)
        amount = uint64(100)
        await rl_admin.admin_create_coin(interval, limit, rl_user.rl_info.user_pubkey.hex(), amount, 0)
        origin = rl_admin.rl_info.rl_origin
        admin_pubkey = rl_admin.rl_info.admin_pubkey
        # Hand the user wallet everything it needs to track the RL coin.
        await rl_user.set_user_info(
            interval,
            limit,
            origin.parent_coin_info.hex(),
            origin.puzzle_hash.hex(),
            origin.amount,
            admin_pubkey.hex(),
        )
        # Farm throw-away blocks so the RL coin confirms and matures.
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        await time_out_assert(15, rl_user.get_confirmed_balance, 100)
        balance = await rl_user.rl_available_balance()
        # Spend 1 mojo from the rate-limited coin and confirm it on-chain.
        tx_record = await rl_user.generate_signed_transaction(1, 32 * b"\0")
        await wallet_node_1.wallet_state_manager.main_wallet.push_transaction(tx_record)
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        balance = await rl_user.get_confirmed_balance()
        print(balance)
        # Balance dropped by exactly the 1 mojo spent.
        await time_out_assert(15, rl_user.get_confirmed_balance, 99)
        rl_user.rl_get_aggregation_puzzlehash(rl_user.get_new_puzzle())
        # rl_admin.rl_generate_signed_aggregation_transaction()
| 36.383721 | 105 | 0.713966 |
b6c9a6915780711ba21586b65cbcc0bd8e1f5976 | 1,469 | py | Python | src/pl_data/CustomDataLoader.py | mikcnt/dlai-project | 56fa0d1e682d07cd89cb011400b0a4ef92ec9265 | [
"MIT"
] | 3 | 2021-09-09T20:23:35.000Z | 2022-01-23T22:29:49.000Z | src/pl_data/CustomDataLoader.py | mikcnt/dlai-project | 56fa0d1e682d07cd89cb011400b0a4ef92ec9265 | [
"MIT"
] | null | null | null | src/pl_data/CustomDataLoader.py | mikcnt/dlai-project | 56fa0d1e682d07cd89cb011400b0a4ef92ec9265 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import RandomSampler
class CustomDataLoader(object):
    """Batches a dataset of tensor-dicts into fixed-size chunks.

    Every dataset item is a dict mapping field names to tensors.  Items are
    concatenated along dim 0 into an accumulator; whenever the "obs" field
    reaches ``batch_size`` rows, one batch dict is yielded.  A final partial
    batch is yielded unless ``drop_last`` is set.
    """

    def __init__(self, dataset, batch_size, shuffle=False, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        # Random order when shuffling, otherwise plain sequential indices.
        self.sampler = RandomSampler(dataset) if shuffle else range(len(dataset))

    def _reset_batch(self):
        # Fresh accumulator: one empty tensor per field of a dataset item.
        return {name: torch.Tensor() for name in self.dataset[0].keys()}

    def __iter__(self):
        pending = self._reset_batch()
        for index in self.sampler:
            item = self.dataset[index]
            for name in item.keys():
                pending[name] = torch.cat([pending[name], item[name]])
            # Emit as many full batches as the accumulator now contains.
            while pending["obs"].shape[0] >= self.batch_size:
                if pending["obs"].shape[0] == self.batch_size:
                    yield pending
                    pending = self._reset_batch()
                else:
                    # Oversized accumulator: split off exactly one batch
                    # and keep the remainder for the next iteration.
                    full = {
                        name: pending[name][: self.batch_size]
                        for name in item.keys()
                    }
                    pending = {
                        name: pending[name][self.batch_size :]
                        for name in item.keys()
                    }
                    yield full
        # Whatever is left over forms an incomplete final batch.
        if pending["obs"].shape[0] > 0 and not self.drop_last:
            yield pending
| 34.97619 | 76 | 0.504425 |
ad2ee5906fa89b01bcaa0b8a3d299b7c25238456 | 663 | py | Python | manas_apps/users/tests/test_drf_urls.py | pymandes/manas_apps | dfe5ff1e21d92953360dcfbee9b6070269458027 | [
"MIT"
] | null | null | null | manas_apps/users/tests/test_drf_urls.py | pymandes/manas_apps | dfe5ff1e21d92953360dcfbee9b6070269458027 | [
"MIT"
] | null | null | null | manas_apps/users/tests/test_drf_urls.py | pymandes/manas_apps | dfe5ff1e21d92953360dcfbee9b6070269458027 | [
"MIT"
] | null | null | null | import pytest
from django.urls import resolve, reverse
from manas_apps.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
    # Detail route is keyed by username; reverse() and resolve() must be symmetric.
    assert (
        reverse("api:user-detail", kwargs={"username": user.username})
        == f"/api/users/{user.username}/"
    )
    assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
    # The user list endpoint lives at the collection root.
    assert reverse("api:user-list") == "/api/users/"
    assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
    # "me" is a fixed sub-route, not a username lookup.
    assert reverse("api:user-me") == "/api/users/me/"
    assert resolve("/api/users/me/").view_name == "api:user-me"
| 26.52 | 81 | 0.669683 |
364af6c64613560c0b831324ce1f94fcd3d3c4eb | 657 | py | Python | src/libs/core/options/config.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | src/libs/core/options/config.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | src/libs/core/options/config.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "29/10/17 20:42"
#: Options functions which may be registered
OPTION_CLASS_FUNCTIONS_TO_REGISTER = ['validate',
'setup_frame',
'teardown_frame',
'setup_suite',
'teardown_suite',
'setup',
'teardown']
| 38.647059 | 62 | 0.491629 |
f53ab95ba18eaa86602280e8281bc1cef1559de4 | 1,981 | py | Python | yasql/apps/sqlorders/admin.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 443 | 2018-02-08T02:53:48.000Z | 2020-10-13T10:01:55.000Z | yasql/apps/sqlorders/admin.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 27 | 2018-03-06T03:50:07.000Z | 2020-08-18T08:09:49.000Z | yasql/apps/sqlorders/admin.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 148 | 2018-03-15T06:07:25.000Z | 2020-08-17T14:58:45.000Z | from django.contrib import admin
# Register your models here.
from sqlorders import models
@admin.register(models.DbConfig)
class DbConfigAdmin(admin.ModelAdmin):
    """Admin list view for database instance configurations."""
    list_display = ('host', 'port', 'character', 'env', 'use_type', 'rds_type', 'rds_category', 'comment')
    list_display_links = ('host',)
    list_filter = ('use_type', 'rds_type', 'rds_category')
    search_fields = ('host', 'port', 'env__name', 'comment')
    list_per_page = 10
@admin.register(models.DbSchemas)
class DbSchemasAdmin(admin.ModelAdmin):
    """Admin list view for schemas discovered on each database instance."""
    list_display = ('cid', 'schema', 'created_at')
    list_display_links = ('cid',)
    search_fields = ('cid__host', 'schema')
    list_per_page = 10
@admin.register(models.DbEnvironment)
class DbEnvironmentAdmin(admin.ModelAdmin):
    """Admin list view for deployment environments (e.g. prod/test)."""
    list_display = ('id', 'name', 'created_at', 'updated_at')
    list_display_links = ('name',)
    search_fields = ('name',)
    list_per_page = 10
@admin.register(models.DbOrders)
class DbOrdersAdmin(admin.ModelAdmin):
    """Admin view for SQL work orders; most order fields are read-only so
    the approval workflow cannot be bypassed from the admin."""
    list_display = ('title', 'order_id', 'applicant', 'progress', 'rds_category', 'sql_type', 'env', 'created_at')
    list_display_links = ('title',)
    list_filter = ('progress', 'env')
    search_fields = ('title', 'order_id', 'applicant')
    list_per_page = 20
    # Reviewer/approver assignments are managed by the app, not the admin.
    exclude = ['executor', 'auditor', 'closer', 'reviewer', 'email_cc']
    readonly_fields = ['is_hide', 'demand', 'remark', 'rds_category', 'sql_type', 'env',
                       'applicant', 'cid', 'database', 'version', 'order_id', 'title']
@admin.register(models.DbOrdersExecuteTasks)
class DbOrdersExecuteTasksAdmin(admin.ModelAdmin):
    """Admin view for per-order execution tasks; execution results are
    read-only / hidden so they cannot be edited after the fact."""
    list_display = ('task_id', 'applicant', 'order', 'sql_type', 'progress', 'created_time')
    list_display_links = ('task_id',)
    search_fields = ('taskid', 'order__order_id', 'applicant')
    list_per_page = 20
    exclude = ['rollback_sql', 'execute_log', 'executor']
    readonly_fields = ['consuming_time', 'affected_rows', 'file_format', 'task_id', 'applicant', 'order']
ea30832746c18d7113145f8404d88b79e5b7389f | 16,155 | py | Python | 4-Lastnosti Fourirjeve Analize-zapiskiWV/ukazi.py | LoremasterLH/MatlabToPython | bdfd827d7fc143332e3945fc980e915c6324eb6c | [
"MIT"
] | 2 | 2018-11-05T12:55:42.000Z | 2018-11-20T11:00:43.000Z | 4-Lastnosti Fourirjeve Analize-zapiskiWV/ukazi.py | LoremasterLH/MatlabToPython | bdfd827d7fc143332e3945fc980e915c6324eb6c | [
"MIT"
] | null | null | null | 4-Lastnosti Fourirjeve Analize-zapiskiWV/ukazi.py | LoremasterLH/MatlabToPython | bdfd827d7fc143332e3945fc980e915c6324eb6c | [
"MIT"
] | 1 | 2018-11-20T13:15:09.000Z | 2018-11-20T13:15:09.000Z | # Author: Martin Konečnik
# Contact: martin.konecnik@gmail.com
# Licenced under MIT
# 4- Lastnosti Fourirjeve Analize
# Potrebna ročna inštalacija knjižnice pytftb (najlažje z git): https://github.com/scikit-signal/pytftb
import numpy as np
from scipy.signal import convolve2d as conv2
from scipy.signal.windows import gaussian as gausswin
from scipy.signal import hilbert
from tftb.generators import fmlin
from tftb.processing import Spectrogram
from tftb.processing.reassigned import pseudo_wigner_ville
from tftb.processing import WignerVilleDistribution
import pylab as pylab
import matplotlib.pyplot as plt
from pathlib import Path
from PIL import Image
# Potrebni za del z zvokom
import sounddevice as sd
from scipy.io import wavfile
import time
# ukazi.m:1
# --------------------------------------------------------------------------------------
# Časovno-frekvenčna ločljivost in časovno-frekvenčne porazdelitve
#######################################################################
# ukazi.m:6 -- Note: Preverjeno z Matlab
Fs = 1024
sig = fmlin(1*Fs)[0] # Ker v pythonu fmlin vrne analitičen signal, ni potrebno napraviti Hilbertove transformacije.
# Narišemo realni del v časovni domeni (signal je kompleksen, a le zaradi umetno zagotovljene analitičnosti.
plt.figure()
plt.plot(np.real(sig), LineWidth=0.4)
plt.xlabel('t')
plt.ylabel('amplituda')
# Analitični signali nimajo simetrije preko Nyquistove frekvence.
plt.figure()
plt.plot(abs(np.fft.fft(sig)), LineWidth=0.4)
plt.xlabel('f')
plt.ylabel('amplituda')
# ukazi.m:16
#######################################################################
# Časovno-frekvenčna ločljivost prvič: višja časovna ločljivost, nižja frekvenčna ločljivost
# ukazi.m:18 -- Note: Graf je prikazan s knjižnico pytftb in se razlikuje od Matlabovega.
plt.close('all')
T=32
N=256
dT=T - 1
window = np.ones(T)
TFD = Spectrogram(sig, n_fbins=N, fwindow=window)
TFD.run()
TFD.plot(kind="contour", threshold=0.1, show_tf=False)
# plt.xlabel('t (s)','FontSize',12)
# plt.ylabel('f (Hz)','FontSize',12)
# plt.title('T={0},N={1},dt={2}'.format(T, N, dT))
# ukazi.m:29 -- Note: Isto kot prej.
# Časovno-frekvenčna ločljivost drugič: nižja časovna ločljivost, višja frekvenčna ločljivost
T=256
N=256
dT=T - 1
window = np.ones(T)
TFD = Spectrogram(sig, n_fbins=N, fwindow=window)
TFD.run()
TFD.plot(kind="contour", threshold=0.1, show_tf=False)
# plt.xlabel('t (s)','FontSize',12)
# plt.ylabel('f (Hz)','FontSize',12)
# plt.title(cat('T=',str(T),',N=',str(N),',dT=',str(dT)))
# ukazi.m:49 -- Note:
# Wigner-Villova časovno-frekvenčna porazdelitev - skoraj idealna časovna in frekvenčna ločljivost
wvd = WignerVilleDistribution(np.real(sig))
wvd.run()
wvd.plot(kind='contour')
tfr, rtfr, hat = pseudo_wigner_ville(np.real(sig))
TFD,t,f=tfrwv(sig,nargout=3)
# ukazi4.m:41
plt.figure()
imagesc(t,f,TFD)
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.axis('xy')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
# Trenutna autokorelacija Rss(t,tau) = sig(t+tau) .* conj(sig(t-tau)); - kvadratna funkcija signala---------------------
# je osnova za Wigner-Villovo asovnofrekvenno porazdelitev in omogoa skoraj idealno asovno in frekvenno loljivost
t=np.arange(1,size(sig))
# ukazi4.m:51
tcol=size(sig)
# ukazi4.m:52
Rss=np.zeros(tcol,tcol)
# ukazi4.m:53
for icol in np.arange(1,tcol).reshape(-1):
ti=t[icol]
# ukazi4.m:55
taumax=min(cat(ti - 1,tcol - ti,round(tcol / 2) - 1))
# ukazi4.m:56
tau=np.arange(- taumax,taumax)
# ukazi4.m:58
indices=rem(tcol + tau,tcol) + 1
# ukazi4.m:59
Rss[indices,icol]=multiply(sig[ti + tau],conj(sig[ti - tau]))
# ukazi4.m:60
plt.figure()
for icol in np.arange(1,tcol / 2,10).reshape(-1):
cla
plt.subplot(2,1,1)
plt.plot(np.real(Rss[:,icol]))
plt.title(cat('t = ',str(icol),', np.real Rss'))
plt.axis('tight')
plt.subplot(2,1,2)
plt.plot(imag(Rss[:,icol]))
plt.title(cat('t = ',str(icol),', imag Rss'))
plt.axis('tight')
plt.waitforbuttonpress()
# Wigner-Villova asovno-frekvenna porazdelitev - skoraj idealna asovna in frekvenna loljivost----------------------------
# Wigner-Villova asovno-frekvenna porazdelitev je Fourierova transformacija trenutne prene korelacije Rss.
TFD,t,f=tfrwv(sig,nargout=3)
# ukazi4.m:73
plt.figure()
contour(t,f,TFD)
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
'------------------------------------------------------------------------------------------------------------------------------'
# asovno-frekvenna loljivost in asovno-frekvenne porazdelitve - ve signalov skupaj
# Primer 1: odkomentirajte za zgled z dvema asimetrino razporejenima atomoma v asovno-frekvenni ravnini.
Fs=1024
# ukazi4.m:83
sig=atoms(1*Fs,cat([1*Fs / 4,0.15,20,1],[3*Fs / 4,0.35,20,1]))
# ukazi4.m:84
plt.figure()
plt.plot(np.real(sig))
plt.xlabel('t')
plt.ylabel('amplituda')
plt.axis('tight')
plt.figure()
plt.plot(abs(np.fft.fft(sig)))
plt.xlabel('f')
plt.ylabel('amplituda')
plt.axis('tight')
#######################################################################
# asovno-frekvenna loljivost prvi: vija asovna loljivost, nija frekvenna loljivost---------------------------------
plt.close('all')
T=32
# ukazi4.m:99
N=256
# ukazi4.m:100
dT=T - 1
# ukazi4.m:101
TFD,f,t=specgram(sig,N,Fs,window(rectwin,T),dT,nargout=3)
# ukazi4.m:102
plt.figure()
contour(t,f,abs(TFD))
plt.axis('tight')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.axis('tight')
plt.title(cat('Spektrogram: T=',str(T),',N=',str(N),',dT=',str(dT)))
# --- Time-frequency resolution, take two: lower time resolution, higher
# frequency resolution (long analysis window). ---
# NOTE(review): this segment is a machine translation of MATLAB (ukazi4.m).
# Helpers such as specgram, window, rectwin, contour, cat, tfrwv, tfrpwv and
# hamming are not defined or imported in the visible code; the MATLAB-style
# ``nargout=`` keyword and multi-output unpacking are kept verbatim from the
# translator and will not run on a plain Python stack -- confirm a shim exists.
T=256
# ukazi4.m:110
N=256
# ukazi4.m:111
# Window hop: dT = T-1 means consecutive windows overlap by all but one sample.
dT=T - 1
# ukazi4.m:112
TFD,f,t=specgram(sig,N,Fs,window(rectwin,T),dT,nargout=3)
# ukazi4.m:113
plt.figure()
contour(t,f,abs(TFD))
plt.axis('tight')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.axis('tight')
plt.title(cat('Spektrogram: T=',str(T),',N=',str(N),',dT=',str(dT)))
# --- Wigner-Ville time-frequency distribution: near-ideal time AND frequency
# resolution. ---
TFD,t,f=tfrwv(sig,nargout=3)
# ukazi4.m:121
plt.figure()
contour(t,f,abs(TFD),100)
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Wigner-Ville distribution of the REAL PART of the signal only:
# mirroring across the frequency Fsamp/4 appears! ---
TFD,t,f=tfrwv(np.real(sig),nargout=3)
# ukazi4.m:129
plt.figure()
contour(t,f,abs(TFD),100)
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Pseudo Wigner-Ville time-frequency distribution ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
TFD,t,f=tfrpwv(sig,np.arange(1,Fs),Fs,hamming(31),nargout=3)
# ukazi4.m:139
plt.figure()
contour(t,f,abs(TFD))
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Psevdo Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Pseudo Wigner-Ville of the REAL PART of the signal only: mirroring
# across Fsamp/4 appears! ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
TFD,t,f=tfrpwv(np.real(sig),np.arange(1,Fs),Fs,hamming(31),nargout=3)
# ukazi4.m:148
plt.figure()
contour(t,f,abs(TFD))
plt.axis('tight')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Psevdo Wigner-Villova asovno-frekvenna porazdelitev'))
'------------------------------------------------------------------------------------------------------------------------------'
# Time-frequency resolution and time-frequency distributions -- several
# signals analysed together.
# Example 2: four symmetrically placed atoms in the time-frequency plane.
# NOTE(review): ``atoms``, ``cat``, ``specgram``, ``imagesc``, ``end``,
# ``tfrwv``, ``tfrpwv`` and ``tfrcw`` are MATLAB helpers with no visible
# Python definition here -- confirm a compatibility layer before running.
Fs=1024
# ukazi4.m:161
sig=atoms(1*Fs,cat([1*Fs / 4,0.15,20,1],[3*Fs / 4,0.15,20,1],[1*Fs / 4,0.35,20,1],[3*Fs / 4,0.35,20,1]))
# ukazi4.m:162
plt.figure()
plt.plot(np.real(sig))
plt.xlabel('t')
plt.ylabel('amplituda')
plt.axis('tight')
plt.figure()
plt.plot(abs(np.fft.fft(sig)))
plt.xlabel('f')
plt.ylabel('amplituda')
plt.axis('tight')
#######################################################################
# Time-frequency resolution, take one: higher time resolution, lower
# frequency resolution (short analysis window).
plt.close('all')
T=32
# ukazi4.m:178
N=256
# ukazi4.m:179
dT=T - 1
# ukazi4.m:180
TFD,f,t=specgram(sig,N,Fs,window(rectwin,T),dT,nargout=3)
# ukazi4.m:181
plt.figure()
# Only the lower half of the spectrum is displayed (the translator kept the
# MATLAB ``end`` keyword as a call, which is undefined in Python).
imagesc(t,f[1:end() / 2],abs(TFD[1:end() / 2,:]))
plt.axis('xy')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.title(cat('Spektrogram: T=',str(T),',N=',str(N),',dT=',str(dT)))
# Take two: long window -> lower time resolution, higher frequency resolution.
T=256
# ukazi4.m:188
N=256
# ukazi4.m:189
dT=T - 1
# ukazi4.m:190
TFD,f,t=specgram(sig,N,Fs,window(rectwin,T),dT,nargout=3)
# ukazi4.m:191
plt.figure()
imagesc(t,f[1:end() / 2],abs(TFD[1:end() / 2,:]))
plt.axis('xy')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.axis('tight')
plt.title(cat('Spektrogram: T=',str(T),',N=',str(N),',dT=',str(dT)))
# --- Wigner-Ville time-frequency distribution: near-ideal time and frequency
# resolution. ---
TFD,t,f=tfrwv(sig,nargout=3)
# ukazi4.m:199
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Pseudo Wigner-Ville time-frequency distribution ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
TFD,t,f=tfrpwv(sig,np.arange(1,Fs),Fs,hamming(31),nargout=3)
# ukazi4.m:208
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Psevdo Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Pseudo Wigner-Ville of the REAL PART of the signal only: mirroring
# across Fsamp/4 appears! ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
TFD,t,f=tfrpwv(np.real(sig),np.arange(1,Fs),Fs,hamming(31),nargout=3)
# ukazi4.m:217
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Psevdo Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Choi-Williams time-frequency distribution ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
# - a window in the frequency domain suppresses interference between
#   frequency-shifted atoms
TFD,t,f=tfrcw(sig,np.arange(1,Fs),Fs,hamming(31),hamming(31),nargout=3)
# ukazi4.m:228
plt.figure()
imagesc(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Choi-Williams asovno-frekvenna porazdelitev'))
'-----------------------------------------------------------------------------------------------'
# Time-frequency distributions: examples to play with and learn from.
#######################################################################################5
# NOTE(review): ``fmlin``, ``fmsin``, ``fmpower``, ``dot``, ``cat``, ``size``
# and the tfr*/specgram helpers below are MATLAB functions with no visible
# Python definition in this file -- confirm a compatibility layer exists.
T=1
# ukazi4.m:238
Fs=512
# ukazi4.m:239
# Example 1: analytic signal with a linearly increasing frequency.
sig,trenutnaFrekvenca=fmlin(dot(T,Fs),nargout=2)
# ukazi4.m:242
sig=scipy.signal.hilbert(sig)
# ukazi4.m:243
# Example 2: analytic signal with a sinusoidally varying frequency.
# (Each example overwrites ``sig``; only the last one executed is analysed.)
sig,trenutnaFrekvenca=fmsin(dot(T,Fs),0.05,0.45,100,20,0.3,- 1.0,nargout=2)
# ukazi4.m:246
sig=scipy.signal.hilbert(sig)
# ukazi4.m:247
# Example 3: signal with a power-law (exponentially) varying frequency.
sig,trenutnaFrekvenca=fmpower(dot(T,Fs),0.5,cat(1,0.5),cat(100,0.1),nargout=2)
# ukazi4.m:250
sig=scipy.signal.hilbert(sig)
# ukazi4.m:251
# From here on the code applies to whichever example above was run last.
plt.figure()
plt.subplot(2,1,1)
plt.plot(cat(np.arange(0,size(sig) - 1)) / Fs,np.real(sig))
plt.axis('tight')
plt.xlabel('t (s)')
plt.ylabel('amplituda')
plt.title('signal')
plt.subplot(2,1,2)
# trenutnaFrekvenca = instantaneous frequency (normalized); scaled by Fs to Hz.
plt.plot(cat(np.arange(0,size(sig) - 1)) / Fs,dot(trenutnaFrekvenca,Fs))
plt.xlabel('t (s)')
plt.ylabel('f (Hz)')
plt.title('trenutna frekvenca')
plt.axis('tight')
# Time-frequency resolution, take one: higher time resolution, lower
# frequency resolution (short analysis window).
plt.close('all')
winT=32
# ukazi4.m:263
N=256
# ukazi4.m:264
dT=winT - 1
# ukazi4.m:265
TFD,f,t=specgram(sig,N,Fs,window(rectwin,winT),dT,nargout=3)
# ukazi4.m:266
plt.figure()
contour(t,f[1:end() / 2],abs(TFD[1:end() / 2,:]))
plt.axis('xy')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.title(cat('Spektrogram: winT=',str(winT),',N=',str(N),',dT=',str(dT)))
# Time-frequency resolution, take two: lower time resolution, higher
# frequency resolution (long analysis window).
winT=256
# ukazi4.m:273
N=256
# ukazi4.m:274
dT=winT - 1
# ukazi4.m:275
TFD,f,t=specgram(sig,N,Fs,window(rectwin,winT),dT,nargout=3)
# ukazi4.m:276
plt.figure()
imagesc(t,f[1:end() / 2],abs(TFD[1:end() / 2,:]))
plt.axis('xy')
plt.xlabel('t (s)','FontSize',12)
plt.ylabel('f (Hz)','FontSize',12)
plt.axis('tight')
plt.title(cat('Spektrogram: winT=',str(winT),',N=',str(N),',dT=',str(dT)))
# --- Wigner-Ville time-frequency distribution: near-ideal time and frequency
# resolution. ---
TFD,t,f=tfrwv(sig,np.arange(1,dot(T,Fs)),Fs,nargout=3)
# ukazi4.m:284
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Pseudo Wigner-Ville time-frequency distribution ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
TFD,t,f=tfrpwv(sig,np.arange(1,dot(T,Fs)),Fs,hamming(31),nargout=3)
# ukazi4.m:293
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Psevdo Wigner-Villova asovno-frekvenna porazdelitev'))
# --- Choi-Williams time-frequency distribution ---
# - a window in the time domain suppresses interference between
#   time-shifted atoms
# - a window in the frequency domain suppresses interference between
#   frequency-shifted atoms
TFD,t,f=tfrcw(sig,np.arange(1,dot(T,Fs)),Fs,hamming(25),hamming(25),nargout=3)
# ukazi4.m:303
plt.figure()
contour(t,f,abs(TFD))
plt.axis('xy')
plt.xlabel('t','FontSize',12)
plt.ylabel('f','FontSize',12)
plt.axis('tight')
plt.title(cat('Choi-Williams asovno-frekvenna porazdelitev'))
'------------------------------------------------------------------------------------------'
################################################################
# Real-life example: record a short clip from the microphone and analyse it.
print("\n" * 80) # clc
# exit()
plt.close('all')
Fs=8000
# ukazi4.m:316
bits=8
# ukazi4.m:317
T=2
# ukazi4.m:318
#my_recorder=audiorecorder(Fs,bits,1)
# ukazi4.m:320
# Fixed: the original line carried an unbalanced extra ')' which is a
# SyntaxError and prevented the whole module from importing.
print('Snemam posnetek v dolini ',str(T),'s...')
#recordblocking(my_recorder,T)
sig=sd.rec(T*Fs, Fs, 1)
# sounddevice.rec() returns immediately with an unfilled buffer; block until
# recording completes, matching the MATLAB recordblocking() the translator
# commented out above.
sd.wait()
# ukazi4.m:324
plt.figure()
plt.plot(cat(np.arange(1,dot(T,Fs))) / Fs,sig)
plt.title('Posneti signal')
plt.xlabel('t (s)')
plt.ylabel('amplituda')
# NOTE(review): wavplay is a legacy MATLAB function with no Python binding in
# scope here; sd.play(sig, Fs) would be the sounddevice equivalent -- confirm.
wavplay(sig,Fs,'sync')
sig=scipy.signal.hilbert(sig)
# ukazi4.m:328
# From here on, add your own code -- time-frequency analysis, of course.
# WARNING: time-frequency distributions can be computationally very expensive.
# Increase the signal length in small steps and choose the number of
# frequency bins conservatively.
71a398e1f760812592a4ecc5f7fd42bc9da70272 | 20,183 | py | Python | envoy/tests/test_parser.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | 1 | 2021-01-28T01:45:37.000Z | 2021-01-28T01:45:37.000Z | envoy/tests/test_parser.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | 3 | 2021-01-27T04:56:40.000Z | 2021-02-26T06:29:22.000Z | envoy/tests/test_parser.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | 1 | 2019-12-23T13:35:17.000Z | 2019-12-23T13:35:17.000Z | import pytest
from datadog_checks.envoy.errors import UnknownMetric, UnknownTags
from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS
from datadog_checks.envoy.parser import parse_histogram, parse_metric
def _flat_tag_names(metric):
    """Flatten the per-position tag-name groups declared in METRICS for *metric*."""
    return [name for group in METRICS[metric]['tags'] for name in group]


def _assert_untagged(metric):
    """Assert parsing of a metric whose name embeds no tag values."""
    assert parse_metric(metric) == (
        METRIC_PREFIX + metric,
        _flat_tag_names(metric),
        METRICS[metric]['method'],
    )


def _assert_tagged(metric_format, *tag_values):
    """Assert parsing of a metric whose name embeds *tag_values*.

    *metric_format* holds one ``{}`` placeholder per expected tag value.  The
    untagged (catalog) form is obtained by formatting with empty strings; the
    concrete metric name by formatting each value as a ``.<value>`` segment.
    parse_metric must strip the embedded values back out as ``name:value``
    tags, in declaration order.
    """
    untagged = metric_format.format(*([''] * len(tag_values)))
    names = _flat_tag_names(untagged)
    tagged = metric_format.format(*('.{}'.format(v) for v in tag_values))
    expected = ['{}:{}'.format(n, v) for n, v in zip(names, tag_values)]
    assert parse_metric(tagged) == (
        METRIC_PREFIX + untagged,
        expected,
        METRICS[untagged]['method'],
    )


def test_unknown_metric():
    with pytest.raises(UnknownMetric):
        parse_metric('foo.bar')


def test_unknown_tag():
    with pytest.raises(UnknownTags):
        parse_metric('stats.major.overflow')


def test_runtime():
    _assert_untagged('runtime.num_keys')


def test_cds():
    _assert_untagged('cluster_manager.cds.config_reload')


def test_http_router_filter():
    _assert_tagged('http{}.rq_total', 'some_stat_prefix')


def test_http_router_filter_vhost():
    _assert_tagged('vhost{}.vcluster{}.upstream_rq_time', 'some_vhost_name', 'some_vcluster_name')


def test_http_rate_limit():
    _assert_tagged('cluster{}.ratelimit.ok', 'some_route_target_cluster')


def test_ip_tagging():
    _assert_tagged('http{}.ip_tagging{}.hit', 'some_stat_prefix', 'some_tag_name')


def test_grpc():
    _assert_tagged('cluster{}.grpc{}{}.total', 'some_route_target_cluster', 'some_grpc_service', 'some_grpc_method')


def test_dynamodb_operation():
    _assert_tagged('http{}.dynamodb.operation{}.upstream_rq_total', 'some_stat_prefix', 'some_operation_name')


def test_dynamodb_table():
    _assert_tagged('http{}.dynamodb.table{}.upstream_rq_total', 'some_stat_prefix', 'some_table_name')


def test_dynamodb_error():
    _assert_tagged('http{}.dynamodb.error{}{}', 'some_stat_prefix', 'some_table_name', 'error_type')


def test_http_buffer_filter():
    _assert_tagged('http{}.buffer.rq_timeout', 'some_stat_prefix')


def test_rds():
    _assert_tagged('http{}.rds{}.config_reload', 'some_stat_prefix', 'some_route_config_name')


def test_tcp_proxy():
    _assert_tagged('tcp{}.downstream_cx_total', 'some_stat_prefix')


def test_tls():
    _assert_tagged('auth.clientssl{}.update_success', 'some_stat_prefix')


def test_network_rate_limit():
    _assert_tagged('ratelimit{}.total', 'some_stat_prefix')


def test_redis():
    _assert_tagged('redis{}.downstream_rq_total', 'some_stat_prefix')


def test_redis_splitter():
    _assert_tagged('redis{}.splitter.invalid_request', 'some_stat_prefix')


def test_redis_command():
    _assert_tagged('redis{}.command{}.total', 'some_stat_prefix', 'some_command')


def test_mongo():
    _assert_tagged('mongo{}.op_insert', 'some_stat_prefix')


def test_mongo_command():
    _assert_tagged('mongo{}.cmd{}.total', 'some_stat_prefix', 'some_command')


def test_mongo_collection():
    _assert_tagged('mongo{}.collection{}.query.total', 'some_stat_prefix', 'some_collection')


def test_listener():
    _assert_tagged('listener{}.ssl.ciphers{}', '0.0.0.0_80', 'some_ciphers')


def test_listener_manager():
    _assert_untagged('listener_manager.listener_added')


def test_listener_tls():
    _assert_tagged('listener{}.ssl.versions{}', '0.0.0.0', 'TLSv1.2')


def test_listener_curves():
    _assert_tagged('listener{}.ssl.curves{}', '0.0.0.0', 'P-256')


def test_listener_sigalgs():
    _assert_tagged('listener{}.ssl.sigalgs{}', '0.0.0.0', 'rsa_pss_rsae_sha256')


def test_http():
    _assert_tagged('http{}.downstream_cx_total', 'some_stat_prefix')


def test_http_user_agent():
    _assert_tagged('http{}.user_agent{}.downstream_cx_total', 'some_stat_prefix', 'some_user_agent')


def test_http_listener():
    _assert_tagged('listener{}.http{}.downstream_rq_2xx', '0.0.0.0_80', 'some_stat_prefix')


def test_http2():
    _assert_untagged('http2.rx_reset')


def test_cluster_manager():
    _assert_untagged('cluster_manager.cluster_added')


def test_cluster():
    _assert_tagged('cluster{}.upstream_cx_total', 'some_name')


def test_cluster_health_check():
    _assert_tagged('cluster{}.health_check.healthy', 'some_name')


def test_cluster_outlier_detection():
    _assert_tagged('cluster{}.outlier_detection.ejections_enforced_total', 'some_name')


def test_cluster_dynamic_http():
    _assert_tagged('cluster{}.upstream_rq_time', 'some_name')


def test_cluster_dynamic_http_zones():
    _assert_tagged('cluster{}.zone{}{}.upstream_rq_time', 'some_name', 'some_table_name', 'some_to_zone')


def test_cluster_load_balancer():
    _assert_tagged('cluster{}.lb_healthy_panic', 'some_name')


def test_cluster_load_balancer_subsets():
    _assert_tagged('cluster{}.lb_subsets_active', 'some_name')


def test_tag_with_dots():
    # Tag values may themselves contain dots and pipes.
    _assert_tagged('cluster{}.lb_healthy_panic', 'out.alerting-event-evaluator-test.datadog.svc.cluster.local|iperf')


def test_no_match():
    metric = 'envoy.http.downstream_rq_time'
    value = 'No recorded values'
    assert list(parse_histogram(metric, value)) == []


def test_ignore_nan():
    metric = 'envoy.http.downstream_rq_time'
    value = 'P0(0,0) P25(nan,0)'
    assert list(parse_histogram(metric, value)) == [('envoy.http.downstream_rq_time.0percentile', 0.0)]


def test_correct():
    metric = 'envoy.http.downstream_rq_time'
    value = (
        'P0(0,0) P25(25,0) P50(50,0) P75(75,0) P90(90,1.06) P95(95,1.08) '
        'P99(99,1.096) P99.9(99.9,1.0996) P100(100,1.1)'
    )
    assert list(parse_histogram(metric, value)) == [
        ('envoy.http.downstream_rq_time.0percentile', 0.0),
        ('envoy.http.downstream_rq_time.25percentile', 25.0),
        ('envoy.http.downstream_rq_time.50percentile', 50.0),
        ('envoy.http.downstream_rq_time.75percentile', 75.0),
        ('envoy.http.downstream_rq_time.90percentile', 90.0),
        ('envoy.http.downstream_rq_time.95percentile', 95.0),
        ('envoy.http.downstream_rq_time.99percentile', 99.0),
        ('envoy.http.downstream_rq_time.99_9percentile', 99.9),
        ('envoy.http.downstream_rq_time.100percentile', 100.0),
    ]


def test_correct_unknown_percentile():
    metric = 'envoy.http.downstream_rq_time'
    value = 'P0(0,0) P25(25,0) P55.5(55.5,0)'
    assert list(parse_histogram(metric, value)) == [
        ('envoy.http.downstream_rq_time.0percentile', 0.0),
        ('envoy.http.downstream_rq_time.25percentile', 25.0),
        ('envoy.http.downstream_rq_time.55_5percentile', 55.5),
    ]
| 34.092905 | 103 | 0.635337 |
09f0149d61be7a0492986a2458d82fc40c56f1c6 | 3,966 | py | Python | volume-count.py | mJace/scope-volume-count | ba61e744dfe1246fafa623b836863101b4f535a6 | [
"Apache-2.0"
] | 6 | 2016-12-15T20:11:50.000Z | 2020-12-22T06:57:00.000Z | volume-count.py | mJace/scope-volume-count | ba61e744dfe1246fafa623b836863101b4f535a6 | [
"Apache-2.0"
] | 3 | 2018-02-22T09:20:20.000Z | 2019-04-30T21:09:37.000Z | volume-count.py | mJace/scope-volume-count | ba61e744dfe1246fafa623b836863101b4f535a6 | [
"Apache-2.0"
] | 12 | 2017-05-27T07:12:51.000Z | 2021-06-02T05:05:59.000Z | #!/usr/bin/env python
from docker import Client
import BaseHTTPServer
import SocketServer
import datetime
import errno
import json
import os
import signal
import socket
import threading
import time
import urllib2
PLUGIN_ID="volume-count"
# Weave Scope discovers reporter plugins through unix sockets placed in this
# well-known directory; the filename must match the plugin id.
PLUGIN_UNIX_SOCK="/var/run/scope/plugins/" + PLUGIN_ID + ".sock"
DOCKER_SOCK="unix://var/run/docker.sock"
# Latest report nodes: rebuilt by the background update loop and read by the
# HTTP handler.
nodes = {}
def update_loop():
    """Poll Docker every 5 seconds and rebuild the report ``nodes`` mapping.

    Runs forever; intended to be started on a daemon thread by
    start_update_loop().
    """
    global nodes
    next_call = time.time()
    while True:
        # Get current timestamp in RFC3339
        timestamp = datetime.datetime.utcnow()
        timestamp = timestamp.isoformat('T') + 'Z'
        # Fetch and convert data to scope data model
        new = {}
        for container_id, volume_count in container_volume_counts().iteritems():
            new["%s;<container>" % (container_id)] = {
                'latest': {
                    'volume_count': {
                        'timestamp': timestamp,
                        'value': str(volume_count),
                    }
                }
            }
        # Swap in the freshly built mapping in one assignment so readers never
        # observe a half-built report.
        nodes = new
        next_call += 5
        # Fixed: when one iteration takes longer than the 5 s period the delay
        # goes negative and time.sleep() raises ValueError, silently killing
        # the daemon thread.  Clamp to zero to skip the wait instead.
        time.sleep(max(0, next_call - time.time()))
def start_update_loop():
    """Run update_loop in the background on a daemon thread.

    Daemonizing the thread lets the process exit without waiting for the
    (infinite) polling loop.
    """
    worker = threading.Thread(target=update_loop)
    worker.daemon = True
    worker.start()
# List all containers, with the count of their volumes
def container_volume_counts():
    """Return a dict mapping container id -> number of mounted volumes.

    Includes stopped containers (``all=True``).
    """
    cli = Client(base_url=DOCKER_SOCK, version='auto')
    return {c['Id']: len(c['Mounts']) for c in cli.containers(all=True)}
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves the Scope plugin report as JSON on every GET request."""

    def do_GET(self):
        # The logger requires a client_address, but unix sockets don't have
        # one, so we fake it.
        self.client_address = "-"
        # Generate our json body: plugin self-description plus the latest
        # per-container nodes built by the background update loop.
        body = json.dumps({
            'Plugins': [
                {
                    'id': PLUGIN_ID,
                    'label': 'Volume Counts',
                    'description': 'Shows how many volumes each container has mounted',
                    'interfaces': ['reporter'],
                    'api_version': '1',
                }
            ],
            'Container': {
                'nodes': nodes,
                # Templates tell the UI how to render this field.
                'metadata_templates': {
                    'volume_count': {
                        # Key where this data can be found.
                        'id': "volume_count",
                        # Human-friendly field name
                        'label': "# Volumes",
                        # Look up the 'id' in the latest object.
                        'from': "latest",
                        # Priorities over 10 are hidden, lower is earlier in the list.
                        'priority': 0.1,
                    },
                },
            },
        })
        # Send the headers
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', len(body))
        self.end_headers()
        # Send the body
        self.wfile.write(body)
def mkdir_p(path):
    """Create *path* and any missing parents; succeed silently if it exists.

    Equivalent to ``mkdir -p``: an OSError is swallowed only when the target
    already exists as a directory, and re-raised otherwise.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def delete_socket_file():
    # Remove a stale socket left by a previous run; binding the listener on
    # an existing socket file would otherwise fail.
    if os.path.exists(PLUGIN_UNIX_SOCK):
        os.remove(PLUGIN_UNIX_SOCK)
def sig_handler(b, a):
    # Signal handler (args are the signal number and current stack frame,
    # both unused): clean up the socket file, then exit so SIGTERM/SIGINT
    # leave no stale socket behind.
    delete_socket_file()
    exit(0)
def main():
    """Install cleanup handlers, start polling, and serve the plugin socket."""
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGINT, sig_handler)
    start_update_loop()
    # Ensure the socket directory exists
    mkdir_p(os.path.dirname(PLUGIN_UNIX_SOCK))
    # Remove existing socket in case it was left behind
    delete_socket_file()
    # Listen for connections on the unix socket
    server = SocketServer.UnixStreamServer(PLUGIN_UNIX_SOCK, Handler)
    try:
        server.serve_forever()
    # Bare except is deliberate: remove the socket on ANY exit path
    # (including KeyboardInterrupt/SystemExit), then re-raise.
    except:
        delete_socket_file()
        raise
main()
| 28.328571 | 87 | 0.56001 |
67c2c764983b053629a7401a57cad505a5f628db | 3,868 | py | Python | Streamlit/models/model.py | Tanmay000009/WeCare4u-1 | bfd3c98a99558bdb21ef678a09992d373861e30f | [
"MIT"
] | null | null | null | Streamlit/models/model.py | Tanmay000009/WeCare4u-1 | bfd3c98a99558bdb21ef678a09992d373861e30f | [
"MIT"
] | null | null | null | Streamlit/models/model.py | Tanmay000009/WeCare4u-1 | bfd3c98a99558bdb21ef678a09992d373861e30f | [
"MIT"
] | 5 | 2021-03-28T03:30:54.000Z | 2022-02-25T10:26:33.000Z | import pandas as pandas
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import pickle
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
def data_loader(filepath):
    """Load the menstrual-cycle CSV and drop the columns the models don't use.

    Parameters
    ----------
    filepath : str
        Path to the CSV file; blank cells (' ') are parsed as NaN.

    Returns
    -------
    pandas.DataFrame containing only the per-cycle measurement columns.
    """
    # Fixed: the module-level import is ``import pandas as pandas``, which
    # never binds the name ``pd`` used below, so this function raised
    # NameError.  Bind ``pd`` locally.
    import pandas as pd
    data = pd.read_csv(filepath,na_values=[' '])
    data=data.drop(['ClientID','CycleNumber','Group','MeanCycleLength','MeanMensesLength','MensesScoreDaySix', 'MensesScoreDaySeven', 'MensesScoreDayEight',
       'MensesScoreDayNine', 'MensesScoreDayTen', 'MensesScoreDay11',
       'MensesScoreDay12', 'MensesScoreDay13', 'MensesScoreDay14',
       'MensesScoreDay15', 'MeanBleedingIntensity', 'PhasesBleeding', 'IntercourseDuringUnusBleed',
       'Age', 'AgeM', 'Maristatus', 'MaristatusM', 'Yearsmarried', 'Wedding',
       'Religion', 'ReligionM', 'Ethnicity', 'EthnicityM', 'Schoolyears',
       'SchoolyearsM', 'OccupationM', 'IncomeM', 'Height', 'Weight',
       'Reprocate', 'Numberpreg', 'Livingkids', 'Miscarriages', 'Abortions',
       'Medvits', 'Medvitexplain', 'Gynosurgeries', 'LivingkidsM', 'Boys',
       'Girls', 'MedvitsM', 'MedvitexplainM', 'Urosurgeries', 'Breastfeeding',
       'Method', 'Prevmethod', 'Methoddate', 'Whychart', 'Nextpreg',
       'NextpregM', 'Spousesame', 'SpousesameM', 'Timeattemptpreg', 'BMI'], axis=1)
    return data
def dataloader_cycleLength(data):
    """Return a reproducible 70/30 train/test split for the cycle-length model.

    Features are the cycle/menses statistics listed below; the target is
    the 'LengthofCycle' column.  random_state is pinned so repeated runs
    produce identical splits.
    """
    feature_columns = [
        'CycleWithPeakorNot', 'ReproductiveCategory',
        'LengthofLutealPhase', 'FirstDayofHigh',
        'TotalNumberofHighDays', 'TotalHighPostPeak', 'TotalNumberofPeakDays',
        'TotalDaysofFertility', 'TotalFertilityFormula', 'LengthofMenses',
        'MensesScoreDayOne', 'MensesScoreDayTwo', 'MensesScoreDayThree',
        'MensesScoreDayFour', 'MensesScoreDayFive', 'TotalMensesScore',
        'NumberofDaysofIntercourse', 'IntercourseInFertileWindow',
        'UnusualBleeding',
    ]
    features = data[feature_columns]
    target = data['LengthofCycle']
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.3, random_state=42)
    return x_train, x_test, y_train, y_test
def dataloader_ovulationday(data):
    """Return a reproducible 70/30 train/test split for the ovulation-day model.

    Same feature set as the cycle-length model plus 'LengthofCycle' itself;
    the target is 'EstimatedDayofOvulation'.
    """
    feature_columns = [
        'CycleWithPeakorNot', 'ReproductiveCategory',
        'LengthofLutealPhase', 'FirstDayofHigh',
        'TotalNumberofHighDays', 'TotalHighPostPeak', 'TotalNumberofPeakDays',
        'TotalDaysofFertility', 'TotalFertilityFormula', 'LengthofMenses',
        'MensesScoreDayOne', 'MensesScoreDayTwo', 'MensesScoreDayThree',
        'MensesScoreDayFour', 'MensesScoreDayFive', 'TotalMensesScore',
        'NumberofDaysofIntercourse', 'IntercourseInFertileWindow',
        'UnusualBleeding', 'LengthofCycle',
    ]
    features = data[feature_columns]
    target = data['EstimatedDayofOvulation']
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.3, random_state=42)
    return x_train, x_test, y_train, y_test
def model_dump(model, filename):
    """Serialize *model* to *filename* with pickle and return True.

    Bug fix: the original assigned the string literal ``"filename"`` instead
    of the parameter, so every model was written to a file literally named
    "filename" and the requested path was ignored.
    """
    with open(filename, 'wb') as file:
        pickle.dump(model, file)
    return True
def train_cycleLength(filepath):
    """Train and persist the cycle-length model.

    Loads the dataset, fits a RandomForestRegressor on the 70% split,
    prints the R^2 score on the held-out 30%, pickles the model, and
    returns True.
    """
    dataset = data_loader(filepath)
    x_train, x_test, y_train, y_test = dataloader_cycleLength(dataset)

    model = RandomForestRegressor()
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))

    model_dump(model, "random_forest_length_of_cycle.pkl")
    return True
def train_ovulationday(filepath):
    """Train and persist the ovulation-day model.

    Fits a StandardScaler + SGDRegressor pipeline on the 70% split, prints
    the R^2 score on the held-out 30%, pickles the pipeline, and returns
    True.
    """
    dataset = data_loader(filepath)
    x_train, x_test, y_train, y_test = dataloader_ovulationday(dataset)

    model = make_pipeline(StandardScaler(), SGDRegressor(max_iter=1000, tol=1e-3))
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))

    model_dump(model, "sgdRegressor_ovulationDay.pkl")
    return True
# Script entry point: train and pickle both models from the raw export.
if __name__=="__main__":
    train_cycleLength("FedCycleData071012 (2).xls")
    train_ovulationday("FedCycleData071012 (2).xls")
| 42.043478 | 156 | 0.719752 |
7fcae525a0afc533286efc8b54ec61e2aba24255 | 1,321 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/subsets.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/subsets.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/subsets.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n * 2^n)
# Space: O(1)
class Solution(object):
    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Iteratively doubles the result: for each number, a copy of every
        subset collected so far is extended with that number.  Uses
        range() instead of the Python-2-only xrange so the code runs on
        both Python 2 and Python 3 (iteration behavior is identical).
        """
        nums.sort()
        result = [[]]
        for i in range(len(nums)):
            size = len(result)
            for j in range(size):
                result.append(list(result[j]))
                result[-1].append(nums[i])
        return result
# Time: O(n * 2^n)
# Space: O(1)
class Solution2(object):
    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Bitmask enumeration: each integer i in [0, 2^n) selects the subset
        whose members are the set bits of i.  Uses range() instead of the
        Python-2-only xrange so the code runs on both Python 2 and 3.
        """
        result = []
        i, count = 0, 1 << len(nums)
        nums.sort()
        while i < count:
            cur = []
            for j in range(len(nums)):
                if i & 1 << j:
                    cur.append(nums[j])
            result.append(cur)
            i += 1
        return result
# Time: O(n * 2^n)
# Space: O(1)
class Solution3(object):
    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Recurse over the sorted input with an empty prefix.
        return self.subsetsRecu([], sorted(nums))

    def subsetsRecu(self, cur, nums):
        # Base case: nothing left to decide on -> exactly one subset (cur).
        if not nums:
            return [cur]
        # Branch on the first element: first all subsets without it,
        # then all subsets that include it.
        head, tail = nums[0], nums[1:]
        without_head = self.subsetsRecu(cur, tail)
        with_head = self.subsetsRecu(cur + [head], tail)
        return without_head + with_head
| 22.016667 | 92 | 0.4595 |
dde8e9ea0c613204fbe8128eb3cb921b53fb1ad7 | 373 | py | Python | blog/migrations/0026_image_dislikes.py | lorrainekamanda/awwardsdjango | e8827f93bcef1fa7a3926690d4eaf8b4bddca86f | [
"MIT"
] | null | null | null | blog/migrations/0026_image_dislikes.py | lorrainekamanda/awwardsdjango | e8827f93bcef1fa7a3926690d4eaf8b4bddca86f | [
"MIT"
] | 8 | 2021-03-19T04:46:04.000Z | 2021-09-22T19:11:11.000Z | blog/migrations/0026_image_dislikes.py | lorrainekamanda/awwardsdjango | e8827f93bcef1fa7a3926690d4eaf8b4bddca86f | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-06-02 06:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0025_preference'),
]
operations = [
migrations.AddField(
model_name='image',
name='dislikes',
field=models.IntegerField(default=0),
),
]
| 19.631579 | 49 | 0.58445 |
5a3d0f55c83b42fa77bbab10cbc54deb1984a93a | 1,105 | py | Python | src/combustion/points/__init__.py | TidalPaladin/combustion | 69b9a2b9baf90b81ed9098b4f0391f5c15efaee7 | [
"Apache-2.0"
] | 3 | 2020-07-09T22:18:19.000Z | 2021-11-08T03:47:19.000Z | src/combustion/points/__init__.py | TidalPaladin/combustion | 69b9a2b9baf90b81ed9098b4f0391f5c15efaee7 | [
"Apache-2.0"
] | 15 | 2020-06-12T21:48:59.000Z | 2022-02-05T18:41:50.000Z | src/combustion/points/__init__.py | TidalPaladin/combustion | 69b9a2b9baf90b81ed9098b4f0391f5c15efaee7 | [
"Apache-2.0"
] | 1 | 2021-02-15T20:06:16.000Z | 2021-02-15T20:06:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
from torch import Tensor
from .crop import CenterCrop, center_crop
from .transforms import RandomRotate, Rotate, center, random_rotate, rotate
# torch-scatter doesn't install correctly unless combustion[points] is installed after combustion
try:
import torch_scatter # type: ignore
except ImportError:
torch_scatter = None
if torch_scatter is not None:
from .projection import projection_mapping, projection_mask
else:
def projection_mask(*args, **kwargs) -> Tensor: # type: ignore
raise ImportError("Operation requires torch_scatter, please install it with `pip install combustion[points]`")
def projection_mapping(*args, **kwargs) -> Tuple[Tensor, Tensor, Tuple[int, int]]: # type: ignore
raise ImportError("Operation requires torch_scatter, please install it with `pip install combustion[points]`")
__all__ = [
"center",
"Rotate",
"rotate",
"random_rotate",
"RandomRotate",
"center_crop",
"CenterCrop",
"projection_mask",
"projection_mapping",
]
| 27.625 | 118 | 0.717647 |
a801305f175ac582fe740886439647c74d685455 | 4,335 | py | Python | tests/utils_tests/test_feedgenerator.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | 1 | 2019-03-14T03:06:49.000Z | 2019-03-14T03:06:49.000Z | tests/utils_tests/test_feedgenerator.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | null | null | null | tests/utils_tests/test_feedgenerator.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import datetime
import unittest
from django.utils import feedgenerator
from django.utils import tzinfo
class FeedgeneratorTest(unittest.TestCase):
    """
    Tests for the low-level syndication feed framework.
    """
    def test_get_tag_uri(self):
        """
        Test get_tag_uri() correctly generates TagURIs.
        """
        # The URL fragment ('#headline') becomes the final path segment.
        self.assertEqual(
            feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
            'tag:example.org,2004-10-25:/foo/bar/headline')

    def test_get_tag_uri_with_port(self):
        """
        Test that get_tag_uri() correctly generates TagURIs from URLs with port
        numbers.
        """
        # The ':8000' port must be stripped from the authority component.
        self.assertEqual(
            feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
            'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')

    def test_rfc2822_date(self):
        """
        Test rfc2822_date() correctly formats datetime objects.
        """
        # Naive datetimes are rendered with a '-0000' offset.
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "Fri, 14 Nov 2008 13:37:00 -0000"
        )

    def test_rfc2822_date_with_timezone(self):
        """
        Test rfc2822_date() correctly formats datetime objects with tzinfo.
        """
        # Aware datetimes carry their actual UTC offset (+0100 here).
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=60)))),
            "Fri, 14 Nov 2008 13:37:00 +0100"
        )

    def test_rfc2822_date_without_time(self):
        """
        Test rfc2822_date() correctly formats date objects.
        """
        # Plain dates are promoted to midnight.
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
            "Fri, 14 Nov 2008 00:00:00 -0000"
        )

    def test_rfc3339_date(self):
        """
        Test rfc3339_date() correctly formats datetime objects.
        """
        # Naive datetimes get the 'Z' (UTC) designator.
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "2008-11-14T13:37:00Z"
        )

    def test_rfc3339_date_with_timezone(self):
        """
        Test rfc3339_date() correctly formats datetime objects with tzinfo.
        """
        # Aware datetimes keep their numeric offset instead of 'Z'.
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=120)))),
            "2008-11-14T13:37:00+02:00"
        )

    def test_rfc3339_date_without_time(self):
        """
        Test rfc3339_date() correctly formats date objects.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
            "2008-11-14T00:00:00Z"
        )

    def test_atom1_mime_type(self):
        """
        Test to make sure Atom MIME type has UTF8 Charset parameter set
        """
        atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
        self.assertEqual(
            atom_feed.mime_type, "application/atom+xml; charset=utf-8"
        )

    def test_rss_mime_type(self):
        """
        Test to make sure RSS MIME type has UTF8 Charset parameter set
        """
        rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
        self.assertEqual(
            rss_feed.mime_type, "application/rss+xml; charset=utf-8"
        )

    # Two regression tests for #14202
    def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
        # With no feed_url, no <atom:link rel="self"> element may appear.
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
        self.assertEqual(feed.feed['feed_url'], None)
        feed_content = feed.writeString('utf-8')
        self.assertNotIn('<atom:link', feed_content)
        self.assertNotIn('href="/feed/"', feed_content)
        self.assertNotIn('rel="self"', feed_content)

    def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
        # With a feed_url, the self-referencing <atom:link> must be emitted.
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
        self.assertEqual(feed.feed['feed_url'], '/feed/')
        feed_content = feed.writeString('utf-8')
        self.assertIn('<atom:link', feed_content)
        self.assertIn('href="/feed/"', feed_content)
        self.assertIn('rel="self"', feed_content)
| 35.826446 | 143 | 0.62699 |
2adb6bbcef00d71779348d1cf1da67edc221fdd3 | 379 | py | Python | customer/migrations/0002_alter_customer_mobile.py | Ravi903132/Event | f75a6397de3ed1af1132fc9fb814b96bbf4fdd86 | [
"MIT"
] | null | null | null | customer/migrations/0002_alter_customer_mobile.py | Ravi903132/Event | f75a6397de3ed1af1132fc9fb814b96bbf4fdd86 | [
"MIT"
] | null | null | null | customer/migrations/0002_alter_customer_mobile.py | Ravi903132/Event | f75a6397de3ed1af1132fc9fb814b96bbf4fdd86 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.10 on 2022-04-10 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Customer.mobile to a 10-character CharField."""

    dependencies = [
        ('customer', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='mobile',
            field=models.CharField(max_length=10),
        ),
    ]
| 19.947368 | 50 | 0.591029 |
754c595a41b5fd45ec05f6d4c981ab780c9b01b0 | 576 | py | Python | drf_mapped_choice/fields.py | claytondaley/drf-mapped-choice | d228df6c004de93069536e49ce1afb52b09fcca2 | [
"Apache-2.0"
] | null | null | null | drf_mapped_choice/fields.py | claytondaley/drf-mapped-choice | d228df6c004de93069536e49ce1afb52b09fcca2 | [
"Apache-2.0"
] | null | null | null | drf_mapped_choice/fields.py | claytondaley/drf-mapped-choice | d228df6c004de93069536e49ce1afb52b09fcca2 | [
"Apache-2.0"
] | null | null | null | from rest_framework import fields, serializers
class MappedChoiceField(fields.ChoiceField):
    # A ChoiceField whose string-to-value lookup table is built in the
    # opposite direction of DRF's default setter: it maps display value
    # back to the stored key.
    @serializers.ChoiceField.choices.setter
    def choices(self, choices):
        # Rebuild the grouped and flat choice dicts via DRF's helpers.
        self.grouped_choices = fields.to_choices_dict(choices)
        self._choices = fields.flatten_choices_dict(self.grouped_choices)
        # Inverted lookup: display value -> key.  NOTE(review): stock DRF
        # appears to map str(key) -> key here; confirm this inversion is
        # intended against the installed rest_framework version.
        self.choice_strings_to_values = {v: k for k, v in self._choices.items()}

    def validate_empty_values(self, data):
        # Treat the empty string as "blank": when allow_blank is set,
        # short-circuit with (is_empty=True, value=None) instead of
        # letting the base class raise a validation error.
        if data == '':
            if self.allow_blank:
                return (True, None)
        return super().validate_empty_values(data)
| 33.882353 | 80 | 0.692708 |
568a1ddf9caffe5cfda7ae49f315aede5c8a70ac | 681 | py | Python | mainapp/migrations/0014_auto_20181020_1101.py | ploggingdev/hackerschat | f6d881e7bed0e634e9db6e788befa990ac581bbd | [
"MIT"
] | 3 | 2018-01-24T09:42:22.000Z | 2018-09-28T18:52:01.000Z | mainapp/migrations/0014_auto_20181020_1101.py | ploggingdev/hackerschat | f6d881e7bed0e634e9db6e788befa990ac581bbd | [
"MIT"
] | 6 | 2020-02-11T22:02:02.000Z | 2022-02-11T03:39:46.000Z | mainapp/migrations/0014_auto_20181020_1101.py | ploggingdev/hackerschat | f6d881e7bed0e634e9db6e788befa990ac581bbd | [
"MIT"
] | 3 | 2018-02-03T16:35:54.000Z | 2019-04-09T16:10:20.000Z | # Generated by Django 2.1.2 on 2018-10-20 11:01
from django.db import migrations
def set_room_position_value(apps, schema_editor):
    """Data migration: set position=2 on each topic's "off-topic" room.

    Uses historical models via ``apps.get_model`` (never import models
    directly in a migration).  A single filtered ``update()`` per topic
    replaces the original exists() + get() + save() sequence: it touches
    exactly the same rows (rooms named "off-topic" for the topic) while
    halving the query count.  ``QuerySet.update()`` is safe here because
    historical migration models carry no custom ``save()`` logic.
    """
    Topic = apps.get_model('mainapp', 'Topic')
    Room = apps.get_model('mainapp', 'Room')
    for topic in Topic.objects.all():
        Room.objects.filter(name="off-topic", topic=topic).update(position=2)
class Migration(migrations.Migration):
    """Run the data migration that backfills Room.position for off-topic rooms."""

    dependencies = [
        ('mainapp', '0013_auto_20181020_1053'),
    ]

    operations = [
        # NOTE(review): no reverse callable is supplied, so this migration
        # cannot be unapplied.
        migrations.RunPython(set_room_position_value),
    ]
| 28.375 | 75 | 0.660793 |
c174bbf8c701f6dab10a1cd948f7dba3c0274801 | 527 | py | Python | pro1/users/migrations/0002_auto_20181103_0240.py | BhanuPrakashNani/StudentPortal | 689f12e693c01b5f78e8acee7e77b390434ae047 | [
"MIT"
] | 9 | 2018-12-03T12:51:25.000Z | 2018-12-19T23:43:07.000Z | pro1/users/migrations/0002_auto_20181103_0240.py | BhanuPrakashNani/StudentPortal | 689f12e693c01b5f78e8acee7e77b390434ae047 | [
"MIT"
] | 97 | 2018-11-17T18:34:18.000Z | 2018-12-29T15:34:50.000Z | pro1/users/migrations/0002_auto_20181103_0240.py | itsvrushabh/StudentPortal | e10f2aa7b8da021ae8a285160f64695ad5bc7a72 | [
"MIT"
] | 34 | 2018-12-01T16:30:09.000Z | 2019-01-09T16:51:04.000Z | # Generated by Django 2.1.3 on 2018-11-02 21:10
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the bio, birth_date and location fields from Profile."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='bio',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='birth_date',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='location',
        ),
    ]
| 20.269231 | 47 | 0.533207 |
9bd29bf276326c7e8039b0a80b6a48ebed1a1c6a | 2,770 | py | Python | examples/Website_Copy/run_meta_descript.py | paigebranam/Halldon_ | 1f02878e39e818055f86b8db570f30ae280948dc | [
"MIT"
] | null | null | null | examples/Website_Copy/run_meta_descript.py | paigebranam/Halldon_ | 1f02878e39e818055f86b8db570f30ae280948dc | [
"MIT"
] | null | null | null | examples/Website_Copy/run_meta_descript.py | paigebranam/Halldon_ | 1f02878e39e818055f86b8db570f30ae280948dc | [
"MIT"
] | null | null | null | ##updated api path, engine, top p, updated top p, temp, and tokens.
##Runs as expected
##must enter product and a descript.
import os
import sys
API_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
sys.path.append(API_PATH)
from api import GPT, Example, UIConfig
# Construct GPT object and show some examples
# NOTE(review): line breaks inside the triple-quoted examples below are part
# of the prompt text sent to the model; reproduced as found.
gpt = GPT(engine="curie-instruct-beta",
          temperature=0.5,
          top_p=.5,
          max_tokens=100)

#Create a meta description
# Few-shot examples: (product summary, marketing meta description) pairs.
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """These natural rooting hormones will promote the development of roots on most popular home, garden and greenhouse plants."""))
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """Rooting hormone aids the development of roots on most popular home, garden and greenhouse plants."""))
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """Root-A-Hormone™ is an all natural plant rooting hormone with bio-stimulants formulated to promote new root growth on most popular home and
greenhouse plant varieties."""))
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """Rooting hormone helps you start seeds and cuttings easily by helping them develop roots faster. Also helps plant transplants
become established more quickly, so they can get growing."""))
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """Our quality, trusted rooting products provide home gardeners with new ways to create more green."""))
gpt.add_example(Example("""Rooting hormone, promotes root development.""",
                        """Rooting hormone is also used to make cuttings of rooted plants and in general to stimulate root growth."""))
gpt.add_example(Example("""Detangle Spray, softens and detangles hair.""",
                        """Detangle spray prevents tangles and leaves hair moisturized. This special detangler spray lubricates
hair strands to make styling easier. Hair feels soft and lightly nourished. Smells great! For all hair types."""))
gpt.add_example(Example("""Detangle Spray, softens and detangles hair.""",
                        """Detangle Hair Milk Spray helps you eliminate all tangles without the need for a comb or brush.
This special milky spray lubricates hair strands to make styling easier and more manageable. Hair feels soft, smooth and lightly nourished."""))
gpt.add_example(Example("""Detangle Spray,softens and detangles hair.""",
                        """Detangle and deters breakage with this lightweight spritz that helps to smooth hair"""))

# Define UI configuration
config = UIConfig(description="Create a Meta Description",
                  button_text="Create",
                  placeholder="Product, Description of product.")
# NOTE(review): 'id' shadows the builtin; presumably consumed by the demo
# framework as the app identifier — confirm before renaming.
id = "meta-description"
d26dcbb764f9ac5c9eeaacbd34759c186bcc8cca | 3,983 | py | Python | app/run.py | dibakart4/Disaster-Response-Pipeline | 82022e11f891e9b6f0dff7f5b88cc3c6aa54dbac | [
"MIT"
] | 9 | 2020-07-27T16:37:38.000Z | 2022-02-21T22:16:38.000Z | app/run.py | nkreimold/udacity-disaster-response-pipeline | 6aaa971f274bbd4bed992516b438ad4607faf0ca | [
"MIT"
] | null | null | null | app/run.py | nkreimold/udacity-disaster-response-pipeline | 6aaa971f274bbd4bed992516b438ad4607faf0ca | [
"MIT"
] | 50 | 2020-05-12T18:14:32.000Z | 2022-02-28T21:27:31.000Z | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk import pos_tag, word_tokenize
import nltk
from sklearn.base import BaseEstimator, TransformerMixin
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
# Flask application object; the view functions below register routes on it.
app = Flask(__name__)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """Binary text feature: does any sentence start with a verb or 'RT'?"""

    def starting_verb(self, text):
        # Scan sentence by sentence; stop at the first verb-initial one.
        for sentence in nltk.sent_tokenize(text):
            tagged = nltk.pos_tag(tokenize(sentence))
            word, tag = tagged[0]
            if tag in ('VB', 'VBP') or word == 'RT':
                return True
        return False

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        flags = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(flags)
def tokenize(text):
    """Tokenize *text*, lemmatizing, lowercasing and stripping each token."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
# load data
# Paths are relative to the app/ directory this script runs from.
engine = create_engine('sqlite:///../data/disaster_response_db.db')
df = pd.read_sql_table('df', engine)

# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with two Plotly bar charts built from df."""
    # extract data needed for visuals
    # TODO: Below is an example - modify to extract data for your own visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    # Columns 4+ hold the one-hot category labels; count non-zero cells per
    # category to get how often each category occurs.
    category_names = df.iloc[:,4:].columns
    category_boolean = (df.iloc[:,4:] != 0).sum().values

    # create visuals
    # TODO: Below is an example - modify to create your own visuals
    graphs = [
            # GRAPH 1 - genre graph
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],

            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
            # GRAPH 2 - category graph
        {
            'data': [
                Bar(
                    x=category_names,
                    y=category_boolean
                )
            ],

            'layout': {
                'title': 'Distribution of Message Categories',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Category",
                    'tickangle': 35
                }
            }
        }
    ]

    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
    """Classify the user-supplied message and render the results page."""
    query = request.args.get('query', '')

    # Predict every category label for the single query message and pair
    # each label with its category column name.
    labels = model.predict([query])[0]
    results = dict(zip(df.columns[4:], labels))

    # This will render the go.html Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=results
    )
def main():
    # Bind on all interfaces so the app is reachable from outside the host
    # (e.g. inside a container); debug mode enables the reloader.
    app.run(host='0.0.0.0', port=3001, debug=True)


if __name__ == '__main__':
    main()
65359c5bdc4947753ef44532eaeee51e9c7f6ba5 | 8,315 | py | Python | main.py | jimgoo/pytorch-a2c-ppo-acktr | c6e12ecc113bed6607f00ddaec8e416ee5900454 | [
"MIT"
] | null | null | null | main.py | jimgoo/pytorch-a2c-ppo-acktr | c6e12ecc113bed6607f00ddaec8e416ee5900454 | [
"MIT"
] | null | null | null | main.py | jimgoo/pytorch-a2c-ppo-acktr | c6e12ecc113bed6607f00ddaec8e416ee5900454 | [
"MIT"
] | null | null | null | import copy
import glob
import os
import time
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from a2c_ppo_acktr import algo
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
args = get_args()

assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
    assert args.algo in ['a2c', 'ppo'], \
        'Recurrent policy is not implemented for ACKTR'

# Number of policy-update iterations implied by the environment-step budget.
num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes

torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)

if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
    # Trade cuDNN autotuning for reproducible kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

# Create the monitor-log directory; if it already exists, clear stale
# *.monitor.csv files from previous runs.
args.log_dir = os.path.expanduser(args.log_dir)
try:
    os.makedirs(args.log_dir)
except OSError:
    files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))
    for f in files:
        os.remove(f)

# Same setup for the evaluation log directory.
eval_log_dir = args.log_dir + "_eval"
try:
    os.makedirs(eval_log_dir)
except OSError:
    files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))
    for f in files:
        os.remove(f)
def main():
    """Train an A2C/PPO/ACKTR agent: collect rollouts, update, log, save, eval."""
    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")

    envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
                         args.gamma, args.log_dir, device, False)

    actor_critic = Policy(envs.observation_space.shape, envs.action_space,
                          base_kwargs={'recurrent': args.recurrent_policy})
    actor_critic.to(device)
    print('\n' + str(actor_critic))

    # Pick the update rule; A2C and ACKTR share one class (acktr flag).
    if args.algo == 'a2c':
        agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
                               args.entropy_coef, lr=args.lr,
                               eps=args.eps, alpha=args.alpha,
                               max_grad_norm=args.max_grad_norm)
    elif args.algo == 'ppo':
        agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
                         args.value_loss_coef, args.entropy_coef, lr=args.lr,
                         eps=args.eps, max_grad_norm=args.max_grad_norm)
    elif args.algo == 'acktr':
        agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
                               args.entropy_coef, acktr=True)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape, envs.action_space,
                              actor_critic.recurrent_hidden_state_size)

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    episode_rewards = deque(maxlen=10)

    start = time.time()
    for j in range(num_updates):

        if args.use_linear_lr_decay:
            # decrease learning rate linearly
            if args.algo == "acktr":
                # use optimizer's learning rate since it's hard-coded in kfac.py
                update_linear_schedule(agent.optimizer, j, num_updates, agent.optimizer.lr)
            else:
                update_linear_schedule(agent.optimizer, j, num_updates, args.lr)

        if args.algo == 'ppo' and args.use_linear_clip_decay:
            agent.clip_param = args.clip_param * (1 - j / float(num_updates))

        # ---- Rollout collection: step all envs num_steps times ----
        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step],
                    rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])

            # Obser reward and next obs
            obs, reward, done, infos = envs.step(action)

            for info in infos:
                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            # bad_masks mark time-limit terminations ('bad_transition') so
            # returns can be bootstrapped correctly.
            bad_masks = torch.FloatTensor([[0.0] if 'bad_transition' in info.keys() else [1.0]
                                           for info in infos])
            rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks)

        # Bootstrap value of the last state, compute returns, update policy.
        with torch.no_grad():
            next_value = actor_critic.get_value(rollouts.obs[-1],
                                                rollouts.recurrent_hidden_states[-1],
                                                rollouts.masks[-1]).detach()

        rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau, args.use_proper_time_limits)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)

        rollouts.after_update()

        # save for every interval-th episode or for the last epoch
        if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "":
            save_path = os.path.join(args.save_dir, args.algo)
            try:
                os.makedirs(save_path)
            except OSError:
                pass

            # A really ugly way to save a model to CPU
            save_model = actor_critic
            if args.cuda:
                save_model = copy.deepcopy(actor_critic).cpu()

            # Saved as [policy, observation-normalization stats].
            save_model = [save_model,
                          getattr(get_vec_normalize(envs), 'ob_rms', None)]

            torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))

        total_num_steps = (j + 1) * args.num_processes * args.num_steps

        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            end = time.time()
            # NOTE(review): the format string has 8 placeholders but 11
            # arguments — dist_entropy, value_loss and action_loss are
            # silently dropped by str.format.
            print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
                  format(j, total_num_steps,
                         int(total_num_steps / (end - start)),
                         len(episode_rewards),
                         np.mean(episode_rewards),
                         np.median(episode_rewards),
                         np.min(episode_rewards),
                         np.max(episode_rewards),
                         dist_entropy, value_loss, action_loss))

        # ---- Periodic deterministic evaluation on fresh envs ----
        if (args.eval_interval is not None
                and len(episode_rewards) > 1
                and j % args.eval_interval == 0):
            eval_envs = make_vec_envs(
                args.env_name, args.seed + args.num_processes, args.num_processes,
                args.gamma, eval_log_dir, device, True)

            # Reuse the training observation-normalization statistics.
            vec_norm = get_vec_normalize(eval_envs)
            if vec_norm is not None:
                vec_norm.eval()
                vec_norm.ob_rms = get_vec_normalize(envs).ob_rms

            eval_episode_rewards = []

            obs = eval_envs.reset()
            eval_recurrent_hidden_states = torch.zeros(args.num_processes,
                                                       actor_critic.recurrent_hidden_state_size, device=device)
            eval_masks = torch.zeros(args.num_processes, 1, device=device)

            while len(eval_episode_rewards) < 10:
                with torch.no_grad():
                    _, action, _, eval_recurrent_hidden_states = actor_critic.act(
                        obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)

                # Obser reward and next obs
                obs, reward, done, infos = eval_envs.step(action)

                eval_masks = torch.tensor([[0.0] if done_ else [1.0]
                                           for done_ in done],
                                          dtype=torch.float32,
                                          device=device)

                for info in infos:
                    if 'episode' in info.keys():
                        eval_episode_rewards.append(info['episode']['r'])

            eval_envs.close()

            print(" Evaluation using {} episodes: mean reward {:.5f}\n".
                  format(len(eval_episode_rewards),
                         np.mean(eval_episode_rewards)))


if __name__ == "__main__":
    main()
| 38.317972 | 152 | 0.584967 |
cad4ab4a1d072f518237d50e751c099ea4e862f7 | 2,824 | py | Python | trackpoint-speedup.py | omakoto/key-remapper | 6baf0cfdc87411c0cab1df69c303b8ad50acd406 | [
"MIT"
] | null | null | null | trackpoint-speedup.py | omakoto/key-remapper | 6baf0cfdc87411c0cab1df69c303b8ad50acd406 | [
"MIT"
] | null | null | null | trackpoint-speedup.py | omakoto/key-remapper | 6baf0cfdc87411c0cab1df69c303b8ad50acd406 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#
# Remapper for https://www.amazon.com/gp/product/B00RM75NL0
#
import math
import os
import sys
import evdev
from evdev import ecodes, InputEvent
import key_remapper
NAME = "Trackpoint Spped-up"
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
ICON = os.path.join(SCRIPT_PATH, 'res/trackpoint.png')
DEFAULT_DEVICE_NAME = "^TPPS/2 Elan TrackPoint"
# evtest output for the device:
# Input device ID: bus 0x11 vendor 0x2 product 0xa version 0x63
# Input device name: "TPPS/2 Elan TrackPoint"
# Supported events:
# Event type 0 (EV_SYN)
# Event type 1 (EV_KEY)
# Event code 272 (BTN_LEFT)
# Event code 273 (BTN_RIGHT)
# Event code 274 (BTN_MIDDLE)
# Event type 2 (EV_REL)
# Event code 0 (REL_X)
# Event code 1 (REL_Y)
class Remapper(key_remapper.BaseRemapper):
    """Remapper that applies a power-curve acceleration to TrackPoint motion.

    Relative X/Y movement above a small dead-zone threshold is amplified as
    ``((1 + v/scale) ** power - 1) * scale``; button events pass through.
    """
    def __init__(self):
        super().__init__(NAME, ICON, DEFAULT_DEVICE_NAME,
                         match_non_keyboards=True,  # Needed to read from non-keyboard devices.
                         # By default, you can only allows to send EV_KEY w/ KEY_* and BTN_* events.
                         # To send other events, you need to list all of them (including EV_KEY events) here.
                         uinput_events={
                             ecodes.EV_KEY: (ecodes.BTN_LEFT, ecodes.BTN_RIGHT, ecodes.BTN_MIDDLE),
                             ecodes.EV_REL: (ecodes.REL_X, ecodes.REL_Y),
                         })
        # Placeholder curve parameters; real values arrive in
        # on_arguments_parsed().  NOTE(review): self.scale is only assigned
        # there, not here.
        self.threshold = 0
        self.add = 0
        self.power = 1

    def on_init_arguments(self, parser):
        # Command-line knobs for the acceleration curve.
        parser.add_argument('--threshold', type=int, default=2, metavar='T')
        parser.add_argument('--add', type=float, default=0, metavar='V')
        parser.add_argument('--power', type=float, default=2.5, metavar='P')
        parser.add_argument('--scale', type=float, default=5, metavar='S')

    def on_arguments_parsed(self, args):
        # Copy parsed options onto the instance for use in on_handle_event().
        self.threshold = args.threshold
        self.add = args.add
        self.power = args.power
        self.scale = args.scale

    def on_handle_event(self, device: evdev.InputDevice, ev: evdev.InputEvent):
        # Reshape relative-motion events; everything else is forwarded as-is.
        if ev.type == ecodes.EV_REL:
            value = math.fabs(ev.value) - self.threshold
            if value < 1:
                # Within the dead zone: keep the original movement.
                value = ev.value
            else:
                # Accelerate: normalize, apply the power curve, rescale,
                # then restore the sign of the original movement.
                value = (value + self.add) / self.scale
                value = (math.pow(1 + value, self.power) - 1) * self.scale
                value = value + self.threshold
                if ev.value < 0:
                    value = -value
            if self.enable_debug:
                print(f'{ev.code}: {ev.value} -> {value}')
            ev.value = int(value)
        self.send_event(ev.type, ev.code, ev.value)
def main(args):
    """Entry point: build the remapper and hand the CLI args to it."""
    Remapper().main(args)

if __name__ == '__main__':
    main(sys.argv[1:])
| 32.090909 | 109 | 0.600921 |
f21d144c4b637aea62a9a8ed26a70ea9225ac40e | 835 | py | Python | python/mxnet/ndarray/contrib.py | saurabh3949/mxnet | e25074a469b45f2cbde68e2a0c8963daea93b66b | [
"Apache-2.0"
] | 4 | 2017-11-17T07:28:09.000Z | 2019-07-23T06:24:16.000Z | python/mxnet/ndarray/contrib.py | saurabh3949/mxnet | e25074a469b45f2cbde68e2a0c8963daea93b66b | [
"Apache-2.0"
] | null | null | null | python/mxnet/ndarray/contrib.py | saurabh3949/mxnet | e25074a469b45f2cbde68e2a0c8963daea93b66b | [
"Apache-2.0"
] | 2 | 2019-06-12T12:40:20.000Z | 2020-11-03T14:33:14.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contrib NDArray API of MXNet."""
__all__ = []
| 41.75 | 62 | 0.764072 |
3e469b5475bf01cbe44a4457a03d3239140e5b4c | 3,623 | py | Python | core/platform/taskqueue/cloud_taskqueue_services.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | 2 | 2021-04-08T01:06:08.000Z | 2021-06-02T08:20:13.000Z | core/platform/taskqueue/cloud_taskqueue_services.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | null | null | null | core/platform/taskqueue/cloud_taskqueue_services.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | 1 | 2020-12-11T06:56:31.000Z | 2020-12-11T06:56:31.000Z | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functionality for Google Cloud Tasks-related operations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import json
import logging
import feconf
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
CLIENT = tasks_v2.CloudTasksClient()
def create_http_task(
        queue_name, url, payload=None, scheduled_for=None, task_name=None):
    """Creates an http task and submits it to the Cloud Tasks API.

    An http task is an asynchronous POST request to ``url`` carrying
    ``payload``; the Cloud Tasks service performs the request once the
    ``scheduled_for`` time is reached (immediately when it is None).

    Args:
        queue_name: str. The name of the queue to add the http task to.
        url: str. URL of the handler function.
        payload: dict(str : *)|None. Payload to pass to the request, or None
            if no payload is required.
        scheduled_for: datetime|None. Naive datetime at which to execute the
            task; None means immediate execution.
        task_name: str|None. Optional name of the task.

    Returns:
        Response. Response object returned by the Cloud Tasks API.
    """
    # Cloud Tasks addresses a queue by project id, region and queue name.
    parent = CLIENT.queue_path(
        feconf.OPPIA_PROJECT_ID, feconf.GOOGLE_APP_ENGINE_REGION, queue_name)
    complete_url = '%s%s' % (feconf.OPPIA_SITE_URL, url)
    # NOTE(review): http_method 1 is presumably the POST value of the Cloud
    # Tasks HttpMethod enum -- confirm against the client library.
    task = {
        'http_request': {
            'http_method': 1,
            'url': complete_url,
        }
    }
    if payload is not None:
        if isinstance(payload, dict):
            payload = json.dumps(payload)
            task['http_request']['headers'] = {
                'Content-type': 'application/json'
            }
        # The API expects the request body as bytes.
        task['http_request']['body'] = payload.encode()
    if scheduled_for is not None:
        # The schedule must be supplied as a protobuf Timestamp.
        schedule_proto = timestamp_pb2.Timestamp()
        schedule_proto.FromDatetime(scheduled_for)
        task['schedule_time'] = schedule_proto
    if task_name is not None:
        task['name'] = task_name
    # retry=None keeps the default retry arguments from queue.yaml.
    response = CLIENT.create_task(parent, task, retry=None)
    logging.info('Created task %s' % response.name)
    return response
| 36.23 | 79 | 0.686448 |
bb7b0cf6cb2c9c63741d8759262e92b543d7fb6b | 6,247 | py | Python | sample_imageinpainting_HiFill/GPU_CPU/HiFill_inpainting.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | 1 | 2022-03-14T10:58:32.000Z | 2022-03-14T10:58:32.000Z | sample_imageinpainting_HiFill/GPU_CPU/HiFill_inpainting.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | 1 | 2021-11-10T04:19:40.000Z | 2021-11-15T13:56:41.000Z | sample_imageinpainting_HiFill/GPU_CPU/HiFill_inpainting.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import tensorflow as tf
import glob
import argparse
import os
# Fixed geometry of the HiFill generator: the network always consumes a
# 512x512 crop and computes contextual attention over a 32x32 grid.
INPUT_SIZE = 512 # input image size for Generator
ATTENTION_SIZE = 32 # size of contextual attention
def sort(str_lst):
    """Return the items of *str_lst* as a new list in ascending order."""
    # sorted() already returns a new list; the previous comprehension
    # ([s for s in sorted(...)]) only made a redundant second copy.
    return sorted(str_lst)
# reconstruct residual from patches
def reconstruct_residual_from_patches(residual, multiple):
    """Stitch a flat patch tensor back into a full-resolution residual map."""
    side = ATTENTION_SIZE * multiple
    patches = np.reshape(
        residual, [ATTENTION_SIZE, ATTENTION_SIZE, multiple, multiple, 3])
    # Interleave the patch-grid axes with the intra-patch axes
    # (swapaxes(1, 2) == transpose([0, 2, 1, 3, 4])), then flatten to (H, W, 3).
    return np.reshape(patches.swapaxes(1, 2), [side, side, 3])
# extract image patches
def extract_image_patches(img, multiple):
    """Split *img* of shape (H, W, C) into a grid of multiple x multiple patches.

    Returns an array of shape (H//multiple, W//multiple, multiple, multiple, C).
    """
    h, w, c = img.shape
    grid = np.reshape(img, [h // multiple, multiple, w // multiple, multiple, c])
    # swapaxes(1, 2) is equivalent to transpose([0, 2, 1, 3, 4]).
    return grid.swapaxes(1, 2)
# residual aggregation module
def residual_aggregate(residual, attention, multiple):
    """Reassemble the residual map weighted by the contextual-attention scores."""
    patch = multiple * INPUT_SIZE // ATTENTION_SIZE
    tiles = extract_image_patches(residual, patch)
    flat = np.reshape(tiles, [1, tiles.shape[0] * tiles.shape[1], -1])
    # Mix the flattened patches according to the attention map, then stitch
    # the result back into an image-shaped residual.
    mixed = np.matmul(attention, flat)
    return reconstruct_residual_from_patches(mixed, patch)
# resize image by averaging neighbors
def resize_ave(img, multiple):
    """Downscale *img* by *multiple* via block-averaging (area interpolation)."""
    blocks = extract_image_patches(img.astype(np.float32), multiple)
    # Each (multiple x multiple) block averages down to one output pixel.
    return np.mean(blocks, axis=(2, 3))
# pre-processing module
def pre_process(raw_img, raw_mask, multiple):
    """Build the network inputs from a raw image/mask pair.

    Returns (large_img, large_mask, small_img, small_mask): the image and mask
    resized to multiple*512, plus their 512x512 counterparts; the small mask
    is inverted so holes are 1. and background is 0.
    """
    mask_f = raw_mask.astype(np.float32) / 255.
    img_f = raw_img.astype(np.float32)
    big = multiple * INPUT_SIZE
    # Resize raw image & mask to the designated (large) working size.
    large_img = cv2.resize(img_f, (big, big), interpolation=cv2.INTER_LINEAR)
    large_mask = cv2.resize(mask_f, (big, big), interpolation=cv2.INTER_NEAREST)
    # Block-average the large image down to the 512x512 generator input.
    small_img = resize_ave(large_img, multiple)
    small_mask = cv2.resize(mask_f, (INPUT_SIZE, INPUT_SIZE), interpolation=cv2.INTER_NEAREST)
    # Invert: hole region becomes 1., background 0.
    return large_img, large_mask, small_img, 1. - small_mask
# post-processing module
def post_process(raw_img, large_img, large_mask, res_512, img_512, mask_512, attention, multiple):
    """Upscale the 512x512 network output back to the raw resolution.

    Combines the low-resolution inpainted result with an attention-weighted
    high-frequency residual, then pastes the hole region onto the raw image.
    Returns a uint8 image with the same height/width as ``raw_img``.
    """
    # compute the raw residual map
    h, w, c = raw_img.shape
    low_base = cv2.resize(res_512.astype(np.float32), (INPUT_SIZE * multiple, INPUT_SIZE * multiple), interpolation = cv2.INTER_LINEAR)
    low_large = cv2.resize(img_512.astype(np.float32), (INPUT_SIZE * multiple, INPUT_SIZE * multiple), interpolation = cv2.INTER_LINEAR)
    # High-frequency detail lost by the downsample, restricted to the mask.
    residual = (large_img - low_large) * large_mask
    # reconstruct residual map using residual aggregation module
    residual = residual_aggregate(residual, attention, multiple)
    # compute large inpainted result
    res_large = low_base + residual
    res_large = np.clip(res_large, 0., 255.)
    # resize large inpainted result to raw size
    res_raw = cv2.resize(res_large, (w, h), interpolation = cv2.INTER_LINEAR)
    # paste the hole region to the original raw image
    mask = cv2.resize(mask_512.astype(np.float32), (w, h), interpolation = cv2.INTER_LINEAR)
    mask = np.expand_dims(mask, axis=2)
    res_raw = res_raw * mask + raw_img * (1. - mask)
    return res_raw.astype(np.uint8)
def inpaint(raw_img,
            raw_mask,
            sess,
            inpainted_512_node,
            attention_node,
            mask_512_node,
            img_512_ph,
            mask_512_ph,
            multiple):
    """Run the HiFill generator once and return the full-resolution result."""
    # 1) Downsample image/mask to the fixed 512x512 network resolution.
    img_large, mask_large, img_512, mask_512 = pre_process(raw_img, raw_mask, multiple)
    # 2) Single forward pass through the frozen graph.
    feeds = {img_512_ph: [img_512], mask_512_ph: [mask_512[:, :, 0:1]]}
    inpainted_512, attention, mask_512 = sess.run(
        [inpainted_512_node, attention_node, mask_512_node], feed_dict=feeds)
    # 3) Residual aggregation back to the original resolution.
    return post_process(raw_img, img_large, mask_large, inpainted_512[0],
                        img_512, mask_512[0], attention[0], multiple)
def read_imgs_masks(args):
    """Collect sorted image and mask paths two directory levels below the
    ``args.images`` / ``args.masks`` roots, reporting how many were found."""
    paths_img = sort(glob.glob(args.images + '/*/*'))
    paths_mask = sort(glob.glob(args.masks + '/*/*'))
    print('number of imgs: ' + str(len(paths_img)))
    print('number of masks: ' + str(len(paths_mask)))
    return paths_img, paths_mask
def HiFill(img_array, mask_array, display=False,
           pb_path='/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/YLD_YouLookDifferent/sample_imageinpainting_HiFill/GPU_CPU/pb/hifill.pb'):
    """Inpaint ``img_array`` inside ``mask_array`` with the frozen HiFill graph.

    Args:
        img_array: BGR uint8 image of shape (H, W, 3).
        mask_array: uint8 mask; a single-channel mask is broadcast to three
            channels. It is bitwise-inverted before being fed to the network.
        display: If True, show the result in an OpenCV window and block.
        pb_path: Path to the frozen ``hifill.pb`` graph. Previously this was
            hard-coded inside the function; it is now a keyword parameter with
            the same default, so the model can be loaded from other locations.

    Returns:
        The inpainted image at the original resolution (uint8).
    """
    # Promote a single-channel mask to three channels.
    if len(mask_array.shape) == 2:
        mask_array = np.expand_dims(mask_array, axis=2)
        mask_array = mask_array * np.ones((1, 1, 3), dtype=np.uint8)
    multiple = 6  # multiples of image resizing
    with tf.Graph().as_default():
        # Load the frozen inference graph into the default graph.
        with open(pb_path, "rb") as f:
            output_graph_def = tf.GraphDef()
            output_graph_def.ParseFromString(f.read())
            tf.import_graph_def(output_graph_def, name="")
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            # Named tensors baked into the frozen graph.
            image_ph = sess.graph.get_tensor_by_name('img:0')
            mask_ph = sess.graph.get_tensor_by_name('mask:0')
            inpainted_512_node = sess.graph.get_tensor_by_name('inpainted:0')
            attention_node = sess.graph.get_tensor_by_name('attention:0')
            mask_512_node = sess.graph.get_tensor_by_name('mask_processed:0')
            # The network expects the inverse of the supplied mask.
            mask_array = cv2.bitwise_not(mask_array)
            inpainted = inpaint(img_array, mask_array, sess, inpainted_512_node, attention_node, mask_512_node, image_ph, mask_ph, multiple)
            if display:
                cv2.imshow("inpainted", inpainted)
                cv2.waitKey(0)
            return inpainted
if __name__=="__main__":
img_path = "/home/socialab157/Desktop/YLD_fig/test_inpainting_methods/images/orig_11_20_0.jpg"
mask_path = "/home/socialab157/Desktop/YLD_fig/test_inpainting_methods/masks/mask_11_20_0.jpg"
raw_img = cv2.imread(img_path)
raw_mask = cv2.imread(mask_path)
HiFill(raw_img, raw_mask, True)
| 38.325153 | 184 | 0.707059 |
5d7508d3b8628ae0b60757014857e6df5510f433 | 18,760 | py | Python | orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | [
"MIT"
] | 6,036 | 2019-05-07T06:03:57.000Z | 2022-03-31T17:59:54.000Z | orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | [
"MIT"
] | 5,730 | 2019-05-06T23:04:55.000Z | 2022-03-31T23:55:56.000Z | orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | [
"MIT"
] | 1,566 | 2019-05-07T01:30:07.000Z | 2022-03-31T17:06:50.000Z | import logging
import argparse
import torch
import wget
import os
import pandas as pd
import zipfile
from transformers import BertTokenizer, AutoConfig
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
import numpy as np
import random
import time
import datetime
import onnxruntime
from onnxruntime.training.ortmodule import ORTModule, DebugOptions
def train(model, optimizer, scheduler, train_dataloader, epoch, device, args):
    """Run one full training epoch and return its wall-clock duration.

    Based on the HuggingFace ``run_glue.py`` example. Prints progress every
    ``args.log_interval`` batches and stops early after ``args.train_steps``
    batches when that value is non-negative.
    """
    print('\n======== Epoch {:} / {:} with batch size {:} ========'.format(epoch + 1, args.epochs, args.batch_size))
    # Measure how long the training epoch takes.
    t0 = time.time()
    start_time = t0
    # Reset the total loss for this epoch.
    total_loss = 0
    # `train()` only switches the *mode* (dropout/batchnorm behaviour); it
    # does not perform any training by itself.
    model.train()
    for step, batch in enumerate(train_dataloader):
        if step == args.train_steps:
            break
        # Unpack the batch and copy each tensor to the target device:
        #   [0]: input ids, [1]: attention masks, [2]: labels.
        b_input_ids = batch[0].to(device)
        b_input_mask = batch[1].to(device)
        b_labels = batch[2].to(device)
        # Clear gradients accumulated by the previous step (PyTorch
        # accumulates by default).
        model.zero_grad()
        # Forward pass; passing `labels` makes the model return the loss.
        outputs = model(b_input_ids,
                        attention_mask=b_input_mask,
                        labels=b_labels)
        # The model returns a tuple whose first element is the loss.
        loss = outputs[0]
        # Periodic progress report.
        if step % args.log_interval == 0 and not step == 0:
            curr_time = time.time()
            elapsed_time = curr_time - start_time
            print(f'Batch {step:4} of {len(train_dataloader):4}. Execution time: {elapsed_time:.4f}. Loss: {loss.item():.4f}')
            start_time = curr_time
        if args.view_graphs:
            import torchviz
            pytorch_backward_graph = torchviz.make_dot(outputs[0], params=dict(list(model.named_parameters())))
            pytorch_backward_graph.view()
        # Accumulate the scalar loss to compute the epoch average later.
        total_loss += loss.item()
        # Backward pass.
        loss.backward()
        # Clip gradient norm to 1.0 to mitigate exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        # Parameter update, then learning-rate schedule update.
        optimizer.step()
        scheduler.step()
    # Average loss over all batches of the epoch.
    avg_train_loss = total_loss / len(train_dataloader)
    epoch_time = time.time() - t0
    print("\n Average training loss: {0:.2f}".format(avg_train_loss))
    print(" Training epoch took: {:.4f}s".format(epoch_time))
    return epoch_time
def test(model, validation_dataloader, device, args):
    """Evaluate the model on the validation set.

    Returns:
        (epoch_time, accuracy): validation wall-clock time in seconds and the
        mean flat accuracy over all validation batches.
    """
    print("\nRunning Validation with batch size {:} ...".format(args.test_batch_size))
    # Evaluation mode: dropout layers behave deterministically.
    model.eval()
    t0 = time.time()
    # Tracking variables.
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for batch in validation_dataloader:
        # Move the whole batch to the device, then unpack it.
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        # No gradients needed for evaluation (saves memory and time).
        with torch.no_grad():
            # NOTE(review): labels are passed even during validation because
            # the model was exported with 3 inputs for training, so validation
            # must use the same signature (per the original sample's TODO, an
            # alternative is re-exporting the model from a checkpoint).
            outputs = model(b_input_ids,
                            attention_mask=b_input_mask,
                            labels=b_labels)
        # "logits" are the raw output scores prior to any activation.
        logits = outputs[1]
        # Move logits and labels to CPU for numpy processing.
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        # Accuracy for this batch of test sentences.
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy
        # Track the number of batches.
        nb_eval_steps += 1
    epoch_time = time.time() - t0
    accuracy = eval_accuracy/nb_eval_steps
    print(" Accuracy: {0:.2f}".format(accuracy))
    print(" Validation took: {:.4f}s".format(epoch_time))
    return epoch_time, accuracy
def load_dataset(args):
    """Download, tokenize and wrap the CoLA dataset into DataLoaders.

    Returns:
        (train_dataloader, validation_dataloader): a random-sampled training
        loader and a sequential validation loader (90/10 split).
    """
    # Download + extract the CoLA archive unless it is already present.
    def _download_dataset(download_dir):
        if not os.path.exists(download_dir):
            # Download the file (if we haven't already).
            print('Downloading dataset...')
            url = 'https://nyu-mll.github.io/CoLA/cola_public_1.1.zip'
            wget.download(url, './cola_public_1.1.zip')
        else:
            print('Reusing cached dataset')
    if not os.path.exists(args.data_dir):
        _download_dataset('./cola_public_1.1.zip')
        # Unzip it.
        print('Extracting dataset')
        with zipfile.ZipFile('./cola_public_1.1.zip', 'r') as zip_ref:
            zip_ref.extractall('./')
    else:
        print('Reusing extracted dataset')
    # Load the dataset into a pandas dataframe.
    df = pd.read_csv(os.path.join(args.data_dir, "in_domain_train.tsv"), delimiter='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
    # Get the lists of sentences and their labels.
    sentences = df.sentence.values
    labels = df.label.values
    # Load the BERT tokenizer.
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    # Max encoded-sentence length; 64 is slightly larger than the maximum
    # training sentence length of 47 tokens.
    MAX_LEN = 64
    # Tokenize all of the sentences and map the tokens to their word IDs.
    input_ids = []
    for sent in sentences:
        # `encode` tokenizes, prepends [CLS], appends [SEP] and maps to IDs.
        encoded_sent = tokenizer.encode(
            sent, # Sentence to encode.
            add_special_tokens = True, # Add '[CLS]' and '[SEP]'
        )
        # Pad with 0s up to MAX_LEN.
        if len(encoded_sent) < MAX_LEN:
            encoded_sent.extend([0]*(MAX_LEN-len(encoded_sent)))
        # Truncate to MAX_LEN.
        if len(encoded_sent) > MAX_LEN:
            encoded_sent = encoded_sent[:MAX_LEN]
        # Add the encoded sentence to the list.
        input_ids.append(encoded_sent)
    input_ids = np.array(input_ids, dtype=np.longlong)
    # Create attention masks: 1 for real tokens (id > 0), 0 for padding.
    attention_masks = []
    for sent in input_ids:
        att_mask = [int(token_id > 0) for token_id in sent]
        attention_masks.append(att_mask)
    # Use 90% for training and 10% for validation.
    train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
                                                            random_state=2018, test_size=0.1)
    # Do the same (seeded identically) for the masks.
    train_masks, validation_masks, _, _ = train_test_split(attention_masks, labels,
                                                random_state=2018, test_size=0.1)
    # Convert all inputs and labels into torch tensors.
    train_inputs = torch.tensor(train_inputs)
    validation_inputs = torch.tensor(validation_inputs)
    train_labels = torch.tensor(train_labels)
    validation_labels = torch.tensor(validation_labels)
    train_masks = torch.tensor(train_masks)
    validation_masks = torch.tensor(validation_masks)
    # DataLoader for the training set (random sampling).
    train_data = TensorDataset(train_inputs, train_masks, train_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # DataLoader for the validation set (sequential sampling).
    validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
    validation_sampler = SequentialSampler(validation_data)
    validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=args.test_batch_size)
    return train_dataloader, validation_dataloader
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Fraction of samples whose argmax prediction matches the label."""
    predicted = np.argmax(preds, axis=1).flatten()
    actual = labels.flatten()
    return np.sum(predicted == actual) / len(actual)
def format_time(elapsed):
    """Format *elapsed* seconds as an ``hh:mm:ss`` string (nearest second)."""
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
def main():
    """Fine-tune BERT on CoLA, optionally wrapped in ORTModule.

    Parses CLI arguments, prepares data/model/optimizer, runs the
    train/validate loop for ``--epochs`` epochs and prints timing stats.
    """
    # 1. Basic setup: CLI arguments.
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--pytorch-only', action='store_true', default=False,
                        help='disables ONNX Runtime training')
    parser.add_argument('--batch-size', type=int, default=32, metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                        help='input batch size for testing (default: 64)')
    parser.add_argument('--view-graphs', action='store_true', default=False,
                        help='views forward and backward graphs')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--epochs', type=int, default=4, metavar='N',
                        help='number of epochs to train (default: 4)')
    parser.add_argument('--seed', type=int, default=42, metavar='S',
                        help='random seed (default: 42)')
    parser.add_argument('--log-interval', type=int, default=40, metavar='N',
                        help='how many batches to wait before logging training status (default: 40)')
    parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
                        help='number of steps to train. Set -1 to run through whole dataset (default: -1)')
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING',
                        help='Log level (default: WARNING)')
    parser.add_argument('--num-hidden-layers', type=int, default=1, metavar='H',
                        help='Number of hidden layers for the BERT model. A vanila BERT has 12 hidden layers (default: 1)')
    parser.add_argument('--data-dir', type=str, default='./cola_public/raw',
                        help='Path to the bert data directory')
    args = parser.parse_args()
    # Device selection (CPU vs CUDA).
    if torch.cuda.is_available() and not args.no_cuda:
        device = torch.device("cuda")
        print('There are %d GPU(s) available.' % torch.cuda.device_count())
        print('We will use the GPU:', torch.cuda.get_device_name(0))
    else:
        print('No GPU available, using the CPU instead.')
        device = torch.device("cpu")
    # Set log level.
    numeric_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log_level)
    logging.basicConfig(level=numeric_level)
    # 2. Dataloaders.
    train_dataloader, validation_dataloader = load_dataset(args)
    # 3. Model: pretrained BERT with a single linear classification head.
    config = AutoConfig.from_pretrained(
        "bert-base-uncased",
        num_labels=2,
        num_hidden_layers=args.num_hidden_layers,
        output_attentions = False, # Whether the model returns attentions weights.
        output_hidden_states = False, # Whether the model returns all hidden-states.
    )
    model = BertForSequenceClassification.from_pretrained(
        "bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
        config=config,
    )
    if not args.pytorch_only:
        # Debug options kept around for future debugging of the ONNX export.
        debug_options = DebugOptions(save_onnx=False, onnx_prefix='BertForSequenceClassification')
        model = ORTModule(model, debug_options)
    # Move the model to the GPU when available.
    if torch.cuda.is_available() and not args.no_cuda:
        model.cuda()
    # Note: AdamW here is the huggingface implementation, not torch.optim's.
    optimizer = AdamW(model.parameters(),
                      lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
                      eps = 1e-8 # args.adam_epsilon - default is 1e-8.
                      )
    # Total number of training steps is number of batches * number of epochs.
    total_steps = len(train_dataloader) * args.epochs
    # Linear learning-rate decay without warmup.
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps = 0, # Default value in run_glue.py
                                                num_training_steps = total_steps)
    # Seed every RNG involved for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    onnxruntime.set_seed(args.seed)
    if torch.cuda.is_available() and not args.no_cuda:
        torch.cuda.manual_seed_all(args.seed)
    # 4. Train loop (fine-tune), timing training and validation separately.
    total_training_time, total_test_time, epoch_0_training, validation_accuracy = 0, 0, 0, 0
    for epoch_i in range(0, args.epochs):
        total_training_time += train(model, optimizer, scheduler, train_dataloader, epoch_i, device, args)
        # Epoch 0 includes the one-time ONNX export cost under ORTModule.
        if not args.pytorch_only and epoch_i == 0:
            epoch_0_training = total_training_time
        test_time, validation_accuracy = test(model, validation_dataloader, device, args)
        total_test_time += test_time
    # Sanity check: fine-tuning must beat random guessing on 2 classes.
    assert validation_accuracy > 0.5
    print('\n======== Global stats ========')
    if not args.pytorch_only:
        estimated_export = 0
        if args.epochs > 1:
            # Export cost ~= epoch-0 time minus the average of later epochs.
            estimated_export = epoch_0_training - (total_training_time - epoch_0_training)/(args.epochs-1)
            print(" Estimated ONNX export took: {:.4f}s".format(estimated_export))
        else:
            print(" Estimated ONNX export took: Estimate available when epochs > 1 only")
        print(" Accumulated training without export took: {:.4f}s".format(total_training_time - estimated_export))
    print(" Accumulated training took: {:.4f}s".format(total_training_time))
    print(" Accumulated validation took: {:.4f}s".format(total_test_time))
# Allow invoking the benchmark directly from the command line.
if __name__ == '__main__':
    main()
| 43.126437 | 164 | 0.644989 |
f43a9d893dcbc26036af6cc32f8507449ee4d374 | 10,994 | py | Python | tests/grid_test.py | fancompute/simphox | 917673cc3ef8fb54fcbbaaa93b8efdc09a8e3614 | [
"MIT"
] | 6 | 2021-08-31T16:20:33.000Z | 2021-12-27T18:04:52.000Z | tests/grid_test.py | fancompute/simphox | 917673cc3ef8fb54fcbbaaa93b8efdc09a8e3614 | [
"MIT"
] | 1 | 2021-08-29T21:09:30.000Z | 2021-08-29T21:17:47.000Z | tests/grid_test.py | fancompute/simphox | 917673cc3ef8fb54fcbbaaa93b8efdc09a8e3614 | [
"MIT"
] | null | null | null | import pytest
from typing import List, Tuple, Union, Optional
from simphox.typing import Shape, Size, Size2, Size3
from simphox.utils import TEST_ZERO, TEST_ONE, Box
from simphox.grid import Grid, YeeGrid
import numpy as np
@pytest.mark.parametrize(
"size, spacing, eps, expected_cell_sizes",
[
((2.5, 2.5, 1), 0.5, 1, [np.array([0.5, 0.5, 0.5, 0.5, 0.5]), np.array([0.5, 0.5, 0.5, 0.5, 0.5]),
np.array([0.5, 0.5])]),
((1, 1), 0.2, 1, [np.array([0.2, 0.2, 0.2, 0.2, 0.2]), np.array([0.2, 0.2, 0.2, 0.2, 0.2]), np.array([1])]),
((1, 0.8), 0.2, 1, [np.ones(5) * 0.2, np.ones(4) * 0.2, np.array([1])]),
((15,), 3, 1, [np.ones(5) * 3, np.array([1]), np.array([1])]),
((5, 6, 6), (1, 2, 3), 1, [np.ones(5) * 1, np.ones(3) * 2, np.ones(2) * 3])
],
)
def test_cell_size(size: Size, spacing: Size, eps: Union[float, np.ndarray],
expected_cell_sizes: List[np.ndarray]):
grid = Grid(size, spacing, eps)
for actual, expected in zip(grid.cells, expected_cell_sizes):
np.testing.assert_allclose(actual, expected)
@pytest.mark.parametrize(
    "size, spacing, eps, expected_pos",
    [
        ((2.5, 2.5, 1), 0.5, 1,
         [np.array([0, 0.5, 1, 1.5, 2, 2.5]), np.array([0, 0.5, 1, 1.5, 2, 2.5]), np.array([0, 0.5, 1])]),
        ((1, 1), 0.2, 1, [np.array([0, 0.2, 0.4, 0.6, 0.8, 1]), np.array([0, 0.2, 0.4, 0.6, 0.8, 1]), np.array([0])]),
        ((1, 0.8), 0.2, 1, [np.array([0, 0.2, 0.4, 0.6, 0.8, 1]), np.array([0, 0.2, 0.4, 0.6, 0.8]), np.array([0])]),
        ((15,), 3, 1, [np.arange(6) * 3, np.array([0]), np.array([0])]),
        ((5, 6, 6), (1, 2, 3), 1, [np.arange(6) * 1, np.arange(4) * 2, np.arange(3) * 3])
    ],
)
def test_pos(size: Size, spacing: Size, eps: Union[float, np.ndarray],
             expected_pos: List[np.ndarray]):
    """Cell-boundary positions should be cumulative multiples of the spacing."""
    grid = Grid(size, spacing, eps)
    for actual, expected in zip(grid.pos, expected_pos):
        np.testing.assert_allclose(actual, expected)
@pytest.mark.parametrize(
    "size, spacing, eps, expected_spacing",
    [
        ((5, 5, 2), 0.5, 1, np.asarray((0.5, 0.5, 0.5))),
        ((5, 5), 0.2, 1, np.ones(2) * 0.2),
        ((5, 4), 0.2, 1, np.ones(2) * 0.2),
        ((5, 3, 2), (1, 2, 3), 1, np.array((1, 2, 3)))
    ],
)
def test_spacing(size: Shape, spacing: Size,
                 eps: Union[float, np.ndarray], expected_spacing: np.ndarray):
    """Scalar or per-axis spacing should be stored as a per-axis array."""
    grid = Grid(size, spacing, eps)
    np.testing.assert_allclose(grid.spacing, expected_spacing)
@pytest.mark.parametrize(
    "shape, eps",
    [
        ((2, 3), np.asarray(((1, 1), (1, 1)))),
        ((2,), np.asarray(((1, 1), (1, 1))))
    ],
)
def test_error_raised_for_shape_eps_mismatch(shape: Shape, eps: Union[float, np.ndarray]):
    """A grid-shape / eps-shape mismatch should raise AttributeError."""
    with pytest.raises(AttributeError, match=f'Require grid.shape == eps.shape but got '):
        Grid(shape, 1, eps)
@pytest.mark.parametrize(
    "shape, spacing",
    [
        ((2, 3), (1, 1, 1)),
        ((2, 3, 2), (1, 1))
    ],
)
def test_error_raised_for_shape_spacing_mismatch(shape: Shape, spacing: Size):
    """A dimensionality mismatch between size and spacing should raise."""
    with pytest.raises(AttributeError, match='Require size.size == ndim == spacing.size but got '):
        Grid(shape, spacing)
@pytest.mark.parametrize(
    "shape, spacing, size",
    [
        ((5, 5, 2), 0.5, (2.5, 2.5, 1)),
        ((5, 5), 0.2, (1, 1)),
    ],
)
def test_shape(shape: Shape, spacing: Size, size: Size):
    """Physical size divided by spacing should give the cell-count shape."""
    grid = Grid(size, spacing)
    np.testing.assert_allclose(grid.shape, shape)
@pytest.mark.parametrize(
    "shape, spacing, size",
    [
        ((5, 5, 2), 0.5, (2.5, 2.5, 1)),
        ((5, 5, 1), 0.2, (1, 1)),
    ],
)
def test_shape3(shape: Shape, spacing: Size, size: Size):
    """``shape3`` pads a 2D grid's shape with a trailing 1 to always be 3D."""
    grid = Grid(size, spacing)
    np.testing.assert_allclose(grid.shape3, shape)
@pytest.mark.parametrize(
    "sim_spacing3, spacing, size",
    [
        ((0.5, 0.5, 0.5), 0.5, (2.5, 2.5, 1)),
        ((0.2, 0.2, np.inf), 0.2, (1, 1)),
    ],
)
def test_spacing3(sim_spacing3: Size, spacing: Size, size: Size):
    """``spacing3`` pads a 2D grid's spacing with ``inf`` on the missing axis."""
    grid = Grid(size, spacing)
    np.testing.assert_allclose(grid.spacing3, sim_spacing3)
@pytest.mark.parametrize(
    "sim_size, spacing, center, size, squeezed, expected_slice",
    [
        ((2.5, 2.5, 1), 0.5, (1, 1, 1), (0.5, 1, 1), True, [slice(2, 3, None), slice(1, 3, None), slice(1, 3, None)]),
        ((2.5, 2.5, 1), 0.5, (1, 1, 1), (0.5, 0.1, 1), True, [slice(2, 3, None), 2, slice(1, 3, None)]),
        (
            (2.5, 2.5, 1), 0.5, (1, 1, 1), (0.5, 0.1, 1), False, [slice(2, 3, None), slice(2, 3, None), slice(1, 3, None)]),
        ((1, 1), 0.2, (1, 1, 0), (0.5, 1, 1), True, [slice(4, 6, None), slice(3, 8, None), 0]),
    ],
)
def test_slice(sim_size: Shape, spacing: Size, center: Size3, size: Size3, squeezed: bool,
               expected_slice: Tuple[Union[slice, int]]):
    """A (center, size) box maps to per-axis index slices; single-cell axes
    collapse to a bare int when ``squeezed`` is True."""
    grid = Grid(sim_size, spacing)
    actual = grid.slice(center, size, squeezed=squeezed)
    assert tuple(actual) == tuple(expected_slice)
@pytest.mark.parametrize(
    "size, spacing, pml, expected_df_data, expected_df_indices",
    [
        ((1.5, 1.5, 1), 0.5, None,
         [2., -2., 2., -2., 2., -2., 2., -2., 2., -2., 2., -2., 2., -2.,
          2., -2., 2., -2., 2., -2., 2., -2., 2., -2., -2., 2., -2., 2.,
          -2., 2., -2., 2., -2., 2., -2., 2.],
         [6, 0, 7, 1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13, 7, 14,
          8, 15, 9, 16, 10, 17, 11, 12, 0, 13, 1, 14, 2, 15, 3, 16, 4,
          17, 5]
         ),
        ((3, 3), 1, None,
         [1., -1., 1., -1., 1., -1., 1., -1., 1., -1., 1., -1., -1., 1.,
          -1., 1., -1., 1.],
         [3, 0, 4, 1, 5, 2, 6, 3, 7, 4, 8, 5, 6, 0, 7, 1, 8, 2]
         ),
        ((6,), 2, None,
         [0.5, -0.5, 0.5, -0.5, 0.5, -0.5],
         [1, 0, 2, 1, 0, 2]
         ),
    ],
)
def test_df(size: Size, spacing: Size, pml: Optional[Size3], expected_df_data: np.ndarray,
            expected_df_indices: np.ndarray):
    """The forward-difference operator's first sparse matrix has the expected
    coefficients (+-1/spacing) and wrap-around (periodic) index pattern."""
    grid = YeeGrid(size, spacing, pml=pml)
    actual_df = grid.deriv_forward
    np.testing.assert_allclose(actual_df[0].data, expected_df_data)
    np.testing.assert_allclose(actual_df[0].indices, expected_df_indices)
@pytest.mark.parametrize(
    "size, spacing, pml, expected_db_data, expected_db_indices",
    [
        ((1.5, 1.5, 1), 0.5, None,
         [-2., 2., -2., 2., -2., 2., -2., 2., -2., 2., -2., 2., 2.,
          -2., 2., -2., 2., -2., 2., -2., 2., -2., 2., -2., 2., -2.,
          2., -2., 2., -2., 2., -2., 2., -2., 2., -2.],
         [12, 0, 13, 1, 14, 2, 15, 3, 16, 4, 17, 5, 6, 0, 7, 1, 8,
          2, 9, 3, 10, 4, 11, 5, 12, 6, 13, 7, 14, 8, 15, 9, 16, 10,
          17, 11]
         ),
        ((3, 3), 1, None,
         [-1., 1., -1., 1., -1., 1., 1., -1., 1., -1., 1., -1., 1.,
          -1., 1., -1., 1., -1.],
         [6, 0, 7, 1, 8, 2, 3, 0, 4, 1, 5, 2, 6, 3, 7, 4, 8, 5]
         ),
        ((6,), 2, None,
         [-0.5, 0.5, -0.5, 0.5, -0.5, 0.5],
         [2, 0, 0, 1, 1, 2]
         ),
    ],
)
def test_db(size: Size, spacing: Size, pml: Optional[Size3], expected_db_data: np.ndarray,
            expected_db_indices: np.ndarray):
    """The backward-difference operator mirrors the forward one: signs are
    flipped and the periodic wrap appears at the opposite boundary."""
    grid = YeeGrid(size, spacing, pml=pml)
    actual_db = grid.deriv_backward
    np.testing.assert_allclose(actual_db[0].data, expected_db_data)
    np.testing.assert_allclose(actual_db[0].indices, expected_db_indices)
@pytest.mark.parametrize(
    "waveguide, sub, size, wg_height, spacing, rib_y, vertical, block, gap, seps, expected",
    [
        (Box((0.2, 0.4), material=TEST_ZERO), (1.4, 0.2), (1.4, 1), 0.2, 0.2, 0, False,
         None, 0.2, (0.2, 0.4), np.array(
            [[1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1.],
             [1., 0., 0., 1., 1.],
             [1., 1., 1., 1., 1.],
             [1., 0., 0., 1., 1.],
             [1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1.]]
        )),
        (Box((0.2, 0.4), material=TEST_ZERO), (1.4, 0.2), (1.4, 1), 0.2, 0.2, 0, False,
         Box((0.2, 0.2), material=TEST_ZERO), 0.2, (0.2, 0.2), np.array([
            [1, 1, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 0, 1, 1]
        ])),
        (Box((0.2, 0.4), material=TEST_ZERO), (1.4, 0.2), (1.4, 1), 0.2, 0.2, 0, True,
         Box((0.2, 0.2), material=TEST_ZERO), 0.2, (0.2, 0.2), np.array([
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [1., 0., 0., 1., 0.],
            [1., 1., 1., 1., 1.],
            [1., 0., 0., 1., 0.],
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.]
        ])),
        (Box((0.6, 0.6), material=TEST_ZERO), (1, 1), (1, 1), 0.2, 0.2, 0, False, None, 0, 0, np.array([
            [1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 0, 0, 0, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1]
        ])),
        (Box((0.4, 0.4), material=TEST_ZERO), (1, 1), (1, 1), 0.2, 0.2, 0, False, None, 0, 0, np.array([
            [1, 1, 1, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1]
        ])),
        (Box((0.4, 0.4), material=TEST_ZERO), (1, 0.2), (1, 1), 0.2, 0.2, 0.2, False, None, 0, 0, np.array([
            [1, 0, 1, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 0, 1, 1, 1],
            [1, 0, 1, 1, 1]
        ])),
        (Box((0.2, 0.4), material=TEST_ZERO), (1, 0.2), (1, 1), 0.2, 0.2, 0, False,
         Box((0.2, 0.4), material=TEST_ZERO), 0, 0.2, np.array([
            [1, 0, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 0, 0, 1, 1],
            [1, 1, 1, 1, 1],
            [1, 0, 0, 1, 1]
        ])),
        (Box((0.4, 0.2), material=TEST_ZERO), (1, 0.2), (1, 1), 0.2, 0.2, 0, True,
         Box((0.4, 0.2), material=TEST_ZERO), 0, 0.2, np.array([
            [1., 1., 1., 1., 1.],
            [1., 1., 0., 1., 0.],
            [1., 1., 0., 1., 0.],
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.]
        ])),
    ],
)
def test_block_design_eps_matches_expected(waveguide: Box, sub: Size2, size: Size2, wg_height: float, spacing: float,
                                           rib_y: float, vertical: bool, block: Box, gap: float,
                                           seps: Size2, expected: np.ndarray):
    """``block_design`` rasterizes a waveguide (optionally with side/vertical
    blocks, rib layer and separations) into the expected permittivity map.
    Each expected array is the eps grid; 0 marks the TEST_ZERO material."""
    # NOTE(review): the ``sub`` parameter is accepted but never passed to
    # block_design — sub_height is reused from wg_height; confirm intent.
    actual = Grid(size, spacing).block_design(waveguide=waveguide,
                                              wg_height=wg_height,
                                              sub_height=wg_height,
                                              sub_eps=TEST_ONE.eps,
                                              gap=gap,
                                              rib_y=rib_y,
                                              block=block,
                                              vertical=vertical,
                                              sep=seps
                                              ).eps
    np.testing.assert_allclose(actual, expected)
| 38.575439 | 120 | 0.444151 |
4cd5566913e33ae0eff9977dd7eb3de158fee757 | 146 | py | Python | pymetricks/build/lib/py_admetricks/__init__.py | carlangastr/marketing-science-projects | 760b3c3e9fd9198805407f69a1f74e82d2446d1f | [
"MIT"
] | 2 | 2021-07-03T06:55:39.000Z | 2021-11-27T00:44:34.000Z | pymetricks/py_admetricks/__init__.py | carlangastr/marketing-science-projects | 760b3c3e9fd9198805407f69a1f74e82d2446d1f | [
"MIT"
] | 2 | 2021-06-09T21:00:01.000Z | 2021-06-10T03:53:20.000Z | pymetricks/py_admetricks/__init__.py | carlangastr/marketing-science-projects | 760b3c3e9fd9198805407f69a1f74e82d2446d1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 00:36:16 2021
@author: Carlos Trujillo
"""
from py_admetricks.pymetricks import admetricks_api | 18.25 | 51 | 0.712329 |
abc0447e718114b9530f786a26f1c8573082e887 | 4,758 | py | Python | homeassistant/components/brother/sensor.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | [
"Apache-2.0"
] | 6 | 2016-11-25T06:36:27.000Z | 2021-11-16T11:20:23.000Z | homeassistant/components/brother/sensor.py | SicAriuSx83/core | 162c39258e68ae42fe4e1560ae91ed54f5662409 | [
"Apache-2.0"
] | 45 | 2020-10-15T06:47:06.000Z | 2022-03-31T06:26:16.000Z | homeassistant/components/brother/sensor.py | SicAriuSx83/core | 162c39258e68ae42fe4e1560ae91ed54f5662409 | [
"Apache-2.0"
] | 2 | 2020-11-17T09:19:47.000Z | 2020-12-16T03:56:09.000Z | """Support for the Brother service."""
from datetime import timedelta
import logging
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.dt import utcnow
from .const import (
ATTR_BLACK_DRUM_COUNTER,
ATTR_BLACK_DRUM_REMAINING_LIFE,
ATTR_BLACK_DRUM_REMAINING_PAGES,
ATTR_CYAN_DRUM_COUNTER,
ATTR_CYAN_DRUM_REMAINING_LIFE,
ATTR_CYAN_DRUM_REMAINING_PAGES,
ATTR_DRUM_COUNTER,
ATTR_DRUM_REMAINING_LIFE,
ATTR_DRUM_REMAINING_PAGES,
ATTR_ICON,
ATTR_LABEL,
ATTR_MAGENTA_DRUM_COUNTER,
ATTR_MAGENTA_DRUM_REMAINING_LIFE,
ATTR_MAGENTA_DRUM_REMAINING_PAGES,
ATTR_MANUFACTURER,
ATTR_UNIT,
ATTR_UPTIME,
ATTR_YELLOW_DRUM_COUNTER,
ATTR_YELLOW_DRUM_REMAINING_LIFE,
ATTR_YELLOW_DRUM_REMAINING_PAGES,
DOMAIN,
SENSOR_TYPES,
)
ATTR_COUNTER = "counter"
ATTR_FIRMWARE = "firmware"
ATTR_MODEL = "model"
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_SERIAL = "serial"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Brother printer sensor entities for a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    device_info = {
        "identifiers": {(DOMAIN, coordinator.data[ATTR_SERIAL])},
        "name": coordinator.data[ATTR_MODEL],
        "manufacturer": ATTR_MANUFACTURER,
        "model": coordinator.data[ATTR_MODEL],
        "sw_version": coordinator.data.get(ATTR_FIRMWARE),
    }
    # Only create entities for sensor kinds the printer actually reports.
    entities = [
        BrotherPrinterSensor(coordinator, kind, device_info)
        for kind in SENSOR_TYPES
        if kind in coordinator.data
    ]
    async_add_entities(entities, False)
class BrotherPrinterSensor(CoordinatorEntity):
    """Define a Brother Printer sensor."""

    # Maps a drum "remaining life" sensor kind to the (remaining-pages key,
    # page-counter key) that should be exposed as extra state attributes.
    # Replaces the original five-branch if/elif chain with a lookup table.
    _DRUM_ATTR_KEYS = {
        ATTR_DRUM_REMAINING_LIFE: (ATTR_DRUM_REMAINING_PAGES, ATTR_DRUM_COUNTER),
        ATTR_BLACK_DRUM_REMAINING_LIFE: (ATTR_BLACK_DRUM_REMAINING_PAGES, ATTR_BLACK_DRUM_COUNTER),
        ATTR_CYAN_DRUM_REMAINING_LIFE: (ATTR_CYAN_DRUM_REMAINING_PAGES, ATTR_CYAN_DRUM_COUNTER),
        ATTR_MAGENTA_DRUM_REMAINING_LIFE: (ATTR_MAGENTA_DRUM_REMAINING_PAGES, ATTR_MAGENTA_DRUM_COUNTER),
        ATTR_YELLOW_DRUM_REMAINING_LIFE: (ATTR_YELLOW_DRUM_REMAINING_PAGES, ATTR_YELLOW_DRUM_COUNTER),
    }

    def __init__(self, coordinator, kind, device_info):
        """Initialize the sensor for one data kind reported by the printer.

        :param coordinator: update coordinator holding the printer data dict.
        :param kind: key into SENSOR_TYPES / coordinator.data for this sensor.
        :param device_info: shared device registry info for the printer.
        """
        super().__init__(coordinator)
        self._name = f"{coordinator.data[ATTR_MODEL]} {SENSOR_TYPES[kind][ATTR_LABEL]}"
        self._unique_id = f"{coordinator.data[ATTR_SERIAL].lower()}_{kind}"
        self._device_info = device_info
        self.kind = kind
        self._attrs = {}

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def state(self):
        """Return the state."""
        if self.kind == ATTR_UPTIME:
            # Uptime is reported as "seconds since boot"; convert to an
            # absolute ISO-8601 timestamp for DEVICE_CLASS_TIMESTAMP.
            uptime = utcnow() - timedelta(seconds=self.coordinator.data.get(self.kind))
            return uptime.replace(microsecond=0).isoformat()
        return self.coordinator.data.get(self.kind)

    @property
    def device_class(self):
        """Return the class of this sensor."""
        if self.kind == ATTR_UPTIME:
            return DEVICE_CLASS_TIMESTAMP
        return None

    @property
    def device_state_attributes(self):
        """Return the state attributes.

        Drum "remaining life" sensors additionally expose the remaining page
        count and the drum page counter; all other kinds have no extras.
        """
        keys = self._DRUM_ATTR_KEYS.get(self.kind)
        if keys is not None:
            remaining_pages, drum_counter = keys
            self._attrs[ATTR_REMAINING_PAGES] = self.coordinator.data.get(
                remaining_pages
            )
            self._attrs[ATTR_COUNTER] = self.coordinator.data.get(drum_counter)
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return SENSOR_TYPES[self.kind][ATTR_ICON]

    @property
    def unique_id(self):
        """Return a unique_id for this entity."""
        return self._unique_id

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return SENSOR_TYPES[self.kind][ATTR_UNIT]

    @property
    def device_info(self):
        """Return the device info."""
        return self._device_info

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return True
| 32.589041 | 93 | 0.688104 |
bb1a98d4453974ec78bc83d6c7624d9e8f287710 | 1,428 | py | Python | eth/vm/execution_context.py | vovyl/py-evm | dc9c03bfee31aecd100c953e8777988d8b296bd8 | [
"MIT"
] | 1 | 2019-09-14T13:32:40.000Z | 2019-09-14T13:32:40.000Z | eth/vm/execution_context.py | vovyl/py-evm | dc9c03bfee31aecd100c953e8777988d8b296bd8 | [
"MIT"
] | null | null | null | eth/vm/execution_context.py | vovyl/py-evm | dc9c03bfee31aecd100c953e8777988d8b296bd8 | [
"MIT"
] | null | null | null | from typing import (
Iterable,
)
from eth_typing import (
Address,
Hash32,
)
from eth._utils.generator import CachedIterable
class ExecutionContext:
    """Read-only per-block execution parameters exposed to the EVM.

    Wraps the block/chain values (coinbase, timestamp, block number,
    difficulty, gas limit, previous block hashes, chain id) behind
    properties so they cannot be mutated after construction.
    """

    _coinbase = None
    _timestamp = None
    # Fix: the original declared ``_number`` here, but the attribute that
    # __init__ assigns and ``block_number`` reads is ``_block_number``.
    _block_number = None
    _difficulty = None
    _gas_limit = None
    _prev_hashes = None
    _chain_id = None

    def __init__(
            self,
            coinbase: Address,
            timestamp: int,
            block_number: int,
            difficulty: int,
            gas_limit: int,
            prev_hashes: Iterable[Hash32],
            chain_id: int) -> None:
        self._coinbase = coinbase
        self._timestamp = timestamp
        self._block_number = block_number
        self._difficulty = difficulty
        self._gas_limit = gas_limit
        # CachedIterable lets prev_hashes be consumed lazily yet re-iterated.
        self._prev_hashes = CachedIterable(prev_hashes)
        self._chain_id = chain_id

    @property
    def coinbase(self) -> Address:
        return self._coinbase

    @property
    def timestamp(self) -> int:
        return self._timestamp

    @property
    def block_number(self) -> int:
        return self._block_number

    @property
    def difficulty(self) -> int:
        return self._difficulty

    @property
    def gas_limit(self) -> int:
        return self._gas_limit

    @property
    def prev_hashes(self) -> Iterable[Hash32]:
        return self._prev_hashes

    @property
    def chain_id(self) -> int:
        return self._chain_id
| 21.313433 | 55 | 0.611345 |
0e626ae11bde8621e3dd7d10ba26311655be381c | 1,201 | py | Python | tests/test_of_types.py | hongquan/Defity | 9cfefb88ea3215b369e018b4f60a278ddd2927a6 | [
"Apache-2.0"
] | 12 | 2021-08-29T09:05:26.000Z | 2022-03-11T17:53:50.000Z | tests/test_of_types.py | hongquan/Defity | 9cfefb88ea3215b369e018b4f60a278ddd2927a6 | [
"Apache-2.0"
] | 1 | 2021-12-24T16:10:32.000Z | 2021-12-30T03:18:04.000Z | tests/test_of_types.py | hongquan/Defity | 9cfefb88ea3215b369e018b4f60a278ddd2927a6 | [
"Apache-2.0"
] | 3 | 2021-09-27T19:59:35.000Z | 2022-01-27T03:10:16.000Z | from pathlib import Path
import defity
DATA = Path(__file__).parent / 'data'
def test_single_type():
    """A PNG file is recognised when tested against a single MIME type."""
    png_path = DATA / 'image.png'
    assert defity.is_file_of_type(png_path, 'image/png')
def test_not_match_single_type():
    """A PNG file must not match an unrelated single MIME type."""
    png_path = DATA / 'image.png'
    assert not defity.is_file_of_type(png_path, 'image/jpeg')
def test_multiple_types():
    """Matching succeeds when any MIME type in the collection fits."""
    png_path = DATA / 'image.png'
    candidates = ('image/png', 'application/pdf')
    assert defity.is_file_of_type(png_path, candidates)
def test_not_match_multiple_types():
    """Matching fails when no MIME type in the collection fits."""
    png_path = DATA / 'image.png'
    candidates = ('image/jpeg', 'application/pdf')
    assert not defity.is_file_of_type(png_path, candidates)
def test_file_multiple_types():
    """``is_file_of_type`` also accepts an open binary file object."""
    png_path = DATA / 'image.png'
    candidates = ('image/png', 'application/pdf')
    with png_path.open('rb') as handle:
        result = defity.is_file_of_type(handle, candidates)
    assert result
def test_bytes_multiple_types():
    """``is_bytes_of_type`` works directly on raw bytes content."""
    payload = (DATA / 'image.png').read_bytes()
    candidates = ('image/png', 'application/pdf')
    assert defity.is_bytes_of_type(payload, candidates)
| 24.02 | 67 | 0.685262 |
4f9be20f9f34a8d415f253c49d9993023d82309b | 6,621 | py | Python | helpers/emulator.py | Aayush9029/Kenobi-Server | 84581116eef3bbb15f28b2d03f49ed3deefff9c2 | [
"MIT"
] | 11 | 2021-11-21T20:06:21.000Z | 2021-12-05T13:44:30.000Z | helpers/emulator.py | Aayush9029/Kenobi-Server | 84581116eef3bbb15f28b2d03f49ed3deefff9c2 | [
"MIT"
] | 3 | 2021-11-22T00:55:37.000Z | 2021-11-27T17:39:19.000Z | helpers/emulator.py | Aayush9029/Kenobi-Server | 84581116eef3bbb15f28b2d03f49ed3deefff9c2 | [
"MIT"
] | 2 | 2021-11-21T23:44:39.000Z | 2021-11-23T03:17:33.000Z | """
File containing class which represents the emulator.
Controlling media, open apps and links, playing audio, etc is done here.
"""
import webbrowser
from subprocess import run
from os import system
from playsound import playsound
from pynput.keyboard import Controller, Key
from .operating_system import OperatingSystem
from .custom_logger import CustomLogger
class Emulator:
    """
    Emulates key presses, controls media playback, launches apps/sites
    and performs power actions (shutdown/logout/restart/sleep).
    """

    # Shell command for each power action, keyed by platform name.
    # Consolidates the four per-method if/elif platform chains.
    _POWER_COMMANDS = {
        "Linux": {
            "shutdown": "shutdown -h now",
            "logout": "gnome-session-quit --force",
            "restart": "shutdown -r now",
            "sleep": "systemctl suspend",
        },
        "Windows": {
            "shutdown": "shutdown -s",
            "logout": "shutdown -l",
            "restart": "shutdown -r",
            "sleep": "rundll32.exe powrprof.dll,SetSuspendState 0,1,0",
        },
        "Darwin": {
            "shutdown": "shutdown -h now",
            "logout": "shutdown -l",
            "restart": "shutdown -r now",
            "sleep": "pmset sleepnow",
        },
    }

    def __init__(self):
        """
        Initialize the key controller
        and the dictionaries of recognised keys.
        """
        self.logger = CustomLogger(self.__class__.__name__)
        self.operating_system = OperatingSystem()
        self.keyboard = Controller()
        self.valid_direction_keys = {
            "left": Key.left,
            "right": Key.right,
            "up": Key.up,
            "down": Key.down,
            "space": Key.space,
            "tab": Key.tab,
            "return": Key.enter,
            "escape": Key.esc,
        }
        self.valid_media_keys = {
            "playpause": Key.media_play_pause,
            "next": Key.media_next,
            "previous": Key.media_previous,
            "mute": Key.media_volume_mute,
            "volumeup": Key.media_volume_up,
            "volumedown": Key.media_volume_down
        }

    def emulate_key(self, received_key: str):
        """
        Emulate a navigation or media key press if the key is recognised.

        Bug fix: the original used two independent if/else chains, so every
        valid direction key was also logged as an "Invalid key" by the media
        branch (and vice versa). An if/elif chain logs only truly unknown keys.
        """
        if received_key in self.valid_direction_keys:
            self.keyboard.press(self.valid_direction_keys[received_key])
        elif received_key in self.valid_media_keys:
            if self.operating_system.platform == "Darwin":
                # pynput cannot post media keys on macOS; use the HID workaround.
                self.hid_post_aux_key(received_key)
                return
            self.keyboard.press(self.valid_media_keys[received_key])
        else:
            self.logger.info(f"Invalid key {received_key}")

    def hid_post_aux_key(self, key):
        """
        Post a media (aux) key event through the macOS HID system.

        pynput does not work for media keys on macOS, so this posts an
        NSSystemDefined event via Quartz instead.
        """
        import Quartz
        # NSEvent.h
        NSSystemDefined = 14
        # hidsystem/ev_keymap.h key codes
        sound_up_key = 0
        sound_down_key = 1
        play_key = 16
        next_key = 17
        previous_key = 18
        mute_key = 7
        supported_commands = {
            'playpause': play_key,
            'next': next_key,
            'previous': previous_key,
            'volumeup': sound_up_key,
            'volumedown': sound_down_key,
            'mute': mute_key
        }
        if key not in supported_commands:
            self.logger.error(f"Invalid key {key}")
            return
        key_code = supported_commands[key]

        def post_key_event(down):
            """Post a single keydown/keyup HID aux-key event."""
            event = Quartz.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
                NSSystemDefined,  # type
                (0, 0),  # location
                0xa00 if down else 0xb00,  # flags
                0,  # timestamp
                0,  # window
                0,  # ctx
                8,  # subtype
                (key_code << 16) | ((0xa if down else 0xb) << 8),  # data1
                -1  # data2
            )
            Quartz.CGEventPost(0, event.CGEvent())

        post_key_event(True)
        post_key_event(False)

    def launch_app(self, app: str):
        """
        Launch an application by name; values ending in ".com" are
        treated as websites and opened in the browser instead.
        """
        if app.endswith(".com"):
            self.launch_site(url=app)
            return
        # TEST Launch apps for other OSs
        launch_args = {
            'Linux': ['xdg-open'],
            'Windows': ['start'],
            'Darwin': ['open', '-a']
        }
        try:
            run(launch_args[self.operating_system.platform] + [app])
        except KeyError:
            # Consistency fix: use the instance logger instead of print().
            self.logger.error(f"Invalid OS \"{self.operating_system}\"")

    @staticmethod
    def ping(value):
        """
        Play a notification sound for the given ping type.
        """
        if value in ("hello", "ping"):
            # TODO: ship a dedicated hello.wav for the "hello" case;
            # both options currently share the same sound file.
            playsound("assets/ping_sound.wav")
        else:
            print(f"Invalid ping option {value}")

    @staticmethod
    def launch_site(url):
        """
        Open https://<url> in the default browser.
        """
        webbrowser.open_new(url=f"https://{url}")

    def power_option(self, value):
        """
        Dispatch a power action: shutdown, logout, restart or sleep.
        """
        actions = {
            "shutdown": self.shutdown,
            "logout": self.logout,
            "restart": self.restart,
            "sleep": self.sleep,
        }
        action = actions.get(value)
        if action is None:
            self.logger.info(f"Invalid power option {value}")
            return
        action()

    # power options child functions
    def _run_power_command(self, action: str):
        """Run the platform-specific shell command for the given action."""
        commands = self._POWER_COMMANDS.get(self.operating_system.platform)
        if commands is None:
            # Original silently did nothing on unknown platforms; log it.
            self.logger.error(f"Unsupported OS {self.operating_system.platform}")
            return
        system(commands[action])

    def shutdown(self):
        """
        Shutdown the computer depending on the OS.
        """
        self._run_power_command("shutdown")

    def logout(self):
        """
        Logout of the current session.
        """
        self._run_power_command("logout")

    def restart(self):
        """
        Restart the computer.
        """
        self._run_power_command("restart")

    def sleep(self):
        """
        Sleep the computer.
        """
        self._run_power_command("sleep")
| 30.232877 | 129 | 0.544027 |
5958b2ea202c2451244208607f683309d07ef65f | 4,303 | py | Python | examples/Arctic_HYCOM/get_hycom_GLBa0.08_ssh_2014.py | bilgetutak/pyroms | 3b0550f26f4ac181b7812e14a7167cd1ca0797f0 | [
"BSD-3-Clause"
] | 75 | 2016-04-05T07:15:57.000Z | 2022-03-04T22:49:54.000Z | examples/Arctic_HYCOM/get_hycom_GLBa0.08_ssh_2014.py | hadfieldnz/pyroms-mgh | cd0fe39075825f97a7caf64e2c4c5a19f23302fd | [
"BSD-3-Clause"
] | 27 | 2017-02-26T04:27:49.000Z | 2021-12-01T17:26:56.000Z | examples/Arctic_HYCOM/get_hycom_GLBa0.08_ssh_2014.py | hadfieldnz/pyroms-mgh | cd0fe39075825f97a7caf64e2c4c5a19f23302fd | [
"BSD-3-Clause"
] | 56 | 2016-05-11T06:19:14.000Z | 2022-03-22T19:04:17.000Z | import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
def create_HYCOM_file(name, time, lon, lat, var):
    """Write one daily HYCOM 2D field to a NETCDF3_64BIT file.

    Relies on the module-level globals ``outvarname``, ``spval``,
    ``long_name`` and ``units`` being assigned before it is called
    (they are set by the download loop further down this script).

    :param name: output netCDF file path.
    :param time: time value in days since 1900-01-01.
    :param lon, lat: 2D coordinate arrays defining the grid.
    :param var: 2D data array written as the single time record.
    """
    #create netCDF file
    nc = netCDF4.Dataset(name, 'w', format='NETCDF3_64BIT')
    # Records the creating function's name as the file Author attribute.
    nc.Author = sys._getframe().f_code.co_name
    nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    nc.title = 'HYCOM + NCODA Global 1/12 Analysis (GLBa0.08)'
    #create dimensions
    Mp, Lp = lon.shape
    nc.createDimension('lon', Lp)
    nc.createDimension('lat', Mp)
    nc.createDimension('ocean_time', None)
    #create variables
    nc.createVariable('lon', 'f', ('lat', 'lon'))
    nc.variables['lon'].long_name = 'longitude'
    nc.variables['lon'].units = 'degrees_east'
    nc.variables['lon'][:] = lon
    nc.createVariable('lat', 'f', ('lat', 'lon'))
    nc.variables['lat'].long_name = 'latitude'
    nc.variables['lat'].units = 'degrees_north'
    nc.variables['lat'][:] = lat
    nc.createVariable('ocean_time', 'f', ('ocean_time'))
    nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
    nc.variables['ocean_time'].calendar = 'LEAP'
    nc.variables['ocean_time'][0] = time
    nc.createVariable(outvarname, 'f', ('ocean_time', 'lat', 'lon'), fill_value=spval)
    nc.variables[outvarname].long_name = long_name
    nc.variables[outvarname].units = units
    nc.variables[outvarname].coordinates = 'lon lat'
    nc.variables[outvarname][0] = var
    nc.close()
    print('Done with file %s' %name)
# Download daily HYCOM GLBa0.08 SSH fields for 2014 and write them
# to local netCDF files, retrying days that failed on the first pass.
year = 2014
retry='True'
invarname = 'ssh'
outvarname = 'ssh'
#read grid and variable attributes from the first file
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_90.6/2009/2d/archv.2009_001_00_2d.nc'
dataset = netCDF4.Dataset(url)
# Arctic subset of the global grid (rows 2100:, cols 550:4040).
lon = dataset.variables['Longitude'][2100:,550:4040]
lat = dataset.variables['Latitude'][2100:,550:4040]
#spval = dataset.variables[invarname]._FillValue
units = dataset.variables[invarname].units
long_name = dataset.variables[invarname].long_name
dataset.close()
retry_day = []
# loop over daily files
if year%4 == 0:
    daysinyear = 366
else:
    daysinyear = 365
# NOTE(review): this override limits processing to the first 94 days of the
# year, discarding the leap-year logic above — looks like leftover
# debug/partial-download code; confirm before removing.
daysinyear = 94
for day in range(1,daysinyear+1):
#for day in range(95,daysinyear+1):
    print('Processing file for day %03d, year %04d' %(day, year))
    url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
#    url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
    #get data from server
    try:
        dataset = netCDF4.Dataset(url)
        var = dataset.variables[invarname][0,2100:,550:4040]
        spval = var.get_fill_value()
        dataset.close()
    except:
        # Missing days are remembered and retried after the first pass.
        print('No file on the server... We skip this day.')
        retry_day.append(day)
        continue
    #create netCDF file
    outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
    jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
    create_HYCOM_file(outfile, jday, lon, lat, var)
if retry == 'True':
    if len(retry_day) != 0:
        print("Some file have not been downloded... Let's try again")
    # NOTE(review): this loops forever if a day never appears on the server.
    while len(retry_day) != 0:
        for day in retry_day:
            print('Retry file for day %03d, year %04d' %(day, year))
            url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
#            url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
            #get data from server
            try:
                dataset = netCDF4.Dataset(url)
                var = dataset.variables[invarname][0,2100:,550:4040]
                spval = var.get_fill_value()
                dataset.close()
            except:
                print('No file on the server... We skip this day.')
                continue
            #create netCDF file
            outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
            jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
            create_HYCOM_file(outfile, jday, lon, lat, var)
            retry_day.remove(day)
| 32.847328 | 126 | 0.64885 |
a1322c45f8087db6b1ecd39e7cf33fb7c84b02e9 | 2,410 | py | Python | Server/swaggerUI/swagger_server/test/test_default_controller.py | heartcase/ECE4564F17T6 | 98a3c548c1787b74ed50004451a82635fb3e05fe | [
"MIT"
] | null | null | null | Server/swaggerUI/swagger_server/test/test_default_controller.py | heartcase/ECE4564F17T6 | 98a3c548c1787b74ed50004451a82635fb3e05fe | [
"MIT"
] | null | null | null | Server/swaggerUI/swagger_server/test/test_default_controller.py | heartcase/ECE4564F17T6 | 98a3c548c1787b74ed50004451a82635fb3e05fe | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.parking_spot import ParkingSpot # noqa: E501
from swagger_server.models.user import User # noqa: E501
from swagger_server.test import BaseTestCase
class TestDefaultController(BaseTestCase):
    """DefaultController integration test stubs.

    Each test drives one endpoint through the Flask test client and
    asserts an HTTP 200 response; bodies are not validated.
    """

    def test_login_get(self):
        """Test case for login_get: GET //Login (login into server)."""
        response = self.client.open(
            '//Login',
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_parking_spots_get(self):
        """Test case for parking_spots_get: list parking spots with a range filter."""
        query_string = [('range', 'range_example')]
        response = self.client.open(
            '//ParkingSpots',
            method='GET',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_parking_spots_id_get(self):
        """Test case for parking_spots_id_get: check the status of one parking spot."""
        response = self.client.open(
            '//ParkingSpots/{id}'.format(id=56),
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_parking_spots_id_post(self):
        """Test case for parking_spots_id_post: park at or leave a parking spot."""
        data = dict(operation='operation_example',
                    park_hour=56)
        response = self.client.open(
            '//ParkingSpots/{id}'.format(id=56),
            method='POST',
            data=data,
            content_type='multipart/form-data')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_user_uid_get(self):
        """Test case for user_uid_get: check the status of a user by uid."""
        response = self.client.open(
            '//User/{uid}'.format(uid=56),
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    import unittest
    unittest.main()
| 29.753086 | 77 | 0.582158 |
9157d222e9d6b3748a014e0f588d53af04f67ef9 | 33,881 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_gallery_application_versions_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2021-09-07T18:35:49.000Z | 2021-09-07T18:35:49.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_gallery_application_versions_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_gallery_application_versions_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryApplicationVersionsOperations:
"""GalleryApplicationVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers.

        :param client: Client for service requests.
        :param config: Configuration of the service client.
        :param serializer: An object model serializer.
        :param deserializer: An object model deserializer.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: "_models.GalleryApplicationVersion",
        **kwargs
    ) -> "_models.GalleryApplicationVersion":
        """Issue the initial PUT of the create-or-update long-running
        operation and deserialize the immediate (200/201/202) response.

        Autogenerated (AutoRest) helper; called by ``begin_create_or_update``,
        which wraps it with LRO polling.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryApplicationVersion"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-30"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
            'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
            'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(gallery_application_version, 'GalleryApplicationVersion')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200/201/202 all carry a GalleryApplicationVersion body.
        if response.status_code == 200:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'}  # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
gallery_application_version: "_models.GalleryApplicationVersion",
**kwargs
) -> AsyncLROPoller["_models.GalleryApplicationVersion"]:
"""Create or update a gallery Application Version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition in which the
Application Version is to be created.
:type gallery_application_name: str
:param gallery_application_version_name: The name of the gallery Application Version to be
created. Needs to follow semantic version name pattern: The allowed characters are digit and
period. Digits must be within the range of a 32-bit integer. Format:
:code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
:type gallery_application_version_name: str
:param gallery_application_version: Parameters supplied to the create or update gallery
Application Version operation.
:type gallery_application_version: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersion
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationVersion"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application_version_name=gallery_application_version_name,
gallery_application_version=gallery_application_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
gallery_application_version: "_models.GalleryApplicationVersionUpdate",
**kwargs
) -> "_models.GalleryApplicationVersion":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(gallery_application_version, 'GalleryApplicationVersionUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
gallery_application_version: "_models.GalleryApplicationVersionUpdate",
**kwargs
) -> AsyncLROPoller["_models.GalleryApplicationVersion"]:
"""Update a gallery Application Version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition in which the
Application Version is to be updated.
:type gallery_application_name: str
:param gallery_application_version_name: The name of the gallery Application Version to be
updated. Needs to follow semantic version name pattern: The allowed characters are digit and
period. Digits must be within the range of a 32-bit integer. Format:
:code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
:type gallery_application_version_name: str
:param gallery_application_version: Parameters supplied to the update gallery Application
Version operation.
:type gallery_application_version: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersionUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationVersion"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application_version_name=gallery_application_version_name,
gallery_application_version=gallery_application_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
expand: Optional[Union[str, "_models.ReplicationStatusTypes"]] = None,
**kwargs
) -> "_models.GalleryApplicationVersion":
"""Retrieves information about a gallery Application Version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition in which the
Application Version resides.
:type gallery_application_name: str
:param gallery_application_version_name: The name of the gallery Application Version to be
retrieved.
:type gallery_application_version_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str or ~azure.mgmt.compute.v2020_09_30.models.ReplicationStatusTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryApplicationVersion, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplicationVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a gallery Application Version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition in which the
Application Version resides.
:type gallery_application_name: str
:param gallery_application_version_name: The name of the gallery Application Version to be
deleted.
:type gallery_application_version_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application_version_name=gallery_application_version_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
'galleryApplicationVersionName': self._serialize.url("gallery_application_version_name", gallery_application_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'} # type: ignore
def list_by_gallery_application(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs
) -> AsyncIterable["_models.GalleryApplicationVersionList"]:
"""List gallery Application Versions in a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides.
:type gallery_name: str
:param gallery_application_name: The name of the Shared Application Gallery Application
Definition from which the Application Versions are to be listed.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryApplicationVersionList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationVersionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationVersionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_gallery_application.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryApplicationName': self._serialize.url("gallery_application_name", gallery_application_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryApplicationVersionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery_application.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions'} # type: ignore
| 54.735057 | 265 | 0.692748 |
a174d0199a56f641d0688f5f1278588f810a6c68 | 4,824 | py | Python | tests/test_cli.py | ablack-jpl/scrub | 46739b4a82eab7c37e7f02cf9d537c3a58d40e01 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | ablack-jpl/scrub | 46739b4a82eab7c37e7f02cf9d537c3a58d40e01 | [
"Apache-2.0"
] | null | null | null | tests/test_cli.py | ablack-jpl/scrub | 46739b4a82eab7c37e7f02cf9d537c3a58d40e01 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import glob
import traceback
from scrub import scrub_cli
from tests import helpers
from tests import asserts
# Make the log directory if necessary
if not os.path.exists(helpers.log_dir):
os.mkdir(helpers.log_dir)
def test_scrubme_cli(capsys):
# Initialize variables
test_log_file = helpers.log_dir + '/run_all-cli.log'
# Navigate to the test directory
start_dir = os.getcwd()
os.chdir(helpers.c_test_dir)
# Import the configuration data
with open(helpers.c_conf_file, 'r') as input_fh:
c_conf_data = input_fh.readlines()
# Turn off all tools, except gcc
conf_data = helpers.isolate_tool(c_conf_data, 'GCC_WARNINGS')
# Initialize the test
helpers.init_testcase(conf_data, helpers.c_test_dir, 'clean', helpers.log_dir)
# Set the sys-argv values
sys.argv = ['scrub', 'run-all', '--config', './scrub.cfg']
# Run cli
try:
scrub_cli.main()
except SystemExit:
# Get the exit code
sys_exit_text = traceback.format_exc()
exit_code = int(list(filter(None, re.split('\n|:', sys_exit_text)))[-1])
# There should be no system exit
assert exit_code == 0
finally:
# Navigate to the start directory
os.chdir(start_dir)
# Write results to the output log file
with open(test_log_file, 'w') as output_fh:
system_output = capsys.readouterr()
output_fh.write(system_output.err)
output_fh.write(system_output.out)
# Clean the codebase
helpers.clean_codebase(helpers.c_test_dir, helpers.c_test_dir + '/src', 'make clean')
def test_modhelper_cli(capsys):
# Initialize variables
test_log_file = helpers.log_dir + '/run_tool-cli.log'
output_dir = helpers.c_test_dir + '/.scrub'
start_dir = os.getcwd()
# Change directory
os.chdir(helpers.c_test_dir)
# Import the configuration data
with open(helpers.c_conf_file, 'r') as input_fh:
c_conf_data = input_fh.readlines()
# Turn off all tools
conf_data = helpers.disable_all_tools(c_conf_data)
# Initialize the test
helpers.init_testcase(conf_data, helpers.c_test_dir, 'clean', helpers.log_dir)
# Set sys.argv variables
sys.argv = ['scrub', 'run-tool', '--module', 'scrub.tools.compiler.do_gcc', '--config', './scrub.cfg']
# Run module_helper
scrub_cli.main()
# Write results to the output log file
with open(test_log_file, 'w') as output_fh:
system_output = capsys.readouterr()
output_fh.write(system_output.err)
output_fh.write(system_output.out)
# Navigate to the start directory
os.chdir(start_dir)
# Check the SCRUB output
asserts.assert_mod_helper_success(output_dir, 'gcc', test_log_file)
# Clean the codebase
helpers.clean_codebase(helpers.c_test_dir, helpers.c_test_dir + '/src', 'make clean')
def test_diff_cli(capsys):
# Initialize variables
log_file = helpers.log_dir + '/diff_cli.log'
# Perform the analysis
baseline_source_dir = helpers.test_root + '/test_data/sample_data/diff_results/baseline_testcase'
comparison_source_dir = helpers.test_root + '/test_data/sample_data/diff_results/comparison_testcase'
baseline_scrub_root = baseline_source_dir + '/.scrub'
comparison_scrub_root = comparison_source_dir + '/.scrub'
# SEt sys.argv values
sys.argv = ['scrub', 'diff', '--baseline-source', baseline_source_dir, '--baseline-scrub', baseline_scrub_root,
'--comparison-source', comparison_source_dir, '--comparison-scrub', comparison_scrub_root]
# Run cli
scrub_cli.main()
# Write out the stdout
with open(log_file, 'w') as output_fh:
output_fh.write('{}'.format(capsys.readouterr().out))
output_fh.write('{}'.format(capsys.readouterr().err))
# Check the output data
comparison_files = glob.glob(comparison_scrub_root + '/*[!_diff].scrub')
diff_output_files = glob.glob(comparison_scrub_root + '/*_diff.scrub')
with open(log_file, 'r') as input_fh:
log_file_data = input_fh.read()
assert len(comparison_files) == len(diff_output_files)
assert log_file_data.find('Error') == -1
# Cleanup
for diff_file in diff_output_files:
os.remove(diff_file)
def test_conf_cli():
# Remove the configuration file if it exists
conf_file_out = './scrub.cfg'
if os.path.exists(conf_file_out):
os.remove(conf_file_out)
# Set sys.argv values
sys.argv = ['scrub', 'get-conf', '--output', conf_file_out]
# Generate a configuration file
scrub_cli.main()
# Make the output file exists
assert os.path.exists(conf_file_out)
# Remove the conf file if it exists
if os.path.exists(conf_file_out):
os.remove(conf_file_out)
| 31.122581 | 115 | 0.68408 |
221e7b36fd17c5f1a32ac08968f8acd2079b63bc | 1,655 | py | Python | scripts/wall_trace.py | nzhoo/pimouse_run_corridor | f742aee835ab14eb2f8ff903c5ce2607cc586259 | [
"BSD-3-Clause"
] | null | null | null | scripts/wall_trace.py | nzhoo/pimouse_run_corridor | f742aee835ab14eb2f8ff903c5ce2607cc586259 | [
"BSD-3-Clause"
] | null | null | null | scripts/wall_trace.py | nzhoo/pimouse_run_corridor | f742aee835ab14eb2f8ff903c5ce2607cc586259 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#encoding: utf-8
import rospy,copy,math
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.msg import LightSensorValues
class WallTrace():
def __init__(self):
self.cmd_vel = rospy.Publisher('/cmd_vel',Twist,queue_size=1)
self.sensor_values = LightSensorValues()
rospy.Subscriber('/lightsensors', LightSensorValues, self.callback_lightsensors)
def callback_lightsensors(self,messages):
self.sensor_values = messages
def run(self):
rate = rospy.Rate(20)
data = Twist()
accel = 0.02
data.linear.x = 0.0
data.angular.z = 0
while not rospy.is_shutdown():
s = self.sensor_values
data.linear.x += accel
if s.sum_forward > 50: data.linear.x = 0.0
elif data.linear.x <= 0.2: data.linear.x = 0.2
elif data.linear.x >= 0.8: data.linear.x = 0.8
if data.linear.x < 0.2: data.angular.z = 0.0
elif s.left_side < 10: data.angular.z = 0.0
else:
target = 50
error = (target - s.left_side)/50.0
data.angular.z = error * 3 * math.pi / 180.0
self.cmd_vel.publish(data)
rate.sleep()
if __name__ == '__main__':
rospy.init_node('wall_trace')
rospy.wait_for_service('/motor_on')
rospy.wait_for_service('/motor_off')
rospy.on_shutdown(rospy.ServiceProxy('/motor_off',Trigger).call)
rospy.ServiceProxy('/motor_on',Trigger).call()
WallTrace().run()
| 32.45098 | 88 | 0.587915 |
7218cd2b93eafe7e2382dac12100014412e4e400 | 458 | py | Python | widget_jsmol/tests/test_nbextension_path.py | osscar-org/widget-jsmol | c9fc8e5b878780efe95bee4843c70b0fefa36078 | [
"BSD-3-Clause"
] | 1 | 2020-05-21T05:11:42.000Z | 2020-05-21T05:11:42.000Z | widget_jsmol/tests/test_nbextension_path.py | osscar-org/widget-jsmol | c9fc8e5b878780efe95bee4843c70b0fefa36078 | [
"BSD-3-Clause"
] | 1 | 2020-10-19T18:26:03.000Z | 2020-10-19T18:26:03.000Z | widget_jsmol/tests/test_nbextension_path.py | osscar-org/widget-jsmol | c9fc8e5b878780efe95bee4843c70b0fefa36078 | [
"BSD-3-Clause"
] | 1 | 2022-03-01T02:52:55.000Z | 2022-03-01T02:52:55.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Dou Du.
# Distributed under the terms of the Modified BSD License.
def test_nbextension_path():
# Check that magic function can be imported from package root:
from widget_jsmol import _jupyter_nbextension_paths
# Ensure that it can be called without incident:
path = _jupyter_nbextension_paths()
# Some sanity checks:
assert len(path) == 1
assert isinstance(path[0], dict)
| 28.625 | 66 | 0.724891 |
b7926f13417870d2f7e2f0c90b1f6ccdda236759 | 187 | py | Python | setup.py | CyanideCN/vanadis | ebd2373fb55d5913eb1384ab434e083fe714ecab | [
"MIT"
] | 1 | 2019-12-01T11:31:23.000Z | 2019-12-01T11:31:23.000Z | setup.py | CyanideCN/vanadis | ebd2373fb55d5913eb1384ab434e083fe714ecab | [
"MIT"
] | null | null | null | setup.py | CyanideCN/vanadis | ebd2373fb55d5913eb1384ab434e083fe714ecab | [
"MIT"
] | 2 | 2019-09-24T00:46:24.000Z | 2019-12-01T11:31:24.000Z | from setuptools import setup, find_packages
setup(name='vanadis',
version='0.0.2',
packages=find_packages(),
install_requires='matplotlib',
license='MIT Licence') | 26.714286 | 43 | 0.684492 |
2c9001c23c76a086a48cee8f5ef2e0a91995030a | 199 | py | Python | restart.py | DimmyBandeira/jarvis | d2561b24f301651969ca0495951161cdb43df8aa | [
"MIT"
] | 59 | 2020-09-08T17:42:09.000Z | 2022-03-09T07:28:25.000Z | restart.py | DimmyBandeira/jarvis | d2561b24f301651969ca0495951161cdb43df8aa | [
"MIT"
] | 20 | 2020-10-26T16:10:43.000Z | 2022-03-23T17:38:38.000Z | restart.py | DimmyBandeira/jarvis | d2561b24f301651969ca0495951161cdb43df8aa | [
"MIT"
] | 20 | 2020-10-18T00:43:25.000Z | 2022-03-20T21:04:22.000Z | """This is a basic query to start Jarvis after 5 seconds triggered by the ``restart()`` function in main module."""
from os import system
from time import sleep
sleep(5)
system('python3 jarvis.py')
| 28.428571 | 115 | 0.743719 |
77cdf2a70b4e6e8b5b28e3dad1049e1907b315db | 215 | py | Python | control-flow/src/file-exception.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | control-flow/src/file-exception.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | control-flow/src/file-exception.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | try:
with open(r'/tmp/iris.csv') as file:
content = file.read()
print(content)
except FileNotFoundError:
print('File does not exist')
except PermissionError:
print('Permission denied')
| 19.545455 | 40 | 0.655814 |
dc297ee719270019083fee317a0dcc01fe630e6e | 554 | py | Python | python/echo_worker.py | lizhenghn123/Gearman_Examples | 953aedbf250a4b9286d6fb46cfef9414a7d08807 | [
"MIT"
] | null | null | null | python/echo_worker.py | lizhenghn123/Gearman_Examples | 953aedbf250a4b9286d6fb46cfef9414a7d08807 | [
"MIT"
] | null | null | null | python/echo_worker.py | lizhenghn123/Gearman_Examples | 953aedbf250a4b9286d6fb46cfef9414a7d08807 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.6
# coding=utf-8
import os
import gearman
import math
class MyGearmanWorker(gearman.GearmanWorker):
def on_job_execute(self, current_job):
print "Job started"
print "===================\n"
return super(MyGearmanWorker, self).on_job_execute(current_job)
def task_callback(gearman_worker, gearman_job):
print gearman_job.data
print "-----------\n"
return gearman_job.data
my_worker = MyGearmanWorker(['127.0.0.1:4730'])
my_worker.register_task("echo", task_callback)
my_worker.work()
| 24.086957 | 71 | 0.685921 |
4ee74593408425c267cc90fec7569bdc90bcf4a6 | 1,059 | py | Python | telerembash/_logging.py | kpe/telerembash | 139fa21584e6cdaa4e84cec4193779d7d3d39a96 | [
"MIT"
] | 2 | 2021-04-25T16:47:13.000Z | 2021-12-09T15:38:34.000Z | telerembash/_logging.py | thomasbiege/telerembash | 4c8bc061d6cffff4d0e43389602ced4749dac092 | [
"MIT"
] | 1 | 2021-09-10T07:35:13.000Z | 2021-09-10T07:35:13.000Z | telerembash/_logging.py | thomasbiege/telerembash | 4c8bc061d6cffff4d0e43389602ced4749dac092 | [
"MIT"
] | 1 | 2021-04-25T16:47:17.000Z | 2021-04-25T16:47:17.000Z | # coding=utf-8
#
# created by kpe on 16.10.2020 at 10:04 PM
#
from __future__ import division, absolute_import, print_function
import os
import sys
import yaml
import logging
import logging.config
import coloredlogs
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
try:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
except Exception as e:
print(e)
print('Error in Logging Configuration. Using default configs')
logging.basicConfig(level=default_level)
coloredlogs.install(level=default_level)
else:
logging.basicConfig(level=default_level)
coloredlogs.install(level=default_level)
print(f"Failed to load logging configuration:[{path}]. Using default configs", file=sys.stderr)
| 29.416667 | 103 | 0.655335 |
5f8ea8bd41274e0d4a8ef4c8d147050eb04b21d5 | 3,120 | py | Python | python_nlp_explorations_chatbot_keywords_extraction/article_4_miscellaneous_examples_nlp_spacy/07_spacy_linguistic_features.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | [
"MIT"
] | 5 | 2018-05-03T08:16:02.000Z | 2021-09-04T03:44:24.000Z | python_nlp_explorations_chatbot_keywords_extraction/article_4_miscellaneous_examples_nlp_spacy/07_spacy_linguistic_features.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | [
"MIT"
] | 1 | 2022-01-28T19:27:19.000Z | 2022-01-28T19:27:19.000Z | python_nlp_explorations_chatbot_keywords_extraction/article_4_miscellaneous_examples_nlp_spacy/07_spacy_linguistic_features.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | [
"MIT"
] | 2 | 2020-09-10T13:33:27.000Z | 2022-02-09T11:07:38.000Z | #!/usr/bin/env python
# coding: utf8
"""A simple example of extracting relations between phrases and entities using
spaCy's named entity recognizer and the dependency parse. Here, we extract
money and currency values (entities labelled as MONEY) and then check the
dependency tree to find the noun phrase they are referring to – for example:
$9.4 million --> Net income.
Compatible with: spaCy v2.0.0+
Last tested with: v2.2.1
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_4_miscellaneous_examples_nlp_spacy
python 07_spacy_linguistic_features.py
Source: https://github.com/explosion/spaCy/blob/master/examples/information_extraction/entity_relations.py
"""
from __future__ import unicode_literals, print_function
import plac
import spacy
TEXTS = [
"Net income was $9.4 million compared to the prior year of $2.7 million.",
"Revenue exceeded twelve billion dollars, with a loss of $1b.",
]
@plac.annotations(
model=("Model to load (needs parser and NER)", "positional", None, str)
)
def main(model="en_core_web_sm"):
nlp = spacy.load(model)
print("Loaded model '%s'" % model)
print("Processing %d texts" % len(TEXTS))
for text in TEXTS:
doc = nlp(text)
relations = extract_currency_relations(doc)
for r1, r2 in relations:
print("{:<10}\t{}\t{}".format(r1.text, r2.ent_type_, r2.text))
def filter_spans(spans):
# Filter a sequence of spans so they don't contain overlaps
# For spaCy 2.1.4+: this function is available as spacy.util.filter_spans()
get_sort_key = lambda span: (span.end - span.start, -span.start)
sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
result = []
seen_tokens = set()
for span in sorted_spans:
# Check for end - 1 here because boundaries are inclusive
if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
result.append(span)
seen_tokens.update(range(span.start, span.end))
result = sorted(result, key=lambda span: span.start)
return result
def extract_currency_relations(doc):
# Merge entities and noun chunks into one token
spans = list(doc.ents) + list(doc.noun_chunks)
spans = filter_spans(spans)
with doc.retokenize() as retokenizer:
for span in spans:
retokenizer.merge(span)
relations = []
for money in filter(lambda w: w.ent_type_ == "MONEY", doc):
if money.dep_ in ("attr", "dobj"):
subject = [w for w in money.head.lefts if w.dep_ == "nsubj"]
if subject:
subject = subject[0]
relations.append((subject, money))
elif money.dep_ == "pobj" and money.head.dep_ == "prep":
relations.append((money.head.head, money))
return relations
if __name__ == "__main__":
plac.call(main)
# Expected output:
# Net income MONEY $9.4 million
# the prior year MONEY $2.7 million
# Revenue MONEY twelve billion dollars
# a loss MONEY 1b
#
| 31.836735 | 167 | 0.676603 |
e88ca749abd81b2de57fa4b96308c146f80f4e92 | 9,380 | py | Python | tests/h/routes_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | tests/h/routes_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | tests/h/routes_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | from unittest.mock import Mock, call
from h.routes import includeme
def test_includeme():
config = Mock(spec_set=["add_route"])
includeme(config)
# This may look like a ridiculous test, but the cost of keeping it
# up-to-date is hopefully pretty low (run the tests with -vv, copy the new
# expected value, strip out any Unicode prefixes) and it serves as a check
# to ensure that any changes made to the routes were intended.
calls = [
call("index", "/"),
call("robots", "/robots.txt"),
call("via_redirect", "/via"),
call("login", "/login"),
call("logout", "/logout"),
call("signup", "/signup"),
call("activate", "/activate/{id}/{code}"),
call("forgot_password", "/forgot-password"),
call("account_reset", "/account/reset"),
call("account_reset_with_code", "/account/reset/{code}"),
call("account", "/account/settings"),
call("account_profile", "/account/profile"),
call("account_notifications", "/account/settings/notifications"),
call("account_developer", "/account/developer"),
call("claim_account_legacy", "/claim_account/{token}"),
call("dismiss_sidebar_tutorial", "/app/dismiss_sidebar_tutorial"),
call("activity.search", "/search"),
call(
"activity.user_search",
"/users/{username}",
factory="h.traversal.UserByNameRoot",
traverse="/{username}",
),
call("admin.index", "/admin/"),
call("admin.admins", "/admin/admins"),
call("admin.badge", "/admin/badge"),
call("admin.features", "/admin/features"),
call("admin.cohorts", "/admin/features/cohorts"),
call("admin.cohorts_edit", "/admin/features/cohorts/{id}"),
call("admin.groups", "/admin/groups"),
call("admin.groups_create", "/admin/groups/new"),
call(
"admin.groups_delete",
"/admin/groups/delete/{id}",
factory="h.traversal.GroupRequiredRoot",
traverse="/{id}",
),
call(
"admin.groups_edit",
"/admin/groups/{id}",
factory="h.traversal.GroupRequiredRoot",
traverse="/{id}",
),
call("admin.mailer", "/admin/mailer"),
call("admin.mailer_test", "/admin/mailer/test"),
call("admin.nipsa", "/admin/nipsa"),
call("admin.oauthclients", "/admin/oauthclients"),
call("admin.oauthclients_create", "/admin/oauthclients/new"),
call("admin.oauthclients_edit", "/admin/oauthclients/{id}"),
call("admin.organizations", "/admin/organizations"),
call("admin.organizations_create", "/admin/organizations/new"),
call(
"admin.organizations_delete",
"/admin/organizations/delete/{pubid}",
factory="h.traversal.OrganizationRoot",
traverse="/{pubid}",
),
call(
"admin.organizations_edit",
"/admin/organizations/{pubid}",
factory="h.traversal.OrganizationRoot",
traverse="/{pubid}",
),
call("admin.staff", "/admin/staff"),
call("admin.users", "/admin/users"),
call("admin.users_activate", "/admin/users/activate"),
call("admin.users_delete", "/admin/users/delete"),
call("admin.users_rename", "/admin/users/rename"),
call("admin.search", "/admin/search"),
call(
"annotation",
"/a/{id}",
factory="h.traversal:AnnotationRoot",
traverse="/{id}",
),
call("stream", "/stream"),
call("stream.user_query", "/u/{user}"),
call("stream.tag_query", "/t/{tag}"),
call("assets", "/assets/*subpath"),
call("api.index", "/api/"),
call("api.links", "/api/links"),
call(
"api.annotations", "/api/annotations", factory="h.traversal:AnnotationRoot"
),
call(
"api.annotation",
"/api/annotations/{id:[A-Za-z0-9_-]{20,22}}",
factory="h.traversal:AnnotationRoot",
traverse="/{id}",
),
call(
"api.annotation_flag",
"/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/flag",
factory="h.traversal:AnnotationRoot",
traverse="/{id}",
),
call(
"api.annotation_hide",
"/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/hide",
factory="h.traversal:AnnotationRoot",
traverse="/{id}",
),
call(
"api.annotation.jsonld",
"/api/annotations/{id:[A-Za-z0-9_-]{20,22}}.jsonld",
factory="h.traversal:AnnotationRoot",
traverse="/{id}",
),
call(
"api.bulk",
"/api/bulk",
request_method="POST",
factory="h.traversal.BulkAPIRoot",
),
call("api.groups", "/api/groups", factory="h.traversal.GroupRoot"),
call(
"api.group_upsert",
"/api/groups/{id}",
request_method="PUT",
factory="h.traversal.GroupRoot",
traverse="/{id}",
),
call(
"api.group",
"/api/groups/{id}",
request_method=("GET", "PATCH"),
factory="h.traversal.GroupRequiredRoot",
traverse="/{id}",
),
call("api.profile", "/api/profile", factory="h.traversal.ProfileRoot"),
call("api.profile_groups", "/api/profile/groups"),
call("api.debug_token", "/api/debug-token"),
call(
"api.group_members",
"/api/groups/{pubid}/members",
factory="h.traversal.GroupRequiredRoot",
traverse="/{pubid}",
),
call(
"api.group_member",
"/api/groups/{pubid}/members/{userid}",
factory="h.traversal.GroupRequiredRoot",
traverse="/{pubid}",
),
call("api.search", "/api/search"),
call("api.users", "/api/users", factory="h.traversal.UserRoot"),
call(
"api.user_read",
"/api/users/{userid}",
request_method="GET",
factory="h.traversal.UserByIDRoot",
traverse="/{userid}",
),
call(
"api.user",
"/api/users/{username}",
factory="h.traversal.UserByNameRoot",
traverse="/{username}",
),
call("badge", "/api/badge"),
call("token", "/api/token"),
call("oauth_authorize", "/oauth/authorize"),
call("oauth_revoke", "/oauth/revoke"),
call("sidebar_app", "/app.html"),
call("notebook_app", "/notebook"),
call("embed", "/embed.js"),
call("stream_atom", "/stream.atom"),
call("stream_rss", "/stream.rss"),
call(
"organization_logo",
"/organizations/{pubid}/logo",
factory="h.traversal.OrganizationRoot",
traverse="/{pubid}",
),
call("group_create", "/groups/new"),
call(
"group_edit",
"/groups/{pubid}/edit",
factory="h.traversal.GroupRequiredRoot",
traverse="/{pubid}",
),
call(
"group_read",
"/groups/{pubid}/{slug:[^/]*}",
factory="h.traversal.GroupRequiredRoot",
traverse="/{pubid}",
),
call(
"group_read_noslug",
"/groups/{pubid}",
factory="h.traversal.GroupRequiredRoot",
traverse="/{pubid}",
),
call("help", "/docs/help"),
call("onboarding", "/welcome/"),
call("custom_onboarding", "/welcome/{slug}"),
call("unsubscribe", "/notification/unsubscribe/{token}"),
call("status", "/_status"),
call("about", "/about/", static=True),
call("bioscience", "/bioscience/", static=True),
call("blog", "/blog/", static=True),
call(
"chrome-extension",
"https://chrome.google.com/webstore/detail/bjfhmglciegochdpefhhlphglcehbmek",
static=True,
),
call("contact", "/contact/", static=True),
call("contribute", "/contribute/", static=True),
call("education", "/education/", static=True),
call("for-publishers", "/for-publishers/", static=True),
call("fund", "/fund/", static=True),
call("help-center", "/help/", static=True),
call("hypothesis-github", "https://github.com/hypothesis", static=True),
call("hypothesis-twitter", "https://twitter.com/hypothes_is", static=True),
call("jobs", "/jobs/", static=True),
call("press", "/press/", static=True),
call("privacy", "/privacy/", static=True),
call("roadmap", "/roadmap/", static=True),
call("team", "/team/", static=True),
call("terms-of-service", "/terms-of-service/", static=True),
call(
"wordpress-plugin", "https://wordpress.org/plugins/hypothesis/", static=True
),
]
# Test each one one at a time to make it a bit easier to spot which one
# isn't in the list
for single_call in calls:
assert single_call in config.add_route.mock_calls
# Then we can assert the order here
assert config.add_route.mock_calls == calls
| 37.822581 | 89 | 0.533795 |
8dc7b47753335506dde6901e358f411cf814187b | 172 | py | Python | integration/setup.py | frusdelion/rancher-community-catalog | 3a43943607adad91348149755f828166b3c1b88b | [
"Apache-2.0"
] | 413 | 2016-02-03T09:07:41.000Z | 2021-11-08T08:39:49.000Z | integration/setup.py | frusdelion/rancher-community-catalog | 3a43943607adad91348149755f828166b3c1b88b | [
"Apache-2.0"
] | 620 | 2016-02-01T04:13:12.000Z | 2020-10-20T19:25:16.000Z | integration/setup.py | frusdelion/rancher-community-catalog | 3a43943607adad91348149755f828166b3c1b88b | [
"Apache-2.0"
] | 1,079 | 2016-01-30T19:28:34.000Z | 2022-03-10T12:54:15.000Z | from distutils.core import setup
setup(
name='Rancher Catalog YAML Integration Tests',
version='0.1',
packages=[
'core',
],
license='ASL 2.0',
)
| 15.636364 | 50 | 0.604651 |
2386b3b360730d3b6564d595c1fbc0defedbcbad | 3,840 | py | Python | RecoHI/HiTracking/python/hiMultiTrackSelector_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | RecoHI/HiTracking/python/hiMultiTrackSelector_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | RecoHI/HiTracking/python/hiMultiTrackSelector_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
#loose
hiLooseMTS = cms.PSet(
preFilterName=cms.string(''),
name= cms.string('hiTrkLoose'),
# vertex selection
vtxNumber = cms.int32(-1),
vertexCut = cms.string(''),
#untracked bool copyTrajectories = true // when doing retracking before
copyTrajectories = cms.untracked.bool(True),
copyExtras = cms.untracked.bool(True), ## set to false on AOD
qualityBit = cms.string('loose'), ## set to '' or comment out if you dont want to set the
chi2n_par = cms.double(0.3), # version with 1D hits modification
chi2n_no1Dmod_par = cms.double(9999.), # normalizedChi2 < nLayers * chi2n_par
res_par = cms.vdouble(99999., 99999.), # residual parameterization (re-check in HI)
d0_par1 = cms.vdouble(9999., 0.), # parameterized nomd0E
dz_par1 = cms.vdouble(9999., 0.),
d0_par2 = cms.vdouble(0.4, 4.0), # d0E from tk.d0Error
dz_par2 = cms.vdouble(0.4, 4.0),
# Boolean indicating if adapted primary vertex compatibility cuts are to be applied.
applyAdaptedPVCuts = cms.bool(True),
# Impact parameter absolute cuts.
max_z0 = cms.double(100),
max_d0 = cms.double(100),
nSigmaZ = cms.double(9999.),
# Cuts on numbers of layers with hits/3D hits/lost hits.
minNumberLayers = cms.uint32(0),
minNumber3DLayers = cms.uint32(0),
maxNumberLostLayers = cms.uint32(999),
minHitsToBypassChecks = cms.uint32(999),
max_minMissHitOutOrIn = cms.int32(99),
max_lostHitFraction = cms.double(1.0),
min_eta = cms.double(-9999.),
max_eta = cms.double(9999.) ,
# Absolute cuts in case of no PV. If yes, please define also max_d0NoPV and max_z0NoPV
applyAbsCutsIfNoPV = cms.bool(False),
keepAllTracks= cms.bool(False),
# parameters for cutting on pterror/pt and number of valid hits
max_relpterr = cms.double(0.2),
min_nhits = cms.uint32(8),
useMVA = cms.bool(False),
minMVA = cms.double(-1)
)
hiTightMTS=hiLooseMTS.clone(
preFilterName='hiTrkLoose',
min_nhits = cms.uint32(8),
max_relpterr = cms.double(0.075),
d0_par2 = cms.vdouble(5.0, 0.0),
dz_par2 = cms.vdouble(5.0, 0.0),
chi2n_no1Dmod_par = cms.double(0.25),
name= cms.string('hiTrkTight'),
qualityBit = cms.string('tight'), ## set to '' or comment out if you dont want to set the bit
keepAllTracks= cms.bool(True)
)
hiHighpurityMTS= hiTightMTS.clone(
name= cms.string('hiTrkHighPurity'),
preFilterName='hiTrkTight',
min_nhits = cms.uint32(8),
max_relpterr = cms.double(0.05),
d0_par2 = [3.0, 0.0],
dz_par2 = [3.0, 0.0],
chi2n_no1Dmod_par = cms.double(0.15),
qualityBit = cms.string('highPurity') ## set to '' or comment out if you dont want to set the bit
)
#typical configuration is six selectors... something like this to
#make cloning easier.
hiMultiTrackSelector = cms.EDProducer("HIMultiTrackSelector",
src = cms.InputTag("hiGeneralTracks"),
beamspot = cms.InputTag("offlineBeamSpot"),
useVertices = cms.bool(True),
useVtxError = cms.bool(True),
vertices = cms.InputTag("hiSelectedPixelVertex"),
useAnyMVA = cms.bool(False),
GBRForestLabel = cms.string(''),
GBRForestVars = cms.vstring(),
trackSelectors = cms.VPSet( hiLooseMTS,
hiTightMTS,
hiHighpurityMTS)
)
| 41.73913 | 101 | 0.586458 |
54c15459166aa3dbc5379a75a719d5f4006202f4 | 74,507 | py | Python | scripts/checkimages.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | null | null | null | scripts/checkimages.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | null | null | null | scripts/checkimages.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to check recently uploaded files.
This script checks if a file description is present and if there are other
problems in the image's description.
This script will have to be configured for each language. Please submit
translations as addition to the Pywikibot framework.
Everything that needs customisation is indicated by comments.
This script understands the following command-line arguments:
-limit The number of images to check (default: 80)
-commons The Bot will check if an image on Commons has the same name
and if true it reports the image.
-duplicates[:#] Checking if the image has duplicates (if arg, set how many
rollback wait before reporting the image in the report
instead of tag the image) default: 1 rollback.
-duplicatesreport Report the duplicates in a log *AND* put the template in
the images.
-maxusernotify Maximum nofitications added to a user talk page in a single
check, to avoid email spamming.
-sendemail Send an email after tagging.
-break To break the bot after the first check (default: recursive)
-sleep[:#] Time in seconds between repeat runs (default: 30)
-wait[:#] Wait x second before check the images (default: 0)
-skip[:#] The bot skip the first [:#] images (default: 0)
-start[:#] Use allimages() as generator
(it starts already from File:[:#])
-cat[:#] Use a category as generator
-regex[:#] Use regex, must be used with -url or -page
-page[:#] Define the name of the wikipage where are the images
-url[:#] Define the url where are the images
-nologerror If given, this option will disable the error that is risen
when the log is full.
Instructions for the real-time settings.
For every new block you have to add:
<------- ------->
In this way the Bot can understand where the block starts in order to take the
right parameter.
* Name= Set the name of the block
* Find= search this text in the image's description
* Findonly= search for exactly this text in the image's description
* Summary= That's the summary that the bot will use when it will notify the
problem.
* Head= That's the incipit that the bot will use for the message.
* Text= This is the template that the bot will use when it will report the
image's problem.
Todo
----
* Clean the code, some passages are pretty difficult to understand.
* Add the "catch the language" function for commons.
* Fix and reorganise the new documentation
* Add a report for the image tagged.
"""
#
# (C) Kyle/Orgullomoore, 2006-2007 (newimage.py)
# (C) Siebrand Mazeland, 2007-2010
# (C) Filnik, 2007-2011
# (C) Pywikibot team, 2007-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import collections
import re
import time
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.exceptions import ArgumentDeprecationWarning, NotEmailableError
from pywikibot.family import Family
from pywikibot import i18n
from pywikibot import pagegenerators as pg
from pywikibot.site import Namespace
from pywikibot.tools import issue_deprecation_warning
###############################################################################
# <--------------------------- Change only below! --------------------------->#
###############################################################################
# NOTE: in the messages used by the Bot if you put __botnick__ in the text, it
# will automatically replaced with the bot's nickname.
# That's what you want that will be added. (i.e. the {{no source}} with the
# right day/month/year )
n_txt = {
'commons': '{{subst:nld}}',
'meta': '{{No license}}',
'test': '{{No license}}',
'ar': '{{subst:لم}}',
'de': '{{Dateiüberprüfung}}',
'en': '{{subst:nld}}',
'fa': '{{جا:حق تکثیر تصویر نامعلوم}}',
'fr': '{{subst:lid}}',
'ga': '{{subst:Ceadúnas de dhíth}}',
'hr': '{{Bez licence}}',
'hu': '{{nincslicenc|~~~~~}}',
'it': '{{subst:unverdata}}',
'ja': '{{subst:Nld}}',
'ko': '{{subst:nld}}',
'sr': '{{subst:датотека без лиценце}}',
'ta': '{{subst:nld}}',
'ur': '{{subst:حقوق نسخہ تصویر نامعلوم}}',
'zh': '{{subst:No license/auto}}',
}
# Text that the bot will try to see if there's already or not. If there's a
# {{ I'll use a regex to make a better check.
# This will work so:
# '{{no license' --> '\{\{(?:template:)?no[ _]license ?(?:\||\n|\}|/) ?' (case
# insensitive).
# If there's not a {{ it will work as usual (if x in Text)
txt_find = {
'commons': ['{{no license', '{{no license/en',
'{{nld', '{{no permission', '{{no permission since'],
'meta': ['{{no license', '{{nolicense', '{{nld'],
'test': ['{{no license'],
'ar': ['{{لت', '{{لا ترخيص'],
'de': ['{{DÜP', '{{Düp', '{{Dateiüberprüfung'],
'en': ['{{nld', '{{no license'],
'fa': ['{{حق تکثیر تصویر نامعلوم۲'],
'ga': ['{{Ceadúnas de dhíth', '{{Ceadúnas de dhíth'],
'hr': ['{{bez licence'],
'hu': ['{{nincsforrás', '{{nincslicenc'],
'it': ['{{unverdata', '{{unverified'],
'ja': ['{{no source', '{{unknown',
'{{non free', '<!--削除についての議論が終了するまで'],
'ko': ['{{출처 없음', '{{라이선스 없음', '{{Unknown'],
'sr': ['{{датотека без лиценце', '{{датотека без извора'],
'ta': ['{{no source', '{{nld', '{{no license'],
'ur': ['{{ناحوالہ', '{{اجازہ نامعلوم', '{{Di-no'],
'zh': ['{{no source', '{{unknown', '{{No license'],
}
# When the Bot find that the usertalk is empty is not pretty to put only the
# no source without the welcome, isn't it?
empty = {
'commons': '{{subst:welcome}}\n~~~~\n',
'meta': '{{subst:Welcome}}\n~~~~\n',
'ar': '{{ترحيب}}\n~~~~\n',
'de': '{{subst:willkommen}} ~~~~',
'en': '{{welcome}}\n~~~~\n',
'fa': '{{جا:خوشامدید|%s}}',
'fr': '{{Bienvenue nouveau\n~~~~\n',
'ga': '{{subst:Fáilte}} - ~~~~\n',
'hr': '{{subst:dd}}--~~~~\n',
'hu': '{{subst:Üdvözlet|~~~~}}\n',
'it': '<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n~~~~\n'
'<!-- fine template di benvenuto -->',
'ja': '{{subst:Welcome/intro}}\n{{subst:welcome|--~~~~}}\n',
'ko': '{{환영}}--~~~~\n',
'sr': '{{dd}}--~~~~\n',
'ta': '{{welcome}}\n~~~~\n',
'ur': '{{خوش آمدید}}\n~~~~\n',
'zh': '{{subst:welcome|sign=~~~~}}',
}
# if the file has an unknown extension it will be tagged with this template.
# In reality, there aren't unknown extension, they are only not allowed...
delete_immediately = {
'commons': '{{speedy|The file has .%s as extension. '
'Is it ok? Please check.}}',
'meta': '{{Delete|The file has .%s as extension.}}',
'ar': '{{شطب|الملف له .%s كامتداد.}}',
'en': '{{db-meta|The file has .%s as extension.}}',
'fa': '{{حذف سریع|تصویر %s اضافی است.}}',
'ga': '{{scrios|Tá iarmhír .%s ar an comhad seo.}}',
'hu': '{{azonnali|A fájlnak .%s a kiterjesztése}}',
'it': '{{cancella subito|motivo=Il file ha come estensione ".%s"}}',
'ja': '{{db|知らないファイルフォーマット %s}}',
'ko': '{{delete|잘못된 파일 형식 (.%s)}}',
'sr': '{{speedy|Ова датотека садржи екстензију %s. '
'Молим вас да проверите да ли је у складу са правилима.}}',
'ta': '{{delete|'
'இந்தக் கோப்பு .%s என்றக் கோப்பு நீட்சியைக் கொண்டுள்ளது.}}',
'ur': '{{سریع حذف شدگی|اس ملف میں .%s بطور توسیع موجود ہے۔ }}',
'zh': '{{delete|未知檔案格式%s}}',
}
# That's the text that the bot will add if it doesn't find the license.
# Note: every __botnick__ will be repleaced with your bot's nickname
# (feel free not to use if you don't need it)
nothing_notification = {
'commons': "\n{{subst:User:Filnik/untagged|File:%s}}\n\n''This message "
"was '''added automatically by ~~~''', if you need "
'some help about it, please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]] '
"'''[[Commons:Help desk|->]][[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~",
'meta': '{{subst:No license notice|File:%s}}',
'ar': '{{subst:مصدر الصورة|File:%s}} --~~~~',
'en': '{{subst:image source|File:%s}} --~~~~',
'fa': '{{جا:اخطار نگاره|%s}}',
'ga': '{{subst:Foinse na híomhá|File:%s}} --~~~~',
'hu': '{{subst:adjforrást|Kép:%s}}\n Ezt az üzenetet ~~~ automatikusan '
'helyezte el a vitalapodon, kérdéseddel fordulj a gazdájához, vagy '
'a [[WP:KF|Kocsmafalhoz]]. --~~~~',
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza licenza|'
'%s|~~~}} --~~~~',
'ja': '\n{{subst:Image copyright|File:%s}}--~~~~',
'ko': '\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~',
'sr': '\n{{subst:Обавештење о датотеци без лиценце|%s}} --~~~~',
'ta': '\n{{subst:Di-no license-notice|படிமம்:%s}} ~~~~',
'ur': '{{subst:ماخذ تصویر|File:%s}}--~~~~',
'zh': '\n{{subst:Uploadvionotice|File:%s}} ~~~~',
}
# This is a list of what bots used this script in your project.
# NOTE: YOUR Bot username will be automatically added.
bot_list = {
'commons': ['Siebot', 'CommonsDelinker', 'Filbot', 'John Bot',
'Sz-iwbot', 'ABFbot'],
'meta': ['MABot'],
'de': ['Xqbot'],
'en': ['OrphanBot'],
'fa': ['Amirobot'],
'ga': ['AllieBot'],
'it': ['Filbot', 'Nikbot', '.snoopyBot.'],
'ja': ['Alexbot'],
'ko': ['Kwjbot IV'],
'sr': ['ZoranBot'],
'ta': ['TrengarasuBOT'],
'ur': ['Shuaib-bot', 'Tahir-bot', 'SAMI.bot'],
'zh': ['Alexbot'],
}
# The message that the bot will add the second time that find another license
# problem.
second_message_without_license = {
'hu': '\nSzia! Úgy tűnik a [[:Kép:%s]] képpel is hasonló a probléma, '
'mint az előbbivel. Kérlek olvasd el a [[WP:KÉPLIC|feltölthető '
'képek]]ről szóló oldalunk, és segítségért fordulj a [[WP:KF-JO|'
'Jogi kocsmafalhoz]]. Köszönöm --~~~~',
'it': ':{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza'
'licenza2|%s|~~~}} --~~~~',
}
# You can add some settings to a wiki page. In this way, you can change them
# without touching the code. That's useful if you are running the bot on
# Toolserver.
page_with_settings = {
'commons': 'User:Filbot/Settings',
'it': 'Progetto:Coordinamento/Immagini/Bot/Settings#Settings',
'sr': 'User:ZoranBot/checkimages.py/подешавања',
'zh': 'User:Alexbot/cisettings#Settings',
}
# The bot can report some images (like the images that have the same name of an
# image on commons) This is the page where the bot will store them.
report_page = {
'commons': 'User:Filbot/Report',
'meta': 'User:MABot/Report',
'test': 'User:Pywikibot-test/Report',
'de': 'Benutzer:Xqbot/Report',
'en': 'User:Filnik/Report',
'fa': 'کاربر:Amirobot/گزارش تصویر',
'ga': 'User:AllieBot/ReportImages',
'hu': 'User:Bdamokos/Report',
'it': 'Progetto:Coordinamento/Immagini/Bot/Report',
'ja': 'User:Alexbot/report',
'ko': 'User:Kwjbot IV/Report',
'sr': 'User:ZoranBot/checkimages.py/дневник',
'ta': 'User:Trengarasu/commonsimages',
'ur': 'صارف:محمد شعیب/درخواست تصویر',
'zh': 'User:Alexsh/checkimagereport',
}
# If a template isn't a license but it's included on a lot of images, that can
# be skipped to analyze the image without taking care of it. (the template must
# be in a list)
# Warning: Don't add template like "en, de, it" because they are already in
# (added in the code, below
# Warning 2: The bot will use regex, make the names compatible, please (don't
# add "Template:" or {{because they are already put in the regex).
# Warning 3: the part that use this regex is case-insensitive (just to let you
# know..)
HiddenTemplate = {
# Put the other in the page on the project defined below
'commons': ['Template:Information'],
'meta': ['Template:Information'],
'test': ['Template:Information'],
'ar': ['Template:معلومات'],
'de': ['Template:Information'],
'en': ['Template:Information'],
'fa': ['الگو:اطلاعات'],
'fr': ['Template:Information'],
'ga': ['Template:Information'],
'hr': ['Template:Infoslika'],
'hu': ['Template:Információ', 'Template:Enwiki', 'Template:Azonnali'],
'it': ['Template:EDP', 'Template:Informazioni file',
'Template:Information', 'Template:Trademark',
'Template:Permissionotrs'],
'ja': ['Template:Information'],
'ko': ['Template:그림 정보'],
'sr': ['Шаблон:Информација', 'Шаблон:Non-free use rationale 2'],
'ta': ['Template:Information'],
'ur': ['Template:معلومات'],
'zh': ['Template:Information'],
}
# A page where there's a list of template to skip.
PageWithHiddenTemplates = {
'commons': 'User:Filbot/White_templates#White_templates',
'it': 'Progetto:Coordinamento/Immagini/Bot/WhiteTemplates',
'ko': 'User:Kwjbot_IV/whitetemplates/list',
'sr': 'User:ZoranBot/checkimages.py/дозвољенишаблони',
}
# A page where there's a list of template to consider as licenses.
PageWithAllowedTemplates = {
'commons': 'User:Filbot/Allowed templates',
'de': 'Benutzer:Xqbot/Lizenzvorlagen',
'it': 'Progetto:Coordinamento/Immagini/Bot/AllowedTemplates',
'ko': 'User:Kwjbot_IV/AllowedTemplates',
'sr': 'User:ZoranBot/checkimages.py/дозвољенишаблони',
}
# Template added when the bot finds only an hidden template and nothing else.
# Note: every __botnick__ will be repleaced with your bot's nickname
# (feel free not to use if you don't need it)
HiddenTemplateNotification = {
'commons': ("\n{{subst:User:Filnik/whitetemplate|File:%s}}\n\n''This "
'message was added automatically by ~~~, if you need '
'some help about it please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]]'
" '''[[Commons:Help desk|→]] [[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~"),
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/'
'Template_insufficiente|%s|~~~}} --~~~~',
'ko': '\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~',
}
# In this part there are the parameters for the dupe images.
# Put here the template that you want to put in the image to warn that it's a
# dupe. put __image__ if you want only one image, __images__ if you want the
# whole list
duplicatesText = {
'commons': '\n{{Dupe|__image__}}',
'de': '{{NowCommons}}',
'it': '\n{{Progetto:Coordinamento/Immagini/Bot/Template duplicati|'
'__images__}}',
'sr': '{{NowCommons|__image__}}',
}
# Message to put in the talk
duplicates_user_talk_text = {
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Duplicati|'
'%s|%s|~~~}} --~~~~',
}
# Regex to detect the template put in the image's description to find the dupe
duplicatesRegex = {
'commons': r'\{\{(?:[Tt]emplate:|)(?:[Dd]up(?:licat|)e|[Bb]ad[ _][Nn]ame)'
r'[|}]',
'de': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}',
'it': r'\{\{(?:[Tt]emplate:|)[Pp]rogetto:[Cc]oordinamento/Immagini/Bot/'
r'Template duplicati[|}]',
'sr': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}',
}
# Category with the licenses and / or with subcategories with the other
# licenses.
category_with_licenses = {
'commons': 'Category:License tags',
'meta': 'Category:License templates',
'test': 'Category:CC license tags',
'ar': 'تصنيف:قوالب حقوق الصور',
'de': 'Kategorie:Vorlage:Lizenz für Bilder',
'en': 'Category:Wikipedia file copyright templates',
'fa': 'رده:الگو:حق تکثیر پرونده',
'ga': "Catagóir:Clibeanna cóipchirt d'íomhánna",
'it': 'Categoria:Template Licenze copyright',
'ja': 'Category:画像の著作権表示テンプレート',
'ko': '분류:위키백과 그림 저작권 틀',
'sr': 'Категорија:Шаблони за слике',
'ta': 'Category:காப்புரிமை வார்ப்புருக்கள்',
'ur': 'زمرہ:ویکیپیڈیا سانچہ جات حقوق تصاویر',
'zh': 'Category:版權申告模板',
}
# Page where is stored the message to send as email to the users
emailPageWithText = {
# 'de': 'Benutzer:ABF/D3',
}
# Title of the email
emailSubject = {
# 'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia',
}
# Seems that uploaderBots aren't interested to get messages regarding the
# files that they upload.. strange, uh?
# Format: [[user,regex], [user,regex]...] the regex is needed to match the user
# where to send the warning-msg
uploadBots = {
'commons': [['File Upload Bot (Magnus Manske)',
r'\|[Ss]ource=Transferred from .*?; '
r'transferred to Commons by \[\[User:(.*?)\]\]']],
}
# Service images that don't have to be deleted and/or reported has a template
# inside them (you can let this param as None)
serviceTemplates = {
'it': ['Template:Immagine di servizio'],
}
# Add your project (in alphabetical order) if you want that the bot starts
project_inserted = ['ar', 'commons', 'de', 'en', 'fa', 'ga', 'hu', 'it', 'ja',
'ko', 'meta', 'sr', 'ta', 'test', 'ur', 'zh']
# END OF CONFIGURATION.
SETTINGS_REGEX = re.compile(r"""
<-------\ ------->\n
\*[Nn]ame\ ?=\ ?['"](.*?)['"]\n
\*([Ff]ind|[Ff]indonly)\ ?=\ ?(.*?)\n
\*[Ii]magechanges\ ?=\ ?(.*?)\n
\*[Ss]ummary\ ?=\ ?['"](.*?)['"]\n
\*[Hh]ead\ ?=\ ?['"](.*?)['"]\n
\*[Tt]ext\ ?=\ ?['"](.*?)['"]\n
\*[Mm]ex\ ?=\ ?['"]?([^\n]*?)['"]?\n
""", re.UNICODE | re.DOTALL | re.VERBOSE)
class LogIsFull(pywikibot.Error):
    """Raised when the report log page is full and no entry can be added."""
def printWithTimeZone(message):
    """Print *message* with the current UTC timestamp appended."""
    stamp = time.strftime('%d %b %Y %H:%M:%S (UTC)', time.gmtime())
    pywikibot.output('{0} {1}'.format(message.rstrip(), stamp))
class checkImagesBot(object):
    """A robot to check recently uploaded files."""
    def __init__(self, site, logFulNumber=25000, sendemailActive=False,
                 duplicatesReport=False, logFullError=True,
                 max_user_notify=None):
        """Initializer, define some instance variables.

        @param site: the site the bot works on
        @param logFulNumber: report-page size (in characters) above which
            the log is considered full
        @param sendemailActive: also email the uploader when configured
        @param duplicatesReport: always report duplicates on the log page
        @param logFullError: raise LogIsFull instead of only warning
        @param max_user_notify: cap of notifications per user talk page
        @raises i18n.TranslationError: if the project has no report page
            or no whitelist templates configured
        """
        self.site = site
        self.logFullError = logFullError
        self.logFulNumber = logFulNumber
        self.rep_page = i18n.translate(self.site, report_page)
        if not self.rep_page:
            raise i18n.TranslationError(
                'No report page provided in "report_page" dict '
                'for your project!')
        self.image_namespace = site.namespaces.FILE.custom_name + ':'
        # list entry template; '%s' is filled with the file title later
        self.list_entry = '\n* [[:{0}%s]] '.format(self.image_namespace)
        # The summary of the report
        self.com = i18n.twtranslate(self.site, 'checkimages-log-comment')
        hiddentemplatesRaw = i18n.translate(self.site, HiddenTemplate)
        if not hiddentemplatesRaw:
            raise i18n.TranslationError(
                'No non-license templates provided in "HiddenTemplate" dict '
                'for your project!')
        self.hiddentemplates = {
            pywikibot.Page(self.site, tmp, ns=self.site.namespaces.TEMPLATE)
            for tmp in hiddentemplatesRaw}
        self.pageHidden = i18n.translate(self.site, PageWithHiddenTemplates)
        self.pageAllowed = i18n.translate(self.site, PageWithAllowedTemplates)
        self.comment = i18n.twtranslate(self.site.lang,
                                        'checkimages-source-tag-comment')
        # Adding the bot's nickname at the notification text if needed.
        self.bots = i18n.translate(self.site, bot_list)
        if self.bots:
            self.bots.append(site.username())
        else:
            self.bots = [site.username()]
        self.sendemailActive = sendemailActive
        self.skip_list = []
        self.duplicatesReport = duplicatesReport
        # num_notify counts down the remaining notifications per talk page
        if max_user_notify:
            self.num_notify = collections.defaultdict(lambda: max_user_notify)
        else:
            self.num_notify = None
        # Load the licenses only once, so do it once
        self.list_licenses = self.load_licenses()
    def setParameters(self, image):
        """Prepare the instance for checking a new image.

        @param image: page object (or title) of the file to examine
        """
        # ensure we have a FilePage
        self.image = pywikibot.FilePage(image)
        # title without the File: namespace prefix
        self.imageName = image.title(with_ns=False)
        # reset per-image state filled in later by the dup/uploader checks
        self.timestamp = None
        self.uploader = None
    def report(self, newtext, image_to_report, notification=None, head=None,
               notification2=None, unver=True, commTalk=None, commImage=None):
        """Tag the image page and (optionally) warn the uploader.

        @param newtext: template text prepended to the file description
        @param image_to_report: title of the file to tag
        @param notification: first warning text for the uploader's talk
        @param head: section header for the talk-page warning
        @param notification2: follow-up text when the bot already warned
        @param unver: passed through to tag_image() as its ``put`` flag
        @param commTalk: edit summary for the talk-page edit
        @param commImage: edit summary for the file-page edit
        @raises i18n.TranslationError: when no tagging template exists
        """
        self.image_to_report = image_to_report
        self.newtext = newtext
        if not newtext:
            raise i18n.TranslationError(
                'No no-license template provided in "n_txt" dict '
                'for your project!')
        self.head = head or ''
        self.notification = notification
        self.notification2 = notification2
        # Substitute the bot's own username into the warning texts.
        if self.notification:
            self.notification = re.sub(r'__botnick__', self.site.username(),
                                       notification)
        if self.notification2:
            self.notification2 = re.sub(r'__botnick__', self.site.username(),
                                        notification2)
        self.commTalk = commTalk
        self.commImage = commImage or self.comment
        image_tagged = False
        try:
            image_tagged = self.tag_image(unver)
        except pywikibot.NoPage:
            pywikibot.output('The page has been deleted! Skip!')
        except pywikibot.EditConflict:
            pywikibot.output('Edit conflict! Skip!')
        # Only notify the uploader when the file page was actually tagged.
        if image_tagged and self.notification:
            try:
                self.put_mex_in_talk()
            except pywikibot.EditConflict:
                # retry once on a talk-page edit conflict
                pywikibot.output('Edit Conflict! Retrying...')
                try:
                    self.put_mex_in_talk()
                except Exception:
                    pywikibot.exception()
                    pywikibot.output(
                        'Another error... skipping the user...')
def uploadBotChangeFunction(self, reportPageText, upBotArray):
"""Detect the user that has uploaded the file through upload bot."""
regex = upBotArray[1]
results = re.findall(regex, reportPageText)
if results:
luser = results[0]
return luser
else:
# we can't find the user, report the problem to the bot
return upBotArray[0]
    def tag_image(self, put=True):
        """Add template to the Image page and find out the uploader.

        @param put: when False, only resolve the uploader without editing
        @return: True when the talk-page target was resolved, else False
        """
        # Get the image's description
        reportPageObject = pywikibot.FilePage(self.site, self.image_to_report)
        try:
            reportPageText = reportPageObject.get()
        except pywikibot.NoPage:
            pywikibot.output(self.imageName + ' has been deleted...')
            return False
        # You can use this function also to find only the user that
        # has upload the image (FixME: Rewrite a bit this part)
        if put:
            pywikibot.showDiff(reportPageText,
                               self.newtext + '\n' + reportPageText)
            pywikibot.output(self.commImage)
            try:
                reportPageObject.put(self.newtext + '\n' + reportPageText,
                                     summary=self.commImage)
            except pywikibot.LockedPage:
                pywikibot.output('File is locked. Skipping.')
                return False
        # reportPageObject ("paginetta") is the image page object.
        try:
            if reportPageObject == self.image and self.uploader:
                nick = self.uploader
            else:
                nick = reportPageObject.latest_file_info.user
        except pywikibot.PageRelatedError:
            # description page exists but no file version: API oddity
            pywikibot.output(
                'Seems that {} has only the description and not the file...'
                .format(self.image_to_report))
            repme = self.list_entry + "problems '''with the APIs'''"
            self.report_image(self.image_to_report, self.rep_page, self.com,
                              repme)
            return False
        upBots = i18n.translate(self.site, uploadBots)
        user = pywikibot.User(self.site, nick)
        luser = user.title(as_url=True)
        # If the uploader is a known upload bot, redirect the warning to
        # the original uploader extracted from the description.
        if upBots:
            for upBot in upBots:
                if upBot[0] == luser:
                    luser = self.uploadBotChangeFunction(reportPageText, upBot)
                    user = pywikibot.User(self.site, luser)
        self.talk_page = user.getUserTalkPage()
        self.luser = luser
        return True
    def put_mex_in_talk(self):
        """Post the warning on the uploader's talk page.

        Uses ``notification2`` (follow-up, no new section) when the bot
        was the last editor of the talk page, otherwise ``notification``
        under a fresh section header.  Optionally also sends an email.
        """
        commento2 = i18n.twtranslate(self.site.lang,
                                     'checkimages-source-notice-comment')
        emailPageName = i18n.translate(self.site, emailPageWithText)
        emailSubj = i18n.translate(self.site, emailSubject)
        if self.notification2:
            self.notification2 = self.notification2 % self.image_to_report
        else:
            self.notification2 = self.notification
        second_text = False
        # Getting the talk page's history, to check if there is another
        # advise...
        try:
            # testoattuale (Italian: "current text") = talk page content
            testoattuale = self.talk_page.get()
            history = self.talk_page.getLatestEditors(limit=10)
            latest_user = history[0]['user']
            pywikibot.output(
                'The latest user that has written something is: '
                + latest_user)
            if latest_user in self.bots:
                second_text = True
                # A block to prevent the second message if the bot also
                # welcomed users...
                if history[0]['timestamp'] == history[-1]['timestamp']:
                    second_text = False
        except pywikibot.IsRedirectPage:
            pywikibot.output(
                'The user talk is a redirect, trying to get the right talk...')
            try:
                self.talk_page = self.talk_page.getRedirectTarget()
                testoattuale = self.talk_page.get()
            except pywikibot.NoPage:
                second_text = False
                testoattuale = i18n.translate(self.site, empty)
        except pywikibot.NoPage:
            pywikibot.output('The user page is blank')
            second_text = False
            testoattuale = i18n.translate(self.site, empty)
        if self.commTalk:
            commentox = self.commTalk
        else:
            commentox = commento2
        if second_text:
            newText = '{}\n\n{}'.format(testoattuale, self.notification2)
        else:
            newText = '{0}\n\n== {1} ==\n{2}'.format(testoattuale, self.head,
                                                     self.notification)
        # Check maximum number of notifications for this talk page
        if (self.num_notify is not None
                and self.num_notify[self.talk_page.title()] == 0):
            pywikibot.output('Maximum notifications reached, skip.')
            return
        try:
            self.talk_page.put(newText, summary=commentox, minor=False)
        except pywikibot.LockedPage:
            pywikibot.output('Talk page blocked, skip.')
        else:
            # count the notification only when the edit succeeded
            if self.num_notify is not None:
                self.num_notify[self.talk_page.title()] -= 1
        if emailPageName and emailSubj:
            emailPage = pywikibot.Page(self.site, emailPageName)
            try:
                emailText = emailPage.get()
            except (pywikibot.NoPage, pywikibot.IsRedirectPage):
                return
            if self.sendemailActive:
                text_to_send = re.sub(r'__user-nickname__', r'{}'
                                      .format(self.luser), emailText)
                emailClass = pywikibot.User(self.site, self.luser)
                try:
                    emailClass.send_email(emailSubj, text_to_send)
                except NotEmailableError:
                    pywikibot.output('User is not mailable, aborted')
                    return
def regexGenerator(self, regexp, textrun):
"""Find page to yield using regex to parse text."""
regex = re.compile(r'{}'.format(regexp), re.UNICODE | re.DOTALL)
results = regex.findall(textrun)
for image in results:
yield pywikibot.FilePage(self.site, image)
def loadHiddenTemplates(self):
"""Function to load the white templates."""
# A template as {{en is not a license! Adding also them in the
# whitelist template...
for langK in Family.load('wikipedia').langs.keys():
self.hiddentemplates.add(pywikibot.Page(
self.site, 'Template:{}'.format(langK)))
# Hidden template loading
if self.pageHidden:
try:
pageHiddenText = pywikibot.Page(self.site,
self.pageHidden).get()
except (pywikibot.NoPage, pywikibot.IsRedirectPage):
pageHiddenText = ''
for element in self.load(pageHiddenText):
self.hiddentemplates.add(pywikibot.Page(self.site, element))
def important_image(self, listGiven):
"""
Get tuples of image and time, return the most used or oldest image.
@param listGiven: a list of tuples which hold seconds and FilePage
@type listGiven: list
@return: the most used or oldest image
@rtype: FilePage
"""
# find the most used image
inx_found = None # index of found image
max_usage = 0 # hold max amount of using pages
for num, element in enumerate(listGiven):
image = element[1]
image_used = len([page for page in image.usingPages()])
if image_used > max_usage:
max_usage = image_used
inx_found = num
if inx_found is not None:
return listGiven[inx_found][1]
# find the oldest image
sec, image = max(listGiven, key=lambda element: element[0])
return image
    def checkImageOnCommons(self):
        """Check whether an identical file (same SHA1) exists on Commons.

        @return: True to continue the license check, None to skip the file
        """
        pywikibot.output('Checking if [[{}]] is on commons...'
                         .format(self.imageName))
        try:
            hash_found = self.image.latest_file_info.sha1
        except pywikibot.NoPage:
            return  # Image deleted, no hash found. Skip the image.
        site = pywikibot.Site('commons', 'commons')
        commons_image_with_this_hash = next(
            iter(site.allimages(sha1=hash_found, total=1)), None)
        if commons_image_with_this_hash:
            servTMP = pywikibot.translate(self.site, serviceTemplates)
            templatesInTheImage = self.image.templates()
            # Service images are deliberate local copies; keep checking.
            if servTMP is not None:
                for template in servTMP:
                    if pywikibot.Page(self.site,
                                      template) in templatesInTheImage:
                        pywikibot.output(
                            "{} is on commons but it's a service image."
                            .format(self.imageName))
                        return True  # continue with the check-part
            pywikibot.output(self.imageName + ' is on commons!')
            if self.image.fileIsShared():
                pywikibot.output(
                    "But, the file doesn't exist on your project! Skip...")
                # We have to skip the check part for that image because
                # it's on commons but someone has added something on your
                # project.
                return
            # it.wiki convention: coat-of-arms ("stemma") files are kept
            if re.findall(r'\bstemma\b', self.imageName.lower()) and \
               self.site.code == 'it':
                pywikibot.output(
                    "{} has 'stemma' inside, means that it's ok."
                    .format(self.imageName))
                return True
            # It's not only on commons but the image needs a check
            # the second usually is a url or something like that.
            # Compare the two in equal way, both url.
            repme = ((self.list_entry
                      + "is also on '''Commons''': [[commons:File:%s]]")
                     % (self.imageName,
                        commons_image_with_this_hash.title(
                            with_ns=False)))
            if (self.image.title(as_url=True)
                    == commons_image_with_this_hash.title(as_url=True)):
                repme += ' (same name)'
            self.report_image(self.imageName, self.rep_page, self.com, repme,
                              addings=False)
        return True
    def checkImageDuplicated(self, duplicates_rollback):
        """Function to check the duplicated files.

        Tags every duplicate except the most used/oldest one, warns the
        uploader of the last tagged duplicate, and optionally reports the
        whole group on the log page.

        @param duplicates_rollback: how many past bot edits on a duplicate
            are tolerated before switching to report-only mode
        @return: True to continue the license check, None to skip the file
        """
        dupText = i18n.translate(self.site, duplicatesText)
        dupRegex = i18n.translate(self.site, duplicatesRegex)
        dupTalkText = i18n.translate(self.site, duplicates_user_talk_text)
        # Head of the message given to the author
        dupTalkHead = i18n.twtranslate(self.site, 'checkimages-doubles-head')
        # Comment while bot reports the problem in the uploader's talk
        dupComment_talk = i18n.twtranslate(self.site,
                                           'checkimages-doubles-talk-comment')
        # Comment used by the bot while it reports the problem in the image
        dupComment_image = i18n.twtranslate(self.site,
                                            'checkimages-doubles-file-comment')
        imagePage = pywikibot.FilePage(self.site, self.imageName)
        hash_found = imagePage.latest_file_info.sha1
        duplicates = list(self.site.allimages(sha1=hash_found))
        if not duplicates:
            return  # Error, image deleted, no hash found. Skip the image.
        if len(duplicates) > 1:
            xdict = {'en':
                     '%(name)s has {{PLURAL:count'
                     '|a duplicate! Reporting it'
                     '|%(count)s duplicates! Reporting them}}...'}
            pywikibot.output(i18n.translate('en', xdict,
                                            {'name': self.imageName,
                                             'count': len(duplicates) - 1}))
            # NOTE(review): if dupText/dupRegex are missing for this
            # project, only_report and Page_older_image below are never
            # assigned and the later references raise NameError — confirm.
            if dupText and dupRegex:
                time_image_list = []
                for dup_page in duplicates:
                    if (dup_page.title(as_url=True) != self.image.title(
                            as_url=True)
                            or self.timestamp is None):
                        try:
                            self.timestamp = (
                                dup_page.latest_file_info.timestamp)
                        except pywikibot.PageRelatedError:
                            continue
                    data = self.timestamp.timetuple()
                    data_seconds = time.mktime(data)
                    time_image_list.append([data_seconds, dup_page])
                # keep the most used (or oldest) duplicate untagged
                Page_older_image = self.important_image(time_image_list)
                older_page_text = Page_older_image.text
                # And if the images are more than two?
                string = ''
                images_to_tag_list = []
                for dup_page in duplicates:
                    if dup_page == Page_older_image:
                        # the most used or oldest image
                        # not report also this as duplicate
                        continue
                    try:
                        DupPageText = dup_page.text
                    except pywikibot.NoPage:
                        continue
                    if not (re.findall(dupRegex, DupPageText)
                            or re.findall(dupRegex, older_page_text)):
                        pywikibot.output(
                            '{} is a duplicate and has to be tagged...'
                            .format(dup_page))
                        images_to_tag_list.append(dup_page.title())
                        string += '* {0}\n'.format(
                            dup_page.title(as_link=True, textlink=True))
                    else:
                        pywikibot.output(
                            "Already put the dupe-template in the files's page"
                            " or in the dupe's page. Skip.")
                        return  # Ok - Let's continue the checking phase
                # true if the image are not to be tagged as dupes
                only_report = False
                # put only one image or the whole list according to the request
                if '__images__' in dupText:
                    text_for_the_report = dupText.replace(
                        '__images__',
                        '\n{0}* {1}\n'.format(
                            string,
                            Page_older_image.title(
                                as_link=True, textlink=True)))
                else:
                    text_for_the_report = dupText.replace(
                        '__image__',
                        Page_older_image.title(as_link=True, textlink=True))
                # Two iteration: report the "problem" to the user only once
                # (the last)
                if len(images_to_tag_list) > 1:
                    for image_to_tag in images_to_tag_list[:-1]:
                        fp = pywikibot.FilePage(self.site, image_to_tag)
                        already_reported_in_past = fp.revision_count(self.bots)
                        # if you want only one edit, the edit found should be
                        # more than 0 -> num - 1
                        if already_reported_in_past > duplicates_rollback - 1:
                            only_report = True
                            break
                        # Delete the image in the list where we're write on
                        text_for_the_report = re.sub(
                            r'\n\*\[\[:%s\]\]'
                            % re.escape(self.image_namespace + image_to_tag),
                            '', text_for_the_report)
                        self.report(text_for_the_report, image_to_tag,
                                    commImage=dupComment_image, unver=True)
                if len(images_to_tag_list) != 0 and not only_report:
                    fp = pywikibot.FilePage(self.site, images_to_tag_list[-1])
                    already_reported_in_past = fp.revision_count(self.bots)
                    from_regex = (r'\n\*\[\[:%s%s\]\]'
                                  % (self.image_namespace,
                                     re.escape(self.image.title(as_url=True))))
                    # Delete the image in the list where we're write on
                    text_for_the_report = re.sub(from_regex, '',
                                                 text_for_the_report)
                    # if you want only one edit, the edit found should be more
                    # than 0 -> num - 1
                    if already_reported_in_past > duplicates_rollback - 1 or \
                       not dupTalkText:
                        only_report = True
                    else:
                        self.report(
                            text_for_the_report, images_to_tag_list[-1],
                            dupTalkText
                            % (Page_older_image.title(with_ns=True),
                               string),
                            dupTalkHead, commTalk=dupComment_talk,
                            commImage=dupComment_image, unver=True)
            if self.duplicatesReport or only_report:
                if only_report:
                    repme = ((self.list_entry + 'has the following duplicates '
                              "('''forced mode'''):")
                             % self.image.title(as_url=True))
                else:
                    repme = (
                        (self.list_entry + 'has the following duplicates:')
                        % self.image.title(as_url=True))
                for dup_page in duplicates:
                    if (dup_page.title(as_url=True)
                            == self.image.title(as_url=True)):
                        # the image itself, not report also this as duplicate
                        continue
                    repme += '\n** [[:{}{}]]'.format(
                        self.image_namespace, dup_page.title(as_url=True))
                result = self.report_image(self.imageName, self.rep_page,
                                           self.com, repme, addings=False)
                if not result:
                    return True  # If Errors, exit (but continue the check)
            if Page_older_image.title() != self.imageName:
                # The image is a duplicate, it will be deleted. So skip the
                # check-part, useless
                return
        return True  # Ok - No problem. Let's continue the checking phase
    def report_image(self, image_to_report, rep_page=None, com=None,
                     rep_text=None, addings=True):
        """Report the files to the report page when needed.

        @param image_to_report: file title to insert into the report entry
        @param rep_page: report page title (defaults to self.rep_page)
        @param com: edit summary (defaults to self.com)
        @param rep_text: entry text; '%s' is filled with the file title
        @param addings: whether rep_text still needs the %-substitution
        @return: True when the entry was (considered) added, False when it
            was already present
        @raises LogIsFull: when the log page exceeds logFulNumber and
            logFullError is set
        """
        rep_page = rep_page or self.rep_page
        com = com or self.com
        rep_text = rep_text or self.list_entry + '~~~~~'
        if addings:
            # Adding the name of the image in the report if not done already
            rep_text = rep_text % image_to_report
        another_page = pywikibot.Page(self.site, rep_page)
        try:
            text_get = another_page.get()
        except pywikibot.NoPage:
            text_get = ''
        except pywikibot.IsRedirectPage:
            text_get = another_page.getRedirectTarget().get()
        # Don't care for differences inside brackets.
        end = rep_text.find('(', max(0, rep_text.find(']]')))
        if end < 0:
            end = None
        short_text = rep_text[rep_text.find('[['):end].strip()
        reported = True
        # Skip if the message is already there.
        if short_text in text_get:
            pywikibot.output('{} is already in the report page.'
                             .format(image_to_report))
            reported = False
        elif len(text_get) >= self.logFulNumber:
            if self.logFullError:
                raise LogIsFull(
                    'The log page ({}) is full! Please delete the old files '
                    'reported.'.format(another_page.title()))
            else:
                # NOTE(review): nothing is written here, yet `reported`
                # stays True — callers treat this as success; confirm.
                pywikibot.output(
                    'The log page ({}) is full! Please delete the old files '
                    ' reported. Skip!'.format(another_page.title()))
                # Don't report, but continue with the check
                # (we don't know if this is the first time we check this file
                # or not)
        else:
            # Adding the log
            another_page.put(text_get + rep_text, summary=com, force=True,
                             minor=False)
            pywikibot.output('...Reported...')
        return reported
    def takesettings(self):
        """Load the additional per-project settings from the wiki.

        Parses the settings page with SETTINGS_REGEX and stores the result
        in ``self.settingsData`` as a list of 9-item lists (or None when
        the page is missing, empty or malformed).
        """
        settingsPage = i18n.translate(self.site, page_with_settings)
        try:
            if not settingsPage:
                self.settingsData = None
            else:
                wikiPage = pywikibot.Page(self.site, settingsPage)
                self.settingsData = []
                try:
                    testo = wikiPage.get()
                    number = 1
                    for m in SETTINGS_REGEX.finditer(testo):
                        name = str(m.group(1))
                        find_tipe = str(m.group(2))
                        find = str(m.group(3))
                        imagechanges = str(m.group(4))
                        summary = str(m.group(5))
                        head = str(m.group(6))
                        text = str(m.group(7))
                        mexcatched = str(m.group(8))
                        tupla = [number, name, find_tipe, find, imagechanges,
                                 summary, head, text, mexcatched]
                        self.settingsData += [tupla]
                        number += 1
                    if not self.settingsData:
                        pywikibot.output(
                            "You've set wrongly your settings, please take a "
                            'look to the relative page. (run without them)')
                        self.settingsData = None
                except pywikibot.NoPage:
                    pywikibot.output("The settings' page doesn't exist!")
                    self.settingsData = None
        except pywikibot.Error:
            # Error in settings, run without it.
            pywikibot.output(
                'Problems with loading the settigs, run without them.')
            self.settingsData = None
            self.some_problem = False
        if not self.settingsData:
            self.settingsData = None
        # Real-Time page loaded
        if self.settingsData:
            pywikibot.output('>> Loaded the real-time page... <<')
        else:
            pywikibot.output('>> No additional settings found! <<')
    def load_licenses(self):
        """Load the list of the licenses.

        @return: list of license template pages allowed on this project
        @rtype: list of pywikibot.Page
        @raises i18n.TranslationError: when the project has no license
            category configured
        """
        catName = i18n.translate(self.site, category_with_licenses)
        if not catName:
            raise i18n.TranslationError(
                'No allowed licenses category provided in '
                '"category_with_licenses" dict for your project!')
        pywikibot.output('\nLoading the allowed licenses...\n')
        cat = pywikibot.Category(self.site, catName)
        list_licenses = list(cat.articles())
        if self.site.code == 'commons':
            # On Commons, maintenance tags live inside the license
            # category; filter them out.
            no_licenses_to_skip = pywikibot.Category(self.site,
                                                     'License-related tags')
            for license_given in no_licenses_to_skip.articles():
                if license_given in list_licenses:
                    list_licenses.remove(license_given)
        pywikibot.output('')
        # Add the licenses set in the default page as licenses to check
        if self.pageAllowed:
            try:
                pageAllowedText = pywikibot.Page(self.site,
                                                 self.pageAllowed).get()
            except (pywikibot.NoPage, pywikibot.IsRedirectPage):
                pageAllowedText = ''
            for nameLicense in self.load(pageAllowedText):
                pageLicense = pywikibot.Page(self.site, nameLicense)
                if pageLicense not in list_licenses:
                    # the list has wiki-pages
                    list_licenses.append(pageLicense)
        return list_licenses
def miniTemplateCheck(self, template):
"""Check if template is in allowed licenses or in licenses to skip."""
# the list_licenses are loaded in the __init__
# (not to load them multimple times)
if template in self.list_licenses:
self.license_selected = template.title(with_ns=False)
self.seems_ok = True
# let the last "fake" license normally detected
self.license_found = self.license_selected
return True
if template in self.hiddentemplates:
# if the whitetemplate is not in the images description, we don't
# care
try:
self.allLicenses.remove(template)
except ValueError:
return
else:
self.whiteTemplatesFound = True
def templateInList(self):
"""
Check if template is in list.
The problem is the calls to the Mediawiki system because they can be
pretty slow. While searching in a list of objects is really fast, so
first of all let's see if we can find something in the info that we
already have, then make a deeper check.
"""
for template in self.licenses_found:
result = self.miniTemplateCheck(template)
if result:
break
if not self.license_found:
for template in self.licenses_found:
if template.isRedirectPage():
template = template.getRedirectTarget()
result = self.miniTemplateCheck(template)
if result:
break
    def smartDetection(self):
        """
        Detect templates.

        The bot instead of checking if there's a simple template in the
        image's description, checks also if that template is a license or
        something else. In this sense this type of check is smart.

        @return: tuple (license_found, whiteTemplatesFound)
        """
        self.seems_ok = False
        self.license_found = None
        self.whiteTemplatesFound = False
        # matches the opening of a template use, e.g. '{{Name|' or '{{Name}}'
        regex_find_licenses = re.compile(
            r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)[|\n<}]', re.DOTALL)
        # matches only fully closed '{{Name}}' templates
        regex_are_licenses = re.compile(
            r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)\}\}', re.DOTALL)
        # NOTE(review): this loop runs at most once — every path reaches
        # the final return (or raises); presumably a leftover retry
        # wrapper. Confirm before relying on repeated iterations.
        while True:
            self.loadHiddenTemplates()
            self.licenses_found = self.image.templates()
            templatesInTheImageRaw = regex_find_licenses.findall(
                self.imageCheckText)
            if not self.licenses_found and templatesInTheImageRaw:
                # {{nameTemplate|something <- this is not a template, be sure
                # that we haven't catch something like that.
                licenses_TEST = regex_are_licenses.findall(self.imageCheckText)
                if not self.licenses_found and licenses_TEST:
                    raise pywikibot.Error(
                        "Invalid or broken templates found in the image's "
                        'page {}!'.format(self.image))
            self.allLicenses = []
            if not self.list_licenses:
                raise i18n.TranslationError(
                    'No allowed licenses found in "category_with_licenses" '
                    'category for your project!')
            # Found the templates ONLY in the image's description
            for template_selected in templatesInTheImageRaw:
                tp = pywikibot.Page(self.site, template_selected)
                for templateReal in self.licenses_found:
                    if (tp.title(as_url=True, with_ns=False).lower()
                            == templateReal.title(as_url=True,
                                                  with_ns=False).lower()):
                        if templateReal not in self.allLicenses:
                            self.allLicenses.append(templateReal)
                        break
            if self.licenses_found:
                self.templateInList()
                if not self.license_found and self.allLicenses:
                    # resolve redirects among the remaining candidates
                    self.allLicenses = [
                        template.getRedirectTarget()
                        if template.isRedirectPage() else template
                        for template in self.allLicenses if template.exists()]
                    if self.allLicenses:
                        self.license_found = self.allLicenses[0].title()
            # If it has "some_problem" it must check the additional settings.
            self.some_problem = False
            if self.settingsData:
                # use additional settings
                self.findAdditionalProblems()
            if self.some_problem:
                if self.mex_used in self.imageCheckText:
                    pywikibot.output('File already fixed. Skipping.')
                else:
                    pywikibot.output(
                        "The file's description for {} contains {}..."
                        .format(self.imageName, self.name_used))
                    if self.mex_used.lower() == 'default':
                        self.mex_used = self.unvertext
                    if self.imagestatus_used:
                        reported = True
                    else:
                        reported = self.report_image(self.imageName)
                    if reported:
                        self.report(self.mex_used, self.imageName,
                                    self.text_used, self.head_used, None,
                                    self.imagestatus_used, self.summary_used)
                    else:
                        pywikibot.output('Skipping the file...')
                self.some_problem = False
            else:
                if not self.seems_ok and self.license_found:
                    # a template matched but it is not an allowed license
                    rep_text_license_fake = ((self.list_entry
                                              + "seems to have a ''fake license'',"
                                              ' license detected:'
                                              ' <nowiki>%s</nowiki>') %
                                             (self.imageName,
                                              self.license_found))
                    printWithTimeZone(
                        '{} seems to have a fake license: {}, reporting...'
                        .format(self.imageName, self.license_found))
                    self.report_image(self.imageName,
                                      rep_text=rep_text_license_fake,
                                      addings=False)
                elif self.license_found:
                    pywikibot.output('[[%s]] seems ok, license found: {{%s}}...'
                                     % (self.imageName, self.license_found))
            return (self.license_found, self.whiteTemplatesFound)
def load(self, raw):
"""Load a list of objects from a string using regex."""
list_loaded = []
# I search with a regex how many user have not the talk page
# and i put them in a list (i find it more easy and secure)
regl = r"(\"|\')(.*?)\1(?:,|\])"
pl = re.compile(regl, re.UNICODE)
for xl in pl.finditer(raw):
word = xl.group(2).replace('\\\\', '\\')
if word not in list_loaded:
list_loaded.append(word)
return list_loaded
def skipImages(self, skip_number, limit):
"""Given a number of files, skip the first -number- files."""
# If the images to skip are more the images to check, make them the
# same number
if skip_number == 0:
pywikibot.output('\t\t>> No files to skip...<<')
return
if skip_number > limit:
skip_number = limit
# Print a starting message only if no images has been skipped
if not self.skip_list:
pywikibot.output(
i18n.translate(
'en',
'Skipping the first {{PLURAL:num|file|%(num)s files}}:\n',
{'num': skip_number}))
# If we still have pages to skip:
if len(self.skip_list) < skip_number:
pywikibot.output('Skipping {}...'.format(self.imageName))
self.skip_list.append(self.imageName)
if skip_number == 1:
pywikibot.output('')
return True
else:
pywikibot.output('')
@staticmethod
def wait(generator, wait_time):
"""
Skip the images uploaded before x seconds.
Let the users to fix the image's problem alone in the first x seconds.
"""
printWithTimeZone(
'Skipping the files uploaded less than {} seconds ago..'
.format(wait_time))
for page in generator:
image = pywikibot.FilePage(page)
try:
timestamp = image.latest_file_info.timestamp
except pywikibot.PageRelatedError:
continue
now = pywikibot.Timestamp.utcnow()
delta = now - timestamp
if delta.total_seconds() > wait_time:
yield image
else:
pywikibot.warning(
'Skipping {}, uploaded {} {} ago..'
.format(image.title(), delta.days, 'days')
if delta.days > 0
else (image.title(), delta.seconds, 'seconds'))
def isTagged(self):
"""Understand if a file is already tagged or not."""
# TODO: enhance and use textlib._MultiTemplateMatchBuilder
# Is the image already tagged? If yes, no need to double-check, skip
no_license = i18n.translate(self.site, txt_find)
if not no_license:
raise i18n.TranslationError(
'No no-license templates provided in "txt_find" dict '
'for your project!')
for i in no_license:
# If there are {{ use regex, otherwise no (if there's not the
# {{ may not be a template and the regex will be wrong)
if '{{' in i:
regexP = re.compile(
r'\{\{(?:template)?%s ?(?:\||\r?\n|\}|<|/) ?'
% i.split('{{')[1].replace(' ', '[ _]'), re.I)
result = regexP.findall(self.imageCheckText)
if result:
return True
elif i.lower() in self.imageCheckText:
return True
    def findAdditionalProblems(self):
        """Extract additional settings from configuration page.

        Walks self.settingsData (parsed by takesettings()) and, on a
        match against the image description, records the report texts in
        self.text_used / head_used / mex_used etc. and sets
        self.some_problem.
        """
        # In every tuple there's a setting configuration
        for tupla in self.settingsData:
            name = tupla[1]
            find_tipe = tupla[2]
            find = tupla[3]
            find_list = self.load(find)
            imagechanges = tupla[4]
            if imagechanges.lower() == 'false':
                imagestatus = False
            elif imagechanges.lower() == 'true':
                imagestatus = True
            else:
                # malformed flag: drop all settings and stop scanning
                pywikibot.error('Imagechanges set wrongly!')
                self.settingsData = None
                break
            summary = tupla[5]
            head_2 = tupla[6]
            # strip the '== ... ==' wiki markup from the section header
            if head_2.count('==') == 2:
                head_2 = re.findall(r'\s*== *(.+?) *==\s*', head_2)[0]
            text = tupla[7] % self.imageName
            mexCatched = tupla[8]
            for k in find_list:
                if find_tipe.lower() == 'findonly':
                    # 'findonly' requires the whole description to match
                    searchResults = re.findall(r'{}'.format(k.lower()),
                                               self.imageCheckText.lower())
                    if searchResults:
                        if searchResults[0] == self.imageCheckText.lower():
                            self.some_problem = True
                            self.text_used = text
                            self.head_used = head_2
                            self.imagestatus_used = imagestatus
                            self.name_used = name
                            self.summary_used = summary
                            self.mex_used = mexCatched
                            break
                elif find_tipe.lower() == 'find':
                    # 'find' matches anywhere in the description
                    if re.findall(r'{}'.format(k.lower()),
                                  self.imageCheckText.lower()):
                        self.some_problem = True
                        self.text_used = text
                        self.head_used = head_2
                        self.imagestatus_used = imagestatus
                        self.name_used = name
                        self.summary_used = summary
                        self.mex_used = mexCatched
                        continue
    def checkStep(self):
        """Check a single file page.

        Order of operations: load the description, skip already-tagged
        files, run the smart license detection, then tag/notify for bad
        extensions, empty descriptions or missing licenses.
        """
        # something = Minimal requirements for an image description.
        # If this fits, no tagging will take place
        # (if there aren't other issues)
        # MIT license is ok on italian wikipedia, let also this here
        # Don't put "}}" here, please. Useless and can give problems.
        something = ['{{']
        # Allowed extensions
        try:
            allowed_formats = self.site.siteinfo.get(
                'fileextensions', get_default=False)
        except KeyError:
            allowed_formats = []
        else:
            allowed_formats = [item['ext'].lower() for item in allowed_formats]
        brackets = False
        delete = False
        notification = None
        # get the extension from the image's name
        extension = self.imageName.split('.')[-1]
        # Load the notification messages
        HiddenTN = i18n.translate(self.site, HiddenTemplateNotification)
        self.unvertext = i18n.translate(self.site, n_txt)
        di = i18n.translate(self.site, delete_immediately)
        # The header of the Unknown extension's message.
        dih = i18n.twtranslate(self.site, 'checkimages-unknown-extension-head')
        # Text that will be add if the bot find a unknown extension.
        din = i18n.twtranslate(self.site,
                               'checkimages-unknown-extension-msg') + ' ~~~~'
        # Header that the bot will add if the image hasn't the license.
        nh = i18n.twtranslate(self.site, 'checkimages-no-license-head')
        # Summary of the delete immediately.
        dels = i18n.twtranslate(self.site, 'checkimages-deletion-comment')
        nn = i18n.translate(self.site, nothing_notification)
        smwl = i18n.translate(self.site, second_message_without_license)
        try:
            self.imageCheckText = self.image.get()
        except pywikibot.NoPage:
            pywikibot.output('Skipping {} because it has been deleted.'
                             .format(self.imageName))
            return
        except pywikibot.IsRedirectPage:
            pywikibot.output("Skipping {} because it's a redirect."
                             .format(self.imageName))
            return
        # Delete the fields where the templates cannot be loaded
        regex_nowiki = re.compile(r'<nowiki>(.*?)</nowiki>', re.DOTALL)
        regex_pre = re.compile(r'<pre>(.*?)</pre>', re.DOTALL)
        self.imageCheckText = regex_nowiki.sub('', self.imageCheckText)
        self.imageCheckText = regex_pre.sub('', self.imageCheckText)
        # Deleting the useless template from the description (before adding
        # sth in the image the original text will be reloaded, don't worry).
        if self.isTagged():
            printWithTimeZone('{} is already tagged...'.format(self.imageName))
            return
        # something is the array with {{, MIT License and so on.
        for a_word in something:
            if a_word in self.imageCheckText:
                # There's a template, probably a license
                brackets = True
        # Is the extension allowed? (is it an image or f.e. a .xls file?)
        if allowed_formats and extension.lower() not in allowed_formats:
            delete = True
        (license_found, hiddenTemplateFound) = self.smartDetection()
        # Here begins the check block.
        if brackets and license_found:
            # a template is present and it is a known license: nothing to do
            return
        elif delete:
            pywikibot.output('{} is not a file!'.format(self.imageName))
            if not di:
                pywikibot.output('No localized message given for '
                                 "'delete_immediately'. Skipping.")
                return
            # Some formatting for delete immediately template
            dels = dels % {'adding': di}
            di = '\n' + di
            # Modify summary text
            pywikibot.setAction(dels)
            canctext = di % extension
            notification = din % {'file': self.image.title(as_link=True,
                                                           textlink=True)}
            head = dih
            self.report(canctext, self.imageName, notification, head)
            return
        elif not self.imageCheckText.strip():  # empty image description
            pywikibot.output(
                "The file's description for {} does not contain a license "
                ' template!'.format(self.imageName))
            if hiddenTemplateFound and HiddenTN:
                notification = HiddenTN % self.imageName
            elif nn:
                notification = nn % self.imageName
            head = nh
            self.report(self.unvertext, self.imageName, notification, head,
                        smwl)
            return
        else:
            # templates exist but none of them is a recognized license
            pywikibot.output('{} has only text and not the specific '
                            'license...'.format(self.imageName))
            if hiddenTemplateFound and HiddenTN:
                notification = HiddenTN % self.imageName
            elif nn:
                notification = nn % self.imageName
            head = nh
            self.report(self.unvertext, self.imageName, notification, head,
                        smwl)
            return
def main(*args):
    """
    Process command line arguments and invoke bot.
    If args is an empty list, sys.argv is used.
    @param args: command line arguments
    @type args: str
    @return: False when the arguments were invalid, True otherwise.
    """
    # Command line configurable parameters
    repeat = True  # Restart after having check all the images?
    limit = 80  # How many images check?
    time_sleep = 30  # How many time sleep after the check?
    skip_number = 0  # How many images to skip before checking?
    waitTime = 0  # How many time sleep before the check?
    commonsActive = False  # Is there's an image with the same name at commons?
    normal = False  # Check the new images or use another generator?
    urlUsed = False  # Use the url-related function instead of the new-pages
    regexGen = False  # Use the regex generator
    duplicatesActive = False  # Use the duplicate option
    duplicatesReport = False  # Use the duplicate-report option
    max_user_notify = None
    sendemailActive = False  # Use the send-email
    logFullError = True  # Raise an error when the log is full
    generator = None
    unknown = []  # unknown parameters
    local_args = pywikibot.handle_args(args)
    site = pywikibot.Site()
    # Here below there are the local parameters.
    for arg in local_args:
        if arg.startswith('-limit'):
            if len(arg) == 6:
                limit = int(pywikibot.input(
                    'How many files do you want to check?'))
            else:
                limit = int(arg[7:])
        if arg.startswith(('-sleep', '-time')):
            if arg.startswith('-sleep'):
                length = len('-sleep')
            else:
                issue_deprecation_warning('-time', '-sleep', 2,
                                          ArgumentDeprecationWarning,
                                          since='20151209')
                length = len('-time')
            if len(arg) == length:
                time_sleep = int(pywikibot.input(
                    'How many seconds do you want runs to be apart?'))
            else:
                time_sleep = int(arg[length + 1:])
        elif arg == '-break':
            repeat = False
        elif arg == '-nologerror':
            logFullError = False
        elif arg == '-commons':
            commonsActive = True
        elif arg == '-duplicatesreport' or arg == '-duplicatereport':
            if arg == '-duplicatereport':
                issue_deprecation_warning('-duplicatereport',
                                          '-duplicatesreport',
                                          2, ArgumentDeprecationWarning,
                                          since='20161116')
            duplicatesReport = True
        elif arg.startswith('-duplicates'):
            duplicatesActive = True
            if len(arg) == 11:
                duplicates_rollback = 1
            elif len(arg) > 11:
                duplicates_rollback = int(arg[12:])
        elif arg.startswith('-maxusernotify'):
            # BUGFIX: '-maxusernotify' is 14 characters long, not 13. With the
            # previous length checks (== 13 / > 13) the bare option and
            # '-maxusernotify:<n>' both fell into int(arg[14:]), crashing on
            # the empty string or on the leading ':'.
            if len(arg) == 14:
                max_user_notify = int(pywikibot.input(
                    'What should be the maximum number of notifications per '
                    'user per check?'))
            elif len(arg) > 14:
                max_user_notify = int(arg[15:])
        elif arg == '-sendemail':
            sendemailActive = True
        elif arg.startswith('-skip'):
            if len(arg) == 5:
                skip_number = int(pywikibot.input(
                    'How many files do you want to skip?'))
            elif len(arg) > 5:
                skip_number = int(arg[6:])
        elif arg.startswith('-wait'):
            if len(arg) == 5:
                waitTime = int(pywikibot.input(
                    'How many time do you want to wait before checking the '
                    'files?'))
            elif len(arg) > 5:
                waitTime = int(arg[6:])
        elif arg.startswith('-start'):
            if len(arg) == 6:
                firstPageTitle = pywikibot.input(
                    'From which page do you want to start?')
            elif len(arg) > 6:
                firstPageTitle = arg[7:]
            namespaces = tuple(ns + ':'
                               for ns in site.namespace(Namespace.FILE, all))
            if firstPageTitle.startswith(namespaces):
                firstPageTitle = firstPageTitle.split(':')[1]
            generator = site.allimages(start=firstPageTitle)
            repeat = False
        elif arg.startswith('-page'):
            if len(arg) == 5:
                regexPageName = str(pywikibot.input(
                    'Which page do you want to use for the regex?'))
            elif len(arg) > 5:
                regexPageName = str(arg[6:])
            repeat = False
            regexGen = True
        elif arg.startswith('-url'):
            if len(arg) == 4:
                regexPageUrl = str(pywikibot.input(
                    'Which url do you want to use for the regex?'))
            elif len(arg) > 4:
                regexPageUrl = str(arg[5:])
            urlUsed = True
            repeat = False
            regexGen = True
        elif arg.startswith('-regex'):
            if len(arg) == 6:
                regexpToUse = str(pywikibot.input(
                    'Which regex do you want to use?'))
            elif len(arg) > 6:
                regexpToUse = str(arg[7:])
            generator = 'regex'
            repeat = False
        elif arg.startswith('-cat'):
            if len(arg) == 4:
                catName = str(pywikibot.input('In which category do I work?'))
            elif len(arg) > 4:
                catName = str(arg[5:])
            catSelected = pywikibot.Category(site,
                                             'Category:{}'.format(catName))
            generator = catSelected.articles(namespaces=[6])
            repeat = False
        elif arg.startswith('-ref'):
            if len(arg) == 4:
                refName = str(pywikibot.input(
                    'The references of what page should I parse?'))
            elif len(arg) > 4:
                refName = str(arg[5:])
            ref = pywikibot.Page(site, refName)
            generator = ref.getReferences(namespaces=[6])
            repeat = False
        else:
            unknown.append(arg)
    if not generator:
        normal = True
    skip = skip_number > 0
    # Ensure that the bot is localized and right command args are given
    if site.code not in project_inserted:
        additional_text = ('Your project is not supported by this script.\n'
                           'To allow your project in the script you have to '
                           'add a localization into the script and add your '
                           'project to the "project_inserted" list!')
    else:
        additional_text = ''
    if unknown or additional_text:
        suggest_help(unknown_parameters=unknown,
                     additional_text=additional_text)
        return False
    # Reading the log of the new images if another generator is not given.
    if normal:
        if limit == 1:
            pywikibot.output('Retrieving the latest file for checking...')
        else:
            pywikibot.output('Retrieving the latest {} files for checking...'
                             .format(limit))
    while True:
        # Defing the Main Class.
        Bot = checkImagesBot(site, sendemailActive=sendemailActive,
                             duplicatesReport=duplicatesReport,
                             logFullError=logFullError,
                             max_user_notify=max_user_notify)
        if normal:
            generator = pg.NewimagesPageGenerator(total=limit, site=site)
        # if urlUsed and regexGen, get the source for the generator
        if urlUsed and regexGen:
            textRegex = site.getUrl(regexPageUrl, no_hostname=True)
        # Not an url but a wiki page as "source" for the regex
        elif regexGen:
            pageRegex = pywikibot.Page(site, regexPageName)
            try:
                textRegex = pageRegex.get()
            except pywikibot.NoPage:
                pywikibot.output("{} doesn't exist!".format(pageRegex.title()))
                textRegex = ''  # No source, so the bot will quit later.
        # If generator is the regex' one, use your own Generator using an url
        # or page and a regex.
        if generator == 'regex' and regexGen:
            generator = Bot.regexGenerator(regexpToUse, textRegex)
        Bot.takesettings()
        if waitTime > 0:
            generator = Bot.wait(generator, waitTime)
        for image in generator:
            # Setting the image for the main class
            Bot.setParameters(image)
            if skip:
                skip = Bot.skipImages(skip_number, limit)
                if skip:
                    continue
            # Check on commons if there's already an image with the same name
            if commonsActive and site.family.name != 'commons':
                if not Bot.checkImageOnCommons():
                    continue
            # Check if there are duplicates of the image on the project
            if duplicatesActive:
                if not Bot.checkImageDuplicated(duplicates_rollback):
                    continue
            Bot.checkStep()
        if repeat:
            pywikibot.output('Waiting for {} seconds,'.format(time_sleep))
            pywikibot.sleep(time_sleep)
        else:
            break
    return True
if __name__ == '__main__':
    # Run the bot and report the wall-clock duration on a normal exit
    # (or on Ctrl-C, which still counts as a partial run).
    started_at = time.time()
    outcome = False
    try:
        outcome = main()
    except KeyboardInterrupt:
        outcome = True
    finally:
        if outcome is not False:
            elapsed = int(time.time() - started_at)
            pywikibot.output('Execution time: {} seconds\n'.format(elapsed))
| 42.697421 | 79 | 0.547479 |
d65eadee8493053f1dab6eb56c346784004f8bbf | 2,451 | py | Python | post/views.py | sasada033/blog | 46d00e1605fa77b36a6a4d0d5644e717176b47ba | [
"MIT"
] | 1 | 2020-06-12T06:53:07.000Z | 2020-06-12T06:53:07.000Z | post/views.py | sasada033/blog | 46d00e1605fa77b36a6a4d0d5644e717176b47ba | [
"MIT"
] | 13 | 2020-06-07T06:21:51.000Z | 2022-01-13T02:55:09.000Z | post/views.py | sasada033/blog | 46d00e1605fa77b36a6a4d0d5644e717176b47ba | [
"MIT"
] | null | null | null | import logging
from django.views import generic
from django.contrib import messages
from django.db.models import Q
from django.urls import reverse_lazy
from django.shortcuts import get_object_or_404
from .models import Post
from .forms import PostSearchForm, InquiryForm
from taggit.models import Tag
logger = logging.getLogger(__name__)
class IndexView(generic.ListView):
    """Paginated list of public posts; also serves /trend/, tag and keyword views."""
    model = Post
    # Only public posts; select/prefetch the author and tags to avoid N+1 queries.
    queryset = Post.objects.filter(is_public=True).select_related('user',).prefetch_related('tags',)
    ordering = '-created_at'
    paginate_by = 5
    template_name = 'post/index.html'
    def get_queryset(self):
        """Filter the base queryset by trend ordering, tag, or search keyword.

        Tag filtering takes precedence over the keyword search; both flash a
        message showing the match count.
        """
        queryset = super().get_queryset()
        form = PostSearchForm(self.request.GET or None)
        tag = self.kwargs.get('tag')
        if self.request.path == '/trend/':
            # Trend page: most viewed first instead of newest first.
            queryset = queryset.order_by('-page_view')
        if tag:
            # 404 if the tag itself does not exist.
            queryset = queryset.filter(tags=get_object_or_404(Tag, name=tag))
            messages.success(self.request, '「{}」の記事 - {}件'.format(tag, queryset.count()))
            return queryset
        elif form.is_valid():
            key_word = form.cleaned_data.get('key_word')
            if key_word:
                # Case-insensitive match on title OR body.
                queryset = queryset.filter(
                    Q(title__icontains=key_word) | Q(content__icontains=key_word)
                )
                messages.success(self.request, '「{}」の検索結果 - {}件'.format(key_word, queryset.count()))
        return queryset
    def get_context_data(self, **kwargs):
        """Expose `is_top`/`is_trend` flags so the template can adapt its header."""
        context = super().get_context_data(**kwargs)
        if not self.kwargs.get('tag') and not self.request.GET.get('key_word'):
            context['is_top'] = True
        if self.request.path == '/trend/':
            context['is_trend'] = True
        return context
class PostDetailView(generic.DetailView):
    """Display a single post (looked up by the URL's pk/slug, per DetailView)."""
    model = Post
    template_name = 'post/detail.html'
class InquiryView(generic.FormView):
    """Contact form: emails the site admin and redirects back to the form."""
    template_name = 'post/inquiry.html'
    form_class = InquiryForm
    success_url = reverse_lazy('post:inquiry')
    def form_valid(self, form):
        """Send the inquiry email, flash a confirmation, and log the sender."""
        form.send_email(self.request)
        messages.success(self.request, '管理人宛にメールを送信しました。お問い合わせありがとうございます。')
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info('Inquiry sent by %s', form.cleaned_data['name'])
        return super().form_valid(form)
class PrivacyPolicyView(generic.TemplateView):
    """Static privacy-policy page."""
    template_name = 'post/privacy.html'
class ProfileView(generic.TemplateView):
    """Static author-profile page."""
    template_name = 'post/profile.html'
| 31.025316 | 100 | 0.662995 |
8b4682e403201f5c51b0ed5c90de520a4a07e1f1 | 4,339 | py | Python | python/Xls2Xml.py | XiaoyueCai/Localizable.strings2Excel | c9664ba43aa1e6e374f33f31e5de52ddebca991e | [
"MIT"
] | null | null | null | python/Xls2Xml.py | XiaoyueCai/Localizable.strings2Excel | c9664ba43aa1e6e374f33f31e5de52ddebca991e | [
"MIT"
] | null | null | null | python/Xls2Xml.py | XiaoyueCai/Localizable.strings2Excel | c9664ba43aa1e6e374f33f31e5de52ddebca991e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import re
import time
from optparse import OptionParser
from Log import Log
from XlsFileUtil import XlsFileUtil
from XmlFileUtil import XmlFileUtil
def addParser():
parser = OptionParser()
parser.add_option("-f", "--fileDir",
help="Xls files directory.",
metavar="fileDir")
parser.add_option("-t", "--targetDir",
help="The directory where the xml files will be saved.",
metavar="targetDir")
parser.add_option("-e", "--excelStorageForm",
type="string",
default="single",
help="The excel(.xls) file storage forms including single(single file), multiple(multiple files), default is single.",
metavar="excelStorageForm")
parser.add_option("-a", "--additional",
help="additional info.",
metavar="additional")
(options, args) = parser.parse_args()
Log.info("options: %s, args: %s" % (options, args))
return options
def convertFromSingleForm(options, fileDir, targetDir):
def convertFromSingleForm(options, fileDir, targetDir):
    # "single" layout: each .xls file holds ONE sheet whose first column is
    # the string keys and each further column is one language (language code
    # in the header row). One values-<lang>/strings.xml is written per column.
    # NOTE(review): paths are joined as fileDir + "/" + file, so .xls files in
    # subdirectories found by os.walk would resolve incorrectly — presumably a
    # flat directory is assumed; confirm before reusing.
    for _, _, filenames in os.walk(fileDir):
        xlsFilenames = [fi for fi in filenames if fi.endswith(".xls")]
        for file in xlsFilenames:
            xlsFileUtil = XlsFileUtil(fileDir+"/"+file)
            table = xlsFileUtil.getTableByIndex(0)
            firstRow = table.row_values(0)
            keys = table.col_values(0)
            del keys[0]  # drop the header cell above the key column
            for index in range(len(firstRow)):
                if index <= 0:
                    continue  # column 0 holds the keys, not translations
                languageName = firstRow[index]
                values = table.col_values(index)
                del values[0]  # drop the language-code header cell
                # Map iOS-style language codes to Android resource qualifiers.
                if languageName == "zh-Hans":
                    languageName = "zh-rCN"
                elif languageName == "zh-Hant":
                    languageName = "zh-rTW"
                else:
                    # Generic "xx-YY" -> "xx-rYY" (Android region qualifier).
                    match = re.match(r"^([a-z]{2})-([A-Z]{2})$", languageName)
                    if match:
                        languageName = match.group(1) + "-r" + match.group(2)
                path = targetDir + "/values-"+languageName+"/"
                if languageName == 'en':
                    path = targetDir + "/values/"  # English is the default locale
                filename = "strings.xml"
                XmlFileUtil.writeToFile(
                    keys, values, path, filename, options.additional)
    print "Convert %s successfully! you can xml files in %s" % (
        fileDir, targetDir)
for _, _, filenames in os.walk(fileDir):
xlsFilenames = [fi for fi in filenames if fi.endswith(".xls")]
for file in xlsFilenames:
xlsFileUtil = XlsFileUtil(fileDir+"/"+file)
languageName = file.replace(".xls", "")
if languageName == "zh-Hans":
languageName = "zh-rCN"
path = targetDir + "/values-"+languageName+"/"
if languageName == 'en':
path = targetDir + "/values/"
if not os.path.exists(path):
os.makedirs(path)
for table in xlsFileUtil.getAllTables():
keys = table.col_values(0)
values = table.col_values(1)
filename = table.name.replace(".strings", ".xml")
XmlFileUtil.writeToFile(
keys, values, path, filename, options.additional)
print "Convert %s successfully! you can xml files in %s" % (
fileDir, targetDir)
def startConvert(options):
    # Validate the options, create a timestamped output directory, then
    # dispatch to the single-file or multiple-file conversion strategy.
    fileDir = options.fileDir
    targetDir = options.targetDir
    print "Start converting"
    if fileDir is None:
        print "xls files directory can not be empty! try -h for help."
        return
    if targetDir is None:
        print "Target file path can not be empty! try -h for help."
        return
    # Unique output folder per run so repeated conversions never overwrite.
    targetDir = targetDir + "/xls-files-to-xml_" + \
        time.strftime("%Y%m%d_%H%M%S")
    if not os.path.exists(targetDir):
        os.makedirs(targetDir)
    if options.excelStorageForm == "single":
        convertFromSingleForm(options, fileDir, targetDir)
    else:
        convertFromMultipleForm(options, fileDir, targetDir)
def main():
    """Parse the command-line options and run the xls -> xml conversion."""
    options = addParser()
    startConvert(options)


# Guard the entry point so importing this module no longer triggers a
# conversion run (previously main() was called unconditionally at import).
if __name__ == "__main__":
    main()
| 32.62406 | 140 | 0.552892 |
38cad524d55bc1531a67f4a22f97b21665fc67d5 | 5,544 | py | Python | noxfile.py | mlanser/sciLab-displayMod | acc77c17d9bb389dea96f35b140595d7fc033548 | [
"MIT"
] | null | null | null | noxfile.py | mlanser/sciLab-displayMod | acc77c17d9bb389dea96f35b140595d7fc033548 | [
"MIT"
] | null | null | null | noxfile.py | mlanser/sciLab-displayMod | acc77c17d9bb389dea96f35b140595d7fc033548 | [
"MIT"
] | null | null | null | """Nox sessions."""
import shutil
import sys
from pathlib import Path
from textwrap import dedent
import nox
# nox-poetry is required for these sessions; abort with install instructions
# rather than a bare ImportError if it is missing.
try:
    from nox_poetry import Session
    from nox_poetry import session
except ImportError:
    message = f"""\
Nox failed to import the 'nox-poetry' package.
Please install it using the following command:
{sys.executable} -m pip install nox-poetry"""
    raise SystemExit(dedent(message)) from None
# Import name of the package under test, used by typeguard/xdoctest sessions.
package = "scilab_displaymod"
# Interpreters the matrix sessions (tests, mypy, ...) run against.
python_versions = ["3.10", "3.9", "3.8", "3.7"]
nox.needs_version = ">= 2021.6.6"
# Sessions run by default when `nox` is invoked with no -s flag.
nox.options.sessions = (
    "pre-commit",
    "safety",
    "mypy",
    "tests",
    "typeguard",
    "xdoctest",
    "docs-build",
)
def activate_virtualenv_in_precommit_hooks(session: Session) -> None:
    """Activate virtualenv in hooks installed by pre-commit.

    This function patches git hooks installed by pre-commit to activate the
    session's virtual environment. This allows pre-commit to locate hooks in
    that environment when invoked from git.

    Args:
        session: The Session object.
    """
    assert session.bin is not None  # noqa: S101
    virtualenv = session.env.get("VIRTUAL_ENV")
    if virtualenv is None:
        # Session runs outside a virtualenv (e.g. --no-venv): nothing to patch.
        return
    hookdir = Path(".git") / "hooks"
    if not hookdir.is_dir():
        return
    for hook in hookdir.iterdir():
        # Skip git's sample hooks and anything that is not a regular file.
        if hook.name.endswith(".sample") or not hook.is_file():
            continue
        text = hook.read_text()
        bindir = repr(session.bin)[1:-1]  # strip quotes
        # Only patch hooks that reference this session's bin directory
        # (comparison is case-insensitive on case-insensitive filesystems,
        # detected via Path("A") == Path("a")).
        if not (
            Path("A") == Path("a") and bindir.lower() in text.lower() or bindir in text
        ):
            continue
        lines = text.splitlines()
        # Only Python-shebang hooks can take the injected os.environ header.
        if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
            continue
        # Inject VIRTUAL_ENV and prepend the session bin dir to PATH right
        # after the shebang line.
        header = dedent(
            f"""\
            import os
            os.environ["VIRTUAL_ENV"] = {virtualenv!r}
            os.environ["PATH"] = os.pathsep.join((
                {session.bin!r},
                os.environ.get("PATH", ""),
            ))
            """
        )
        lines.insert(1, header)
        hook.write_text("\n".join(lines))
@session(name="pre-commit", python="3.10")
def precommit(session: Session) -> None:
"""Lint using pre-commit."""
args = session.posargs or ["run", "--all-files", "--show-diff-on-failure"]
session.install(
"black",
"darglint",
"flake8",
"flake8-bandit",
"flake8-bugbear",
"flake8-docstrings",
"flake8-rst-docstrings",
"pep8-naming",
"pre-commit",
"pre-commit-hooks",
"reorder-python-imports",
)
session.run("pre-commit", *args)
if args and args[0] == "install":
activate_virtualenv_in_precommit_hooks(session)
@session(python="3.10")
def safety(session: Session) -> None:
"""Scan dependencies for insecure packages."""
requirements = session.poetry.export_requirements()
session.install("safety")
session.run("safety", "check", "--full-report", f"--file={requirements}")
@session(python=python_versions)
def mypy(session: Session) -> None:
    """Static type-check the sources (and, by default, this noxfile)."""
    targets = session.posargs if session.posargs else ["src", "tests", "docs/conf.py"]
    session.install(".")
    session.install("mypy", "pytest")
    session.run("mypy", *targets)
    if session.posargs:
        return
    # noxfile.py is checked against the interpreter that is running Nox.
    session.run("mypy", f"--python-executable={sys.executable}", "noxfile.py")
@session(python=python_versions)
def tests(session: Session) -> None:
    """Run pytest under coverage; queue the report session when interactive."""
    test_deps = ("coverage[toml]", "pytest", "pygments")
    session.install(".")
    session.install(*test_deps)
    try:
        session.run("coverage", "run", "--parallel", "-m", "pytest", *session.posargs)
    finally:
        if session.interactive:
            session.notify("coverage", posargs=[])
@session
def coverage(session: Session) -> None:
    """Combine parallel coverage data files (if any) and report."""
    session.install("coverage[toml]")
    if not session.posargs and any(Path().glob(".coverage.*")):
        session.run("coverage", "combine")
    session.run("coverage", *(session.posargs or ["report"]))
@session(python=python_versions)
def typeguard(session: Session) -> None:
    """Run the test suite with Typeguard's runtime type checks enabled."""
    session.install(".")
    session.install("pytest", "typeguard", "pygments")
    typeguard_flag = f"--typeguard-packages={package}"
    session.run("pytest", typeguard_flag, *session.posargs)
@session(python=python_versions)
def xdoctest(session: Session) -> None:
    """Execute the docstring examples with xdoctest."""
    command = session.posargs if session.posargs else ["all"]
    session.install(".")
    session.install("xdoctest[colors]")
    session.run("python", "-m", "xdoctest", package, *command)
@session(name="docs-build", python="3.10")
def docs_build(session: Session) -> None:
"""Build the documentation."""
args = session.posargs or ["docs", "docs/_build"]
session.install(".")
session.install("sphinx", "sphinx-click", "sphinx-rtd-theme")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-build", *args)
@session(python="3.10")
def docs(session: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = session.posargs or ["--open-browser", "docs", "docs/_build"]
session.install(".")
session.install("sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-rtd-theme")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
| 28.430769 | 87 | 0.619228 |
567b5d227a54d00399e15385d95794514f9579bc | 26,765 | py | Python | streamdeckfs/entities/key.py | twidi/streamdeckify | 5795b0610f950aab67543f47e2e5d20e99fc88d7 | [
"MIT"
] | 9 | 2021-07-29T18:35:28.000Z | 2022-01-25T10:10:22.000Z | streamdeckfs/entities/key.py | twidi/streamdeckify | 5795b0610f950aab67543f47e2e5d20e99fc88d7 | [
"MIT"
] | 31 | 2021-05-25T15:33:08.000Z | 2021-06-06T19:16:12.000Z | streamdeckfs/entities/key.py | twidi/streamdeckify | 5795b0610f950aab67543f47e2e5d20e99fc88d7 | [
"MIT"
] | 1 | 2022-02-16T05:27:41.000Z | 2022-02-16T05:27:41.000Z | #
# Copyright (C) 2021 Stephane "Twidi" Angel <s.angel@twidi.com>
#
# This file is part of StreamDeckFS
# (see https://github.com/twidi/streamdeckfs).
#
# License: MIT, see https://opensource.org/licenses/MIT
#
import logging
import re
from dataclasses import dataclass
from fnmatch import fnmatch
from itertools import product
from time import time
from typing import Tuple
import networkx
from cached_property import cached_property
from PIL import Image
from StreamDeck.ImageHelpers import PILHelper
from ..common import file_flags, logger
from .base import (
FILTER_DENY,
NOT_HANDLED,
Entity,
EntityDir,
ParseFilenameResult,
versions_dict_factory,
)
from .page import PageContent
@dataclass(eq=False)
class Key(EntityDir, PageContent):
path_glob = "KEY_*"
main_part_re = re.compile(r"^(?P<kind>KEY)_(?:ROW_)?(?P<row>\d+)(?:_COL_|,)(?P<col>\d+)$")
main_part_compose = lambda args: f'KEY_{args["row"]},{args["col"]}'
get_main_args = lambda self: {"row": self.row, "col": self.col}
template_re = re.compile(r"^KEY_(?P<row_start>\d+)-(?P<row_end>\d+),(?P<col_start>\d+)-(?P<col_end>\d+)$")
allowed_args = EntityDir.allowed_args | {
"ref": re.compile(r"^(?P<arg>ref)=(?P<page>.*):(?P<key>.*)$"), # we'll use current row,col if no key given
}
identifier_attr = "key"
parent_container_attr = "keys"
key: Tuple[int, int]
filter_to_identifier = lambda filter: tuple(int(val) for val in filter.split(","))
template_identifier_re = re.compile(r"^(\d+)-(\d+),(\d+)-(\d+)$")
@classmethod
def filter_to_identifier(cls, filter):
if filter.count(",") == 1:
if (nb_dashses := filter.count("-")) == 0:
return tuple(int(val) for val in filter.split(","))
if nb_dashses == 2:
raw_start, row_end, col_start, col_end = map(
int, cls.template_identifier_re.match(filter.replace(" ", "")).groups()
)
return ((raw_start, row_end), (col_start, col_end))
raise ValueError
    @cached_property
    def event_class(self):
        """Entity class used for this key's event files (KEY_*/ON_*)."""
        # Local import, presumably to avoid a circular import with the
        # entities package — confirm.
        from . import KeyEvent
        return KeyEvent
    @cached_property
    def var_class(self):
        """Entity class used for this key's variable files."""
        from . import KeyVar
        return KeyVar
    def __post_init__(self):
        """Initialize per-key runtime state after dataclass construction."""
        super().__post_init__()
        self.compose_image_cache = None  # (PIL image, native bytes) cache; see compose_image()
        self.pressed_at = None
        self.layers = versions_dict_factory()  # layer number -> KeyImageLayer versions
        self.text_lines = versions_dict_factory()  # line number -> KeyTextLine versions
        self.rendered_overlay = None  # overlay level the key is currently rendered at, or None
        self.template_for = None  # list of (row, col) keys this entry templates, or None
    @property
    def row(self):
        # First component of the (row, col) identifier.
        return self.key[0]
    @property
    def col(self):
        # Second component of the (row, col) identifier.
        return self.key[1]
    @property
    def width(self):
        # Pixel width of one key image, as reported by the deck.
        return self.deck.key_width
    @property
    def height(self):
        # Pixel height of one key image, as reported by the deck.
        return self.deck.key_height
    @property
    def str(self):
        # Short human-readable label used in log messages.
        return f'KEY {self.key} ({self.name}{", disabled" if self.disabled else ""})'
    def __str__(self):
        return f"{self.page}, {self.str}"
    def is_renderable(self, allow_disabled=False):
        # Template entries are never rendered themselves; only the keys they
        # expand to are.
        return self.template_for is None and super().is_renderable(allow_disabled)
    @classmethod
    def convert_args(cls, main, args):
        """Forward `template_for` from the parsed main part into the final args."""
        final_args = super().convert_args(main, args)
        if "template_for" in main:
            final_args["template_for"] = main["template_for"]
        return final_args
    @classmethod
    def convert_main_args(cls, args):
        """Cast row/col to int for concrete keys (templates keep their ranges)."""
        if (args := super().convert_main_args(args)) is None:
            return None
        if "template_for" not in args:
            args["row"] = int(args["row"])
            args["col"] = int(args["col"])
        return args
    def get_raw_args(self, available_vars, parent=None):
        # `template_for` is derived from the directory name, not a real arg,
        # so it must not leak into the recomposed raw args.
        main, args = super().get_raw_args(available_vars, parent)
        main.pop("template_for", None)
        return main, args
    @classmethod
    def parse_main_part(cls, main_part, parent):
        """Parse `KEY_r1-r2,c1-c2` template names; defer plain keys to super().

        Raises ValueError when the rectangle falls outside the deck grid.
        """
        if not (match := cls.template_re.match(main_part)):
            return super().parse_main_part(main_part, parent)
        row_start, row_end, col_start, col_end = map(int, match.groups())
        # Ranges are inclusive and 1-based; both must fit the deck dimensions.
        if not (
            1 <= row_start <= row_end <= parent.deck.nb_rows and 1 <= col_start <= col_end <= parent.deck.nb_cols
        ):
            raise ValueError
        return {
            "kind": "KEY",
            "row": (row_start, row_end),
            "col": (col_start, col_end),
            # Every concrete (row, col) the template expands to.
            "template_for": list(product(range(row_start, row_end + 1), range(col_start, col_end + 1))),
        }
    @classmethod
    def parse_filename(cls, name, is_virtual, parent, available_vars):
        """Parse a key directory name, rejecting out-of-grid keys.

        Also rejects a key referencing a template it does not belong to.
        """
        parsed = super().parse_filename(name, is_virtual, parent, available_vars)
        if (main := parsed.main) is not None:
            if "template_for" not in main:
                # Concrete key: must be inside the deck grid (1-based).
                if (
                    main["row"] < 1
                    or main["row"] > parent.deck.nb_rows
                    or main["col"] < 1
                    or main["col"] > parent.deck.nb_cols
                ):
                    return ParseFilenameResult()
                # A key ref'ing a template must be one of the template's keys.
                if (
                    parsed.ref
                    and (template := parsed.ref.template)
                    and (key := (main["row"], main["col"])) not in template.template_for
                ):
                    logger.error(
                        f"[{parent}, KEY `{name}`] Key {key} does not belong to sequence `{template.template_repr}`"
                    )
                    return ParseFilenameResult()
        return parsed
    @classmethod
    def create_from_args(cls, path, parent, identifier, args, path_modified_at):
        """Create the key; mark it as a template when `template_for` is parsed."""
        obj = super().create_from_args(path, parent, identifier, args, path_modified_at)
        if "template_for" in args:
            obj.template_for = args["template_for"]
        return obj
    @classmethod
    def identifier_sort_key(cls, identifier):
        """Sort key: templates sort by their first (top-left) key."""
        if isinstance(identifier[0], tuple):
            # we have `((row_start, row_end), (col_start, col_end))` for a template, so we'll use the first key
            identifier = (identifier[0][0], identifier[1][0])
        return identifier
    @property
    def resolved_layers(self):
        """Own layers merged with the reference's (own layers win).

        Referenced layers that depend on variables are excluded: they were
        copied locally by copy_variable_references() so they re-evaluate
        against this key's variables.
        """
        if not self.reference:
            return self.layers
        layers = {}
        for num_layer, layer in self.layers.items():
            if layer:
                layers[num_layer] = layer
        for num_layer, layer in self.reference.resolved_layers.items():
            if num_layer not in layers and layer and not layer.uses_vars:
                layers[num_layer] = layer
        return layers
    @property
    def resolved_text_lines(self):
        """Own text lines merged with the reference's (same rules as layers)."""
        if not self.reference:
            return self.text_lines
        text_lines = {}
        for line, text_line in self.text_lines.items():
            if text_line:
                text_lines[line] = text_line
        for line, text_line in self.reference.resolved_text_lines.items():
            if line not in text_lines and text_line and not text_line.uses_vars:
                text_lines[line] = text_line
        return text_lines
    @property
    def resolved_events(self):
        # same as `EntityDir.resolved_events` with the addition of ` and not event.uses_vars`
        if not self.reference:
            return self.events
        events = {}
        for kind, event in self.events.items():
            if event:
                events[kind] = event
        for kind, event in self.reference.resolved_events.items():
            if kind not in events and event and not event.uses_vars:
                events[kind] = event
        return events
    def on_delete(self):
        """Cascade deletion to every version of every layer and text line."""
        for layer in self.iter_all_children_versions(self.layers):
            layer.on_delete()
        for text_line in self.iter_all_children_versions(self.text_lines):
            text_line.on_delete()
        super().on_delete()
    @classmethod
    def find_reference(cls, parent, ref_conf, main, args):
        """Resolve a `ref=page:key` argument; default the key to our own row,col."""
        final_ref_conf, page = cls.find_reference_page(parent, ref_conf)
        if not final_ref_conf.get("key"):
            final_ref_conf["key"] = str(f"{main['row']},{main['col']}")
        if not page:
            return final_ref_conf, None
        return final_ref_conf, page.find_key(final_ref_conf["key"])
    def get_waiting_references(self):
        """Return pending references (to our page) that resolve to this key."""
        return [
            (path, parent, ref_conf)
            for page, path, parent, ref_conf in self.iter_waiting_references_for_page(self.page)
            if (key := page.find_key(ref_conf["key"])) and key.key == self.key
        ]
    def read_directory(self):
        """Scan the key directory for layer and text-line files.

        Each found file is fed through on_file_change() as a CREATE event;
        deck-level filters can suppress whole categories. Finally, variable-
        dependent children are copied down from the referenced key, if any.
        """
        super().read_directory()
        if self.deck.filters.get("layers") != FILTER_DENY:
            from . import KeyImageLayer
            for image_file in sorted(self.path.glob(KeyImageLayer.path_glob)):
                self.on_file_change(
                    self.path,
                    image_file.name,
                    file_flags.CREATE | (file_flags.ISDIR if image_file.is_dir() else 0),
                    entity_class=KeyImageLayer,
                )
        if self.deck.filters.get("text_lines") != FILTER_DENY:
            from . import KeyTextLine
            for text_file in sorted(self.path.glob(KeyTextLine.path_glob)):
                self.on_file_change(
                    self.path,
                    text_file.name,
                    file_flags.CREATE | (file_flags.ISDIR if text_file.is_dir() else 0),
                    entity_class=KeyTextLine,
                )
        if self.reference:
            self.reference.copy_variable_references(self)
    def copy_variable_references(self, dest):
        """Copy our variable-dependent children onto `dest` (a referencing key).

        Children that use variables cannot be shared through the reference
        (they must re-evaluate against the destination's variables), so each
        one is duplicated as a reference entity on `dest` unless `dest`
        already defines it.
        """
        from . import KeyImageLayer, KeyTextLine
        for entity_class in (self.var_class, self.event_class, KeyTextLine, KeyImageLayer):
            data_dict = getattr(self, entity_class.parent_container_attr)
            dest_data_dict = getattr(dest, entity_class.parent_container_attr)
            to_copy = [entity for identifier, entity in data_dict.items() if entity and entity.uses_vars]
            if entity_class is self.var_class and len(to_copy) > 1:
                # varables can depend on each other, we need to sort them
                to_copy = reversed(  # the graph is reversed because we pass `{node: parent_nodes, ...}`
                    [
                        var
                        for var in networkx.topological_sort(
                            networkx.DiGraph(
                                incoming_graph_data={var: tuple(var.used_vars.values()) for var in to_copy}
                            )
                        )
                        if var in to_copy  # some vars are in `used_vars` but not `in to_copy`
                    ]
                )
            for entity in to_copy:
                if not dest_data_dict.get(entity.identifier):
                    entity.copy_as_reference(dest)
    @property
    def template_repr(self):
        # Canonical "r1-r2,c1-c2" form of a template key's ranges
        # (only meaningful when `template_for` is set, i.e. row/col are ranges).
        return f"{self.row[0]}-{self.row[1]},{self.col[0]}-{self.col[1]}"
    def get_ref_arg(self):
        # "page:row,col" for a concrete key, "page:r1-r2,c1-c2" for a template.
        if self.template_for is None:
            return f"{self.page.number}:{self.row},{self.col}"
        return f"{self.page.number}:{self.template_repr}"
    def on_file_change(
        self, directory, name, flags, modified_at=None, entity_class=None, available_vars=None, is_virtual=False
    ):
        """Handle a filesystem event inside this key's directory.

        Lets the superclass consume events it knows (vars, events, ...), then
        tries to interpret the file as an image layer or a text line, subject
        to the deck-level "layers"/"text_lines" filters. Files whose reference
        target does not exist yet are registered as waiting references.
        """
        if directory != self.path:
            # Event belongs to another directory; not ours to handle.
            return
        if available_vars is None:
            available_vars = self.get_available_vars()
        if (
            result := super().on_file_change(
                directory, name, flags, modified_at, entity_class, available_vars, is_virtual
            )
        ) is not NOT_HANDLED:
            return result
        path = self.path / name
        if (layer_filter := self.deck.filters.get("layers")) != FILTER_DENY:
            from . import KeyImageLayer
            if (not entity_class or entity_class is KeyImageLayer) and fnmatch(name, KeyImageLayer.path_glob):
                if (parsed := KeyImageLayer.parse_filename(name, is_virtual, self, available_vars)).main:
                    # A non-None filter restricts which layers are accepted.
                    if layer_filter is not None and not KeyImageLayer.args_matching_filter(
                        parsed.main, parsed.args, layer_filter
                    ):
                        return None
                    return self.on_child_entity_change(
                        path=path,
                        flags=flags,
                        entity_class=KeyImageLayer,
                        data_identifier=parsed.args["layer"],
                        args=parsed.args,
                        ref_conf=parsed.ref_conf,
                        ref=parsed.ref,
                        used_vars=parsed.used_vars,
                        used_env_vars=parsed.used_env_vars,
                        modified_at=modified_at,
                        is_virtual=is_virtual,
                    )
                elif not is_virtual and parsed.ref_conf:
                    # Reference target missing: retry when it appears.
                    KeyImageLayer.add_waiting_reference(self, path, parsed.ref_conf)
        if (text_line_filter := self.deck.filters.get("text_lines")) != FILTER_DENY:
            from . import KeyTextLine
            if (not entity_class or entity_class is KeyTextLine) and fnmatch(name, KeyTextLine.path_glob):
                if (parsed := KeyTextLine.parse_filename(name, is_virtual, self, available_vars)).main:
                    if text_line_filter is not None and not KeyTextLine.args_matching_filter(
                        parsed.main, parsed.args, text_line_filter
                    ):
                        return None
                    return self.on_child_entity_change(
                        path=path,
                        flags=flags,
                        entity_class=KeyTextLine,
                        data_identifier=parsed.args["line"],
                        args=parsed.args,
                        ref_conf=parsed.ref_conf,
                        ref=parsed.ref,
                        used_vars=parsed.used_vars,
                        used_env_vars=parsed.used_env_vars,
                        modified_at=modified_at,
                        is_virtual=is_virtual,
                    )
                elif not is_virtual and parsed.ref_conf:
                    KeyTextLine.add_waiting_reference(self, path, parsed.ref_conf)
    def on_directory_removed(self, directory):
        # Nothing to do at the key level; children handle their own removal.
        pass
    @staticmethod
    def args_matching_filter(main, args, filter):
        """Return True when this key matches `filter` ("row,col" or a name)."""
        if filter is None:
            return True
        try:
            if (main["row"], main["col"]) == tuple(int(val) for val in filter.split(",")):
                return True
        except ValueError:
            # Filter is not "int,int": fall back to name matching.
            pass
        return args.get("name") == filter
    def on_image_changed(self):
        """Invalidate the image cache, re-render, and propagate to referencing keys."""
        self.compose_image_cache = None
        self.render()
        for reference in self.referenced_by:
            reference.on_image_changed()
    @property
    def image_size(self):
        # (width, height) in pixels of the key image.
        return self.width, self.height
    @staticmethod
    def sort_layers(layers):
        # Keep only active (truthy) layers, ordered by layer number.
        return {num_layer: layer for num_layer, layer in sorted(layers.items()) if layer}
    @staticmethod
    def sort_text_lines(text_lines):
        # Keep only active (truthy) text lines, ordered by line number.
        return {line: text_line for line, text_line in sorted(text_lines.items()) if text_line}
    def compose_image(self, overlay_level=0):
        """Compose layers and text lines into device-ready image bytes.

        The composed (PIL image, native bytes) pair is cached until
        on_image_changed() clears it. A positive `overlay_level` darkens the
        image (the key sits under that many overlay pages). Returns None when
        the key has nothing to draw.
        """
        if not self.compose_image_cache:
            layers = self.resolved_layers
            text_lines = self.resolved_text_lines
            try:
                if not layers and not text_lines:
                    self.compose_image_cache = (None, None)
                else:
                    layers = self.sort_layers(layers) if layers else {}
                    if layers:
                        if len(layers) > 1:
                            # if more than one layer, we ignore the image used if no specific layers
                            layers.pop(-1, None)
                    text_lines = self.sort_text_lines(text_lines) if text_lines else {}
                    if text_lines:
                        if len(text_lines) > 1:
                            # if more than one text line, we ignore the one used if no specific lines
                            text_lines.pop(-1, None)
                    if not layers and not text_lines:
                        self.compose_image_cache = None, None
                    else:
                        # Paste layers first, then text lines, onto a black canvas.
                        all_layers = list(layers.values()) + list(text_lines.values())
                        final_image = Image.new("RGB", self.image_size, "black")
                        for layer in all_layers:
                            try:
                                if (composed := layer.compose()) is None:
                                    continue
                                rendered_layer, position_x, position_y, mask = composed
                            except Exception:
                                logger.error(
                                    f"[{layer}] Layer could not be rendered", exc_info=logger.level <= logging.DEBUG
                                )
                                continue  # we simply ignore a layer that couldn't be created
                            final_image.paste(rendered_layer, (position_x, position_y), mask)
                        self.compose_image_cache = final_image, PILHelper.to_native_format(
                            self.deck.device, final_image
                        )
            except Exception:
                logger.error(f"[{self}] Image could not be rendered", exc_info=logger.level <= logging.DEBUG)
                self.compose_image_cache = None, None
        if overlay_level and (image := self.compose_image_cache[0]):
            # Darken proportionally to how deep under overlays the key is
            # (this bypasses the cache since the level can vary per render).
            image_data = PILHelper.to_native_format(
                self.deck.device, Image.eval(image, lambda x: x / (1 + 3 * overlay_level))
            )
        else:
            image_data = self.compose_image_cache[1] if self.compose_image_cache[0] else None
        return image_data
def has_content(self):
    """Tell whether this key has at least one active event, layer or text line."""
    sources = (self.resolved_events, self.resolved_layers, self.resolved_text_lines)
    return any(any(source.values()) for source in sources)
def render(self):
    """Draw this key on the device if it is visible and has content.

    Does nothing while the deck is not running; unrenders the key instead
    when it has no content left.
    """
    if not self.deck.is_running:
        return
    # NOTE(review): get_key_visibility appears to return
    # (visible, overlay_level, key_below, key_above) -- confirm in the Deck class
    visible, overlay_level, key_below, key_above = key_visibility = self.deck.get_key_visibility(
        self.page.number, self.key
    )
    if (has_content := self.has_content()) and visible:
        self.deck.set_image(self.row, self.col, self.compose_image(overlay_level))
        # (re)start scrolling for every enabled text line
        for text_line in self.resolved_text_lines.values():
            if text_line:
                text_line.start_scroller()
        self.activate_events()
        self.rendered_overlay = overlay_level
    elif not has_content:
        # pass the already computed visibility to avoid recomputing it
        self.unrender(key_visibility=key_visibility)
def unrender(self, clear_image=True, key_visibility=None):
    """Remove this key from the device and deactivate its events.

    Args:
        clear_image: when True and the key is visible, blank its slot on the device.
        key_visibility: optional pre-computed result of ``deck.get_key_visibility``
            to avoid recomputing it.
    """
    if self.rendered_overlay is None:
        # nothing was rendered for this key, so there is nothing to undo
        return
    if key_visibility is None:
        key_visibility = self.deck.get_key_visibility(self.page.number, self.key)
    visible, overlay_level, key_below, key_above = key_visibility
    for text_line in self.resolved_text_lines.values():
        if text_line:
            text_line.stop_scroller()
    if visible and clear_image:
        self.deck.remove_image(self.row, self.col)
    self.deactivate_events()
    self.rendered_overlay = None
    if key_below:
        # let the key that was hidden below this one take over the slot
        key_below.render()
def version_activated(self):
    """When this version of the key becomes active, render it if currently renderable."""
    super().version_activated()
    if not self.is_renderable() or not self.page.is_renderable():
        return
    self.render()
def version_deactivated(self):
    """When this version of the key is deactivated, remove it from the device."""
    super().version_deactivated()
    if not self.is_renderable() or not self.page.is_renderable():
        return
    self.unrender()
def find_layer(self, layer_filter, allow_disabled=False):
    """Find one of this key's image layers by identifier or name."""
    from . import KeyImageLayer  # local import, presumably to avoid a circular dependency
    return KeyImageLayer.find_by_identifier_or_name(
        self.resolved_layers, layer_filter, allow_disabled=allow_disabled
    )
def find_text_line(self, text_line_filter, allow_disabled=False):
    """Find one of this key's text lines by identifier or name."""
    from . import KeyTextLine  # local import, presumably to avoid a circular dependency
    return KeyTextLine.find_by_identifier_or_name(
        self.resolved_text_lines, text_line_filter, allow_disabled=allow_disabled
    )
@property
def press_duration(self):
    """Milliseconds elapsed since the key was pressed, or ``None`` if it is not pressed."""
    pressed_at = self.pressed_at
    if not pressed_at:
        return None
    elapsed_seconds = time() - pressed_at
    return elapsed_seconds * 1000
def pressed(self):
    """Handle the key being pressed: arm the "longpress" and fire the "press" event."""
    self.pressed_at = time()
    events = self.resolved_events
    if longpress_event := events.get("longpress"):
        logger.debug(f"[{self}] PRESSED. WAITING LONGPRESS.")
        longpress_event.wait_run_and_repeat(on_press=True)
    if not (press_event := events.get("press")):
        logger.debug(f"[{self}] PRESSED. IGNORED (event not configured)")
        return
    logger.debug(f"[{press_event}] PRESSED.")
    press_event.wait_run_and_repeat(on_press=True)
def released(self):
    """Handle the key being released.

    Stops any pending "press"/"longpress" handling started on press, then
    fires the "release" event if one is configured and, when
    ``duration_min`` is set on it, only if the key was held long enough.
    ``pressed_at`` is always reset, even if an event handler raises.
    """
    try:
        events = self.resolved_events
        duration = self.press_duration or None
        # stop whatever was started on press, whether or not it fired yet
        for event_name in ("press", "longpress"):
            if event := events.get(event_name):
                event.stop_repeater()
                if event.duration_thread:
                    event.stop_duration_waiter()
        str_delay_part = f" (after {duration}ms)" if duration is not None else ""
        if not (release_event := events.get("release")):
            logger.debug(f"[{self}] RELEASED{str_delay_part}. IGNORED (event not configured)")
            return
        if release_event.duration_min and (duration is None or duration < release_event.duration_min):
            logger.debug(
                # fixed: the closing parenthesis was missing from this message
                f"[{release_event}] RELEASED{str_delay_part}. ABORTED (not pressed long enough, less than {release_event.duration_min}ms)"
            )
        else:
            logger.debug(f"[{release_event}] RELEASED{str_delay_part}.")
            release_event.wait_run_and_repeat()
    finally:
        self.pressed_at = None
@cached_property
def env_vars(self):
    """Environment variables describing this key, merged over the page's variables."""
    row, col = self.key
    if self.template_for is not None:
        # NOTE(review): on a template key, row/col appear to be sequences of
        # positions; expose the first one -- confirm against key creation code
        row = row[0]
        col = col[0]
    key_vars = {
        "key": f"{row},{col}",
        "key_index0": (index := self.deck.key_to_index(row, col)),
        "key_index": index + 1,
        "key_row": row,
        "key_col": col,
        "key_name": "" if self.name == self.unnamed else self.name,
        "key_directory": self.path,
    }
    if template := self.template:
        # expose the geometry of the template sequence this key belongs to
        start_row, start_col = template.template_for[0]
        end_row, end_col = template.template_for[-1]
        key_vars |= {
            "seq_start_row": start_row,
            "seq_end_row": end_row,
            "seq_start_col": start_col,
            "seq_end_col": end_col,
            "seq_nb_rows": end_row - start_row + 1,
            "seq_nb_cols": end_col - start_col + 1,
            "seq_nb_keys": len(template.template_for),
            "seq_row": row - start_row + 1,
            "seq_col": col - start_col + 1,
            "seq_index0": index,
            "seq_index": index + 1,
        }
    return self.page.env_vars | self.finalize_env_vars(key_vars)
def on_create(self):
    """After creation, propagate this template's content to the keys it covers."""
    super().on_create()
    if self.template_for:
        for row, col in self.template_for:
            if key := self.page.keys.get((row, col)):
                if key.reference == self:
                    # we have a key that references this template, so we copy the missing content
                    self.copy_variable_references(key)
            else:
                # no key exist, we create a virtual one
                self.copy_as_reference(self.page, self.compose_main_part({"row": row, "col": col}))
@property
def template(self):
    """Walk the reference chain and return the first entity that is a template, if any."""
    candidate = self
    while candidate:
        if candidate.template_for is not None:
            return candidate
        candidate = candidate.reference
    return None
@dataclass(eq=False)
class KeyContent(Entity):
    """Base class for everything attached to a key (image layers, text lines, events).

    NOTE(review): the reference/template semantics below are inferred from usage
    in this file -- confirm against the ``Entity`` implementation.
    """

    parent_attr = "key"  # name of the attribute pointing at the parent entity

    key: "Key"  # the key this content belongs to

    @property
    def page(self):
        """The page owning our key."""
        return self.key.page

    @property
    def deck(self):
        """The deck owning our page."""
        return self.page.deck

    @classmethod
    def find_reference_key(cls, parent, ref_conf):
        """Resolve the page/key referenced in ``ref_conf``, defaulting to the parent's.

        Returns the completed configuration and the key, or ``None`` for the key
        when the referenced page or key cannot be found.
        """
        final_ref_conf = ref_conf.copy()
        if ref_page := ref_conf.get("page"):
            if not (page := parent.deck.find_page(ref_page)):
                return final_ref_conf, None
        else:
            final_ref_conf["page"] = page = parent.page
        if ref_key := ref_conf.get("key"):
            if not (key := page.find_key(ref_key)):
                return final_ref_conf, None
        else:
            final_ref_conf["key"] = key = parent
        return final_ref_conf, key

    @classmethod
    def iter_waiting_references_for_key(cls, check_key):
        """Yield ``(key, path, parent, ref_conf)`` entries waiting on ``check_key``.

        Waiting lists are checked on the key itself, then its page, then its deck.
        """
        for path, (parent, ref_conf) in check_key.children_waiting_for_references.get(cls, {}).items():
            yield check_key, path, parent, ref_conf
        for path, (parent, ref_conf) in check_key.page.children_waiting_for_references.get(cls, {}).items():
            if (key := check_key.page.find_key(ref_conf["key"])) and key.key == check_key.key:
                yield key, path, parent, ref_conf
        for path, (parent, ref_conf) in check_key.deck.children_waiting_for_references.get(cls, {}).items():
            if (
                (page := check_key.deck.find_page(ref_conf["page"]))
                and page.number == check_key.page.number
                and (key := page.find_key(ref_conf["key"]))
                and key.key == check_key.key
            ):
                yield key, path, parent, ref_conf

    def get_ref_arg(self):
        """Return the argument string used to reference this content from elsewhere."""
        return f"{self.key.get_ref_arg()}:{self.identifier}"

    def copy_variable_reference_to_referencing_keys(self):
        """Copy this content to keys referencing our key, unless they already have it."""
        identifier = self.identifier
        container_attr = self.parent_container_attr
        for other_key in self.key.referenced_by:
            if getattr(other_key, container_attr).get(identifier):
                continue
            self.copy_as_reference(other_key)

    def on_create(self):
        """After creation, propagate variable-based content to referencing keys."""
        super().on_create()
        if not self.is_virtual and self.uses_vars and self.key.is_referenced:
            self.copy_variable_reference_to_referencing_keys()
| 39.072993 | 141 | 0.574332 |
16cf628e3781ff3fa86b8de8d9b610d269fa90b5 | 8,307 | py | Python | test/lexical_state_machine/test_lexical_state_machine.py | BloggerBust/bbpyp | 078f940dd38bc3ee7c5adcfb2555c2843a4ca57b | [
"Apache-2.0"
] | null | null | null | test/lexical_state_machine/test_lexical_state_machine.py | BloggerBust/bbpyp | 078f940dd38bc3ee7c5adcfb2555c2843a4ca57b | [
"Apache-2.0"
] | null | null | null | test/lexical_state_machine/test_lexical_state_machine.py | BloggerBust/bbpyp | 078f940dd38bc3ee7c5adcfb2555c2843a4ca57b | [
"Apache-2.0"
] | null | null | null | import unittest
from mock import Mock, PropertyMock, patch, sentinel, DEFAULT, call
from bbpyp.test.mock_helpers import set_property_mock
from bbpyp.lexical_state_machine.lexical_state_machine import LexicalStateMachine
from bbpyp.lexical_state_machine.model.lexical_state import LexicalState
@patch('test.TestContext', create=True)
class TestLexicalStateMachine(unittest.TestCase):
    """Unit tests for :class:`LexicalStateMachine` built around mocked actions and builder."""

    def setUp(self):
        # mocked lexical actions (the transition side effects)
        self._mock_lexical_actions = Mock()
        self._mock_lexical_actions.tokenize = Mock()
        self._mock_lexical_actions.dispatch = Mock()
        # fluent transition builder: every chained call returns the builder itself
        self._mock_transition_builder = Mock()
        self._mock_transition_builder.from_state.return_value = self._mock_transition_builder
        self._mock_transition_builder.given_trigger_state.return_value = self._mock_transition_builder
        self._mock_transition_builder.transition_to_state.return_value = self._mock_transition_builder
        self._mock_transition_builder.take_transition_action.return_value = self._mock_transition_builder
        self._mock_transition_builder.provided_guard_is_satisfied.return_value = self._mock_transition_builder
        self._mock_transition_builder.append.return_value = self._mock_transition_builder
        # the built transition table: (from, trigger) -> (action, guard, next state)
        self._mock_transition_builder.build.return_value = {
            (LexicalState.START, LexicalState.PARSE_TAGS): (self._mock_lexical_actions.tokenize, None, LexicalState.PARSE_TAGS),
            (LexicalState.DISPATCH_TOKENS, LexicalState.PARSE_TAGS): (self._mock_lexical_actions.tokenize, None, LexicalState.PARSE_TAGS),
            (LexicalState.PARSE_TAGS, LexicalState.DISPATCH_TOKENS): (self._mock_lexical_actions.dispatch, None, LexicalState.DISPATCH_TOKENS),
        }

    def test_lexical_state_machine_members_initialized_as_expected(self, test_context):
        """The constructor wires logger, transition table and initial state as expected."""
        expected_state = LexicalState.START
        test_context.context_service.get_context_variable.return_value = expected_state
        expected_calls = [call.from_state(LexicalState.START), call.given_trigger_state(LexicalState.PARSE_TAGS), call.take_transition_action(self._mock_lexical_actions.tokenize), call.transition_to_state(LexicalState.PARSE_TAGS), call.append(
        ), call.from_state(LexicalState.DISPATCH_TOKENS), call.append(
        ), call.from_state(LexicalState.PARSE_TAGS), call.given_trigger_state(LexicalState.DISPATCH_TOKENS), call.take_transition_action(self._mock_lexical_actions.dispatch), call.transition_to_state(LexicalState.DISPATCH_TOKENS), call.provided_guard_is_satisfied(LexicalStateMachine._transition_guard_ignore), call.build()]

        state_machine = LexicalStateMachine(
            test_context.logger, self._mock_lexical_actions, self._mock_transition_builder, context_service=test_context.context_service)

        self.assertIs(state_machine._logger, test_context.logger)
        self.assertIs(state_machine._trigger_to_transition,
                      self._mock_transition_builder.build.return_value)
        self.assertIs(state_machine.current_state, expected_state)
        self.assertEqual(self._mock_transition_builder.mock_calls, expected_calls)

    def test_from_starting_state_fire_expected_triggers_and_transition_to_correct_terminal_state(self, test_context):
        """next_state fires the expected trigger chain and ends in the expected state."""
        def create_case(stimuli_meta_state, starting_state, expected_triggers, expected_terminal_state):
            expected_calls = [call(trigger) for trigger in expected_triggers]
            return {"stimuli_meta_state": stimuli_meta_state, "starting_state": starting_state, "expected_get_trigger_calls": expected_calls, "expected_terminal_state": expected_terminal_state}

        cases = [
            create_case(None, LexicalState.START, [
                (LexicalState.START, LexicalState.PARSE_TAGS),
                (LexicalState.PARSE_TAGS, LexicalState.DISPATCH_TOKENS)
            ], LexicalState.DISPATCH_TOKENS),
            create_case(LexicalState.PARSE_TAGS, LexicalState.PARSE_TAGS, [
                (LexicalState.PARSE_TAGS, LexicalState.DISPATCH_TOKENS)
            ], LexicalState.DISPATCH_TOKENS),
            create_case(LexicalState.DISPATCH_TOKENS, LexicalState.DISPATCH_TOKENS, [
                (LexicalState.DISPATCH_TOKENS, LexicalState.PARSE_TAGS),
                (LexicalState.PARSE_TAGS, LexicalState.DISPATCH_TOKENS)
            ], LexicalState.DISPATCH_TOKENS)
        ]

        with patch('bbpyp.state_machine.abstract_state_machine.AbstractStateMachine.current_state', new_callable=PropertyMock) as current_state_property_spy, patch('bbpyp.message_bus.model.message.Message', autospec=True) as stimuli:
            current_state_property_spy.__set__ = set_property_mock
            for case in cases:
                state_machine = LexicalStateMachine(
                    test_context.logger, self._mock_lexical_actions, self._mock_transition_builder, context_service=test_context.context_service)
                # spy on _fire_trigger while delegating to the real implementation
                test_context._fire_trigger_spy.reset_mock()
                test_context._fire_trigger_spy.side_effect = state_machine._fire_trigger
                state_machine._fire_trigger = test_context._fire_trigger_spy
                state_machine.current_state = case["starting_state"]
                stimuli.meta = case["stimuli_meta_state"]

                state_machine.next_state(stimuli)

                self.assertIs(state_machine.current_state, case["expected_terminal_state"])
                self.assertEqual(state_machine._fire_trigger.mock_calls,
                                 case["expected_get_trigger_calls"])

    def test_from_starting_state_call_correct_actions(self, test_context):
        """_on_enter runs the transition action appropriate for the current state."""
        def create_case(current_state, action):
            return {"current_state": current_state, "action": action}

        cases = [
            create_case(LexicalState.START, None),
            create_case(LexicalState.PARSE_TAGS, self._mock_lexical_actions.tokenize),
            create_case(LexicalState.DISPATCH_TOKENS, self._mock_lexical_actions.dispatch)
        ]

        with patch('bbpyp.state_machine.abstract_state_machine.AbstractStateMachine.current_state', new_callable=PropertyMock) as current_state_property_spy, patch('bbpyp.message_bus.model.message.Message', autospec=True) as stimuli:
            current_state_property_spy.__set__ = set_property_mock
            stimuli.payload.__set__ = set_property_mock
            for case in cases:
                current_state_property_spy.reset_mock()
                stimuli.reset_mock()
                stimuli.payload.reset_mock()
                self._mock_lexical_actions.tokenize.reset_mock()
                self._mock_lexical_actions.dispatch.reset_mock()
                test_context.next_state.reset_mock()
                state_machine = LexicalStateMachine(
                    test_context.logger, self._mock_lexical_actions, self._mock_transition_builder, context_service=test_context.context_service)
                state_machine.next_state = test_context.next_state
                state_machine.current_state = case["current_state"]
                original_payload = stimuli.payload

                state_machine._on_enter(test_context.previous_state, case["action"], stimuli)

                self.assertEqual(stimuli.meta, case["current_state"])
                if case["current_state"] == LexicalState.START:
                    self._mock_lexical_actions.tokenize.assert_not_called()
                    self._mock_lexical_actions.dispatch.assert_not_called()
                    state_machine.next_state.assert_not_called()
                elif case["current_state"] == LexicalState.PARSE_TAGS:
                    # fixed: was ".called_once_with(...)" which is a silent no-op
                    # (it creates a child mock instead of asserting)
                    self._mock_lexical_actions.tokenize.assert_called_once_with(original_payload)
                    self.assertEqual(
                        stimuli.payload, self._mock_lexical_actions.tokenize.return_value)
                    self._mock_lexical_actions.dispatch.assert_not_called()
                    state_machine.next_state.assert_called_once_with(stimuli)
                elif case["current_state"] == LexicalState.DISPATCH_TOKENS:
                    self._mock_lexical_actions.tokenize.assert_not_called()
                    # fixed: same silent no-op as above
                    self._mock_lexical_actions.dispatch.assert_called_once_with(stimuli)
                    state_machine.next_state.assert_not_called()
73ff21476b9023c60d54dff226aaa591cc453f5b | 5,221 | py | Python | caterva/ndarray.py | DimitriPapadopoulos/python-caterva | f2162c2cdaea8a818ad948afee1555db3747e3a3 | [
"BSD-3-Clause"
] | 17 | 2019-09-07T09:58:41.000Z | 2021-06-09T21:18:41.000Z | caterva/ndarray.py | DimitriPapadopoulos/python-caterva | f2162c2cdaea8a818ad948afee1555db3747e3a3 | [
"BSD-3-Clause"
] | 23 | 2019-07-04T10:11:54.000Z | 2021-06-04T08:34:42.000Z | caterva/ndarray.py | DimitriPapadopoulos/python-caterva | f2162c2cdaea8a818ad948afee1555db3747e3a3 | [
"BSD-3-Clause"
] | 2 | 2020-06-21T17:56:24.000Z | 2021-05-16T07:35:15.000Z | #######################################################################
# Copyright (C) 2019-present, Blosc Development team <blosc@blosc.org>
# All rights reserved.
#
# This source code is licensed under a BSD-style license (found in the
# LICENSE file in the root directory of this source tree)
#######################################################################
from . import caterva_ext as ext
import ndindex
import numpy as np
from .info import InfoReporter
import os
from .meta import Meta
def process_key(key, shape):
    """Expand an indexing expression into per-axis slices plus an int-axis mask.

    Args:
        key: Any index accepted by ``ndindex`` (ints, slices, tuples, ...).
        shape: Shape of the array being indexed.

    Returns:
        tuple: ``(key, mask)`` where ``key`` is a tuple of slices (integer
        indices become single-element slices) and ``mask`` holds True for the
        axes that were indexed with a plain integer.
    """
    key = ndindex.ndindex(key).expand(shape).raw
    # an axis indexed by an int collapses to length 1; remember which ones
    mask = tuple(isinstance(k, int) for k in key)
    key = tuple(k if isinstance(k, slice) else slice(k, k + 1, None) for k in key)
    return key, mask
def prod(list):
    """Return the product of the values in *list* (1 for an empty sequence).

    Note: the parameter name shadows the ``list`` builtin; it is kept for
    backward compatibility with existing callers.
    """
    import math

    # math.prod is the C-implemented equivalent of the former manual loop and
    # also returns 1 for an empty iterable, matching the previous behavior.
    return math.prod(list)
def get_caterva_start_stop(ndim, key, shape):
    """Normalize a tuple of slices into explicit start/stop tuples plus the slice size.

    ``None`` starts become 0 and ``None`` stops become the axis length; the
    returned size is the number of elements covered by the slice.
    """
    start = tuple(0 if s.start is None else s.start for s in key)
    stop = tuple(sh if s.stop is None else s.stop for s, sh in zip(key, shape))
    size = 1
    for axis in range(ndim):
        size *= stop[axis] - start[axis]
    return start, stop, size
def parse_kwargs(**kwargs):
    """Validate constructor keyword arguments, refusing to overwrite an existing file."""
    urlpath = kwargs.get("urlpath")
    if urlpath and os.path.exists(urlpath):
        raise FileExistsError(f"Can not create the file {urlpath}.It already exists!")
class NDArray(ext.NDArray):
    """An n-dimensional compressed array backed by the Caterva C extension."""

    def __init__(self, **kwargs):
        # validate kwargs first (e.g. refuse to overwrite an existing urlpath)
        parse_kwargs(**kwargs)
        self.kwargs = kwargs
        super(NDArray, self).__init__(**self.kwargs)

    @classmethod
    def cast(cls, cont):
        """Re-tag an extension-level container as this Python-level class, in place."""
        cont.__class__ = cls
        assert isinstance(cont, NDArray)
        return cont

    @property
    def meta(self):
        # accessor for the array's metalayers
        return Meta(self)

    @property
    def info(self):
        """
        Print information about this array.
        """
        return InfoReporter(self)

    @property
    def info_items(self):
        # (label, value) pairs consumed by InfoReporter
        items = []
        items += [("Type", f"{self.__class__.__name__}")]
        items += [("Itemsize", self.itemsize)]
        items += [("Shape", self.shape)]
        items += [("Chunks", self.chunks)]
        items += [("Blocks", self.blocks)]
        items += [("Comp. codec", self.codec.name)]
        items += [("Comp. level", self.clevel)]
        filters = [f.name for f in self.filters if f.name != "NOFILTER"]
        items += [("Comp. filters", f"[{', '.join(map(str, filters))}]")]
        items += [("Comp. ratio", f"{self.cratio:.2f}")]
        return items

    def __setitem__(self, key, value):
        """Update a (multidimensional) slice with the given value.

        Note that the step parameter is not honored yet in slices.
        """
        key, mask = process_key(key, self.shape)
        start, stop, _ = get_caterva_start_stop(self.ndim, key, self.shape)
        key = (start, stop)
        return ext.set_slice(self, key, value)

    def __getitem__(self, key):
        """ Get a (multidimensional) slice as specified in key.

        Parameters
        ----------
        key: int, slice or sequence of slices
            The index for the slices to be updated. Note that step parameter is not honored yet
            in slices.

        Returns
        -------
        out: NDArray
            An array, stored in a non-compressed buffer, with the requested data.
        """
        key, mask = process_key(key, self.shape)
        start, stop, _ = get_caterva_start_stop(self.ndim, key, self.shape)
        key = (start, stop)
        shape = [sp - st for st, sp in zip(start, stop)]
        # itemsize-wide bytes dtype: the extension fills the raw buffer itself
        arr = np.zeros(shape, dtype=f"S{self.itemsize}")
        return ext.get_slice_numpy(arr, self, key, mask)

    def slice(self, key, **kwargs):
        """ Get a (multidimensional) slice as specified in key. Generalizes :py:meth:`__getitem__`.

        Parameters
        ----------
        key: int, slice or sequence of slices
            The index for the slices to be updated. Note that step parameter is not honored yet in
            slices.

        Other Parameters
        ----------------
        kwargs: dict, optional
            Keyword arguments that are supported by the :py:meth:`caterva.empty` constructor.

        Returns
        -------
        out: NDArray
            An array with the requested data.
        """
        arr = NDArray(**kwargs)
        kwargs = arr.kwargs
        key, mask = process_key(key, self.shape)
        start, stop, _ = get_caterva_start_stop(self.ndim, key, self.shape)
        key = (start, stop)
        return ext.get_slice(arr, self, key, mask, **kwargs)

    def squeeze(self):
        """Remove the 1's in array's shape."""
        super(NDArray, self).squeeze(**self.kwargs)

    def to_buffer(self):
        """Returns a buffer with the data contents.

        Returns
        -------
        bytes
            The buffer containing the data of the whole array.
        """
        return super(NDArray, self).to_buffer(**self.kwargs)

    def copy(self, **kwargs):
        """Copy into a new array.

        Other Parameters
        ----------------
        kwargs: dict, optional
            Keyword arguments that are supported by the :py:meth:`caterva.empty` constructor.

        Returns
        -------
        NDArray
            An array containing the copy.
        """
        arr = NDArray(**kwargs)
        return ext.copy(arr, self, **kwargs)
| 31.263473 | 99 | 0.5656 |
e81de2ed32dbef026e6b194d968137e164f4ca44 | 15,042 | py | Python | venv/lib/python3.5/site-packages/gi/_propertyhelper.py | joseluis8906/fancychat | c67bec8c0273f77c9070bb2b7e23421a7ca5bc11 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/gi/_propertyhelper.py | joseluis8906/fancychat | c67bec8c0273f77c9070bb2b7e23421a7ca5bc11 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/gi/_propertyhelper.py | joseluis8906/fancychat | c67bec8c0273f77c9070bb2b7e23421a7ca5bc11 | [
"MIT"
] | null | null | null | # -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2007 Johan Dahlin
#
# gi/_propertyhelper.py: GObject property wrapper/helper
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import sys
import traceback
from . import _gi
from ._constants import \
TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR, \
TYPE_BOOLEAN, TYPE_INT, TYPE_UINT, TYPE_LONG, \
TYPE_ULONG, TYPE_INT64, TYPE_UINT64, TYPE_ENUM, TYPE_FLAGS, \
TYPE_FLOAT, TYPE_DOUBLE, TYPE_STRING, \
TYPE_POINTER, TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, \
TYPE_PYOBJECT, TYPE_GTYPE, TYPE_STRV, TYPE_VARIANT
G_MAXFLOAT = _gi.G_MAXFLOAT
G_MAXDOUBLE = _gi.G_MAXDOUBLE
G_MININT = _gi.G_MININT
G_MAXINT = _gi.G_MAXINT
G_MAXUINT = _gi.G_MAXUINT
G_MINLONG = _gi.G_MINLONG
G_MAXLONG = _gi.G_MAXLONG
G_MAXULONG = _gi.G_MAXULONG
if sys.version_info >= (3, 0):
_basestring = str
_long = int
else:
_basestring = basestring
_long = long
class Property(object):
    """Creates a new Property which when used in conjunction with
    GObject subclass will create a Python property accessor for the
    GObject ParamSpec.

    :param callable getter:
        getter to get the value of the property
    :param callable setter:
        setter to set the value of the property
    :param type type:
        type of property
    :param default:
        default value, must match the property type.
    :param str nick:
        short description
    :param str blurb:
        long description
    :param GObject.ParamFlags flags:
        parameter flags
    :keyword minimum:
        minimum allowed value (int, float, long only)
    :keyword maximum:
        maximum allowed value (int, float, long only)

    .. code-block:: python

        class MyObject(GObject.Object):
            prop = GObject.Property(type=str)

        obj = MyObject()
        obj.prop = 'value'

        obj.prop  # now is 'value'

    The API is similar to the builtin :py:func:`property`:

    .. code-block:: python

        class AnotherObject(GObject.Object):
            value = 0

            @GObject.Property
            def prop(self):
                'Read only property.'
                return 1

            @GObject.Property(type=int)
            def propInt(self):
                'Read-write integer property.'
                return self.value

            @propInt.setter
            def propInt(self, value):
                self.value = value
    """
    # mapping of Python types to GObject type constants
    _type_from_pytype_lookup = {
        # Put long_ first in case long_ and int are the same so int clobbers long_
        _long: TYPE_LONG,
        int: TYPE_INT,
        bool: TYPE_BOOLEAN,
        float: TYPE_DOUBLE,
        str: TYPE_STRING,
        object: TYPE_PYOBJECT,
    }

    # per-GType minimum values used when no explicit minimum is given
    _min_value_lookup = {
        TYPE_UINT: 0,
        TYPE_ULONG: 0,
        TYPE_UINT64: 0,
        # Remember that G_MINFLOAT and G_MINDOUBLE are something different.
        TYPE_FLOAT: -G_MAXFLOAT,
        TYPE_DOUBLE: -G_MAXDOUBLE,
        TYPE_INT: G_MININT,
        TYPE_LONG: G_MINLONG,
        TYPE_INT64: -2 ** 63,
    }

    # per-GType maximum values used when no explicit maximum is given
    _max_value_lookup = {
        TYPE_UINT: G_MAXUINT,
        TYPE_ULONG: G_MAXULONG,
        TYPE_INT64: 2 ** 63 - 1,
        TYPE_UINT64: 2 ** 64 - 1,
        TYPE_FLOAT: G_MAXFLOAT,
        TYPE_DOUBLE: G_MAXDOUBLE,
        TYPE_INT: G_MAXINT,
        TYPE_LONG: G_MAXLONG,
    }

    # per-GType default values used when no explicit default is given
    _default_lookup = {
        TYPE_INT: 0,
        TYPE_UINT: 0,
        TYPE_LONG: 0,
        TYPE_ULONG: 0,
        TYPE_INT64: 0,
        TYPE_UINT64: 0,
        TYPE_STRING: '',
        TYPE_FLOAT: 0.0,
        TYPE_DOUBLE: 0.0,
    }

    # NOTE(review): Python 2 style metaclass declaration; has no effect on
    # Python 3 -- kept as-is for compatibility with the original code.
    class __metaclass__(type):
        def __repr__(self):
            return "<class 'GObject.Property'>"

    def __init__(self, getter=None, setter=None, type=None, default=None,
                 nick='', blurb='', flags=_gi.PARAM_READWRITE,
                 minimum=None, maximum=None):
        # the property name is filled in later (by install_properties or setter())
        self.name = None

        if type is None:
            type = object
        self.type = self._type_from_python(type)
        self.default = self._get_default(default)
        self._check_default()

        if not isinstance(nick, _basestring):
            raise TypeError("nick must be a string")
        self.nick = nick

        if not isinstance(blurb, _basestring):
            raise TypeError("blurb must be a string")
        self.blurb = blurb
        # Always clobber __doc__ with blurb even if blurb is empty because
        # we don't want the lengthy Property class documentation showing up
        # on instances.
        self.__doc__ = blurb
        self.flags = flags

        # Call after setting blurb for potential __doc__ usage.
        if getter and not setter:
            setter = self._readonly_setter
        elif setter and not getter:
            getter = self._writeonly_getter
        elif not setter and not getter:
            getter = self._default_getter
            setter = self._default_setter
        self.getter(getter)
        # do not call self.setter() here, as this defines the property name
        # already
        self.fset = setter

        if minimum is not None:
            if minimum < self._get_minimum():
                raise TypeError(
                    "Minimum for type %s cannot be lower than %d" %
                    (self.type, self._get_minimum()))
        else:
            minimum = self._get_minimum()
        self.minimum = minimum
        if maximum is not None:
            if maximum > self._get_maximum():
                raise TypeError(
                    "Maximum for type %s cannot be higher than %d" %
                    (self.type, self._get_maximum()))
        else:
            maximum = self._get_maximum()
        self.maximum = maximum

        # holds an exception raised from within GObject's property machinery so
        # it can be re-raised to the Python caller
        self._exc = None

    def __repr__(self):
        return '<GObject Property %s (%s)>' % (
            self.name or '(uninitialized)',
            _gi.type_name(self.type))

    def __get__(self, instance, klass):
        """Descriptor protocol: return the property value for *instance*."""
        if instance is None:
            # accessed on the class: return the descriptor itself
            return self

        self._exc = None
        # Simply return the result of fget directly, no need to go through GObject.
        # See: https://bugzilla.gnome.org/show_bug.cgi?id=723872
        # We catch and print any exception occurring within the fget for compatibility
        # prior to the fast path addition from bug 723872, this should eventually
        # be removed and exceptions raised directly to the caller as in:
        # https://bugzilla.gnome.org/show_bug.cgi?id=575652
        try:
            value = self.fget(instance)
        except Exception:
            traceback.print_exc()
            value = None

        if self._exc:
            exc = self._exc
            self._exc = None
            raise exc

        return value

    def __set__(self, instance, value):
        """Descriptor protocol: set the property value through GObject."""
        if instance is None:
            raise TypeError

        self._exc = None
        # goes through set_property so notify::name is emitted
        instance.set_property(self.name, value)
        if self._exc:
            exc = self._exc
            self._exc = None
            raise exc

    def __call__(self, fget):
        """Allows application of the getter along with init arguments."""
        return self.getter(fget)

    def getter(self, fget):
        """Set the getter function to fget. For use as a decorator."""
        if fget.__doc__:
            # Always clobber docstring and blurb with the getter docstring.
            self.blurb = fget.__doc__
            self.__doc__ = fget.__doc__
        self.fget = fget
        return self

    def setter(self, fset):
        """Set the setter function to fset. For use as a decorator."""
        self.fset = fset
        # with a setter decorator, we must ignore the name of the method in
        # install_properties, as this does not need to be a valid property name
        # and does not define the property name. So set the name here.
        if not self.name:
            self.name = self.fget.__name__
        return self

    def _type_from_python(self, type_):
        """Map a Python type (or GType constant) to the GType used internally."""
        if type_ in self._type_from_pytype_lookup:
            return self._type_from_pytype_lookup[type_]
        elif (isinstance(type_, type) and
              issubclass(type_, (_gi.GObject,
                                 _gi.GEnum,
                                 _gi.GFlags,
                                 _gi.GBoxed,
                                 _gi.GInterface))):
            return type_.__gtype__
        elif type_ in (TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR,
                       TYPE_INT, TYPE_UINT, TYPE_BOOLEAN, TYPE_LONG,
                       TYPE_ULONG, TYPE_INT64, TYPE_UINT64,
                       TYPE_FLOAT, TYPE_DOUBLE, TYPE_POINTER,
                       TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, TYPE_STRING,
                       TYPE_PYOBJECT, TYPE_GTYPE, TYPE_STRV, TYPE_VARIANT):
            return type_
        else:
            raise TypeError("Unsupported type: %r" % (type_,))

    def _get_default(self, default):
        """Return *default* or, if it is None, the per-type fallback default."""
        if default is not None:
            return default
        return self._default_lookup.get(self.type, None)

    def _check_default(self):
        """Validate that the default value is compatible with the property type."""
        ptype = self.type
        default = self.default
        if (ptype == TYPE_BOOLEAN and (default not in (True, False))):
            raise TypeError(
                "default must be True or False, not %r" % (default,))
        elif ptype == TYPE_PYOBJECT:
            if default is not None:
                raise TypeError("object types does not have default values")
        elif ptype == TYPE_GTYPE:
            if default is not None:
                raise TypeError("GType types does not have default values")
        elif _gi.type_is_a(ptype, TYPE_ENUM):
            if default is None:
                raise TypeError("enum properties needs a default value")
            elif not _gi.type_is_a(default, ptype):
                raise TypeError("enum value %s must be an instance of %r" %
                                (default, ptype))
        elif _gi.type_is_a(ptype, TYPE_FLAGS):
            if not _gi.type_is_a(default, ptype):
                raise TypeError("flags value %s must be an instance of %r" %
                                (default, ptype))
        elif _gi.type_is_a(ptype, TYPE_STRV) and default is not None:
            if not isinstance(default, list):
                raise TypeError("Strv value %s must be a list" % repr(default))
            for val in default:
                if type(val) not in (str, bytes):
                    raise TypeError("Strv value %s must contain only strings" % str(default))
        elif _gi.type_is_a(ptype, TYPE_VARIANT) and default is not None:
            if not hasattr(default, '__gtype__') or not _gi.type_is_a(default, TYPE_VARIANT):
                raise TypeError("variant value %s must be an instance of %r" %
                                (default, ptype))

    def _get_minimum(self):
        # None for types that have no numeric range
        return self._min_value_lookup.get(self.type, None)

    def _get_maximum(self):
        # None for types that have no numeric range
        return self._max_value_lookup.get(self.type, None)

    #
    # Getter and Setter
    #

    def _default_setter(self, instance, value):
        # store the value on the instance under a mangled attribute name
        setattr(instance, '_property_helper_' + self.name, value)

    def _default_getter(self, instance):
        return getattr(instance, '_property_helper_' + self.name, self.default)

    def _readonly_setter(self, instance, value):
        # deferred: the error is raised later by __set__/__get__ via self._exc
        self._exc = TypeError("%s property of %s is read-only" % (
            self.name, type(instance).__name__))

    def _writeonly_getter(self, instance):
        # deferred: the error is raised later by __set__/__get__ via self._exc
        self._exc = TypeError("%s property of %s is write-only" % (
            self.name, type(instance).__name__))

    #
    # Public API
    #

    def get_pspec_args(self):
        """Return the argument tuple used to register this property's GParamSpec."""
        ptype = self.type
        if ptype in (TYPE_INT, TYPE_UINT, TYPE_LONG, TYPE_ULONG,
                     TYPE_INT64, TYPE_UINT64, TYPE_FLOAT, TYPE_DOUBLE):
            args = self.minimum, self.maximum, self.default
        elif (ptype == TYPE_STRING or ptype == TYPE_BOOLEAN or
              ptype.is_a(TYPE_ENUM) or ptype.is_a(TYPE_FLAGS) or
              ptype.is_a(TYPE_VARIANT)):
            args = (self.default,)
        elif ptype in (TYPE_PYOBJECT, TYPE_GTYPE):
            args = ()
        elif ptype.is_a(TYPE_OBJECT) or ptype.is_a(TYPE_BOXED):
            args = ()
        else:
            raise NotImplementedError(ptype)

        return (self.type, self.nick, self.blurb) + args + (self.flags,)
def install_properties(cls):
    """
    Scans the given class for instances of Property and merges them
    into the classes __gproperties__ dict if it exists or adds it if not.
    """
    gproperties = cls.__dict__.get('__gproperties__', {})

    props = []
    for name, prop in cls.__dict__.items():
        if isinstance(prop, Property):  # not same as the built-in
            # if a property was defined with a decorator, it may already have
            # a name; if it was defined with an assignment (prop = Property(...))
            # we set the property's name to the member name
            if not prop.name:
                prop.name = name
            # we will encounter the same property multiple times in case of
            # custom setter methods
            if prop.name in gproperties:
                if gproperties[prop.name] == prop.get_pspec_args():
                    continue
                raise ValueError('Property %s was already found in __gproperties__' % prop.name)
            gproperties[prop.name] = prop.get_pspec_args()
            props.append(prop)

    if not props:
        # nothing to install
        return

    cls.__gproperties__ = gproperties

    if 'do_get_property' in cls.__dict__ or 'do_set_property' in cls.__dict__:
        # a class may either implement do_get/set_property itself or use
        # Property objects with custom accessors -- never both
        for prop in props:
            if prop.fget != prop._default_getter or prop.fset != prop._default_setter:
                raise TypeError(
                    "GObject subclass %r defines do_get/set_property"
                    " and it also uses a property with a custom setter"
                    " or getter. This is not allowed" %
                    (cls.__name__,))

    # generic accessors installed on the class; they map the GObject property
    # name (dash-separated) back to the Python attribute name
    def obj_get_property(self, pspec):
        name = pspec.name.replace('-', '_')
        return getattr(self, name, None)
    cls.do_get_property = obj_get_property

    def obj_set_property(self, pspec, value):
        name = pspec.name.replace('-', '_')
        prop = getattr(cls, name, None)
        if prop:
            prop.fset(self, value)
    cls.do_set_property = obj_set_property
| 35.14486 | 96 | 0.597859 |
0c50b5c1f51ffdb4d7586461298f4fea9f509634 | 17,658 | py | Python | pde/tools/parameters.py | deephog/py-pde | 2b9cf5ecf90d33f484b6f2fb61437199ca66b25c | [
"MIT"
] | null | null | null | pde/tools/parameters.py | deephog/py-pde | 2b9cf5ecf90d33f484b6f2fb61437199ca66b25c | [
"MIT"
] | null | null | null | pde/tools/parameters.py | deephog/py-pde | 2b9cf5ecf90d33f484b6f2fb61437199ca66b25c | [
"MIT"
] | null | null | null | """
Infrastructure for managing classes with parameters
One aim is to allow easy management of inheritance of parameters.
.. autosummary::
:nosignatures:
Parameter
DeprecatedParameter
HideParameter
Parameterized
get_all_parameters
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import logging
from collections import OrderedDict
from typing import Any, Dict, Sequence, Union
import numpy as np
from . import output
from .misc import hybridmethod, import_class
class Parameter:
    """class representing a single, typed configuration parameter

    A parameter carries a name, a default value, a type used for conversion,
    and a human-readable description. Instances support pickling by storing
    the dotted class path of `cls` (see `__getstate__`/`__setstate__`).
    """
    def __init__(
        self,
        name: str,
        default_value=None,
        cls=object,
        description: str = "",
        hidden: bool = False,
        extra: Dict[str, Any] = None,
    ):
        """initialize a parameter
        Args:
            name (str):
                The name of the parameter
            default_value:
                The default value
            cls:
                The type of the parameter, which is used for conversion
            description (str):
                A string describing the impact of this parameter. This
                description appears in the parameter help
            hidden (bool):
                Whether the parameter is hidden in the description summary
            extra (dict):
                Extra arguments that are stored with the parameter
        """
        self.name = name
        self.default_value = default_value
        self.cls = cls
        self.description = description
        self.hidden = hidden
        self.extra = {} if extra is None else extra
        if cls is not object:
            # check whether the default value is of the correct type
            converted_value = cls(default_value)
            if isinstance(converted_value, np.ndarray):
                # arrays need an element-wise comparison
                valid_default = np.allclose(converted_value, default_value)
            else:
                valid_default = converted_value == default_value
            if not valid_default:
                # only warn; an inconsistent default may still be intentional
                logging.warning(
                    "Default value `%s` does not seem to be of type `%s`",
                    name,
                    cls.__name__,
                )
    def __repr__(self):
        return (
            f'{self.__class__.__name__}(name="{self.name}", default_value='
            f'"{self.default_value}", cls="{self.cls.__name__}", '
            f'description="{self.description}", hidden={self.hidden})'
        )
    __str__ = __repr__
    def __getstate__(self):
        # replace the parameter class by its dotted class path, so the state
        # is picklable; the path is resolved again in `__setstate__`
        return {
            "name": str(self.name),
            "default_value": self.convert(),
            # BUGFIX: use the module of the actual parameter type. The
            # previous code used `object.__module__`, which always yields
            # "builtins" and breaks restoring parameters whose type lives
            # in another module (e.g. numpy scalar types).
            "cls": self.cls.__module__ + "." + self.cls.__name__,
            "description": self.description,
            "hidden": self.hidden,
            "extra": self.extra,
        }
    def __setstate__(self, state):
        # restore the object from the class path
        state["cls"] = import_class(state["cls"])
        # restore the state
        self.__dict__.update(state)
    def convert(self, value=None):
        """converts a `value` into the correct type for this parameter. If
        `value` is not given, the default value is converted.
        Note that this does not make a copy of the values, which could lead to
        unexpected effects where the default value is changed by an instance.
        Args:
            value: The value to convert
        Returns:
            The converted value, which is of type `self.cls`
        """
        if value is None:
            value = self.default_value
        if self.cls is object:
            # untyped parameters are passed through unchanged
            return value
        else:
            try:
                return self.cls(value)
            except ValueError:
                raise ValueError(
                    f"Could not convert {value!r} to {self.cls.__name__} for parameter "
                    f"'{self.name}'"
                )
class DeprecatedParameter(Parameter):
    """ a parameter that can still be used normally but is deprecated """
    # Pure marker subclass: behaves exactly like `Parameter`. Its type is
    # checked via `isinstance` (e.g. in `Parameterized.get_parameters`) to
    # filter deprecated entries out of listings.
    pass
class HideParameter:
    """ a helper class that allows hiding parameters of the parent classes """
    # Placed in a subclass's `parameters_default` list; when the class
    # hierarchy is scanned in `Parameterized.get_parameters`, an entry with
    # this name is either removed or marked hidden.
    def __init__(self, name: str):
        """
        Args:
            name (str):
                The name of the parameter
        """
        self.name = name
ParameterListType = Sequence[Union[Parameter, HideParameter]]
class Parameterized:
    """ a mixin that manages the parameters of a class """
    # class-level list of supported parameters; subclasses extend/override it
    parameters_default: ParameterListType = []
    # registry of all subclasses, filled by `__init_subclass__`; used by
    # `get_all_parameters` to enumerate every parameterized class
    _subclasses: Dict[str, "Parameterized"] = {}
    def __init__(self, parameters: Dict[str, Any] = None):
        """initialize the parameters of the object
        Args:
            parameters (dict):
                A dictionary of parameters to change the defaults. The allowed
                parameters can be obtained from
                :meth:`~Parameterized.get_parameters` or displayed by calling
                :meth:`~Parameterized.show_parameters`.
        """
        # set logger if this has not happened, yet
        if not hasattr(self, "_logger"):
            self._logger = logging.getLogger(self.__class__.__name__)
        # set parameters if they have not been initialized, yet
        if not hasattr(self, "parameters"):
            self.parameters = self._parse_parameters(
                parameters, include_deprecated=True
            )
    def __init_subclass__(cls, **kwargs):  # @NoSelf
        """ register all subclasses to reconstruct them later """
        # normalize the parameters_default attribute
        if hasattr(cls, "parameters_default") and isinstance(
            cls.parameters_default, dict
        ):
            # default parameters are given as a dictionary
            cls.parameters_default = [
                Parameter(*args) for args in cls.parameters_default.items()
            ]
        # register the subclasses
        super().__init_subclass__(**kwargs)
        cls._subclasses[cls.__name__] = cls
    @classmethod
    def get_parameters(
        cls,
        include_hidden: bool = False,
        include_deprecated: bool = False,
        sort: bool = True,
    ) -> Dict[str, Parameter]:
        """return a dictionary of parameters that the class supports
        Args:
            include_hidden (bool): Include hidden parameters
            include_deprecated (bool): Include deprecated parameters
            sort (bool): Return ordered dictionary with sorted keys
        Returns:
            dict: a dictionary of instance of :class:`Parameter` with their
            names as keys.
        """
        # collect the parameters from the class hierarchy; walking the MRO in
        # reverse lets subclasses override (or hide) inherited definitions
        parameters: Dict[str, Parameter] = {}
        for cls in reversed(cls.__mro__):
            if hasattr(cls, "parameters_default"):
                for p in cls.parameters_default:
                    if isinstance(p, HideParameter):
                        if include_hidden:
                            parameters[p.name].hidden = True
                        else:
                            del parameters[p.name]
                    else:
                        parameters[p.name] = p
        # filter parameters based on hidden and deprecated flags
        def show(p):
            """ helper function to decide whether parameter will be shown """
            # show based on hidden flag?
            show1 = include_hidden or not p.hidden
            # show based on deprecated flag?
            show2 = include_deprecated or not isinstance(p, DeprecatedParameter)
            return show1 and show2
        # filter parameters based on `show`
        result = {
            name: parameter for name, parameter in parameters.items() if show(parameter)
        }
        if sort:
            result = OrderedDict(sorted(result.items()))
        return result
    @classmethod
    def _parse_parameters(
        cls,
        parameters: Dict[str, Any] = None,
        check_validity: bool = True,
        allow_hidden: bool = True,
        include_deprecated: bool = False,
    ) -> Dict[str, Any]:
        """parse parameters
        Args:
            parameters (dict):
                A dictionary of parameters that will be parsed.
            check_validity (bool):
                Determines whether a `ValueError` is raised if there are keys in
                parameters that are not in the defaults. If `False`, additional
                items are simply stored in `self.parameters`
            allow_hidden (bool):
                Allow setting hidden parameters
            include_deprecated (bool):
                Include deprecated parameters
        """
        if parameters is None:
            parameters = {}
        else:
            parameters = parameters.copy()  # do not modify the original
        # obtain all possible parameters
        param_objs = cls.get_parameters(
            include_hidden=allow_hidden, include_deprecated=include_deprecated
        )
        # initialize parameters with default ones from all parent classes
        result: Dict[str, Any] = {}
        for name, param_obj in param_objs.items():
            if not allow_hidden and param_obj.hidden:
                continue  # skip hidden parameters
            # take value from parameters or set default value; `pop` consumes
            # recognized keys so only unknown ones remain afterwards
            result[name] = param_obj.convert(parameters.pop(name, None))
        # update parameters with the supplied ones
        if check_validity and parameters:
            raise ValueError(
                f"Parameters `{sorted(parameters.keys())}` were provided for an "
                f"instance but are not defined for the class `{cls.__name__}`"
            )
        else:
            result.update(parameters)  # add remaining parameters
        return result
    def get_parameter_default(self, name):
        """return the default value for the parameter with `name`
        Args:
            name (str): The parameter name
        """
        # search the class hierarchy (derived classes first) for the name
        for cls in self.__class__.__mro__:
            if hasattr(cls, "parameters_default"):
                for p in cls.parameters_default:
                    if isinstance(p, Parameter) and p.name == name:
                        return p.default_value
        raise KeyError(f"Parameter `{name}` is not defined")
    @classmethod
    def _show_parameters(
        cls,
        description: bool = None,
        sort: bool = False,
        show_hidden: bool = False,
        show_deprecated: bool = False,
        parameter_values: Dict[str, Any] = None,
    ):
        """private method showing all parameters in human readable format
        Args:
            description (bool):
                Flag determining whether the parameter description is shown. The
                default is to show the description only when we are in a jupyter
                notebook environment.
            sort (bool):
                Flag determining whether the parameters are sorted
            show_hidden (bool):
                Flag determining whether hidden parameters are shown
            show_deprecated (bool):
                Flag determining whether deprecated parameters are shown
            parameter_values (dict):
                A dictionary with values to show. Parameters not in this
                dictionary are shown with their default value.
        All flags default to `False`.
        """
        # determine whether we are in a jupyter notebook and can return HTML
        in_notebook = output.in_jupyter_notebook()
        if description is None:
            description = in_notebook  # show only in notebook by default
        # set the templates for displaying the data
        if in_notebook:
            writer: output.OutputBase = output.JupyterOutput(
                '<style type="text/css">dl.py-pde_params dd {padding-left:2em}</style>'
                '<dl class="py-pde_params">',
                "</dl>",
            )
            # templates for HTML output
            template = "<dt>{name} = {value!r}</dt>"
            if description:
                template += "<dd>{description}</dd>"
            template_object = template
        else:
            # template for normal output
            writer = output.BasicOutput()
            template = "{name}: {type} = {value!r}"
            template_object = "{name} = {value!r}"
            if description:
                template += " ({description})"
                template_object += " ({description})"
        # iterate over all parameters
        params = cls.get_parameters(
            include_hidden=show_hidden, include_deprecated=show_deprecated, sort=sort
        )
        for param in params.values():
            # initialize the data to show
            data = {
                "name": param.name,
                "type": param.cls.__name__,
                "description": param.description,
            }
            # determine the value to show
            if parameter_values is None:
                data["value"] = param.default_value
            else:
                data["value"] = parameter_values[param.name]
            # print the data to stdout; untyped parameters omit the type field
            if param.cls is object:
                writer(template_object.format(**data))
            else:
                writer(template.format(**data))
        writer.show()
    # `hybridmethod` dispatches to the classmethod variant below when called
    # on the class and to the instancemethod variant when called on an object
    @hybridmethod
    def show_parameters(  # @NoSelf
        cls,
        description: bool = None,  # @NoSelf
        sort: bool = False,
        show_hidden: bool = False,
        show_deprecated: bool = False,
    ):
        """show all parameters in human readable format
        Args:
            description (bool):
                Flag determining whether the parameter description is shown. The
                default is to show the description only when we are in a jupyter
                notebook environment.
            sort (bool):
                Flag determining whether the parameters are sorted
            show_hidden (bool):
                Flag determining whether hidden parameters are shown
            show_deprecated (bool):
                Flag determining whether deprecated parameters are shown
        All flags default to `False`.
        """
        cls._show_parameters(description, sort, show_hidden, show_deprecated)
    @show_parameters.instancemethod  # type: ignore
    def show_parameters(
        self,
        description: bool = None,  # @NoSelf
        sort: bool = False,
        show_hidden: bool = False,
        show_deprecated: bool = False,
        default_value: bool = False,
    ):
        """show all parameters in human readable format
        Args:
            description (bool):
                Flag determining whether the parameter description is shown. The
                default is to show the description only when we are in a jupyter
                notebook environment.
            sort (bool):
                Flag determining whether the parameters are sorted
            show_hidden (bool):
                Flag determining whether hidden parameters are shown
            show_deprecated (bool):
                Flag determining whether deprecated parameters are shown
            default_value (bool):
                Flag determining whether the default values or the current
                values are shown
        All flags default to `False`.
        """
        self._show_parameters(
            description,
            sort,
            show_hidden,
            show_deprecated,
            parameter_values=None if default_value else self.parameters,
        )
def get_all_parameters(data: str = "name") -> Dict[str, Any]:
    """get a dictionary with all parameters of all registered classes

    Args:
        data (str):
            Determines what data is returned. Possible values are 'name',
            'value', or 'description', to return the respective information
            about the parameters.
    """
    collected: Dict[str, Any] = {}
    for class_name, subclass in Parameterized._subclasses.items():
        # fetch the parameter objects once and extract the requested field
        params = subclass.get_parameters()
        if data == "name":
            info: Any = set(params.keys())
        elif data == "value":
            info = {key: param.default_value for key, param in params.items()}
        elif data == "description":
            info = {key: param.description for key, param in params.items()}
        else:
            raise ValueError(f"Cannot interpret data `{data}`")
        collected[class_name] = info
    return collected
def sphinx_display_parameters(app, what, name, obj, options, lines):
    """helper function to display parameters in sphinx documentation
    Example:
        This function should be connected to the 'autodoc-process-docstring'
        event like so:
            app.connect('autodoc-process-docstring', sphinx_display_parameters)
    """
    # only act on Parameterized classes whose docstring mentions `parameters`
    if what == "class" and issubclass(obj, Parameterized):
        if any(":param parameters:" in line for line in lines):
            # parse parameters
            parameters = obj.get_parameters(sort=False)
            if parameters:
                # append an rST admonition block listing every parameter with
                # its description and default value
                lines.append(".. admonition::")
                lines.append(f"   Parameters of {obj.__name__}:")
                lines.append("   ")
                for p in parameters.values():
                    lines.append(f"   {p.name}")
                    text = p.description.splitlines()
                    text.append(f"(Default value: :code:`{p.default_value!r}`)")
                    text = ["     " + t for t in text]
                    lines.extend(text)
                    lines.append("")
                lines.append("")
| 34.897233 | 88 | 0.572149 |
347754d497902fb242721ed3e43644b5dbecf4a6 | 9,979 | py | Python | fcmaes/moretry.py | dietmarwo/fast-cma-es | 7952d2b70dc76d8f9d3bc326fd19f98fd93e9ff6 | [
"MIT"
] | 20 | 2020-05-28T10:23:11.000Z | 2022-03-22T08:09:58.000Z | fcmaes/moretry.py | dietmarwo/fast-cma-es | 7952d2b70dc76d8f9d3bc326fd19f98fd93e9ff6 | [
"MIT"
] | 11 | 2020-03-04T15:16:41.000Z | 2022-03-27T07:14:19.000Z | fcmaes/moretry.py | dietmarwo/fast-cma-es | 7952d2b70dc76d8f9d3bc326fd19f98fd93e9ff6 | [
"MIT"
] | 12 | 2020-02-19T12:26:40.000Z | 2022-03-26T11:22:13.000Z | # Copyright (c) Dietmar Wolz.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
# parallel optimization retry of a multi-objective problem.
import numpy as np
import math, sys, time, warnings
import multiprocessing as mp
from multiprocessing import Process
from scipy.optimize import Bounds
from numpy.random import Generator, MT19937, SeedSequence
from fcmaes.optimizer import de_cma, logger, dtime
from fcmaes import retry, advretry
def minimize(fun,
             bounds,
             weight_bounds,
             ncon = 0,
             value_exp = 2.0,
             value_limits = None,
             num_retries = 1024,
             logger = None,
             workers = mp.cpu_count(),
             popsize = 31,
             max_evaluations = 50000,
             capacity = None,
             optimizer = None,
             statistic_num = 0,
             plot_name = None
             ):
    """Minimization of a multi objective function of one or more variables using parallel
     optimization retry.
     Parameters
     ----------
    fun : callable
        The objective function to be minimized.
            ``fun(x, *args) -> float``
        where ``x`` is an 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function.
    bounds : sequence or `Bounds`, optional
        Bounds on variables. There are two ways to specify the bounds:
            1. Instance of the `scipy.Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.
    weight_bounds : `Bounds`, optional
        Bounds on objective weights.
    ncon : int, optional
        number of constraints
    value_exp : float, optional
        exponent applied to the objective values for the weighted sum.
    value_limits : sequence of floats, optional
        Upper limit for optimized objective values to be stored.
    num_retries : int, optional
        Number of optimization retries.
    logger : logger, optional
        logger for log output of the retry mechanism. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file ``optimizer.log``.
    workers : int, optional
        number of parallel processes used. Default is mp.cpu_count()
    popsize = int, optional
        CMA-ES population size used for all CMA-ES runs.
        Not used for differential evolution.
        Ignored if parameter optimizer is defined.
    max_evaluations : int, optional
        Forced termination of all optimization runs after ``max_evaluations``
        function evaluations. Only used if optimizer is undefined, otherwise
        this setting is defined in the optimizer.
    capacity : int, optional
        capacity of the evaluation store.
    optimizer : optimizer.Optimizer, optional
        optimizer to use. Default is a sequence of differential evolution and CMA-ES.
    plot_name : plot_name, optional
        if defined the pareto front is plotted during the optimization to monitor progress
    Returns
    -------
    xs, ys: list of argument vectors and corresponding value vectors of the optimization results. """
    # default optimizer: differential evolution followed by CMA-ES
    if optimizer is None:
        optimizer = de_cma(max_evaluations, popsize)
    # by default keep one stored result per retry
    if capacity is None:
        capacity = num_retries
    store = retry.Store(fun, bounds, capacity = capacity, logger = logger,
                        statistic_num = statistic_num, plot_name = plot_name)
    # run the parallel weighted-sum retries; re-evaluate fun to get the
    # multi-objective values for the stored argument vectors
    xs = np.array(mo_retry(fun, weight_bounds, ncon, value_exp,
                           store, optimizer.minimize, num_retries, value_limits, workers))
    ys = np.array([fun(x) for x in xs])
    return xs, ys
def mo_retry(fun, weight_bounds, ncon, y_exp, store, optimize, num_retries, value_limits,
             workers=mp.cpu_count()):
    """Run `_retry_loop` in `workers` parallel processes sharing `store`.

    Each process gets its own independent random generator spawned from a
    single `SeedSequence`. Returns the stored argument vectors after all
    workers finished.
    """
    sg = SeedSequence()
    # one independent generator per worker process
    rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]
    proc=[Process(target=_retry_loop,
                args=(pid, rgs, fun, weight_bounds, ncon, y_exp,
                      store, optimize, num_retries, value_limits)) for pid in range(workers)]
    [p.start() for p in proc]
    [p.join() for p in proc]
    store.sort()
    store.dump()
    return store.get_xs()
def _retry_loop(pid, rgs, fun, weight_bounds, ncon, y_exp,
                store, optimize, num_retries, value_limits):
    """Worker loop: repeatedly draws a random weight vector, optimizes the
    resulting weighted-sum objective and stores results passing the limits."""
    # on Windows the logger is not inherited by the spawned process
    if 'win' in sys.platform and not store.logger is None:
        store.logger = logger()
    lower = store.lower
    wlb = np.array(weight_bounds.lb)
    wub = np.array(weight_bounds.ub)
    while store.get_runs_compare_incr(num_retries):
        try:
            rg = rgs[pid]
            # draw a random weight vector and scale it into the weight bounds
            w = rg.uniform(size=len(wub))
            w /= _avg_exp(w, y_exp) # correct scaling
            w = wlb + w * (wub - wlb)
            wrapper = mo_wrapper(fun, w, ncon, y_exp)
            x, y, evals = optimize(wrapper.eval, Bounds(store.lower, store.upper), None,
                                   [rg.uniform(0.05, 0.1)]*len(lower), rg, store)
            objs = wrapper.mo_eval(x) # retrieve the objective values
            # keep the result only if all objectives are below their limits
            if value_limits is None or all([objs[i] < value_limits[i] for i in range(len(w))]):
                store.add_result(y, x, evals, math.inf)
                if not store.plot_name is None:
                    # dump and plot the current pareto front to monitor progress
                    name = store.plot_name + "_moretry_" + str(store.get_count_evals())
                    xs = np.array(store.get_xs())
                    ys = np.array([fun(x) for x in xs])
                    np.savez_compressed(name, xs=xs, ys=ys)
                    plot(name, ncon, xs, ys)
        except Exception as ex:
            # best-effort retry loop: log the error and continue with the next run
            print(str(ex))
def pareto(xs, ys):
    """pareto front for argument vectors and corresponding function value vectors."""
    # indices of the non-dominated entries
    front = _pareto(ys)
    xs_front = xs[front]
    ys_front = ys[front]
    # order the front by the first objective for a stable, sorted result
    order = np.argsort(ys_front.T[0])
    return xs_front[order], ys_front[order]
class mo_wrapper(object):
    """wrapper for multi objective functions applying the weighted sum approach."""
    def __init__(self, fun, weights, ncon, y_exp=2):
        # `fun` returns `ny` values: `nobj` objectives followed by `ncon`
        # constraint values, where a positive constraint value means violation
        self.fun = fun
        self.weights = weights
        self.ny = len(weights)
        self.nobj = self.ny - ncon
        self.ncon = ncon
        self.y_exp = y_exp
    def eval(self, x):
        """Scalarized objective: exponent-weighted sum plus constraint penalty."""
        y = self.fun(np.array(x))
        weighted = _avg_exp(self.weights*y, self.y_exp)
        if self.ncon > 0: # check constraint violations
            violations = np.array([i for i in range(self.nobj, self.ny) if y[i] > 0])
            if len(violations) > 0:
                # penalize by the weights of the violated constraints
                weighted += sum(self.weights[violations])
        return weighted
    def mo_eval(self, x):
        """Return the raw multi-objective (and constraint) values."""
        return self.fun(np.array(x))
def minimize_plot(name, optimizer, fun, bounds, weight_bounds, ncon = 0,
                  value_limits = None, num_retries = 1024,
                  exp = 2.0, workers = mp.cpu_count(),
                  logger=logger(), statistic_num = 0, plot_name = None):
    """Run `minimize`, save the results as a compressed npz file and plot them.

    NOTE(review): the default `logger=logger()` is evaluated once at import
    time, so all calls share a single logger instance — confirm intended.
    """
    time0 = time.perf_counter() # optimization start time
    name += '_' + optimizer.name
    logger.info('optimize ' + name)
    xs, ys = minimize(fun, bounds, weight_bounds, ncon,
                      value_exp = exp,
                      value_limits = value_limits,
                      num_retries = num_retries,
                      optimizer = optimizer,
                      workers = workers,
                      logger=logger, statistic_num = statistic_num, plot_name = plot_name)
    logger.info(name + ' time ' + str(dtime(time0)))
    # persist the raw results and plot both all points and the pareto front
    np.savez_compressed(name, xs=xs, ys=ys)
    plot(name, ncon, xs, ys)
def plot(name, ncon, xs, ys, eps = 1E-2):
    """Plot all results and the pareto front; with constraints (`ncon` > 0)
    only feasible points (constraint violation sum below `eps`) are shown."""
    try:
        if ncon > 0: # select feasible
            # sum of positive constraint values = total violation per point
            ycon = np.array([np.maximum(y[-ncon:], 0) for y in ys])
            con = np.sum(ycon, axis=1)
            nobj = len(ys[0]) - ncon
            feasible = np.array([i for i in range(len(ys)) if con[i] < eps])
            if len(feasible) > 0:
                #yc = [y[nobj:] for y in ys[feasible]]
                # keep only the objective part of the feasible points
                xs, ys = xs[feasible], np.array([ y[:nobj] for y in ys[feasible]])
            else:
                return
        retry.plot(ys, 'all_' + name + '.png', interp=False)
        xs, front = pareto(xs, ys)
        retry.plot(front, 'front_' + name + '.png', interp=False)
        if ncon > 0:
            # print the pareto-optimal solutions together with their arguments
            for x, y, feas, in zip(xs, front, con[feasible]):
                print(str(list(y)) + ' ' + #str(feas) + ' ' +
                      str([int(xi) for xi in x]))
    except Exception as ex:
        # plotting is best-effort; never break the optimization because of it
        print(str(ex))
def adv_minimize_plot(name, optimizer, fun, bounds,
                      value_limit = math.inf, num_retries = 1024, logger=logger(), statistic_num = 0):
    """Optimize only the first objective with the smart (advretry) retry, then
    re-evaluate all stored solutions multi-objectively and plot the front.

    NOTE(review): the default `logger=logger()` is evaluated once at import
    time, so all calls share a single logger instance — confirm intended.
    """
    time0 = time.perf_counter() # optimization start time
    name += '_smart_' + optimizer.name
    logger.info('smart optimize ' + name)
    # single-objective store driven by the first component of fun(x)
    store = advretry.Store(lambda x:fun(x)[0], bounds, capacity=5000, logger=logger,
                           num_retries=num_retries, statistic_num = statistic_num)
    advretry.retry(store, optimizer.minimize, num_retries, value_limit)
    xs = np.array(store.get_xs())
    ys = np.array([fun(x) for x in xs])
    retry.plot(ys, '_all_' + name + '.png', interp=False)
    np.savez_compressed(name , xs=xs, ys=ys)
    xs, front = pareto(xs, ys)
    logger.info(name+ ' time ' + str(dtime(time0)))
    retry.plot(front, '_front_' + name + '.png')
def _avg_exp(y, y_exp):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
weighted = sum([y[i]**y_exp for i in range(len(y))])**(1.0/y_exp)
return weighted
def _pareto(ys):
pareto = np.arange(ys.shape[0])
index = 0 # Next index to search for
while index < len(ys):
mask = np.any(ys < ys[index], axis=1)
mask[index] = True
pareto = pareto[mask] # Remove dominated points
ys = ys[mask]
index = np.sum(mask[:index])+1
return pareto
| 41.406639 | 101 | 0.592043 |
e62c051300ab5907b6d140ea8c68be502670cbc5 | 7,537 | py | Python | tests/strategies/test_dp.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | tests/strategies/test_dp.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 1 | 2022-03-18T21:56:53.000Z | 2022-03-18T21:56:53.000Z | tests/strategies/test_dp.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities import memory
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
class CustomClassificationModelDP(ClassificationModel):
    """Classification model variant whose step methods return dicts so the
    loss/metrics are computed in the `*_step_end` hooks (required under DP,
    where per-GPU outputs are gathered before the `*_step_end` call)."""
    def _step(self, batch, batch_idx):
        # shared forward pass returning raw logits and targets
        x, y = batch
        logits = self(x)
        return {"logits": logits, "y": y}
    def training_step(self, batch, batch_idx):
        out = self._step(batch, batch_idx)
        loss = F.cross_entropy(out["logits"], out["y"])
        return loss
    def validation_step(self, batch, batch_idx):
        return self._step(batch, batch_idx)
    def test_step(self, batch, batch_idx):
        return self._step(batch, batch_idx)
    def validation_step_end(self, outputs):
        # metrics are computed on the gathered outputs of all GPUs
        self.log("val_acc", self.valid_acc(outputs["logits"], outputs["y"]))
    def test_step_end(self, outputs):
        self.log("test_acc", self.test_acc(outputs["logits"], outputs["y"]))
@RunIf(min_gpus=2)
def test_multi_gpu_early_stop_dp(tmpdir):
    """Make sure training with the DP strategy works together with early
    stopping on a monitored validation metric."""
    tutils.set_random_main_port()
    dm = ClassifDataModule()
    model = CustomClassificationModelDP()
    trainer_options = dict(
        default_root_dir=tmpdir,
        callbacks=[EarlyStopping(monitor="val_acc")],
        max_epochs=50,
        limit_train_batches=10,
        limit_val_batches=10,
        gpus=[0, 1],
        strategy="dp",
    )
    tpipes.run_model_test(trainer_options, model, dm)
@RunIf(min_gpus=2)
def test_multi_gpu_model_dp(tmpdir):
    """Smoke test: a BoringModel trains for one epoch under the DP strategy."""
    tutils.set_random_main_port()
    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=10,
        limit_val_batches=10,
        gpus=[0, 1],
        strategy="dp",
        enable_progress_bar=False,
    )
    model = BoringModel()
    tpipes.run_model_test(trainer_options, model)
    # test memory helper functions
    memory.get_memory_profile("min_max")
class ReductionTestModel(BoringModel):
    """Model that attaches per-device int/float tensors to every step output
    so the epoch-end hooks can verify how DP gathers and reduces them."""
    def train_dataloader(self):
        return DataLoader(RandomDataset(32, 64), batch_size=2)
    def val_dataloader(self):
        return DataLoader(RandomDataset(32, 64), batch_size=2)
    def test_dataloader(self):
        return DataLoader(RandomDataset(32, 64), batch_size=2)
    def add_outputs(self, output, device):
        # the tensor value equals the GPU index, making the gathered result
        # predictable ([0, 1] for two devices); dtypes must be preserved
        output.update(
            {
                "reduce_int": torch.tensor(device.index, dtype=torch.int, device=device),
                "reduce_float": torch.tensor(device.index, dtype=torch.float, device=device),
            }
        )
    def training_step(self, batch, batch_idx):
        output = super().training_step(batch, batch_idx)
        self.add_outputs(output, batch.device)
        return output
    def validation_step(self, batch, batch_idx):
        output = super().validation_step(batch, batch_idx)
        self.add_outputs(output, batch.device)
        return output
    def test_step(self, batch, batch_idx):
        output = super().test_step(batch, batch_idx)
        self.add_outputs(output, batch.device)
        return output
    def training_epoch_end(self, outputs):
        assert outputs[0]["loss"].shape == torch.Size([])
        self._assert_extra_outputs(outputs)
    def validation_epoch_end(self, outputs):
        assert outputs[0]["x"].shape == torch.Size([2])
        self._assert_extra_outputs(outputs)
    def test_epoch_end(self, outputs):
        assert outputs[0]["y"].shape == torch.Size([2])
        self._assert_extra_outputs(outputs)
    def _assert_extra_outputs(self, outputs):
        # gathered outputs must contain one entry per device with the
        # original dtypes intact
        out = outputs[0]["reduce_int"]
        assert torch.eq(out, torch.tensor([0, 1], device="cuda:0")).all()
        assert out.dtype is torch.int
        out = outputs[0]["reduce_float"]
        assert torch.eq(out, torch.tensor([0.0, 1.0], device="cuda:0")).all()
        assert out.dtype is torch.float
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_dp_raise_exception_with_batch_transfer_hooks(mock_is_available, mock_device_count, tmpdir):
    """Test that an exception is raised when overriding batch_transfer_hooks in DP model."""
    # CUDA availability is mocked, so no real GPUs are required for this test
    class CustomModel(BoringModel):
        def transfer_batch_to_device(self, batch, device, dataloader_idx):
            batch = batch.to(device)
            return batch
    trainer_options = dict(default_root_dir=tmpdir, max_steps=7, gpus=[0, 1], strategy="dp")
    trainer = Trainer(**trainer_options)
    model = CustomModel()
    # overriding `transfer_batch_to_device` must be rejected under DP
    with pytest.raises(MisconfigurationException, match=r"Overriding `transfer_batch_to_device` is not .* in DP"):
        trainer.fit(model)
    class CustomModel(BoringModel):
        def on_before_batch_transfer(self, batch, dataloader_idx):
            batch += 1
            return batch
    trainer = Trainer(**trainer_options)
    model = CustomModel()
    # same for `on_before_batch_transfer`
    with pytest.raises(MisconfigurationException, match=r"Overriding `on_before_batch_transfer` is not .* in DP"):
        trainer.fit(model)
    class CustomModel(BoringModel):
        def on_after_batch_transfer(self, batch, dataloader_idx):
            batch += 1
            return batch
    trainer = Trainer(**trainer_options)
    model = CustomModel()
    # and for `on_after_batch_transfer`
    with pytest.raises(MisconfigurationException, match=r"Overriding `on_after_batch_transfer` is not .* in DP"):
        trainer.fit(model)
@RunIf(min_gpus=2)
def test_dp_training_step_dict(tmpdir):
    """This test verifies that dp properly reduces dictionaries."""
    model = ReductionTestModel()
    # remove the *_step_end hooks so DP's automatic reduction is exercised
    model.training_step_end = None
    model.validation_step_end = None
    model.test_step_end = None
    trainer = pl.Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        gpus=2,
        strategy="dp",
    )
    trainer.fit(model)
    trainer.test(model)
@RunIf(min_gpus=2)
def test_dp_batch_not_moved_to_device_explicitly(tmpdir):
    """Test that with DP, batch is not moved to the device explicitly."""
    class CustomModel(BoringModel):
        def on_train_batch_start(self, batch, *args, **kargs):
            # before the step the batch is still on CPU
            assert not batch.is_cuda
        def training_step(self, batch, batch_idx):
            # DP itself scatters the batch onto the GPU for the step
            assert batch.is_cuda
            return super().training_step(batch, batch_idx)
    trainer = pl.Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        accelerator="gpu",
        devices=2,
        strategy="dp",
    )
    trainer.fit(CustomModel())
| 31.535565 | 114 | 0.687939 |
dd5c7ea49b8e7a2ef7991055f8c4c01679600e91 | 455 | py | Python | Day 3/IntegerToBinary.py | RajShashwat/100daysofcode | d4642ad6278a40a21d74312e9b06495b1edef05a | [
"MIT"
] | null | null | null | Day 3/IntegerToBinary.py | RajShashwat/100daysofcode | d4642ad6278a40a21d74312e9b06495b1edef05a | [
"MIT"
] | null | null | null | Day 3/IntegerToBinary.py | RajShashwat/100daysofcode | d4642ad6278a40a21d74312e9b06495b1edef05a | [
"MIT"
] | null | null | null | #imported the Stack.py
from Stack import Stack
def intToBinary(num: int) -> str:
    """Convert a non-negative integer to its binary-string representation.

    Pushes the remainders of repeated division by 2 onto a stack and pops
    them to obtain the digits in the correct (most-significant-first) order.

    Args:
        num: non-negative integer to convert.

    Returns:
        Binary representation without a prefix, e.g. 5 -> "101".
    """
    if num == 0:
        # BUGFIX: the division loop never runs for 0, which returned ""
        return "0"
    stack = Stack()
    while num > 0:
        stack.push(num % 2)
        num //= 2
    digits = []
    while not stack.is_empty():
        digits.append(str(stack.pop()))
    # join instead of repeated string concatenation
    return "".join(digits)
# read the number to convert from the user
num = int(input("Enter a Number: "))
if num < 0:
    # the conversion only supports non-negative integers
    print("Enter a Positive Number")
    quit()
result = intToBinary(num)
print("Binary: ",result)
| 18.958333 | 36 | 0.586813 |
a61507712d07f15dfaeb8ca9f5d8ec7e1e4ccfeb | 14,325 | py | Python | train_frcnn.py | ravali27/Keras-FasterRCNN | e0847e18a85c988ae8baaba50ea40a2a7c05c855 | [
"MIT"
] | null | null | null | train_frcnn.py | ravali27/Keras-FasterRCNN | e0847e18a85c988ae8baaba50ea40a2a7c05c855 | [
"MIT"
] | null | null | null | train_frcnn.py | ravali27/Keras-FasterRCNN | e0847e18a85c988ae8baaba50ea40a2a7c05c855 | [
"MIT"
] | null | null | null | from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import os
import tensorflow as tf
import keras
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from keras.utils import generic_utils
from keras.callbacks import TensorBoard
# TensorFlow 1.x session setup: allocate GPU memory on demand instead of
# reserving all of it up front
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
session = tf.Session(config=conf)
def write_log(callback, names, logs, batch_no):
    """Write scalar values to TensorBoard through a Keras TensorBoard callback.

    Uses the TF1 `tf.Summary` protobuf API: one summary per (name, value)
    pair is added to the callback's event writer at step `batch_no`.

    Args:
        callback: Keras TensorBoard callback exposing a `.writer`.
        names: sequence of tag names for the scalars.
        logs: sequence of scalar values, aligned with `names`.
        batch_no: global step the values are recorded at.
    """
    for name, value in zip(names, logs):
        summary = tf.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value
        summary_value.tag = name
        callback.writer.add_summary(summary, batch_no)
        # NOTE(review): flushing once per value is wasteful; a single flush
        # after the loop would suffice — left unchanged to preserve behavior
        callback.writer.flush()
sys.setrecursionlimit(40000)
# ---------------------------------------------------------------------------
# Command-line interface for the Faster R-CNN training run.
# NOTE: uses the (long-deprecated) optparse module; options are copied into a
# keras_frcnn Config object further below.
# ---------------------------------------------------------------------------
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
                  default="pascal_voc")
parser.add_option("-n", "--num_rois", dest="num_rois", help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
# Data-augmentation toggles (all disabled by default).
parser.add_option("--hf", dest="horizontal_flips", help="Augment with horizontal flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--fm", dest="freq_mask", help="Augment with frequency mask in training. (Default=false).", action="store_true", default=False)
parser.add_option("--tm", dest="time_mask", help="Augment with time mask in training. (Default=false).", action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips", help="Augment with vertical flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90", help="Augment with 90 degree rotations in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--num_epochs", dest="num_epochs", help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename",
                  help="Location to store all the metadata related to the training (to be used when testing).",
                  default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path", help="Input path for weights. If not specified, will try to load default weights provided by keras.")
(options, args) = parser.parse_args()
if not options.train_path:   # if filename is not given
    parser.error('Error: path to training data must be specified. Pass --path to command line')
# Pick the annotation-parser implementation; both modules expose get_data().
if options.parser == 'pascal_voc':
    from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
    from keras_frcnn.simple_parser import get_data
else:
    raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.use_freq_mask = bool(options.freq_mask)
C.use_time_mask = bool(options.time_mask)
C.model_path = options.output_weight_path
C.num_rois = int(options.num_rois)
# Select the backbone; every backbone module exposes the same API surface
# (get_weight_path, get_img_output_length, nn_base, rpn, classifier).
if options.network == 'vgg':
    C.network = 'vgg'
    from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
    from keras_frcnn import resnet as nn
    C.network = 'resnet50'
elif options.network == 'xception':
    from keras_frcnn import xception as nn
    C.network = 'xception'
elif options.network == 'inception_resnet_v2':
    from keras_frcnn import inception_resnet_v2 as nn
    C.network = 'inception_resnet_v2'
else:
    print('Not a valid model')
    raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
    C.base_net_weights = options.input_weight_path
else:
    # set the path to weights based on backend and model
    C.base_net_weights = nn.get_weight_path()
# parser
all_imgs, classes_count, class_mapping = get_data(options.train_path)
# bg
# Ensure an explicit background class exists and is the last entry of
# class_mapping.  NOTE(review): the RoI sampling below reads Y1[0, :, -1] as
# the bg indicator, which presumably relies on 'bg' being last — confirm
# against roi_helpers.calc_iou.
if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print('Num classes (including bg) = {}'.format(len(classes_count)))
# Persist the config so test-time scripts can reproduce the preprocessing.
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
    pickle.dump(C, config_f)
    print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(config_output_filename))
random.shuffle(all_imgs)
num_imgs = len(all_imgs)
# Split by the 'imageset' tag assigned by the annotation parser.
train_imgs = [s for s in all_imgs if s['imageset'] == 'train']
val_imgs = [s for s in all_imgs if s['imageset'] == 'val']
test_imgs = [s for s in all_imgs if s['imageset'] == 'test']
print('Num train samples {}'.format(len(train_imgs)))
print('Num val samples {}'.format(len(val_imgs)))
print('Num test samples {}'.format(len(test_imgs)))
# groundtruth anchor
# Each generator yields (image batch, RPN targets, augmented image metadata).
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='val')
data_gen_test = data_generators.get_anchor_gt(test_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='test')
# Channel ordering depends on the Keras backend ('th' = channels-first).
if K.image_dim_ordering() == 'th':
    input_shape_img = (3, None, None)
else:
    input_shape_img = (None, None, 3)
# input placeholder
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
# base network(feature extractor) (resnet, VGG, Inception, Inception Resnet V2, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
# RPN
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
# detection network
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
# rpn[:2] = (classification output, regression output); rpn[2] is the shared
# feature map and is not trained on directly.
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
print(model_all.summary())
try:
    # load_weights by name
    # some keras application model does not containing name
    # for this kinds of model, we need to re-construct model with naming
    print('loading weights from {}'.format(C.base_net_weights))
    model_rpn.load_weights(C.base_net_weights, by_name=True)
    model_classifier.load_weights(C.base_net_weights, by_name=True)
except:
    # Training proceeds from random initialization if pretrained weights
    # cannot be loaded.
    print('Could not load pretrained model weights. Weights can be found in the keras application folder \
        https://github.com/fchollet/keras/tree/master/keras/applications')
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
# model_all is never trained directly below; it is compiled only so that
# save_weights can be called on it — 'sgd'/'mae' look like placeholder
# settings.  -- TODO confirm
model_all.compile(optimizer='sgd', loss='mae')
# Tensorboard log
log_path = './logs'
if not os.path.isdir(log_path):
    os.mkdir(log_path)
# Tensorboard log
callback = TensorBoard(log_path)
callback.set_model(model_all)
epoch_length = 1000
num_epochs = int(options.num_epochs)
iter_num = 0
train_step = 0
# NOTE(review): this rebinds the name `losses` (which above referred to the
# keras_frcnn losses module) to a numpy array.  It is safe only because both
# compile() calls have already executed — fragile if the code is reordered.
# Columns: rpn_cls, rpn_regr, detector_cls, detector_regr, detector_acc.
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
start_time = time.time()
best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
print('Starting training')
# vis = True
# Main training loop: alternates RPN and detector-head updates per image.
# One "epoch" here is `epoch_length` iterations, not one pass over the data.
for epoch_num in range(num_epochs):
    progbar = generic_utils.Progbar(epoch_length)  # keras progress bar
    print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))
    while True:
        # try:
        # mean overlapping bboxes
        if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
            mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
            rpn_accuracy_rpn_monitor = []
            print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
            if mean_overlapping_bboxes == 0:
                print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
        # data generator X, Y, image
        #print('data generation starts')
        X, Y, img_data = next(data_gen_train)
        # Step 1: train the RPN on this image.
        loss_rpn = model_rpn.train_on_batch(X, Y)
        write_log(callback, ['rpn_cls_loss', 'rpn_reg_loss'], loss_rpn, train_step)
        # Step 2: run the RPN forward and convert its raw outputs to RoIs.
        P_rpn = model_rpn.predict_on_batch(X)
        R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)
        # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
        X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
        if X2 is None:
            # No RoI matched the ground truth for this image; skip it.
            rpn_accuracy_rpn_monitor.append(0)
            rpn_accuracy_for_epoch.append(0)
            continue
        # sampling positive/negative samples
        # Last channel of Y1 marks background RoIs (1 = bg, 0 = foreground).
        # -- presumably; verify against roi_helpers.calc_iou.
        neg_samples = np.where(Y1[0, :, -1] == 1)
        pos_samples = np.where(Y1[0, :, -1] == 0)
        if len(neg_samples) > 0:
            neg_samples = neg_samples[0]
        else:
            neg_samples = []
        if len(pos_samples) > 0:
            pos_samples = pos_samples[0]
        else:
            pos_samples = []
        rpn_accuracy_rpn_monitor.append(len(pos_samples))
        rpn_accuracy_for_epoch.append((len(pos_samples)))
        if C.num_rois > 1:
            # Fill up to half the minibatch with positives; pad the remainder
            # with negatives (with replacement if too few are available).
            if len(pos_samples) < C.num_rois//2:
                selected_pos_samples = pos_samples.tolist()
            else:
                selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
            try:
                selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
            except:
                selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
            sel_samples = selected_pos_samples + selected_neg_samples
        else:
            # in the extreme case where num_rois = 1, we pick a random pos or neg sample
            selected_pos_samples = pos_samples.tolist()
            selected_neg_samples = neg_samples.tolist()
            # NOTE(review): random.choice returns a scalar index here (not a
            # list), so the fancy indexing below selects a single RoI.
            if np.random.randint(0, 2):
                sel_samples = random.choice(neg_samples)
            else:
                sel_samples = random.choice(pos_samples)
        # Step 3: train the detector head on the sampled RoIs.
        loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
        write_log(callback, ['detection_cls_loss', 'detection_reg_loss', 'detection_acc'], loss_class, train_step)
        train_step += 1
        losses[iter_num, 0] = loss_rpn[1]
        losses[iter_num, 1] = loss_rpn[2]
        losses[iter_num, 2] = loss_class[1]
        losses[iter_num, 3] = loss_class[2]
        losses[iter_num, 4] = loss_class[3]
        iter_num += 1
        progbar.update(iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])),
                                  ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))])
        if iter_num == epoch_length:
            # End of an "epoch": report means, log to TensorBoard, and keep
            # the best-so-far checkpoint.
            loss_rpn_cls = np.mean(losses[:, 0])
            loss_rpn_regr = np.mean(losses[:, 1])
            loss_class_cls = np.mean(losses[:, 2])
            loss_class_regr = np.mean(losses[:, 3])
            class_acc = np.mean(losses[:, 4])
            mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
            rpn_accuracy_for_epoch = []
            if C.verbose:
                print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
                print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                print('Loss RPN regression: {}'.format(loss_rpn_regr))
                print('Loss Detector classifier: {}'.format(loss_class_cls))
                print('Loss Detector regression: {}'.format(loss_class_regr))
                print('Elapsed time: {}'.format(time.time() - start_time))
            curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
            iter_num = 0
            # NOTE(review): start_time is reset BEFORE the write_log below,
            # so the logged 'Elapsed_time' is always ~0 — likely a bug;
            # confirm intent before relying on this metric.
            start_time = time.time()
            write_log(callback,
                      ['Elapsed_time', 'mean_overlapping_bboxes', 'mean_rpn_cls_loss', 'mean_rpn_reg_loss',
                       'mean_detection_cls_loss', 'mean_detection_reg_loss', 'mean_detection_acc', 'total_loss'],
                      [time.time() - start_time, mean_overlapping_bboxes, loss_rpn_cls, loss_rpn_regr,
                       loss_class_cls, loss_class_regr, class_acc, curr_loss],
                      epoch_num)
            if curr_loss < best_loss:
                if C.verbose:
                    print('Total loss decreased from {} to {}, saving weights'.format(best_loss,curr_loss))
                best_loss = curr_loss
                model_all.save_weights(C.model_path)
            break
        # except Exception as e:
        #     print('Exception: {}'.format(e))
        #     continue
print('Training complete, exiting.')
| 41.642442 | 199 | 0.690611 |
fa14e5ef62e6da799e5bef557d699888a40e77e6 | 4,584 | py | Python | lldb/packages/Python/lldbsuite/test/lang/c/conflicting-symbol/TestConflictingSymbol.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/lang/c/conflicting-symbol/TestConflictingSymbol.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/lang/c/conflicting-symbol/TestConflictingSymbol.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | """Test that conflicting symbols in different shared libraries work correctly"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestConflictingSymbols(TestBase):
    """Exercise expression evaluation when two shared libraries ('One' and
    'Two') each define a global named ``conflicting_symbol``."""

    mydir = TestBase.compute_mydir(__file__)
    # This suite builds its own binaries; no per-debug-info variants needed.
    NO_DEBUG_INFO_TESTCASE = True

    def setUp(self):
        TestBase.setUp(self)
        # Build-artifact directories for the two conflicting shared libraries.
        lldbutil.mkdir_p(self.getBuildArtifact("One"))
        lldbutil.mkdir_p(self.getBuildArtifact("Two"))

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24489")
    def test_conflicting_symbols(self):
        """Stop in One.c, Two.c and main.c in turn and check which library's
        ``conflicting_symbol`` the expression evaluator resolves."""
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Register our shared libraries for remote targets so they get
        # automatically uploaded
        environment = self.registerSharedLibrariesWithTarget(
            target, ['One', 'Two'])

        # NOTE(review): num_expected_locations=-2 — negative values appear to
        # relax the exact location-count check; confirm against lldbutil.
        lldbutil.run_break_set_by_source_regexp(self, '// break here',
                                                extra_options='-f One.c', num_expected_locations=-2)
        lldbutil.run_break_set_by_source_regexp(self, '// break here',
                                                extra_options='-f Two.c', num_expected_locations=-2)
        lldbutil.run_break_set_by_source_regexp(self, '// break here',
                                                extra_options='-f main.c', num_expected_locations=1)

        process = target.LaunchSimple(
            None, environment, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)

        # First stop: inside One.c.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs=[' resolved, hit count = 1'])
        # This should display correctly.
        self.expect(
            "expr (unsigned long long)conflicting_symbol",
            "Symbol from One should be found",
            substrs=[
                "11111"])

        self.runCmd("continue", RUN_SUCCEEDED)

        # Second stop: inside Two.c — the other library's symbol should win.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs=[' resolved, hit count = 1'])
        self.expect(
            "expr (unsigned long long)conflicting_symbol",
            "Symbol from Two should be found",
            substrs=[
                "22222"])

        self.runCmd("continue", RUN_SUCCEEDED)

        # Third stop: in main.c, where neither library is "closer" — lldb
        # should refuse to guess and report the ambiguity.
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs=[' resolved, hit count = 1'])
        self.expect(
            "expr (unsigned long long)conflicting_symbol",
            "An error should be printed when symbols can't be ordered",
            error=True,
            substrs=[
                "Multiple internal symbols"])

    @expectedFailureAll(bugnumber="llvm.org/pr35043")
    def test_shadowed(self):
        """A local declaration of ``conflicting_symbol`` in the expression
        should shadow the ambiguous globals and evaluate cleanly."""
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Register our shared libraries for remote targets so they get
        # automatically uploaded
        environment = self.registerSharedLibrariesWithTarget(
            target, ['One', 'Two'])

        lldbutil.run_break_set_by_source_regexp(self, '// break here',
                                                extra_options='-f main.c', num_expected_locations=1)

        process = target.LaunchSimple(
            None, environment, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # As we are shadowing the conflicting symbol, there should be no
        # ambiguity in this expression.
        self.expect(
            "expr int conflicting_symbol = 474747; conflicting_symbol",
            substrs=[ "474747"])
| 36.967742 | 80 | 0.620201 |
b1e1c2b4ba7965f21d25517589d23a459edcd8a8 | 3,859 | py | Python | frontend/widgets/node.py | ludica-squamata/BehaviourTreeEditor_v2 | 65ca608f7086cd2b4f0cc1eab7b714959b312b94 | [
"MIT"
] | null | null | null | frontend/widgets/node.py | ludica-squamata/BehaviourTreeEditor_v2 | 65ca608f7086cd2b4f0cc1eab7b714959b312b94 | [
"MIT"
] | null | null | null | frontend/widgets/node.py | ludica-squamata/BehaviourTreeEditor_v2 | 65ca608f7086cd2b4f0cc1eab7b714959b312b94 | [
"MIT"
] | null | null | null | from frontend.globals import WidgetHandler, Renderer, COLOR_UNSELECTED, COLOR_SELECTED
from pygame import Surface, font, transform, draw
from backend.eventhandler import EventHandler
from .connection import toggle_connection
from .basewidget import BaseWidget
class Node(BaseWidget):
    """A behaviour-tree node widget.

    Draws itself as a small numbered square and maintains parent/child links
    to the other nodes it is connected to.
    """

    # Class-level defaults shared by every node instance.
    idx = 0
    order = 'b'
    tamanio = 16  # base square side, in pixels
    named = False
    color_base = COLOR_UNSELECTED
    color_font = COLOR_SELECTED
    color_box = COLOR_SELECTED
    numerable = True
    selectable = True
    editable = True

    def __init__(self, data):
        """Build a node from a data dict with 'text', 'color' and 'pos' keys."""
        super().__init__()
        self.children = []
        self.fuente = font.SysFont('Verdana', 10)
        self.layer = 1
        # An empty label means this node is a leaf.
        self.tipo = 'leaf' if data['text'] == '' else data['text']
        WidgetHandler.add_widget(self)
        Renderer.add_widget(self)
        self.image = self.create()
        if data['color'] is not None:
            self.colorize(data['color'])
        self.text = data['text']
        self.rect = self.image.get_rect(center=data['pos'])
        EventHandler.register(self.toggle_selection, 'select', 'deselect')

    def connect(self, other):
        """Attach `other` as a child; two leaves may never be linked."""
        if self.tipo == 'leaf' and other.tipo == 'leaf':
            raise TypeError('Two leaves cannot conect to each other')
        if other not in self.children:
            toggle_connection(self, other)
            self.children.append(other)
        # Re-stamp parenthood on every child after each connection.
        for node in self.children:
            node.parent = self

    def disconnect(self, other):
        """Detach `other` from this node's children, if it is one."""
        if other not in self.children:
            return
        toggle_connection(self, other, value=False)
        self.children.remove(other)

    def get_idx(self):
        """Index of this node among all numerable widgets."""
        numerables = [w for w in WidgetHandler.widgets.sprites() if w.numerable]
        return numerables.index(self)

    def colorize(self, color_namer):
        """Fill the node with a color; accepts a raw color or a named color."""
        base = getattr(color_namer, 'color', color_namer)
        self.named = hasattr(color_namer, 'name')
        self.color_base = base
        # Choose a contrasting pen color from the relative luminance.
        luminance = 0.2126 * base.r + 0.7152 * base.g + 0.0722 * base.b
        contrast = COLOR_SELECTED if luminance < 50 else COLOR_UNSELECTED
        self.color_font = contrast
        self.color_box = contrast
        self.image.fill(self.color_base)

    def create(self):
        """Return a fresh square surface for this node."""
        side = self.size
        return Surface((side, side))

    @property
    def size(self):
        """Square side in pixels, widened so the index digits always fit."""
        digits = len(str(self.get_idx()))
        return {2: 20, 3: 25}.get(digits, self.tamanio)

    def update(self, *args):
        """Redraw the node: rescale the square and blit the index label."""
        self.idx = self.get_idx()
        label = self.fuente.render(str(self.idx), 1, self.color_font,
                                   self.color_base)
        side = max(self.size, self.tamanio)
        self.image = transform.scale(self.image, (side, side))
        self.rect = self.image.get_rect(center=self.rect.center)
        self.image.blit(label,
                        label.get_rect(center=self.image.get_rect().center))

    def __repr__(self):
        return '{} #{}'.format(self.tipo, self.idx)

    def __int__(self):
        return self.idx

    def __str__(self):
        return str(self.idx)

    def kill(self):
        """Unregister from the handlers before dying."""
        WidgetHandler.del_widget(self)
        Renderer.del_widget(self)
        super().kill()

    @property
    def lead(self):
        """Child indices: a list if several, a bare int if one, else None."""
        if len(self.children) > 1:
            return [int(child) for child in self.children]
        if len(self.children) == 1:
            return int(self.children[0])
        return None

    def select(self):
        """Highlight the node by drawing a 1px frame around it."""
        super().select()
        frame = self.rect.copy()
        draw.rect(self.image, self.color_box, [0, 0, frame.w, frame.h], 1)

    def deselect(self):
        """Remove the highlight by repainting the base color."""
        super().deselect()
        self.image.fill(self.color_base)
# Create a new Node whenever an 'AddNode' event fires; e.data carries the
# constructor's data dict (text, color, pos).
EventHandler.register(lambda e: Node(e.data), 'AddNode')
| 31.373984 | 94 | 0.590827 |
3e60cf91952eee452c477da793a6e6b27a34f7bd | 2,748 | py | Python | configs/common/SimpleOpts.py | caihuayi/gem5 | 96fce476785a834f102ae69a895e661cf08e47cd | [
"BSD-3-Clause"
] | 16 | 2020-09-24T00:17:36.000Z | 2021-08-12T06:11:52.000Z | configs/common/SimpleOpts.py | caihuayi/gem5 | 96fce476785a834f102ae69a895e661cf08e47cd | [
"BSD-3-Clause"
] | 5 | 2021-01-27T23:09:06.000Z | 2022-01-07T03:19:39.000Z | configs/common/SimpleOpts.py | caihuayi/gem5 | 96fce476785a834f102ae69a895e661cf08e47cd | [
"BSD-3-Clause"
] | 15 | 2020-11-18T00:15:28.000Z | 2021-12-12T03:18:34.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Jason Power
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from __future__ import absolute_import
# NOTE: because this string literal follows the import statements above, it is
# an ordinary expression statement, not the module docstring (__doc__).
""" Options wrapper for simple gem5 configuration scripts
This module wraps the optparse class so that we can register options
from each class instead of only from the configuration script.
"""
# Module-level variable to track if we've called the parse_args function yet
called_parse_args = False
# For fatal
import m5
# import the options parser
from optparse import OptionParser
# add the options we want to be able to control from the command line
# Single shared parser that every module registers its options on.
parser = OptionParser()
def add_option(*args, **kwargs):
    """Register an option on the shared global parser.

    Mirrors OptionParser.add_option, but aborts (via m5.fatal) on duplicate
    option strings or on any registration after parse_args has run.
    """
    duplicated = parser.has_option(args[0]) or (
        len(args) > 1 and parser.has_option(args[1]))
    if duplicated:
        m5.fatal("Duplicate option: %s" % str(args))
    if called_parse_args:
        m5.fatal("Can't add an option after calling SimpleOpts.parse_args")
    parser.add_option(*args, **kwargs)
def parse_args():
    """Parse the command line with the shared parser.

    Marks the module as parsed so later add_option calls can be rejected.
    Returns the (options, args) pair from OptionParser.parse_args.
    """
    global called_parse_args
    called_parse_args = True
    parsed = parser.parse_args()
    return parsed
def set_usage(*args, **kwargs):
    """Forward to OptionParser.set_usage on the shared global parser."""
    parser.set_usage(*args, **kwargs)
def print_help(*args, **kwargs):
    """Forward to OptionParser.print_help on the shared global parser."""
    parser.print_help(*args, **kwargs)
| 36.64 | 76 | 0.760189 |
23f0b77bdc63ebba6d3670cbc0e1250eea5d4698 | 22,418 | py | Python | multiresolution_mesh_creator/src/create_multiresolution_meshes.py | davidackerman/multiresolution-mesh-creator | 1875e52b57804e982c47b9f58a0572f50e778c73 | [
"BSD-3-Clause"
] | 5 | 2021-12-17T16:23:47.000Z | 2022-03-12T20:03:28.000Z | multiresolution_mesh_creator/src/create_multiresolution_meshes.py | janelia-cosem/multiresolution-mesh-creator | c5818da25b79debd08e5e9eb84641eea22eef715 | [
"BSD-3-Clause"
] | 1 | 2021-12-13T04:50:14.000Z | 2021-12-14T08:10:29.000Z | multiresolution_mesh_creator/src/create_multiresolution_meshes.py | janelia-cosem/multiresolution-mesh-creator | c5818da25b79debd08e5e9eb84641eea22eef715 | [
"BSD-3-Clause"
] | 1 | 2021-10-15T23:20:00.000Z | 2021-10-15T23:20:00.000Z | from contextlib import ExitStack
import trimesh
from trimesh.intersections import slice_faces_plane
import numpy as np
from dvidutils import encode_faces_to_custom_drc_bytes
import time
import os
from os import listdir
from os.path import isfile, join, splitext
import dask
from dask.distributed import worker_client
import pyfqmr
from ..util import mesh_util, io_util, dask_util
import logging
# Module-level logger, named after this module's import path.
logger = logging.getLogger(__name__)
def my_slice_faces_plane(vertices, faces, plane_normal, plane_origin):
    """Slice a mesh with a plane, tolerating degenerate cases.

    Thin wrapper around ``trimesh.intersections.slice_faces_plane`` that
    (a) is a no-op on an empty mesh and (b) swallows the specific ValueError
    trimesh raises when the whole mesh lies to one side of the plane,
    returning the inputs unchanged in that case.

    Args:
        vertices: Mesh vertices
        faces: Mesh faces
        plane_normal: Normal of plane
        plane_origin: Origin of plane

    Returns:
        (vertices, faces): the sliced mesh, or the originals unchanged.
    """
    if len(vertices) == 0 or len(faces) == 0:
        return vertices, faces
    try:
        return slice_faces_plane(vertices, faces, plane_normal, plane_origin)
    except ValueError as err:
        # Any other ValueError is a real failure and must propagate.
        if str(err) != "input must be 1D integers!":
            raise
        return vertices, faces
def update_fragment_dict(dictionary, fragment_pos, vertices, faces,
                         lod_0_fragment_pos):
    """Merge one subfragment into the fragment dictionary, in place.

    Fragments above lod 0 are built from 2x2x2 grids of subfragments, so a
    single fragment position accumulates geometry from several slicing
    passes.  The first time `fragment_pos` is seen, a fresh
    ``mesh_util.Fragment`` is created; afterwards the existing fragment is
    extended.  The lod-0 positions are tracked so that empty subfragments
    can still be emitted in the index files later.

    Args:
        dictionary: Maps fragment position -> Fragment (mutated in place)
        fragment_pos: Position of the current-lod fragment
        vertices: Subfragment vertices
        faces: Subfragment faces
        lod_0_fragment_pos: The lod-0 position this subfragment came from
    """
    existing = dictionary.get(fragment_pos)
    if existing is None:
        dictionary[fragment_pos] = mesh_util.Fragment(vertices, faces,
                                                      [lod_0_fragment_pos])
    else:
        existing.update(vertices, faces, lod_0_fragment_pos)
def generate_mesh_decomposition(mesh_path, lod_0_box_size, grid_origin,
                                start_fragment, end_fragment, current_lod,
                                num_chunks):
    """Dask delayed function to decompose a mesh, provided as vertices and
    faces, into fragments of size lod_0_box_size * 2**current_lod. Each
    fragment is also subdivided by 2x2x2. This is performed over a limited
    range of fragments in order to parallelize via dask.

    Args:
        mesh_path: Path to current lod mesh
        lod_0_box_size: Base chunk shape
        grid_origin: The lod 0 mesh grid origin
        start_fragment: Start fragment position (x,y,z) for this dask task
        end_fragment: End fragment position (x,y,z) for this dask task
        current_lod: The current level of detail
        num_chunks: Number of dask chunks per dimension; slab pre-slicing is
            only needed for dimensions that are actually split

    Returns:
        fragments: List of `CompressedFragments` (named tuple), or None if
        no geometry falls inside this task's slab.
    """
    # We load the mesh here because if we load once in
    # generate_all_neuroglancer_meshes and use cluster.scatter onvertices and
    # faces we get this issue about concurrent futures:
    # https://github.com/dask/distributed/issues/4612
    mesh = trimesh.load(mesh_path)
    vertices = mesh.vertices
    faces = mesh.faces
    del mesh
    combined_fragments_dictionary = {}
    fragments = []
    # Unit normals of the yz, xz and xy planes used for slicing.
    nyz, nxz, nxy = np.eye(3)
    if current_lod != 0:
        # Want each chunk for lod>0 to be divisible by 2x2x2 region,
        # so multiply coordinates by 2
        start_fragment *= 2
        end_fragment *= 2
        # 2x2x2 subdividing box size
        sub_box_size = lod_0_box_size * 2**(current_lod - 1)
    else:
        sub_box_size = lod_0_box_size
    # Work in grid-local coordinates.
    vertices -= grid_origin
    # Set up slab for current dask task: trim the mesh down to this task's
    # [start_fragment, end_fragment) range along each split dimension.
    n = np.eye(3)
    for dimension in range(3):
        if num_chunks[dimension] > 1:
            n_d = n[dimension, :]
            plane_origin = n_d * end_fragment[dimension] * sub_box_size
            vertices, faces = my_slice_faces_plane(vertices, faces, -n_d,
                                                   plane_origin)
            if len(vertices) == 0:
                return None
            plane_origin = n_d * start_fragment[dimension] * sub_box_size
            vertices, faces = my_slice_faces_plane(vertices, faces, n_d,
                                                   plane_origin)
            if len(vertices) == 0:
                return None
    # Get chunks of desired size by slicing in x,y,z and ensure their chunks
    # are divisible by 2x2x2 chunks.  Each pass keeps the near side and
    # re-slices the remainder for the next iteration.
    for x in range(start_fragment[0], end_fragment[0]):
        plane_origin_yz = nyz * (x + 1) * sub_box_size
        vertices_yz, faces_yz = my_slice_faces_plane(vertices, faces, -nyz,
                                                     plane_origin_yz)
        for y in range(start_fragment[1], end_fragment[1]):
            plane_origin_xz = nxz * (y + 1) * sub_box_size
            vertices_xz, faces_xz = my_slice_faces_plane(
                vertices_yz, faces_yz, -nxz, plane_origin_xz)
            for z in range(start_fragment[2], end_fragment[2]):
                plane_origin_xy = nxy * (z + 1) * sub_box_size
                vertices_xy, faces_xy = my_slice_faces_plane(
                    vertices_xz, faces_xz, -nxy, plane_origin_xy)
                lod_0_fragment_position = tuple(np.array([x, y, z]))
                if current_lod != 0:
                    # Coordinates were doubled above, so halve them to get
                    # the parent fragment this 2x2x2 subfragment belongs to.
                    fragment_position = tuple(np.array([x, y, z]) // 2)
                else:
                    fragment_position = lod_0_fragment_position
                update_fragment_dict(combined_fragments_dictionary,
                                     fragment_position, vertices_xy, faces_xy,
                                     list(lod_0_fragment_position))
                # Keep the far side for the next z iteration.
                vertices_xz, faces_xz = my_slice_faces_plane(
                    vertices_xz, faces_xz, nxy, plane_origin_xy)
            vertices_yz, faces_yz = my_slice_faces_plane(
                vertices_yz, faces_yz, nxz, plane_origin_xz)
        vertices, faces = my_slice_faces_plane(vertices, faces, nyz,
                                               plane_origin_yz)
    # Return combined_fragments_dictionary
    # Draco-encode each assembled fragment (normals are passed as zeros).
    for fragment_pos, fragment in combined_fragments_dictionary.items():
        current_box_size = lod_0_box_size * 2**current_lod
        draco_bytes = encode_faces_to_custom_drc_bytes(
            fragment.vertices,
            np.zeros(np.shape(fragment.vertices)),
            fragment.faces,
            np.asarray(3 * [current_box_size]),
            np.asarray(fragment_pos) * current_box_size,
            position_quantization_bits=10)
        if len(draco_bytes) > 12:
            # Then the mesh is not empty (12 bytes appears to be the size of
            # an empty encoding's header — TODO confirm against the encoder).
            fragment = mesh_util.CompressedFragment(
                draco_bytes, np.asarray(fragment_pos), len(draco_bytes),
                np.asarray(fragment.lod_0_fragment_pos))
            fragments.append(fragment)
    return fragments
def pyfqmr_decimate(input_path, output_path, id, lod, ext, decimation_factor,
                    aggressiveness):
    """Decimate a single mesh with pyfqmr and write the result as ply.

    The mesh is read from `input_path`/`id``ext` and simplified down to
    roughly ``len(faces) / decimation_factor**lod`` faces (with a floor of 4
    faces so the simplifier always has a valid target), then written to
    `output_path`/s`lod`/`id`.ply. Intended to be wrapped in `dask.delayed`.

    Args:
        input_path [`str`]: The input path for s0 meshes
        output_path [`str`]: The output path
        id [`int`]: The object id
        lod [`int`]: The current level of detail
        ext [`str`]: The extension of the s0 meshes.
        decimation_factor [`float`]: The factor by which we decimate faces,
            scaled by 2**lod
        aggressiveness [`int`]: Aggressiveness for decimation
    """
    vertices, faces = mesh_util.mesh_loader(f"{input_path}/{id}{ext}")
    # int(): decimation_factor is documented as a float, and floor-dividing
    # by a float yields a float — pyfqmr's target_count expects an int.
    desired_faces = max(int(len(faces) // (decimation_factor**lod)), 4)
    mesh_simplifier = pyfqmr.Simplify()
    mesh_simplifier.setMesh(vertices, faces)
    # Drop our references early to reduce peak memory while simplifying.
    del vertices
    del faces
    mesh_simplifier.simplify_mesh(target_count=desired_faces,
                                  aggressiveness=aggressiveness,
                                  preserve_border=False,
                                  verbose=False)
    vertices, faces, _ = mesh_simplifier.getMesh()
    del mesh_simplifier
    mesh = trimesh.Trimesh(vertices, faces)
    del vertices
    del faces
    # trimesh infers the ply format from the output file extension.
    _ = mesh.export(f"{output_path}/s{lod}/{id}.ply")
def generate_decimated_meshes(input_path, output_path, lods, ids, ext,
                              decimation_factor, aggressiveness):
    """Schedule decimation of every mesh id at every requested lod via dask.

    For lod 0 the originals are exposed unchanged through a symlink
    `output_path`/mesh_lods/s0 -> `input_path`; for every higher lod a
    `pyfqmr_decimate` task is queued per mesh id, and all queued tasks are
    executed in a single `dask.compute` call.

    Args:
        input_path (`str`): Input mesh paths
        output_path (`str`): Output mesh paths
        lods (`int`): Levels of detail over which to have mesh
        ids (`list`): All mesh ids
        ext (`str`): Input mesh formats.
        decimation_factor (`float`): The factor by which we decimate faces,
            scaled by 2**lod
        aggressiveness (`int`): Aggressiveness for decimation
    """
    delayed_tasks = []
    for lod in lods:
        if lod == 0:
            os.makedirs(f"{output_path}/mesh_lods/", exist_ok=True)
            # lod 0 needs no decimation: just link the originals in as s0.
            if not os.path.exists(f"{output_path}/mesh_lods/s0"):
                os.system(
                    f"ln -s {os.path.abspath(input_path)}/ {os.path.abspath(output_path)}/mesh_lods/s0"
                )
        else:
            os.makedirs(f"{output_path}/mesh_lods/s{lod}", exist_ok=True)
        for mesh_id in ids:
            task = dask.delayed(pyfqmr_decimate)(input_path,
                                                 f"{output_path}/mesh_lods",
                                                 mesh_id, lod, ext,
                                                 decimation_factor,
                                                 aggressiveness)
            delayed_tasks.append(task)
    dask.compute(*delayed_tasks)
def generate_neuroglancer_multires_mesh(output_path, num_workers, id, lods,
                                        original_ext, lod_0_box_size):
    """Dask delayed function to generate multiresolution mesh in neuroglancer
    mesh format using prewritten meshes at different levels of detail.

    This function generates the neuroglancer mesh for a single mesh, and
    parallelizes the mesh creation over `num_workers` by splitting the mesh in
    the x-direction into `num_workers` fragments, each of which is sent to a
    worker to be further subdivided.

    Args:
        output_path (`str`): Output path to writeout neuroglancer mesh
        num_workers (`int`): Number of workers for dask
        id (`int`): Mesh id
        lods (`list`): List of levels of detail
        original_ext (`str`): Original mesh file extension
        lod_0_box_size (`int`): Box size in lod 0 coordinates
    """
    with ExitStack() as stack:
        if num_workers > 1:
            # Worker client context really slows things down a lot, so only
            # enter it if we will actually parallelize.
            client = stack.enter_context(worker_client())

        os.makedirs(f"{output_path}/multires", exist_ok=True)
        # Remove any stale output for this id from a previous run.
        os.system(
            f"rm -rf {output_path}/multires/{id} {output_path}/multires/{id}.index"
        )

        results = []
        for idx, current_lod in enumerate(lods):
            # s0 keeps the original file extension; decimated lods are .ply.
            if current_lod == 0:
                mesh_path = f"{output_path}/mesh_lods/s{current_lod}/{id}{original_ext}"
            else:
                mesh_path = f"{output_path}/mesh_lods/s{current_lod}/{id}.ply"

            vertices, _ = mesh_util.mesh_loader(mesh_path)

            if current_lod == 0:
                # Anchor the fragment grid one max-sized box below the lod-0
                # minimum so that every lod shares the same grid origin.
                max_box_size = lod_0_box_size * 2**lods[-1]
                grid_origin = (vertices.min(axis=0) // max_box_size -
                               1) * max_box_size
            vertices -= grid_origin

            current_box_size = lod_0_box_size * 2**current_lod
            start_fragment = np.maximum(
                vertices.min(axis=0) // current_box_size - 1,
                np.array([0, 0, 0])).astype(int)
            end_fragment = (vertices.max(axis=0) // current_box_size +
                            1).astype(int)
            del vertices

            # Want to divide the mesh up into upto num_workers chunks. We do
            # that by first subdividing the largest dimension as much as
            # possible, followed by the next largest dimension etc so long
            # as we don't exceed num_workers slices. If we instead slice each
            # dimension once, before slicing any dimension twice etc, it would
            # increase the number of mesh slice operations we perform, which
            # seems slow.
            max_number_of_chunks = (end_fragment - start_fragment)
            dimensions_sorted = np.argsort(-max_number_of_chunks)
            num_chunks = np.array([1, 1, 1])
            for _ in range(num_workers + 1):
                for d in dimensions_sorted:
                    if num_chunks[d] < max_number_of_chunks[d]:
                        num_chunks[d] += 1
                        if np.prod(num_chunks) > num_workers:
                            num_chunks[d] -= 1
                            break

            # FIX: was `.astype(np.int)`; the `np.int` alias was deprecated
            # in NumPy 1.20 and removed in 1.24. The builtin `int` is the
            # documented replacement (and matches `.astype(int)` above).
            stride = np.ceil(1.0 * (end_fragment - start_fragment) /
                             num_chunks).astype(int)

            # Scattering here, unless broadcast=True, causes this issue:
            # https://github.com/dask/distributed/issues/4612. But that is
            # slow so we are currently electing to read the meshes each time
            # within generate_mesh_decomposition.
            # vertices_to_send = client.scatter(vertices, broadcast=True)
            # faces_to_send = client.scatter(faces, broadcast=True)

            decomposition_results = []
            for x in range(start_fragment[0], end_fragment[0], stride[0]):
                for y in range(start_fragment[1], end_fragment[1], stride[1]):
                    for z in range(start_fragment[2], end_fragment[2],
                                   stride[2]):
                        current_start_fragment = np.array([x, y, z])
                        current_end_fragment = current_start_fragment + stride
                        if num_workers == 1:
                            # then we aren't parallelizing again
                            decomposition_results.append(
                                generate_mesh_decomposition(
                                    mesh_path, lod_0_box_size, grid_origin,
                                    current_start_fragment,
                                    current_end_fragment, current_lod,
                                    num_chunks))
                        else:
                            results.append(
                                dask.delayed(generate_mesh_decomposition)(
                                    mesh_path, lod_0_box_size, grid_origin,
                                    current_start_fragment,
                                    current_end_fragment, current_lod,
                                    num_chunks))

            if num_workers > 1:
                client.rebalance()
                decomposition_results = dask.compute(*results)
                results = []

            # Remove empty slabs
            decomposition_results = [
                fragments for fragments in decomposition_results if fragments
            ]

            fragments = [
                fragment for fragments in decomposition_results
                for fragment in fragments
            ]
            del decomposition_results

            mesh_util.write_mesh_files(
                f"{output_path}/multires", f"{id}", grid_origin, fragments,
                current_lod, lods[:idx + 1],
                np.asarray([lod_0_box_size, lod_0_box_size, lod_0_box_size]))
            del fragments
def generate_all_neuroglancer_multires_meshes(output_path, num_workers, ids,
                                              lods, original_ext,
                                              lod_0_box_size):
    """Generate all neuroglancer multiresolution meshes for `ids`. Calls dask
    delayed function `generate_neuroglancer_multires_mesh` for each id.

    Args:
        output_path (`str`): Output path to write out neuroglancer mesh
        num_workers (`int`): Number of workers for dask
        ids (`list`): List of mesh ids
        lods (`list`): List of levels of detail
        original_ext (`str`): Original mesh file extension
        lod_0_box_size (`int`): Box size in lod 0 coordinates
    """
    def get_number_of_subtask_workers(output_path, ids, original_ext,
                                      num_workers):
        # Given a maximum number of workers, this function gets the maximum
        # workers for a given object based on sizes. This is to prevent
        # overloading dask with hundreds of thousands of nested tasks when
        # lots of small objects are present
        total_file_size = 0
        # FIX: was dtype=np.int / .astype(np.int); the `np.int` alias was
        # deprecated in NumPy 1.20 and removed in 1.24. The builtin `int`
        # (the alias' former target) is the documented replacement.
        file_sizes = np.zeros((len(ids), ), dtype=int)
        for idx, id in enumerate(ids):
            current_size = os.stat(
                f"{output_path}/mesh_lods/s0/{id}{original_ext}").st_size
            total_file_size += current_size
            file_sizes[idx] = current_size
        # Apportion workers proportionally to each mesh's lod-0 file size,
        # rounding up so every mesh receives at least one worker.
        num_workers_per_byte = num_workers / total_file_size
        num_subtask_workers = np.ceil(file_sizes *
                                      num_workers_per_byte).astype(int)
        return num_subtask_workers

    num_subtask_workers = get_number_of_subtask_workers(
        output_path, ids, original_ext, num_workers)
    results = []
    for idx, id in enumerate(ids):
        results.append(
            dask.delayed(generate_neuroglancer_multires_mesh)(
                output_path, num_subtask_workers[idx], id, lods, original_ext,
                lod_0_box_size))
    dask.compute(*results)
def main():
    """Entry point: build neuroglancer multiresolution meshes.

    Reads the run configuration, decimates the input meshes for each level
    of detail (unless skipped), assembles the multiresolution neuroglancer
    meshes, and writes the top-level info/segment-properties files.
    """

    # Get information regarding run
    submission_directory = os.getcwd()
    args = io_util.parser_params()
    num_workers = args.num_workers
    required_settings, optional_decimation_settings = io_util.read_run_config(
        args.config_path)

    # Setup config parameters
    input_path = required_settings['input_path']
    output_path = required_settings['output_path']
    num_lods = required_settings['num_lods']
    lod_0_box_size = required_settings['box_size']
    skip_decimation = optional_decimation_settings['skip_decimation']
    decimation_factor = optional_decimation_settings['decimation_factor']
    aggressiveness = optional_decimation_settings['aggressiveness']
    delete_decimated_meshes = optional_decimation_settings[
        'delete_decimated_meshes']

    # Change execution directory
    execution_directory = dask_util.setup_execution_directory(
        args.config_path, logger)
    logpath = f'{execution_directory}/output.log'

    # Start mesh creation
    with io_util.tee_streams(logpath):
        try:
            os.chdir(execution_directory)
            lods = list(range(num_lods))
            # Every regular file in input_path is treated as one mesh; the
            # extension of the first file is assumed to apply to all.
            mesh_files = [
                f for f in listdir(input_path) if isfile(join(input_path, f))
            ]
            mesh_ids = [splitext(mesh_file)[0] for mesh_file in mesh_files]
            mesh_ext = splitext(mesh_files[0])[1]

            t0 = time.time()

            # Mesh decimation
            if not skip_decimation:
                # Start dask
                with dask_util.start_dask(num_workers, "decimation", logger):
                    with io_util.Timing_Messager("Generating decimated meshes",
                                                 logger):
                        generate_decimated_meshes(input_path, output_path,
                                                  lods, mesh_ids, mesh_ext,
                                                  decimation_factor,
                                                  aggressiveness)

            # Restart dask to clean up cluster before multires assembly
            with dask_util.start_dask(num_workers, "multires creation",
                                      logger):
                # Create multiresolution meshes
                with io_util.Timing_Messager("Generating multires meshes",
                                             logger):
                    generate_all_neuroglancer_multires_meshes(
                        output_path, num_workers, mesh_ids, lods, mesh_ext,
                        lod_0_box_size)

            # Writing out top-level files
            with io_util.Timing_Messager(
                    "Writing info and segment properties files", logger):
                multires_output_path = f"{output_path}/multires"
                mesh_util.write_segment_properties_file(multires_output_path)
                mesh_util.write_info_file(multires_output_path)

            if not skip_decimation and delete_decimated_meshes:
                with io_util.Timing_Messager("Deleting decimated meshes",
                                             logger):
                    os.system(f"rm -rf {output_path}/mesh_lods")

            io_util.print_with_datetime(
                f"Complete! Elapsed time: {time.time() - t0}", logger)
        finally:
            # Always restore the directory the job was submitted from,
            # even if mesh generation raised.
            os.chdir(submission_directory)
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    """Run main function
    """
    main()
| 41.133945 | 121 | 0.59256 |
430f743823c77420ff9e66b2a6190a4b3916b2b1 | 1,768 | py | Python | src/268.missing-number.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/268.missing-number.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/268.missing-number.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=268 lang=python3
#
# [268] Missing Number
#
# https://leetcode.com/problems/missing-number/description/
#
# algorithms
# Easy (56.30%)
# Likes: 3474
# Dislikes: 2608
# Total Accepted: 734K
# Total Submissions: 1.3M
# Testcase Example: '[3,0,1]'
#
# Given an array nums containing n distinct numbers in the range [0, n], return
# the only number in the range that is missing from the array.
#
# Follow up: Could you implement a solution using only O(1) extra space
# complexity and O(n) runtime complexity?
#
#
# Example 1:
#
#
# Input: nums = [3,0,1]
# Output: 2
# Explanation: n = 3 since there are 3 numbers, so all numbers are in the range
# [0,3]. 2 is the missing number in the range since it does not appear in
# nums.
#
#
# Example 2:
#
#
# Input: nums = [0,1]
# Output: 2
# Explanation: n = 2 since there are 2 numbers, so all numbers are in the range
# [0,2]. 2 is the missing number in the range since it does not appear in
# nums.
#
#
# Example 3:
#
#
# Input: nums = [9,6,4,2,3,5,7,0,1]
# Output: 8
# Explanation: n = 9 since there are 9 numbers, so all numbers are in the range
# [0,9]. 8 is the missing number in the range since it does not appear in
# nums.
#
#
# Example 4:
#
#
# Input: nums = [0]
# Output: 1
# Explanation: n = 1 since there is 1 number, so all numbers are in the range
# [0,1]. 1 is the missing number in the range since it does not appear in
# nums.
#
#
#
# Constraints:
#
#
# n == nums.length
# 1 <= n <= 10^4
# 0 <= nums[i] <= n
# All the numbers of nums are unique.
#
#
#
from typing import List
# @lc code=start
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Return the single integer in [0, len(nums)] absent from `nums`.

        Uses the XOR trick: since x ^ x == 0, XOR-ing every index 0..n
        together with every value cancels all present numbers and leaves
        exactly the missing one. O(n) time, O(1) extra space, and no risk
        of large intermediate sums.
        """
        missing = len(nums)
        for index, value in enumerate(nums):
            missing ^= index ^ value
        return missing
# @lc code=end
| 21.301205 | 79 | 0.63914 |
4ca6c3515a4273028b5ccfde5ecb027929e5d187 | 1,860 | py | Python | training/src/tests/tests/python/dilated_conv1d.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/dilated_conv1d.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/dilated_conv1d.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
# First case: minimal dilated Conv1d, single output channel, no bias.
# The script prints inputs, outputs and gradients at high precision;
# NOTE(review): presumably these printed tensors are reference values for
# the framework's unit tests — confirm against the consuming tests.
torch.manual_seed(0)
torch.set_printoptions(precision=8)

m1 = torch.nn.Conv1d(2, 1, 2, stride=1, padding=0, dilation=2, bias=False)

input = torch.randn(1, 2, 5, requires_grad=True)
print("Input: ", input)
result = m1(input)
print("Result: ", result)
# Sum-reduce so backward() needs no explicit gradient argument; the
# resulting input/weight gradients are then printed.
result.sum().backward()
print("Gradient for input: ", input.grad)
print("Gradient for weights: ", m1.weight.grad)

# Second case: multi-channel, strided, padded, dilated Conv1d with bias.
# No new manual_seed here, so this section continues the RNG stream left
# by the first case; reordering statements would change every printed value.
m1 = torch.nn.Conv1d(4, 3, 3, stride=2, padding=1, dilation=3, bias=True)

input = torch.randn(2, 4, 5, requires_grad=True)
print("Input: ", input)
result = m1(input)
print("Result: ", result)
result.sum().backward()
print("Gradient for input: ", input.grad)
print("Gradient for weights: ", m1.weight.grad)
print("Gradient for bias: ", m1.bias.grad)
| 47.692308 | 148 | 0.755376 |
6484882d65bbd26a52f9173df79f028d8389b98f | 28,681 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/blob_service_client.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | null | null | null | sdk/storage/azure-storage-blob/azure/storage/blob/blob_service_client.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | 1 | 2019-06-04T18:12:16.000Z | 2019-06-04T18:12:16.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/blob_service_client.py | jiasli/azure-sdk-for-python | f700299c45cea44064d5156f2bfe3664284f6da4 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # type: ignore
from azure.core.paging import ItemPaged
from ._shared.shared_access_signature import SharedAccessSignature
from ._shared.models import LocationMode, Services
from ._shared.utils import (
StorageAccountHostsMixin,
return_response_headers,
parse_connection_str,
process_storage_error,
parse_query)
from ._generated import AzureBlobStorage
from ._generated.models import StorageErrorException, StorageServiceProperties
from .container_client import ContainerClient
from .blob_client import BlobClient
from .models import ContainerProperties, ContainerPropertiesPaged
if TYPE_CHECKING:
from datetime import datetime
from azure.core.pipeline.transport import HttpTransport
from azure.core.pipeline.policies import HTTPPolicy
from ._shared.models import AccountPermissions, ResourceTypes
from .lease import LeaseClient
from .models import (
BlobProperties,
Logging,
Metrics,
RetentionPolicy,
StaticWebsite,
CorsRule,
PublicAccess
)
class BlobServiceClient(StorageAccountHostsMixin):
"""A client to interact with the Blob Service at the account level.
This client provides operations to retrieve and configure the account properties
as well as list, create and delete containers within the account.
For operations relating to a specific container or blob, clients for those entities
can also be retrieved using the `get_client` functions.
:ivar str url:
The full endpoint URL to the Blob service endpoint. This could be either the
primary endpoint, or the secondard endpoint depending on the current `location_mode`.
:ivar str primary_endpoint:
The full primary endpoint URL.
:ivar str primary_hostname:
The hostname of the primary endpoint.
:ivar str secondary_endpoint:
The full secondard endpoint URL if configured. If not available
a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str secondary_hostname:
The hostname of the secondary endpoint. If not available this
will be None. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str location_mode:
The location mode that the client is currently using. By default
this will be "primary". Options include "primary" and "secondary".
:param str account_url:
The URL to the blob storage account. Any other entities included
in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
authenticated with a SAS token.
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string, and account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the URL already has a SAS token, specifying an explicit credential will take priority.
Example:
.. literalinclude:: ../tests/test_blob_samples_authentication.py
:start-after: [START create_blob_service_client]
:end-before: [END create_blob_service_client]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with account url and credential.
.. literalinclude:: ../tests/test_blob_samples_authentication.py
:start-after: [START create_blob_service_client_oauth]
:end-before: [END create_blob_service_client_oauth]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with Azure Identity credentials.
"""
def __init__(
self, account_url, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
try:
if not account_url.lower().startswith('http'):
account_url = "https://" + account_url
except AttributeError:
raise ValueError("Account URL must be a string.")
parsed_url = urlparse(account_url.rstrip('/'))
if not parsed_url.netloc:
raise ValueError("Invalid URL: {}".format(account_url))
_, sas_token = parse_query(parsed_url.query)
self._query_str, credential = self._format_query_string(sas_token, credential)
super(BlobServiceClient, self).__init__(parsed_url, 'blob', credential, **kwargs)
self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
def _format_url(self, hostname):
"""Format the endpoint URL according to the current location
mode hostname.
"""
return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
@classmethod
def from_connection_string(
cls, conn_str, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
"""Create BlobServiceClient from a Connection String.
:param str conn_str:
A connection string to an Azure Storage account.
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token, or the connection string already has shared
access key values. The value can be a SAS token string, and account shared access
key, or an instance of a TokenCredentials class from azure.identity.
Credentials provided here will take precedence over those in the connection string.
Example:
.. literalinclude:: ../tests/test_blob_samples_authentication.py
:start-after: [START auth_from_connection_string]
:end-before: [END auth_from_connection_string]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient from a connection string.
"""
account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
if 'secondary_hostname' not in kwargs:
kwargs['secondary_hostname'] = secondary
return cls(account_url, credential=credential, **kwargs)
def generate_shared_access_signature(
self, resource_types, # type: Union[ResourceTypes, str]
permission, # type: Union[AccountPermissions, str]
expiry, # type: Optional[Union[datetime, str]]
start=None, # type: Optional[Union[datetime, str]]
ip=None, # type: Optional[str]
protocol=None # type: Optional[str]
):
"""Generates a shared access signature for the blob service.
Use the returned signature with the credential parameter of any BlobServiceClient,
ContainerClient or BlobClient.
:param resource_types:
Specifies the resource types that are accessible with the account SAS.
:type resource_types: str or ~azure.storage.blob.models.ResourceTypes
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.blob.models.AccountPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:return: A Shared Access Signature (sas) token.
:rtype: str
Example:
.. literalinclude:: ../tests/test_blob_samples_authentication.py
:start-after: [START create_sas_token]
:end-before: [END create_sas_token]
:language: python
:dedent: 8
:caption: Generating a shared access signature.
"""
if not hasattr(self.credential, 'account_key') and not self.credential.account_key:
raise ValueError("No account SAS key available.")
sas = SharedAccessSignature(self.credential.account_name, self.credential.account_key)
return sas.generate_account(
Services.BLOB, resource_types, permission, expiry, start=start, ip=ip, protocol=protocol) # type: ignore
def get_account_information(self, **kwargs): # type: ignore
# type: (Optional[int]) -> Dict[str, str]
"""Gets information related to the storage account.
The information can also be retrieved if the user has a SAS to a container or blob.
The keys in the returned dictionary include 'sku_name' and 'account_kind'.
:returns: A dict of account information (SKU and account type).
:rtype: dict(str, str)
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START get_blob_service_account_info]
:end-before: [END get_blob_service_account_info]
:language: python
:dedent: 8
:caption: Getting account information for the blob service.
"""
try:
return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
except StorageErrorException as error:
process_storage_error(error)
def get_service_stats(self, timeout=None, **kwargs): # type: ignore
# type: (Optional[int], **Any) -> Dict[str, Any]
"""Retrieves statistics related to replication for the Blob service.
It is only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: The blob service stats.
:rtype: ~azure.storage.blob._generated.models.StorageServiceStats
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START get_blob_service_stats]
:end-before: [END get_blob_service_stats]
:language: python
:dedent: 8
:caption: Getting service stats for the blob service.
"""
try:
return self._client.service.get_statistics( # type: ignore
timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
def get_service_properties(self, timeout=None, **kwargs):
# type(Optional[int]) -> Dict[str, Any]
"""Gets the properties of a storage account's Blob service, including
Azure Storage Analytics.
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob._generated.models.StorageServiceProperties
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START get_blob_service_properties]
:end-before: [END get_blob_service_properties]
:language: python
:dedent: 8
:caption: Getting service properties for the blob service.
"""
try:
return self._client.service.get_properties(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
def set_service_properties(
self, logging=None, # type: Optional[Logging]
hour_metrics=None, # type: Optional[Metrics]
minute_metrics=None, # type: Optional[Metrics]
cors=None, # type: Optional[List[CorsRule]]
target_version=None, # type: Optional[str]
delete_retention_policy=None, # type: Optional[RetentionPolicy]
static_website=None, # type: Optional[StaticWebsite]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> None
"""Sets the properties of a storage account's Blob service, including
Azure Storage Analytics.
If an element (e.g. Logging) is left as None, the
existing settings on the service for that functionality are preserved.
:param logging:
Groups the Azure Analytics Logging settings.
:type logging:
:class:`~azure.storage.blob.models.Logging`
:param hour_metrics:
The hour metrics settings provide a summary of request
statistics grouped by API in hourly aggregates for blobs.
:type hour_metrics:
:class:`~azure.storage.blob.models.Metrics`
:param minute_metrics:
The minute metrics settings provide request statistics
for each minute for blobs.
:type minute_metrics:
:class:`~azure.storage.blob.models.Metrics`
:param cors:
You can include up to five CorsRule elements in the
list. If an empty list is specified, all CORS rules will be deleted,
and CORS will be disabled for the service.
:type cors: list(:class:`~azure.storage.blob.models.CorsRule`)
:param str target_version:
Indicates the default version to use for requests if an incoming
request's version is not specified.
:param delete_retention_policy:
The delete retention policy specifies whether to retain deleted blobs.
It also specifies the number of days and versions of blob to keep.
:type delete_retention_policy:
:class:`~azure.storage.blob.models.RetentionPolicy`
:param static_website:
Specifies whether the static website feature is enabled,
and if yes, indicates the index document and 404 error document to use.
:type static_website:
:class:`~azure.storage.blob.models.StaticWebsite`
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START set_blob_service_properties]
:end-before: [END set_blob_service_properties]
:language: python
:dedent: 8
:caption: Setting service properties for the blob service.
"""
props = StorageServiceProperties(
logging=logging,
hour_metrics=hour_metrics,
minute_metrics=minute_metrics,
cors=cors,
default_service_version=target_version,
delete_retention_policy=delete_retention_policy,
static_website=static_website
)
try:
self._client.service.set_properties(props, timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
def list_containers(
self, name_starts_with=None, # type: Optional[str]
include_metadata=False, # type: Optional[bool]
results_per_page=None, # type: Optional[int]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> ItemPaged[ContainerProperties]
"""Returns a generator to list the containers under the specified account.
The generator will lazily follow the continuation tokens returned by
the service and stop when all containers have been returned.
:param str name_starts_with:
Filters the results to return only containers whose names
begin with the specified prefix.
:param bool include_metadata:
Specifies that container metadata be returned in the response.
The default value is `False`.
:param int results_per_page:
The maximum number of container names to retrieve per API
call. If the request does not specify the server will return up to 5,000 items.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) of ContainerProperties.
:rtype: ~azure.core.paging.ItemPaged[~azure.core.blob.models.ContainerProperties]
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START bsc_list_containers]
:end-before: [END bsc_list_containers]
:language: python
:dedent: 12
:caption: Listing the containers in the blob service.
"""
include = 'metadata' if include_metadata else None
command = functools.partial(
self._client.service.list_containers_segment,
prefix=name_starts_with,
include=include,
timeout=timeout,
**kwargs)
return ItemPaged(
command, prefix=name_starts_with, results_per_page=results_per_page, page_iterator_class=ContainerPropertiesPaged)
def create_container(
self, name, # type: str
metadata=None, # type: Optional[Dict[str, str]]
public_access=None, # type: Optional[Union[PublicAccess, str]]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> ContainerClient
"""Creates a new container under the specified account.
If the container with the same name already exists, a ResourceExistsError will
be raised. This method returns a client with which to interact with the newly
created container.
:param str name: The name of the container to create.
:param metadata:
A dict with name-value pairs to associate with the
container as metadata. Example: `{'Category':'test'}`
:type metadata: dict(str, str)
:param public_access:
Possible values include: container, blob.
:type public_access: str or ~azure.storage.blob.models.PublicAccess
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob.container_client.ContainerClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START bsc_create_container]
:end-before: [END bsc_create_container]
:language: python
:dedent: 12
:caption: Creating a container in the blob service.
"""
container = self.get_container_client(name)
container.create_container(
metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
return container
def delete_container(
self, container, # type: Union[ContainerProperties, str]
lease=None, # type: Optional[Union[LeaseClient, str]]
if_modified_since=None, # type: Optional[datetime]
if_unmodified_since=None, # type: Optional[datetime]
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> None
"""Marks the specified container for deletion.
The container and any blobs contained within it are later deleted during garbage collection.
If the container is not found, a ResourceNotFoundError will be raised.
:param container:
The container to delete. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:param ~azure.storage.blob.lease.LeaseClient lease:
If specified, delete_container only succeeds if the
container's lease is active and matches this ID.
Required if the container has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START bsc_delete_container]
:end-before: [END bsc_delete_container]
:language: python
:dedent: 12
:caption: Deleting a container in the blob service.
"""
container = self.get_container_client(container) # type: ignore
container.delete_container( # type: ignore
lease=lease,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
**kwargs)
def get_container_client(self, container):
# type: (Union[ContainerProperties, str]) -> ContainerClient
"""Get a client to interact with the specified container.
The container need not already exist.
:param container:
The container. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:returns: A ContainerClient.
:rtype: ~azure.core.blob.container_client.ContainerClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START bsc_get_container_client]
:end-before: [END bsc_get_container_client]
:language: python
:dedent: 8
:caption: Getting the container client to interact with a specific container.
"""
return ContainerClient(
self.url, container=container,
credential=self.credential, _configuration=self._config,
_pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function)
def get_blob_client(
self, container, # type: Union[ContainerProperties, str]
blob, # type: Union[BlobProperties, str]
snapshot=None # type: Optional[Union[Dict[str, Any], str]]
):
# type: (...) -> BlobClient
"""Get a client to interact with the specified blob.
The blob need not already exist.
:param container:
The container that the blob is in. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:param blob:
The blob with which to interact. This can either be the name of the blob,
or an instance of BlobProperties.
:type blob: str or ~azure.storage.blob.models.BlobProperties
:param snapshot:
The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
or a dictionary output returned by :func:`~azure.storage.blob.blob_client.BlobClient.create_snapshot()`.
:type snapshot: str or dict(str, Any)
:returns: A BlobClient.
:rtype: ~azure.storage.blob.blob_client.BlobClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service.py
:start-after: [START bsc_get_blob_client]
:end-before: [END bsc_get_blob_client]
:language: python
:dedent: 12
:caption: Getting the blob client to interact with a specific blob.
"""
return BlobClient( # type: ignore
self.url, container=container, blob=blob, snapshot=snapshot,
credential=self.credential, _configuration=self._config,
_pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function)
| 47.801667 | 126 | 0.648722 |
6e233b0e730a0d9929043bc47c80b739792f65f8 | 5,140 | py | Python | services/document-manager/backend/app/docman/resources/import_now_submission_documents_job.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 25 | 2018-07-09T19:04:37.000Z | 2022-03-15T17:27:10.000Z | services/document-manager/backend/app/docman/resources/import_now_submission_documents_job.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 983 | 2018-04-25T20:08:07.000Z | 2022-03-31T21:45:20.000Z | services/document-manager/backend/app/docman/resources/import_now_submission_documents_job.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 58 | 2018-05-15T22:35:50.000Z | 2021-11-29T19:40:52.000Z | from flask import current_app, make_response, jsonify, request
from flask_restplus import Resource, reqparse
from werkzeug.exceptions import BadRequest
from sqlalchemy import and_
import json
from app.extensions import api
from app.docman.models.import_now_submission_documents_job import ImportNowSubmissionDocumentsJob
from app.docman.models.import_now_submission_document import ImportNowSubmissionDocument
from app.utils.include.user_info import User
from app.utils.access_decorators import requires_role_edit_permit, requires_role_view_all
from app.docman.response_models import IMPORT_NOW_SUBMISSION_DOCUMENTS_JOB
@api.route('/import-now-submission-documents')
class ImportNowSubmissionDocumentsJobListResource(Resource):
parser = reqparse.RequestParser()
parser.add_argument('now_application_id', type=int, required=True)
parser.add_argument('now_application_guid', type=str, required=True)
parser.add_argument('submission_documents', type=list, location='json', required=True)
@requires_role_edit_permit
def post(self):
from app.services.commands_helper import create_import_now_submission_documents, abort_task
from app.tasks.celery import celery
# Get the NoW Application ID and its submission documents to transfer.
data = self.parser.parse_args()
now_application_id = data.get('now_application_id', None)
now_application_guid = data.get('now_application_guid', None)
submission_documents = data.get('submission_documents', [])
# If any jobs for this Notice of Work are in progress, cancel them.
in_progress_jobs = ImportNowSubmissionDocumentsJob.query.filter(
and_(
ImportNowSubmissionDocumentsJob.now_application_id == now_application_id,
ImportNowSubmissionDocumentsJob.import_now_submission_documents_job_status_code.in_(
['NOT', 'INP', 'DEL']))).all()
for job in in_progress_jobs:
job.import_now_submission_documents_job_status_code = 'REV'
abort_task(job.celery_task_id)
job.save()
# Create the Import NoW Submission Documents job record.
import_job = ImportNowSubmissionDocumentsJob(
now_application_id=now_application_id,
now_application_guid=now_application_guid,
create_user=User().get_user_username())
# Only import documents that have not already been added to an import job and transfer any in-progress documents from the cancelled in-progress jobs to this one.
for doc in submission_documents:
documenturl = doc['documenturl']
filename = doc['filename']
messageid = doc['messageid']
documenttype = doc['documenttype']
description = doc['description']
# Get the possible already-existing record for this document.
existing_doc = ImportNowSubmissionDocument.query.filter(
and_(ImportNowSubmissionDocument.submission_document_url == documenturl,
ImportNowSubmissionDocument.submission_document_file_name == filename,
ImportNowSubmissionDocument.submission_document_message_id == messageid,
ImportNowSubmissionDocument.submission_document_type == documenttype,
ImportNowSubmissionDocument.submission_document_description ==
description)).one_or_none()
# Only import this existing document if it has not already been imported successfully.
if existing_doc:
if existing_doc.document_id is None:
import_job.import_now_submission_documents.append(existing_doc)
continue
import_job.import_now_submission_documents.append(
ImportNowSubmissionDocument(
submission_document_url=documenturl,
submission_document_file_name=filename,
submission_document_message_id=messageid,
submission_document_type=documenttype,
submission_document_description=description))
import_job.save()
# Create the Import NoW Submission Documents job.
message = create_import_now_submission_documents(
import_job.import_now_submission_documents_job_id)
# Return a response indicating that the task has started.
result = make_response(jsonify(message=message), 201)
return result
@api.marshal_with(IMPORT_NOW_SUBMISSION_DOCUMENTS_JOB, code=200, skip_none=True)
@requires_role_view_all
def get(self):
now_application_guid = request.args.get('now_application_guid', None)
if not now_application_guid:
raise BadRequest('now_application_guid is required')
import_jobs = ImportNowSubmissionDocumentsJob.find_by_now_application_guid(
now_application_guid)
most_recent_only = request.args.get('most_recent_only', False)
if most_recent_only:
return import_jobs[-1] if import_jobs else {}
return import_jobs
| 48.490566 | 169 | 0.711479 |
5c768bbc8c9b36992c0a58af9b526310e5336b23 | 5,211 | py | Python | openet/disalexi/lc_properties.py | hgbzzw/openet-disalexi-beta | efc581b08613ee2d1cb653b04c079d4610d51174 | [
"Apache-2.0"
] | 3 | 2019-11-07T14:58:42.000Z | 2022-01-05T07:54:27.000Z | openet/disalexi/lc_properties.py | hgbzzw/openet-disalexi-beta | efc581b08613ee2d1cb653b04c079d4610d51174 | [
"Apache-2.0"
] | null | null | null | openet/disalexi/lc_properties.py | hgbzzw/openet-disalexi-beta | efc581b08613ee2d1cb653b04c079d4610d51174 | [
"Apache-2.0"
] | 2 | 2020-01-14T08:32:30.000Z | 2022-01-11T00:47:13.000Z | remaps = {
'NLCD': {
'aleafv': {
11: 0.82, 12: 0.82, 21: 0.84, 22: 0.84, 23: 0.84, 24: 0.84,
31: 0.82, 32: 0.82, 41: 0.86, 42: 0.89, 43: 0.87,
51: 0.83, 52: 0.83, 71: 0.82, 72: 0.82, 73: 0.82, 74: 0.82,
81: 0.82, 82: 0.83, 90: 0.85, 91: 0.85, 92: 0.85, 93: 0.85,
94: 0.85, 95: 0.85, 96: 0.85, 97: 0.85, 98: 0.85, 99: 0.85},
'aleafn': {
11: 0.28, 12: 0.28, 21: 0.37, 22: 0.37, 23: 0.37, 24: 0.37,
31: 0.57, 32: 0.57, 41: 0.37, 42: 0.60, 43: 0.48,
51: 0.35, 52: 0.35, 71: 0.28, 72: 0.28, 73: 0.28, 74: 0.28,
81: 0.28, 82: 0.35, 90: 0.36, 91: 0.36, 92: 0.36, 93: 0.36,
94: 0.36, 95: 0.36, 96: 0.36, 97: 0.36, 98: 0.36, 99: 0.36},
'aleafl': {
11: 0.95, 12: 0.95, 21: 0.95, 22: 0.95, 23: 0.95, 24: 0.95,
31: 0.95, 32: 0.95, 41: 0.95, 42: 0.95, 43: 0.95,
51: 0.95, 52: 0.95, 71: 0.95, 72: 0.95, 73: 0.95, 74: 0.95,
81: 0.95, 82: 0.95, 90: 0.95, 91: 0.95, 92: 0.95, 93: 0.95,
94: 0.95, 95: 0.95, 96: 0.95, 97: 0.95, 98: 0.95, 99: 0.95},
'adeadv': {
11: 0.42, 12: 0.42, 21: 0.58, 22: 0.58, 23: 0.58, 24: 0.58,
31: 0.92, 32: 0.92, 41: 0.84, 42: 0.84, 43: 0.84,
51: 0.77, 52: 0.77, 71: 0.42, 72: 0.42, 73: 0.42, 74: 0.42,
81: 0.42, 82: 0.49, 90: 0.58, 91: 0.58, 92: 0.58, 93: 0.58,
94: 0.58, 95: 0.58, 96: 0.58, 97: 0.58, 98: 0.58, 99: 0.58},
'adeadn': {
11: 0.04, 12: 0.04, 21: 0.26, 22: 0.26, 23: 0.26, 24: 0.26,
31: 0.80, 32: 0.80, 41: 0.61, 42: 0.61, 43: 0.61,
51: 0.52, 52: 0.52, 71: 0.04, 72: 0.04, 73: 0.04, 74: 0.04,
81: 0.04, 82: 0.13, 90: 0.26, 91: 0.26, 92: 0.26, 93: 0.26,
94: 0.26, 95: 0.26, 96: 0.26, 97: 0.26, 98: 0.26, 99: 0.26},
'adeadl': {
11: 0.95, 12: 0.95, 21: 0.95, 22: 0.95, 23: 0.95, 24: 0.95,
31: 0.95, 32: 0.95, 41: 0.95, 42: 0.95, 43: 0.95,
51: 0.95, 52: 0.95, 71: 0.95, 72: 0.95, 73: 0.95, 74: 0.95,
81: 0.95, 82: 0.95, 90: 0.95, 91: 0.95, 92: 0.95, 93: 0.95,
94: 0.95, 95: 0.95, 96: 0.95, 97: 0.95, 98: 0.95, 99: 0.95},
'hmin': {
11: 0.00, 12: 0.00, 21: 0.00, 22: 0.00, 23: 1.00, 24: 6.00,
31: 0.00, 32: 0.00, 41: 10.0, 42: 15.0, 43: 12.0,
51: 0.20, 52: 1.00, 71: 0.10, 72: 0.10, 73: 0.10, 74: 0.10,
81: 0.10, 82: 0.00, 90: 5.00, 91: 1.00, 92: 1.00, 93: 1.00,
94: 1.00, 95: 1.00, 96: 1.00, 97: 1.00, 98: 1.00, 99: 1.00},
'hmax': {
11: 0.60, 12: 0.60, 21: 0.60, 22: 0.60, 23: 1.00, 24: 6.00,
31: 0.20, 32: 0.20, 41: 10.0, 42: 15.0, 43: 12.0,
51: 0.20, 52: 1.00, 71: 0.60, 72: 0.60, 73: 0.10, 74: 0.10,
81: 0.60, 82: 0.60, 90: 5.00, 91: 2.50, 92: 2.50, 93: 2.50,
94: 2.50, 95: 2.50, 96: 2.50, 97: 2.50, 98: 2.50, 99: 2.50},
'xl': {
11: 0.02, 12: 0.02, 21: 0.02, 22: 0.02, 23: 0.02, 24: 0.02,
31: 0.02, 32: 0.02, 41: 0.10, 42: 0.05, 43: 0.08,
51: 0.02, 52: 0.02, 71: 0.02, 72: 0.02, 73: 0.02, 74: 0.02,
81: 0.02, 82: 0.05, 90: 0.05, 91: 0.05, 92: 0.05, 93: 0.05,
94: 0.05, 95: 0.05, 96: 0.05, 97: 0.05, 98: 0.05, 99: 0.05},
'omega': {
11: 0.99, 12: 0.99, 21: 0.99, 22: 0.99, 23: 0.99, 24: 0.99,
31: 0.99, 32: 0.99, 41: 0.78, 42: 0.68, 43: 0.75,
51: 0.84, 52: 0.84, 71: 0.83, 72: 0.83, 73: 0.83, 74: 0.83,
81: 0.83, 82: 0.83, 90: 0.86, 91: 0.86, 92: 0.86, 93: 0.86,
94: 0.86, 95: 0.86, 96: 0.86, 97: 0.86, 98: 0.86, 99: 0.86}
},
'GLOBELAND30': {
'aleafv': {
10: 0.83, 20: 0.86, 30: 0.82, 40: 0.83, 50: 0.85, 60: 0.82,
70: 0.82, 80: 0.84, 90: 0.82, 100: 0.82},
'aleafn': {
10: 0.35, 20: 0.37, 30: 0.28, 40: 0.35, 50: 0.36, 60: 0.28,
70: 0.28, 80: 0.37, 90: 0.57, 100: 0.28},
'aleafl': {
10: 0.95, 20: 0.95, 30: 0.95, 40: 0.95, 50: 0.95, 60: 0.95,
70: 0.95, 80: 0.95, 90: 0.95, 100: 0.95},
'adeadv': {
10: 0.49, 20: 0.84, 30: 0.42, 40: 0.77, 50: 0.58, 60: 0.42,
70: 0.42, 80: 0.58, 90: 0.92, 100: 0.42},
'adeadn': {
10: 0.13, 20: 0.61, 30: 0.04, 40: 0.52, 50: 0.26, 60: 0.04,
70: 0.04, 80: 0.26, 90: 0.80, 100: 0.04},
'adeadl': {
10: 0.95, 20: 0.95, 30: 0.95, 40: 0.95, 50: 0.95, 60: 0.95,
70: 0.95, 80: 0.95, 90: 0.95, 100: 0.95},
'hmin': {
10: 0.00, 20: 10.0, 30: 0.10, 40: 1.00, 50: 1.00, 60: 0.00,
70: 0.10, 80: 1.00, 90: 0.00, 100: 0.00},
'hmax': {
10: 0.60, 20: 10.0, 30: 0.60, 40: 1.00, 50: 2.50, 60: 0.60,
70: 0.10, 80: 1.00, 90: 0.20, 100: 0.60},
'xl': {
10: 0.05, 20: 0.10, 30: 0.02, 40: 0.02, 50: 0.05, 60: 0.02,
70: 0.02, 80: 0.02, 90: 0.02, 100: 0.02},
'omega': {
10: 0.83, 20: 0.78, 30: 0.83, 40: 0.84, 50: 0.86, 60: 0.99,
70: 0.83, 80: 0.99, 90: 0.99, 100: 0.99}
}
}
| 53.721649 | 72 | 0.399731 |
a8f932ac7e695566475e28c606c638edaad1a40c | 1,185 | py | Python | Scrape Wikipedia using speech recognition.py | AnshikaY/awesomeScripts | 94b1a5a57e9a117150e1beadd8a7a71d4d237ad4 | [
"MIT"
] | null | null | null | Scrape Wikipedia using speech recognition.py | AnshikaY/awesomeScripts | 94b1a5a57e9a117150e1beadd8a7a71d4d237ad4 | [
"MIT"
] | null | null | null | Scrape Wikipedia using speech recognition.py | AnshikaY/awesomeScripts | 94b1a5a57e9a117150e1beadd8a7a71d4d237ad4 | [
"MIT"
] | null | null | null | import pyttsx3
import speech_recognition as SR
import wikipedia
import sys
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# for voice in voices:
# print(voice.id)
# select voice among the available options
engine.setProperty('voice', voices[1].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def obey_command():
# It takes input from the microphone and returns output as a string
mic = SR.Recognizer()
with SR.Microphone() as source:
print("Listening...")
mic.pause_threshold = 1
audio = mic.listen(source)
try:
print("Recognizing...")
query = mic.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print(e)
print("Say that again please...")
return "None"
return query
if __name__ == "__main__":
query = obey_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace("wikipedia", "")
result = wikipedia.summary(query, sentences=2)
speak("According to wikipedia")
speak(result)
sys.exit()
| 24.6875 | 71 | 0.623629 |
705c7aa3cfe6fce5869177f54108a6cce0fedea8 | 5,728 | py | Python | database/db_models.py | Huynhhung0/origin-website | dee1fc4f5f048e3e17d64e551522c739a7953364 | [
"MIT"
] | 1 | 2020-05-21T04:44:45.000Z | 2020-05-21T04:44:45.000Z | database/db_models.py | Huynhhung0/origin-website | dee1fc4f5f048e3e17d64e551522c739a7953364 | [
"MIT"
] | null | null | null | database/db_models.py | Huynhhung0/origin-website | dee1fc4f5f048e3e17d64e551522c739a7953364 | [
"MIT"
] | null | null | null |
from decimal import *
from sqlalchemy import event
from sqlalchemy.orm import deferred
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import JSONB
from database import db, db_common
class EmailList(db.Model):
__tablename__ = 'email_list'
email = db.Column(db.String(255), primary_key=True, autoincrement=False)
unsubscribed = db.Column(db.Boolean())
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
ip_addr = db.Column(db.String(100))
country_code = db.Column(db.String(2))
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
# sendgrid feedback loop
bounced = db.Column(db.Boolean())
spam_report = db.Column(db.Boolean())
blocked = db.Column(db.Boolean())
invalid = db.Column(db.Boolean())
def __str__(self):
return '%s' % (self.email)
class Presale(db.Model):
__tablename__ = 'presale'
id = db.Column(db.Integer, primary_key=True)
full_name = db.Column(db.String(255))
email = db.Column(db.String(255))
accredited = db.Column(db.Boolean())
entity_type = db.Column(db.String(255))
desired_allocation = db.Column(db.String(255))
desired_allocation_currency = db.Column(db.String(3))
citizenship = db.Column(db.String(2))
sending_addr = db.Column(db.String(255))
note = db.Column(db.Text())
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
ip_addr = db.Column(db.String(100))
def __str__(self):
return '%s' % (self.email)
class FullContact(db.Model):
__tablename__ = 'fullcontact'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), index=True, unique=True)
fullcontact_response = db.Column(JSONB)
github_handle = db.Column(db.String(255))
angellist_handle = db.Column(db.String(255))
twitter_handle = db.Column(db.String(255))
class MessageLog(db.Model):
__tablename__ = 'message_log'
id = db.Column(db.BigInteger, primary_key=True)
email = db.Column(db.String(255), index=True)
msg_purpose = db.Column(db.String(128), index=True)
msg_subject = db.Column(db.String(128))
msg_text = db.Column(db.Text())
msg_html = db.Column(db.Text())
msg_sent = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
class Interest(db.Model):
__tablename__ = 'interest'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
company_name = db.Column(db.String(255))
email = db.Column(db.String(255))
website = db.Column(db.String(255))
note = db.Column(db.Text())
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
ip_addr = db.Column(db.String(100))
def __str__(self):
return '%s' % (self.email)
class Contributor(db.Model):
__tablename__ = 'contributor'
username = db.Column(db.String(255), primary_key=True, autoincrement=False)
commits = db.Column(db.Integer())
avatar = db.Column(db.String(255))
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
def __str__(self):
return '%s' % (self.username)
class SocialStat(db.Model):
__tablename__ = 'social_stat'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), index=True)
timestamp = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
subscribed_count = db.Column(db.Integer())
def __str__(self):
return '%s' % (self.name)
class EthContact(db.Model):
__tablename__ = 'eth_contact'
address = db.Column(db.String(42), primary_key=True)
name = db.Column(db.String(255), index=True)
desc = db.Column(db.String(255))
email = db.Column(db.String(255), index=True)
phone = db.Column(db.String(255), index=True)
eth_balance = db.Column(db.Float(), default=0)
ogn_balance = db.Column(db.Float(), default=0)
dai_balance = db.Column(db.Float(), default=0)
investor = db.Column(db.Boolean())
presale_interest = db.Column(db.Boolean())
dapp_user = db.Column(db.Boolean())
employee = db.Column(db.Boolean())
exchange = db.Column(db.Boolean())
company_wallet = db.Column(db.Boolean())
investor_airdrop = db.Column(db.Boolean())
token_count = db.Column(db.Integer())
transaction_count = db.Column(db.Integer())
tokens = db.Column(JSONB)
last_updated = db.Column(db.DateTime(),server_default=db.func.now(), onupdate=db.func.now())
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
country_code = db.Column(db.String(2))
def __str__(self):
return '%s' % (self.name)
class TokenTransaction(db.Model):
__tablename__ = 'token_transaction'
tx_hash = db.Column(db.String(66), primary_key=True)
from_address = db.Column(db.String(42), index=True)
to_address = db.Column(db.String(42), index=True)
amount = db.Column(db.Float())
block_number = db.Column(db.Integer())
notification_sent = db.Column(db.Boolean())
timestamp = db.Column(db.DateTime(timezone=True))
last_updated = db.Column(db.DateTime(),server_default=db.func.now(), onupdate=db.func.now())
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
def __str__(self):
return '%s' % (self.name)
@event.listens_for(Presale, 'after_insert')
@event.listens_for(Interest, 'after_insert')
def _subscribe_email_list(mapper, connection, target):
from util.tasks import subscribe_email_list
payload = {"email": target.email,
"ip_addr": target.ip_addr}
subscribe_email_list.delay(**payload)
| 35.57764 | 96 | 0.686278 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.