id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
170777 | import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
build_sampler, merge_aug_bboxes, merge_aug_masks,
multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
@HEADS.register_module()
class TailCascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1712.00726
"""
def __init__(self,
             num_stages,
             stage_loss_weights,
             bbox_roi_extractor=None,
             bbox_head=None,
             bbox_head_tail=None,
             labels=None,
             labels_tail=None,
             mask_roi_extractor=None,
             mask_head=None,
             shared_head=None,
             train_cfg=None,
             test_cfg=None,
             pretrained=None,
             init_cfg=None):
    # num_stages: number of cascade refinement stages.
    # stage_loss_weights: per-stage multipliers applied to the stage losses.
    # bbox_head / bbox_head_tail: configs for the main and the "tail" box
    #   head branches; each stage gets one head of each kind.
    # labels / labels_tail: class-id lists used at test time to keep only
    #   the classes each branch is responsible for.
    # NOTE(review): `pretrained` and `init_cfg` are accepted but never
    # forwarded (super().__init__() takes no args here) — confirm intended.
    assert bbox_roi_extractor is not None
    assert bbox_head is not None
    assert shared_head is None, \
        'Shared head is not supported in Cascade RCNN anymore'
    self.num_stages = num_stages
    self.stage_loss_weights = stage_loss_weights
    super(TailCascadeRoIHead, self).__init__()
    self.train_cfg = train_cfg
    self.test_cfg = test_cfg
    # if bbox_head is not None:
    self.init_bbox_head(bbox_roi_extractor, bbox_head, bbox_head_tail)
    self.labels = labels
    self.labels_tail = labels_tail
    self.init_assigner_sampler()
def init_bbox_head(self, bbox_roi_extractor, bbox_head, bbox_head_tail):
    """Build per-stage box roi extractors and head/tail-head pairs.

    Args:
        bbox_roi_extractor (dict | list[dict]): Config(s) of box roi
            extractor; a single dict is shared across all stages.
        bbox_head (dict | list[dict]): Config(s) of the main box head.
        bbox_head_tail (dict | list[dict]): Config(s) of the tail box head.
    """

    def _per_stage(cfg):
        # Broadcast a single config dict to every cascade stage.
        return cfg if isinstance(cfg, list) else [cfg] * self.num_stages

    extractors = _per_stage(bbox_roi_extractor)
    heads = _per_stage(bbox_head)
    tail_heads = _per_stage(bbox_head_tail)
    assert len(extractors) == len(heads) == len(tail_heads) == self.num_stages
    self.bbox_roi_extractor = ModuleList(
        build_roi_extractor(cfg) for cfg in extractors)
    self.bbox_head = ModuleList(build_head(cfg) for cfg in heads)
    self.bbox_head_tail = ModuleList(build_head(cfg) for cfg in tail_heads)
def init_assigner_sampler(self):
    """Build one (assigner, sampler) pair per stage for both branches."""
    self.bbox_assigner = []
    self.bbox_sampler = []
    self.bbox_assigner_tail = []
    self.bbox_sampler_tail = []
    if self.train_cfg is None:
        # Test-only construction: no assigners/samplers needed.
        return
    for stage_cfg in self.train_cfg:
        self.bbox_assigner.append(build_assigner(stage_cfg.assigner))
        self.bbox_sampler.append(build_sampler(stage_cfg.sampler))
        self.bbox_assigner_tail.append(build_assigner(stage_cfg.assigner_tail))
        self.bbox_sampler_tail.append(build_sampler(stage_cfg.sampler_tail))
# def init_weights(self, pretrained):
# """Initialize the weights in head.
# Args:
# pretrained (str, optional): Path to pre-trained weights.
# Defaults to None.
# """
# if self.with_shared_head:
# self.shared_head.init_weights(pretrained=pretrained)
# for i in range(self.num_stages):
# if self.with_bbox:
# self.bbox_roi_extractor[i].init_weights()
# self.bbox_head[i].init_weights()
# self.bbox_head_tail[i].init_weights()
# if self.with_mask:
# if not self.share_roi_extractor:
# self.mask_roi_extractor[i].init_weights()
# self.mask_head[i].init_weights()
def init_mask_head(self, mask_roi_extractor, mask_head):
    """Build per-stage mask heads and (optionally shared) roi extractors.

    Args:
        mask_roi_extractor (dict | list[dict] | None): Config(s) of the
            mask roi extractor; when None the box extractors are reused.
        mask_head (dict | list[dict]): Config(s) of the mask head.
    """
    if not isinstance(mask_head, list):
        mask_head = [mask_head] * self.num_stages
    assert len(mask_head) == self.num_stages
    self.mask_head = nn.ModuleList(build_head(cfg) for cfg in mask_head)
    if mask_roi_extractor is None:
        # No dedicated extractor: share the box branch's roi extractors.
        self.share_roi_extractor = True
        self.mask_roi_extractor = self.bbox_roi_extractor
    else:
        self.share_roi_extractor = False
        if not isinstance(mask_roi_extractor, list):
            mask_roi_extractor = [mask_roi_extractor] * self.num_stages
        assert len(mask_roi_extractor) == self.num_stages
        self.mask_roi_extractor = nn.ModuleList(
            build_roi_extractor(cfg) for cfg in mask_roi_extractor)
def forward_dummy(self, x, proposals):
    """Dummy forward used e.g. for FLOPs computation."""
    outs = ()
    rois = bbox2roi([proposals])
    # bbox branch: collect every stage's classification/regression output.
    if self.with_bbox:
        for stage in range(self.num_stages):
            res = self._bbox_forward(stage, x, rois)
            outs += (res['cls_score'], res['bbox_pred'])
    # mask branch: run each stage on (at most) the first 100 rois.
    if self.with_mask:
        mask_rois = rois[:100]
        for stage in range(self.num_stages):
            mask_res = self._mask_forward(stage, x, mask_rois)
            outs += (mask_res['mask_pred'],)
    return outs
def _bbox_forward(self, stage, x, rois):
    """Run the main box branch for one stage (train and test paths)."""
    extractor = self.bbox_roi_extractor[stage]
    head = self.bbox_head[stage]
    # Only the feature levels the extractor was configured for are used.
    feats = extractor(x[:extractor.num_inputs], rois)
    # do not support caffe_c4 model anymore
    cls_score, cls_score_gs, bbox_pred = head(feats)
    return dict(
        cls_score=cls_score,
        cls_score_gs=cls_score_gs,
        bbox_pred=bbox_pred,
        bbox_feats=feats)
def _bbox_forward_tail(self, stage, x, rois):
    """Run the tail box branch for one stage (train and test paths)."""
    extractor = self.bbox_roi_extractor[stage]
    tail_head = self.bbox_head_tail[stage]
    # The tail branch shares the stage's roi extractor with the main branch.
    feats = extractor(x[:extractor.num_inputs], rois)
    # do not support caffe_c4 model anymore
    cls_score, cls_score_gs, bbox_pred = tail_head(feats)
    return dict(
        cls_score=cls_score,
        cls_score_gs=cls_score_gs,
        bbox_pred=bbox_pred,
        bbox_feats=feats)
def _bbox_forward_train(self, stage, x, sampling_results, sampling_results_tail, gt_bboxes,
                        gt_labels, rcnn_train_cfg):
    """Run forward function and calculate loss for box head in training."""
    # --- main branch ---
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_results = self._bbox_forward(stage, x, rois)
    bbox_targets = self.bbox_head[stage].get_targets(
        sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
    loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
                                           bbox_results['cls_score_gs'],
                                           bbox_results['bbox_pred'], rois,
                                           *bbox_targets)
    # rois/bbox_targets are stashed so forward_train can refine proposals.
    bbox_results.update(loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
    # --- tail branch: identical computation with its own samples and head ---
    rois_tail = bbox2roi([res.bboxes for res in sampling_results_tail])
    bbox_results_tail = self._bbox_forward_tail(stage, x, rois_tail)
    bbox_targets_tail = self.bbox_head_tail[stage].get_targets(sampling_results_tail, gt_bboxes,
                                                               gt_labels, rcnn_train_cfg)
    loss_bbox_tail = self.bbox_head_tail[stage].loss(bbox_results_tail['cls_score'],
                                                     bbox_results_tail['cls_score_gs'],
                                                     bbox_results_tail['bbox_pred'], rois_tail,
                                                     *bbox_targets_tail)
    bbox_results_tail.update(loss_bbox=loss_bbox_tail, rois=rois_tail, bbox_targets=bbox_targets_tail)
    return bbox_results, bbox_results_tail
def forward_train(self,
                  x,
                  img_metas,
                  proposal_list,
                  # proposal_list_tail,
                  gt_bboxes,
                  gt_labels,
                  gt_bboxes_ignore=None,
                  gt_masks=None):
    """
    Args:
        x (list[Tensor]): list of multi-level img features.
        img_metas (list[dict]): list of image info dict where each dict
            has: 'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
            For details on the values of these keys see
            `mmdet/datasets/pipelines/formatting.py:Collect`.
        proposals (list[Tensors]): list of region proposals.
        gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
            shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
        gt_labels (list[Tensor]): class indices corresponding to each box
        gt_bboxes_ignore (None | list[Tensor]): specify which bounding
            boxes can be ignored when computing the loss.
        gt_masks (None | Tensor) : true segmentation masks for each box
            used if the architecture supports a segmentation task.
    Returns:
        dict[str, Tensor]: a dictionary of loss components
    """
    losses = dict()
    # Both branches start from the same proposals; each then refines its
    # own copy stage by stage.
    proposal_list_tail = proposal_list.copy()
    # print(len(self.bbox_assigner))
    for i in range(self.num_stages):
        self.current_stage = i
        rcnn_train_cfg = self.train_cfg[i]
        lw = self.stage_loss_weights[i]
        # assign gts and sample proposals
        sampling_results = []
        sampling_results_tail = []
        if self.with_bbox or self.with_mask:
            bbox_assigner = self.bbox_assigner[i]
            bbox_assigner_tail = self.bbox_assigner_tail[i]
            bbox_sampler = self.bbox_sampler[i]
            bbox_sampler_tail = self.bbox_sampler_tail[i]
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            for j in range(num_imgs):
                # Main-branch assignment and sampling for image j.
                assign_result = bbox_assigner.assign(
                    proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
                    gt_labels[j])
                sampling_result = bbox_sampler.sample(
                    assign_result,
                    proposal_list[j],
                    gt_bboxes[j],
                    gt_labels[j],
                    feats=[lvl_feat[j][None] for lvl_feat in x])
                sampling_results.append(sampling_result)
                # Tail branch has its own assigner/sampler and proposals.
                assign_result_tail = bbox_assigner_tail.assign(
                    proposal_list_tail[j], gt_bboxes[j], gt_bboxes_ignore[j],
                    gt_labels[j])
                sampling_result_tail = bbox_sampler_tail.sample(
                    assign_result_tail,
                    proposal_list_tail[j],
                    gt_bboxes[j],
                    gt_labels[j],
                    feats=[lvl_feat[j][None] for lvl_feat in x])
                sampling_results_tail.append(sampling_result_tail)
        # bbox head forward and loss
        bbox_results, bbox_results_tail = self._bbox_forward_train(i, x, sampling_results, sampling_results_tail,
                                                                   gt_bboxes, gt_labels,
                                                                   rcnn_train_cfg)
        # Loss terms (names containing 'loss') are scaled by the stage weight.
        for name, value in bbox_results['loss_bbox'].items():
            losses[f's{i}.{name}'] = (value * lw if 'loss' in name else value)
        for name, value in bbox_results_tail['loss_bbox'].items():
            losses[f's{i}.tail.{name}'] = (value * lw if 'loss' in name else value)
        # refine bboxes
        if i < self.num_stages - 1:
            pos_is_gts = [res.pos_is_gt for res in sampling_results]
            pos_is_gts_tail = [res.pos_is_gt for res in sampling_results_tail]
            # bbox_targets is a tuple
            roi_labels = bbox_results['bbox_targets'][0]
            roi_labels_tail = bbox_results_tail['bbox_targets'][0]
            with torch.no_grad():
                # Background rois (label == num_classes) are relabelled
                # with the most confident foreground class before refining.
                roi_labels = torch.where(
                    roi_labels == self.bbox_head[i].num_classes,
                    bbox_results['cls_score'][:, :-1].argmax(1),
                    roi_labels)
                proposal_list = self.bbox_head[i].refine_bboxes(
                    bbox_results['rois'], roi_labels,
                    bbox_results['bbox_pred'], pos_is_gts, img_metas)
                roi_labels_tail = torch.where(
                    roi_labels_tail == self.bbox_head_tail[i].num_classes,
                    bbox_results_tail['cls_score'][:, :-1].argmax(1),
                    roi_labels_tail)
                proposal_list_tail = self.bbox_head_tail[i].refine_bboxes(
                    bbox_results_tail['rois'], roi_labels_tail,
                    bbox_results_tail['bbox_pred'], pos_is_gts_tail, img_metas)
    return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
    """Test without augmentation."""
    assert self.with_bbox, 'Bbox head must be implemented.'
    num_imgs = len(proposal_list)
    img_shapes = tuple(meta['img_shape'] for meta in img_metas)
    ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
    scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
    # "ms" in variable names means multi-stage
    ms_bbox_result = {}
    ms_segm_result = {}
    ms_scores = []
    ms_scores_tail = []
    rcnn_test_cfg = self.test_cfg
    # Both branches start from the same proposals and are refined separately.
    rois = bbox2roi(proposal_list)
    rois_tail = bbox2roi(proposal_list)
    for i in range(self.num_stages):
        bbox_results = self._bbox_forward(i, x, rois)
        # split batch bbox prediction back to each image
        cls_score = bbox_results['cls_score']
        bbox_pred = bbox_results['bbox_pred']
        num_proposals_per_img = tuple(
            len(proposals) for proposals in proposal_list)
        rois = rois.split(num_proposals_per_img, 0)
        cls_score = cls_score.split(num_proposals_per_img, 0)
        if isinstance(bbox_pred, torch.Tensor):
            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
        else:
            bbox_pred = self.bbox_head[i].bbox_pred_split(
                bbox_pred, num_proposals_per_img)
        ms_scores.append(cls_score)
        bbox_results_tail = self._bbox_forward_tail(i, x, rois_tail)
        # split batch bbox prediction back to each image
        cls_score_tail = bbox_results_tail['cls_score']
        bbox_pred_tail = bbox_results_tail['bbox_pred']
        rois_tail = rois_tail.split(num_proposals_per_img, 0)
        cls_score_tail = cls_score_tail.split(num_proposals_per_img, 0)
        if isinstance(bbox_pred_tail, torch.Tensor):
            bbox_pred_tail = bbox_pred_tail.split(num_proposals_per_img, 0)
        else:
            bbox_pred_tail = self.bbox_head_tail[i].bbox_pred_split(
                bbox_pred_tail, num_proposals_per_img)
        ms_scores_tail.append(cls_score_tail)
        if i < self.num_stages - 1:
            # Refine per-image rois for the next stage using the current
            # stage's most confident foreground class.
            bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
            bbox_label_tail = [s[:, :-1].argmax(dim=1) for s in cls_score_tail]
            rois = torch.cat([
                self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],
                                                   bbox_pred[j],
                                                   img_metas[j])
                for j in range(num_imgs)
            ])
            rois_tail = torch.cat([
                self.bbox_head_tail[i].regress_by_class(rois_tail[j], bbox_label_tail[j],
                                                        bbox_pred_tail[j],
                                                        img_metas[j])
                for j in range(num_imgs)
            ])
    # average scores of each image by stages
    cls_score = [
        sum([score[i] for score in ms_scores]) / float(len(ms_scores))
        for i in range(num_imgs)
    ]
    det_bboxes = []
    det_labels = []
    for i in range(num_imgs):
        det_bbox, det_label = self.bbox_head[-1].get_bboxes(
            rois[i],
            cls_score[i],
            bbox_pred[i],
            img_shapes[i],
            scale_factors[i],
            rescale=rescale,
            cfg=rcnn_test_cfg)
        det_bboxes.append(det_bbox)
        det_labels.append(det_label)
    # average scores of each image by stages
    cls_score_tail = [
        sum([score[i] for score in ms_scores_tail]) / float(len(ms_scores_tail))
        for i in range(num_imgs)
    ]
    det_bboxes_tail = []
    det_labels_tail = []
    for i in range(num_imgs):
        det_bbox, det_label = self.bbox_head_tail[-1].get_bboxes(
            rois_tail[i],
            cls_score_tail[i],
            bbox_pred_tail[i],
            img_shapes[i],
            scale_factors[i],
            rescale=rescale,
            cfg=rcnn_test_cfg)
        det_bboxes_tail.append(det_bbox)
        det_labels_tail.append(det_label)
    # Keep only the classes each branch owns.
    # NOTE(review): the code below unconditionally uses det_bboxes_post /
    # det_bboxes_tail_post, so self.labels and self.labels_tail must both
    # be non-None for simple_test to work — confirm configs guarantee this.
    if self.labels is not None:
        det_bboxes_post = []
        det_labels_post = []
        for i in range(num_imgs):
            inds = []
            for label in self.labels:
                inds.append(torch.nonzero(det_labels[i] == label, as_tuple=False).squeeze(1))
            inds = torch.cat(inds)
            det_bboxes_post.append(det_bboxes[i][inds])
            det_labels_post.append(det_labels[i][inds])
    if self.labels_tail is not None:
        det_bboxes_tail_post = []
        det_labels_tail_post = []
        for i in range(num_imgs):
            inds = []
            for label in self.labels_tail:
                inds.append(torch.nonzero(det_labels_tail[i] == label, as_tuple=False).squeeze(1))
            inds = torch.cat(inds)
            det_bboxes_tail_post.append(det_bboxes_tail[i][inds])
            det_labels_tail_post.append(det_labels_tail[i][inds])
    # Merge the two branches' detections per image.
    bbox_results = []
    for i in range(num_imgs):
        if det_bboxes_post[i].shape[0] == 0:
            det_bboxes_post[i] = torch.zeros([0, 5]).to(device=det_bboxes_post[i].device)
        if det_bboxes_tail_post[i].shape[0] == 0:
            det_bboxes_tail_post[i] = torch.zeros([0, 5]).to(device=det_bboxes_tail_post[i].device)
        assert det_bboxes_post[i].shape[1] == det_bboxes_tail_post[i].shape[1], (det_bboxes_post[i].shape, det_bboxes_tail_post[i].shape)
        det_bboxes = torch.cat((det_bboxes_post[i], det_bboxes_tail_post[i]))
        det_labels = torch.cat((det_labels_post[i], det_labels_tail_post[i]))
        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)
        bbox_results.append(bbox_result)
    ms_bbox_result['ensemble'] = bbox_results
    # NOTE(review): this mask branch looks like legacy single-image code:
    # `scale_factor` (not `scale_factors`) and `ori_shape` (not
    # `ori_shapes`) are undefined here and would raise NameError, and
    # det_bboxes/det_labels refer to the last image of the loop above.
    # Confirm whether masks are ever enabled with this head.
    if self.with_mask:
        if det_bboxes.shape[0] == 0:
            mask_classes = self.mask_head[-1].num_classes
            segm_result = [[] for _ in range(mask_classes)]
        else:
            _bboxes = (
                det_bboxes[:, :4] * det_bboxes.new_tensor(scale_factor)
                if rescale else det_bboxes)
            mask_rois = bbox2roi([_bboxes])
            aug_masks = []
            for i in range(self.num_stages):
                mask_results = self._mask_forward(i, x, mask_rois)
                aug_masks.append(
                    mask_results['mask_pred'].sigmoid().cpu().numpy())
            merged_masks = merge_aug_masks(aug_masks,
                                           [img_metas] * self.num_stages,
                                           self.test_cfg)
            segm_result = self.mask_head[-1].get_seg_masks(
                merged_masks, _bboxes, det_labels, rcnn_test_cfg,
                ori_shape, scale_factor, rescale)
        ms_segm_result['ensemble'] = segm_result
    if self.with_mask:
        results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble'])
    else:
        results = ms_bbox_result['ensemble']
    return results
def aug_test(self, features, proposal_list, img_metas, rescale=False):
    """Test with augmentations.
    If rescale is False, then returned bboxes and masks will fit the scale
    of imgs[0].
    """
    rcnn_test_cfg = self.test_cfg
    aug_bboxes = []
    aug_scores = []
    aug_bboxes_tail = []
    aug_scores_tail = []
    for x, img_meta in zip(features, img_metas):
        # only one image in the batch
        img_shape = img_meta[0]['img_shape']
        scale_factor = img_meta[0]['scale_factor']
        flip = img_meta[0]['flip']
        flip_direction = img_meta[0]['flip_direction']
        # Map the proposals into this augmentation's coordinate frame.
        proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                 scale_factor, flip, flip_direction)
        # "ms" in variable names means multi-stage
        ms_scores = []
        ms_scores_tail = []
        rois = bbox2roi([proposals])
        rois_tail = bbox2roi([proposals])
        for i in range(self.num_stages):
            bbox_results = self._bbox_forward(i, x, rois)
            ms_scores.append(bbox_results['cls_score'])
            bbox_results_tail = self._bbox_forward_tail(i, x, rois_tail)
            ms_scores_tail.append(bbox_results_tail['cls_score'])
            if i < self.num_stages - 1:
                # Refine each branch's rois for the next stage.
                bbox_label = bbox_results['cls_score'][:, :-1].argmax(
                    dim=1)
                rois = self.bbox_head[i].regress_by_class(
                    rois, bbox_label, bbox_results['bbox_pred'],
                    img_meta[0])
                bbox_label_tail = bbox_results_tail['cls_score'][:, :-1].argmax(
                    dim=1)
                rois_tail = self.bbox_head_tail[i].regress_by_class(
                    rois_tail, bbox_label_tail, bbox_results_tail['bbox_pred'],
                    img_meta[0])
        # Average classification scores over the cascade stages.
        cls_score = sum(ms_scores) / float(len(ms_scores))
        bboxes, scores = self.bbox_head[-1].get_bboxes(
            rois,
            cls_score,
            bbox_results['bbox_pred'],
            img_shape,
            scale_factor,
            rescale=False,
            cfg=None)
        # print('a', bboxes.shape, scores.shape)
        cls_score_tail = sum(ms_scores_tail) / float(len(ms_scores_tail))
        bboxes_tail, scores_tail = self.bbox_head_tail[-1].get_bboxes(
            rois_tail,
            cls_score_tail,
            bbox_results_tail['bbox_pred'],
            img_shape,
            scale_factor,
            rescale=False,
            cfg=None)
        # print('b', bboxes_tail.shape, scores_tail.shape)
        # print(scores_tail)
        # print(scores)
        # if self.labels is not None:
        #     inds = []
        #     for label in self.labels:
        #         inds.append(torch.nonzero(scores == label, as_tuple=False).squeeze(1))
        #     inds = torch.cat(inds)
        #     bboxes = bboxes[inds]
        #     scores = scores[inds]
        # if self.labels_tail is not None:
        #     inds = []
        #     for label in self.labels_tail:
        #         inds.append(torch.nonzero(scores_tail == label, as_tuple=False).squeeze(1))
        #     inds = torch.cat(inds)
        #     bboxes_tail = bboxes_tail[inds]
        #     scores_tail = scores_tail[inds]
        # print(bboxes,bboxes.shape)
        # print(bboxes_tail, bboxes_tail.shape)
        # if bboxes.shape[0] == 0:
        #     det_bboxes = bboxes_tail
        #     det_labels = scores_tail
        # elif bboxes_tail.shape[0] == 0:
        #     det_bboxes = bboxes
        #     det_labels = scores
        # else:
        #     det_bboxes = torch.cat((bboxes, bboxes_tail))
        #     det_labels = torch.cat((scores, scores_tail))
        # aug_bboxes.append(det_bboxes)
        # aug_scores.append(det_labels)
        # print('c', det_bboxes.shape)
        # print('d', det_labels.shape)
        # Concatenate both branches before multi-augmentation merging.
        det_bboxes = torch.cat((bboxes, bboxes_tail))
        det_labels = torch.cat((scores, scores_tail))
        aug_bboxes.append(det_bboxes)
        aug_scores.append(det_labels)
        # aug_bboxes_tail.append(bboxes_tail)
        # aug_scores_tail.append(scores_tail)
    # after merging, bboxes will be rescaled to the original image size
    merged_bboxes, merged_scores = merge_aug_bboxes(
        aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
    # print('e', merged_bboxes.shape, merged_scores.shape)
    det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                            rcnn_test_cfg.score_thr,
                                            rcnn_test_cfg.nms,
                                            rcnn_test_cfg.max_per_img)
    # # after merging, bboxes will be rescaled to the original image size
    # merged_bboxes_tail, merged_scores_tail = merge_aug_bboxes(
    #     aug_bboxes_tail, aug_scores_tail, img_metas, rcnn_test_cfg)
    # # print('e', merged_bboxes.shape, merged_scores.shape)
    # det_bboxes_tail, det_labels_tail = multiclass_nms(merged_bboxes_tail, merged_scores_tail,
    #                                                   rcnn_test_cfg.score_thr,
    #                                                   rcnn_test_cfg.nms,
    #                                                   rcnn_test_cfg.max_per_img)
    # if self.labels is not None:
    #     inds = []
    #     for label in self.labels:
    #         inds.append(torch.nonzero(det_labels == label, as_tuple=False).squeeze(1))
    #     inds = torch.cat(inds)
    #     det_bboxes_post = det_bboxes[inds]
    #     det_labels_post = det_labels[inds]
    # if self.labels_tail is not None:
    #     inds = []
    #     for label in self.labels_tail:
    #         inds.append(torch.nonzero(det_labels_tail == label, as_tuple=False).squeeze(1))
    #     inds = torch.cat(inds)
    #     det_bboxes_tail_post = det_bboxes_tail[inds]
    #     det_labels_tail_post = det_labels_tail[inds]
    # det_bboxes = torch.cat((det_bboxes_post, det_bboxes_tail_post))
    # det_labels = torch.cat((det_labels_post, det_labels_tail_post))
    bbox_result = bbox2result(det_bboxes, det_labels,
                              self.bbox_head[-1].num_classes)
    if self.with_mask:
        if det_bboxes.shape[0] == 0:
            segm_result = [[]
                           for _ in range(self.mask_head[-1].num_classes)]
        else:
            aug_masks = []
            aug_img_metas = []
            for x, img_meta in zip(features, img_metas):
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']
                flip_direction = img_meta[0]['flip_direction']
                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                       scale_factor, flip, flip_direction)
                mask_rois = bbox2roi([_bboxes])
                for i in range(self.num_stages):
                    mask_results = self._mask_forward(i, x, mask_rois)
                    aug_masks.append(
                        mask_results['mask_pred'].sigmoid().cpu().numpy())
                    aug_img_metas.append(img_meta)
            merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                           self.test_cfg)
            ori_shape = img_metas[0][0]['ori_shape']
            segm_result = self.mask_head[-1].get_seg_masks(
                merged_masks,
                det_bboxes,
                det_labels,
                rcnn_test_cfg,
                ori_shape,
                scale_factor=1.0,
                rescale=False)
        return [(bbox_result, segm_result)]
    else:
        return [bbox_result]
def aug_test_(self, features, proposal_list, img_metas, rescale=False):
    """Test with augmentations, using only the main-branch heads.

    If rescale is False, then returned bboxes and masks will fit the scale
    of imgs[0].

    Args:
        features (list[list[Tensor]]): multi-level features, one entry per
            augmentation.
        proposal_list (list[Tensor]): proposals for the (single) test image.
        img_metas (list[list[dict]]): image meta info per augmentation.
        rescale (bool): kept for interface compatibility; merged boxes are
            mapped back to the original image size by `merge_aug_bboxes`.

    Returns:
        list: one `(bbox_result, segm_result)` tuple when masks are
        enabled, otherwise `[bbox_result]`, for the single test image.
    """
    # Fix: the trailing dataset-contamination token ("| StarcoderdataPython")
    # fused onto the final return has been removed; it made the statement
    # reference an undefined name at runtime.
    rcnn_test_cfg = self.test_cfg
    aug_bboxes = []
    aug_scores = []
    for x, img_meta in zip(features, img_metas):
        # only one image in the batch
        img_shape = img_meta[0]['img_shape']
        scale_factor = img_meta[0]['scale_factor']
        flip = img_meta[0]['flip']
        flip_direction = img_meta[0]['flip_direction']
        # Map the proposals into this augmentation's coordinate frame.
        proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                 scale_factor, flip, flip_direction)
        # "ms" in variable names means multi-stage
        ms_scores = []
        rois = bbox2roi([proposals])
        for i in range(self.num_stages):
            bbox_results = self._bbox_forward(i, x, rois)
            ms_scores.append(bbox_results['cls_score'])
            if i < self.num_stages - 1:
                # Refine rois for the next stage using the current stage's
                # most confident foreground class.
                bbox_label = bbox_results['cls_score'][:, :-1].argmax(
                    dim=1)
                rois = self.bbox_head[i].regress_by_class(
                    rois, bbox_label, bbox_results['bbox_pred'],
                    img_meta[0])
        # Average classification scores over the cascade stages.
        cls_score = sum(ms_scores) / float(len(ms_scores))
        bboxes, scores = self.bbox_head[-1].get_bboxes(
            rois,
            cls_score,
            bbox_results['bbox_pred'],
            img_shape,
            scale_factor,
            rescale=False,
            cfg=None)
        aug_bboxes.append(bboxes)
        aug_scores.append(scores)
    # after merging, bboxes will be rescaled to the original image size
    merged_bboxes, merged_scores = merge_aug_bboxes(
        aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
    det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                            rcnn_test_cfg.score_thr,
                                            rcnn_test_cfg.nms,
                                            rcnn_test_cfg.max_per_img)
    bbox_result = bbox2result(det_bboxes, det_labels,
                              self.bbox_head[-1].num_classes)
    if self.with_mask:
        if det_bboxes.shape[0] == 0:
            segm_result = [[[]
                            for _ in range(self.mask_head[-1].num_classes)]
                           ]
        else:
            aug_masks = []
            aug_img_metas = []
            for x, img_meta in zip(features, img_metas):
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']
                flip_direction = img_meta[0]['flip_direction']
                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                       scale_factor, flip, flip_direction)
                mask_rois = bbox2roi([_bboxes])
                for i in range(self.num_stages):
                    mask_results = self._mask_forward(i, x, mask_rois)
                    aug_masks.append(
                        mask_results['mask_pred'].sigmoid().cpu().numpy())
                    aug_img_metas.append(img_meta)
            merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                           self.test_cfg)
            ori_shape = img_metas[0][0]['ori_shape']
            segm_result = self.mask_head[-1].get_seg_masks(
                merged_masks,
                det_bboxes,
                det_labels,
                rcnn_test_cfg,
                ori_shape,
                scale_factor=1.0,
                rescale=False)
        return [(bbox_result, segm_result)]
    else:
        return [bbox_result]
3220253 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manages pushes to and deletes from a v2 docker registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import concurrent.futures
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image
from containerregistry.client.v2_2 import docker_image_list as image_list
import httplib2
import six.moves.http_client
import six.moves.urllib.parse
def _tag_or_digest(name):
    """Return the tag or digest component of a docker reference."""
    if isinstance(name, docker_name.Digest):
        return name.digest
    # Anything that is not a Digest must be a Tag.
    assert isinstance(name, docker_name.Tag)
    return name.tag
class Push(object):
"""Push encapsulates a Registry v2.2 Docker push session."""
def __init__(self,
             name,
             creds,
             transport,
             mount = None,
             threads = 8):
    """Constructor.

    If multiple threads are used, the caller *must* ensure that the provided
    transport is thread-safe, as well as the image that is being uploaded.
    It is notable that tarfile and httplib2.Http in Python are NOT threadsafe.

    Args:
        name: the fully-qualified name of the tag to push.
        creds: credential provider for authorizing requests.
        transport: the http transport to use for sending requests.
        mount: list of repos from which to mount blobs.
        threads: the number of threads to use for uploads.

    Raises:
        ValueError: an incorrectly typed argument was supplied.
    """
    self._name = name
    self._mount = mount
    self._threads = threads
    # Every request in this session is authorized for pushing to `name`.
    self._transport = docker_http.Transport(name, creds, transport,
                                            docker_http.PUSH)
def _scheme_and_host(self):
    """Return '<scheme>://<registry>' for the target registry."""
    scheme = docker_http.Scheme(self._name.registry)
    return '{}://{}'.format(scheme, self._name.registry)
def _base_url(self):
    """Return the registry v2 URL prefix for this repository."""
    return '{}/v2/{}'.format(self._scheme_and_host(),
                             self._name.repository)
def _get_absolute_url(self, location):
    """Resolve `location` against the registry host.

    If 'location' is already an absolute URL (includes host), this is a
    no-op.
    """
    base = self._scheme_and_host()
    return six.moves.urllib.parse.urljoin(base=base, url=location)
def _blob_exists(self, digest):
    """Return True iff the registry already holds the given blob."""
    url = '{base_url}/blobs/{digest}'.format(
        base_url=self._base_url(), digest=digest)
    # HEAD avoids transferring the blob content; 200 means it exists.
    resp, _ = self._transport.Request(
        url,
        method='HEAD',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ])
    return resp.status == six.moves.http_client.OK  # pytype: disable=attribute-error
def _manifest_exists(self, image):
    """Return True iff the registry has the manifest for `image.digest()`."""
    url = '{base_url}/manifests/{digest}'.format(
        base_url=self._base_url(), digest=image.digest())
    # GET the manifest by digest; 200 means it is already present.
    resp, _ = self._transport.Request(
        url,
        method='GET',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ],
        accepted_mimes=[image.media_type()])
    return resp.status == six.moves.http_client.OK  # pytype: disable=attribute-error
def _get_blob(self, image, digest):
    """Return the raw bytes for `digest` from `image`."""
    # The config blob lives on the image as JSON text rather than a layer.
    if digest != image.config_blob():
        return image.blob(digest)
    return image.config_file().encode('utf8')
def _monolithic_upload(self, image, digest):
    """Upload a blob in a single POST (not supported by all registries)."""
    url = '{base_url}/blobs/uploads/?digest={digest}'.format(
        base_url=self._base_url(), digest=digest)
    self._transport.Request(
        url,
        method='POST',
        body=self._get_blob(image, digest),
        accepted_codes=[six.moves.http_client.CREATED])
def _add_digest(self, url, digest):
    """Return `url` with its query string's `digest` parameter set."""
    parts = six.moves.urllib.parse.urlsplit(url)
    query = six.moves.urllib.parse.parse_qs(parts.query)
    # parse_qs yields list values; keep that shape for urlencode(doseq=True).
    query['digest'] = [digest]
    encoded = six.moves.urllib.parse.urlencode(query, doseq=True)
    return six.moves.urllib.parse.urlunsplit(
        (parts.scheme, parts.netloc, parts.path, encoded, parts.fragment))
def _put_upload(self, image, digest):
    """Two-phase upload: POST to start a session, then PUT the full blob."""
    mounted, location = self._start_upload(digest, self._mount)
    if mounted:
        # Cross-repo mount succeeded; nothing left to transfer.
        logging.info('Layer %s mounted.', digest)
        return
    self._transport.Request(
        self._add_digest(location, digest),
        method='PUT',
        body=self._get_blob(image, digest),
        accepted_codes=[six.moves.http_client.CREATED])
# pylint: disable=missing-docstring
def _patch_upload(self, image,
                  digest):
    # Three-phase upload: POST starts a session (or cross-repo mounts),
    # PATCH streams the blob content, and a final bodyless PUT commits it
    # under `digest`.
    mounted, location = self._start_upload(digest, self._mount)
    if mounted:
        # Cross-repo mount succeeded; nothing left to transfer.
        logging.info('Layer %s mounted.', digest)
        return
    location = self._get_absolute_url(location)
    resp, unused_content = self._transport.Request(
        location,
        method='PATCH',
        body=self._get_blob(image, digest),
        content_type='application/octet-stream',
        accepted_codes=[
            six.moves.http_client.NO_CONTENT, six.moves.http_client.ACCEPTED,
            six.moves.http_client.CREATED
        ])
    # Commit at the location the PATCH response points to, with the digest
    # appended as a query parameter.
    location = self._add_digest(resp['location'], digest)
    location = self._get_absolute_url(location)
    self._transport.Request(
        location,
        method='PUT',
        body=None,
        accepted_codes=[six.moves.http_client.CREATED])
def _put_blob(self, image, digest):
    """Upload the aufs .tgz for a single layer."""
    # Unchunked upload strategies, fastest first:
    #
    # 1. POST /v2/<name>/blobs/uploads/?digest=<digest>
    #    (self._monolithic_upload) -- fastest, but many registries do not
    #    support it.
    #
    # 2. POST /v2/<name>/blobs/uploads/ (no body*), then
    #    PUT /v2/<name>/blobs/uploads/<uuid> (full body)
    #    (self._put_upload) -- next fastest, but has a mysterious bad
    #    interaction with Bintray, and docker clients stopped using it in
    #    1.8 in favor of the 3-stage method.
    #
    # 3. POST (no body*), PATCH (full body), PUT (no body) -- the method
    #    used here.
    #
    # * When "mount" repositories were supplied, the POST first attempts a
    #   cross-repo mount from a repository known to contain this blob,
    #   which skips the upload entirely.
    self._patch_upload(image, digest)
def _remote_tag_digest(self, image):
    """Return the digest the remote tag currently points at, or None."""
    # GET the tag we're pushing; NOT_FOUND means the tag does not exist.
    resp, _ = self._transport.Request(
        '{base_url}/manifests/{tag}'.format(
            base_url=self._base_url(),
            tag=self._name.tag),  # pytype: disable=attribute-error
        method='GET',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ],
        accepted_mimes=[image.media_type()])
    if resp.status != six.moves.http_client.NOT_FOUND:  # pytype: disable=attribute-error
        return resp.get('docker-content-digest')
    return None
def _put_manifest(
    self,
    image,
    use_digest = False):
    """Upload the manifest for this image, referenced by tag or digest."""
    ref = image.digest() if use_digest else _tag_or_digest(self._name)
    self._transport.Request(
        '{base_url}/manifests/{tag_or_digest}'.format(
            base_url=self._base_url(), tag_or_digest=ref),
        method='PUT',
        body=image.manifest(),
        content_type=image.media_type(),
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.CREATED,
            six.moves.http_client.ACCEPTED  # pytype: disable=wrong-arg-types
        ])
def _start_upload(self,
digest,
mount = None
):
"""POST to begin the upload process with optional cross-repo mount param."""
# Do a normal POST to initiate an upload if mount is missing.
url = '{base_url}/blobs/uploads/'.format(base_url=self._base_url())
accepted_codes = [six.moves.http_client.ACCEPTED]
# TODO(r2d4): Enable mount parameter for dockerhub
# else:
# # If we have a mount parameter, try to mount the blob from another repo.
# mount_from = '&'.join([
# 'from=' + six.moves.urllib.parse.quote(repo.repository, '')
# for repo in self._mount
# ])
# url = '{base_url}/blobs/uploads/?mount={digest}&{mount_from}'.format(
# base_url=self._base_url(), digest=digest, mount_from=mount_from)
# accepted_codes = [
# six.moves.http_client.CREATED, six.moves.http_client.ACCEPTED
# ]
# import pdb; pdb.set_trace()
resp, unused_content = self._transport.Request(
url, method='POST', body=None, accepted_codes=accepted_codes)
# pytype: disable=attribute-error,bad-return-type
return resp.status == six.moves.http_client.CREATED, resp.get('location')
# pytype: enable=attribute-error,bad-return-type
def _upload_one(self, image, digest):
"""Upload a single layer, after checking whether it exists already."""
if self._blob_exists(digest):
logging.info('Layer %s exists, skipping', digest)
return
self._put_blob(image, digest)
logging.info('Layer %s pushed.', digest)
  def upload(self,
             image,
             use_digest = False):
    """Upload the layers of the given image.

    Args:
      image: the image to upload.
      use_digest: use the manifest digest (i.e. not tag) as the image reference.
    """
    # If the manifest (by digest) exists, then avoid N layer existence
    # checks (they must exist).
    if self._manifest_exists(image):
      if isinstance(self._name, docker_name.Tag):
        if self._remote_tag_digest(image) == image.digest():
          # Nothing to do at all: the tag already points here.
          logging.info('Tag points to the right manifest, skipping push.')
          return
        # Blobs are present; only the tag needs (re)pointing below.
        logging.info('Manifest exists, skipping blob uploads and pushing tag.')
      else:
        logging.info('Manifest exists, skipping upload.')
    elif isinstance(image, image_list.DockerImageList):
      # A manifest list: push every child image by digest, then the list itself.
      for _, child in image:
        # TODO(user): Refactor so that the threadpool is shared.
        with child:
          self.upload(child, use_digest=True)
    elif self._threads == 1:
      for digest in image.distributable_blob_set():
        self._upload_one(image, digest)
    else:
      # Push blobs concurrently; future.result() re-raises any worker error.
      with concurrent.futures.ThreadPoolExecutor(
          max_workers=self._threads) as executor:
        future_to_params = {
            executor.submit(self._upload_one, image, digest): (image, digest)
            for digest in image.distributable_blob_set()
        }
        for future in concurrent.futures.as_completed(future_to_params):
          future.result()
    # This should complete the upload by uploading the manifest.
    self._put_manifest(image, use_digest=use_digest)
  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    return self
  def __exit__(self, exception_type, unused_value, unused_traceback):
    # Log the outcome of the whole push when the `with` block ends.
    # Returning a falsy value means any exception propagates to the caller.
    if exception_type:
      logging.error('Error during upload of: %s', self._name)
      return
    logging.info('Finished upload of: %s', self._name)
# pylint: disable=invalid-name
def Delete(
    name,
    creds,
    transport
):
  """Delete a tag or digest.

  Args:
    name: a tag or digest to be deleted.
    creds: the creds to use for deletion.
    transport: the transport to use to contact the registry.
  """
  # A fresh transport is created for the DELETE action so the auth scope
  # matches the operation.
  docker_transport = docker_http.Transport(name, creds, transport,
                                           docker_http.DELETE)
  _, unused_content = docker_transport.Request(
      '{scheme}://{registry}/v2/{repository}/manifests/{entity}'.format(
          scheme=docker_http.Scheme(name.registry),
          registry=name.registry,
          repository=name.repository,
          entity=_tag_or_digest(name)),
      method='DELETE',
      accepted_codes=[six.moves.http_client.OK, six.moves.http_client.ACCEPTED])
| StarcoderdataPython |
147674 | <filename>src/ui/file_options_dialog.py<gh_stars>1-10
#
# <NAME>
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import tkinter as tk
import tkinter.messagebox as messagebox
from src.ui.dialog import Dialog
class FileOptionsDialog(Dialog):
    """Modal dialog for toggling the advanced log-file analysis options."""

    def __init__(self, parent):
        self._configManager = parent.get_config_manager()
        # Tk variables mirror the persisted config so the checkboxes open
        # in their current state.
        self._tk_calculate_new_reward = tk.BooleanVar(value=self._configManager.get_calculate_new_reward())
        self._tk_calculate_alternate_discount_factors = tk.BooleanVar(
            value=self._configManager.get_calculate_alternate_discount_factors())
        super().__init__(parent, "File Processing Options")

    def body(self, master):
        # Build the dialog body: one checkbox per option plus a warning label.
        tk.Label(master, text="Select the following options to enable advanced analysis:").grid(
            column=0, row=0, pady=3, sticky="W")
        tk.Checkbutton(master, variable=self._tk_calculate_new_reward,
                       text="Calculate rewards using new reward function").grid(
            column=0, row=1, pady=3, sticky="W")
        tk.Checkbutton(master, variable=self._tk_calculate_alternate_discount_factors,
                       text="Calculate future rewards using alternate discount factors").grid(
            column=0, row=2, pady=3, sticky="W")
        tk.Label(master, text="Beware ... these options will increase the time taken to open a log file",
                 foreground="red").grid(
            column=0, row=3, pady=6, sticky="W")

    def apply(self):
        # Push changed values into the config manager; remember whether the
        # user *enabled* anything, since that requires re-opening the file.
        turned_on_some_analysis = False
        new_value = self._tk_calculate_new_reward.get()
        if new_value != self._configManager.get_calculate_new_reward():
            turned_on_some_analysis = turned_on_some_analysis or new_value
            self._configManager.set_calculate_new_reward(new_value)
        new_value = self._tk_calculate_alternate_discount_factors.get()
        if new_value != self._configManager.get_calculate_alternate_discount_factors():
            turned_on_some_analysis = turned_on_some_analysis or new_value
            self._configManager.set_calculate_alternate_discount_factors(new_value)
        self.parent.refresh_analysis_controls()
        if turned_on_some_analysis:
            messagebox.showinfo(title="Options Reminder", message="You now need to re-open a log file to perform new analysis")
            self.parent.close_file()

    def validate(self):
        # Checkboxes cannot hold an invalid state, so there is nothing to check.
        return True
| StarcoderdataPython |
3272516 | """
The following interfaces control Xshell sessions: opening and closing sessions, recording session logs, etc.
"""
Connected = False  # Whether the current session is connected
LocalAddress = "LocalAddress"  # Local address of the session
Path = "Path"  # Path of the current session file
RemoteAddress = "RemoteAddress"  # Remote address
RemotePort = "RemotePort"  # Remote port number
Logging = False  # Whether the session is currently writing a log file
# NOTE(review): this constant is shadowed by the LogFilePath() function
# defined later in this module — confirm which one callers expect.
LogFilePath = "LogFilePath"  # Path where log files are stored
def Open(lpszSession: str):
    """Open a new session or URL.

    :param lpszSession: an Xshell session file path, or an Xshell-format URL
    """
    print(f"xsh.Session.Open({lpszSession})")
def Close():
    """Close the currently connected session."""
    print("xsh.Session.Close()")
def Sleep(timeout: int):
    """Make Xshell wait for the given amount of time.

    :param timeout: wait time in milliseconds
    """
    print(f"xsh.Session.Sleep({timeout})")
def LogFilePath(lpszNewFilePath: str):
    """Specify the log file path.

    NOTE(review): this function shadows the module-level ``LogFilePath``
    string constant defined above — confirm which one callers expect.

    :param lpszNewFilePath: log file name, including its path
    """
    print(f"xsh.Session.LogFilePath({lpszNewFilePath})")
def StartLog():
    """Start recording the session.

    Output goes to the log file given via LogFilePath(); if no path was
    specified, the default path is used.
    """
    print("xsh.Session.StartLog()")
def StopLog():
    """Stop recording the session."""
    print("xsh.Session.StopLog()")
| StarcoderdataPython |
47648 | <reponame>minnieteng/smoke_project<filename>smoke/box/FeatureTimeSpaceGrid.py
import os
import json
import tarfile
import tempfile
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from pytz import timezone
from geopy.distance import distance
from scipy.optimize import Bounds, minimize
from collections import Counter
from box.Box import Box
#from smoke.box.Box import Box
class FeatureTimeSpaceGrid:

    def __init__(self, box, features,
                 datetime_start, datetime_stop, time_res_h):
        """ Create an instance of a 4D grid with shape
        (n_features, n_time, n_latitude, n_longitude) object acts as a
        wrapper around the 2D theoretical space grid of the given Box

        :param box: Theoretical space grid to use as last 2 dims of space
        :type box: smoke_tools.box.Box
        :param features: Array of unique features for grid
        :type features: np.array
        :param datetime_start: Time for FeatureTimeSpaceGrid to start exclusive
        :type datetime_start: datetime
        :param datetime_stop: Time for FeatureTimeSpaceGrid to end inclusive
        :type datetime_stop: datetime
        :param time_res_h: Resolution to use in between start and stop for time in hours
        :type time_res_h: int
        """
        # Assign box attributes
        self.box = box
        # Save exact args used to make Box (keyed so save()/load can rebuild it)
        box_args = self.box.get_orig_box_args()
        self.orig_box_args = {"nw_lat":box_args[0],
                              "nw_lon":box_args[1],
                              "sw_lat_est":box_args[2],
                              "sw_lon_est":box_args[3],
                              "dist_km":box_args[4],
                              "dist_res_km":box_args[5]}
        # Assign feature and time attributes ensuring UTC
        self.features = features
        self.datetime_start = datetime_start.replace(tzinfo=(timezone('UTC')))
        self.datetime_stop = datetime_stop.replace(tzinfo=(timezone('UTC')))
        self.time_res_h = time_res_h
        # Create times and empty feature time space grid (features, time, lat, lon)
        # use ends of each time bins to make it easier for cleaner selection
        # as just have to choose all times in day
        self.times = np.arange(
            datetime_start+timedelta(hours=time_res_h),
            datetime_stop+timedelta(hours=time_res_h),
            timedelta(hours=time_res_h)
        )
        # All cells start as np.nan, meaning "no data yet".
        self.feature_time_space_grid = np.empty((self.features.size,
                                                 self.times.size,
                                                 self.box.get_num_cells(),
                                                 self.box.get_num_cells()
                                                 ))
        self.feature_time_space_grid[:] = np.nan

    def get_feature(self, i):
        """ Return feature at given index in features
        """
        return self.features[i]

    def get_features(self):
        """ Return np.array of features axis
        """
        return self.features

    def get_feature_index(self, feature):
        """ Return int index of feature in features
        """
        return np.indices(self.features.shape)[0][self.features == feature][0]

    def get_time(self, i):
        """ Return datetime at given index in times
        """
        return self.times[i]

    def get_times(self):
        """ Return np.array of time axis
        """
        return self.times

    def get_time_index(self, time):
        """ Return int index of time in times
        """
        return np.indices(self.times.shape)[0][self.times == time][0]

    def set_grid(self, grid):
        """ Set grid to whatever grid was given if it correct shape

        :param grid: Grid of similar shape to replace current grid with
        :type grid: np.array
        """
        assert self.get_grid().shape == grid.shape, "Given grid has incorrect shape"
        self.feature_time_space_grid = grid

    def set_feature_grid(self, feature, grid):
        """ Set grid of time, space at feature to whatever grid was given if it
        correct shape

        :param grid: Grid of similar shape to replace current grid at feature with
        :type grid: np.array
        """
        assert self.get_grid().shape[1:] == grid.shape, "Given feature grid has incorrect shape"
        feature_index = self.get_feature_index(feature)
        self.feature_time_space_grid[feature_index] = grid

    def get_grid(self):
        """ Return np.array of current grid of FeatureTimeSpaceGrid shape
        (n_features, n_time, n_latitude, n_longitude)
        """
        return self.feature_time_space_grid

    def get_grid_nan_converted(self, fill_val=-1):
        """ Return np.array of current grid of FeatureTimeSpaceGrid shape
        (n_features, n_time, n_latitude, n_longitude) with all np.nan
        converted into -1 or whatever fill value is given.

        :param fill_val: Value to replace np.nan with, default -1
        :type: float, optional
        """
        # Work on a copy so the stored grid keeps its np.nan markers.
        grid_copy = self.feature_time_space_grid.copy()
        where_nan = np.isnan(grid_copy)
        grid_copy[where_nan] = fill_val
        return grid_copy

    def assign_space_grid(self, lat, lon, mesh=True):
        """ Assign cells i and j for every data point spatially based on longitude and
        latitude.

        :param lat: Array of latitudes either unmeshed 1D, singular 1D, or 2D
        :type lat: np.array
        :param lon: Array of longitudes either unmeshed 1D, singular 1D, or 2D
        :type lon: np.array
        :param mesh: Whether or not to mesh 1D grids, default True
        :type mesh: bool, optional
        :return: Masked array of jth row indices, ith column indices of data values in
                 shape of data, masked values are ones that fall outside of
                 grid of Box
        :rtype: ma.array, ma.array
        """
        # If longitude and latitude are 1D arrays mesh to make grid matching values
        if mesh and lon.ndim == 1 and lat.ndim == 1:
            lon, lat = np.meshgrid(lon, lat)
        # Save shape of coordinate arrays
        shape = lon.shape
        # Vectorize assignment function and assign cell placement if in grid or
        # leave unplaced w np.nan if not
        assign_fcn_vectorized = np.vectorize(
            self.box.get_cell_assignment_if_in_grid,
            otypes=[float, float]
        )
        row, col = assign_fcn_vectorized(lat.flatten(), lon.flatten())
        row, col = row.reshape(shape), col.reshape(shape)
        cell_indices = np.dstack((row, col))
        # Mask array where is np.nan (no assignment) and convert to int
        cell_indices = ma.masked_where(np.isnan(cell_indices), cell_indices)
        cell_indices = cell_indices.astype(int)
        return cell_indices

    def populate_cell(self, feature_index, time_index, j, i, value):
        """ Populate cell with value at given feature index, time index, jth row index,
        and ith column index.

        :param feature_index: Index on feature axis
        :type feature_index: int
        :param time_index: Index on time axis at feature axis location
        :type time_index: int
        :param j: Row index at time axis location
        :type j: int
        :param i: Column index at row index location
        :type i: int
        :param value: Value to place at location
        :type value: float
        """
        self.feature_time_space_grid[feature_index][time_index][j][i] = value

    def populate_space_grid(self, feature, time, unique_cell_assignments, data_vals):
        """ Given an array of unique grid cell assignments (j, i) and corresponding data_vals
        on a single axis, populates the given grid at location feature and time

        :param feature: Feature in grid to populate a time in
        :type feature: str
        :param time: Time in grid to populate space grid of
        :type time: datetime
        :param unique_cell_assignments: Array of unique pairs of coordinates to place data_vals at (j, i)
        :type unique_cell_assignments: np.array shape=(data_vals.size, 2)
        :param data_vals: Array of data_vals to populate cells with for each corresponding cell
                          in unique_cell_assignments
        :type data_vals: np.array shape=(unique_cell_assignments, )
        """
        feature_index = self.get_feature_index(feature)
        time_index = self.get_time_index(time)
        for coords, val in zip(unique_cell_assignments, data_vals):
            self.populate_cell(feature_index, time_index, coords[0], coords[1], val)

    def diagnostic_plot(self):
        """ Plot 2D plot for all features and times
        """
        for feature in self.get_features():
            feature_index = self.get_feature_index(feature)
            for time in self.get_times():
                time_index = self.get_time_index(time)
                grid2D = self.get_grid()[feature_index][time_index]
                fig, ax = plt.subplots(figsize=(16.2, 16))
                im = ax.imshow(grid2D)
                ax.set_title(f"{feature} {time}")
                ax.set_xlabel("Cols")
                ax.set_ylabel("Rows")
                plt.colorbar(im)
                plt.show()

    def save(self, save_dir, prefix=''):
        """ Save 4D grid array, features array, time array, and
        corresponding Box and other meta data in .npy and .json
        files describing entirely the current FTSG.
        File name will contain grid start time, grid end time, and
        grid time resolution

        :param save_dir: Directory to save tar.gz into
        :type save_dir: str
        :param prefix: Prefix to add before automatically generated name,
                       (e.g. firework_ or bluesky_), default ''
        :type prefix: str
        """
        # Generate unique name from start time, stop time and some prefix
        unique_name = (
            prefix +
            self.datetime_start.strftime('strt%Y%m%dT%H%M%S_') +
            self.datetime_stop.strftime('stop%Y%m%dT%H%M%S_') +
            f"res{self.time_res_h}"
        )
        # Save individual .npy and .json file in temp dir pre compress
        temp_dir = tempfile.TemporaryDirectory()
        temp_dir_path = temp_dir.name
        np.save(os.path.join(temp_dir_path, 'features.npy'), self.get_features())
        np.save(os.path.join(temp_dir_path, 'times.npy'), self.get_times())
        np.save(os.path.join(temp_dir_path, 'grid.npy'), self.get_grid())
        with open(os.path.join(temp_dir_path, 'box_args.json'), 'w') as f_json:
            json.dump(self.orig_box_args, f_json, indent=2)
        with open(os.path.join(temp_dir_path, 'time_args.json'), 'w') as f_json:
            time_args = {
                "datetime_start":self.datetime_start.strftime('%Y-%m-%dT%H:%M:%S'),
                "datetime_stop":self.datetime_stop.strftime('%Y-%m-%dT%H:%M:%S'),
                "time_res_h":self.time_res_h
            }
            json.dump(time_args, f_json, indent=2)
        with open(os.path.join(temp_dir_path, 'meta.json'), 'w') as f_json:
            meta_data = {
                "unique file name":unique_name,
                "start (inclusive)":self.datetime_start.strftime('%Y-%m-%dT%H:%M:%S'),
                "stop (exclusive)":self.datetime_stop.strftime('%Y-%m-%dT%H:%M:%S'),
                "time resolution (h)":self.time_res_h,
                "features list":list(self.get_features()),
                "times list":list(self.get_times().astype(str)),
                "grid shape":self.get_grid().shape,
                "grid all nan":bool(np.isnan(self.get_grid()).all())
            }
            json.dump(meta_data, f_json, indent=2)
        # Compress .npy and .json into a .tar.gz with unique_name in save_dir
        with tarfile.open(os.path.join(save_dir, unique_name+'.tar.gz'), 'w:gz') as f_tar:
            files_in_temp = [
                os.path.join(temp_dir_path, f_in_temp) for f_in_temp in os.listdir(temp_dir_path)
            ]
            for f_in_temp in files_in_temp:
                f_tar.add(f_in_temp, arcname=os.path.basename(f_in_temp))
        # Explicitly close tempdir
        temp_dir.cleanup()
class TemporaryTimeSpaceGrid:

    def __init__(self, box, times):
        """ Create an instance of a 3D grid with shape
        (n_time, n_latitude, n_longitude); acts as a wrapper around the
        2D theoretical space grid of the given Box.
        (The previous docstring described the 4D feature grid — this class
        has no feature axis.)

        :param box: Theoretical space grid to use as last 2 dims of space
        :type box: smoke.box.Box
        :param times: Array of times to use for time axis of timespace grid
        :type times: numpy.ndarray
        """
        # Assign box attributes
        self.box = box
        # Create time and empty time space grid (time, lat, lon);
        # np.nan marks cells with no data yet.
        self.times = times
        self.time_space_grid = np.empty(
            (
                self.times.size,
                self.box.get_num_cells(),
                self.box.get_num_cells()
            )
        )
        self.time_space_grid[:] = np.nan

    def get_times(self):
        """ Return np.array of time axis
        """
        return self.times

    def get_time_index(self, time):
        """ Return int index of time in times
        """
        return np.indices(self.times.shape)[0][self.times == time][0]

    def get_grid(self):
        """ Return np.array of current grid of TemporaryTimeSpaceGrid shape
        (n_time, n_latitude, n_longitude)
        """
        return self.time_space_grid

    def set_time_grid(self, time, grid):
        """ Set grid space at time to whatever grid was given if it
        correct shape

        :param grid: Grid of similar shape to replace current grid at time with
        :type grid: np.array
        """
        assert self.get_grid().shape[1:] == grid.shape, "Given feature grid has incorrect shape"
        time_index = self.get_time_index(time)
        self.time_space_grid[time_index] = grid

    def populate_cell(self, time_index, j, i, value):
        """ Populate cell with value at given time index, jth row index,
        and ith column index.

        :param time_index: Index on time axis at feature axis location
        :type time_index: int
        :param j: Row index at time axis location
        :type j: int
        :param i: Column index at row index location
        :type i: int
        """
        self.time_space_grid[time_index][j][i] = value

    def populate_space_grid(self, time, unique_cell_assignments, data_vals):
        """ Given an array of unique grid cell assignments (j, i) and corresponding data_vals
        on a single axis, populates the given grid at location time

        :param time: Time in grid to populate space grid of
        :type time: datetime
        :param unique_cell_assignments: Array of unique pairs of coordinates to place data_vals at (j, i)
        :type unique_cell_assignments: np.array shape=(data_vals.size, 2)
        :param data_vals: Array of data_vals to populate cells with for each corresponding cell
                          in unique_cell_assignments
        :type data_vals: np.array shape=(unique_cell_assignments, )
        """
        time_index = self.get_time_index(time)
        for coords, val in zip(unique_cell_assignments, data_vals):
            self.populate_cell(time_index, coords[0], coords[1], val)
def load_FeatureTimeSpaceGrid(file_path):
    """ Load a previously saved FTSG into the same state as it was when it was
    saved

    :param file_path: Path to saved tar.gz of FeatureTimeSpaceGrid
    :type file_path: str
    :returns: FeatureTimeSpaceGrid in same state as one which was saved
    :rtype: FeatureTimeSpaceGrid
    """
    # Load all files in tar.gz into a temp dir.
    temp_dir = tempfile.TemporaryDirectory()
    temp_dir_path = temp_dir.name
    # Context manager closes the archive even on error — the original
    # opened the tarfile and never closed it (resource leak).
    with tarfile.open(file_path, mode='r') as f_tar:
        f_tar.extractall(temp_dir_path)
    # Create FeatureTimeSpaceGrid from old data
    features = np.load(os.path.join(temp_dir_path, "features.npy"))
    with open(os.path.join(temp_dir_path, "box_args.json")) as f_json:
        box_args = json.load(f_json)
    with open(os.path.join(temp_dir_path, "time_args.json")) as f_json:
        time_args = json.load(f_json)
    new_box = Box(box_args["nw_lat"],
                  box_args["nw_lon"],
                  box_args["sw_lat_est"],
                  box_args["sw_lon_est"],
                  box_args["dist_km"],
                  box_args["dist_res_km"])
    new_ftsg = FeatureTimeSpaceGrid(
        new_box,
        features,
        datetime.strptime(time_args["datetime_start"], '%Y-%m-%dT%H:%M:%S'),
        datetime.strptime(time_args["datetime_stop"], '%Y-%m-%dT%H:%M:%S'),
        time_args["time_res_h"]
    )
    new_ftsg.set_grid(np.load(os.path.join(temp_dir_path, "grid.npy")))
    # Explicitly close tempdir
    temp_dir.cleanup()
    return new_ftsg
| StarcoderdataPython |
3356901 | <gh_stars>0
import concurrent.futures
from copy import deepcopy
from .SimulationResult import SimulationResult
from .Simulator import Simulation
from ..core.logger import Logger
class Coordinator:
    """Fan a fixed total number of game simulations out over worker processes
    and merge their results into a single SimulationResult."""

    def __init__(self, game, logger, count=1000, parallels=5):
        self._game = game
        self._max = count                # total number of games to simulate
        self._parallels = parallels      # number of worker processes
        import math
        # Round up so parallels * games_per_parallel >= count.
        self._games_per_parallel = math.ceil(self._max / self._parallels)
        self._logger = logger
        # this is kind of hacky, but allowed the player list to be passed to the empty result.
        self._result = SimulationResult.create_empty_result(game._players)

    def run(self):
        # allows creation of specified number of parallel processes.
        with concurrent.futures.ProcessPoolExecutor(max_workers=self._parallels) as executor:
            servants = []
            # assigns run_a_sim function to each process with their own copy of game.
            for i in range(self._parallels):
                servants.append(executor.submit(_run_a_simulator, deepcopy(self._game), self._games_per_parallel))
            # as the processes complete, merge in their results.
            for future in concurrent.futures.as_completed(servants):
                self._result.merge(future.result())
        # once all processes are complete, perform statistical calculations on collected results.
        self._result.do_calculations()
        return self._result
def _run_a_simulator(game, count):
    # Worker entry point: must be a module-level function so it can be
    # pickled by ProcessPoolExecutor. Runs `count` games on its own copy
    # of `game` and returns the resulting SimulationResult.
    sim = Simulation(game, count, Logger())
    return sim.run()
| StarcoderdataPython |
186414 | <filename>Flask/8_context.py
from flask import Flask
# The original file decorated with @app.route without ever creating `app`,
# which raises NameError at import time — create the application object.
app = Flask(__name__)


@app.route("/index")
def index():
    # `request` is a thread-local proxy bound to the current request
    # (translated from the original Chinese comment).
    from flask import request  # local import keeps this fix self-contained
    # NOTE(review): request.form is only populated for POST/PUT bodies and
    # this route only accepts GET — confirm the intended HTTP method.
    name = request.form.get("name")
    # A Flask view must return a response (string/object), not None.
    return name or ""
| StarcoderdataPython |
3377003 | import handle_input as input
import game_flags as flags
import pygame as pg
class Pacman(pg.sprite.Sprite):
    """Player sprite: animates per movement direction and moves one pixel
    per call toward a destination grid cell."""

    # Constructor
    def __init__(self, pos=(-1, -1)):
        # Call the parent class (Sprite) constructor
        pg.sprite.Sprite.__init__(self)
        size = (32, 32)
        # NOTE(review): pos appears to be (row, col) grid coordinates —
        # it is reversed and scaled by the 32px tile size to get pixels.
        self.pos = pos
        position = tuple([ele * 32 for ele in reversed(self.pos)])
        self.rect = pg.Rect(position, size)
        # Load one animation frame list per facing direction.
        self.images_right = []
        for char_img in flags.MAIN_CHARACTER_RIGHT:
            img, _ = input.load_image(flags.CHARACTER_TYPE, char_img)
            self.images_right.append(img)
        self.images = self.images_right.copy()
        self.images_left = []
        for char_img in flags.MAIN_CHARACTER_LEFT:
            img, _ = input.load_image(flags.CHARACTER_TYPE, char_img)
            self.images_left.append(img)
        self.images_up = []
        for char_img in flags.MAIN_CHARACTER_UP:
            img, _ = input.load_image(flags.CHARACTER_TYPE, char_img)
            self.images_up.append(img)
        self.images_down = []
        for char_img in flags.MAIN_CHARACTER_DOWN:
            img, _ = input.load_image(flags.CHARACTER_TYPE, char_img)
            self.images_down.append(img)
        self.images_disappear = []
        for char_img in flags.MAIN_CHARACTER_DISAPPEARED:
            img, _ = input.load_image(flags.CHARACTER_TYPE, char_img)
            self.images_disappear.append(img)
        # Animation state: current frame index and timing counters.
        self.index = 0
        self.image = self.images[self.index]
        # Direction deltas; None/None means "not moving" (disappear frames).
        self.x_axis = None
        self.y_axis = None
        self.animation_time = 0.1
        self.current_time = 0
        self.animation_frames = 2
        self.current_frame = 0

    def update_time_dependent(self, dt):
        """
        Updates the image of Sprite approximately every 0.1 second.

        Args:
            dt: Time elapsed between each frame.
        """
        # Pick the frame list for the current movement direction.
        if self.x_axis is None and self.y_axis is None:
            self.images = self.images_disappear
        else:
            if self.x_axis > 0:
                self.images = self.images_right
            elif self.x_axis < 0:
                self.images = self.images_left
            if self.y_axis > 0:
                self.images = self.images_down
            elif self.y_axis < 0:
                self.images = self.images_up
        self.current_time += dt
        if self.current_time >= self.animation_time:
            self.current_time = 0
            self.index = (self.index + 1) % len(self.images)
            self.image = self.images[self.index]

    def update_frame_dependent(self):
        """
        Updates the image of Sprite every 6 frame (approximately every 0.1 second if frame rate is 60).
        """
        # NOTE(review): direction-selection logic duplicates
        # update_time_dependent — keep the two in sync if either changes.
        if self.x_axis is None and self.y_axis is None:
            self.images = self.images_disappear
        else:
            if self.x_axis > 0:
                self.images = self.images_right
            elif self.x_axis < 0:
                self.images = self.images_left
            if self.y_axis > 0:
                self.images = self.images_down
            elif self.y_axis < 0:
                self.images = self.images_up
        self.current_frame += 1
        if self.current_frame >= self.animation_frames:
            self.current_frame = 0
            self.index = (self.index + 1) % len(self.images)
            self.image = self.images[self.index]

    def update(self, dt):
        """This is the method that's being called when 'all_sprites.update(dt)' is called."""
        # Switch between the two update methods by commenting/uncommenting.
        self.update_time_dependent(dt)
        # self.update_frame_dependent()

    # Movement
    def move(self, dest, dt):
        # Move the rect one pixel toward `dest` (grid coords, same layout
        # as self.pos) and record the direction for animation selection.
        self.dest = dest
        dst = reversed(dest)
        src = reversed(self.pos)
        self.x_axis, self.y_axis = tuple([destination - source for source, destination in zip(src, dst)])
        if self.x_axis > 0:
            self.rect = self.rect.move(1, 0)
        elif self.x_axis < 0:
            self.rect = self.rect.move(-1, 0)
        if self.y_axis > 0:
            self.rect = self.rect.move(0, 1)
        elif self.y_axis < 0:
            self.rect = self.rect.move(0, -1)
| StarcoderdataPython |
1628212 | <filename>main/urls.py
from django.urls import path
from . import views
# URL namespace, e.g. reverse('main:home').
app_name = 'main'

urlpatterns = [
    path('', views.home, name='home'),
    path('savematkul/', views.savematkul, name='savematkul'),
    path('savetugas/<int:pk>', views.savetugas, name='savetugas'),
]
| StarcoderdataPython |
3297157 | <filename>loop_message.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from telegram.ext import Updater, MessageHandler, Filters
import traceback as tb
import json
import random
import threading
START_MESSAGE = ('''
Loop message in chat / group / channel.

add - /add message: add message to loop.
list - /list: list message inside the loop.
remove - /remove message_number: remove the message inside the loop.
''')

# Per-chat cursor into that chat's message list.
loopPos = {}
# Seconds between broadcast passes (one hour).
INTERVAL = 3600

with open('CREDENTIALS') as f:
    CREDENTIALS = json.load(f)

# Chat that receives forwarded messages and error reports.
debug_group = CREDENTIALS.get('debug_group') or -1001198682178

# Persistent message database: chat id (str) -> list of messages.
# Fall back to an empty DB when the file is missing or unparsable; the
# original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
try:
    with open('DB') as f:
        DB = json.load(f)
except (OSError, ValueError):
    DB = {}
def saveDB():
    """Persist the in-memory loop database to the 'DB' file as pretty JSON."""
    with open('DB', 'w') as fout:
        json.dump(DB, fout, sort_keys=True, indent=2)
def splitCommand(text):
    """Split a message into (lowercased command, remaining argument text).

    Returns ('', '') when the message contains no words.
    """
    parts = text.split()
    if not parts:
        return '', ''
    head = parts[0]
    tail_start = text.find(head) + len(head)
    return head.lower(), text[tail_start:].strip()
def getDB(msg):
    """Return the loop-message list for msg's chat, creating it if absent."""
    return DB.setdefault(str(msg.chat_id), [])
def add(msg, content):
    """Append a message to this chat's loop, then mirror the request to the
    debug group (with a t.me link when the chat has a public username)."""
    getDB(msg).append(content)
    saveDB()
    msg.reply_text('success', quote=False)
    msg.forward(chat_id = debug_group)
    if msg.chat and msg.chat.username:
        msg.bot.send_message(chat_id=debug_group, text='t.me/' + msg.chat.username)
def listLoop(msg):
    """Reply with every queued message for this chat, numbered by index
    (the index is what /remove expects)."""
    items = [str(index) + ': '+ content for index, content in enumerate(getDB(msg))]
    if not items:
        return msg.reply_text('FAIL. no loop items yet.', quote=False)
    msg.reply_text('\n\n'.join(items), quote=False, disable_web_page_preview=True)
def remove(msg, content):
    """Remove the loop message at the given index for this chat.

    Replies 'success' on removal, or a 'FAIL. ...' diagnostic when the
    argument is not an integer or is out of range.
    """
    db = getDB(msg)
    try:
        index = int(content)
    except ValueError:  # was a bare except:, which also hid real bugs
        return msg.reply_text('FAIL. index not valid: ' + content, quote=False)
    # Reject negative indexes too: the original `len(db) <= index` check let
    # e.g. -5 through and `del db[-5]` could raise an uncaught IndexError.
    if not 0 <= index < len(db):
        return msg.reply_text('FAIL. index out of range: ' + content, quote=False)
    del db[index]
    saveDB()
    msg.reply_text('success', quote=False)
def manage(update, context):
    """Handle any command message: dispatch /add, /list, /remove; otherwise
    reply with the help text. Errors are reported to the debug group."""
    try:
        msg = update.effective_message
        if not msg:
            return
        command, content = splitCommand(msg.text)
        # NOTE(review): substring matching means e.g. '/readd' also matches
        # 'add' — confirm this looseness is intended.
        if ('add' in command) and content:
            return add(msg, content)
        if 'list' in command:
            return listLoop(msg)
        if 'remove' in command:
            return remove(msg, content)
        msg.reply_text(START_MESSAGE, quote=False)
    except Exception as e:
        print(e)
        tb.print_exc()
        context.bot.send_message(chat_id=debug_group, text=str(e))
def start(update, context):
    """Reply with the help text to any non-command private message."""
    try:
        update.effective_message.reply_text(START_MESSAGE, quote=False)
    except Exception as e:
        print(e)
        tb.print_exc()
def loopImp():
    """Send the next queued message for every chat, advancing that chat's
    cursor (wrapping around at the end of its list)."""
    for key in DB:
        loopLen = len(DB[key])
        if not loopLen:
            continue
        # First pass for a chat starts at a random position.
        index = loopPos.get(key, random.randint(0, loopLen - 1))
        if index >= loopLen:
            # Cursor can overshoot when items were removed since last pass.
            updater.bot.send_message(chat_id=debug_group, text='Should only happen why removed items from list')
            index = 0
        updater.bot.send_message(
            chat_id=key,
            text=DB[key][index])
        loopPos[key] = (index + 1) % loopLen
# Wire up the bot: command messages go to manage(); any other private
# message gets the help text via start().
updater = Updater(CREDENTIALS['bot_token'], use_context=True)
dp = updater.dispatcher
dp.add_handler(MessageHandler(Filters.command, manage))
dp.add_handler(MessageHandler(Filters.private & (~Filters.command), start))
def loop():
    """Run one broadcast pass, report failures to the debug group, and
    reschedule itself to run again after INTERVAL seconds."""
    try:
        loopImp()
    except Exception as e:
        print(e)
        tb.print_exc()
        updater.bot.send_message(chat_id=debug_group, text=str(e))
    # Reschedule even after an error so the loop never dies.
    threading.Timer(INTERVAL, loop).start()
# Kick off the periodic broadcast, then start polling Telegram for updates
# and block until interrupted.
loop()
updater.start_polling()
updater.idle()
| StarcoderdataPython |
58189 | import tarfile
import os
# Files bundled into the release tarball: source path (relative to cwd)
# and the name to use inside the archive.
tar_content_files = [ {"name": "config", "arc_name": "config"},
                      {"name": "out/chart-verifier", "arc_name": "chart-verifier"} ]
def create(release):
    """Assemble chart-verifier-<release>.tgz in the current directory and
    return its absolute path, replacing any previous tarball first."""
    target = os.path.join(os.getcwd(), f"chart-verifier-{release}.tgz")
    if os.path.exists(target):
        os.remove(target)
    # "x:gz" creates a brand-new gzip archive (fails if it still exists).
    with tarfile.open(target, "x:gz") as tar:
        for entry in tar_content_files:
            tar.add(os.path.join(os.getcwd(), entry["name"]),
                    arcname=entry["arc_name"])
    return target
| StarcoderdataPython |
198014 | <filename>backend/django/core/migrations/0023_trainingset_celery_task_id.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-27 19:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add TrainingSet.celery_task_id for tracking async training jobs."""

    dependencies = [
        ("core", "0022_data_df_idx"),
    ]

    operations = [
        migrations.AddField(
            model_name="trainingset",
            name="celery_task_id",
            # blank=True: the field is optional in forms/admin.
            field=models.TextField(blank=True),
        ),
    ]
| StarcoderdataPython |
3357865 | <filename>src/website/admin.py<gh_stars>0
from django.contrib import admin
from .models import ScanImage, Session
class ImageAdmin(admin.ModelAdmin):
    # Columns shown in the ScanImage changelist.
    list_display = ['pk', 'image_url', 'created_on']


admin.site.register(ScanImage, ImageAdmin)
admin.site.register(Session)
| StarcoderdataPython |
1720665 | <reponame>joe307bad/cyborgbackup
# Generated by Django 2.2.17 on 2021-01-15 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add grouping/ordering columns to Setting and refresh choice lists on
    Job.job_type, Policy.clients and Policy.policy_type."""

    dependencies = [
        ('main', '0010_client_bandwidth_limit'),
    ]

    operations = [
        migrations.AddField(
            model_name='setting',
            name='group',
            field=models.TextField(editable=False, null=True),
        ),
        migrations.AddField(
            model_name='setting',
            name='order',
            field=models.IntegerField(default=0, editable=False),
        ),
        migrations.AlterField(
            model_name='job',
            name='job_type',
            field=models.CharField(choices=[('job', 'Default Backup Job'), ('check', 'Prepare Client or Repository'), ('catalog', 'Catalog Job'), ('prune', 'Prune Job'), ('restore', 'Restore Job')], default='job', max_length=64),
        ),
        migrations.AlterField(
            model_name='policy',
            name='clients',
            field=models.ManyToManyField(blank=True, to='main.Client'),
        ),
        migrations.AlterField(
            model_name='policy',
            name='policy_type',
            field=models.CharField(choices=[('rootfs', 'Root FileSystem'), ('vm', 'Virtual Machine'), ('mysql', 'MySQL'), ('postgresql', 'PostgreSQL'), ('piped', 'Piped Backup'), ('config', 'Only /etc'), ('mail', 'Only mail directory'), ('folders', 'Specified folders'), ('proxmox', 'Proxmox')], default='rootfs', max_length=20),
        ),
    ]
| StarcoderdataPython |
120948 | <reponame>gkimeeq/WebCrawler
# coding=utf-8
from scrapy.exceptions import DropItem
import json
import pymongo
# Pipeline that normalises/filters item prices
class PricePipeline(object):
    """Add VAT to prices that exclude it; drop items without a price."""

    vat_factor = 1.15  # multiplier applied to VAT-exclusive prices

    def process_item(self, item, spider):
        price = item.get('price')
        if not price:
            raise DropItem(u'缺失价格字段的Item:%s' % item)
        if item.get('price_excludes_vat'):
            item['price'] = price * self.vat_factor
        return item
# Write items out to a JSON-lines file
class JsonWriterPipeline(object):
    """Append every item as one JSON object per line to ``item.jl``."""

    def open_spider(self, spider):
        # Called once when the spider starts: open the output file.
        self.file = open('item.jl', 'w')

    def close_spider(self, spider):
        # Called once when the spider finishes: release the file handle.
        self.file.close()

    def process_item(self, item, spider):
        # One JSON document per line (JSON-lines format).
        self.file.write(json.dumps(dict(item)) + '\n')
        return item
# Pipeline that persists items to MongoDB; demonstrates from_crawler() for
# pulling configuration from settings and open/close_spider for resources.
class MongoPipeline(object):
    """Store every processed item in a MongoDB collection."""

    def __init__(self, mongo_uri, mongo_db):
        self.collection_name = 'scrapy_items'
        # NOTE: fixed attribute-name typo 'mongo_uir' -> 'mongo_uri'
        # (it was written with the typo in __init__ and read with the same
        # typo in open_spider).
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline instance from the crawler's settings."""
        return cls(mongo_uri=crawler.settings.get('MONGO_URI'),
                   mongo_db=crawler.settings.get('MONGO_DATABASE', 'items'))

    def open_spider(self, spider):
        # Connect once per spider run.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        # Release the Mongo connection.
        self.client.close()

    def process_item(self, item, spider):
        self.db[self.collection_name].insert_one(dict(item))
        return item
# Pipeline that discards items whose id has already been seen.
class DuplicatePipeline(object):
    """Filter out duplicate items based on their unique 'id' field."""

    def __init__(self):
        # ids of every item accepted so far in this run
        self.ids_exist = set()

    def process_item(self, item, spider):
        item_id = item['id']
        if item_id in self.ids_exist:
            raise DropItem(u'重复的Item:%s' % item)
        self.ids_exist.add(item_id)
        return item
| StarcoderdataPython |
1724649 | <reponame>Lilith5th/Radiance<filename>test/testcases/px/test_phisto.py
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import os
import unittest
import testsupport as ts
from pyradlib import lcompare
from pyradlib.pyrad_proc import PIPE, Error, ProcMixin
class PhistoTestCase(unittest.TestCase, ProcMixin):
    """Exercise the Radiance ``phisto`` tool against reference gradients."""

    def _runit(self, cmd, _in=None):
        """Run *cmd*, fail the test on any error, and return parsed stdout.

        _in -- optional file object piped to the child's stdin.
        """
        failmsg = None
        proc = None
        try:
            proc = self.call_one(cmd, 'test phisto',
                    _in=_in, out=PIPE, universal_newlines=True)
            raw = proc.stdout.read()
        except Error as e:
            #self.fail(failmsg)
            failmsg = str(e)
        finally:
            if proc:
                res = proc.wait()
                if res != 0:
                    failmsg = 'Non-zero (%d) exit from %s' % (res, str(cmd))
            if failmsg:
                self.fail(failmsg)
        # NOTE(review): if call_one() raised, 'raw' was never bound; the
        # self.fail() above prevents reaching this line in that case.
        return lcompare.split_rad(raw)

    def test_phisto(self):
        # NOTE(review): both branches assign the same value; presumably the
        # Windows branch was meant to differ (e.g. 'phisto.exe') -- confirm.
        if os.name == 'nt': phisto = 'phisto'
        else: phisto = 'phisto'
        hgradpic = ts.datafile('gradients', 'h_gradient.hdr')
        vgradpic = ts.datafile('gradients', 'v_gradient.hdr')
        datafn = ts.datafile('gradients', 'gradient.histo')
        # Load the expected histogram once.
        with open(datafn, 'r') as df:
            dtxt = df.read()
        expect = lcompare.split_rad(dtxt)
        # Try each picture: first by file name on the command line, then
        # (on mismatch) by piping the picture via stdin to diagnose whether
        # the failure is path-related.
        for picfn in (hgradpic, vgradpic):
            hcmd = [phisto, picfn]
            err_msg = None
            try:
                result = self._runit(hcmd)
                lcompare.llcompare(result, expect)
            except AssertionError as e:
                with open(picfn, 'rb') as picf:
                    hcmd = [phisto]
                    result = self._runit(hcmd, _in=picf)
                try:
                    lcompare.llcompare(result, expect)
                    # Stdin run succeeded where the file-name run failed.
                    err_msg = 'Phisto fails with spaces in file name paths.'
                except Exception as e:
                    err_msg = str(e)
            except Exception as e:
                err_msg = str(e)
            if err_msg:
                self.fail(err_msg)
| StarcoderdataPython |
4812009 | # -*- coding: utf-8 -*-
'''Text block objects based on PDF raw dict extracted with ``PyMuPDF``.
Data structure based on this `link <https://pymupdf.readthedocs.io/en/latest/textpage.html>`_::
{
# raw dict
# --------------------------------
'type': 0,
'bbox': (x0,y0,x1,y1),
'lines': [ lines ]
# introduced dict
# --------------------------------
'before_space': bs,
'after_space': as,
'line_space': ls,
'alignment': 0,
'left_space': 10.0,
'right_space': 0.0,
'tab_stops': [15.4, 35.0]
}
'''
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
from .Lines import Lines
from google_trans_new import google_translator
from ..common.share import RectType, TextDirection, TextAlignment
from ..common.Block import Block
from ..common.share import rgb_component_from_name
from ..common import constants
from ..common import docx
class TextBlock(Block):
    '''Text block: a ``Block`` composed of ``Lines``, with spacing/alignment
    attributes parsed from the PDF layout and rendered to a docx paragraph.
    '''
    def __init__(self, raw:dict=None):
        '''Initialize from a raw ``PyMuPDF`` block dict.

        Args:
            raw (dict): raw block data; the 'bbox' key is ignored because the
                bbox is re-derived from the contained lines.
        '''
        if raw is None: raw = {}
        # remove key 'bbox' since it is calculated from contained lines
        if 'bbox' in raw: raw.pop('bbox')
        super().__init__(raw)
        # collect lines
        self.lines = Lines(parent=self).restore(raw.get('lines', []))
        # set type
        self.set_text_block()
        # set Google Translator
        # NOTE(review): the translator is only referenced by commented-out
        # code in the `text` property; it pulls in the google_trans_new
        # dependency for every block -- consider removing if unused.
        self.translator = google_translator()
    @property
    def text(self):
        '''Get text content in block, joining each line with ``\\n``.'''
        lines_text = [line.text for line in self.lines]
        # print(lines_text)
        # if lines_text != ['<image>']:
        # lines_text = self.translator.translate(lines_text, lang_tgt='ko')
        # lines_text = '\n'.join(lines_text)
        # print(lines_text)
        # return lines_text
        # print(lines_text)
        return '\n'.join(lines_text)
    @property
    def text_direction(self):
        '''All lines contained in text block must have same text direction.
        Otherwise, set normal direction.
        '''
        res = set(line.text_direction for line in self.lines)
        # consider two text direction only: left-right, bottom-top
        if TextDirection.IGNORE in res:
            return TextDirection.IGNORE
        elif len(res)==1:
            return list(res)[0]
        else:
            return TextDirection.LEFT_RIGHT
    @property
    def average_row_gap(self):
        '''Average distance between adjacent two physical rows; ``None`` for
        a single-row block.'''
        # index 1 (y) for horizontal text, 0 (x) for vertical text
        idx = 1 if self.is_horizontal_text else 0
        rows = self.lines.group_by_physical_rows()
        num = len(rows)
        # no gap if single row
        if num==1: return None
        # multi-lines block
        block_height = self.bbox[idx+2]-self.bbox[idx]
        f_max_row_height = lambda row: max(abs(line.bbox[idx+2]-line.bbox[idx]) for line in row)
        sum_row_height = sum(map(f_max_row_height, rows))
        return (block_height-sum_row_height) / (num-1)
    @property
    def row_count(self):
        '''Count of physical rows.'''
        return len(self.lines.group_by_physical_rows())
    def is_flow_layout(self, *args):
        '''Check if flow layout; delegates to the contained lines.'''
        return self.lines.is_flow_layout(*args)
    def store(self):
        '''Serialize block (including lines) to a plain dict.'''
        res = super().store()
        res.update({
            'lines': self.lines.store()
        })
        return res
    def add(self, line_or_lines):
        '''Add line or lines to TextBlock.'''
        if isinstance(line_or_lines, (Lines, list, tuple)):
            for line in line_or_lines:
                self.lines.append(line)
        else:
            self.lines.append(line_or_lines)
    def strip(self):
        '''Strip each Line instance.'''
        self.lines.strip()
    def plot(self, page):
        '''Plot block/line/span area for debug purpose.

        Args:
            page (fitz.Page): pdf page.
        '''
        # block border in blue
        blue = rgb_component_from_name('blue')
        super().plot(page, stroke=blue, dashes='[3.0 3.0] 0')
        # lines and spans
        for line in self.lines:
            # line border in red
            red = rgb_component_from_name('red')
            line.plot(page, stroke=red)
            # span regions in random color
            for span in line.spans:
                c = rgb_component_from_name('')
                span.plot(page, color=c)
    def parse_text_format(self, rects):
        '''Parse text format with style represented by rectangles.

        Args:
            rects (Shapes): Shapes representing potential styles applied on blocks.

        Returns:
            bool: True if any line format was updated by a rect.
        '''
        flag = False
        # use each rectangle (a specific text format) to split line spans
        for rect in rects:
            # a same style rect applies on only one block
            # EXCEPTION: hyperlink shape is determined in advance
            if rect.type!=RectType.HYPERLINK and rect.is_determined: continue
            # any intersection with current block?
            if not self.bbox.intersects(rect.bbox): continue
            # yes, then go further to lines in block
            if self.lines.parse_text_format(rect):
                flag = True
        return flag
    def parse_horizontal_spacing(self, bbox,
            line_separate_threshold:float,
            line_break_width_ratio:float,
            line_break_free_space_ratio:float,
            lines_left_aligned_threshold:float,
            lines_right_aligned_threshold:float,
            lines_center_aligned_threshold:float):
        ''' Set horizontal spacing based on lines layout and page bbox.

        * The general spacing is determined by paragraph alignment and indentation.
        * The detailed spacing of block lines is determined by tab stops.

        Multiple alignment modes may exist in block (due to improper organized lines
        from ``PyMuPDF``), e.g. some lines align left, and others right. In this case,
        **LEFT** alignment is set, and use ``TAB`` to position each line.
        '''
        # NOTE: in PyMuPDF CS, horizontal text direction is same with positive x-axis,
        # while vertical text is on the contrary, so use f = -1 here
        idx0, idx1, f = (0, 2, 1.0) if self.is_horizontal_text else (3, 1, -1.0)
        # decide text alignment by internal lines in first priority; if can't decide, check
        # with page layout.
        int_alignment = self._internal_alignment((idx0, idx1, f),
                            line_separate_threshold,
                            lines_left_aligned_threshold,
                            lines_right_aligned_threshold,
                            lines_center_aligned_threshold)
        ext_alignment = self._external_alignment(bbox,
                            (idx0, idx1, f),
                            lines_center_aligned_threshold)
        self.alignment = int_alignment if int_alignment!=TextAlignment.UNKNOWN else ext_alignment
        # if still can't decide, set LEFT by default and ensure position by TAB stops
        if self.alignment == TextAlignment.NONE:
            self.alignment = TextAlignment.LEFT
            # NOTE: relative stop position to left boundary of block is calculated,
            # so block.left_space is required
            fun = lambda line: round((line.bbox[idx0]-self.bbox[idx0])*f, 1) # relative position to block
            all_pos = set(map(fun, self.lines))
            self.tab_stops = list(filter(lambda pos: pos>=constants.MINOR_DIST, all_pos))
        # adjust left/right indentation:
        # - set single side indentation if single line
        # - add minor space if multi-lines
        row_count = self.row_count
        if self.alignment == TextAlignment.LEFT:
            if row_count==1:
                self.right_space = 0
            else:
                self.right_space -= constants.MAJOR_DIST
        elif self.alignment == TextAlignment.RIGHT:
            if row_count==1:
                self.left_space = 0
            else:
                self.left_space -= constants.MAJOR_DIST
        elif self.alignment == TextAlignment.CENTER:
            if row_count==1:
                self.left_space = 0
                self.right_space = 0
            else:
                self.left_space -= constants.MAJOR_DIST
                self.right_space -= constants.MAJOR_DIST
        # parse line break
        self.lines.parse_line_break(bbox, line_break_width_ratio, line_break_free_space_ratio)
    def parse_line_spacing_relatively(self):
        '''Calculate relative line spacing, e.g. `spacing = 1.02`.

        It's complicated to calculate relative line spacing, e.g. considering font style.
        A simple rule is used:

        line_height = 1.3 * font_size

        .. note::
            The line spacing could be updated automatically when changing the font size, while the layout might
            be broken in exact spacing mode, e.g. overlapping of lines.

        Returns:
            float: extra space at the end of the block, to be excluded from
            the before-space of the next block.
        '''
        # empirical ratio between rendered line height and font size
        factor = 1.22
        # block height
        idx = 1 if self.is_horizontal_text else 0
        block_height = self.bbox[idx+2]-self.bbox[idx]
        # The layout of pdf text block: line-space-line-space-line, while
        # The layout of paragraph in docx: line-space-line-space-line-space, note the extra space at the end.
        # So, (1) calculate the line spacing x => x*1.3*sum_{n-1}(H_i) + Hn = H,
        # (2) calculate the extra space at the end, to be excluded from the before space of next block.
        rows = self.lines.group_by_physical_rows()
        count = len(rows)
        max_line_height = lambda row: max(abs(line.bbox[idx+2]-line.bbox[idx]) for line in row)
        last_line_height = max_line_height(rows[-1])
        if count > 1:
            sum_pre_line_height = sum(max_line_height(row) for row in rows[:-1])
            self.line_space = (block_height-last_line_height)/sum_pre_line_height/factor
        else:
            self.line_space = 1.0
        # extra space at the end
        end_space = (self.line_space*factor-1.0) * last_line_height if self.line_space>1.0 else 0.0
        return end_space
    def parse_line_spacing_exactly(self):
        '''Calculate exact line spacing, e.g. `spacing = Pt(12)`.

        The layout of pdf text block: line-space-line-space-line, excepting space before first line,
        i.e. space-line-space-line, when creating paragraph in docx. So, an average line height is
        ``space+line``. Then, the height of first line can be adjusted by updating paragraph before-spacing.

        .. note::
            Compared with the relative spacing mode, it has a more precise layout, but less flexible editing
            ability, especially changing the font size.
        '''
        # check text direction
        idx = 1 if self.is_horizontal_text else 0
        bbox = self.lines[0].bbox # first line
        first_line_height = bbox[idx+2] - bbox[idx]
        block_height = self.bbox[idx+2]-self.bbox[idx]
        # average line spacing
        count = self.row_count # count of rows
        if count > 1:
            line_space = (block_height-first_line_height)/(count-1)
        else:
            line_space = block_height
        self.line_space = line_space
        # since the line height setting in docx may affect the original bbox in pdf,
        # it's necessary to update the before spacing:
        # taking bottom left corner of first line as the reference point
        self.before_space += first_line_height - line_space
        # if before spacing is negative, set to zero and adjust calculated line spacing accordingly
        if self.before_space < 0:
            self.line_space += self.before_space / count
            self.before_space = 0.0
    def make_docx(self, p):
        '''Create paragraph for a text block.

        Refer to ``python-docx`` doc for details on text format:

        * https://python-docx.readthedocs.io/en/latest/user/text.html
        * https://python-docx.readthedocs.io/en/latest/api/enum/WdAlignParagraph.html#wdparagraphalignment

        Args:
            p (Paragraph): ``python-docx`` paragraph instance.

        .. note::
            The left position of paragraph is set by paragraph indent, rather than ``TAB`` stop.
        '''
        pf = docx.reset_paragraph_format(p)
        # vertical spacing
        before_spacing = max(round(self.before_space, 1), 0.0)
        after_spacing = max(round(self.after_space, 1), 0.0)
        pf.space_before = Pt(before_spacing)
        pf.space_after = Pt(after_spacing)
        # line spacing
        pf.line_spacing = Pt(round(self.line_space, 1))
        # horizontal alignment
        # - alignment mode
        if self.alignment==TextAlignment.LEFT:
            pf.alignment = WD_ALIGN_PARAGRAPH.LEFT
            # set tab stops to ensure line position
            for pos in self.tab_stops:
                pf.tab_stops.add_tab_stop(Pt(self.left_space + pos))
        elif self.alignment==TextAlignment.RIGHT:
            pf.alignment = WD_ALIGN_PARAGRAPH.RIGHT
        elif self.alignment==TextAlignment.CENTER:
            pf.alignment = WD_ALIGN_PARAGRAPH.CENTER
        else:
            pf.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        # - paragraph indentation
        # NOTE: different left spacing setting in case first line indent and hanging
        # (first_line_space is presumably initialized on the Block base class
        # and updated in _internal_alignment -- confirm)
        if self.first_line_space<0: # hanging
            pf.left_indent = Pt(self.left_space-self.first_line_space)
        else: # first line indent
            pf.left_indent = Pt(self.left_space)
        pf.right_indent = Pt(self.right_space)
        # - first line indentation
        pf.first_line_indent = Pt(self.first_line_space)
        # add lines
        self.lines.make_docx(p)
        return p
    def _internal_alignment(self, text_direction_param:tuple,
                line_separate_threshold:float,
                lines_left_aligned_threshold:float,
                lines_right_aligned_threshold:float,
                lines_center_aligned_threshold:float):
        '''Detect text alignment mode based on layout of internal lines.

        Args:
            text_direction_param (tuple): ``(x0_index, x1_index, direction_factor)``,
                e.g. ``(0, 2, 1)`` for horizontal text, while ``(3, 1, -1)`` for vertical text.
        '''
        # get lines in each physical row
        rows = self.lines.group_by_physical_rows()
        # indexes based on text direction
        idx0, idx1, f = text_direction_param
        # --------------------------------------------------------------------------
        # First priority: significant distance exists in any two adjacent lines ->
        # set unknown alignment temporarily. Assign left-align to it later and ensure
        # exact position of each line by TAB stop.
        # --------------------------------------------------------------------------
        for row in rows:
            if len(row)==1: continue
            dis = [(row[i].bbox[idx0]-row[i-1].bbox[idx1])*f>=line_separate_threshold \
                        for i in range(1, len(row))]
            if any(dis):
                return TextAlignment.NONE
        # just one row -> can't decide -> full possibility
        if len(rows) < 2: return TextAlignment.UNKNOWN
        # --------------------------------------------------------------------------
        # Then check alignment of internal lines:
        # When count of lines >= 3:
        # - left-alignment based on lines excepting the first line
        # - right-alignment based on lines excepting the last line
        # the exact position of first line is considered by first-line-indent
        # ========= ======= ========= | ========= =========
        # ========= ========= ======= | ======= =======
        # ========= ========= ======= |
        # ====== ========= ===== |
        # --------------------------------------------------------------------------
        X0 = [lines[0].bbox[idx0] for lines in rows]
        X1 = [lines[-1].bbox[idx1] for lines in rows]
        X = [(x0+x1)/2.0 for (x0, x1) in zip(X0, X1)]
        if len(rows) >= 3: X0, X1 = X0[1:], X1[0:-1]
        left_aligned = abs(max(X0)-min(X0))<=lines_left_aligned_threshold
        right_aligned = abs(max(X1)-min(X1))<=lines_right_aligned_threshold
        center_aligned = abs(max(X)-min(X))<=lines_center_aligned_threshold # coarse margin for center alignment
        if left_aligned and right_aligned:
            # need further external check if two lines only
            return TextAlignment.JUSTIFY if len(rows)>=3 else TextAlignment.UNKNOWN
        elif left_aligned:
            # positive -> first line indent; negative -> hanging indent
            self.first_line_space = rows[0][0].bbox[idx0] - rows[1][0].bbox[idx0]
            return TextAlignment.LEFT
        elif right_aligned:
            return TextAlignment.RIGHT
        elif center_aligned:
            return TextAlignment.CENTER
        else:
            return TextAlignment.NONE
    def _external_alignment(self, bbox:list,
                text_direction_param:tuple,
                lines_center_aligned_threshold:float):
        '''Detect text alignment mode based on the position to external bbox.

        Args:
            bbox (list): Page or Cell bbox where this text block locates in.
            text_direction_param (tuple): ``(x0_index, x1_index, direction_factor)``, e.g.
                ``(0, 2, 1)`` for horizontal text, while ``(3, 1, -1)`` for vertical text.
        '''
        # indexes based on text direction
        idx0, idx1, f = text_direction_param
        # distance to the bbox
        d_left = round((self.bbox[idx0]-bbox[idx0])*f, 1) # left margin
        d_right = round((bbox[idx1]-self.bbox[idx1])*f, 1) # right margin
        d_center = round((d_left-d_right)/2.0, 1) # center margin
        d_left = max(d_left, 0.0)
        d_right = max(d_right, 0.0)
        # NOTE: set horizontal space
        self.left_space = d_left
        self.right_space = d_right
        # block location: left/center/right
        if abs(d_center) < lines_center_aligned_threshold:
            return TextAlignment.CENTER
        else:
            return TextAlignment.LEFT if abs(d_left) <= abs(d_right) else TextAlignment.RIGHT
| StarcoderdataPython |
41138 | import unittest
import uuid
from . import user_util
class TestUtilFuncs(unittest.TestCase):
    """Unit tests for the password helpers in ``user_util``."""

    def test_hash_and_verify_password(self):
        # A freshly hashed password must always verify against itself.
        for _ in range(10):
            password = str(uuid.uuid4())
            hashed = user_util.hash_password(password)
            self.assertTrue(user_util.verify_password(password, hashed))
| StarcoderdataPython |
162261 | <reponame>Rasterer/tvm<gh_stars>1-10
import numpy as np
from tvm import relay
from tvm.relay.ir_pass import infer_type
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.op import add
from tvm.relay.module import Module
# @tq, @jr should we put this in testing ns?
def check_rts(expr, args, expected_result, mod=None):
    """
    Check that evaluating `expr` applied to the arguments produces
    `expected_result` on both the evaluator and TVM runtime.

    Parameters
    ----------
    expr:
        The expression to evaluate

    args: list of Expr
        The arguments to supply the expr.

    expected_result:
        The expected result of running the expression.
    """
    intrp = relay.create_executor('debug', mod=mod)
    graph = relay.create_executor('graph', mod=mod)
    eval_result = intrp.evaluate(expr)(*args)
    rts_result = graph.evaluate(expr)(*args)
    # The two executors must agree with each other...
    np.testing.assert_allclose(eval_result.asnumpy(), rts_result.asnumpy())
    # ...and with the caller-supplied reference. BUG FIX: `expected_result`
    # was previously accepted but never checked.
    np.testing.assert_allclose(eval_result.asnumpy(), expected_result)
def test_add_op_scalar():
    """Check scalar addition: fn (x, y) { return x + y; }."""
    lhs_data = np.array(10.0, dtype='float32')
    rhs_data = np.array(1.0, dtype='float32')
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    func = relay.Function([x, y], add(x, y))
    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)
def test_add_op_tensor():
    """Check elementwise tensor addition: fn (x, y) { return x + y; }."""
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(10, 5))
    func = relay.Function([x, y], add(x, y))
    lhs_data = np.random.rand(10, 5).astype('float32')
    rhs_data = np.random.rand(10, 5).astype('float32')
    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)
def test_add_op_broadcast():
    """Check broadcast addition (10x5 + 1x5): fn (x, y) { return x + y; }."""
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    func = relay.Function([x, y], add(x, y))
    lhs_data = np.random.rand(10, 5).astype('float32')
    rhs_data = np.random.rand(1, 5).astype('float32')
    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_add_op_scalar()
    test_add_op_tensor()
    test_add_op_broadcast()
| StarcoderdataPython |
1735861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
    """Python-koans exercises about string literals, escaping and
    concatenation; each test demonstrates one string behavior."""

    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, str))
    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, str))
    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, str))
    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, str))
    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, str))
    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        self.assertEqual("He said, \"Go Away.\"", string)
    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        self.assertEqual('Don\'t', string)
    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))
    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        # The trailing backslash joins the two physical lines into one
        # literal; the continuation line has no leading spaces, hence 52.
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(52, len(string))
    def test_triple_quoted_strings_can_span_lines(self):
        # "\nHowdy,\nworld!\n" -> 15 characters including the newlines.
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))
    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))
    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        # NOTE(review): the assertion below was left commented out, so this
        # koan passes vacuously -- presumably it should compare against
        # 'Hello "world"'; confirm and restore.
        # self.assertEqual(True, string)
    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual("Hello, world", string)
    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "world"
        self.assertEqual("Hello, world", string)
    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual("Hello, ", hi)
        self.assertEqual("world", there)
    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual("Hello, world", hi)
    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual("Hello, ", original)
    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
| StarcoderdataPython |
74112 | from deepproblog.utils import check_path
# Grid of hyper-parameter configs: one file per (batch_size, infoloss) pair.
template = """
[Default]
batch_size = {0}
infoloss = {1}
name = poker_batch_{0}_infoloss_{1}
"""
check_path("parameter_cfg/0.cfg")
idx = 0
for batch_size in [10, 25, 50, 100]:
    for infoloss in [0, 0.5, 1.0, 2.0, 4.0]:
        with open("parameter_cfg/{}.cfg".format(idx), "w") as cfg_file:
            cfg_file.write(template.format(batch_size, infoloss))
        idx += 1

# Repeated runs at the chosen hyper-parameters: one file per repetition.
template = """
[Default]
batch_size = 10
infoloss = 0.5
labeled = {0}
name = poker_batch_labeled_{0}_{1}
"""
check_path("experiment/0.cfg")
idx = 0
for labeled in [300]:
    for rep in range(10):
        with open("experiment/{}.cfg".format(idx), "w") as cfg_file:
            cfg_file.write(template.format(labeled, rep))
        idx += 1
| StarcoderdataPython |
164527 | """
Script containing methods useful for other plots
"""
import csv
import pathlib
import shutil
import numpy as np
from matplotlib import patches
from config import FrameworkConfiguration
def get_font_family_and_size():
    """Return the (font family, font size) pair shared by all plots."""
    return "Times New Roman", 22
def get_extension():
    """Return the file extension shared by all generated plots."""
    return '.pdf'
def print_cute_algo_name(a):
    """Return the display name (with greek letters) for algorithm key *a*,
    or "invalid" for an unknown key."""
    pretty_names = {
        "sarsa": "SARSA",
        "sarsa_lambda": "SARSA(λ)",
        "qlearning": "Q-learning",
        "qlearning_lambda": "Q(λ)",
    }
    return pretty_names.get(a, "invalid")
def return_greek_letter(par):
    """Return the greek letter corresponding to parameter name *par*,
    or "invalid" for an unknown name."""
    letters = {
        "epsilon": "ε",
        "alpha": "α",
        "gamma": "γ",
        "lambda": "λ",
    }
    return letters.get(par, "invalid")
def build_output_dir_from_path(output_dir, path, partial=None):
    """Resolve (and create) the plot output directory for a given path id.

    Path ids 1-4 map to a canonical directory under ../plot (or
    ../plot/partial when *partial* is given); any other id keeps
    *output_dir* unchanged. The directory is created if missing.
    """
    if path in (1, 2, 3, 4):
        prefix = "../plot/" if partial is None else "../plot/partial/"
        target_output_dir = prefix + "path" + str(path) + "/"
    else:
        target_output_dir = output_dir
    pathlib.Path(target_output_dir).mkdir(parents=True, exist_ok=True)
    return target_output_dir
def build_output_dir_for_params(output_dir, changing_param, algo):
    """Create and return the directory <output_dir>/<changing_param>/<algo>/."""
    target_output_dir = "{}/{}/{}/".format(output_dir, changing_param, algo)
    pathlib.Path(target_output_dir).mkdir(parents=True, exist_ok=True)
    return target_output_dir
def fix_hist_step_vertical_line_at_end(ax):
    """Remove the spurious trailing vertical line of step histograms by
    dropping the last vertex of every Polygon child of *ax*."""
    for child in ax.get_children():
        if isinstance(child, patches.Polygon):
            child.set_xy(child.get_xy()[:-1])
def build_directory_and_filename(algorithm, date, partial=None):
    """Return (directory, filename) of the csv output for one run.

    When *algorithm* is None it is recovered from the run's saved parameter
    file for *date*. A non-None *partial* selects the partial-results csv.
    """
    if algorithm is None:
        # Recover the algorithm from the parameters saved for this run.
        params_dir = FrameworkConfiguration.directory + 'output/output_Q_parameters'
        params_file = 'output_parameters_' + date + '.csv'
        with open(params_dir + '/' + params_file, 'r') as csv_file:
            rows = csv.reader(csv_file, delimiter=',')
            parameters = {row[0].strip(): row[1].strip() for row in rows}
        algorithm = parameters['algorithm_used']
        print("RL ALGORITHM:", algorithm)
    directory = FrameworkConfiguration.directory + 'output/output_csv'
    prefix = 'output_' if partial is None else 'partial_output_'
    filename = prefix + algorithm + '_' + date + '.csv'
    return directory, filename
def build_directory_and_logfile(date_to_retrieve):
    """Return (log directory, full log file path) for the given run date."""
    directory = FrameworkConfiguration.directory + 'output/log/'
    return directory, directory + 'log_' + date_to_retrieve + '.log'
def read_all_info_from_log(date_to_retrieve):
    """
    Retrieve all info from log file.

    Parses every non-empty line of the run's log:
    - lines starting with "Episode" mark episode boundaries (the current
      command count is recorded);
    - all other lines except the final "Total" line are sent commands, whose
      6th whitespace-separated token is the reward.

    Returns (episodes, commands, rewards, cum_rewards).
    """
    directory, log_file = build_directory_and_logfile(date_to_retrieve)
    print(log_file)
    count = 0
    cum_reward = 0
    commands = []
    rewards = []
    cum_rewards = []
    episodes = []
    with open(log_file) as f:
        for line in f:
            if len(line.strip()) != 0:  # Not empty lines
                if line.startswith("Episode"):
                    episodes.append(count)
                if not line.startswith("Episode") and not line.startswith("Total"):
                    count += 1
                    commands.append(count)
                    # reward is the 6th whitespace-separated field
                    tmp_reward = int(line.split()[5])
                    cum_reward += tmp_reward
                    rewards.append(tmp_reward)
                    cum_rewards.append(cum_reward)
    return episodes, commands, rewards, cum_rewards
def read_time_traffic_from_log(date_to_retrieve):
    """
    Retrieve only training time and traffic from log file.

    Returns (seconds, counter_line) where *seconds* is parsed from the final
    "Total ..." line and *counter_line* counts non-empty lines excluding that
    final line (i.e. the number of sent commands).
    """
    directory, log_file = build_directory_and_logfile(date_to_retrieve)
    print(log_file)
    # Each non empty line is a sent command
    # Command of power is substituted by episode finishing line
    # Minus last line that is the total time
    counter_line = -1
    with open(log_file) as f:
        for line in f:
            if len(line.strip()) != 0:  # Not empty lines
                counter_line += 1
                last_line = line
    # NOTE(review): 'last_line' is unbound if the log has no non-empty
    # lines; a valid, completed log is assumed here -- confirm.
    secs = float(last_line.split()[3])
    # side effect: sets numpy's global float print format for later plots
    np.set_printoptions(formatter={'float': lambda output: "{0:0.4f}".format(output)})
    print("Total lines", counter_line)
    print("Last line", last_line)
    print("Seconds", secs)
    # Number of lines in log file correspond to number of sent commands
    return secs, counter_line
def read_avg_reward_from_output_file(algorithm, date_to_retrieve):
    """Read episodes and average reward per time step from the output csv.

    Returns (episode_numbers, avg_rewards) where each average is the episode
    reward divided by its time-step count (or by 1 when zero time steps were
    recorded, avoiding division by zero).
    """
    directory, filename = build_directory_and_filename(algorithm, date_to_retrieve)
    episodes = []
    avg_rewards = []
    with open(directory + '/' + filename, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader, None)  # skip header row
        for row in reader:
            episodes.append(int(row[0]))
            reward = float(row[1])
            timesteps = float(row[3])
            # average over the number of commands; divide by 1 when the
            # episode recorded zero time steps
            avg_rewards.append(reward if timesteps == 0.0 else reward / timesteps)
    return episodes, avg_rewards
def read_parameters_from_output_file(date_to_retrieve):
    """Load the saved run parameters as a {name: value} dict of strings."""
    directory = FrameworkConfiguration.directory + 'output/output_Q_parameters'
    file_parameters = 'output_parameters_' + date_to_retrieve + '.csv'
    with open(directory + '/' + file_parameters, 'r') as csv_file:
        rows = csv.reader(csv_file, delimiter=',')
        return {row[0].strip(): row[1].strip() for row in rows}
def read_reward_timesteps_from_output_file(algorithm, date_to_retrieve, partial=None):
    """
    Read reward, cumulative reward and timesteps data from output file.

    Returns (x, y_reward, y_cum_reward, y_timesteps). Note that full and
    partial csv files use different column orders, and partial results carry
    no cumulative reward (zeros are returned in that slot).
    """
    directory, filename = build_directory_and_filename(algorithm, date_to_retrieve, partial)
    x = []
    y_reward = []
    y_cum_reward = []
    y_timesteps = []
    with open(directory + '/' + filename, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader, None)
        for row in reader:
            if partial is None:
                # full results: episode, reward, cum_reward, timesteps
                #if int(row[0]) >= 100:
                #    break
                x.append(int(row[0]))
                y_reward.append(int(row[1]))
                y_cum_reward.append(int(row[2]))
                y_timesteps.append(int(row[3]))
            else:
                # partial results: episode, timesteps, reward
                x.append(int(row[0]))
                y_timesteps.append(int(row[1]))
                y_reward.append(int(row[2]))
                y_cum_reward.append(0)  # don't care about cumulative reward if I want to analyze partial results
    return x, y_reward, y_cum_reward, y_timesteps
def compute_avg_over_multiple_runs(number_episodes, number_runs, y_all_reward, y_all_cum_reward, y_all_timesteps):
    """Average reward, cumulative reward and timesteps across runs.

    Each ``y_all_*`` argument is indexed as [run][episode]. The returned
    lists are indexed by episode and contain the mean over the first
    *number_runs* runs for the first *number_episodes* episodes.
    """
    y_final_reward = []
    y_final_cum_reward = []
    y_final_timesteps = []
    runs = float(number_runs)
    for episode in range(number_episodes):
        y_final_reward.append(
            sum(y_all_reward[run][episode] for run in range(number_runs)) / runs)
        y_final_cum_reward.append(
            sum(y_all_cum_reward[run][episode] for run in range(number_runs)) / runs)
        y_final_timesteps.append(
            sum(y_all_timesteps[run][episode] for run in range(number_runs)) / runs)
    return y_final_reward, y_final_cum_reward, y_final_timesteps
def clear_tmp_files():
    """
    Delete the tmp directory inside Plotter module containing temporary files used for plotting.
    """
    # NOTE: raises FileNotFoundError if ./tmp/ does not exist; callers are
    # expected to invoke this only after temporary files were produced.
    shutil.rmtree("./tmp/")
| StarcoderdataPython |
15878 | # ----------------------------------------------------------------------
# |
# | CastExpressionParserInfo_UnitTest.py
# |
# | <NAME> <<EMAIL>>
# | 2021-10-04 09:14:16
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Unit test for CastExpressionParserInfo.py"""
import os
import pytest
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..CastExpressionParserInfo import *
from ...Common.AutomatedTests import RegionCreator
from ...Types.StandardTypeParserInfo import StandardTypeParserInfo
# ----------------------------------------------------------------------
def test_TypeWithModifierError():
    """A cast whose target type already carries a modifier must be rejected."""
    regions = RegionCreator()
    # Argument (and therefore region-creation) order matches the original:
    # outer regions first, then the expression's, then the type's.
    with pytest.raises(TypeWithModifierError) as exc_info:
        CastExpressionParserInfo(
            [regions(container=True), regions(), regions()],
            ExpressionParserInfo([regions(container=True)]),
            StandardTypeParserInfo(
                [regions(container=True), regions(), regions(expected_error=True)],
                "TheType",
                TypeModifier.val,
            ),
        )
    error = exc_info.value
    assert str(error) == "Cast expressions may specify a type or a modifier, but not both."
    assert error.Region == regions.ExpectedErrorRegion()
def test_InvalidModifierError():
    """A cast using a modifier outside the supported set ('ref', 'val',
    'view') must raise InvalidModifierError at the modifier's region.

    NOTE: ``region_creator(...)`` calls are stateful and order-sensitive.
    """
    region_creator = RegionCreator()
    with pytest.raises(InvalidModifierError) as ex:
        CastExpressionParserInfo(
            [
                region_creator(container=True),
                region_creator(),
                region_creator(expected_error=True),
            ],
            ExpressionParserInfo([region_creator(container=True),]),
            # 'mutable' is a valid TypeModifier but not allowed in casts.
            TypeModifier.mutable,
        )
    ex = ex.value
    assert str(ex) == "'mutable' cannot be used in cast expressions; supported values are 'ref', 'val', 'view'."
    assert ex.Region == region_creator.ExpectedErrorRegion()
| StarcoderdataPython |
115676 | <gh_stars>10-100
# Generated by Django 3.0.2 on 2020-01-25 19:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds help_text/verbose_name metadata to the
    Channel model's per-field "mandatory" flags and to its name field.

    NOTE: Django migrations are generated artifacts; do not hand-edit the
    operations below — create a new migration instead.
    """
    dependencies = [
        ('channels', '0003_auto_20200110_0200'),
    ]
    operations = [
        migrations.AlterField(
            model_name='channel',
            name='city',
            field=models.BooleanField(default=False, help_text='Name of city in institution address is mandatory for this channel.', verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='email',
            field=models.BooleanField(default=False, help_text='Institution e-mail address is mandatory for this channel.', verbose_name='E-mail'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='epuap',
            field=models.BooleanField(default=False, help_text='Institution ePUAP address is mandatory for this channel.', verbose_name='ePUAP'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='flat_no',
            field=models.BooleanField(default=False, help_text='Flat number in institution address is mandatory for this channel.', verbose_name='Flat number'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='house_no',
            field=models.BooleanField(default=False, help_text='House number in institution address is mandatory for this channel.', verbose_name='House number'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='name',
            field=models.CharField(help_text="Channel's name. Name cannot be longer than 25 characters.", max_length=25, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='postal_code',
            field=models.BooleanField(default=False, help_text='Postal code in institution address is mandatory for this channel.', verbose_name='Postal code'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='street',
            field=models.BooleanField(default=False, help_text='Name of street in institution address is mandatory for this channel.', verbose_name='Street'),
        ),
        migrations.AlterField(
            model_name='channel',
            name='voivodeship',
            field=models.BooleanField(default=False, help_text='Voivodeship in institution address is mandatory for this channel.', verbose_name='Voivodeship'),
        ),
    ]
| StarcoderdataPython |
1797470 | """ projects subsystem's configuration
- config-file schema
- settings
"""
import trafaret as T
# Name of the section in the application's config file that this schema validates.
CONFIG_SECTION_NAME = "projects"
# Trafaret schema for the section: a single optional boolean flag
# "enabled" that defaults to True when the key is absent.
schema = T.Dict({T.Key("enabled", default=True, optional=True): T.Bool()})
| StarcoderdataPython |
1603416 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated migration: creates the EventLike model (a per-user
    "like" of an Event) with a unique (user, event) constraint.

    NOTE: generated artifact — create a new migration rather than editing
    these operations.
    """
    dependencies = [
        ('event', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='EventLike',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event', models.ForeignKey(to='event.Event', related_name='event_likes')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='st_event_likes')),
            ],
            options={
                'verbose_name_plural': 'likes',
                'ordering': ['-date', '-pk'],
                'verbose_name': 'like',
            },
        ),
        # One like per (user, event) pair.
        migrations.AlterUniqueTogether(
            name='eventlike',
            unique_together=set([('user', 'event')]),
        ),
    ]
| StarcoderdataPython |
3216019 | <reponame>gleckler1/pcmdi_metrics
import genutil
################################################################################
# OPTIONS ARE SET BY USER IN THIS FILE AS INDICATED BELOW BY:
#
################################################################################
## RUN IDENTIFICATION
# DEFINES A SUBDIRECTORY TO METRICS OUTPUT RESULTS SO MULTIPLE CASES CAN BE COMPARED
case_id = 'sampletest1'
#case_id = 'cmip5_test'
# LIST OF MODEL VERSIONS TO BE TESTED - WHICH ARE EXPECTED TO BE PART OF CLIMATOLOGY FILENAME
model_versions = ['GFDL-ESM2G',]
#model_versions = ['MRI-CGCM3',]
### VARIABLES AND OBSERVATIONS TO USE
# NOTE: this config is edited by toggling lines; successive assignments to
# `vars` below OVERRIDE earlier ones, so only the LAST uncommented list
# (here: ['zos']) takes effect.  (`vars` also shadows the builtin vars().)
vars = ['zos','pr','rlut','tos']
#vars = ['tas','pr']
#vars = ['pr','tas','tos']
#vars = ['tas','tos']
vars = ['tos']
vars = ['tas']
vars=['hus_850',]
vars = ['pr','tas','rlut','rsut','hus_850']
vars = ['ta_850','ta_200','ua_850','ua_200','va_850','va_200','zg_500','rlut','rsut','rlutcs','rsutcs','tas']
#vars = ['rlutcs','rsutcs','vas','tas']
vars = ['tauu','tauv']
#vars = ['ta_850']
vars = ['zos']
# Observations to use at the moment "default" or "alternate"
ref = 'all'
ref = ['default'] #,'alternate','ref3']
# File extension of input climatologies; the second assignment wins ('.nc').
ext = '.xml' #'.nc'
ext = '.nc'
# INTERPOLATION OPTIONS
targetGrid = '2.5x2.5' # OPTIONS: '2.5x2.5' or an actual cdms2 grid object
regrid_tool = 'regrid2' #'regrid2' # OPTIONS: 'regrid2','esmf'
regrid_method = 'linear' # OPTIONS: 'linear','conservative', only if tool is esmf
regrid_tool_ocn = 'esmf' # OPTIONS: "regrid2","esmf"
regrid_method_ocn = 'linear' # OPTIONS: 'linear','conservative', only if tool is esmf
# SIMULATION PARAMETERS
model_period = '1980-2005'
realization = 'r1i1p1'
# SAVE INTERPOLATED MODEL CLIMATOLOGIES ?
save_mod_clims = True # True or False
## DATA LOCATION: MODELS, OBS AND METRICS OUTPUT
## Templates for climatology files
## TEMPLATE EXAMPLE: cmip5.GFDL-ESM2G.historical.r1i1p1.mo.atm.Amon.rlut.ver-1.1980-1999.AC.nc
# NOTE: like `vars` above, only the LAST filename_template assignment is used.
filename_template = "cmip5.%(model_version).historical.r1i1p1.mo.%(table_realm).%(variable).ver-1.%(period).AC.%(ext)"
filename_template = "cmip5.%(model_version).historical.%(realization).mo.%(table_realm).%(variable).ver-1.%(model_period).AC.%(ext)"
#filename_template = "%(variable)_MEAN_CLIM_METRICS_%(model_version)_%(realization)_%(model_period)-clim.xml"
filename_template = "cmip5.%(model_version).historical.r1i1p1.mo.%(table_realm).%(variable).ver-1.latestX.1980-2005.AC.nc"
filename_template = "cmip5.%(model_version).historical.r1i1p1.mo.%(table_realm).%(variable).ver-v20110601.1980-2005.AC.nc" ## tos
filename_template = "%(variable)_%(model_version)_%(table_realm)_historical_r1i1p1_198501-200512-clim.nc"
## ROOT PATH FOR MODELS CLIMATOLOGIES
mod_data_path = '/work/gleckler1/processed_data/cmip5clims_metrics_package/'
#mod_data_path = '/work/gleckler1/processed_data/cmip5clims-AR5-frozen_1dir/'
## ROOT PATH FOR OBSERVATIONS
obs_data_path = '/work/gleckler1/processed_data/metrics_package/'
## DIRECTORY WHERE TO PUT RESULTS
metrics_output_path = '/work/gleckler1/processed_data/metrics_package/metrics_results/'
## DIRECTORY WHERE TO PUT INTERPOLATED MODELS' CLIMATOLOGIES
model_clims_interpolated_output = '/work/gleckler1/processed_data/metrics_package/interpolated_model_clims/'
## FILENAME FOR INTERPOLATED CLIMATOLGIES OUTPUT
filename_output_template = "cmip5.%(model_version).historical.r1i1p1.mo.%(table_realm).%(variable)%(level).ver-1.%(period).interpolated.%(regridMethod).%(targetGridName).AC%(ext)"
| StarcoderdataPython |
4840312 | <filename>BookAnalyzer.py
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
from re import findall
# --------------------------------------------------------------------------
# basic functions
# --------------------------------------------------------------------------
# read and convert PDF to TXT
def convert_pdf_to_txt(pdf_path, output_txt_file_path=None):
    """Extract the text content of a PDF file using pdfminer.

    Args:
        pdf_path: path of the PDF file to read.
        output_txt_file_path: optional path; when given, the extracted text
            is also written to that file.

    Returns:
        The extracted text as a single string.
    """
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()
    # 'with'/'finally' guarantee the PDF handle, converter device and text
    # buffer are released even when parsing raises; the original leaked all
    # three on any exception.
    try:
        with open(pdf_path, 'rb') as fp:
            for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,
                                          caching=caching, check_extractable=True):
                interpreter.process_page(page)
        text = retstr.getvalue()
    finally:
        device.close()
        retstr.close()
    if output_txt_file_path:
        with open(output_txt_file_path, "w") as ofp:
            ofp.write(text)
    return text
# count letters
def count_letters(txt):
return len(findall('[a-zA-z]', txt))
# count words
def count_words(txt):
return len(findall('([a-zA-z]{1,20})', txt))
# count numbers
def count_numbers(txt):
return len(findall('([0-9]+)', txt))
# count_digits
def count_digits(txt):
return len(findall('[0-9]', txt))
# count repetitions
def count_repetitions(pattern, txt):
mixed = ''
for index in range(len(pattern)):
mixed += '({}|{})'.format(pattern[index].upper(), pattern[index].lower())
return len(findall(mixed, txt))
# -------------------------------------------------------------------------
# Console ui
# -------------------------------------------------------------------------
# NOTE(review): these statements run at import time, so merely importing
# this module prompts the user and parses a PDF.  `txt` becomes a module
# global consumed by main() below.
print(' Welcome to BookAnalyzer \n')
pdf = input('Please copy the full path to your PDF file:\n>>> ')
txt = convert_pdf_to_txt(pdf)
def main():
    """Interactive menu: repeatedly prompt for an option and report the
    requested statistic about the module-global `txt`.

    Changes from the original:
    - The original re-entered main() recursively after every choice, growing
      the call stack without bound during a long session; this version loops.
    - Option '5' previously fell through without pausing and silently exited
      the program; it now pauses and returns to the menu like the others.

    Returns:
        0 when the user selects quit ('6').
    """
    while True:
        ch = input('select an option\n\n'
                   ' 0) about this script\n'
                   ' 1) count letters\n'
                   ' 2) count words\n'
                   ' 3) count repetitions of a Word/Phrase\n'
                   ' 4) count numbers ("1234" is counted as one number)\n'
                   ' 5) count digits ("1234" is counted as 4 digits"\n'
                   ' 6) quit\n'
                   '>>> ')
        if ch == '0':
            print('this script will help getting statistics of a pdf file content\n'
                  ' hit ENTER to return')
            input()
        elif ch == '1':
            print('letters: ', count_letters(txt))
            input()
        elif ch == '2':
            print('words: ', count_words(txt))
            input()
        elif ch == '3':
            pattern = input('type the word/Phrase: ')
            print('"{}" was repeated {} times'.format(pattern, count_repetitions(pattern, txt)))
            input()
        elif ch == '4':
            print('numbers: ', count_numbers(txt))
            input()
        elif ch == '5':
            print('digits: ', count_digits(txt))
            input()
        elif ch == '6':
            return 0
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3260478 | import yaml
import pandas as pd
import subprocess
import numpy as np
from tabulate import tabulate
import ruamel.yaml
DATE='Date'
DUE='Due'
def load_yaml_file(file):
    """
    Loads a yaml file from file system, preserving quotes/comments
    (ruamel round-trip mode).
    @param file Path to file to be loaded.
    @return The parsed mapping, or the decoded command output on error.
    """
    try:
        # Name the handle 'stream': the original named it 'yaml', shadowing
        # the imported `yaml` module inside this function.
        with open(file, 'r') as stream:
            kwargs = ruamel.yaml.round_trip_load(stream, preserve_quotes=True)
            return kwargs
    except subprocess.CalledProcessError as e:
        # NOTE(review): open()/round_trip_load never raise
        # CalledProcessError, so this handler is almost certainly dead code
        # copied from a subprocess helper — confirm before removing.
        print("error")
        return(e.output.decode("utf-8"))
def update_yaml_file(file, kwargs):
    """
    Updates a yaml file (ruamel round-trip dump, preserving formatting).
    @param file Path of the yaml file to write.
    @param kwargs Mapping to dump.
    """
    print("Updating the file: " + str(file))
    try:
        ruamel.yaml.round_trip_dump(kwargs, open(file, 'w'))
    except subprocess.CalledProcessError as e:
        # str(e) is required here: the original did `print("error: " + e)`,
        # which itself raises TypeError (str + exception) and masks the
        # real failure.
        print("error: " + str(e))
# def load_yaml_file(file):
# """
# Loads a yaml file from file system.
# @param file Path to file to be loaded.
# """
# try:
# with open( file ) as f:
# cf = yaml.safe_load(f)
# return cf
# except subprocess.CalledProcessError as e:
# print("error")
# return(e.output.decode("utf-8"))
#
# def update_yaml_file(file, data):
# """
# Updates a yaml file.
# @param kwargs dictionary.
# """
# print("Updating the file: " + str(file))
# try:
# with open(file, 'w') as outfile:
# yaml.dump(data, outfile, default_flow_style=False)
#
# except subprocess.CalledProcessError as e:
# print("error: " + e)
def create_md_title(title, content=""):
    """Render *title* as a setext-style Markdown heading followed by
    *content* and a trailing newline."""
    underline = "\n============================\n\n"
    return "{}{}{}\n".format(title, underline, content)
def pandas_to_md(df, file, title, include, header="",footer=""):
    """Write *df* (columns listed in *include*) to *file* as a Markdown table
    under a setext heading *title*, with optional *header*/*footer* text.

    Side effects: *df* is modified IN PLACE (date columns reformatted,
    NaNs replaced, everything cast to str) and also returned.
    Requires the `tabulate` package for DataFrame.to_markdown.
    """
    # Reformat the module-level DATE/DUE columns to 'MM/DD' strings.
    if DATE in df.columns:
        #if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(df[DATE]):
        print("Converting datetime to ")
        df[DATE]=df[DATE].dt.strftime('%m/%d')
    if DUE in df.columns:
        #if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(df[DATE]):
        print("Converting datetime to ")
        df[DUE]=df[DUE].dt.strftime('%m/%d')
    #Deal with NA, float, str: -99 is a temporary NaN sentinel that is
    # blanked out again after the string conversion below.
    df=df.fillna(-99)
    # NOTE(review): np.float was removed in NumPy 1.24 — this line breaks on
    # modern NumPy; should be float or np.float64.
    float_cols = df.select_dtypes(include=[np.float]).columns
    df[float_cols]=df[float_cols].astype(int)
    df=df.astype(str)
    df=df.replace('-99', ' ')
    md_title=create_md_title(title, header)
    pd.set_option('precision', 0)
    table=df.loc[:,include].to_markdown(tablefmt="pipe", headers="keys", index="never")
    #table=tabulate(df, tablefmt="pipe", headers="keys")
    output= md_title+table+footer
    print("Outputting file:", file)
    with open(file, "w") as text_file:
        text_file.write(output)
    return df
def add_row_md(md_file, title, df):
    """Append *df* (minus its last column) as a pipe-style Markdown table
    under a '## *title*' section to the text *md_file*, and return the
    extended text.  Requires the `tabulate` package for to_markdown."""
    section_header = '\n## ' + title + '\n\n'
    table = df.iloc[:, 0:-1].to_markdown(tablefmt="pipe", headers="keys", index="never")
    return md_file + section_header + table + '\n\n'
def generate_sessions(config, toc, toc_part, schedule, path, content, keys):
    """Regenerate per-session Markdown files and the matching TOC chapter list.

    For every row of *schedule* with Publish == '1', writes
    path/session<Session>.md containing the topic heading plus one section
    per key in *keys* (rows of content[key] matching the session), and
    registers the file under toc[toc_part]['chapters'].

    Side effects: mutates *toc* and every content[key] DataFrame (cast to
    str); writes files under *path*.  *config* is currently unused.
    Returns the updated *toc*.
    """
    toc[toc_part]['chapters']=[] #zero out the sessions
    for index, row in schedule.iterrows():
        if row['Publish']=='1':
            toc[toc_part]['chapters'].append({'file': 'sessions/session'+row['Session']})
            md_file=create_md_title(row['Topic']+' ('+row['Date'] +')', row['Summary'])
            for key in keys:
                content[key]=content[key].astype(str)
                selection= content[key].loc[content[key]['Session']==row['Session'],:]
                if len(selection)>0:
                    md_file=add_row_md(md_file, key, selection)
            file='session'+row['Session']+'.md'
            print("Outputting ", file)
            with open(path / file, "w") as text_file:
                text_file.write(md_file)
    return toc
def link_generator(df, target, repo_url, link_name):
    """Append a Markdown link named *link_name* to the *target* column of each
    row, built from the row's 'Type' and 'Location', then drop 'Location'.

    - 'Link'                 -> Location used verbatim as the URL
    - 'File'                 -> raw file on the repository's master branch
    - 'Notebook'/'Markdown'  -> relative path one directory up
    Any other type leaves the target cell untouched.

    *df* is modified in place and also returned.
    """
    for idx in df.index:
        row_type = df.at[idx, 'Type']
        location = df.at[idx, 'Location']
        current = df.at[idx, target]
        if row_type == 'Link':
            df.at[idx, target] = current + " [" + link_name + "](" + location + ")"
        elif row_type == 'File':
            df.at[idx, target] = current + " [" + link_name + "](" + repo_url + "/raw/master/" + location + ")"
        elif row_type in ('Notebook', 'Markdown'):
            df.at[idx, target] = current + " [" + link_name + "](../" + location + ")"
    df.drop(columns=['Location'], inplace=True)
    return df
def create_syllabus(df, item, message, path, repo_url):
    """Write a 'Syllabus' Markdown page to *path*.

    The page body is *message* followed by a link to the raw syllabus file,
    whose repository-relative location is read from df.loc[item, 'Location'].
    """
    file_location = df.loc[item, 'Location']
    print(file_location)
    body = message + "\n[Syllabus](" + repo_url + "/raw/master/" + file_location + ")"
    body = create_md_title("Syllabus", content=body)
    print("Outputting ", path)
    with open(path, "w") as text_file:
        text_file.write(body)
| StarcoderdataPython |
128087 | <reponame>ConsenSys/mythx-models
"""This module contains the GroupListRequest domain model."""
from datetime import datetime
from typing import Optional
from pydantic import BaseModel, Field
class GroupListRequest(BaseModel):
    """Request model for listing analysis groups (GET v1/analysis-groups).

    All filters are optional; the snake_case field names are aliased to the
    camelCase query-parameter names the API expects.
    """
    offset: Optional[int]
    created_by: Optional[str] = Field(alias="createdBy")
    group_name: Optional[str] = Field(alias="groupName")
    date_from: Optional[datetime] = Field(alias="dateFrom")
    date_to: Optional[datetime] = Field(alias="dateTo")
    class Config:
        allow_population_by_field_name = True
        use_enum_values = True
    @property
    def endpoint(self):
        """API endpoint path for this request."""
        return "v1/analysis-groups"
    @property
    def method(self):
        """HTTP method for this request."""
        return "GET"
    @property
    def payload(self):
        """GET request: no body."""
        return {}
    @property
    def headers(self):
        """No extra headers are required."""
        return {}
    @property
    def parameters(self):
        """Query parameters, camelCased; datetimes serialized as ISO-8601."""
        return {
            "offset": self.offset,
            "createdBy": self.created_by,
            "groupName": self.group_name,
            "dateFrom": self.date_from.isoformat() if self.date_from else None,
            "dateTo": self.date_to.isoformat() if self.date_to else None,
        }
| StarcoderdataPython |
156913 | # vim:fileencoding=UTF-8
#
# Copyright © 2015, 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 with modifications
# and the "Commons Clause" Condition, (the "License"); you may not
# use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://raw.githubusercontent.com/StanLivitski/cards.webapp/master/LICENSE
#
# The grant of rights under the License will not include, and the License
# does not grant to you, the right to Sell the Software, or use it for
# gambling, with the exception of certain additions or modifications
# to the Software submitted to the Licensor by third parties.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""cards_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.http import HttpResponseRedirect
#from django.contrib import admin
urlpatterns = [
    # url(r'^admin/', include(admin.site.urls)),
    # The durak game app handles everything under /durak/.
    url(r'^durak/', include('durak_ws.urls')),
    # Redirect any favicon.ico request (at any depth) to the static icon.
    url(r'(?:.*/)?favicon.ico$',
        lambda request: HttpResponseRedirect(
            settings.STATIC_URL + 'durak/images/favicon.ico')),
    # Catch-all: send every other URL to the game root.
    url(r'', lambda request: HttpResponseRedirect('/durak/')),
]
| StarcoderdataPython |
1729797 | <reponame>aj-clark/earthenterprise<gh_stars>1-10
#-*- Python -*-
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Return the package information for the current version of
# Google Earth Enterprise Fusion.
import sys
import os
def EnsurePathExists(dir):
    """Make sure dir exists, create it (and any parents) if it does not exist.

    Uses os.makedirs instead of the original os.system("mkdir -p " + dir):
    no shell means no quoting/injection issues with unusual path names, and
    failures raise OSError instead of being silently ignored.
    """
    if not os.path.exists(dir):
        os.makedirs(dir)
def ExecEcho(command):
    """Execute and echo the system command.

    NOTE(review): *command* is passed verbatim to the shell via os.system,
    so callers must only pass trusted, properly quoted strings.
    """
    print(command)
    os.system(command)
def SyncPackages(build_agents_map, dest_dir):
    """Use rsync to copy packages from remote build machines to this machine.
    build_agents_map: map of architectures to build agent rsync repositories to
        copy the packages from
    dest_dir: the directory into which the rpm packages will be copied
        (destructively recreated: existing contents are deleted first)"""
    # Clear the destination sync dir
    ExecEcho("rm -rf " + dest_dir)
    print("\nRsync'ing RPMS from build machines")
    # For each build agent, rsync the latest files to the current build machine.
    for arch in build_agents_map:
        src_dir = build_agents_map[arch]
        EnsurePathExists(dest_dir + arch)
        ExecEcho("rsync -rtl " + src_dir + " " + dest_dir + arch)
def DumpPackages(packages, package_dir, dest_dir):
    """Dump the contents of every package into the destination directory.

    packages: list of PackageInfo objects to unpack
    package_dir: directory holding the built RPMs
    dest_dir: directory that receives each package's extracted contents
    """
    print("\nDumping RPM Packages into InstallAnywhere...")
    for pkg in packages:
        DumpPackage(pkg, package_dir, dest_dir)
def DumpPackage (package, package_dir, parent_dir):
    """Dump the contents of a package to the specified directory.
    package: the PackageInfo for the package to be dumped
    package_dir: the package directory for rpms
    parent_dir: the directory to place the package contents.

    NOTE(review): this function chdir()s into the destination and only
    restores the cwd on the success path — an exception between the two
    os.chdir calls leaves the process in dest_dir.
    """
    current_dir = os.getcwd() # Save the current directory.
    # We need to get the absolute path to the RPM since we must cd into
    # the destination directory to dump the files from the package.
    package_path = os.path.abspath(package.RpmName(package_dir))
    # The package must be unpacked in a directory of the form:
    # "./lib/arch/package-version.arch/"
    dest_dir = package.InstallAnywhereDir(parent_dir)
    # Clear the existing contents
    ExecEcho("rm -rf " + dest_dir + "*")
    EnsurePathExists(dest_dir)
    os.chdir(dest_dir);
    print("Dumping " + package.RpmName() + " to " + parent_dir + package.arch);
    # rpm2cpio converts the RPM to a cpio stream; cpio -idm extracts it
    # in place, preserving directories and modification times.
    os.system("rpm2cpio " + package_path + " | cpio -idm")
    os.chdir(current_dir) # cd back to the original working directory
def CopySrpms(packages, srpm_dir, dest_dir):
    """Copy any packages that are marked as 'SrpmRequired' to the dest_dir.
    packages: list of PackageInfo's (some of which may be marked 'SrpmRequired')
    srpm_dir: the parent dir of the SRPMS
    dest_dir: destination directory for the copies of the SRPMS
    """
    print("\nCopying SRPMS into InstallAnywhere...")
    # Clear the existing SRPMS
    os.system("rm -rf " + dest_dir + "*")
    EnsurePathExists(dest_dir)
    # A set keeps only unique SRPM paths: several binary packages may be
    # built from the same source RPM.
    unique_srpms = {pkg.SrpmName(srpm_dir) for pkg in packages if pkg.SrpmRequired()}
    for srpm in unique_srpms:
        ExecEcho("cp " + srpm + " " + dest_dir)
| StarcoderdataPython |
4828691 | <gh_stars>10-100
class CountryCodeException(Exception):
    """Raised when the phone number does not include a country code."""
class CallTimeException(Exception):
    """Raised when the wait time is too short for WhatsApp Web to open."""
class InternetException(Exception):
    """Raised when the host machine is offline or the connection is too slow."""
class InvalidPhoneNumber(Exception):
    """Raised when the supplied phone number is invalid."""
class UnsupportedEmailProvider(Exception):
    """Raised when the email provider used to send the email is unsupported."""
class UnableToAccessApi(Exception):
    """Raised when the pywhatkit API cannot be reached."""
| StarcoderdataPython |
3237657 | """Main module tests."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path as op
import tempfile
import shutil
import numpy as np
import tables as tb
from nose import with_setup
from kwiklib.dataio import (BaseRawDataReader, read_raw, create_files,
open_files, close_files, add_recording, add_cluster_group, add_cluster,
get_filenames, Experiment, excerpts)
from spikedetekt2.core import run
from kwiklib.utils import itervalues, get_params, Probe, create_trace
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
# Shared fixture data: a throwaway experiment directory plus 1 s of random
# 8-channel "traces" at 2 kHz, with amplified excerpts acting as spikes.
DIRPATH = tempfile.mkdtemp()
sample_rate = 2000.
duration = 1.
nchannels = 8
nsamples = int(sample_rate * duration)
raw_data = .1 * np.random.randn(nsamples, nchannels)
# Add "spikes".
for start, end in excerpts(nsamples, nexcerpts=100, excerpt_size=10):
    raw_data[start:end] *= 5
# Detection parameters used by every test below.
prm = get_params(**{
    'nchannels': nchannels,
    'sample_rate': sample_rate,
    'detect_spikes': 'positive',
    'save_high': True,
    'save_raw': True,
})
# Probe: one channel group, channels chained linearly in the adjacency graph.
prb = {0:
    {
        'channels': list(range(nchannels)),
        'graph': [(i, i + 1) for i in range(nchannels - 1)],
    }
}
def setup():
    """Create a fresh 'myexperiment' file set in DIRPATH with one recording
    and an initial cluster/cluster-group (nose per-test fixture)."""
    create_files('myexperiment', dir=DIRPATH, prm=prm, prb=prb)
    # Open the files.
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    # Add data.
    add_recording(files,
                  sample_rate=sample_rate,
                  nchannels=nchannels)
    add_cluster_group(files, channel_group_id='0', id='0', name='Noise')
    add_cluster(files, channel_group_id='0',)
    # Close the files
    close_files(files)
def teardown():
    """Remove every file created for the test experiment (nose fixture)."""
    files = get_filenames('myexperiment', dir=DIRPATH)
    # Plain loop instead of the original throwaway list comprehension:
    # the list of None results was never used.
    for path in itervalues(files):
        os.remove(path)
# -----------------------------------------------------------------------------
# Processing tests
# -----------------------------------------------------------------------------
def test_run_nospikes():
    """An all-zero input trace must yield zero detected spikes."""
    # Run the algorithm.
    with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:
        run(np.zeros((nsamples, nchannels)),
            experiment=exp, prm=prm, probe=Probe(prb))
    # Open the data files.
    with Experiment('myexperiment', dir=DIRPATH) as exp:
        assert len(exp.channel_groups[0].spikes) == 0
@with_setup(setup,)
def test_run_1():
    """Read from a NumPy array: per-spike arrays must agree in length and
    the raw/high/low traces must have the expected shapes."""
    # Run the algorithm.
    with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:
        run(raw_data, experiment=exp, prm=prm, probe=Probe(prb),)
    # Open the data files.
    with Experiment('myexperiment', dir=DIRPATH) as exp:
        nspikes = len(exp.channel_groups[0].spikes)
        assert exp.channel_groups[0].spikes.clusters.main.shape[0] == nspikes
        assert exp.channel_groups[0].spikes.features_masks.shape[0] == nspikes
        assert exp.channel_groups[0].spikes.waveforms_filtered.shape[0] == nspikes
        assert isinstance(exp.channel_groups[0]._node.pca_waveforms,
                          tb.Array)
        # Assert the log file exists.
        logfile = exp.gen_filename('log')
        assert os.path.exists(logfile)
        assert exp.recordings[0].raw.shape == (nsamples, nchannels)
        assert exp.recordings[0].high.shape == (nsamples, nchannels)
        # The low-passed trace is decimated ~16x; allow +/-2 samples of slack.
        assert exp.recordings[0].low.shape[0] in range(nsamples // 16 - 2,
                                                       nsamples // 16 + 3)
        assert exp.recordings[0].low.shape[1] == nchannels
@with_setup(setup,)
def test_run_2():
    """Read from a .dat file: the fixture data is scaled to int16, written
    to disk, and the pipeline must detect at least one spike from it."""
    path = os.path.join(DIRPATH, 'mydatfile.dat')
    (raw_data * 1e4).astype(np.int16).tofile(path)
    # Run the algorithm.
    with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:
        run(path, experiment=exp, prm=prm, probe=Probe(prb))
    # Open the data files.
    with Experiment('myexperiment', dir=DIRPATH) as exp:
        assert len(exp.channel_groups[0].spikes)
@with_setup(setup,)
def test_run_canonical_pcs():
    """run() must accept precomputed canonical principal components via
    prm['canonical_pcs'] without error (smoke test — no output checks)."""
    prm_canonical = prm.copy()
    canonical_pcs = np.ones((prm['nfeatures_per_channel'],
                             prm['waveforms_nsamples'],
                             prm['nchannels']))
    prm_canonical['canonical_pcs'] = canonical_pcs
    with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:
        run(raw_data, experiment=exp, prm=prm_canonical, probe=Probe(prb),)
@with_setup(setup,)
def test_diagnostics():
    """run() must import and invoke the user diagnostics script referenced
    by prm['diagnostics_path'] (smoke test — no output checks)."""
    tmp_dir = tempfile.mkdtemp()  # renamed from 'dir' (shadowed the builtin)
    try:
        path = op.join(tmp_dir, 'diagnostics.py')
        with open(path, 'w') as f:
            f.write(
                'def diagnostics(prm=None, **kwargs):\n'
                ' print(prm)\n'
                '\n')
        prm['diagnostics_path'] = path
        with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:
            run(np.zeros((nsamples, nchannels)),
                experiment=exp, prm=prm, probe=Probe(prb))
    finally:
        # Always clean the temp dir up, even when run() fails — the
        # original leaked it on any exception.
        shutil.rmtree(tmp_dir)
| StarcoderdataPython |
1653831 | """
File name: gdax/client.py
Author: <NAME> <<EMAIL>>
Implementation of GDAX Client to get realtime data.
"""
import json
from datetime import datetime, timedelta
from websocket import create_connection
from cryptostreamer.provider import ProviderClient
class NoProductsError(Exception):
    """Raised when a GdaxClient is constructed with an empty product list."""
class NoChannelsError(Exception):
    """Raised when a GdaxClient is constructed with an empty channel list."""
from cryptostreamer import get_logger
# Module-wide logger for this client.
LOGGER = get_logger('GdaxClient')
# GDAX (now Coinbase Exchange) public websocket feed endpoint.
GDAX_WSS_URL = 'wss://ws-feed.gdax.com'
# Fallback websocket receive timeout (seconds) when none is supplied.
DEFAULT_WS_TIMEOUT = 30
class GdaxClient(ProviderClient):
	"""Websocket client for the GDAX realtime feed.

	Connects to GDAX_WSS_URL, subscribes to the configured products and
	channels (plus 'heartbeat'), and dispatches incoming messages to the
	``on_*`` callback methods, which subclasses override.
	"""
	@classmethod
	def create_with_environment(cls):
		"""Build a client configured entirely from environment variables."""
		kwargs = cls.kwargs_from_environment()
		return cls(**kwargs)
	@classmethod
	def kwargs_from_environment(cls):
		"""Read CRYPTO_STREAMER_GDAX_* env vars; unset ones are dropped so
		the constructor defaults apply."""
		kwargs = {
			'products': cls.get_list_from_env('CRYPTO_STREAMER_GDAX_PRODUCTS'),
			'channels': cls.get_list_from_env('CRYPTO_STREAMER_GDAX_CHANNELS'),
			'timeout': cls.get_int_from_env('CRYPTO_STREAMER_GDAX_TIMEOUT')
		}
		return {k: v for k,v in kwargs.items() if v is not None}
	# NOTE(review): mutable default arguments ([] / ['matches']) are shared
	# across instances; harmless here only because they are never mutated.
	def __init__(self,products=[],channels=['matches'],timeout=30):
		self._create_connection = create_connection
		self._products = products
		self._channels = channels
		if len(self._products) == 0: raise NoProductsError()
		if len(self._channels) == 0: raise NoChannelsError()
		self._timeout = timeout or DEFAULT_WS_TIMEOUT
	def start(self):
		"""
		Websocket client connects to GDAX server to the realtime tick data.
		Tick data is then streamed into Kafka GDAX topic.
		:return: None or on_error callback return
		"""
		self.on_setup()
		self._connect()
		self._subscribe()
		return self._mainloop()
	def stop(self):
		"""Stop the mainloop and close the websocket.

		NOTE(review): the bare ``except: pass`` deliberately swallows any
		shutdown error (including the disconnect of an already-dead socket).
		"""
		LOGGER.info("stop")
		try:
			self._mainloop_running = False
			self._disconnect()
		except: pass
	def on_setup(self):
		"""
		Called before connecting to the provider
		"""
		pass
	def on_message(self, msg):
		"""
		Callback for all the messages.
		"""
		LOGGER.debug("recv: %s" %msg)
		pass
	def on_heartbeat(self,heartbeat_msg):
		"""
		Callback to get heartbeat every second
		"""
		pass
	def on_last_match(self,last_match):
		"""
		Callback for last_match msg.
		Once connected and subscribed, a last match message is sent.
		:param last_match: dict
		"""
		pass
	def on_subscriptions(self,subscriptions_msg):
		"""
		Once the subscription message is sent, an subscriptions_msg answer is sent.
		:param subscriptions_msg: dict
		"""
		pass
	def on_match(self,match_msg):
		"""
		Implement this callback to get each trade.
		:param match_msg:
		"""
		pass
	def on_connected(self):
		"""
		Called when connected to websocket.
		"""
		pass
	def on_disconnected(self):
		"""
		Called when disconnected from gdax.
		"""
		pass
	def on_connection_error(self,e):
		"""
		Called when a connection error during subscription or mainloop is caught.
		If not implemented, it raises the exception.
		:param e: exception
		:return: None, if True it reconnects automatically
		"""
		LOGGER.error(e)
		raise e
	def _connect(self):
		"""Open the websocket and fire on_connected()."""
		self._ws = self._create_connection(GDAX_WSS_URL, timeout=self._timeout)
		self.on_connected()
	def _disconnect(self):
		"""Close the websocket, drop the reference, fire on_disconnected()."""
		LOGGER.info("_disconnect")
		self._ws.close()
		self._ws = None
		self.on_disconnected()
	def _subscribe(self):
		"""Send the subscription message; connection errors are delegated to
		on_connection_error().  The explicit heartbeat message is disabled —
		'heartbeat' is already requested as a channel in the subscription."""
		try:
			subscription_msg = self._subscription_message()
			heartbeat_msg = self._heartbeat_message()
			LOGGER.info("send: %s" % subscription_msg)
			self._ws.send(subscription_msg)
			# LOGGER.info("send: %s" % heartbeat_msg)
			# self._ws.send(heartbeat_msg)
		except Exception as e:
			return self.on_connection_error(e)
	def _subscription_message(self):
		"""
		Subscription message based on products and channels.
		Heartbeat channel is added to have a validation of the sequence.
		:return: string
		"""
		return json.dumps({
			'type': 'subscribe',
			'product_ids': list(set(self._products)),
			'channels': list(set(self._channels + ['heartbeat']))
		})
	def _heartbeat_message(self):
		"""JSON message enabling explicit heartbeats (currently unused)."""
		return json.dumps({"type": "heartbeat", "on": True})
	def _needs_ping(self):
		# NOTE(review): pings every 10 s even though _mainloop's docstring
		# says 30 s — confirm which interval is intended.
		return self._pinged_at + timedelta(seconds=10) < datetime.now()
	def _ping(self):
		"""Send a keepalive ping and record when it was sent."""
		self._ws.ping('keepalive')
		self._pinged_at = datetime.now()
	def _handle_message(self,msg):
		"""
		Handles all the message and proxy them to callbacks.
		:param msg: dict
		"""
		self.on_message(msg)
		msg_type = msg.get('type')
		if msg_type == 'heartbeat': self.on_heartbeat(msg)
		elif msg_type == 'last_match': self.on_last_match(msg)
		elif msg_type == 'subscriptions': self.on_subscriptions(msg)
		elif msg_type == 'match': self.on_match(msg)
	def _mainloop(self):
		"""
		The mainloop receives loops and gets and handles
		the messages from GDAX.
		It sends a ping every 30 seconds.
		"""
		self._mainloop_running = True
		self._pinged_at = datetime.now()
		while self._mainloop_running:
			self._mainloop_recv_msg()
	def _mainloop_recv_msg(self):
		"""One loop iteration: ping if due, receive one frame, decode it
		and dispatch; receive errors go to on_connection_error()."""
		try:
			if self._needs_ping(): self._ping()
			data = self._ws.recv()
		except Exception as e:
			return self.on_connection_error(e)
		msg = json.loads(data)
		self._handle_message(msg)
| StarcoderdataPython |
1688491 | from __future__ import print_function
import sys, os, importlib
import PARAMETERS
# Inject every name defined in PARAMETERS (imgDir, roiDir, classes, Bbox,
# helper functions, ...) into this module's globals.
# NOTE(review): locals() IS globals() at module level, so this works, but
# it makes the script's dependencies invisible to static analysis.
locals().update(importlib.import_module("PARAMETERS").__dict__)
####################################
# Parameters
####################################
subdirs = ['positive', 'testImages']
####################################
# Main
####################################
# For each ground-truth box, record the best overlap achieved by any ROI;
# then report average ROI count and recall at several overlap thresholds.
overlaps = []
roiCounts = []
for subdir in subdirs:
    imgFilenames = getFilesInDirectory(os.path.join(imgDir, subdir), ".jpg")
    # loop over all images
    for imgIndex,imgFilename in enumerate(imgFilenames):
        if imgIndex % 20 == 0:
            print ("Processing subdir '{}', image {} of {}".format(subdir, imgIndex, len(imgFilenames)))
        # load ground truth
        imgPath = os.path.join(imgDir, subdir, imgFilename)
        imgWidth, imgHeight = imWidthHeight(imgPath)
        gtBoxes, gtLabels = readGtAnnotation(imgPath)
        gtBoxes = [Bbox(*rect) for rect in gtBoxes]
        # load rois and compute scale
        rois = readRois(roiDir, subdir, imgFilename)
        rois = [Bbox(*roi) for roi in rois]
        roiCounts.append(len(rois))
        # for each ground truth, compute if it is covered by an roi
        # (maxOverlap stays -1 when the image has no in-class ground truth)
        maxOverlap = -1
        for gtIndex, (gtLabel, gtBox) in enumerate(zip(gtLabels,gtBoxes)):
            assert (gtBox.max() <= max(imgWidth, imgHeight) and gtBox.max() >= 0)
            gtLabel = gtLabel.decode('utf-8')
            if gtLabel in classes[1:]:
                for roi in rois:
                    assert (roi.max() <= max(imgWidth, imgHeight) and roi.max() >= 0)
                    overlap = bboxComputeOverlapVoc(gtBox, roi)
                    maxOverlap = max(maxOverlap, overlap)
        overlaps.append(maxOverlap)
print ("Average number of rois per image " + str(1.0 * sum(roiCounts) / len(overlaps)))
# compute recall at different overlaps
overlaps = np.array(overlaps, np.float32)
for overlapThreshold in np.linspace(0,1,11):
    recall = 1.0 * sum(overlaps >= overlapThreshold) / len(overlaps)
    print ("At threshold {:.2f}: recall = {:2.2f}".format(overlapThreshold, recall))
| StarcoderdataPython |
4831945 | <reponame>zbwa/selenium_python
"""Класс логирования"""
import logging
import inspect
def customLogger(logLevel=logging.DEBUG):
    """Create (or fetch) a logger named after the calling function.

    Records go to 'automation.log' in append mode. The file handler filters
    at *logLevel* while the logger itself stays at DEBUG, so other handlers
    can apply their own thresholds.
    """
    # Name the logger after the caller so each page object / test logs
    # under its own name.
    loggerName = inspect.stack()[1][3]
    logger = logging.getLogger(loggerName)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: logging.getLogger returns the same cached object on every
    # call, so unconditionally adding a handler duplicated every log line
    # each time customLogger() was invoked from the same function.
    if not logger.handlers:
        fileHandler = logging.FileHandler('automation.log', mode='a')
        fileHandler.setLevel(logLevel)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s',
                                      datefmt='%m/%d/%Y %I:%M:%S %p')
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)
    return logger
39139 | <gh_stars>1-10
"""
Created by <NAME>.
"""
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from textblob_de.lemmatizers import PatternParserLemmatizer
from tqdm import tqdm
from nltk.corpus import stopwords
from utilities.utilities import transform_data
import pickle
class PreprocessingText:
    """Clean German social-media text.

    Pipeline pieces: ekphrasis normalization/tokenization, stopword removal
    (keeping negations) and lemmatization via pattern's German lemmatizer.
    ``text`` may be a single string or a list of strings.
    """

    def __init__(self, text, **kwargs):
        self.text = text
        self.text_processor = TextPreProcessor(
            # terms that will be normalized e.g. an email address to <email>
            normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
                       'time', 'date', 'number'],
            # terms that will be annotated e.g. <hashtag>#test</hashtag>
            annotate={"hashtag", "allcaps", "elongated", "repeated",
                      'emphasis'},
            fix_html=True,  # fix HTML tokens
            unpack_hashtags=True,  # perform word segmentation on hashtags
            # the tokenizer takes a string and returns a list of tokens
            tokenizer=SocialTokenizer(lowercase=True).tokenize,
            # dictionaries for replacing tokens extracted from the text
            # with other expressions (e.g. emoticons -> words)
            dicts=[emoticons]
        )

    def remove_stopwords(self, data):
        """Drop German stopwords (keeping negations), custom filler words and
        ekphrasis normalization placeholders.

        data: list of token lists, or a single whitespace-separated string.
        Returns a list of token lists (list input) or a token list (str input).
        """
        stop_ger = stopwords.words('german')
        # Negations carry sentiment information, so keep them.
        allowed_stopwords = ['kein', 'keine', 'keinem',
                             'keinen', 'keiner', 'keines', 'nicht', 'nichts']
        for a in allowed_stopwords:
            stop_ger.remove(a)
        customstopwords = ['rt', 'mal', 'heute', 'gerade', 'erst', 'macht', 'eigentlich', 'warum',
                           'gibt', 'gar', 'immer', 'schon', 'beim', 'ganz', 'dass', 'wer', 'mehr', 'gleich', 'wohl']
        # BUG FIX: '<money>' was misspelled as 'money>', so money
        # placeholders produced by the normalizer above were never removed;
        # also dropped the duplicate '<url>' entry.
        normalizedwords = ['<url>', '<email>', '<percent>', '<money>',
                           '<phone>', '<user>', '<time>', '<date>', '<number>']
        stop_ger = stop_ger + customstopwords + normalizedwords

        if isinstance(data, list):
            return [[word for word in tokens if word not in stop_ger]
                    for tokens in data]
        if isinstance(data, str):
            return [word for word in data.split() if word not in stop_ger]
        return []

    def lemmatize_words(self, data):
        """Lemmatize tokens with pattern's German lemmatizer.

        data: list of token lists, or a single string.
        Returns a list of lemma lists (one inner list per document).
        """
        _lemmatizer = PatternParserLemmatizer()
        lemmatized_data = []
        if isinstance(data, list):
            for tokens in data:
                # Rebuild a sentence for the parser-based lemmatizer.
                sentence = " " + " ".join(tokens)
                lemmas = _lemmatizer.lemmatize(sentence)
                # Each item is (lemma, tag); keep only the lemma.
                lemmatized_data.append([item[0] for item in lemmas])
        if isinstance(data, str):
            lemmas = _lemmatizer.lemmatize(data)
            lemmatized_data.append([item[0] for item in lemmas])
        return lemmatized_data

    def ekphrasis_preprocessing(self):
        """Run the ekphrasis pipeline over self.text.

        Returns a list of token lists (one per input document).
        """
        X_clean = []
        if isinstance(self.text, str):
            X_clean.append(self.text_processor.pre_process_doc(self.text))
        if isinstance(self.text, list):
            for row in tqdm(self.text):
                X_clean.append(self.text_processor.pre_process_doc(row))
        return X_clean
| StarcoderdataPython |
110177 | # Update this file for version changes
__version__ = '0.5.3'
| StarcoderdataPython |
117871 | <reponame>Vimalanathan93/Udacity-DLND
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import os
import copy
from collections import OrderedDict
import time
from PIL import Image
# get dataset path
# get network parameters -hidden units,learning rate, epochs
# get model parameters -vgg16 or resnet50
# get if cuda
# training and validation log
class Network(nn.Module):
    """Feed-forward classifier head with arbitrary hidden layers.

    Hidden layers use ReLU + dropout; the output is log-softmax over
    ``output_size`` classes (suitable for nn.NLLLoss).
    """

    def __init__(self, input_size, output_size, hidden_layers, drop_p):
        """
        input_size: integer, size of the input
        output_size: integer, size of the output layer
        hidden_layers: list of integers, the sizes of the hidden layers
        drop_p: float between 0 and 1, dropout probability
        """
        super().__init__()
        # Chain the layer widths: input -> h1 -> h2 -> ... -> hN.
        widths = [input_size] + list(hidden_layers)
        self.hidden_layers = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(widths[:-1], widths[1:]))
        self.output = nn.Linear(widths[-1], output_size)
        self.dropout = nn.Dropout(p=drop_p)

    def forward(self, x):
        """Forward pass; returns per-class log-probabilities."""
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        return F.log_softmax(self.output(x), dim=1)
def get_data(path):
    """Build train/val DataLoaders from an ImageFolder directory tree.

    path: directory containing ``train`` and ``val`` subdirectories in
        torchvision ImageFolder layout (one folder per class).
    Returns (train_loader, val_loader, image_datasets_dict).
    """
    data_dir = path
    # NOTE: removed the dead ``train_dir``/``valid_dir`` locals — they were
    # built without a path separator and never used; ImageFolder paths are
    # joined below instead.
    data_transforms = {
        # Training: random crop/flip augmentation + ImageNet normalization.
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        # Validation: deterministic resize + center crop.
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    train_iter = torch.utils.data.DataLoader(image_datasets['train'], batch_size=32, shuffle=True, num_workers=4)
    val_iter = torch.utils.data.DataLoader(image_datasets['val'], batch_size=16, shuffle=True, num_workers=4)
    return train_iter, val_iter, image_datasets
def train(train_iter, val_iter, pretrainmodel, hidden_units, lr, drop_p, cuda, epochs, print_every, verbose):
    """Fine-tune a frozen pretrained backbone with a fresh classifier head.

    train_iter / val_iter: DataLoaders from get_data().
    pretrainmodel: 'resnet18' uses models.resnet18 (head at .fc);
        anything else uses models.vgg16 (head at .classifier).
    hidden_units: list of hidden-layer sizes for the Network head.
    lr / drop_p: Adam learning rate and dropout probability.
    cuda: move model and batches to the GPU when True.
    epochs / print_every / verbose: loop control and logging.
    Returns (model, optimizer, classifier).

    NOTE: written against PyTorch <= 0.3 APIs (Variable, volatile=True,
    loss.data[0]); those idioms are kept intentionally.
    """
    # Build the pretrained backbone, freeze its weights, and attach a fresh
    # fully-connected classifier head (102 flower classes).
    if pretrainmodel == 'resnet18':
        model = models.resnet18(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False  # freeze backbone
        classifier = Network(512, 102, hidden_units, drop_p)
        model.fc = classifier
    else:
        model = models.vgg16(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False  # freeze backbone
        classifier = Network(25088, 102, hidden_units, drop_p)
        model.classifier = classifier
    criterion = nn.NLLLoss()
    # BUG FIX: optimize through the ``classifier`` reference. The previous
    # ``model.classifier.parameters()`` raised AttributeError on the
    # resnet18 branch, whose head lives at ``model.fc``.
    optimizer = optim.Adam(classifier.parameters(), lr=lr)
    steps = 0
    running_loss = 0
    for e in range(epochs):
        # Model in training mode, dropout is on.
        start = time.time()
        if cuda:
            model.cuda()
        model.train()
        for images, labels in iter(train_iter):
            steps += 1
            start1 = time.time()
            inputs = Variable(images)
            targets = Variable(labels)
            if cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            output = model.forward(inputs)
            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.data[0]
            if steps % print_every == 0:
                # Periodic validation pass (dropout off).
                model.eval()
                accuracy = 0
                val_loss = 0
                for ii, (images, labels) in enumerate(val_iter):
                    inputs = Variable(images, volatile=True)
                    labels = Variable(labels, volatile=True)
                    if cuda:
                        inputs, labels = inputs.cuda(), labels.cuda()
                    output = model.forward(inputs)
                    val_loss += criterion(output, labels).data[0]
                    # Accuracy: fraction of samples whose argmax class
                    # matches the label.
                    ps = torch.exp(output).data
                    equality = (labels.data == ps.max(1)[1])
                    accuracy += equality.type_as(torch.FloatTensor()).mean()
                if verbose == 1:
                    print("Epoch: {}/{}.. ".format(e+1, epochs),
                          "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                          "Val Loss: {:.3f}.. ".format(val_loss/len(val_iter)),
                          "Val Accuracy: {:.3f}..".format(accuracy/len(val_iter)),
                          "Time taken for {} steps: {:.3f} seconds..".format(print_every, time.time()-start1)
                          )
                running_loss = 0
                # Back to training mode for the next batches.
                model.train()
        if verbose == 1:
            print("Time taken for {} epoch : {:.3f} seconds..".format(e+1, time.time()-start))
    return model, optimizer, classifier
def store_checkpoint(filepath, model, optimizer, classifier, epochs=None):
    """Serialize the fine-tuned model to *filepath* with torch.save.

    BUG FIX: this previously read the undefined globals ``pretrainmodel``,
    ``output_size`` and ``epochs`` (NameError when invoked from __main__),
    and always recorded ``'model': 'vgg16'``. The architecture is now
    detected from the model itself, the output size is taken from the
    classifier head, and ``epochs`` is an optional argument (backward
    compatible with the existing 4-argument call).

    NOTE(review): still relies on the module-global ``image_datasets`` set
    by the __main__ block — confirm callers always run get_data() first.
    """
    model.class_to_idx = image_datasets['train'].class_to_idx
    # vgg16 keeps the custom Network head at .classifier; resnet18 at .fc.
    if isinstance(getattr(model, 'classifier', None), Network):
        input_size = 25088
        model_name = 'vgg16'
    else:
        input_size = 512
        model_name = 'resnet18'
    hidden_layers = [each.out_features for each in classifier.hidden_layers]
    checkpoint = {'input_size': input_size,
                  'output_size': classifier.output.out_features,
                  'hidden_layers': hidden_layers,
                  'state_dict': model.state_dict(),
                  'epochs': epochs,
                  # BUG FIX: call state_dict() — the bound method itself was
                  # being stored (and pickled) before.
                  'optimizer_state_dict': optimizer.state_dict(),
                  'model': model_name
                  }
    torch.save(checkpoint, filepath)
if __name__ == "__main__":
import argparse
# get dataset path
# get network parameters -hidden units,learning rate, epochs
# get model parameters -vgg16 or resnet50
# get if cuda
# training and validation log
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", dest="filepath",required=True,
help="provide the directory for dataset ex: -d data/flowers")
parser.add_argument("-gpu", dest="cuda",
action="store_true", default=False,
help="if gpu is installed, default is False")
parser.add_argument("-hidden",nargs='+',dest="hidden",
required=True, default=False,
help="provide the hidden unit sizes separated by spaces ex: 4096 2000 512")
parser.add_argument("-lr",type=float,dest="lr",
default=0.0005,
help="provide learning rate, defaults to 0.0005")
parser.add_argument("-epochs",type=int,dest="epochs",
required=True,
help="provide number of epochs")
parser.add_argument("-pretrainmodel",dest="pretrainmodel",
default="vgg16",
help="provide the pretrained model to use: either vgg16 or resnet18")
parser.add_argument("-drop_p",dest="drop_p",
default=0.5,type=float,
help="Provide dropout probability, defaults to 0.5")
# parser.add_argument("-out_s",dest="out_s",
# default=102,type=int,
# help="Provide output size for the model")
parser.add_argument("-verbose",dest="verbose",
default=1,type=int,
help="Either 0- do not print training and val logs, 1- print training and val logs")
parser.add_argument("-chkp",dest="checkpoint",
required=True,
help="Provide Checkpoint file path")
# parser.add_argument("-help", "--docs",
# help=''' "-d", "--dataset" : "provide the directory for dataset ex: -d data/flowers"
# "-gpu" : "if gpu is installed, default is False"
# "-hidden": "provide the hidden unit sizes separated by spaces ex: 4096 2000 512"
# "-lr" : "provide learning rate, defaults to 0.0005"
# "-epochs" : "provide number of epochs"
# "-pretrainmodel" : "provide the pretrained model to use: either vgg16 or resnet18"
# "-drop_p" : "Provide dropout probability, defaults to 0.5"
# "-chkp" : "Provide Checkpoint file path"
# "-verbose" : "Either 0- do not print training and val logs, 1- print training and val logs"
# ''')
args = parser.parse_args()
hidden = [int(i) for i in args.hidden]
train_iter,val_iter,image_datasets = get_data(args.filepath)
model,optimizer,classifier = train(train_iter,val_iter,args.pretrainmodel,hidden,args.lr,args.drop_p,args.cuda,int(args.epochs),100,args.verbose)
store_checkpoint(args.checkpoint,model,optimizer,classifier)
| StarcoderdataPython |
1678818 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
import numpy as np
import pandas as pd
import os
import collections
from ..serialize import Serialization
from threeML.io.file_utils import file_existing_and_readable, sanitize_filename
from threeML.exceptions.custom_exceptions import custom_warnings
from ..psf_fast import PSFWrapper
from .response_bin import ResponseBin
_instances = {}
def hawc_response_factory(response_file_name):
    """Cached constructor for HAWCResponse objects.

    The same response file is typically requested many times, so one
    instance per absolute path is kept in the module-level ``_instances``
    cache and reused.

    :param response_file_name: path to a .root or .hd5/.hdf5/.hdf response
    :return: an instance of HAWCResponse
    """
    response_file_name = sanitize_filename(response_file_name, abspath=True)

    if response_file_name not in _instances:

        print("Creating singleton for %s" % response_file_name)

        # Dispatch on the file extension (ROOT vs. HDF).
        extension = os.path.splitext(response_file_name)[-1]

        if extension == ".root":
            fresh_instance = HAWCResponse.from_root_file(response_file_name)
        elif extension in ('.hd5', '.hdf5', '.hdf'):
            fresh_instance = HAWCResponse.from_hdf5(response_file_name)
        else:  # pragma: no cover
            raise NotImplementedError("Extension %s for response file %s not recognized." % (extension,
                                                                                             response_file_name))

        _instances[response_file_name] = fresh_instance

    # Return the response, whether it was cached or just built.
    return _instances[response_file_name]  # type: HAWCResponse
class HAWCResponse(object):
    """HAWC detector response.

    Holds, for each declination bin, an ordered dict of ResponseBin objects
    (one per analysis/energy bin) carrying the simulated effective area and
    PSF. Built via from_hdf5 / from_root_file through hawc_response_factory.
    """

    def __init__(self, response_file_name, dec_bins, response_bins):
        # dec_bins: list of (min_dec, center_dec, max_dec) tuples.
        # response_bins: {dec_center: OrderedDict{bin_id: ResponseBin}}.
        self._response_file_name = response_file_name
        self._dec_bins = dec_bins
        self._response_bins = response_bins

        if len(dec_bins) < 2:
            custom_warnings.warn("Only {0} dec bins given in {1}, will not try to interpolate.".format(len(dec_bins), response_file_name))
            custom_warnings.warn("Single-dec-bin mode is intended for development work only at this time and may not work with extended sources.")

    @classmethod
    def from_hdf5(cls, response_file_name):
        """
        Build response from a HDF5 file. Do not use directly, use the hawc_response_factory function instead.

        :param response_file_name:
        :return: a HAWCResponse instance
        """
        response_bins = collections.OrderedDict()

        with Serialization(response_file_name, mode='r') as serializer:

            meta_dfs, _ = serializer.retrieve_pandas_object('/dec_bins_definition')
            effarea_dfs, _ = serializer.retrieve_pandas_object('/effective_area')
            psf_dfs, _ = serializer.retrieve_pandas_object('/psf')

        # The multi-index is (dec_center, energy_bin).
        declination_centers = effarea_dfs.index.levels[0]
        energy_bins = effarea_dfs.index.levels[1]

        min_decs = []
        max_decs = []

        for dec_center in declination_centers:

            these_response_bins = collections.OrderedDict()

            for i, energy_bin in enumerate(energy_bins):

                these_meta = meta_dfs.loc[dec_center, energy_bin]

                min_dec = these_meta['min_dec']
                max_dec = these_meta['max_dec']
                dec_center_ = these_meta['declination_center']

                assert dec_center_ == dec_center, "Response is corrupted"

                # If this is the first energy bin, store the minimum and
                # maximum dec of this declination bin
                if i == 0:

                    min_decs.append(min_dec)
                    max_decs.append(max_dec)

                else:

                    # Check that the minimum and maximum declination for this bin are the same as for
                    # the first energy bin
                    assert min_dec == min_decs[-1], "Response is corrupted"
                    assert max_dec == max_decs[-1], "Response is corrupted"

                sim_n_sig_events = these_meta['n_sim_signal_events']
                sim_n_bg_events = these_meta['n_sim_bkg_events']

                this_effarea_df = effarea_dfs.loc[dec_center, energy_bin]

                sim_energy_bin_low = this_effarea_df.loc[:, 'sim_energy_bin_low'].values
                sim_energy_bin_centers = this_effarea_df.loc[:, 'sim_energy_bin_centers'].values
                sim_energy_bin_hi = this_effarea_df.loc[:, 'sim_energy_bin_hi'].values
                sim_differential_photon_fluxes = this_effarea_df.loc[:, 'sim_differential_photon_fluxes'].values
                sim_signal_events_per_bin = this_effarea_df.loc[:, 'sim_signal_events_per_bin'].values

                this_psf = PSFWrapper.from_pandas(psf_dfs.loc[dec_center, energy_bin, :])

                this_response_bin = ResponseBin(energy_bin, min_dec, max_dec, dec_center,
                                                sim_n_sig_events, sim_n_bg_events,
                                                sim_energy_bin_low,
                                                sim_energy_bin_centers,
                                                sim_energy_bin_hi,
                                                sim_differential_photon_fluxes,
                                                sim_signal_events_per_bin,
                                                this_psf)

                these_response_bins[energy_bin] = this_response_bin

            # Store the response bins for this declination bin
            response_bins[dec_center] = these_response_bins

        dec_bins = list(zip(min_decs, declination_centers, max_decs))

        return cls(response_file_name, dec_bins, response_bins)

    @classmethod
    def from_root_file(cls, response_file_name):
        """
        Build response from a ROOT file. Do not use directly, use the hawc_response_factory function instead.

        :param response_file_name:
        :return: a HAWCResponse instance
        """
        from ..root_handler import open_ROOT_file, get_list_of_keys, tree_to_ndarray

        # Make sure file is readable
        response_file_name = sanitize_filename(response_file_name)

        # Check that they exists and can be read
        if not file_existing_and_readable(response_file_name):  # pragma: no cover
            raise IOError("Response %s does not exist or is not readable" % response_file_name)

        # Read response
        with open_ROOT_file(response_file_name) as root_file:

            # Get the name of the trees
            object_names = get_list_of_keys(root_file)

            # Make sure we have all the things we need
            assert 'LogLogSpectrum' in object_names
            assert 'DecBins' in object_names
            assert 'AnalysisBins' in object_names

            # Read spectrum used during the simulation
            log_log_spectrum = root_file.Get("LogLogSpectrum")

            # Get the analysis bins definition
            dec_bins_ = tree_to_ndarray(root_file.Get("DecBins"))

            dec_bins_lower_edge = dec_bins_['lowerEdge']  # type: np.ndarray
            dec_bins_upper_edge = dec_bins_['upperEdge']  # type: np.ndarray
            dec_bins_center = dec_bins_['simdec']  # type: np.ndarray

            dec_bins = list(zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge))

            # Read in the ids of the response bins ("analysis bins" in LiFF jargon)
            try:
                response_bins_ids = tree_to_ndarray(root_file.Get("AnalysisBins"), "name")  # type: np.ndarray
            except ValueError:
                try:
                    response_bins_ids = tree_to_ndarray(root_file.Get("AnalysisBins"), "id")  # type: np.ndarray
                except ValueError:
                    # Some old response files (or energy responses) have no "name" branch
                    custom_warnings.warn("Response %s has no AnalysisBins 'id' or 'name' branch. "
                                         "Will try with default names" % response_file_name)
                    response_bins_ids = None

            # BUG FIX: only convert to strings when the branch was actually
            # found. Previously .astype(str) was called unconditionally,
            # raising AttributeError on the None fallback path and making the
            # default-names branch below unreachable.
            if response_bins_ids is not None:
                response_bins_ids = response_bins_ids.astype(str)

            # Now we create a dictionary of ResponseBin instances for each dec bin_name
            response_bins = collections.OrderedDict()

            for dec_id in range(len(dec_bins)):

                this_response_bins = collections.OrderedDict()

                min_dec, dec_center, max_dec = dec_bins[dec_id]

                # If we couldn't get the reponse_bins_ids above, let's use the default names
                if response_bins_ids is None:
                    # Default are just integers. let's read how many nHit bins are from the first dec bin
                    dec_id_label = "dec_%02i" % dec_id
                    n_energy_bins = root_file.Get(dec_id_label).GetNkeys()
                    response_bins_ids = list(range(n_energy_bins))

                for response_bin_id in response_bins_ids:
                    this_response_bin = ResponseBin.from_ttree(root_file, dec_id, response_bin_id, log_log_spectrum,
                                                               min_dec, dec_center, max_dec)
                    this_response_bins[response_bin_id] = this_response_bin

                response_bins[dec_bins[dec_id][1]] = this_response_bins

        # Now the file is closed. Let's explicitly remove f so we are sure it is freed
        del root_file

        # Instance the class and return it
        instance = cls(response_file_name, dec_bins, response_bins)

        return instance

    def get_response_dec_bin(self, dec, interpolate=False):
        """
        Get the responses for the provided declination bin, optionally interpolating the PSF

        :param dec: the declination where the response is desired at
        :param interpolate: whether to interpolate or not the PSF between the two closest response bins
        :return:
        """
        # Sort declination bins by distance to the provided declination
        dec_bins_keys = list(self._response_bins.keys())
        dec_bins_by_distance = sorted(dec_bins_keys, key=lambda x: abs(x - dec))

        # Never try to interpolate if only one dec bin is given
        if len(dec_bins_keys) < 2:
            interpolate = False

        if not interpolate:

            # Find the closest dec bin_name. We iterate over all the dec bins because we don't want to assume
            # that the bins are ordered by Dec in the file (and the operation is very cheap anyway,
            # since the dec bins are few)
            closest_dec_key = dec_bins_by_distance[0]

            return self._response_bins[closest_dec_key]

        else:

            # Find the two closest responses
            dec_bin_one, dec_bin_two = dec_bins_by_distance[:2]

            # Let's handle the special case where the requested dec is exactly on a response bin
            if abs(dec_bin_one - dec) < 0.01:
                # Do not interpolate
                return self._response_bins[dec_bin_one]

            energy_bins_one = self._response_bins[dec_bin_one]
            energy_bins_two = self._response_bins[dec_bin_two]

            # Now linearly interpolate between them.
            # Compute the weights according to the distance to the source
            w1 = old_div((dec - dec_bin_two), (dec_bin_one - dec_bin_two))
            w2 = old_div((dec - dec_bin_one), (dec_bin_two - dec_bin_one))

            new_responses = collections.OrderedDict()

            for bin_id in energy_bins_one:
                this_new_response = energy_bins_one[bin_id].combine_with_weights(energy_bins_two[bin_id], dec, w1, w2)
                new_responses[bin_id] = this_new_response

            return new_responses

    @property
    def dec_bins(self):
        # List of (min_dec, center_dec, max_dec) tuples.
        return self._dec_bins

    @property
    def response_bins(self):
        # {dec_center: OrderedDict{bin_id: ResponseBin}}
        return self._response_bins

    @property
    def n_energy_planes(self):
        # Number of analysis (energy/nHit) bins per declination bin.
        return len(list(self._response_bins.values())[0])

    def display(self, verbose=False):
        """
        Prints summary of the current object content.

        :param verbose bool: Prints the full list of declinations and analysis bins.
        """
        print("Response file: %s" % self._response_file_name)
        print("Number of dec bins: %s" % len(self._dec_bins))
        if verbose:
            print(self._dec_bins)
        print("Number of energy/nHit planes per dec bin_name: %s" % (self.n_energy_planes))
        if verbose:
            print(list(self._response_bins.values())[0].keys())

    def write(self, filename):
        """
        Write the response to HDF5.

        :param filename: output file. WARNING: it will be overwritten if existing.
        :return:
        """
        filename = sanitize_filename(filename)

        # Unravel the dec bins
        min_decs, center_decs, max_decs = list(zip(*self._dec_bins))

        # We get the definition of the response bins, as well as their coordinates (the dec center) and store them
        # in lists. Later on we will use these to make 3 dataframes containing all the needed data
        multi_index_keys = []
        effarea_dfs = []
        psf_dfs = []
        all_metas = []

        # Loop over all the dec bins (making sure that they are in order)
        for dec_center in sorted(center_decs):

            for bin_id in self._response_bins[dec_center]:
                response_bin = self._response_bins[dec_center][bin_id]
                this_effarea_df, this_meta, this_psf_df = response_bin.to_pandas()

                effarea_dfs.append(this_effarea_df)
                psf_dfs.append(this_psf_df)
                assert bin_id == response_bin.name, \
                    'Bin name inconsistency: {} != {}'.format(bin_id, response_bin.name)
                multi_index_keys.append((dec_center, response_bin.name))
                all_metas.append(pd.Series(this_meta))

        # Create the dataframes with a (dec_center, bin_name) multi-index
        effarea_df = pd.concat(effarea_dfs, axis=0, keys=multi_index_keys)
        psf_df = pd.concat(psf_dfs, axis=0, keys=multi_index_keys)
        meta_df = pd.concat(all_metas, axis=1, keys=multi_index_keys).T

        # Now write the dataframes to file
        with Serialization(filename, mode='w') as serializer:

            serializer.store_pandas_object('/dec_bins_definition', meta_df)
            serializer.store_pandas_object('/effective_area', effarea_df)
            serializer.store_pandas_object('/psf', psf_df)
| StarcoderdataPython |
176534 | <gh_stars>0
class RedbotMotorActor(object):
    """PWM driver for one RedBot motor channel.

    Uses an injected RPi.GPIO-compatible module: one PWM power pin plus a
    pair of H-bridge direction pins.
    """

    # TODO(asydorchuk): load constants from the config file.
    _MAXIMUM_FREQUENCY = 50  # PWM frequency in Hz

    def __init__(self, gpio, power_pin, direction_pin_1, direction_pin_2):
        self.gpio = gpio
        self.power_pin = power_pin
        self.direction_pin_1 = direction_pin_1
        self.direction_pin_2 = direction_pin_2
        # Configure all three pins as outputs.
        for pin in (power_pin, direction_pin_1, direction_pin_2):
            self.gpio.setup(pin, self.gpio.OUT)
        self.motor_controller = self.gpio.PWM(
            self.power_pin, self._MAXIMUM_FREQUENCY)

    def _setDirectionForward(self):
        # pin1 high / pin2 low drives the H-bridge forward.
        self.gpio.output(self.direction_pin_1, True)
        self.gpio.output(self.direction_pin_2, False)

    def _setDirectionBackward(self):
        # pin1 low / pin2 high drives the H-bridge backward.
        self.gpio.output(self.direction_pin_1, False)
        self.gpio.output(self.direction_pin_2, True)

    def _coast(self):
        # Both direction pins low: no drive is applied.
        self.gpio.output(self.direction_pin_1, False)
        self.gpio.output(self.direction_pin_2, False)

    def start(self):
        """Begin PWM output with the motor idle (zero duty cycle)."""
        self._coast()
        self.motor_controller.start(0.0)
        self.relative_power = 0.0

    def stop(self):
        """Stop PWM output and leave the motor undriven."""
        self._coast()
        self.motor_controller.stop()
        self.relative_power = 0.0

    def setPower(self, relative_power):
        """Apply signed power in [-1.0, 1.0]; the sign selects direction."""
        if relative_power < 0:
            self._setDirectionBackward()
        else:
            self._setDirectionForward()
        duty_cycle = int(100.0 * abs(relative_power))
        self.motor_controller.ChangeDutyCycle(duty_cycle)
        self.relative_power = relative_power
| StarcoderdataPython |
3217030 | # ----------------------------------------------------------
# Define resources
# ----------------------------------------------------------
import logging
from threading import Timer
from datetime import datetime
from ...kernel.agent.Action import Action
# ----------------------------------------------------------
# Define the action
# ----------------------------------------------------------
class TimeoutAction(Action):
    """Agent action that arms a one-shot timeout.

    When executed it marks the agent as waiting and starts a timer; if the
    agent is still waiting once the timer fires, a 'timeout' response event
    is dispatched.

    @See Action
    """

    def handler(self):
        """Timer callback: fire the timeout event if still pending."""
        if not self.agent.isTimeout():
            return
        self.agent.setTimeout(False)
        self.adm.sendEvent(self.agent.id, 'response', 'timeout')

    def execute(self, data):
        """Arm the timeout unless one is already pending.

        @param data dict with a 'time' entry (seconds until timeout)
        """
        if self.agent.isTimeout():
            return
        self.agent.setTimeout(True)
        timer = Timer(data['time'], self.handler)
        timer.start()

    def catchException(self, exception):
        """
        Catch the exception (intentionally a no-op).
        @param exception Response exception
        """
        pass
| StarcoderdataPython |
193226 | <reponame>hpd/general
'''
A script to create Maya lights and cameras from Otoy light stage data files
Usage:
import os
import sys
sys.path.append( "/path/to/script" )
import mayaLightStageImport as mlsi
lightStageData = "/path/to/lightStage/data"
cameraDir = os.path.join( lightStageData, "CH2_cameras" )
lightingDir = os.path.join( lightStageData, "LS_lighting_info" )
# Create lights, key frames to match light stage key frames
lightStageGroup = mlsi.createLightStageLights( lightingDir )
# Create cameras
cameraGroup = mlsi.createLightStageCameras(cameraDir)
'''
import math
import os
import sys
def parseCamera(cameraFile):
    """Parse one light-stage camera calibration file.

    Each line has the form ``KEY = v1 v2 ...``. Returns a dict mapping the
    lower-cased key to a list of floats.
    """
    camera = {}
    with open(cameraFile, 'r') as fileHandle:
        for line in fileHandle:
            # Robustness: skip blank / malformed lines without '='.
            if '=' not in line:
                continue
            tokens = line.strip().split('=')
            # list(...) so the value is a real sequence on Python 3 as well,
            # where map() returns a lazy iterator (no-op on Python 2).
            camera[tokens[0].strip().lower()] = list(map(float, tokens[1].split()))
    return camera
def readCameras(camerasDir):
    """Parse every ``cam*`` file in *camerasDir*.

    Returns {camera_name: parsed_calibration_dict}, where the camera name
    is the filename without its extension.
    """
    cameraData = {}
    for fileName in os.listdir(camerasDir):
        if not fileName.startswith("cam"):
            continue
        print(fileName)
        cameraName = fileName.split('.')[0]
        cameraData[cameraName] = parseCamera(os.path.join(camerasDir, fileName))
    return cameraData
def parseLightDirections(directionsFile):
    """Parse a light-direction file: ``<lightNumber> dx dy dz`` per line.

    Returns a dict mapping the integer light number to a list of floats.
    """
    print(directionsFile)
    directions = {}
    with open(directionsFile, 'r') as fileHandle:
        for line in fileHandle:
            tokens = line.strip().split()
            # Robustness: ignore blank lines.
            if not tokens:
                continue
            # list(...) keeps the value a real sequence under Python 3,
            # where map() is lazy (no-op on Python 2).
            directions[int(tokens[0])] = list(map(float, tokens[1:]))
    return directions
def parseLightPolarization(polarizationFile):
    """Read per-light integer flags: ``<lightNumber> <flag>`` per line.

    Returns {light_number: flag} with both values as ints.
    """
    print(polarizationFile)
    flags = {}
    with open(polarizationFile, 'r') as fh:
        for line in fh:
            parts = line.strip().split()
            flags[int(parts[0].strip())] = int(parts[1].strip())
    return flags
def parseLightConfigurations(configurationsFile):
    """Parse per-frame light configurations.

    Each line is a whitespace-separated list of light numbers; frames are
    numbered from 1 in file order. Returns {frame: [light_number, ...]}.
    """
    print(configurationsFile)
    configurations = {}
    with open(configurationsFile, 'r') as fileHandle:
        for frameNumber, line in enumerate(fileHandle, start=1):
            # list(...) for Python 2/3 compatibility (map() is lazy on 3).
            configurations[frameNumber] = list(map(int, line.strip().split()))
    return configurations
def readLights(lightingDir):
    """Collect light-stage lighting metadata from *lightingDir*.

    Recognized files (by name prefix):
      ``directions*``         -> 'directions': per-light direction vectors
      ``is_a_vertex*``        -> 'polarization': per-light flags
      ``reference_lighting*`` -> 'configurations': per-frame light lists
    Returns a dict with those keys (missing files leave keys absent).
    """
    lightData = {}
    for fileName in os.listdir(lightingDir):
        print(fileName)
        fullPath = os.path.join(lightingDir, fileName)
        if fileName.startswith("directions"):
            lightData['directions'] = parseLightDirections(fullPath)
        elif fileName.startswith("is_a_vertex"):
            lightData['polarization'] = parseLightPolarization(fullPath)
        elif fileName.startswith("reference_lighting"):
            lightData['configurations'] = parseLightConfigurations(fullPath)
    return lightData
import maya.cmds as cmds
def inchesToCentimeters(inches):
    """Convert inches to centimeters (1 in = 2.54 cm)."""
    return 2.54 * inches
def halfInchesToCentimeters(inches):
    """Convert half-inch units to centimeters (1 half-inch = 1.27 cm)."""
    return 1.27 * inches
def createLightStageLight(name, direction, diameterIn, distanceToFrontIn, useGroup=True):
    """Create one light-stage light as a Maya polygon sphere.

    name: Maya node name for the sphere.
    direction: 3-component vector from the stage center toward the light
        (presumably unit length — TODO confirm against the data files).
    diameterIn: light sphere diameter in inches.
    distanceToFrontIn: distance from stage center to the light front, inches.
    useGroup: if True, flatten the sphere into a disc, offset it along +Z and
        return a parent rotation group oriented along *direction*; otherwise
        translate the full sphere directly and return the sphere itself.
    """
    diameterCm = inchesToCentimeters(diameterIn)
    distanceToFrontCm = inchesToCentimeters(distanceToFrontIn)
    sphereLight = cmds.polySphere(name=name, r=0.5, sx=20, sy=20, ax=[0, 1, 0], cuv=2, ch=1)[0]
    # Sphere is built with radius 0.5, so a uniform scale equals the diameter.
    lightScale = diameterCm
    cmds.setAttr("%s.%s" % (sphereLight, "scaleX"), lightScale)
    cmds.setAttr("%s.%s" % (sphereLight, "scaleY"), lightScale)
    cmds.setAttr("%s.%s" % (sphereLight, "scaleZ"), lightScale)
    if useGroup:
        # Flatten into a disc facing +Z, then rotate the parent group so the
        # disc faces back toward the stage center.
        cmds.setAttr("%s.%s" % (sphereLight, "scaleZ"), 0.0)
        lightGroup = cmds.group(sphereLight, name=("%sRotation" % sphereLight))
        lightTranslate = distanceToFrontCm
        cmds.setAttr("%s.%s" % (sphereLight, "translateZ"), lightTranslate)
        # Euler angles (degrees) rotating the +Z axis onto *direction*.
        rx = -math.asin( direction[1] )*180.0/3.14159
        ry = math.atan2( direction[0], direction[2] )*180.0/3.14159
        cmds.setAttr("%s.%s" % (lightGroup, "rotateX"), rx)
        cmds.setAttr("%s.%s" % (lightGroup, "rotateY"), ry)
        return lightGroup
    else:
        # Place the full sphere directly along *direction* at front distance
        # plus one radius (sphere center offset).
        lightTranslate = map( lambda x: x*(distanceToFrontCm + diameterCm/2.), direction)
        cmds.setAttr("%s.%s" % (sphereLight, "translateX"), lightTranslate[0])
        cmds.setAttr("%s.%s" % (sphereLight, "translateY"), lightTranslate[1])
        cmds.setAttr("%s.%s" % (sphereLight, "translateZ"), lightTranslate[2])
        return sphereLight
def setLightConfigurationVisibility(lightConfigurations, lightNumber, name):
    """Key-frame the visibility of light *name* across all stage frames.

    For every frame in *lightConfigurations*, the light is visible exactly
    when *lightNumber* appears in that frame's configuration list.
    """
    for frame, litLights in lightConfigurations.iteritems():
        cmds.currentTime(frame)
        cmds.setAttr("%s.%s" % (name, "v"), lightNumber in litLights)
        cmds.setKeyframe("%s.%s" % (name, "v"))
def createLightStageLights(lightingDir):
    """Create key-framed Maya sphere lights matching the light stage rig.

    Reads directions, polarization flags and per-frame configurations from
    *lightingDir*, creates one sphere light per direction, key-frames each
    light's visibility per frame, and returns the top-level group node.
    """
    diameterIn = 4           # light sphere diameter, inches
    distanceToFrontIn = 55   # stage center to light front, inches

    lightData = readLights(lightingDir)
    # Debug dump of the parsed lighting data (renamed loop variable: the
    # original shadowed the builtin ``dict``).
    for key, value in lightData.iteritems():
        print(key)
        print(value)
    lightDirections = lightData['directions']
    lightPolarizations = lightData['polarization']
    lightConfigurations = lightData['configurations']

    lightsPolarized = []
    lightsUnpolarized = []
    for lightNumber, lightDirection in lightDirections.iteritems():
        print(lightNumber, lightDirection)
        if lightPolarizations[lightNumber] == 0:
            name = "lightStageLightUnPolarized" + str(lightNumber)
            # BUG FIX: unpolarized-named lights were appended to
            # lightsPolarized (and polarized ones to lightsUnpolarized),
            # so the two groups contained each other's lights.
            lightsUnpolarized.append(createLightStageLight(name, lightDirection, diameterIn, distanceToFrontIn))
            setLightConfigurationVisibility(lightConfigurations, lightNumber, name)
        else:
            name = "lightStageLightPolarized" + str(lightNumber)
            lightsPolarized.append(createLightStageLight(name, lightDirection, diameterIn, distanceToFrontIn))
            setLightConfigurationVisibility(lightConfigurations, lightNumber, name)

    polarizedLightsGroup = cmds.group(lightsPolarized, name="polarizedLights")
    # BUG FIX: both groups were previously created with the duplicate name
    # "polarizedLights".
    unpolarizedLightsGroup = cmds.group(lightsUnpolarized, name="unpolarizedLights")
    lightStageGroup = cmds.group([polarizedLightsGroup, unpolarizedLightsGroup], name="lightStageLights")
    return lightStageGroup
def createLightStageCamera(name, rx, ry, rz, translate, focalLengthPixels, ppX, ppY):
    """Create a Maya camera from light-stage calibration data.

    name: Maya camera name.
    rx, ry, rz: rows of the camera rotation matrix (3-vectors each).
    translate: camera position, in half-inch units (converted below).
    focalLengthPixels: focal length in pixels (sensor model below).
    ppX, ppY: principal point in pixels; mapped to film-back offsets.
    Returns the created camera transform node.
    """
    c = cmds.camera(name=name)[0]
    # Rotation
    rotationMatrix = [0.0]*16
    #for i in range(0,3): rotationMatrix[i] = rx[i]
    #for i in range(0,3): rotationMatrix[i+4] = ry[i]
    #for i in range(0,3): rotationMatrix[i+8] = rz[i]
    # Transpose the rotation matrix values (rows rx/ry/rz become columns,
    # i.e. the matrix is inverted for Maya's transform convention).
    for i in range(0,3): rotationMatrix[i*4  ] = rx[i]
    for i in range(0,3): rotationMatrix[i*4+1] = ry[i]
    for i in range(0,3): rotationMatrix[i*4+2] = rz[i]
    rotationMatrix[15] = 1.0
    cmds.xform( c, a=True, matrix=rotationMatrix )
    # Translation: calibration units are half-inches; convert to cm.
    t = map( halfInchesToCentimeters, translate )
    cmds.xform( c, a=True, translation=t )
    # Scale
    #cmds.xform( c, a=True, scale=[10.0, 10.0, 10.0] )
    # Film Back (values in inches; aperture locked, fit mode = vertical)
    cmds.setAttr( "%s.verticalFilmAperture" % c, 1.417)
    cmds.setAttr( "%s.horizontalFilmAperture" % c, 0.9449)
    cmds.setAttr( "%s.cap" % c, l=True )
    cmds.setAttr( "%s.filmFit" % c, 2)
    # Focal Length: convert pixels -> mm with the sensor model below.
    # NOTE(review): sensor width 24mm with image width 3456 px (portrait
    # orientation) — confirm against the capture-rig camera specs.
    sensorWidthMM = 24.0
    imageWidthPixels = 3456
    imageHeightPixels = 5184
    focalLengthMM = focalLengthPixels * sensorWidthMM / imageWidthPixels
    cmds.setAttr( "%s.focalLength" % c, focalLengthMM)
    # Film Back Offset: principal-point deviation from the image center,
    # expressed as a fraction of the image size.
    centerX = imageWidthPixels/2.
    offsetX = (centerX - ppX)/imageWidthPixels
    cmds.setAttr( "%s.horizontalFilmOffset" % c, offsetX)
    centerY = imageHeightPixels/2.
    offsetY = (centerY - ppY)/imageHeightPixels
    cmds.setAttr( "%s.verticalFilmOffset" % c, offsetY)
    return c
def createLightStageCameras(cameraDir):
    """Create one Maya camera per calibration entry found in *cameraDir*.

    Returns the name of a Maya group node ("lightStageCameras") that contains
    all created cameras.
    NOTE(review): uses dict.iteritems(), so this is Python 2 code.
    """
    cameraData = readCameras(cameraDir)
    mayaCameras = []
    for cam, data in cameraData.iteritems():
        # Each entry carries rotation rows 'rx'/'ry'/'rz', translation 't',
        # focal length 'fc' and principal point 'pp'.
        mayaCamera = createLightStageCamera(cam, data['rx'], data['ry'], data['rz'],
                                            data['t'], data['fc'][0], data['pp'][0], data['pp'][1])
        mayaCameras.append( mayaCamera )
    cameraGroup = cmds.group( mayaCameras, name="lightStageCameras" )
    return cameraGroup
| StarcoderdataPython |
4825033 | import datetime
from contextlib import contextmanager
from flask_sqlalchemy import BaseQuery
from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy
from sqlalchemy import asc, desc
class SQLAlchemy(_SQLAlchemy):
    """Flask-SQLAlchemy subclass adding a commit-or-rollback context manager."""

    @contextmanager
    def auto_commit(self):
        """Commit the session when the block succeeds; roll back and re-raise on error."""
        try:
            yield
            self.session.commit()
        except Exception:
            # Fix: roll back through *this* instance instead of the module-level
            # `db` global (which does not exist yet while this class body is being
            # defined and would be wrong for any second SQLAlchemy instance).
            self.session.rollback()
            # Bare `raise` preserves the original traceback.
            raise
# Shared database handle used by the model base class below and the rest of the app.
db = SQLAlchemy(query_class=BaseQuery)
class Base(db.Model):
    """Abstract model base: dict-style access plus CRUD and search helpers.

    NOTE(review): keys()/hide()/append() use ``self.fields``, which is not
    declared here — subclasses are assumed to provide it; confirm.
    """
    __abstract__ = True
    __table_args__ = {"extend_existing": True}

    def __getitem__(self, item):
        # Together with keys(), lets dict(instance) serialize a row.
        return getattr(self, item)

    def keys(self):
        return self.fields

    def hide(self, *keys):
        """Remove *keys* from the serialized fields; returns self for chaining."""
        for key in keys:
            self.fields.remove(key)
        return self

    def append(self, *keys):
        """Expose extra *keys* in the serialized fields; returns self for chaining."""
        for key in keys:
            self.fields.append(key)
        return self

    @classmethod
    def get_by_id(cls, id_):
        """Return the row with the given primary key, or None."""
        return cls.query.get(id_)

    @classmethod
    def create(cls, **kwargs):
        """Insert a row from kwargs, skipping None values and unknown columns.

        ``create_time`` defaults to now() when the model defines that column
        and the caller did not pass one.  Returns the committed instance.
        """
        base = cls()
        with db.auto_commit():
            for key, value in kwargs.items():
                if value is not None:
                    if hasattr(cls, key):
                        try:
                            setattr(base, key, value)
                        except Exception:
                            # Fix: was a bare `except:`; keep the best-effort
                            # skip of unsettable attributes but stop swallowing
                            # SystemExit/KeyboardInterrupt.
                            pass
            if hasattr(cls, 'create_time'):
                if kwargs.get('create_time'):
                    setattr(base, 'create_time', kwargs['create_time'])
                else:
                    setattr(base, 'create_time', datetime.datetime.now())
            db.session.add(base)
        return base

    def modify(self, **kwargs):
        """Update existing columns from kwargs (None values are ignored)."""
        with db.auto_commit():
            for key, value in kwargs.items():
                if value is not None:
                    if hasattr(self, key):
                        try:
                            setattr(self, key, value)
                        except Exception:
                            # Fix: narrowed from a bare `except:` (see create()).
                            pass

    def delete(self):
        """Delete this row and commit."""
        with db.auto_commit():
            db.session.delete(self)

    @classmethod
    def search(cls, **kwargs):
        """Filtered, ordered, paginated query over this model.

        String kwargs become LIKE filters, others equality filters;
        'start_date'/'end_date' bound create_time (the end date itself is
        included); 'order' maps column -> 'asc'/'desc'; 'page'/'page_size'
        paginate, with page_size == -1 returning everything.
        Returns {'meta': {...}, 'data': [...]}.
        """
        res = cls.query
        for key, value in kwargs.items():
            if value is not None:
                if hasattr(cls, key):
                    if isinstance(value, str):
                        res = res.filter(getattr(cls, key).like(value))
                    else:
                        res = res.filter(getattr(cls, key) == value)
                if key == 'start_date':
                    res = res.filter(getattr(cls, 'create_time') >= value)
                if key == 'end_date':
                    # +1 day so the whole end date is included in the range.
                    res = res.filter(getattr(cls, 'create_time') < value + datetime.timedelta(days=1))
        if kwargs.get('order'):
            for key, value in kwargs['order'].items():
                if hasattr(cls, key):
                    if value == 'asc':
                        res = res.order_by(asc(getattr(cls, key)))
                    if value == 'desc':
                        res = res.order_by(desc(getattr(cls, key)))
        page = kwargs.get('page') if kwargs.get('page') else 1
        page_size = kwargs.get('page_size') if kwargs.get('page_size') else 20
        data = {
            'meta': {
                'count': res.count(),
                'page': page,
                'page_size': page_size
            }
        }
        if page_size != -1:
            res = res.offset((page - 1) * page_size).limit(page_size)
        res = res.all()
        data['data'] = res
        return data
| StarcoderdataPython |
75574 | from pathlib import Path
from lib_bgp_simulator import BaseGraphSystemTester
from lib_bgp_simulator import BGPSimpleAS
from lib_bgp_simulator import ROVSimpleAS
from lib_bgp_simulator import Graph013
from ..unstable import Unstable
from ....as_classes import ROVPPV1SimpleAS
from ....as_classes import ROVPPV2SimpleAS
from ....as_classes import ROVPPV2aSimpleAS
from ....engine_input import ROVPPSuperprefixPrefixHijack
class BaseSuperPrefixPrefix08Tester(Unstable, BaseGraphSystemTester):
    """Shared scenario: superprefix+prefix hijack on Graph013 with AS 2 adopting."""
    GraphInfoCls = Graph013
    BaseASCls = BGPSimpleAS
    EngineInputCls = ROVPPSuperprefixPrefixHijack
    base_dir = Path(__file__).parent
    adopting_asns = (2, )
class Test061SupreprefixPrefix08(BaseSuperPrefixPrefix08Tester):
    """Same scenario with plain ROV as the adopting policy."""
    AdoptASCls = ROVSimpleAS
class Test062SupreprefixPrefix08(BaseSuperPrefixPrefix08Tester):
    """Same scenario with ROV++ v1 as the adopting policy."""
    AdoptASCls = ROVPPV1SimpleAS
class Test063SupreprefixPrefix08(BaseSuperPrefixPrefix08Tester):
    """Same scenario with ROV++ v2 as the adopting policy."""
    AdoptASCls = ROVPPV2SimpleAS
class Test064SupreprefixPrefix08(BaseSuperPrefixPrefix08Tester):
    """Same scenario with ROV++ v2a as the adopting policy."""
    AdoptASCls = ROVPPV2aSimpleAS
| StarcoderdataPython |
1726948 | # Much of the code below has been copied from
# https://github.com/google/earthengine-api/blob/master/python/ee/cli/commands.py
import sys
import datetime
import csv
import ee
class ReportWriter(object):
    """Writes CSV report rows to stdout and, optionally, to ``<filename>.csv``.

    Also accumulates ``total_size`` (MB), which is printed when the writer is
    garbage-collected.
    """

    def __init__(self, filename=None):
        self.total_size = 0
        self.writers = [csv.writer(sys.stdout)]
        self.writer_fo = None
        if filename:
            if sys.version_info[0] < 3:
                self.writer_fo = open(filename + '.csv', 'wb')
            else:
                # Fix: csv.writer targets must be opened with newline='' on
                # Python 3, otherwise rows get extra blank lines on Windows.
                self.writer_fo = open(filename + '.csv', 'w', newline='')
            self.writers.append(csv.writer(self.writer_fo))

    def __del__(self):
        # Best-effort cleanup; __del__ is not guaranteed to run at interpreter
        # shutdown, so callers that need the file flushed should close it
        # explicitly.
        if self.writer_fo and not self.writer_fo.closed:
            self.writer_fo.close()
        print('Total size [MB]: {:.2f}'.format(self.total_size))

    def writerow(self, data):
        """Write one row through every registered writer (stdout and file)."""
        # Fix: plain loop instead of a side-effect-only list comprehension.
        for writer in self.writers:
            writer.writerow(data)
def report(filename):
    """Print a size/ACL report for every Earth Engine asset under the user's roots.

    If *filename* is given, the report is also written to ``<filename>.csv``.
    """
    ee.Initialize()
    assets_root = ee.data.getAssetRoots()
    writer = ReportWriter(filename)
    writer.writerow(['Asset id', 'Type', 'Size [MB]', 'Time', 'Owners', 'Readers', 'Writers'])
    for asset in assets_root:
        # List size+name for every leaf asset, and show totals for non-leaves.
        if asset['type'] == ee.data.ASSET_TYPE_FOLDER:
            children = ee.data.getList(asset)
            for child in children:
                _print_size(child, writer)
        else:
            _print_size(asset, writer)
def get_datetime_str(epoch):
    """Render an epoch timestamp given in microseconds as a local-time
    'YYYY-MM-DD HH:MM:SS' string."""
    timestamp_s = epoch / 10**6  # microseconds -> seconds
    return datetime.datetime.fromtimestamp(timestamp_s).strftime("%Y-%m-%d %H:%M:%S")
def _print_size(asset, writer):
    """Write one report row for *asset* and add its size to the running total."""
    asset_info = ee.data.getInfo(asset['id'])
    # Prefer the size recorded in the asset's own metadata; fall back to
    # summing children (folders / image collections) when it is absent.
    if 'properties' in asset_info and 'system:asset_size' in asset_info['properties']:
        size = asset_info['properties']['system:asset_size']
    else:
        size = _get_size(asset)
    size = round(size / 1024**2, 2)  # size in MB
    type = asset_info['type']
    time = get_datetime_str(asset_info['version'])
    acl = ee.data.getAssetAcl(asset['id'])
    owners = ' '.join(acl['owners'])
    readers = ' '.join(acl['readers'])
    writers = ' '.join(acl['writers'])
    writer.writerow([asset['id'], type, size, time, owners, readers, writers])
    writer.total_size += size
def _get_size(asset):
    """Returns the size of the given asset in bytes."""
    dispatch = {
        'Folder': _get_size_folder,
        'ImageCollection': _get_size_image_collection,
    }
    asset_type = asset['type']
    if asset_type in dispatch:
        return dispatch[asset_type](asset)
    raise ee.EEException(
        'Cannot get size for asset type "%s"' % asset['type'])
def _get_size_image(asset):
    """Size in bytes of a single image asset, read from its metadata."""
    metadata = ee.data.getInfo(asset['id'])
    return metadata['properties']['system:asset_size']
def _get_size_folder(asset):
    """Total size in bytes of every child asset inside a folder."""
    return sum(_get_size(child) for child in ee.data.getList(asset))
def _get_size_image_collection(asset):
    """Total size in bytes of all images in an image collection."""
    collection = ee.ImageCollection(asset['id'])
    per_image_sizes = collection.aggregate_array('system:asset_size')
    return sum(per_image_sizes.getInfo())
if __name__ == '__main__':
    # filename=None: report goes to stdout only; pass a name to also write <name>.csv.
    report(None)
1730970 | <reponame>makemebitter/cerebro-ds
# Copyright 2020 <NAME> and <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from cerebro_gpdb.pathmagic import * # noqa
from cerebro_gpdb.utils import DBConnect
import os
import dill
# from cerebro.code.catalog import get_data_catalog
from cerebro_gpdb.pg_page_reader import table_page_read
from cerebro_gpdb.pg_page_reader import toast_page_read
from cerebro_gpdb.utils import logs
import pandas as pd
import re
import numpy as np
SYS_CAT_PATH = '/mnt/nfs/sys_cat.dill'
def input_fn(
    file_path,
    overwrite_table_page_path=None,
    overwrite_toast_page_path=None
):
    """Read one Greenplum segment's table + TOAST page files into a DataFrame.

    *file_path* is expected to look like ``.../gpsegN/base/.../<mode>`` where
    <mode> is 'train' or 'valid'; the segment id N selects the matching row of
    the system-catalog snapshot previously written to SYS_CAT_PATH.
    The overwrite_* arguments bypass the catalog-derived page-file paths
    (useful for testing).
    """
    file_path_splited = os.path.split(file_path)
    mode = file_path_splited[-1]
    logs("CURRENT MODE: {}".format(mode))
    root_dir = file_path_splited[0]
    with open(SYS_CAT_PATH, "rb") as f:
        sys_cats = dill.load(f)
    df_shape = sys_cats['shape']
    sys_cats = sys_cats[mode]
    # Fix: raw string — "\d" in a normal string is an invalid escape sequence
    # (DeprecationWarning; SyntaxWarning from Python 3.12).
    gp_segment_id = int(re.search(r"gpseg(\d+)", file_path).group(1))
    sys_cat = sys_cats.loc[sys_cats['gp_segment_id'] == gp_segment_id].iloc[0]
    table_page_path = os.path.join(root_dir, str(int(sys_cat['relfilenode'])))
    toast_page_path = os.path.join(
        root_dir, str(int(sys_cat['toast_relfilenode'])))
    table_name = sys_cat['relname']
    if overwrite_table_page_path:
        table_page_path = overwrite_table_page_path
    if overwrite_toast_page_path:
        toast_page_path = overwrite_toast_page_path
    df_data, df_toast = table_page_read(table_page_path)
    df_actual_data = toast_page_read(
        toast_page_path, df_toast, df_shape, table_name)
    return df_actual_data
class DirectAccessClient(DBConnect):
    """Collects Greenplum catalog metadata to enable direct page-file access.

    Gathers, per segment, the relfilenode / TOAST filenode ids and the buffer
    shapes of the train/valid tables, and dumps them to SYS_CAT_PATH so that
    input_fn() can read segment data files without going through the database.
    """

    def __init__(self, db_creds, db_name, train_name,
                 valid_name, size=8):
        super(DirectAccessClient, self).__init__(db_creds)
        self.db_name = db_name
        self.train_name = train_name
        self.valid_name = valid_name
        # Dataset family is inferred from the training table name.
        if 'imagenet' in self.train_name:
            self.name_like = 'imagenet'
        elif 'criteo' in self.train_name:
            self.name_like = 'criteo'
        self.size = size
        self.actual_size = 8 if size == 1 else size
        with open("../gp_configs/gphost_list_{}".format(size), 'r') as f:
            host_list = f.readlines()
        self.host_list = sorted([x.rstrip() for x in host_list])
        self.workers = [
            "http://{}:8000".format(x) for x in self.host_list
        ]
        # Fix: raw string — "\d" is an invalid escape in a normal string
        # literal (DeprecationWarning; SyntaxWarning from Python 3.12).
        self.segment_ids = [
            re.search(r"worker(\d+)", x).group(1) for x in self.host_list]
        print(self.workers)

    def get_df_att_user(self, relid):
        """pg_attribute rows for the relation with OID *relid*."""
        query_string = """ SELECT * FROM pg_catalog.pg_attribute where attrelid={}
        """.format(relid)
        schema = [
            'attrelid', 'attname', 'atttypid', 'attstattarget', 'attlen',
            'attnum', 'attndims', 'attcacheoff', 'atttypmod', 'attbyval',
            'attstorage', 'attalign', 'attnotnull', 'atthasdef',
            'attisdropped', 'attislocal', 'attinhcount'
        ]
        df_att_user = self.pd_query(query_string, schema)
        return df_att_user

    def get_df_pagefiles(self):
        """Per-segment pg_class rows (with TOAST relation info) for the dataset tables."""
        query_string = """
        select a.gp_segment_id, a.oid, a.relname, a.relfilenode,b.oid,
        b.relname, b.relfilenode, b.reltoastidxid from
        gp_dist_random('pg_class') a
        LEFT OUTER JOIN gp_dist_random('pg_class') b
        ON (a.reltoastrelid = b.oid and a.gp_segment_id = b.gp_segment_id)
        where a.relname like '%{}%';
        """.format(self.name_like)
        schema = [
            'gp_segment_id', 'oid', 'relname', 'relfilenode', 'toast_oid',
            'toast_relname', 'toast_relfilenode', 'reltoastidxid'
        ]
        df_pagefiles = self.pd_query(query_string, schema)
        return df_pagefiles

    def get_var_shape(self, table_name):
        """Buffer-id to variable-shape mapping for *table_name*."""
        query_string = """
        select __dist_key__, buffer_id,
        independent_var_shape, dependent_var_shape
        from {}""".format(table_name)
        schema = [
            '__dist_key__',
            'buffer_id',
            'independent_var_shape',
            'dependent_var_shape'
        ]
        df_var_shape = self.pd_query(query_string, schema)
        df_var_shape['table_name'] = table_name
        return df_var_shape

    def get_df_workers(self):
        """Greenplum segment configuration (dbid/content/host)."""
        query_string = """SELECT dbid,content,hostname,address FROM
        pg_catalog.gp_segment_configuration"""
        schema = ['dbid', 'content', 'hostname', 'address']
        df_pagefiles = self.pd_query(query_string, schema)
        return df_pagefiles

    def get_df_dboid(self):
        """Database name to OID mapping from pg_database."""
        query_string = """SELECT oid,datname FROM pg_catalog.pg_database"""
        schema = ['oid', 'datname']
        df_dboid = self.pd_query(query_string, schema)
        return df_dboid

    def get_df_shape(self, valid_name, train_name):
        """Concatenated shape tables for the validation and training tables."""
        df_valid_shape = self.get_var_shape(valid_name)
        df_train_shape = self.get_var_shape(train_name)
        df_shape = pd.concat([df_valid_shape, df_train_shape], axis=0)
        return df_shape

    def get_workers(self):
        """Worker HTTP endpoints, one per host."""
        return self.workers

    def cat_factory(self):
        """Skeleton data catalog: one segment data dir per worker, identity availability."""
        avalibility = np.eye(self.size, dtype=int).tolist()
        if self.size == 1 or self.size == 8:
            data_root = '/mnt/gpdata'
        else:
            data_root = '/mnt/gpdata_{}'.format(self.size)
        cat = {
            'data_root': data_root,
            'train': ['gpseg{}/base'.format(x) for x in self.segment_ids],
            'train_availability': avalibility,
            'valid': ['gpseg{}/base'.format(x) for x in self.segment_ids],
            'valid_availability': avalibility
        }
        return cat

    def generate_cats(self):
        """Build the data catalog and dump the system-catalog snapshot to SYS_CAT_PATH.

        Returns (data_cat, sys_cats).
        """
        df_dboid = self.get_df_dboid()
        dboid = df_dboid.loc[df_dboid['datname']
                             == self.db_name]['oid'].iloc[0]
        data_cat = self.cat_factory()
        data_cat['train'] = [
            'gpseg{}/base/{}/train'.format(i, dboid) for i in self.segment_ids]
        data_cat['valid'] = [
            'gpseg{}/base/{}/valid'.format(i, dboid) for i in self.segment_ids]
        df_pagefiles = self.get_df_pagefiles()
        sys_cats = {}
        for mode, relname in zip(
                ['train', 'valid'], [self.train_name, self.valid_name]):
            rows = df_pagefiles.loc[df_pagefiles['relname'] == relname]
            sys_cats[mode] = rows
        df_shape = self.get_df_shape(self.valid_name, self.train_name)
        sys_cats['shape'] = df_shape
        with open(SYS_CAT_PATH, "wb") as f:
            dill.dump(sys_cats, f)
        return data_cat, sys_cats
| StarcoderdataPython |
3261853 | <gh_stars>0
# Reads four integers a, b, c, d from stdin in that order (Python evaluates
# operands left to right) and prints a**b + c**d.
result = (int(input()) ** int(input())) + (int(input()) ** int(input()))
print(result)
| StarcoderdataPython |
114328 | <filename>senscritiquescraper/utils/search_utils.py
import logging
from bs4 import BeautifulSoup
from typing import Optional
import urllib.parse
logger = logging.getLogger(__name__)
GENRE_CHOICES = ["Morceaux", "Albums", "Films", "Livres", "Séries", "BD", "Jeux"]
def sanitize_text(text: str) -> str:
    """Encode *text* so it can be safely embedded in a URL query string."""
    encoded = urllib.parse.quote_plus(text)
    return encoded
def get_search_url(search_term: str, genre: str = None) -> str:
    """Returns the senscritique search URL for a search term."""
    term = sanitize_text(search_term)
    base_url = f"https://www.senscritique.com/search?q={term}"
    # An unknown genre falls back to the unfiltered search URL.
    if genre in GENRE_CHOICES:
        return base_url + f"&categories[0][0]={genre}"
    return base_url
def get_search_result(soup: BeautifulSoup, position: int) -> Optional[str]:
    """Returns the URL result of the BeautifulSoup object at the defined position (1-based)."""
    try:
        containers = soup.find_all(
            "div", {"class": "ProductListItem__Container-sc-1ci68b-0"}
        )
        url_list = []
        for container in containers:
            # The second <a> of each result container holds the product link.
            url_list.append(container.find_all("a")[1]["href"])
        if position > len(url_list):
            logger.error(
                f"Desired result not found in search results (Desired result: position {position}, number of search results: {len(url_list)})."
            )
            return None
        return url_list[position - 1]
    except Exception as e:
        logger.error(e)
        return None
44828 | <filename>exam_system/exams/models.py
from django.db import models
from questions.models import Question
from topics.models import Topic
class Exam(models.Model):
    """An exam: its question count, duration and active date range."""
    id = models.AutoField(primary_key = True)
    name = models.TextField()
    start_date = models.DateField()
    end_date = models.DateField()
    number_of_question = models.IntegerField()
    time_duration = models.IntegerField()  # NOTE(review): unit not stated in source — confirm (minutes?)
class ExamQuestionTopic(models.Model):
    """Join table linking an exam to a question and that question's topic."""
    id = models.AutoField(primary_key = True)
    # exam_id = models.IntegerField()
    # question_id = models.IntegerField()
    # topic_id = models.IntegerField()
    exam = models.ForeignKey(Exam, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
1677440 | <reponame>vnitinv/thrift-versioning-py
#
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class IfStatus:
    """Thrift-generated enum for interface status (autogenerated — do not edit by hand)."""
    DOWN = 0
    UP = 1
    # Wire value -> symbolic name.
    _VALUES_TO_NAMES = {
        0: "DOWN",
        1: "UP",
    }
    # Symbolic name -> wire value.
    _NAMES_TO_VALUES = {
        "DOWN": 0,
        "UP": 1,
    }
class InvalidInterfaceException(TException):
    """
    Thrift-generated exception (autogenerated by Thrift 0.9.1 — do not edit by hand).

    Attributes:
     - message
    """
    # (field id, type, name, nested spec, default) tuples indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'message', None, None, ), # 1
    )
    def __init__(self, message=None,):
        self.message = message
    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated protocol, a
        # C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Mirror of read(): accelerated encode when possible, else pure Python.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('InvalidInterfaceException')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated struct with no required fields: validation is trivial.
        return
    def __str__(self):
        return repr(self)
    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class IF:
    """
    Thrift-generated struct (autogenerated by Thrift 0.9.1 — do not edit by hand).

    Attributes:
     - if_name
     - unit
     - prefix
     - size
    """
    # (field id, type, name, nested spec, default) tuples indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'if_name', None, None, ), # 1
        (2, TType.I32, 'unit', None, None, ), # 2
        (3, TType.STRING, 'prefix', None, None, ), # 3
        (4, TType.I32, 'size', None, None, ), # 4
    )
    def __init__(self, if_name=None, unit=None, prefix=None, size=None,):
        self.if_name = if_name
        self.unit = unit
        self.prefix = prefix
        self.size = size
    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport/fastbinary allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.if_name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.unit = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.prefix = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.size = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Mirror of read(): accelerated encode when possible, else pure Python.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('IF')
        if self.if_name is not None:
            oprot.writeFieldBegin('if_name', TType.STRING, 1)
            oprot.writeString(self.if_name)
            oprot.writeFieldEnd()
        if self.unit is not None:
            oprot.writeFieldBegin('unit', TType.I32, 2)
            oprot.writeI32(self.unit)
            oprot.writeFieldEnd()
        if self.prefix is not None:
            oprot.writeFieldBegin('prefix', TType.STRING, 3)
            oprot.writeString(self.prefix)
            oprot.writeFieldEnd()
        if self.size is not None:
            oprot.writeFieldBegin('size', TType.I32, 4)
            oprot.writeI32(self.size)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated struct with no required fields: validation is trivial.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
class ReturnStatus:
    """
    Thrift-generated struct (autogenerated by Thrift 0.9.1 — do not edit by hand).

    Attributes:
     - err_code
     - traceback
    """
    # (field id, type, name, nested spec, default) tuples indexed by field id;
    # field id 2 is unused in the IDL, hence the extra None slot.
    thrift_spec = (
        None, # 0
        (1, TType.I32, 'err_code', None, None, ), # 1
        None, # 2
        (3, TType.STRING, 'traceback', None, None, ), # 3
    )
    def __init__(self, err_code=None, traceback=None,):
        self.err_code = err_code
        self.traceback = traceback
    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport/fastbinary allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode in pure Python.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.err_code = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.traceback = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Mirror of read(): accelerated encode when possible, else pure Python.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ReturnStatus')
        if self.err_code is not None:
            oprot.writeFieldBegin('err_code', TType.I32, 1)
            oprot.writeI32(self.err_code)
            oprot.writeFieldEnd()
        if self.traceback is not None:
            oprot.writeFieldBegin('traceback', TType.STRING, 3)
            oprot.writeString(self.traceback)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated struct with no required fields: validation is trivial.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
| StarcoderdataPython |
1606654 | from ..models import *
import bcrypt
def setPassword(request):
    """Create a hashed password record for a new API name.

    Returns '400' if the model cannot be instantiated, '409-1' if the name is
    already taken, '202' on success.
    NOTE(review): bcrypt is fed str() values, which only works on Python 2 —
    confirm bytes handling before porting to Python 3.
    """
    try:
        passwordObj = ApiPassword()
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return '400'
    try:
        ApiPassword.objects.get(apiName=request['apiName'])
        return "409-1"
    except Exception:
        # The lookup raised (typically DoesNotExist), so the name is free.
        passwordObj.apiName = request['apiName']
        passwordObj.apiPassword = bcrypt.hashpw(str(request['apiPassword']), bcrypt.gensalt())
        passwordObj.save()
        return '202'
def modifyPassword(request):
    """Change an API password after verifying the old one.

    Returns '404-2' if the API name is unknown, '409-1' if the old password
    does not match, '202' on success.
    """
    try:
        passwordObj = ApiPassword.objects.get(apiName=request['apiName'])
    except Exception:
        # Fix: narrowed from a bare `except:`.
        return '404-2'
    if bcrypt.checkpw(str(request['oldPassword']), str(passwordObj.apiPassword)):
        # Fix: this line had been corrupted to "<PASSWORD>.hashpw(...)" (a
        # data-scrubbing artifact, and a SyntaxError); restored the bcrypt
        # call to mirror setPassword().
        passwordObj.apiPassword = bcrypt.hashpw(str(request['newPassword']), bcrypt.gensalt())
    else:
        return "409-1"
    passwordObj.save()
    return '202'
| StarcoderdataPython |
3253641 | <reponame>daljaru/daljaru.github.io<gh_stars>0
from collections import deque

# Undirected graph as an adjacency list; vertices 0-3/8-9 and 4-7 form two
# disconnected components.
adj_list = [[2,1],
            [3,0],
            [3,0],
            [9,8,2,1],
            [5],
            [7,6,4],
            [7,5],
            [6,5],
            [3],
            [3]]
N = len(adj_list)
visited = [False] * N

def bfs(i):
    """Breadth-first traversal from vertex *i*, printing each visited vertex.

    Marks vertices in the module-level `visited` list so repeated calls cover
    disconnected components.
    """
    # Fix: use a deque — list.pop(0) is O(n) per dequeue, deque.popleft() is O(1).
    queue = deque()
    visited[i] = True
    queue.append(i)
    while queue:
        v = queue.popleft()
        print(v, ' ', end='')
        for w in adj_list[v]:
            if not visited[w]:
                visited[w] = True
                queue.append(w)
# Run BFS from every still-unvisited vertex so disconnected components are covered.
print('BFS 방문 순서:')
for i in range(N):
    if not visited[i]:
        bfs(i)
# Fix: removed the stray token "uujj" that followed this loop — it was not a
# defined name and raised NameError when the script ran.
| StarcoderdataPython |
1731263 | <reponame>StephenZhang945/P3_Behavioral-Cloning
import csv
import cv2
import numpy as np
import random
import sklearn
from sklearn.model_selection import train_test_split
samples = []
with open('./data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def generator(samples, batch_size = 30):
num_samples = len(samples)
correction = 0.2
while True:
random.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
for i in range(3):
sample_path = batch_sample[i]
sample_name = sample_path.split('/')[-1]
name = './data/IMG/'+sample_name
image1 = cv2.imread(name)
image1 = cv2.cvtColor(image1,cv2.COLOR_BGR2RGB)
# image1 = np.array(image1, dtype = np.float64)
# random_bright = .25+np.random.uniform()
# image1[:,:,2] = image1[:,:,2]*random_bright
# image1[:,:,2][image1[:,:,2]>255] = 255
# image1 = np.array(image1, dtype = np.uint8)
# image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)
center_angle = float(batch_sample[3])
images.append(image1)
if i ==1:
angles.append(min(1.0, center_angle + correction))
elif i ==2:
angles.append(max(-1.0, center_angle - correction))
else:
angles.append(center_angle)
aug_images, aug_angles = [], []
for image, angle in zip(images, angles):
aug_images.append(image)
aug_angles.append(angle)
aug_images.append(cv2.flip(image, 1))
aug_angles.append(angle*-1.0)
X_train = np.array(aug_images)
y_train = np.array(aug_angles)
yield sklearn.utils.shuffle(X_train, y_train)
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
train_generator = generator(train_samples, batch_size=30)
validation_generator = generator(validation_samples, batch_size=30)
model = Sequential()
model.add(Lambda(lambda x : (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping = ((70,25),(0,0))))
model.add(Convolution2D(24,5,5, subsample=(2,2), activation= 'relu'))
model.add(ELU())
model.add(Dropout(0.5))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation= 'relu'))
model.add(ELU())
model.add(Convolution2D(48,5,5, subsample=(2,2), activation= 'relu'))
model.add(ELU())
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(ELU())
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(ELU())
model.add(Dense(50))
model.add(ELU())
model.add(Dense(10))
model.add(ELU())
model.add(Dense(1))
model.compile(loss = 'mse', optimizer = 'adam')
model.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3, verbose = 1)
model.save('model06.h5')
from keras.models import load_model
new_model = load_model('model06.h5')
model.summary()
| StarcoderdataPython |
1768877 | <gh_stars>0
from django.contrib import admin
from hello.models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
search_fields = ('title', 'content')
admin.site.register(Post, PostAdmin)
| StarcoderdataPython |
1690853 | import yaml
from mule.task.mule import get_configs, list_agents, list_env, list_jobs, list_tasks
from mule.task.error import messages
from mule.logger import logger, start_debug
import mule.task.parser
from mule.task import Job
from mule.util import JobContext, prettify_json
import mule.util.yaml.env_var_loader as yaml_util
import mule.task.validator
def _execute_job(job_config):
job = Job(job_config)
job_context = JobContext(job_config)
return job.execute(job_context)
def _get_job_config(mule_config, job):
job_def = mule_config["jobs"].get(job)
job_configs = job_def.get("configs", {})
tasks = job_def.get("tasks", [])
mule_agents = mule_config.get("agents", [])
task_configs = []
agents = []
for job_task in tasks:
task = _get_task(mule_config, job_task)
task_configs.append(task)
if "dependencies" in task:
for dependency in task["dependencies"]:
task = _get_task(mule_config, dependency)
task_configs.append(task)
if len(mule_agents):
all_agents = {}
for agent in mule_agents:
all_agents[agent.get("name")] = agent
agents_names = list({task.get("agent") for task in task_configs if task.get("agent")})
agents = [all_agents[name] for name in agents_names]
return {
"name": job,
"configs": job_configs,
"agents": agents,
"tasks": tasks,
"task_configs": task_configs,
}
def _get_task(mule_config, job_task):
for task in mule_config["tasks"]:
if "name" in task:
name = ".".join((task["task"], task["name"]))
else:
name = task["task"]
if name == job_task:
return task
def _yaml_read_raw(args):
return args.list_agents or args.list_env
def main():
args = mule.task.parser.parseArgs()
try:
mule_yamls = args.file
if not len(mule_yamls):
mule_yamls.append("mule.yaml")
if args.recipe:
plugins = mule.validator.get_plugin("saddle")
if not len(plugins):
raise Exception(messages.PLUGIN_NOT_FOUND.format("saddle"))
else:
saddle = plugins[0]
out = saddle.compiler.compile(args.recipe)
job_yaml = yaml.safe_load(out)
job_configs = yaml_util.read_yaml(job_yaml.get("items"), raw=False)
for j_c in job_configs:
_execute_job(j_c)
return
if args.debug:
start_debug(args)
mule_config = get_configs(mule_yamls, raw=_yaml_read_raw(args))
if args.list_agents:
if args.verbose:
print(prettify_json({agent.get("name"): agent for agent in mule_config.get("agents")}))
else:
print("\n".join(list_agents(mule_config.get("agents"))))
elif args.list_jobs:
if args.verbose:
jobs_config = mule_config.get("jobs")
print(prettify_json({job: jobs_config.get(job) for job in jobs_config}))
else:
print("\n".join(list_jobs(mule_config.get("jobs"))))
elif args.list_env:
print(prettify_json(list_env(mule_config, args.list_env, args.verbose)))
elif args.list_tasks:
if args.verbose:
print(prettify_json({task.get("name"): task for task in mule_config.get("tasks")}))
else:
print("\n".join(list_tasks(mule_config.get("tasks"))))
else:
if args.job not in mule_config.get("jobs"):
raise Exception(messages.JOB_NOT_FOUND.format(args.job))
job_config =_get_job_config(mule_config, args.job)
_execute_job(job_config)
except Exception as error:
logger.error(messages.MULE_DRIVER_EXCEPTION.format(args.job, args.file, error))
raise error
| StarcoderdataPython |
1737751 | # Generated by Django 2.0.2 on 2018-07-20 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_app', '0017_auto_20180720_0446'),
]
operations = [
migrations.AlterField(
model_name='regularuser',
name='event',
field=models.ManyToManyField(blank=True, null=True, to='user_app.Event'),
),
]
| StarcoderdataPython |
7146 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AcquiredPhoneNumbers
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import PhoneNumberCapabilities
from ._models_py3 import PhoneNumberCapabilitiesRequest
from ._models_py3 import PhoneNumberCost
from ._models_py3 import PhoneNumberOperation
from ._models_py3 import PhoneNumberPurchaseRequest
from ._models_py3 import PhoneNumberSearchRequest
from ._models_py3 import PhoneNumberSearchResult
from ._models_py3 import PurchasedPhoneNumber
except (SyntaxError, ImportError):
from ._models import AcquiredPhoneNumbers # type: ignore
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import PhoneNumberCapabilities # type: ignore
from ._models import PhoneNumberCapabilitiesRequest # type: ignore
from ._models import PhoneNumberCost # type: ignore
from ._models import PhoneNumberOperation # type: ignore
from ._models import PhoneNumberPurchaseRequest # type: ignore
from ._models import PhoneNumberSearchRequest # type: ignore
from ._models import PhoneNumberSearchResult # type: ignore
from ._models import PurchasedPhoneNumber # type: ignore
from ._phone_numbers_client_enums import (
BillingFrequency,
PhoneNumberAssignmentType,
PhoneNumberCapabilityType,
PhoneNumberOperationStatus,
PhoneNumberOperationType,
PhoneNumberType,
)
__all__ = [
'AcquiredPhoneNumbers',
'CommunicationError',
'CommunicationErrorResponse',
'PhoneNumberCapabilities',
'PhoneNumberCapabilitiesRequest',
'PhoneNumberCost',
'PhoneNumberOperation',
'PhoneNumberPurchaseRequest',
'PhoneNumberSearchRequest',
'PhoneNumberSearchResult',
'PurchasedPhoneNumber',
'BillingFrequency',
'PhoneNumberAssignmentType',
'PhoneNumberCapabilityType',
'PhoneNumberOperationStatus',
'PhoneNumberOperationType',
'PhoneNumberType',
]
| StarcoderdataPython |
3371520 | <filename>simple_neural_net/prediction_models.py
import numpy as np
class PredictionModel:
    """Base class for converting raw network outputs into predictions.

    ``outputs`` has shape (num_outputs, num_examples); the returned
    predictions have shape (?, num_examples).
    """

    def predictions(self, outputs):
        """Subclasses must override this to map outputs to predictions."""
        raise NotImplementedError('Method must be implemented by child class')


class BinaryClassificationPredictionModel(PredictionModel):
    """Rounds sigmoid-style outputs to hard 0/1 class labels."""

    def predictions(self, outputs):
        """Return integer labels of shape (1, num_examples) for *outputs*."""
        rounded = np.around(outputs)
        return rounded.astype('int')
| StarcoderdataPython |
26171 | <filename>calico/etcddriver/test/test_hwm.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_hwm
~~~~~~~~
Tests for high water mark tracking function.
"""
import logging
from unittest import TestCase
from mock import Mock, call, patch
from calico.etcddriver import hwm
from calico.etcddriver.hwm import HighWaterTracker
_log = logging.getLogger(__name__)
class TestHighWaterTracker(TestCase):
    """Scenario tests for HighWaterTracker's snapshot/event-stream merging."""
    def setUp(self):
        # A fresh tracker for every test method; no state is shared.
        self.hwm = HighWaterTracker()
    def test_mainline(self):
        """Runs the whole snapshot-merge lifecycle as one ordered scenario.

        Every assertion depends on the exact sequence of preceding calls,
        which is why this is a single long test rather than several.
        """
        # Test merging of updates between a snapshot with etcd_index 10 and
        # updates coming in afterwards with indexes 11, 12, ...
        # We use prefix "/a/$" because $ is not allowed in the trie so it
        # implicitly tests encoding/decoding is being properly applied.
        old_hwm = self.hwm.update_hwm("/a/$/c", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/b/c/d", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/j/c/d", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 3)
        # While merging a snapshot we track deletions.
        self.hwm.start_tracking_deletions()
        # Send in some keys from the snapshot.
        old_hwm = self.hwm.update_hwm("/a/$/c", 10) # From snapshot
        self.assertEqual(old_hwm, 9)
        old_hwm = self.hwm.update_hwm("/a/$/d", 10) # From snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/d/e/f", 10) # From snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)
        # This key is first seen in the event stream, so the snapshot version
        # should be ignored.
        old_hwm = self.hwm.update_hwm("/a/h/i", 11) # From events
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/a/h/i", 10) # From snapshot
        self.assertEqual(old_hwm, 11)
        old_hwm = self.hwm.update_hwm("/a/h/i", 12) # From events
        self.assertEqual(old_hwm, 11) # Still 11, snapshot ignored.
        self.assertEqual(len(self.hwm), 6)
        # Then a whole subtree gets deleted by the events.
        deleted_keys = self.hwm.store_deletion("/a/$", 13)
        self.assertEqual(set(deleted_keys), set(["/a/$/c", "/a/$/d"]))
        self.assertEqual(len(self.hwm), 4)
        # But afterwards, we see a snapshot key within the subtree, it should
        # be ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        # Then a new update from the event stream, recreates the directory.
        old_hwm = self.hwm.update_hwm("/a/$/f", 14)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)
        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 15)
        self.assertEqual(old_hwm, 14)
        # However, snapshot updates from within the deleted subtree are still
        # ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/g", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        self.assertEqual(len(self.hwm), 5)
        # But ones outside the subtree are not.
        old_hwm = self.hwm.update_hwm("/f/g", 10)
        self.assertEqual(old_hwm, None)
        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 16)
        self.assertEqual(old_hwm, 15)
        # End of snapshot: we stop tracking deletions, which should free up the
        # resources.
        self.hwm.stop_tracking_deletions()
        self.assertEqual(self.hwm._deletion_hwms, None)
        # Then, subsequent updates should be handled normally.
        old_hwm = self.hwm.update_hwm("/a/$/f", 17)
        self.assertEqual(old_hwm, 16) # From previous event
        old_hwm = self.hwm.update_hwm("/g/b/f", 18)
        self.assertEqual(old_hwm, None) # Seen for the first time.
        old_hwm = self.hwm.update_hwm("/d/e/f", 19)
        self.assertEqual(old_hwm, 10) # From the snapshot.
        self.assertEqual(len(self.hwm), 7)
        # We should be able to find all the keys that weren't seen during
        # the snapshot.
        old_keys = self.hwm.remove_old_keys(10)
        self.assertEqual(set(old_keys), set(["/b/c/d", "/j/c/d"]))
        self.assertEqual(len(self.hwm), 5)
        # They should now be gone from the index.
        old_hwm = self.hwm.update_hwm("/b/c/d", 20)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 6)
class TestKeyEncoding(TestCase):
    """Round-trip tests for hwm.encode_key / hwm.decode_key."""
    def test_encode_key(self):
        # Encoding appends a trailing slash and percent-encodes anything
        # outside the safe set (":", "_", "-", "." stay literal).
        self.assert_enc_dec("/calico/v1/foo/bar", "/calico/v1/foo/bar/")
        self.assert_enc_dec("/:_-./foo", "/:_-./foo/")
        self.assert_enc_dec("/:_-.~/foo", "/:_-.%7E/foo/")
        self.assert_enc_dec("/%/foo", "/%25/foo/")
        # Non-ASCII characters are UTF-8 encoded, then percent-escaped.
        self.assert_enc_dec(u"/\u01b1/foo", "/%C6%B1/foo/")
        # A key that already ends in "/" must not gain a second slash.
        self.assertEqual(hwm.encode_key("/foo/"), "/foo/")
    def assert_enc_dec(self, key, expected_encoding):
        """Assert *key* encodes to *expected_encoding* and decodes back to *key*."""
        encoded = hwm.encode_key(key)
        self.assertEqual(
            encoded,
            expected_encoding,
            msg="Expected %r to encode as %r but got %r" %
                (key, expected_encoding, encoded))
        decoded = hwm.decode_key(encoded)
        self.assertEqual(
            decoded,
            key,
            msg="Expected %r to decode as %r but got %r" %
                (encoded, key, decoded))
| StarcoderdataPython |
21271 | <reponame>AliRzvn/HW1
import numpy as np
from module import Module
class Linear(Module):
    """Fully connected layer computing ``out = x @ W + b``.

    ``backward`` also accumulates an L2 weight-decay term scaled by
    ``l2_coef`` into the weight gradient.
    """
    def __init__(self, name, input_dim, output_dim, l2_coef=.0):
        super(Linear, self).__init__(name)
        self.l2_coef = l2_coef  # coefficient of l2 regularization.
        self.W = np.random.randn(input_dim, output_dim)  # weights of the layer.
        self.b = np.random.randn(output_dim, )  # biases of the layer.
        self.dW = None  # gradients of loss w.r.t. the weights.
        self.db = None  # gradients of loss w.r.t. the biases.
    def forward(self, x, **kwargs):
        """
        x: input array; assumed shape (num_examples, input_dim) to match
           W's (input_dim, output_dim) layout -- TODO confirm against caller.
        out: (num_examples, output_dim) affine transform of x.
        """
        # Keep the input around: backward() needs it for the weight gradient.
        # NOTE(review): assumes self.cache is the conventional slot used by
        # the Module base class for saved forward-pass state.
        self.cache = x
        out = x.dot(self.W) + self.b
        return out
    def backward(self, dout):
        """
        dout: gradients of Loss w.r.t. this layer's output,
              shape (num_examples, output_dim).
        dx: gradients of Loss w.r.t. this layer's input,
            shape (num_examples, input_dim).
        """
        x = self.cache
        dx = dout.dot(self.W.T)
        # NOTE(review): assumes the loss includes (l2_coef / 2) * ||W||^2, so
        # the penalty gradient is l2_coef * W -- confirm the loss convention.
        self.dW = x.T.dot(dout) + self.l2_coef * self.W
        self.db = dout.sum(axis=0)
        return dx
| StarcoderdataPython |
95060 | <filename>draugr/torch_utilities/tensors/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__doc__ = r"""
Created on 21/02/2020
"""
from pathlib import Path
# Append the package README to the module docstring. Use an explicit
# encoding: the platform default is locale-dependent and could mis-decode
# non-ASCII characters in the README.
with open(Path(__file__).parent / "README.md", "r", encoding="utf-8") as this_init_file:
    __doc__ += this_init_file.read()
from .info import *
from .mixins import *
from .normalise import *
from .reshaping import *
from .tensor_container import *
from .to_scalar import *
from .to_tensor import *
from .dimension_order import *
from .types import *
| StarcoderdataPython |
1783317 | from .signals import events
| StarcoderdataPython |
3267724 | from django.db import models
from django.utils import timezone
from django.contrib.postgres.fields import JSONField
# Create your models here.
# Extra fields to be added in the json file: related_entity, reputation_dimension, sentiment_score
def _default_tweet_id():
    """Per-row fallback primary key.

    The original inline default ('Undefined: ' + str(timezone.now())) was
    evaluated once at import time, so every defaulted row shared the same
    timestamp; a callable is evaluated per row.
    """
    return 'Undefined: ' + str(timezone.now())


def _default_tweet_payload():
    """Fresh dict per row; a dict literal default would be shared and mutable."""
    return {'message': 'undefined'}


class Tweet(models.Model):
    """A raw tweet, keyed by its Twitter id and stored as JSON."""
    tweet_id = models.CharField(max_length=50, primary_key=True, default=_default_tweet_id)
    tweet = JSONField(default=_default_tweet_payload)
    created_at = models.DateTimeField(default=timezone.now)

    def __unicode__(self):
        return 'tweet_id: ' + self.tweet['id_str']
    # Python 3 (and the Django admin) use __str__; keep __unicode__ for py2.
    __str__ = __unicode__
# Table to store training set of tweets
def _default_training_tweet():
    """Fresh dict per row; a dict literal default would be shared and mutable."""
    return {'message': 'undefined'}


class TweetTrainingSet(models.Model):
    """A tweet reserved for the classifier's training set."""
    tweet = JSONField(default=_default_training_tweet)

    def __unicode__(self):
        return 'tweet_id: ' + self.tweet['id_str']
    # Python 3 (and the Django admin) use __str__; keep __unicode__ for py2.
    __str__ = __unicode__
# Table for statistics, the front end will use the data in this table to draw graphs
class Statistics(models.Model):
    """Per-day aggregated sentiment counts; the front end reads this table
    to draw graphs (see the comment above this class)."""
    related_entity = models.CharField(max_length=20, default='Undefined')
    reputation_dimension = models.CharField(max_length=20, default='Whole')
    timestamp = models.DateField(default=timezone.now)
    total_tweets_count = models.IntegerField(default=0)
    negative_count = models.IntegerField(default=0)
    # This attribute takes into account of how negative each tweets are
    reputation_score = models.FloatField(default=0)
| StarcoderdataPython |
3248056 | '''
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
This walks all of the combinations of metrics, dimensions, and aggregations.
METRICS - contains descriptions of the metric to be pulled and the dimensions
for that metric. See also the docs here:
https://opendistro.github.io/for-elasticsearch-docs/docs/pa/reference/.
'''
import json
import requests
class NodeTracker():
    ''' Discovers nodes in the cluster, and holds a map from node name to
        ip address. Construct the object, then use ip() to retrieve the
        address from the node name.'''
    def __init__(self, args):
        ''' Constructs a local dict, and fills it.

        args must expose an ``endpoint`` attribute (the cluster hostname).
        '''
        self._args = args
        self._nodes_map = dict()  # node id -> ip address
        self._retrieve_node_ids_and_ips()
    def _retrieve_node_ids_and_ips(self):
        ''' Use the _nodes API to pull the id and IP for all of the nodes in
            the cluster. Raises on any HTTP status >= 300. '''
        url = 'https://{}:9200/_nodes'.format(self._args.endpoint)
        # NOTE(review): verify=False disables TLS certificate verification,
        # and there is no timeout, so this call can hang indefinitely --
        # both worth fixing alongside the auth hack below.
        response = requests.get(url,
                                ### HACK ALERT !!! TODO TODO TODO!!! Add real auth ###
                                auth=('admin', 'admin'),
                                verify=False)
        if int(response.status_code) >= 300:
            raise Exception('Bad response code trying to get node names and ips')
        json_response = json.loads(response.text)
        if 'nodes' not in json_response:
            raise Exception('Bad response - no nodes')
        for node_id, values in json_response['nodes'].items():
            self._nodes_map[node_id] = values['ip']
    def ip(self, node_name):
        """Return the IP for *node_name*; raises ValueError if unknown."""
        if node_name in self._nodes_map:
            return self._nodes_map[node_name]
        raise ValueError('{} is not a recognized node name'.format(node_name))
    def print_table(self):
        """Print every known node id and its IP, one pair per line."""
        for name, ip in self._nodes_map.items():
            print(' {} {}'.format(name, ip))
| StarcoderdataPython |
69088 | <filename>DataFrameDemo.py
#ss DataFrameDemo.py
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import mean
from pyspark.sql.types import Row
from pyspark.sql.functions import pandas_udf
import pandas as pd
from pyspark.sql.functions import expr
from pyspark.sql.functions import sum
from pyspark.sql.functions import first
if __name__ == '__main__':
spark = SparkSession.builder.appName("DataFrameCreation").getOrCreate()
def somefunc(row: Row): print(f" {row['CarrierTrackingNumber']}")
print("----------------------------------------------------------------------")
print(" DataFrame Demo")
print("----------------------------------------------------------------------")
filepathParquet = ".\\resources\\parquet\\AdventureWorks-oltp\\Sales.SalesOrderDetail.parquet"
df = spark.read.format("parquet").load(filepathParquet)
df.cache()
df.collect() # Return the DataFrame as a list of Row
print('Basic statistics')
df.describe(["UnitPrice"]).show() # Compute basic statistics
df.dtypes # List of tuples of the types of each column
df.select("CarrierTrackingNumber") # Select a single column
df.select(col("CarrierTrackingNumber"))
df.select(df["CarrierTrackingNumber"])
df.select("SalesOrderID", "ProductID") # Select multiple columns
df.select(df["UnitPrice"] / 1000).alias("PriceK") # Select, transform and alias a column
df.select("ProductID").distinct() # Select unique values of ProductID
df.filter(df["UnitPrice"] > 1000) # Select only certain rows that fit a criteria
df.first() # First row of the DataFrame; results in Row
df.head() # First row of the DataFrame; results in Row
df.head(5) # First 5 rows of the DataFrame
print("\n5 carrier tracking numbers")
df.limit(5).foreach(somefunc) # 5 rows from DataFrame and apply a function to each row
print('\nSchema:')
df.printSchema() # Print schema
df.select("SalesOrderID", "ProductID").show(5) # Show top 5 rows. Default of 20
df.summary() # Same as describe but with percentiles
df.tail(5) # Last 5 rows
df.createOrReplaceTempView("table") # Create a temporary view for using SQL on
spark.sql("SELECT * FROM table")
grouped = df.groupBy("SalesOrderID") # Group by a column
grouped.agg(mean(df["LineTotal"]).alias("AvgLineTotal")) # Aggregate after grouping
dfpd = df.toPandas() # Convert to pandas DataFrame
srs = dfpd.loc[:, 'LineTotal'] # Get the series representing a column
# Create a new DataFrame
rowData = [("foo","A",10,"14ZX")
,("foo","B",14,"52OS")
,("foo","C",28,"37AT")
,("foo","D",57,"49WE")
,("foo","B",13,"14ZX")
,("bar","Z",50,"88EE")
,("bar","G",24,"33TY")
,("bar","O",63,"43BR")
,("baz","A",77,"29BT")
,("qaz","J",44,"12AB")]
colnames = ["Name","Letter","Num","Code"]
df2 = spark.createDataFrame([Row(**dict(zip(colnames, row))) for row in rowData])
print('Before unpivot:')
df2.show()
# Unpivot the DataFrame
# expr("stack(number of columns, Code1, column1, Code2, column 2 as (ColumnName for Codes, Values)")
df3 = df2.select("Name", "Letter", expr("stack(2, 'Code', Code, 'Num', string(Num)) as (Column, Value)"))
print('After unpivot:')
df3.show()
# Pivot the DataFrame
df2.groupBy("Name").pivot("Letter")\
.agg(sum("Num").alias("Num")
,first("Letter").alias("Letter")).show()
spark.stop() | StarcoderdataPython |
1783015 | <gh_stars>1-10
from django.shortcuts import render
from rest_framework import viewsets
from ..models import NewsHeading
from ..serializers import NewsHeadingSerializer
class NewsHeadingViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoints (list/retrieve) for NewsHeading records,
    ordered by primary key."""
    queryset = NewsHeading.objects.all().order_by('pk')
    serializer_class = NewsHeadingSerializer
| StarcoderdataPython |
3277148 | import demistomock as demisto
from CommonServerPython import *
def get_query(cre_name_null):
    """Build the AQL events query for an offense's CRE correlations.

    When *cre_name_null* is the string "False", the query selects events
    whose "CRE NAME" is set; otherwise it selects events where it is NULL.
    The returned template still expects .format(offense_id, start_time).
    """
    if cre_name_null == "False":
        cre_clause = "\"CRE NAME\" <> NULL"
    else:
        cre_clause = "\"CRE NAME\" IS NULL"
    return ("SELECT *,\"CRE Name\",\"CRE Description\",CATEGORYNAME(highlevelcategory) "
            "FROM events WHERE " + cre_clause + " AND INOFFENSE({0}) START '{1}'")
def main():
    """Search QRadar events for an offense and build CRE correlation context.

    Reads offenseID/startTime from the incident arguments, runs
    QRadarFullSearch with the query from get_query(), and promotes
    well-known event fields into QRadar.Correlation entries.
    """
    d_args = demisto.args()
    is_cre_name_null = d_args.get("is_cre_name_null", "True")
    QUERY = get_query(is_cre_name_null)
    offense_id = demisto.get(d_args, 'offenseID')
    start_time = demisto.get(d_args, 'startTime')
    # Try converting from date string to timestamp
    try:
        start_time = date_to_timestamp(str(start_time), '%Y-%m-%dT%H:%M:%S.%f000Z')
    except Exception:
        # Best effort: leave start_time as-is if it is already a timestamp.
        pass
    d_args["query_expression"] = QUERY.format(offense_id, start_time)
    resp = demisto.executeCommand('QRadarFullSearch', d_args)
    if isError(resp[0]):
        demisto.results(resp)
    else:
        data = demisto.get(resp[0], 'Contents.events')
        if not data:
            resp[0]['HumanReadable'] = "No Correlations were found for offense id {0}".format(offense_id)
        else:
            data = data if isinstance(data, list) else [data]
            QRadar = {
                'Correlation': []
            }  # type: Dict
            # (standardized output key, raw event key) pairs promoted below.
            field_map = (
                ("SourceIP", "sourceip"),
                ("CREDescription", "CRE Description"),
                ("CREName", "CRE Name"),
                ("QID", "qid"),
                ("DestinationIP", "destinationip"),
                ("Category", "categoryname_highlevelcategory"),
                ("CategoryID", "category"),
                ("Username", "username"),
                ("StartTime", "starttime"),
            )
            for corr in data:
                # list() copy is required: in Python 3 dict.keys() returns a
                # view, which has no remove() method.
                keys = list(corr.keys())
                correlation = {}  # type: Dict
                # Standardized known keys
                for out_key, raw_key in field_map:
                    correlation[out_key] = demisto.get(corr, raw_key)
                    if raw_key in keys:
                        keys.remove(raw_key)
                # Push to context rest of the keys (won't be shown in 'outputs')
                for key in keys:
                    correlation[''.join(x for x in key.title() if not x.isspace())] = demisto.get(corr, key)
                QRadar['Correlation'].append(correlation)
            context = {"QRadar": QRadar}
            resp[0]['EntryContext'] = context
        demisto.results(resp)
# python2 uses __builtin__ python3 uses builtins
if __name__ in ('__builtin__', 'builtins'):
main()
| StarcoderdataPython |
1694972 | <filename>utils/request.py
from dataclasses import dataclass
from typing import Dict, Any
import requests
@dataclass
class Response:
    """Normalized result of an HTTP call made through APIRequest."""
    status_code: int  # numeric HTTP status code, e.g. 200
    text: str  # raw response body as text
    as_dict: object  # body parsed as JSON, or {} when parsing failed
    headers: dict  # response headers (requests supplies a case-insensitive mapping)
class APIRequest:
    """Thin wrapper over ``requests`` that normalizes every verb into a
    local :class:`Response` object."""

    def get(self, url: str) -> Response:
        """Issue a GET request to *url*."""
        response = requests.get(url)
        return self.__get_responses(response)

    def post(self, url: str, payload: Dict[Any, Any], headers: Dict[str, str]) -> Response:
        """Issue a POST to *url* with form *payload* and extra *headers*."""
        response = requests.post(url, data=payload, headers=headers)
        return self.__get_responses(response)

    def put(self, url: str, payload: Dict[Any, Any], headers: Dict[str, str]) -> Response:
        """Issue a PUT to *url* with form *payload* and extra *headers*."""
        response = requests.put(url, data=payload, headers=headers)
        return self.__get_responses(response)

    def delete(self, url: str) -> Response:
        """Issue a DELETE request to *url*."""
        response = requests.delete(url)
        return self.__get_responses(response)

    def __get_responses(self, response: "requests.models.Response") -> Response:
        """Convert a raw requests response into the local Response dataclass."""
        status_code = response.status_code
        text = response.text
        try:
            as_dict = response.json()
        except ValueError:
            # requests raises a ValueError subclass (JSONDecodeError) when the
            # body is not valid JSON; fall back to an empty dict. The original
            # blanket `except Exception` also hid unrelated bugs.
            as_dict = {}
        headers = response.headers
        return Response(
            status_code, text, as_dict, headers
        )
| StarcoderdataPython |
4807210 | from github.celery import app as celery_app | StarcoderdataPython |
1726546 | from django_de.global_settings import *
import dj_database_url
# Database comes from the DATABASE_URL environment variable (Heroku style).
DATABASES = {'default': dj_database_url.config()}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = (
    'djangode.herokuapp.com',
    'django-de.org',
    'www.django-de.org',
    'localhost',
)
# All credentials below come from the environment; nothing is committed.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
DEBUG = False
TEMPLATE_DEBUG = False
ADMINS = (
    ('<NAME>', os.getenv('DJANGO_ADMIN_EMAIL')),
)
MANAGERS = ADMINS
# Static and media files are served from S3 via django-storages/boto.
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# NOTE(review): static assets are referenced over plain http even though the
# site can run behind https -- consider 'https://' or a scheme-relative URL.
STATIC_URL = 'http://' + AWS_STORAGE_BUCKET_NAME + '.s3.amazonaws.com/'
TWITTER_USERNAME = 'djangode'
TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET')
TWITTER_ACCESS_TOKEN_KEY = os.getenv('TWITTER_ACCESS_TOKEN_KEY')
TWITTER_ACCESS_TOKEN_SECRET = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
| StarcoderdataPython |
70872 | <reponame>vfloeser/TumorDelivery<filename>plotuw.py
##########################################################################################
# G E N E R A L I N F O #
# #
# Plot u or w, depending on configuration #
# #
##########################################################################################
from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
import logging, getopt, sys
import time
import os
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 16})
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
var = u1
al = 1 # 1, 3, 5, 10 or 25
mode = "u" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == '__main__':
    # Make sure the output folders exist before any savefig call.
    if not os.path.exists('save'):
        os.makedirs('save')
        print('Created folder save!')
    if not os.path.exists('plots'):
        os.makedirs('plots')
        print('Created folder plots!')
    if not os.path.exists('plots/uw'):
        os.makedirs('plots/uw')
        print('Created folder plots/uw!')
    # Time and radius grids; `var` is indexed as var[r_index, t_index].
    t = np.linspace(tmin, tmax, Nt)
    r = np.linspace(0,R,Nr)
    avg_var_t = np.zeros(Nt, dtype=np.float64)
    avg_var_r = np.zeros(Nr, dtype=np.float64)
    # NOTE(review): avg_var_t[i] sums Nr radial samples but divides by Nt,
    # and avg_var_r[i] sums Nt time samples but divides by Nr. The divisors
    # look swapped (only correct when Nr == Nt) -- confirm the intent.
    for i in range(Nt):
        avg_var_t[i] = np.sum(var[:,i])
        avg_var_t[i] /= Nt
    for i in range(Nr):
        avg_var_r[i] = np.sum(var[i,:])
        avg_var_r[i] /= Nr
    # Radial profiles at t=0 and at the quarter points of the time grid.
    mplt.figure(111)
    for i in range (1,5):
        mplt.plot(r/R, var[:,int(i*Nt/4)-2], label='t = '+'{0:.2f}'.format(t[int(i*Nt/4)-2]))
    mplt.plot(r/R, var[:,0], label=r'$t$ = '+'{0:.2f}'.format(0))
    mplt.ylabel(r'$'+mode+'\quad [P mol/ccm]$')
    mplt.xlabel('r')
    mplt.xlim(0,1)
    mplt.legend()
    mplt.tight_layout()
    # NOTE(review): all three figures below save to the same file name, so
    # each savefig overwrites the previous one and only figure 333 survives
    # on disk -- each figure likely needs its own suffix.
    mplt.savefig('plots/uw/'+mode+str(al)+'.pdf', format='pdf')
    # Radially averaged profile (see divisor note above).
    mplt.figure(222)
    mplt.plot(r/R, avg_var_r, label='avg')
    mplt.ylabel(r'$'+mode+'\quad [P mol/ccm]$')
    mplt.xlabel('r')
    mplt.xlim(0,1)
    mplt.legend()
    mplt.tight_layout()
    mplt.savefig('plots/uw/'+mode+str(al)+'.pdf', format='pdf')
    # Time-averaged profile (see divisor note above).
    mplt.figure(333)
    mplt.plot(t, avg_var_t, label='avg')
    mplt.ylabel(r'$'+mode+'\quad [P mol/ccm]$')
    mplt.xlabel('t [h]')
    mplt.xlim(tmin,tmax)
    mplt.legend()
    mplt.tight_layout()
    mplt.savefig('plots/uw/'+mode+str(al)+'.pdf', format='pdf')
    # Heat map of the full (r, t) field.
    mplt.figure(8)
    mplt.imshow(var,origin='lower')
    mplt.colorbar()
    mplt.show()
1621664 | # Generated by Django 3.0.8 on 2020-08-07 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the Account and Event tables.

    NOTE: auto-generated by Django's makemigrations (Django 3.0.8). Do not
    hand-edit; create a follow-up migration for schema changes instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
                ('username', models.CharField(blank=True, max_length=50)),
                ('title', models.CharField(default='Earl', editable=False, max_length=20)),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=100)),
                ('passport_id', models.CharField(max_length=15)),
                ('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
                ('birthday', models.DateField(blank=True, null=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('desc', models.TextField()),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
            ],
        ),
    ]
| StarcoderdataPython |
3266023 | # Copyright (c) 2020-2021, <NAME>
# License: MIT License
from typing import TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec3, X_AXIS, Y_AXIS, Vec2, Matrix44, sign, OCS
if TYPE_CHECKING:
from ezdxf.eztypes import DXFGraphic, Vertex
__all__ = [
"TransformError",
"NonUniformScalingError",
"InsertTransformationError",
"transform_extrusion",
"transform_thickness_and_extrusion_without_ocs",
"OCSTransform",
]
class TransformError(Exception):
pass
class NonUniformScalingError(TransformError):
pass
class InsertTransformationError(TransformError):
pass
def transform_thickness_and_extrusion_without_ocs(
entity: "DXFGraphic", m: Matrix44
) -> None:
if entity.dxf.hasattr("thickness"):
thickness = entity.dxf.thickness
reflection = sign(thickness)
thickness = m.transform_direction(entity.dxf.extrusion * thickness)
entity.dxf.thickness = thickness.magnitude * reflection
entity.dxf.extrusion = thickness.normalize()
elif entity.dxf.hasattr("extrusion"): # without thickness?
extrusion = m.transform_direction(entity.dxf.extrusion)
entity.dxf.extrusion = extrusion.normalize()
def transform_extrusion(extrusion: "Vertex", m: Matrix44) -> Tuple[Vec3, bool]:
"""Transforms the old `extrusion` vector into a new extrusion vector.
Returns the new extrusion vector and a boolean value: ``True`` if the new
OCS established by the new extrusion vector has a uniform scaled xy-plane,
else ``False``.
The new extrusion vector is perpendicular to plane defined by the
transformed x- and y-axis.
Args:
extrusion: extrusion vector of the old OCS
m: transformation matrix
Returns:
"""
ocs = OCS(extrusion)
ocs_x_axis_in_wcs = ocs.to_wcs(X_AXIS)
ocs_y_axis_in_wcs = ocs.to_wcs(Y_AXIS)
x_axis, y_axis = m.transform_directions(
(ocs_x_axis_in_wcs, ocs_y_axis_in_wcs)
)
# Check for uniform scaled xy-plane:
is_uniform = math.isclose(
x_axis.magnitude_square, y_axis.magnitude_square, abs_tol=1e-9
)
new_extrusion = x_axis.cross(y_axis).normalize()
return new_extrusion, is_uniform
class OCSTransform:
def __init__(self, extrusion: Vec3 = None, m: Matrix44 = None):
self.m = m
if extrusion is None:
self.old_ocs = None
self.scale_uniform = False
self.new_ocs = None
else:
self.old_ocs = OCS(extrusion)
new_extrusion, self.scale_uniform = transform_extrusion(
extrusion, m
)
self.new_ocs = OCS(new_extrusion)
@property
def old_extrusion(self) -> Vec3:
return self.old_ocs.uz
@property
def new_extrusion(self) -> Vec3:
return self.new_ocs.uz
@classmethod
def from_ocs(cls, old: OCS, new: OCS, m: Matrix44) -> "OCSTransform":
ocs = cls()
ocs.m = m
ocs.old_ocs = old
ocs.new_ocs = new
return ocs
def transform_length(self, length: "Vertex", reflection=1.0) -> float:
"""Returns magnitude of `length` direction vector transformed from
old OCS into new OCS including `reflection` correction applied.
"""
return self.m.transform_direction(
self.old_ocs.to_wcs(length)
).magnitude * sign(reflection)
def transform_width(self, width: float) -> float:
"""Transform the width of a linear OCS entity from the old OCS
into the new OCS. (LWPOLYLINE!)
"""
if abs(width) > 1e-12: # assume a uniform scaling!
return max(
self.transform_length((abs(width), 0, 0)),
self.transform_length((0, abs(width), 0))
)
return 0.0
transform_scale_factor = transform_length
def transform_ocs_direction(self, direction: Vec3) -> Vec3:
"""Transform an OCS direction from the old OCS into the new OCS."""
# OCS origin is ALWAYS the WCS origin!
old_wcs_direction = self.old_ocs.to_wcs(direction)
new_wcs_direction = self.m.transform_direction(old_wcs_direction)
return self.new_ocs.from_wcs(new_wcs_direction)
def transform_thickness(self, thickness: float) -> float:
"""Transform the thickness attribute of an OCS entity from the old OCS
into the new OCS.
Thickness is always applied in the z-axis direction of the OCS
a.k.a. extrusion vector.
"""
# Only the z-component of the thickness vector transformed into the
# new OCS is relevant for the extrusion in the direction of the new
# OCS z-axis.
# Input and output thickness can be negative!
new_ocs_thickness = self.transform_ocs_direction(Vec3(0, 0, thickness))
return new_ocs_thickness.z
def transform_vertex(self, vertex: "Vertex") -> Vec3:
"""Returns vertex transformed from old OCS into new OCS."""
return self.new_ocs.from_wcs(
self.m.transform(self.old_ocs.to_wcs(vertex))
)
def transform_2d_vertex(self, vertex: "Vertex", elevation: float) -> Vec2:
"""Returns 2D vertex transformed from old OCS into new OCS."""
v = Vec3(vertex).replace(z=elevation)
return self.new_ocs.from_wcs(
self.m.transform(self.old_ocs.to_wcs(v))
).vec2
def transform_direction(self, direction: "Vertex") -> Vec3:
"""Returns direction transformed from old OCS into new OCS."""
return self.new_ocs.from_wcs(
self.m.transform_direction(self.old_ocs.to_wcs(direction))
)
def transform_angle(self, angle: float) -> float:
"""Returns angle (in radians) from old OCS transformed into new OCS."""
return self.transform_direction(Vec3.from_angle(angle)).angle
def transform_deg_angle(self, angle: float) -> float:
"""Returns angle (in degrees) from old OCS transformed into new OCS."""
return math.degrees(self.transform_angle(math.radians(angle)))
| StarcoderdataPython |
1723489 | <filename>test/test_strengthening.py
from meteor_reasoner.graphutil.graph_strengthening import *
from meteor_reasoner.classes import *
import copy
def test_strengthening():
    """Smoke test: build a rule mixing unary (Boxminus/Diamondminus) and
    binary (Since) body literals and run it through transformation().

    NOTE(review): this only prints the input and transformed rules and
    asserts nothing, so it can never fail -- consider asserting on the
    transformed rule instead.
    """
    head = Atom("C", tuple([Term("nan")]))
    literal_a = Literal(Atom("A", tuple([Term("X", "variable")])), [Operator("Boxminus", Interval(1, 2, False, False)),Operator("Boxminus", Interval(1, 2, False, False))])
    literal_b = Literal(Atom("B", tuple([Term("nan")])), [Operator("Diamondminus", Interval(0, 1, False, False))])
    # literal_c reuses deep copies so the Since literal owns its operands.
    literal_c = BinaryLiteral(copy.deepcopy(literal_a), copy.deepcopy(literal_b), Operator("Since", Interval(4, 5, False, False)))
    body = [literal_a, literal_b, literal_c]
    rule = Rule(head, body)
    print(rule)
    new_rule = transformation([rule])
    print(new_rule[0])
test_strengthening() | StarcoderdataPython |
1611078 | """ Classes for running optimization problems."""
# Author: <NAME> (modified by <NAME>)
# License: BSD 3 clause
from .ga_runner import GARunner
from .rhc_runner import RHCRunner
from .sa_runner import SARunner
from .mimic_runner import MIMICRunner
from .nngs_runner import NNGSRunner
from .skmlp_runner import SKMLPRunner
from .utils import (build_data_filename)
| StarcoderdataPython |
17192 | # Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order. 按绘图顺序指定
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1) # 初始化权重theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')
# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)
print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta
def cost_func(t):
    # Objective for fmin_bfgs: cost at theta t.
    # cf.cost_function returns (cost, grad); closes over module-level X, y.
    return cf.cost_function(t, X, y)[0]
def grad_func(t):
    # Gradient for fmin_bfgs (fprime): second element of (cost, grad).
    return cf.cost_function(t, X, y)[1]
# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)
print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')
# Plot boundary 画出二分边界
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll like to use it to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py
# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')
# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')
input('ex2 Finished. Press ENTER to exit')
| StarcoderdataPython |
4813263 | import logging
from smbus2 import SMBus
from twisted.internet.task import LoopingCall
bus = SMBus(1)
logger = logging.getLogger(__name__)
def _periodic_check_door(instance):
    """Poll the sensor board once over I2C and dispatch events to *instance*.

    Reads an 8-byte block: bytes 0-4 are event codes, byte 5 is the parking
    status, bytes 6-7 are the two parking distances.  Intended to run
    repeatedly from a Twisted LoopingCall (see SensorControl.start).
    """
    try:
        try:
            bus_data = bus.read_i2c_block_data(instance.sensor_i2c_address, 0, 8)
        except OSError:
            # Transient I2C read failure; skip this tick and retry later.
            return
        # Check sensor events in the first five bytes.
        for data_byte in bus_data[0:5]:
            if data_byte == 0x01:
                instance._send_event('door_open')
            elif data_byte == 0x02:
                instance._send_event('door_closed')
            elif data_byte == 0x03:
                instance._send_event('cancel_requested')
        # Check parking mode.
        parking_status = bus_data[5]
        if parking_status != 0x00 or instance.parking_status != 0x00:
            # NOTE(review): this marks parking_mode True when the status byte
            # is zero, while SensorControl._send_event sets it True when the
            # new status is non-zero — the two look inverted relative to each
            # other; confirm the intended polarity against the firmware.
            instance.parking_mode = (parking_status == 0x00)
            instance.parking_distance[0] = bus_data[6]
            instance.parking_distance[1] = bus_data[7]
            if parking_status != instance.parking_status:
                instance._send_event('parking_status_changed', parking_status, instance.parking_status)
                instance.parking_status = parking_status
            # Call the parking callbacks with their registered arguments.
            for fn, args, kwargs in instance.parking_event_callbacks:
                # Bug fix: kwargs is a dict and must be applied with **, not *
                # (the original `*kwargs` passed the dict's keys positionally).
                fn(instance, *args, **kwargs)
    except Exception:
        # Catch Exception rather than a bare except so KeyboardInterrupt and
        # SystemExit still propagate; log anything else so the loop survives.
        logger.exception("Error in _periodic_check_door")
class SensorControl(object):
    """Polls a door/parking sensor board over I2C and dispatches events.

    Polling runs on a Twisted LoopingCall (see _periodic_check_door); the
    poll interval is shortened while parking mode is active.
    """
    # Poll intervals in seconds: normal operation vs. active parking mode.
    INTERVAL_REGULAR = 2
    INTERVAL_FAST = 0.5
    # Event names handlers may subscribe to.  NOTE(review):
    # _periodic_check_door also emits 'cancel_requested', which is not listed
    # here and is therefore silently dropped by _send_event's .get(); confirm
    # whether that is intended.  'override_button_pressed' is never emitted
    # from this module.
    EVENTS = ['door_closed', 'door_open', 'override_button_pressed', 'parking_status_changed']
    event_handlers = None
    parking_event_callbacks = None
    def __init__(self, i2c_address):
        # I2C address of the sensor board; read by _periodic_check_door.
        self.sensor_i2c_address = i2c_address
        self.lc = None
        self.event_handlers = {k:[] for k in self.EVENTS}
        self.parking_event_callbacks = []
        # Last parking status byte seen, and the two distance bytes.
        self.parking_status = 0
        self.parking_distance = [0, 0]
        self.parking_mode = False
    def start(self, interval=None, now=True):
        """(Re)start the polling loop; stops any loop already running."""
        self.stop()
        self.lc = LoopingCall(_periodic_check_door, self)
        self.lc.start(interval or self.INTERVAL_REGULAR, now)
    def stop(self):
        """Stop the polling loop if it is running."""
        if self.lc and self.lc.running:
            self.lc.stop()
    def add_parking_data_update_callback(self, fn, *args, **kwargs):
        # fn is invoked as fn(self, *args, **kwargs) on each parking update.
        self.parking_event_callbacks.append((fn, args, kwargs))
    def add_event_handler(self, event, fn, *args, **kwargs):
        # Raises KeyError if `event` is not one of EVENTS.
        self.event_handlers[event].append((fn, args, kwargs))
    def _send_event(self, event, *args, **kwargs):
        # Special case: on a parking status transition, switch the polling
        # interval (fast while parked, regular otherwise).  For
        # 'parking_status_changed', args is (new_status, old_status).
        if event == 'parking_status_changed':
            if args[1] == 0x00 and args[0] != 0x00:
                self.parking_mode = True
                self.start(self.INTERVAL_FAST, now=False)
                logger.info("Entering parking mode")
            elif args[0] == 0x00 and args[1] != 0x00:
                self.parking_mode = False
                self.start(self.INTERVAL_REGULAR, now=False)
                logger.info("Exiting parking mode")
        # Dispatch to subscribed handlers; unknown events are silently ignored.
        for handler in self.event_handlers.get(event, []):
            handler[0](data=(args, kwargs), *handler[1], **handler[2])
| StarcoderdataPython |
191811 | #! /usr/bin/env nix-shell
#! nix-shell -i python3 leaflet.nix
import os
import folium
import glob
import pickle
from urllib.parse import urlsplit, urlunsplit
import html
import sys
from osgeo import gdal, osr
from branca.element import CssLink, Figure, JavascriptLink, MacroElement
from jinja2 import Template
import json
# Positional CLI arguments: <vrt_dir> <metadata_dir> <zoom_level> <output_dir>
vrt_dir = sys.argv[1]
metadata_dir = sys.argv[2]
zoom_level = int(sys.argv[3])
output_dir = sys.argv[4]
def make_event_link(event):
    """Build an HTML anchor for *event* pointing at its rg2 map page."""
    parts = urlsplit(event['map_url'])
    # Keep scheme and host, replace the rest with the rg2 viewer route.
    target = urlunsplit((parts.scheme, parts.netloc, '/rg2/#{}'.format(event['kartatid']), "", ""))
    label = event['name'].replace("'", "\\'")
    return '<a href="{}">{} - {}</a>'.format(target, label, event['date'])
# Marker colour for each single-letter event rawtype code.
icon_map = {
    "I": "darkred",
    "N": "lightred",
    "R": "blue",
    "L": "lightblue",
    "T": "gray",
    "X": "gray",
}


def make_colour(event):
    """Return the marker colour for *event* based on its rawtype code."""
    return icon_map[event['rawtype']]
def GetCenter(gt, cols, rows):
    """Return the georeferenced [x, y] of a raster's centre pixel.

    gt is a 6-element GDAL geotransform; cols and rows are the raster
    width and height in pixels.
    """
    col_mid = cols / 2
    row_mid = rows / 2
    x = gt[0] + col_mid * gt[1] + row_mid * gt[2]
    y = gt[3] + col_mid * gt[4] + row_mid * gt[5]
    return [x, y]
def ReprojectCoords(coords, src_srs, tgt_srs):
    """Reproject a list of [x, y] coordinates from src_srs to tgt_srs.

    Returns a new list of [x, y] pairs; the input list is not modified.
    """
    transform = osr.CoordinateTransformation(src_srs, tgt_srs)
    reprojected = []
    for src_x, src_y in coords:
        # TransformPoint yields (x, y, z); the z component is discarded.
        new_x, new_y, _z = transform.TransformPoint(src_x, src_y)
        reprojected.append([new_x, new_y])
    return reprojected
def GetCenterImage(raster):
    """Return the EPSG:4326 centre of *raster* with its axes swapped.

    The result is [y, x] of the reprojected centre — presumably
    [lat, lon], though the exact ordering depends on the GDAL axis
    convention; confirm for the GDAL version in use.
    """
    dataset = gdal.Open(raster)
    geotransform = dataset.GetGeoTransform()
    centre = GetCenter(geotransform, dataset.RasterXSize, dataset.RasterYSize)
    source = osr.SpatialReference()
    source.ImportFromWkt(dataset.GetProjection())
    target = osr.SpatialReference()
    target.ImportFromEPSG(4326)
    (x, y), = ReprojectCoords([centre], source, target)
    # Callers expect the swapped [y, x] ordering.
    return [y, x]
class FlagControl(MacroElement):
    """Keyboard-activated "flag this map" toggle button for a folium map.

    Pressing 'f' on the rendered map adds a Leaflet.EasyButton control.
    The button's state ('set-flag' / 'undo-flag') is read by per-marker
    click handlers (see flag_click) to decide whether a click should
    report the map as incorrect.

    See https://github.com/CliffCloud/Leaflet.EasyButton for the widget.
    """
    # Jinja macro injected into the map's script section; registers the
    # easyButton and exposes it as <map>.flagControl for click handlers.
    _template = Template("""
        {% macro script(this, kwargs) %}
        var {{this.get_name()}} = new L.easyButton({
        states: [{
                stateName: 'set-flag',
                icon: 'fa-flag',
                title: 'Flag an incorrect map',
                onClick: function(control, map) {
                    control.state('undo-flag');
                }
                }, {
                icon: 'fa-undo',
                stateName: 'undo-flag',
                onClick: function(control, map) {
                    map.state = false;
                    control.state('set-flag');
                },
                title: 'reset flag'
        }]})
        {{this._parent.get_name()}}.on('keypress', function(e) {
            if (e.originalEvent.charCode == 102){
            {{this._parent.get_name()}}.addControl({{this.get_name()}});
        }})
        {{this._parent.get_name()}}.flagControl = {{this.get_name()}};
        {% endmacro %}
        """)  # noqa
    def __init__(self):
        """Create the flag control (currently no configurable options)."""
        super(FlagControl, self).__init__()
        self._name = 'FlagControl'
        options = {
        }
        self.options = json.dumps(options)
    def render(self, **kwargs):
        # NOTE(review): kwargs are accepted but not forwarded to
        # super().render() — branca elements usually pass **kwargs through;
        # confirm whether that was intentional.
        super(FlagControl, self).render()
        figure = self.get_root()
        assert isinstance(figure, Figure), ('You cannot render this Element '
                                            'if it is not in a Figure.')
        # Pull in the EasyButton JS/CSS assets via the figure header.
        figure.header.add_child(
            JavascriptLink('https://cdn.jsdelivr.net/npm/leaflet-easybutton@2/src/easy-button.js'))  # noqa
        figure.header.add_child(
            CssLink('https://cdn.jsdelivr.net/npm/leaflet-easybutton@2/src/easy-button.css'))  # noqa
# Collect one VRT per event map and build the base map centred on the UK.
images = glob.glob(os.path.join(vrt_dir,'*.jpg.vrt'))
print(len(images))
flagControl = FlagControl()
print(flagControl)
# NOTE(review): folium.Map does not document a flag_control kwarg — confirm
# the installed folium version accepts/ignores it.
m = folium.Map([54.3781, -3.4360], zoom_start=6, tiles='OpenStreetMap'
    , prefer_canvas=True, flag_control=flagControl)
flagControl.add_to(m)
def flag_click(key):
    # Returns a Jinja2 template for a JS click handler: when the flag button
    # is armed ('undo-flag'), clicking reports this map's hash to the
    # flag_map cloud function and resets the button.
    return Template(
        u"""function (e) {
                if ({{m}}.flagControl._currentState.stateName == "undo-flag"){
                jQuery.get(\"http://europe-west1-rg-maps-216117.cloudfunctions.net/flag_map?hash="""+key+u"""")
                {{m}}.flagControl.state("set-flag")
                }
        }""")
# One circle marker per event, placed at the map image's centre, with a
# popup linking to the event and a click handler for flagging.
for image_vrt in images:
    print(image_vrt)
    # Strip both extensions: '<key>.jpg.vrt' -> '<key>'.
    key = os.path.splitext(os.path.splitext(os.path.basename(image_vrt))[0])[0]
    event = pickle.load(open(os.path.join(metadata_dir,'{}.pickle'.format(key)), 'rb'))
    print(key)
    center = GetCenterImage(image_vrt)
    popup = folium.map.Popup(html=make_event_link(event))
    # NOTE(review): folium.Circle does not document an onclick kwarg —
    # confirm the installed folium version honours it.
    folium.Circle( center, radius=100
        , popup=popup
        , color=make_colour(event)
        , fill_color=make_colour(event)
        , fill=True
        , onclick=flag_click(key)).add_to(m)
# Tile overlay with the rendered maps; tiles_loc is the production S3 URL,
# tiles_loc_dev the local relative path (currently the one in use).
tiles_loc = "https://s3-eu-west-1.amazonaws.com/rg-maps/{z}/{x}/{y}.png"
tiles_loc_dev = "{z}/{x}/{y}.png"
img = folium.raster_layers.TileLayer(tiles=tiles_loc_dev
    , attr="RouteGadget"
    , name="RouteGadget"
    , tms=True
    , max_native_zoom=zoom_level
    , overlay=True
    , opacity=0.7)
img.add_to(m)
folium.LayerControl().add_to(m)
m.save(os.path.join(output_dir, 'maps.html'))
| StarcoderdataPython |
1600622 | """
A tool for application models to register themselves so that they serve a standard interface
"""
from abc import ABC, abstractmethod
from autumn.model_runner import build_model_runner
from autumn.constants import Region
class RegionAppBase(ABC):
    """Interface every region application registers with App.

    Subclasses provide model construction/execution plus their parameter
    and plotting configuration.
    """

    @abstractmethod
    def build_model(self, params):
        """Build the model for this region."""

    @abstractmethod
    def run_model(self, *args, **kwargs):
        """Run the model."""

    @property
    @abstractmethod
    def params(self):
        """Parameters for this region app."""

    @property
    @abstractmethod
    def plots_config(self):
        """Plotting configuration for this region app."""
class App:
    """Registry of region apps, keyed by each app's `region` attribute."""

    def __init__(self):
        self.region_names = []
        self.region_apps = {}

    def register(self, region_app: RegionAppBase):
        """Add a region app; its region must not already be registered."""
        name = region_app.region
        assert name not in self.region_names
        self.region_apps[name] = region_app
        self.region_names.append(name)

    def get_region_app(self, region: str):
        """Return the registered app for *region* (KeyError if unknown)."""
        return self.region_apps[region]
| StarcoderdataPython |
170568 | <gh_stars>0
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime as dt
import logging
import time
import repokid.hooks
from repokid.role import Role, Roles
from repokid.utils.dynamo import (
find_role_in_cache,
get_role_data,
role_ids_for_account,
set_role_data,
)
from tabulate import tabulate
from tqdm import tqdm
LOGGER = logging.getLogger("repokid")
def _schedule_repo(account_number, dynamo_table, config, hooks):
    """
    Schedule a repo for every eligible role in *account_number*.

    Each role with repoable permissions (and Access Advisor data, and no
    repo already scheduled) gets a repo time a configurable number of days
    in the future (default 7) and has its currently-repoable services
    frozen into DynamoDB.  Fires the AFTER_SCHEDULE_REPO hooks at the end.
    """
    period_days = config.get("repo_schedule_period_days", 7)
    when = int(time.time()) + 86400 * period_days
    account_roles = Roles(
        [
            Role(get_role_data(dynamo_table, role_id))
            for role_id in tqdm(role_ids_for_account(dynamo_table, account_number))
        ]
    )
    scheduled_roles = []
    for role in account_roles:
        # Skip ineligible roles, logging the reason for each.
        if not role.aa_data:
            LOGGER.warning("Not scheduling %s; missing Access Advisor data", role.arn)
        elif not role.repoable_permissions > 0:
            LOGGER.debug("Not scheduling %s; no repoable permissions", role.arn)
        elif role.repo_scheduled:
            LOGGER.debug(
                "Not scheduling %s; already scheduled for %s",
                role.arn,
                role.repo_scheduled,
            )
        else:
            role.repo_scheduled = when
            # Freeze the scheduled perms to whatever is repoable right now.
            set_role_data(
                dynamo_table,
                role.role_id,
                {
                    "RepoScheduled": when,
                    "ScheduledPerms": role.repoable_services,
                },
            )
            scheduled_roles.append(role)
    LOGGER.info(
        "Scheduled repo for {} days from now for account {} and these roles:\n\t{}".format(
            period_days,
            account_number,
            ", ".join([r.role_name for r in scheduled_roles]),
        )
    )
    repokid.hooks.call_hooks(hooks, "AFTER_SCHEDULE_REPO", {"roles": scheduled_roles})
def _show_scheduled_roles(account_number, dynamo_table):
    """
    Print a table of the account's scheduled repos, flagging for each
    whether its scheduled time has already elapsed.
    """
    all_roles = Roles(
        [
            Role(get_role_data(dynamo_table, role_id))
            for role_id in tqdm(role_ids_for_account(dynamo_table, account_number))
        ]
    )
    # Only active roles with a repo actually scheduled are of interest.
    scheduled = [r for r in all_roles.filter(active=True) if r.repo_scheduled]
    now = int(time.time())
    header = ["Role name", "Scheduled", "Scheduled Time Elapsed?"]
    rows = [
        [
            r.role_name,
            dt.fromtimestamp(r.repo_scheduled).strftime("%Y-%m-%d %H:%M"),
            r.repo_scheduled < now,
        ]
        for r in scheduled
    ]
    print(tabulate(rows, headers=header))
def _cancel_scheduled_repo(account_number, dynamo_table, role_name=None, is_all=None):
    """
    Cancel scheduled repo for a role (or all roles) in an account.

    Args:
        account_number: account whose role(s) to un-schedule
        dynamo_table: DynamoDB table holding role data
        role_name: specific role to cancel (ignored when is_all is set)
        is_all: when truthy, cancel every scheduled role in the account

    One of role_name / is_all must be provided; otherwise an error is
    logged and nothing happens.
    """
    if not is_all and not role_name:
        LOGGER.error("Either a specific role to cancel or all must be provided")
        return
    if is_all:
        roles = Roles(
            [
                Role(get_role_data(dynamo_table, roleID))
                for roleID in role_ids_for_account(dynamo_table, account_number)
            ]
        )
        # filter to show only roles that are scheduled
        roles = [role for role in roles if (role.repo_scheduled)]
        for role in roles:
            set_role_data(
                dynamo_table, role.role_id, {"RepoScheduled": 0, "ScheduledPerms": []}
            )
        LOGGER.info(
            "Canceled scheduled repo for roles: %s",
            ", ".join([role.role_name for role in roles]),
        )
        return
    role_id = find_role_in_cache(dynamo_table, account_number, role_name)
    if not role_id:
        # Logger.warn is a deprecated alias; use warning with lazy %-args.
        LOGGER.warning(
            "Could not find role with name %s in account %s",
            role_name,
            account_number,
        )
        return
    role = Role(get_role_data(dynamo_table, role_id))
    if not role.repo_scheduled:
        LOGGER.warning(
            "Repo was not scheduled for role %s in account %s",
            role.role_name,
            account_number,
        )
        return
    set_role_data(
        dynamo_table, role.role_id, {"RepoScheduled": 0, "ScheduledPerms": []}
    )
    LOGGER.info(
        "Successfully cancelled scheduled repo for role %s in account %s",
        role.role_name,
        role.account,
    )
| StarcoderdataPython |
4840968 | from os.path import join
from Input import gui, readData
from Process import processQuestion
from Output import saveToFile, success
def buildPolls(lines, t, d, f):
    """Split *lines* into poll files written under directory *d*.

    Each question is extracted with processQuestion(lines, t) and saved with
    the filename prefix "<d>/<f>-"; stops when no more questions are found
    or the input is exhausted.  Returns the number of files written.
    """
    prefix = join(d, f) + "-"
    written = 0
    while lines:
        output, start = processQuestion(lines, t)
        if output is None:
            return written
        saveToFile(prefix, output)
        # Drop the lines consumed by this question and continue.
        lines = lines[start:]
        written += 1
    return written
def main():
    """Collect settings via gui(), read the input, and emit poll files."""
    t, in_path, out_dir, base_name = gui()
    source_lines = readData(in_path)
    n_written = buildPolls(source_lines, t, out_dir, base_name)
    success(n_written, out_dir)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3230508 | import cv2
import face_recognition
from urllib.request import urlretrieve
from pathlib import Path
import os
import tempfile
from sys import platform
import random
import string
import utils.console as console
class FaceRecog:
    """Matches known reference faces against a list of profile images.

    Reference faces are loaded from the local './known' directory; profile
    images are downloaded by URL, scanned with the face_recognition library,
    and indexes of images containing a known face are returned.
    """
    def __init__(self, profile_list, profile_img, num_jitters=10):
        # profile_list and profile_img are parallel lists: URL of a profile
        # and URL of its image.  num_jitters controls encoding re-sampling
        # (higher = more accurate, slower).
        self.profile_list = profile_list
        self.profile_img = profile_img
        self.num_jitters = num_jitters
        self.known_face_encodings = []
        self.known_face_names = []
        console.section('Starting Face Recognition')
    def loadKnown(self, label):
        """Load every image under ./known and encode its faces as *label*."""
        console.task('Loading known faces')
        pa_g = Path('./known')
        pathlist = []
        # Glob each supported extension (both cases) recursively.
        for ext in ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.bmp', '.BMP']:
            tmp_pl = pa_g.glob('**/*{}'.format(ext))
            for t in tmp_pl:
                pathlist.append(t)
        for path in pathlist:
            p_str = str(path)
            # Pick the path separator for log display on the current OS.
            delim = '/'
            if platform == "win32":
                delim = '\\'
            console.subtask('Loading {0}'.format(p_str.split(delim)[1]))
            im = face_recognition.load_image_file(p_str)
            encoding = face_recognition.face_encodings(im, num_jitters=self.num_jitters)
            # An image may contain several faces; all are tagged with label.
            for e in encoding:
                self.known_face_encodings.append(e)
                self.known_face_names.append(label)
    def constructIndexes(self, label):
        """Return indexes of profile images that contain a face matching *label*."""
        valid_links = []
        console.section('Analyzing')
        # Random temp filename so concurrent runs don't collide.
        file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        file_name += '.jpg'
        tmp_path = os.path.join(tempfile.gettempdir(), file_name)
        console.task("Storing Image in {0}".format(tmp_path))
        for num, i in enumerate(self.profile_img):
            console.task('Analyzing {0}...'.format(i.strip()[:90]))
            urlretrieve(i, tmp_path)
            frame = cv2.imread(tmp_path)
            # Upscale 2x to help detection, then convert BGR -> RGB.
            big_frame = cv2.resize(frame, (0, 0), fx=2.0, fy=2.0)
            rgb_small_frame = big_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations, num_jitters=self.num_jitters)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                face_names.append(name)
            for _, name in zip(face_locations, face_names):
                if name == label:
                    valid_links.append(num)
            # Clean up the downloaded temp image before the next iteration.
            if os.path.isfile(tmp_path):
                console.task("Removing {0}".format(tmp_path))
                os.remove(tmp_path)
        return valid_links
    def getValidLinksAndImg(self, label):
        """Return (profile URLs, image URLs) whose images matched *label*."""
        if len(self.known_face_encodings) <= 0:
            console.failure('No Face Encodings found!')
            console.failure('Did you call `loadKnown(label)` before calling this method?')
            return [], []
        valid_url = []
        valid_img = []
        valid_indexes = self.constructIndexes(label)
        for index in valid_indexes:
            # Ignore indexes that fall outside either list.
            try:
                valid_url.append(self.profile_list[index])
                valid_img.append(self.profile_img[index])
            except:
                pass
        return valid_url, valid_img
| StarcoderdataPython |
1797875 | <gh_stars>1-10
# Print a left-aligned triangle of "row x column" label pairs:
# row r (1..9) prints the pairs (r,1) through (r,r).
for r in range(9):
    for c in range(r + 1):
        print("%dx%d" % (r + 1, c + 1), end=" ")
    print()
| StarcoderdataPython |
1732214 | """
readal.py
Galaxy wrapper for automatic conversion of alignments into different formats using ReadAl version 1.4
"""
import sys,optparse,os,subprocess,tempfile,shutil
class Test:
    """
    Runs the bundled readAl binary on a copy of the input alignment.
    (Python 2 code — see the `<>` operator in this file's __main__ block.)
    """
    def __init__(self,opts=None):
        self.opts = opts
        # Work on a copy so the original input file is left untouched.
        self.iname = 'infile_copy'
        shutil.copy(self.opts.input,self.iname)
    def run(self):
        """Invoke readal with the parsed options, then remove the copy."""
        # tlf = open(self.opts.log,'w')
        cl = []
        # Locate the dependencies dir relative to this file's "tools" parent.
        file_path = os.path.dirname(os.path.abspath(__file__))
        dir_path = file_path[:file_path.rfind("tools")]
        # SECURITY: the command line is built by string interpolation and run
        # with shell=True — paths containing shell metacharacters will be
        # interpreted by the shell.  Prefer a list argv with shell=False.
        cl.append('%sdependencies//trimal-trimAl_1.4/source/readal -in %s -out %s -%s' % (dir_path,self.opts.input,self.opts.output,self.opts.format))
        # process = subprocess.Popen(' '.join(cl), shell=True, stderr=tlf, stdout=tlf)
        process = subprocess.Popen(' '.join(cl), shell=True)
        rval = process.wait()
        os.unlink(self.iname)
if __name__ == "__main__":
    # Parse -i/-o/-f options with optparse (Python 2 era module).
    op = optparse.OptionParser()
    op.add_option('-i', '--input', default=None,help='Input file')
    op.add_option('-o', '--output', default=None,help='Output file')
    op.add_option('-f','--format', default=None, help="Output format")
    opts, args = op.parse_args()
    # NOTE: `<>` is Python 2 only syntax; this script will not run on Python 3.
    assert opts.input <> None
    assert os.path.isfile(opts.input)
    c = Test(opts)
    c.run()
| StarcoderdataPython |
1661372 | from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
# URL routes for the asset API views.
urlpatterns = [
    path('getAvailableAssets', views.all_asset),
    path('getassetmarketprice/<str:name>', views.price_asset)
]
# Allow optional format suffixes (e.g. ".json") on the routes above.
urlpatterns = format_suffix_patterns(urlpatterns)
| StarcoderdataPython |
185714 | import socket
import threading
from . import logger
class TelnetConnectionHandler(threading.Thread):
    """Per-client thread: registers the client in a shared list, logs
    incoming data until the peer disconnects, then unregisters it."""
    def __init__(self, sock, address, clients, lock):
        super().__init__()
        self.socket = sock
        # address is the (host, port) tuple from socket.accept().
        self.address, self.port = address
        # Shared client registry and the lock guarding it.
        self.clients = clients
        self.lock = lock
    @property
    def client_name(self):
        # "host:port" identifier used in logs and the client registry.
        return f'{self.address}:{self.port}'
    def run(self):
        # Register this client under the shared lock.
        self.lock.acquire()
        self.clients.append(self.client_name)
        self.lock.release()
        # Read until the peer closes the connection (recv returns b'').
        while True:
            message = self.socket.recv(4096)
            if not message:
                break
            logger.debug(f'[{self.__class__.__name__}] message from {self.client_name}: {message}')
        self.socket.close()
        logger.info(f'[{self.__class__.__name__}] connection from {self.client_name} closed')
        # Unregister this client.
        self.lock.acquire()
        self.clients.remove(self.client_name)
        self.lock.release()
class TelnetServer(threading.Thread):
    """Accept loop: listens on the given address/port and spawns a
    TelnetConnectionHandler thread per incoming connection."""
    def __init__(self, address='', port=23):
        super().__init__()
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind((address, port))
        self.server.listen(5)
        # Lock and registry shared with every connection handler.
        self.lock = threading.Lock()
        self.clients = []
    def run(self):
        # Runs forever; each accepted connection gets its own handler thread.
        while True:
            soc, addr = self.server.accept()
            logger.info(f'[{self.__class__.__name__}] new connection {addr[0]}:{addr[1]}')
            TelnetConnectionHandler(soc, addr, self.clients, self.lock).start()
| StarcoderdataPython |
65711 | """File to hold important constant values and configure drone upon startup"""
from mavsdk import System
MAX_ALT: int = 750 # Feet
TAKEOFF_ALT: int = 100 # Feet
WAIT: float = 2.0 # Seconds
async def config_params(drone: System) -> None:
    """
    Sets certain parameters within the drone for flight
    Parameters
    ----------
    drone: System
        MAVSDK object for manual drone control & manipulation
    """
    # Takeoff altitude (TAKEOFF_ALT is in feet per the constant's comment).
    await drone.param.set_param_float("MIS_TAKEOFF_ALT", TAKEOFF_ALT)
    # Set data link loss failsafe mode HOLD
    await drone.param.set_param_int("NAV_DLL_ACT", 1)
    # Set offboard loss failsafe mode HOLD
    await drone.param.set_param_int("COM_OBL_ACT", 1)
    # Set offboard loss failsafe mode when RC is available HOLD
    await drone.param.set_param_int("COM_OBL_RC_ACT", 5)
    # Set RC loss failsafe mode HOLD
    await drone.param.set_param_int("NAV_RCL_ACT", 1)
    # Max horizontal velocity for land detection — presumably m/s; confirm
    # against the PX4 parameter reference.
    await drone.param.set_param_float("LNDMC_XY_VEL_MAX", 0.5)
    # NOTE(review): MAX_ALT is documented above as feet, but PX4 altitude
    # parameters are typically metres — confirm the intended units.
    await drone.param.set_param_float("LNDMC_ALT_MAX", MAX_ALT)
| StarcoderdataPython |
class Solution:
    def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
        """Return node values grouped by column, ordered by (column, row, value)."""
        # Collect (column, row, value) for every node using an explicit-stack
        # walk; visit order does not matter because the subsequent sort key
        # is the complete triple.
        entries = []
        stack = [(root, 0, 0)]
        while stack:
            node, row, column = stack.pop()
            if node is None:
                continue
            entries.append((column, row, node.val))
            stack.append((node.right, row + 1, column + 1))
            stack.append((node.left, row + 1, column - 1))
        # Global sort by column, then row, then value.
        entries.sort()
        # Group consecutive entries sharing a column index.
        result = []
        active_column = entries[0][0]
        bucket = []
        for column, row, value in entries:
            if column != active_column:
                result.append(bucket)
                active_column = column
                bucket = [value]
            else:
                bucket.append(value)
        # Flush the final column.
        result.append(bucket)
        return result
| StarcoderdataPython |
3205404 | <filename>pyelixys/hal/elixysobject.py<gh_stars>0
#!/usr/bin/env python
import sys
from pyelixys.hal.hwconf import config
class ElixysObject(object):
    """Parent object for all elixys systems.

    Subclasses share access to the hardware configuration through the
    `sysconf` class attribute.
    """
    # Shared configuration imported from pyelixys.hal.hwconf.
    sysconf = config
12565 | <reponame>icbi-lab/nextNEOpi
#!/usr/bin/env python
"""
Requirements:
* Python >= 3.7
* Pysam
Copyright (c) 2021 <NAME> <<EMAIL>>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"1",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import os
import sys
import argparse
def parse_csin(csin_fh, csin_info):
    """Parse CSiN scores from an open file (or iterable of lines).

    Recognised lines look like '<label> = <value>'; each value is rounded
    to three decimal places and stored in *csin_info* under 'MHCI',
    'MHCII' or 'combined'.  Returns the updated dict.
    """
    for line in csin_fh:
        if "MHC I CSiN" in line:
            _, value = line.split(" = ")
            csin_info["MHCI"] = round(float(value.strip()), 3)
        if "MHC II CSiN" in line:
            _, value = line.split(" = ")
            csin_info["MHCII"] = round(float(value.strip()), 3)
        if "Total CSiN" in line:
            _, value = line.split(" = ")
            csin_info["combined"] = round(float(value.strip()), 3)
    return csin_info
def parse_tmb(tmb_fh, tmb_info, tmb_type):
    """Parse a tab-separated TMB report into *tmb_info*.

    tmb_type selects which keys get filled: "all" for genome-wide values,
    "coding" for coding-region values (any other value parses but stores
    nothing).  Numeric values are rounded to three decimals; coverage and
    variant counts are kept as stripped strings.  Returns the updated dict.
    """
    # (marker substring, key when tmb_type == "all", key when "coding",
    #  whether the value is numeric and should be rounded)
    rules = [
        ("Coverage", "cov_genome", "cov_coding", False),
        ("Variants", "variants_tot", "variants_coding", False),
        ("Mutational load (", "TMB", "TMB_coding", True),
        ("Mutational load clonal", "TMB_clonal", "TMB_clonal_coding", True),
    ]
    for line in tmb_fh:
        for marker, all_key, coding_key, numeric in rules:
            if marker not in line:
                continue
            _, value = line.split("\t")
            parsed = round(float(value.strip()), 3) if numeric else value.strip()
            if tmb_type == "all":
                tmb_info[all_key] = parsed
            if tmb_type == "coding":
                tmb_info[coding_key] = parsed
    return tmb_info
def write_output(out_fh, tmb_info, csin_info, sample_name):
    """Write a two-line TSV (header row, then values) to *out_fh*.

    Columns cover the sample name, the TMB metrics and the CSiN scores,
    in a fixed order.
    """
    # Pair each output column name with its value so header and data rows
    # cannot drift out of sync.
    columns = [
        ("SampleID", sample_name),
        ("TMB", tmb_info["TMB"]),
        ("TMB_clonal", tmb_info["TMB_clonal"]),
        ("TMB_coding", tmb_info["TMB_coding"]),
        ("TMB_clonal_coding", tmb_info["TMB_clonal_coding"]),
        ("variants_total", tmb_info["variants_tot"]),
        ("variants_coding", tmb_info["variants_coding"]),
        ("coverage_genome", tmb_info["cov_genome"]),
        ("coverage_coding", tmb_info["cov_coding"]),
        ("CSiN_MHC_I", csin_info["MHCI"]),
        ("CSiN_MHC_II", csin_info["MHCII"]),
        ("CSiN_combined", csin_info["combined"]),
    ]
    out_fh.write("\t".join(name for name, _ in columns) + "\n")
    out_fh.write("\t".join(str(value) for _, value in columns) + "\n")
def _file_write(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "w")
def _file_read(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "r")
if __name__ == "__main__":
    # NOTE(review): `usage` is assigned but never used below.
    usage = __doc__.split("\n\n\n")
    parser = argparse.ArgumentParser(description="Compile sample info sheet")
    # Input files are opened directly by argparse via the _file_read type.
    parser.add_argument(
        "--tmb",
        required=True,
        type=_file_read,
        help="TMB file",
    )
    parser.add_argument(
        "--tmb_coding",
        required=True,
        type=_file_read,
        help="TMB coding file",
    )
    parser.add_argument(
        "--csin",
        required=True,
        type=_file_read,
        help="CSiN file",
    )
    parser.add_argument(
        "--out",
        required=True,
        type=_file_write,
        help="Output file",
    )
    parser.add_argument(
        "--sample_name",
        required=True,
        type=str,
        help="Sample name",
    )
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__
    )
    args = parser.parse_args()
    tmb = args.tmb
    tmb_coding = args.tmb_coding
    csin = args.csin
    out = args.out
    sample_name = args.sample_name
    # Defaults for every summary field; the parsers overwrite what they find.
    tmb_info = {
        "cov_genome": 0,
        "cov_coding": 0,
        "variants_tot": 0,
        "variants_coding": 0,
        "TMB": 0,
        "TMB_clonal": 0,
        "TMB_coding": 0,
        "TMB_clonal_coding": 0,
    }
    csin_info = {"MHCI": 0, "MHCII": 0, "combined": 0}
    # Genome-wide TMB, coding TMB, then CSiN; finally the combined TSV.
    tmb_info = parse_tmb(tmb, tmb_info, "all")
    tmb_info = parse_tmb(tmb_coding, tmb_info, "coding")
    csin_info = parse_csin(csin, csin_info)
    write_output(out, tmb_info, csin_info, sample_name)
    out.close()
| StarcoderdataPython |
4812337 | <reponame>ooici/pyon
# $ANTLR 3.1.3 Mar 18, 2009 10:09:25 src/SavedFSM/Monitor.g 2012-03-12 22:09:37
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
RESV=12
ANNOTATION=25
ASSERTION=28
PARALLEL=19
T__61=61
ID=26
T__60=60
EOF=-1
PROTOCOL=20
TYPE=14
T__55=55
INTERACTION=4
T__56=56
ML_COMMENT=32
T__57=57
T__58=58
ROLES=24
T__51=51
T__52=52
T__53=53
T__54=54
T__59=59
FULLSTOP=11
PLUS=7
SEND=13
DIGIT=30
T__50=50
T__42=42
T__43=43
T__40=40
T__41=41
T__46=46
T__47=47
T__44=44
T__45=45
LINE_COMMENT=33
T__48=48
T__49=49
RECLABEL=18
NUMBER=29
WHITESPACE=31
INT=5
VALUE=15
MULT=9
MINUS=8
ASSERT=21
UNORDERED=17
EMPTY=23
StringLiteral=27
T__34=34
GLOBAL_ESCAPE=22
T__35=35
T__36=36
T__37=37
T__38=38
T__39=39
BRANCH=16
DIV=10
STRING=6
class MonitorLexer(Lexer):
grammarFileName = "src/SavedFSM/Monitor.g"
antlr_version = version_str_to_tuple("3.1.3 Mar 18, 2009 10:09:25")
antlr_version_str = "3.1.3 Mar 18, 2009 10:09:25"
def __init__(self, input=None, state=None):
if state is None:
state = RecognizerSharedState()
super(MonitorLexer, self).__init__(input, state)
self.dfa9 = self.DFA9(
self, 9,
eot = self.DFA9_eot,
eof = self.DFA9_eof,
min = self.DFA9_min,
max = self.DFA9_max,
accept = self.DFA9_accept,
special = self.DFA9_special,
transition = self.DFA9_transition
)
# $ANTLR start "INTERACTION"
def mINTERACTION(self, ):
try:
_type = INTERACTION
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:7:13: ( 'interaction' )
# src/SavedFSM/Monitor.g:7:15: 'interaction'
pass
self.match("interaction")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INTERACTION"
# $ANTLR start "INT"
def mINT(self, ):
try:
_type = INT
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:8:5: ( 'int' )
# src/SavedFSM/Monitor.g:8:7: 'int'
pass
self.match("int")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INT"
# $ANTLR start "STRING"
def mSTRING(self, ):
try:
_type = STRING
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:9:8: ( 'string' )
# src/SavedFSM/Monitor.g:9:10: 'string'
pass
self.match("string")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STRING"
# $ANTLR start "PLUS"
def mPLUS(self, ):
try:
_type = PLUS
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:10:6: ( '+' )
# src/SavedFSM/Monitor.g:10:8: '+'
pass
self.match(43)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PLUS"
# $ANTLR start "MINUS"
def mMINUS(self, ):
try:
_type = MINUS
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:11:7: ( '-' )
# src/SavedFSM/Monitor.g:11:9: '-'
pass
self.match(45)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MINUS"
# $ANTLR start "MULT"
def mMULT(self, ):
try:
_type = MULT
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:12:6: ( '*' )
# src/SavedFSM/Monitor.g:12:8: '*'
pass
self.match(42)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MULT"
# $ANTLR start "DIV"
def mDIV(self, ):
try:
_type = DIV
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:13:5: ( '/' )
# src/SavedFSM/Monitor.g:13:7: '/'
pass
self.match(47)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DIV"
# $ANTLR start "FULLSTOP"
def mFULLSTOP(self, ):
try:
_type = FULLSTOP
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:14:10: ( '.' )
# src/SavedFSM/Monitor.g:14:12: '.'
pass
self.match(46)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FULLSTOP"
# $ANTLR start "RESV"
def mRESV(self, ):
try:
_type = RESV
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:15:6: ( 'RESV' )
# src/SavedFSM/Monitor.g:15:8: 'RESV'
pass
self.match("RESV")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RESV"
# $ANTLR start "SEND"
def mSEND(self, ):
try:
_type = SEND
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:16:6: ( 'SEND' )
# src/SavedFSM/Monitor.g:16:8: 'SEND'
pass
self.match("SEND")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SEND"
# $ANTLR start "TYPE"
def mTYPE(self, ):
try:
_type = TYPE
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:17:6: ( 'TYPE' )
# src/SavedFSM/Monitor.g:17:8: 'TYPE'
pass
self.match("TYPE")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "TYPE"
# $ANTLR start "VALUE"
def mVALUE(self, ):
try:
_type = VALUE
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:18:7: ( 'VALUE' )
# src/SavedFSM/Monitor.g:18:9: 'VALUE'
pass
self.match("VALUE")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "VALUE"
# $ANTLR start "BRANCH"
def mBRANCH(self, ):
try:
_type = BRANCH
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:19:8: ( 'BRANCH' )
# src/SavedFSM/Monitor.g:19:10: 'BRANCH'
pass
self.match("BRANCH")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BRANCH"
# $ANTLR start "UNORDERED"
def mUNORDERED(self, ):
try:
_type = UNORDERED
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:20:11: ( 'UNORDERED' )
# src/SavedFSM/Monitor.g:20:13: 'UNORDERED'
pass
self.match("UNORDERED")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "UNORDERED"
# $ANTLR start "RECLABEL"
def mRECLABEL(self, ):
try:
_type = RECLABEL
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:21:10: ( 'RECLABEL' )
# src/SavedFSM/Monitor.g:21:12: 'RECLABEL'
pass
self.match("RECLABEL")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RECLABEL"
# $ANTLR start "PARALLEL"
def mPARALLEL(self, ):
try:
_type = PARALLEL
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:22:10: ( 'PARALLEL' )
# src/SavedFSM/Monitor.g:22:12: 'PARALLEL'
pass
self.match("PARALLEL")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PARALLEL"
# $ANTLR start "PROTOCOL"
def mPROTOCOL(self, ):
try:
_type = PROTOCOL
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:23:10: ( 'PROTOCOL' )
# src/SavedFSM/Monitor.g:23:12: 'PROTOCOL'
pass
self.match("PROTOCOL")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PROTOCOL"
# $ANTLR start "ASSERT"
def mASSERT(self, ):
try:
_type = ASSERT
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:24:8: ( 'ASSERT' )
# src/SavedFSM/Monitor.g:24:10: 'ASSERT'
pass
self.match("ASSERT")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ASSERT"
# $ANTLR start "GLOBAL_ESCAPE"
def mGLOBAL_ESCAPE(self, ):
try:
_type = GLOBAL_ESCAPE
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:25:15: ( 'GLOBAL_ESCAPE' )
# src/SavedFSM/Monitor.g:25:17: 'GLOBAL_ESCAPE'
pass
self.match("GLOBAL_ESCAPE")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "GLOBAL_ESCAPE"
# $ANTLR start "EMPTY"
def mEMPTY(self, ):
try:
_type = EMPTY
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:26:7: ( 'EMPTY' )
# src/SavedFSM/Monitor.g:26:9: 'EMPTY'
pass
self.match("EMPTY")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "EMPTY"
# $ANTLR start "ROLES"
def mROLES(self, ):
try:
_type = ROLES
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:27:7: ( 'ROLES' )
# src/SavedFSM/Monitor.g:27:9: 'ROLES'
pass
self.match("ROLES")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ROLES"
# $ANTLR start "T__34"
def mT__34(self, ):
try:
_type = T__34
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:28:7: ( 'import' )
# src/SavedFSM/Monitor.g:28:9: 'import'
pass
self.match("import")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__34"
# $ANTLR start "T__35"
def mT__35(self, ):
try:
_type = T__35
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:29:7: ( 'protocol' )
# src/SavedFSM/Monitor.g:29:9: 'protocol'
pass
self.match("protocol")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__35"
# $ANTLR start "T__36"
def mT__36(self, ):
try:
_type = T__36
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:30:7: ( ',' )
# src/SavedFSM/Monitor.g:30:9: ','
pass
self.match(44)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__36"
# $ANTLR start "T__37"
def mT__37(self, ):
try:
_type = T__37
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:31:7: ( ';' )
# src/SavedFSM/Monitor.g:31:9: ';'
pass
self.match(59)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__37"
# $ANTLR start "T__38"
def mT__38(self, ):
try:
_type = T__38
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:32:7: ( 'from' )
# src/SavedFSM/Monitor.g:32:9: 'from'
pass
self.match("from")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__38"
# $ANTLR start "T__39"
def mT__39(self, ):
try:
_type = T__39
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:33:7: ( 'as' )
# src/SavedFSM/Monitor.g:33:9: 'as'
pass
self.match("as")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__39"
# $ANTLR start "T__40"
def mT__40(self, ):
try:
_type = T__40
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:34:7: ( 'at' )
# src/SavedFSM/Monitor.g:34:9: 'at'
pass
self.match("at")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__40"
# $ANTLR start "T__41"
def mT__41(self, ):
try:
_type = T__41
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:35:7: ( '{' )
# src/SavedFSM/Monitor.g:35:9: '{'
pass
self.match(123)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__41"
# $ANTLR start "T__42"
def mT__42(self, ):
try:
_type = T__42
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:36:7: ( '}' )
# src/SavedFSM/Monitor.g:36:9: '}'
pass
self.match(125)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__42"
# $ANTLR start "T__43"
def mT__43(self, ):
try:
_type = T__43
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:37:7: ( '(' )
# src/SavedFSM/Monitor.g:37:9: '('
pass
self.match(40)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__43"
# $ANTLR start "T__44"
def mT__44(self, ):
try:
_type = T__44
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:38:7: ( ')' )
# src/SavedFSM/Monitor.g:38:9: ')'
pass
self.match(41)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__44"
# $ANTLR start "T__45"
def mT__45(self, ):
try:
_type = T__45
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:39:7: ( 'role' )
# src/SavedFSM/Monitor.g:39:9: 'role'
pass
self.match("role")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__45"
# $ANTLR start "T__46"
def mT__46(self, ):
try:
_type = T__46
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:40:7: ( 'introduces' )
# src/SavedFSM/Monitor.g:40:9: 'introduces'
pass
self.match("introduces")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__46"
# $ANTLR start "T__47"
def mT__47(self, ):
try:
_type = T__47
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:41:7: ( ':' )
# src/SavedFSM/Monitor.g:41:9: ':'
pass
self.match(58)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__47"
# $ANTLR start "T__48"
def mT__48(self, ):
try:
_type = T__48
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:42:7: ( 'to' )
# src/SavedFSM/Monitor.g:42:9: 'to'
pass
self.match("to")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__48"
# $ANTLR start "T__49"
def mT__49(self, ):
try:
_type = T__49
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:43:7: ( 'choice' )
# src/SavedFSM/Monitor.g:43:9: 'choice'
pass
self.match("choice")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__49"
# $ANTLR start "T__50"
def mT__50(self, ):
try:
_type = T__50
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:44:7: ( 'or' )
# src/SavedFSM/Monitor.g:44:9: 'or'
pass
self.match("or")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__50"
# $ANTLR start "T__51"
def mT__51(self, ):
try:
_type = T__51
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:45:7: ( 'repeat' )
# src/SavedFSM/Monitor.g:45:9: 'repeat'
pass
self.match("repeat")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__51"
# $ANTLR start "T__52"
def mT__52(self, ):
try:
_type = T__52
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:46:7: ( 'rec' )
# src/SavedFSM/Monitor.g:46:9: 'rec'
pass
self.match("rec")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__52"
# $ANTLR start "T__53"
def mT__53(self, ):
try:
_type = T__53
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:47:7: ( 'end' )
# src/SavedFSM/Monitor.g:47:9: 'end'
pass
self.match("end")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__53"
# $ANTLR start "T__54"
def mT__54(self, ):
try:
_type = T__54
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:48:7: ( 'run' )
# src/SavedFSM/Monitor.g:48:9: 'run'
pass
self.match("run")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__54"
# $ANTLR start "T__55"
def mT__55(self, ):
try:
_type = T__55
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:49:7: ( 'inline' )
# src/SavedFSM/Monitor.g:49:9: 'inline'
pass
self.match("inline")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__55"
# $ANTLR start "T__56"
def mT__56(self, ):
try:
_type = T__56
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:50:7: ( 'parallel' )
# src/SavedFSM/Monitor.g:50:9: 'parallel'
pass
self.match("parallel")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__56"
# $ANTLR start "T__57"
def mT__57(self, ):
try:
_type = T__57
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:51:7: ( 'and' )
# src/SavedFSM/Monitor.g:51:9: 'and'
pass
self.match("and")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__57"
# $ANTLR start "T__58"
def mT__58(self, ):
try:
_type = T__58
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:52:7: ( 'do' )
# src/SavedFSM/Monitor.g:52:9: 'do'
pass
self.match("do")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__58"
# $ANTLR start "T__59"
def mT__59(self, ):
try:
_type = T__59
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:53:7: ( 'interrupt' )
# src/SavedFSM/Monitor.g:53:9: 'interrupt'
pass
self.match("interrupt")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__59"
# $ANTLR start "T__60"
def mT__60(self, ):
try:
_type = T__60
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:54:7: ( 'by' )
# src/SavedFSM/Monitor.g:54:9: 'by'
pass
self.match("by")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__60"
# $ANTLR start "T__61"
def mT__61(self, ):
try:
_type = T__61
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:55:7: ( 'unordered' )
# src/SavedFSM/Monitor.g:55:9: 'unordered'
pass
self.match("unordered")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "T__61"
# $ANTLR start "ID"
def mID(self, ):
try:
_type = ID
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:156:4: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* )
# src/SavedFSM/Monitor.g:156:6: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*
pass
if (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
# src/SavedFSM/Monitor.g:156:29: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
if ((48 <= LA1_0 <= 57) or (65 <= LA1_0 <= 90) or LA1_0 == 95 or (97 <= LA1_0 <= 122)) :
alt1 = 1
if alt1 == 1:
# src/SavedFSM/Monitor.g:
pass
if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop1
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ID"
# $ANTLR start "NUMBER"
def mNUMBER(self, ):
try:
_type = NUMBER
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:158:8: ( ( DIGIT )+ )
# src/SavedFSM/Monitor.g:158:10: ( DIGIT )+
pass
# src/SavedFSM/Monitor.g:158:10: ( DIGIT )+
cnt2 = 0
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
if ((48 <= LA2_0 <= 57)) :
alt2 = 1
if alt2 == 1:
# src/SavedFSM/Monitor.g:158:11: DIGIT
pass
self.mDIGIT()
else:
if cnt2 >= 1:
break #loop2
eee = EarlyExitException(2, self.input)
raise eee
cnt2 += 1
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NUMBER"
# $ANTLR start "WHITESPACE"
def mWHITESPACE(self, ):
try:
_type = WHITESPACE
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:160:12: ( ( '\\t' | ' ' | '\\r' | '\\n' | '\\u000C' )+ )
# src/SavedFSM/Monitor.g:160:14: ( '\\t' | ' ' | '\\r' | '\\n' | '\\u000C' )+
pass
# src/SavedFSM/Monitor.g:160:14: ( '\\t' | ' ' | '\\r' | '\\n' | '\\u000C' )+
cnt3 = 0
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if ((9 <= LA3_0 <= 10) or (12 <= LA3_0 <= 13) or LA3_0 == 32) :
alt3 = 1
if alt3 == 1:
# src/SavedFSM/Monitor.g:
pass
if (9 <= self.input.LA(1) <= 10) or (12 <= self.input.LA(1) <= 13) or self.input.LA(1) == 32:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt3 >= 1:
break #loop3
eee = EarlyExitException(3, self.input)
raise eee
cnt3 += 1
#action start
_channel = HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "WHITESPACE"
# $ANTLR start "DIGIT"
def mDIGIT(self, ):
try:
# src/SavedFSM/Monitor.g:162:16: ( '0' .. '9' )
# src/SavedFSM/Monitor.g:162:18: '0' .. '9'
pass
self.matchRange(48, 57)
finally:
pass
# $ANTLR end "DIGIT"
# $ANTLR start "ASSERTION"
def mASSERTION(self, ):
try:
_type = ASSERTION
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:164:11: ( '@{' ( options {greedy=false; } : . )* '}' )
# src/SavedFSM/Monitor.g:164:13: '@{' ( options {greedy=false; } : . )* '}'
pass
self.match("@{")
# src/SavedFSM/Monitor.g:164:18: ( options {greedy=false; } : . )*
while True: #loop4
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == 125) :
alt4 = 2
elif ((0 <= LA4_0 <= 124) or (126 <= LA4_0 <= 65535)) :
alt4 = 1
if alt4 == 1:
# src/SavedFSM/Monitor.g:164:45: .
pass
self.matchAny()
else:
break #loop4
self.match(125)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ASSERTION"
# $ANTLR start "ANNOTATION"
def mANNOTATION(self, ):
try:
_type = ANNOTATION
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:166:12: ( '[[' ( options {greedy=false; } : . )* ']]' )
# src/SavedFSM/Monitor.g:166:14: '[[' ( options {greedy=false; } : . )* ']]'
pass
self.match("[[")
# src/SavedFSM/Monitor.g:166:19: ( options {greedy=false; } : . )*
while True: #loop5
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == 93) :
LA5_1 = self.input.LA(2)
if (LA5_1 == 93) :
alt5 = 2
elif ((0 <= LA5_1 <= 92) or (94 <= LA5_1 <= 65535)) :
alt5 = 1
elif ((0 <= LA5_0 <= 92) or (94 <= LA5_0 <= 65535)) :
alt5 = 1
if alt5 == 1:
# src/SavedFSM/Monitor.g:166:46: .
pass
self.matchAny()
else:
break #loop5
self.match("]]")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ANNOTATION"
# $ANTLR start "ML_COMMENT"
def mML_COMMENT(self, ):
try:
_type = ML_COMMENT
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:169:5: ( '/*' ( options {greedy=false; } : . )* '*/' )
# src/SavedFSM/Monitor.g:169:9: '/*' ( options {greedy=false; } : . )* '*/'
pass
self.match("/*")
# src/SavedFSM/Monitor.g:169:14: ( options {greedy=false; } : . )*
while True: #loop6
alt6 = 2
LA6_0 = self.input.LA(1)
if (LA6_0 == 42) :
LA6_1 = self.input.LA(2)
if (LA6_1 == 47) :
alt6 = 2
elif ((0 <= LA6_1 <= 46) or (48 <= LA6_1 <= 65535)) :
alt6 = 1
elif ((0 <= LA6_0 <= 41) or (43 <= LA6_0 <= 65535)) :
alt6 = 1
if alt6 == 1:
# src/SavedFSM/Monitor.g:169:41: .
pass
self.matchAny()
else:
break #loop6
self.match("*/")
#action start
_channel=HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ML_COMMENT"
# $ANTLR start "LINE_COMMENT"
def mLINE_COMMENT(self, ):
try:
_type = LINE_COMMENT
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:172:14: ( '//' ( options {greedy=false; } : . )* '\\n' )
# src/SavedFSM/Monitor.g:172:16: '//' ( options {greedy=false; } : . )* '\\n'
pass
self.match("//")
# src/SavedFSM/Monitor.g:172:21: ( options {greedy=false; } : . )*
while True: #loop7
alt7 = 2
LA7_0 = self.input.LA(1)
if (LA7_0 == 10) :
alt7 = 2
elif ((0 <= LA7_0 <= 9) or (11 <= LA7_0 <= 65535)) :
alt7 = 1
if alt7 == 1:
# src/SavedFSM/Monitor.g:172:48: .
pass
self.matchAny()
else:
break #loop7
self.match(10)
#action start
_channel=HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LINE_COMMENT"
# $ANTLR start "StringLiteral"
def mStringLiteral(self, ):
try:
_type = StringLiteral
_channel = DEFAULT_CHANNEL
# src/SavedFSM/Monitor.g:174:14: ( '\"' (~ ( '\\\\' | '\"' ) )* '\"' )
# src/SavedFSM/Monitor.g:174:16: '\"' (~ ( '\\\\' | '\"' ) )* '\"'
pass
self.match(34)
# src/SavedFSM/Monitor.g:174:20: (~ ( '\\\\' | '\"' ) )*
while True: #loop8
alt8 = 2
LA8_0 = self.input.LA(1)
if ((0 <= LA8_0 <= 33) or (35 <= LA8_0 <= 91) or (93 <= LA8_0 <= 65535)) :
alt8 = 1
if alt8 == 1:
# src/SavedFSM/Monitor.g:174:22: ~ ( '\\\\' | '\"' )
pass
if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop8
self.match(34)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "StringLiteral"
def mTokens(self):
    """Entry rule: use DFA 9 to predict the matching token rule, then run it.

    Replaces the generated 57-branch elif chain with a positional dispatch
    tuple; entry k-1 corresponds to prediction alternative k.  As in the
    original, a prediction outside 1..57 results in no rule being invoked,
    and the dead initial `alt9 = 57` assignment was dropped.
    """
    alt9 = self.dfa9.predict(self.input)
    rules = (
        self.mINTERACTION, self.mINT, self.mSTRING, self.mPLUS,
        self.mMINUS, self.mMULT, self.mDIV, self.mFULLSTOP,
        self.mRESV, self.mSEND, self.mTYPE, self.mVALUE,
        self.mBRANCH, self.mUNORDERED, self.mRECLABEL, self.mPARALLEL,
        self.mPROTOCOL, self.mASSERT, self.mGLOBAL_ESCAPE, self.mEMPTY,
        self.mROLES, self.mT__34, self.mT__35, self.mT__36,
        self.mT__37, self.mT__38, self.mT__39, self.mT__40,
        self.mT__41, self.mT__42, self.mT__43, self.mT__44,
        self.mT__45, self.mT__46, self.mT__47, self.mT__48,
        self.mT__49, self.mT__50, self.mT__51, self.mT__52,
        self.mT__53, self.mT__54, self.mT__55, self.mT__56,
        self.mT__57, self.mT__58, self.mT__59, self.mT__60,
        self.mT__61, self.mID, self.mNUMBER, self.mWHITESPACE,
        self.mASSERTION, self.mANNOTATION, self.mML_COMMENT,
        self.mLINE_COMMENT, self.mStringLiteral,
    )
    if 1 <= alt9 <= len(rules):
        rules[alt9 - 1]()
# lookup tables for DFA #9
# Machine-generated ANTLR prediction tables for decision 9 (token-type
# prediction in mTokens).  The packed unicode strings are decoded by
# DFA.unpack; presumably a run-length encoding with \uffff as the
# "no transition / invalid" sentinel -- verify against the antlr3 runtime.
# Do NOT edit by hand: regenerate from src/SavedFSM/Monitor.g instead.
DFA9_eot = DFA.unpack(
u"\1\uffff\2\44\3\uffff\1\57\1\uffff\13\44\2\uffff\2\44\4\uffff\1"
u"\44\1\uffff\7\44\6\uffff\3\44\3\uffff\17\44\1\140\1\141\4\44\1"
u"\147\1\44\1\151\1\44\1\153\1\154\1\44\1\160\23\44\2\uffff\1\u0084"
u"\2\44\1\u0087\1\u0088\1\uffff\1\44\1\uffff\1\u008a\2\uffff\3\44"
u"\1\uffff\3\44\1\u0091\2\44\1\u0094\1\u0095\12\44\1\u00a0\1\uffff"
u"\1\u00a1\1\44\2\uffff\1\44\1\uffff\6\44\1\uffff\1\44\1\u00ac\2"
u"\uffff\1\u00ad\6\44\1\u00b4\2\44\2\uffff\6\44\1\u00bd\1\u00be\1"
u"\u00bf\1\44\2\uffff\1\u00c1\3\44\1\u00c5\1\44\1\uffff\2\44\1\u00c9"
u"\1\u00ca\4\44\3\uffff\1\44\1\uffff\3\44\1\uffff\3\44\2\uffff\4"
u"\44\1\u00da\1\44\1\u00dc\1\u00dd\1\44\1\u00df\1\u00e0\2\44\1\u00e3"
u"\1\44\1\uffff\1\u00e5\2\uffff\1\44\2\uffff\1\u00e7\1\44\1\uffff"
u"\1\u00e9\1\uffff\1\44\1\uffff\1\u00eb\1\uffff\1\44\1\uffff\1\44"
u"\1\u00ee\1\uffff"
)
DFA9_eof = DFA.unpack(
u"\u00ef\uffff"
)
DFA9_min = DFA.unpack(
u"\1\11\1\155\1\164\3\uffff\1\52\1\uffff\2\105\1\131\1\101\1\122"
u"\1\116\1\101\1\123\1\114\1\115\1\141\2\uffff\1\162\1\156\4\uffff"
u"\1\145\1\uffff\1\157\1\150\1\162\1\156\1\157\1\171\1\156\6\uffff"
u"\1\154\1\160\1\162\3\uffff\1\103\1\114\1\116\1\120\1\114\1\101"
u"\1\117\1\122\1\117\1\123\1\117\1\120\1\157\1\162\1\157\2\60\1\144"
u"\1\154\1\143\1\156\1\60\1\157\1\60\1\144\2\60\1\157\1\60\1\151"
u"\1\157\1\151\1\126\1\114\1\105\1\104\1\105\1\125\1\116\1\122\1"
u"\101\1\124\1\105\1\102\1\124\1\164\1\141\1\155\2\uffff\1\60\2\145"
u"\2\60\1\uffff\1\151\1\uffff\1\60\2\uffff\2\162\1\157\1\uffff\1"
u"\156\1\162\1\156\1\60\1\101\1\123\2\60\1\105\1\103\1\104\1\114"
u"\1\117\1\122\1\101\1\131\1\157\1\154\1\60\1\uffff\1\60\1\141\2"
u"\uffff\1\143\1\uffff\1\144\1\141\1\144\1\145\1\164\1\147\1\uffff"
u"\1\102\1\60\2\uffff\1\60\1\110\1\105\1\114\1\103\1\124\1\114\1"
u"\60\1\143\1\154\2\uffff\1\164\2\145\1\143\2\165\3\60\1\105\2\uffff"
u"\1\60\1\122\1\105\1\117\1\60\1\137\1\uffff\1\157\1\145\2\60\1\162"
u"\1\164\1\160\1\143\3\uffff\1\114\1\uffff\1\105\2\114\1\uffff\1"
u"\105\2\154\2\uffff\1\145\1\151\1\164\1\145\1\60\1\104\2\60\1\123"
u"\2\60\1\144\1\157\1\60\1\163\1\uffff\1\60\2\uffff\1\103\2\uffff"
u"\1\60\1\156\1\uffff\1\60\1\uffff\1\101\1\uffff\1\60\1\uffff\1\120"
u"\1\uffff\1\105\1\60\1\uffff"
)
DFA9_max = DFA.unpack(
u"\1\175\1\156\1\164\3\uffff\1\57\1\uffff\1\117\1\105\1\131\1\101"
u"\1\122\1\116\1\122\1\123\1\114\1\115\1\162\2\uffff\1\162\1\164"
u"\4\uffff\1\165\1\uffff\1\157\1\150\1\162\1\156\1\157\1\171\1\156"
u"\6\uffff\1\164\1\160\1\162\3\uffff\1\123\1\114\1\116\1\120\1\114"
u"\1\101\1\117\1\122\1\117\1\123\1\117\1\120\1\157\1\162\1\157\2"
u"\172\1\144\1\154\1\160\1\156\1\172\1\157\1\172\1\144\2\172\1\157"
u"\1\172\1\151\1\157\1\151\1\126\1\114\1\105\1\104\1\105\1\125\1"
u"\116\1\122\1\101\1\124\1\105\1\102\1\124\1\164\1\141\1\155\2\uffff"
u"\1\172\2\145\2\172\1\uffff\1\151\1\uffff\1\172\2\uffff\2\162\1"
u"\157\1\uffff\1\156\1\162\1\156\1\172\1\101\1\123\2\172\1\105\1"
u"\103\1\104\1\114\1\117\1\122\1\101\1\131\1\157\1\154\1\172\1\uffff"
u"\1\172\1\141\2\uffff\1\143\1\uffff\1\144\1\162\1\144\1\145\1\164"
u"\1\147\1\uffff\1\102\1\172\2\uffff\1\172\1\110\1\105\1\114\1\103"
u"\1\124\1\114\1\172\1\143\1\154\2\uffff\1\164\2\145\1\143\2\165"
u"\3\172\1\105\2\uffff\1\172\1\122\1\105\1\117\1\172\1\137\1\uffff"
u"\1\157\1\145\2\172\1\162\1\164\1\160\1\143\3\uffff\1\114\1\uffff"
u"\1\105\2\114\1\uffff\1\105\2\154\2\uffff\1\145\1\151\1\164\1\145"
u"\1\172\1\104\2\172\1\123\2\172\1\144\1\157\1\172\1\163\1\uffff"
u"\1\172\2\uffff\1\103\2\uffff\1\172\1\156\1\uffff\1\172\1\uffff"
u"\1\101\1\uffff\1\172\1\uffff\1\120\1\uffff\1\105\1\172\1\uffff"
)
DFA9_accept = DFA.unpack(
u"\3\uffff\1\4\1\5\1\6\1\uffff\1\10\13\uffff\1\30\1\31\2\uffff\1"
u"\35\1\36\1\37\1\40\1\uffff\1\43\7\uffff\1\62\1\63\1\64\1\65\1\66"
u"\1\71\3\uffff\1\67\1\70\1\7\60\uffff\1\33\1\34\5\uffff\1\44\1\uffff"
u"\1\46\1\uffff\1\56\1\60\3\uffff\1\2\23\uffff\1\55\2\uffff\1\50"
u"\1\52\1\uffff\1\51\6\uffff\1\11\2\uffff\1\12\1\13\12\uffff\1\32"
u"\1\41\12\uffff\1\25\1\14\6\uffff\1\24\10\uffff\1\53\1\26\1\3\1"
u"\uffff\1\15\3\uffff\1\22\3\uffff\1\47\1\45\17\uffff\1\17\1\uffff"
u"\1\20\1\21\1\uffff\1\27\1\54\2\uffff\1\57\1\uffff\1\16\1\uffff"
u"\1\61\1\uffff\1\42\1\uffff\1\1\2\uffff\1\23"
)
DFA9_special = DFA.unpack(
u"\u00ef\uffff"
)
# Per-state transition rows; row index = DFA state, decoded the same way.
DFA9_transition = [
DFA.unpack(u"\2\46\1\uffff\2\46\22\uffff\1\46\1\uffff\1\51\5\uffff"
u"\1\31\1\32\1\5\1\3\1\23\1\4\1\7\1\6\12\45\1\34\1\24\4\uffff\1\47"
u"\1\17\1\14\2\44\1\21\1\44\1\20\10\44\1\16\1\44\1\10\1\11\1\12\1"
u"\15\1\13\4\44\1\50\3\uffff\1\44\1\uffff\1\26\1\42\1\36\1\41\1\40"
u"\1\25\2\44\1\1\5\44\1\37\1\22\1\44\1\33\1\2\1\35\1\43\5\44\1\27"
u"\1\uffff\1\30"),
DFA.unpack(u"\1\53\1\52"),
DFA.unpack(u"\1\54"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\55\4\uffff\1\56"),
DFA.unpack(u""),
DFA.unpack(u"\1\60\11\uffff\1\61"),
DFA.unpack(u"\1\62"),
DFA.unpack(u"\1\63"),
DFA.unpack(u"\1\64"),
DFA.unpack(u"\1\65"),
DFA.unpack(u"\1\66"),
DFA.unpack(u"\1\67\20\uffff\1\70"),
DFA.unpack(u"\1\71"),
DFA.unpack(u"\1\72"),
DFA.unpack(u"\1\73"),
DFA.unpack(u"\1\75\20\uffff\1\74"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\76"),
DFA.unpack(u"\1\101\4\uffff\1\77\1\100"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\103\11\uffff\1\102\5\uffff\1\104"),
DFA.unpack(u""),
DFA.unpack(u"\1\105"),
DFA.unpack(u"\1\106"),
DFA.unpack(u"\1\107"),
DFA.unpack(u"\1\110"),
DFA.unpack(u"\1\111"),
DFA.unpack(u"\1\112"),
DFA.unpack(u"\1\113"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\115\7\uffff\1\114"),
DFA.unpack(u"\1\116"),
DFA.unpack(u"\1\117"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\121\17\uffff\1\120"),
DFA.unpack(u"\1\122"),
DFA.unpack(u"\1\123"),
DFA.unpack(u"\1\124"),
DFA.unpack(u"\1\125"),
DFA.unpack(u"\1\126"),
DFA.unpack(u"\1\127"),
DFA.unpack(u"\1\130"),
DFA.unpack(u"\1\131"),
DFA.unpack(u"\1\132"),
DFA.unpack(u"\1\133"),
DFA.unpack(u"\1\134"),
DFA.unpack(u"\1\135"),
DFA.unpack(u"\1\136"),
DFA.unpack(u"\1\137"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\142"),
DFA.unpack(u"\1\143"),
DFA.unpack(u"\1\145\14\uffff\1\144"),
DFA.unpack(u"\1\146"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\150"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\152"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\155"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\4\44\1\156"
u"\14\44\1\157\10\44"),
DFA.unpack(u"\1\161"),
DFA.unpack(u"\1\162"),
DFA.unpack(u"\1\163"),
DFA.unpack(u"\1\164"),
DFA.unpack(u"\1\165"),
DFA.unpack(u"\1\166"),
DFA.unpack(u"\1\167"),
DFA.unpack(u"\1\170"),
DFA.unpack(u"\1\171"),
DFA.unpack(u"\1\172"),
DFA.unpack(u"\1\173"),
DFA.unpack(u"\1\174"),
DFA.unpack(u"\1\175"),
DFA.unpack(u"\1\176"),
DFA.unpack(u"\1\177"),
DFA.unpack(u"\1\u0080"),
DFA.unpack(u"\1\u0081"),
DFA.unpack(u"\1\u0082"),
DFA.unpack(u"\1\u0083"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u0085"),
DFA.unpack(u"\1\u0086"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0089"),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u008b"),
DFA.unpack(u"\1\u008c"),
DFA.unpack(u"\1\u008d"),
DFA.unpack(u""),
DFA.unpack(u"\1\u008e"),
DFA.unpack(u"\1\u008f"),
DFA.unpack(u"\1\u0090"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u0092"),
DFA.unpack(u"\1\u0093"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u0096"),
DFA.unpack(u"\1\u0097"),
DFA.unpack(u"\1\u0098"),
DFA.unpack(u"\1\u0099"),
DFA.unpack(u"\1\u009a"),
DFA.unpack(u"\1\u009b"),
DFA.unpack(u"\1\u009c"),
DFA.unpack(u"\1\u009d"),
DFA.unpack(u"\1\u009e"),
DFA.unpack(u"\1\u009f"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00a2"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00a3"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00a4"),
DFA.unpack(u"\1\u00a5\20\uffff\1\u00a6"),
DFA.unpack(u"\1\u00a7"),
DFA.unpack(u"\1\u00a8"),
DFA.unpack(u"\1\u00a9"),
DFA.unpack(u"\1\u00aa"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00ab"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00ae"),
DFA.unpack(u"\1\u00af"),
DFA.unpack(u"\1\u00b0"),
DFA.unpack(u"\1\u00b1"),
DFA.unpack(u"\1\u00b2"),
DFA.unpack(u"\1\u00b3"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00b5"),
DFA.unpack(u"\1\u00b6"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00b7"),
DFA.unpack(u"\1\u00b8"),
DFA.unpack(u"\1\u00b9"),
DFA.unpack(u"\1\u00ba"),
DFA.unpack(u"\1\u00bb"),
DFA.unpack(u"\1\u00bc"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00c0"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00c2"),
DFA.unpack(u"\1\u00c3"),
DFA.unpack(u"\1\u00c4"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00c6"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00c7"),
DFA.unpack(u"\1\u00c8"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00cb"),
DFA.unpack(u"\1\u00cc"),
DFA.unpack(u"\1\u00cd"),
DFA.unpack(u"\1\u00ce"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00cf"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00d0"),
DFA.unpack(u"\1\u00d1"),
DFA.unpack(u"\1\u00d2"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00d3"),
DFA.unpack(u"\1\u00d4"),
DFA.unpack(u"\1\u00d5"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00d6"),
DFA.unpack(u"\1\u00d7"),
DFA.unpack(u"\1\u00d8"),
DFA.unpack(u"\1\u00d9"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00db"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00de"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00e1"),
DFA.unpack(u"\1\u00e2"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00e4"),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00e6"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"\1\u00e8"),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00ea"),
DFA.unpack(u""),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00ec"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00ed"),
DFA.unpack(u"\12\44\7\uffff\32\44\4\uffff\1\44\1\uffff\32\44"),
DFA.unpack(u"")
]
# class definition for DFA #9
class DFA9(DFA):
    """Concrete DFA for decision 9 (token prediction); all behavior lives in the antlr3 DFA base class."""
    pass
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
    """Run MonitorLexer as a standalone command-line tool.

    The stream parameters mirror the process-global streams so callers
    (and tests) can redirect the lexer's I/O without touching sys.*.
    """
    from antlr3.main import LexerMain
    # Renamed local: the original bound it to `main`, shadowing this
    # function's own name inside its body.
    lexer_main = LexerMain(MonitorLexer)
    lexer_main.stdin = stdin
    lexer_main.stdout = stdout
    lexer_main.stderr = stderr
    lexer_main.execute(argv)
if __name__ == '__main__':
    # Entry point when the generated lexer module is executed directly.
    main(sys.argv)
"""
Return a new sorted merged list from K sorted lists, each of size N.
"""
from functools import reduce
flat_map = lambda f, xs: reduce(lambda a, b: a + b, map(f, xs))
# O(KN log KN)
def merge_lists(lists):
    """Merge K sorted lists by flattening and then sorting.

    Simple but suboptimal: sorting ignores the fact that every input
    list is already sorted (the heap-based ``merge`` is O(KN log K)).
    """
    # Comprehension replaces the old commented-out extend loop and the
    # module-level flat_map helper; the typo "flattend" is also fixed.
    flattened = [item for lst in lists for item in lst]
    return sorted(flattened)
print(merge_lists([[10, 15, 30], [12, 15, 20], [17, 20, 32]]))
# Expected output (merge_lists sorts the flattened values; the previous
# comment wrongly showed the unsorted concatenation):
# [10, 12, 15, 15, 17, 20, 20, 30, 32]
# print(reduce(lambda a, b : a + b, [1,2,3,4,5]))
import heapq
# O(KN log K)
def merge(lists):
    """Merge K sorted lists using a min-heap of size at most K.

    The heap holds one entry per input list, shaped as
    ``(value, list_index, element_index)``.  Popping yields the globally
    smallest remaining value; we then push the next element from the same
    source list, so the heap never grows beyond K entries.

    The debug ``print()`` calls of the original were removed — a library
    function should not write to stdout.
    """
    merged_list = []
    # Seed the heap with the head of every non-empty list.
    heap = [(lst[0], i, 0) for i, lst in enumerate(lists) if lst]
    heapq.heapify(heap)
    while heap:
        val, list_ind, element_ind = heapq.heappop(heap)
        merged_list.append(val)
        # Only the list that produced the minimum can supply the next
        # candidate, so advance just that list's cursor.
        if element_ind + 1 < len(lists[list_ind]):
            next_tuple = (lists[list_ind][element_ind + 1],
                          list_ind,
                          element_ind + 1)
            heapq.heappush(heap, next_tuple)
    return merged_list
print("*" * 100)
# Demo: heap-based merge of four sorted lists; the output is fully sorted.
print(merge([[10, 15, 30, 31], [12, 15, 20, 21], [17, 20, 32, 33], [1, 2, 3, 4]]))
# [1, 2, 3, 4, 10, 12, 15, 15, 17, 20, 20, 21, 30, 31, 32, 33]
import numpy as np
from .localtrack import local_tracker
from dipy.align import Bunch
from dipy.tracking import utils
# enum TissueClass (tissue_classifier.pxd) is not accessible
# from here. To be changed when minimal cython version > 0.21.
# cython 0.21 - cpdef enum to export values into Python-level namespace
# https://github.com/cython/cython/commit/50133b5a91eea348eddaaad22a606a7fa1c7c457
# Python-level mirror of the Cython TissueClass enum values used below.
TissueTypes = Bunch(OUTSIDEIMAGE=-1, INVALIDPOINT=0, TRACKPOINT=1, ENDPOINT=2)
class LocalTracking(object):
    """A streamline generator for local tracking methods"""

    @staticmethod
    def _get_voxel_size(affine):
        """Computes the voxel sizes of an image from the affine.

        Checks that the affine does not have any shear because local_tracker
        assumes that the data is sampled on a regular grid.
        """
        lin = affine[:3, :3]
        dotlin = np.dot(lin.T, lin)
        # Check that the affine is well behaved: for a shear-free affine,
        # lin.T @ lin is diagonal, so any off-diagonal term means shear.
        if not np.allclose(np.triu(dotlin, 1), 0., atol=1e-5):
            msg = ("The affine provided seems to contain shearing, data must "
                   "be acquired or interpolated on a regular grid to be used "
                   "with `LocalTracking`.")
            raise ValueError(msg)
        # Diagonal of lin.T @ lin holds the squared voxel edge lengths.
        return np.sqrt(dotlin.diagonal())

    def __init__(self, direction_getter, tissue_classifier, seeds, affine,
                 step_size, max_cross=None, maxlen=500, fixedstep=True,
                 return_all=True):
        """Creates streamlines by using local fiber-tracking.

        Parameters
        ----------
        direction_getter : instance of DirectionGetter
            Used to get directions for fiber tracking.
        tissue_classifier : instance of TissueClassifier
            Identifies endpoints and invalid points to inform tracking.
        seeds : array (N, 3)
            Points to seed the tracking. Seed points should be given in point
            space of the track (see ``affine``).
        affine : array (4, 4)
            Coordinate space for the streamline point with respect to voxel
            indices of input data. This affine can contain scaling, rotational,
            and translational components but should not contain any shearing.
            An identity matrix can be used to generate streamlines in "voxel
            coordinates" as long as isotropic voxels were used to acquire the
            data.
        step_size : float
            Step size used for tracking.
        max_cross : int or None
            The maximum number of direction to track from each seed in crossing
            voxels. By default all initial directions are tracked.
        maxlen : int
            Maximum number of steps to track from seed. Used to prevent
            infinite loops.
        fixedstep : bool
            If true, a fixed stepsize is used, otherwise a variable step size
            is used.
        return_all : bool
            If true, return all generated streamlines, otherwise only
            streamlines reaching end points or exiting the image.
        """
        self.direction_getter = direction_getter
        self.tissue_classifier = tissue_classifier
        self.seeds = seeds
        if affine.shape != (4, 4):
            raise ValueError("affine should be a (4, 4) array.")
        self.affine = affine
        # Validates the affine (no shear) as a side effect.
        self._voxel_size = self._get_voxel_size(affine)
        self.step_size = step_size
        self.fixed = fixedstep
        self.max_cross = max_cross
        self.maxlen = maxlen
        self.return_all = return_all

    def __iter__(self):
        # Make tracks, move them to point space and return
        track = self._generate_streamlines()
        return utils.move_streamlines(track, self.affine)

    def _generate_streamlines(self):
        """A streamline generator"""
        # Local aliases keep the tracking loop below readable.
        N = self.maxlen
        dg = self.direction_getter
        tc = self.tissue_classifier
        ss = self.step_size
        fixed = self.fixed
        max_cross = self.max_cross
        vs = self._voxel_size

        # Get inverse transform (lin/offset) for seeds
        inv_A = np.linalg.inv(self.affine)
        lin = inv_A[:3, :3]
        offset = inv_A[:3, 3]

        # Pre-allocated buffers for the forward (F) and backward (B) halves
        # of each streamline; local_tracker fills them in place.
        F = np.empty((N + 1, 3), dtype=float)
        B = F.copy()
        for s in self.seeds:
            # Map the seed from point space into voxel coordinates.
            s = np.dot(lin, s) + offset
            directions = dg.initial_direction(s)
            if directions.size == 0 and self.return_all:
                # only the seed position
                yield [s]
            directions = directions[:max_cross]
            for first_step in directions:
                # Track forward from the seed along first_step.
                stepsF, tissue_class = local_tracker(dg, tc, s, first_step,
                                                     vs, F, ss, fixed)
                if not (self.return_all or
                        tissue_class == TissueTypes.ENDPOINT or
                        tissue_class == TissueTypes.OUTSIDEIMAGE):
                    continue
                # Track backward along the opposite initial direction.
                first_step = -first_step
                stepsB, tissue_class = local_tracker(dg, tc, s, first_step,
                                                     vs, B, ss, fixed)
                if not (self.return_all or
                        tissue_class == TissueTypes.ENDPOINT or
                        tissue_class == TissueTypes.OUTSIDEIMAGE):
                    continue
                if stepsB == 1:
                    streamline = F[:stepsF].copy()
                else:
                    # Reverse the backward half and drop its first entry
                    # before joining with the forward half.  (Presumably
                    # B[0] duplicates the seed already stored in F[0] —
                    # TODO confirm against the localtrack implementation.)
                    parts = (B[stepsB-1:0:-1], F[:stepsF])
                    streamline = np.concatenate(parts, axis=0)
                yield streamline
#!/usr/bin/python
# (c) 2017, <NAME> <<EMAIL>>
#
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Ansible module documentation blocks.  Fixed here: RETURN previously
# documented keys "change" and "out", but the module actually returns
# "changed", "msg", "resource_type" and "resource_name" (see
# Resource._create_result / Resource.success below); DOCUMENTATION also
# claimed a max_wait default of 3 while the argument_spec default is 5.
DOCUMENTATION = '''
---
module: pacemaker_is_active
short_description: Check if a resource is active.
version_added: "2.3"
author: "<NAME> (chem)"
description:
   - Check if a resource is completely started in a pacemaker cluster.
   - This works for master/slave, clone, primitive resource.
options:
  resource:
    description:
      - The name of the resource to check, without any "-clone", "-master"
        suffix.
    required: true
  max_wait:
    description:
      - How many seconds should we wait for the resource to be active.
    required: false
    default: 5
'''

EXAMPLES = '''
---
- name: Ensure galera is started
  hosts: localhost
  gather_facts: no
  tasks:
    - name: galera ready
      pacemaker_is_active:
        resource: galera
        max_wait: 10
'''

RETURN = '''
changed:
    description: Always false, the module checks state without changing it.
    type: bool
resource_type:
    description: Detected pacemaker resource type (master, clone or primitive).
    type: string
resource_name:
    description: Name of the checked resource.
    type: string
msg:
    description: A short summary of the resource state.
    type: string
    sample: "primitive resource galera is active"
'''

ANSIBLE_METADATA = r"""
status:
  - stableinterface
supported_by: committer
version: "1.0"
"""
# Should be at the top (flake8 E402), but ansible requires that module
# import being after metadata.
import subprocess
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from lxml import etree
class Resource(object):
    """Base class for pacemaker resources; also acts as a resource factory.

    Subclasses override ``get_type`` and provide ``expected_count`` /
    ``current_count``; ``from_type`` inspects the live cluster status and
    returns the appropriate subclass instance.
    """

    # Overridden by subclasses ('master', 'clone', 'primitive'); None means
    # the resource type could not be determined.
    get_type = None

    def _filter_xpath(self, xpath):
        """Run *xpath* against the current cluster status (crm_mon XML)."""
        # Fix: the original passed {'check_rc': True} as a positional dict,
        # which only behaved like check_rc=True because a non-empty dict is
        # truthy; pass the keyword argument explicitly instead.
        xml_string = self.mod.run_command(['crm_mon', '-r', '--as-xml'],
                                          check_rc=True)[1]
        tree = etree.fromstring(str(xml_string))
        return tree.xpath(xpath)

    def _current_count(self, role):
        """Count active, healthy instances of the resource in *role*."""
        return int(self._filter_xpath(
            "count(//resource[@id='{0}' and {1} and {2} and {3} and {4}])"
            .format(self.name,
                    "@orphaned='false'",
                    "@failed='false'",
                    "@active='true'",
                    "@role='{0}'".format(role),
                    )
        ))

    def _get_crm_resource(self, prop):
        """Return (rc, stdout, stderr) of crm_resource for meta attr *prop*."""
        return self.mod.run_command(
            ['crm_resource', '-r',
             self.name,
             '--meta', '-g', prop]
        )

    def _create_result(self, msg):
        """Build the common result payload shared by fail() and success()."""
        return {
            'resource_type': self.get_type,
            'resource_name': self.name,
            'msg': msg,
        }

    def __init__(self, mod, resource_name):
        # mod is the AnsibleModule instance used to run cluster commands
        # and to report the final result.
        self.mod = mod
        self.name = resource_name

    def fail(self, msg):
        """Exit the Ansible module with a failure result."""
        result = self._create_result(msg)
        return self.mod.fail_json(**result)

    def success(self, msg):
        """Exit the Ansible module with a success (unchanged) result."""
        result = self._create_result(msg)
        result['changed'] = False
        return self.mod.exit_json(**result)

    def from_type(self):
        """Infer the type of a resource from its name. Factory method.

        Using the resource name as a parameter it returns a "Clone",
        "Master" or "Primitive" instance. If no resource matching the name
        could be found, it returns this plain "Resource" instance.
        """
        res_array = self._filter_xpath(
            '//resources/*[contains(@id,"{0}")]'.format(self.name)
        )
        if len(res_array) == 0:
            return self
        res = res_array[0]
        if res.tag == 'resource':
            return Primitive(self.mod, self.name)
        elif res.tag == 'clone':
            if res.get('multi_state') == 'false':
                return Clone(self.mod, self.name)
            elif res.get('multi_state') == 'true':
                return Master(self.mod, self.name)
        return self
class Master(Resource):
    """Representation of a master/slave (multi-state) resource."""

    get_type = 'master'

    def expected_count(self):
        """Return the expected number of master instances of the resource.

        Reads the ``master-max`` meta attribute via crm_resource and
        returns it when present (rc == 0), or 1 when the attribute is not
        set (rc == 6). Any other return code is treated as an error.
        """
        rc, stdout, stderr = self._get_crm_resource('master-max')
        if rc == 0:
            return int(stdout)
        elif rc == 6:
            # Attribute not set: pacemaker's default master-max is 1.
            return 1
        # Fixed error-message typos ("Unknow error geting").
        return self.fail(
            "Unknown error getting crm_resource for master '{0}'."
            .format(self.name)
        )

    def current_count(self):
        """Count instances currently promoted to the Master role."""
        return self._current_count('Master')
class Clone(Resource):
    """Representation of a clone resource."""

    get_type = 'clone'

    def _pipe_no_shell(self, cmd1_array, cmd2_array):
        """Pipe cmd1_array into cmd2_array without using shell interpolation."""
        # get_bin_path(required=True) aborts the module early with a clear
        # message when either binary is missing.
        self.mod.get_bin_path(cmd1_array[0], required=True)
        self.mod.get_bin_path(cmd2_array[0], required=True)
        cmd1 = subprocess.Popen(cmd1_array, stdout=subprocess.PIPE)
        cmd2 = subprocess.Popen(cmd2_array,
                                stdin=cmd1.stdout,
                                stdout=subprocess.PIPE)
        return cmd2.communicate()

    def expected_count(self):
        """Return the expected number of clone instances on the cluster.

        Returns the ``clone-max`` meta attribute when present (rc == 0).
        When clone-max is not set (rc == 6), count the nodes carrying the
        "<resourcename>-role=true" property (composable HA); if that count
        is 0 (pre-composable HA), fall back to the total number of cluster
        nodes. Any other return code is treated as an error.
        """
        rc, stdout, stderr = self._get_crm_resource('clone-max')
        if rc == 0:
            return int(stdout)
        elif rc == 6:
            count = int(self._pipe_no_shell(
                ['pcs', 'property'],
                ['grep', '-c',
                 "{0}-role=true".format(self.name)]
            )[0])
            if count == 0:
                return int(self._pipe_no_shell(['crm_node', '-l'],
                                               ['wc', '-l'])[0])
            else:
                return count
        # Fixed copy-paste bug: the original message said "for master"
        # inside the Clone class, and had typos ("Unknow error geting").
        return self.fail(
            "Unknown error getting crm_resource for clone '{0}'."
            .format(self.name)
        )

    def current_count(self):
        """Count clone instances currently in the Started role."""
        return self._current_count("Started")
class Primitive(Clone):
    "Representation of a primitive resource."
    get_type = 'primitive'

    def expected_count(self):
        # A primitive (non-cloned) resource runs on exactly one node.
        return 1
def is_resource_active(mod):
    """Exit the module with success once the resource is fully active.

    Polls the cluster once per second, up to ``max_wait`` attempts, until
    the number of active instances matches the expected count for the
    resource's type:

    a) master/slave resources: the required number of promoted masters
       (e.g. galera on every node where it is supposed to run — all
       controllers pre-composable, galera-role nodes in composable HA —
       while redis needs a master on only one node).
    b) cloned resources: started on every node that should run them
       (all cluster nodes pre-composable, property-counted nodes after).
    c) primitive resources: started on exactly one node (A/P resources
       like cinder-volume or VIPs).
    """
    timeout = int(mod.params["max_wait"])
    resource = Resource(mod, mod.params["resource"]).from_type()
    if resource.get_type is None:
        return resource.fail("Resource '{0}' doesn't exist in the cib.".format(
            resource.name
        ))
    expected = resource.expected_count()
    attempt = 0
    # NOTE(review): the guard fires at attempt == max_wait - 1, i.e. after
    # max_wait - 1 one-second sleeps; kept as-is to preserve behavior.
    while resource.current_count() != expected:
        if attempt >= timeout - 1:
            return resource.fail(
                "Max wait time of {0} seconds reached waiting for {1}".format(
                    timeout, resource.name
                ))
        sleep(1)
        attempt += 1
    return resource.success("{0} resource {1} is active".format(
        resource.get_type, resource.name))
def main():
    """Ansible entry point: parse module arguments and run the check."""
    mod = AnsibleModule(
        argument_spec=dict(
            resource=dict(required=True),
            max_wait=dict(default=5),  # in seconds
        )
    )
    return is_resource_active(mod)


if __name__ == '__main__':
    main()
'''
Data list contains all users' info. Each user's info must be a list.
First Element: 0 (int)
Second Element: name (str)
Third Element: username (str)
Fourth Element: toph link (str)
Fifth Element: dimik link (str)
Sixth Element: uri link (str)
Note: If any user does not have an account, leave an empty string in that slot.
'''
# NOTE(review): "<NAME>" entries are anonymization placeholders from the
# data source, not literal user names.
data = [
    [0, "<NAME>", "mahin", "", "",
     "https://urionlinejudge.com.br/judge/en/profile/239509"],
    [0, "<NAME>", "majed", "", "",
     "https://urionlinejudge.com.br/judge/en/profile/229317"],
    [0, "<NAME>", "mdvirus", "https://toph.co/u/mdvirus",
     "https://dimikoj.com/users/53/mdvirus", "https://urionlinejudge.com.br/judge/en/profile/223624"],
    [0, "<NAME>", "HackersBoss",
     "https://toph.co/u/HackersBoss", "", ""],
    [0, "<NAME>", "newbie_mukit", "https://toph.co/u/newbie_mukit",
     "", "https://urionlinejudge.com.br/judge/en/profile/228785"],
    [0, "<NAME>", "toukir48bit",
     "https://toph.co/u/toukir48bit", "", ""],
    [0, "<NAME>", "SifatTheCoder",
     "https://toph.co/u/SifatTheCoder", "", ""],
    [0, "<NAME>", "HelloRakib",
     "https://toph.co/u/HelloRakib", "", ""],
    [0, "<NAME>", "fiveG_coder",
     "https://toph.co/u/fiveG_coder", "", ""],
    [0, "Most Rumana Ak<NAME>", "programmer_upa",
     "https://toph.co/u/programmer_upa", "", ""],
    [0, "<NAME>", "Uniqe_coder",
     "https://toph.co/u/Uniqe_coder", "", ""],
    [0, "<NAME>", "Simple_coder",
     "https://toph.co/u/Simple_coder", "", ""],
    [0, "Most Masuda Akter Momota", "itsmomota",
     "https://toph.co/u/itsmomota", "", ""],
    [0, "<NAME>", "Scanfl", "https://toph.co/u/Scanfl", "", ""],
    [0, "<NAME>", "Smart_coder",
     "https://toph.co/u/Smart_coder", "", ""]
]
from evaluation import AbstractEvaluation
from alignment.data import AlignmentMatch
from tools import ConfigConsts
import bert_score
class BertScoreMetric(AbstractEvaluation):
    """Evaluate alignment matches with BERTScore (precision, recall, F1)."""

    def getMetricName(self):
        return "BertScore"

    def evaluate(self, alignmentMatches):
        # Collect candidate/reference sentence pairs in matching order.
        candidates = [match.process_line for match in alignmentMatches]
        references = [match.align_line for match in alignmentMatches]
        model_name = self.local_config_utils.getValue(
            ConfigConsts.CONF_SEC_EVALUATION,
            ConfigConsts.CONF_METRICS_BERT_SCORE_MODEL,
            "bert-base-multilingual-cased")
        all_preds, hashBert = bert_score.score(
            candidates,
            references,
            model_type=model_name,
            idf=False,
            return_hash=True,
            verbose=False)
        precisions = all_preds[0].tolist()
        recalls = all_preds[1].tolist()
        f1_scores = all_preds[2].tolist()
        metric = self.getMetricName()
        # Write per-match scores back onto each alignment match.
        for P, R, F1, match in zip(precisions, recalls, f1_scores,
                                   alignmentMatches):
            match.addEvalationMetricResult(metric + "_Hash", hashBert)
            match.addEvalationMetricResult(metric + "_P", P)
            match.addEvalationMetricResult(metric + "_R", R)
            match.addEvalationMetricResult(metric + "_F1", F1)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-17 20:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtailcommerce.products.models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.4: initial migration creating the
    # Product and ProductVariant tables for the products app.

    initial = True

    dependencies = [
        ('wagtailcommerce_stores', '0001_initial'),
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='name')),
                ('active', models.BooleanField(verbose_name='active')),
                ('available_on', models.DateTimeField(blank=True, null=True, verbose_name='available on')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created on')),
                ('content_type', models.ForeignKey(on_delete=models.SET(wagtailcommerce.products.models.get_default_product_content_type), related_name='products', to='contenttypes.ContentType', verbose_name='content type')),
                ('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='wagtailcommerce_stores.Store')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ProductVariant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sku', models.CharField(max_length=32, unique=True, verbose_name='SKU')),
                ('name', models.CharField(blank=True, max_length=100, verbose_name='name')),
                ('price', models.DecimalField(decimal_places=2, max_digits=12, verbose_name='price')),
                ('content_type', models.ForeignKey(on_delete=models.SET(wagtailcommerce.products.models.get_default_product_variant_content_type), related_name='product_variants', to='contenttypes.ContentType', verbose_name='content type')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='wagtailcommerce_products.Product')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
# -*- coding: utf-8 -*-
def fibonacci_line(limit=10):
    """Return Fibonacci numbers below *limit* as a comma-terminated string.

    Matches the original inline loop's output format exactly: every number
    is followed by a trailing comma and no newline is added, e.g.
    "0,1,1,2,3,5,8," for the default limit of 10.
    """
    a, b = 0, 1
    pieces = []
    while a < limit:
        pieces.append(str(a))
        a, b = b, a + b
    return "".join(piece + "," for piece in pieces)


if __name__ == "__main__":
    # Same observable output as the original loop that did
    # print(a, end=',') for each Fibonacci number below 10.
    # The large block of commented-out list/slicing experiments that
    # preceded it was dead code and has been removed.
    print(fibonacci_line(10), end="")