hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f739bca44667cc162cc953a6ea1d4d1f45486694 | 7,581 | py | Python | mmdet/ops/point_sample.py | qiguming/mmdetection | 741b638a1fc60408f46d673457a3e1e513d18cf1 | [
"Apache-2.0"
] | 295 | 2020-07-16T13:03:29.000Z | 2022-03-29T05:20:12.000Z | mmdet/ops/point_sample.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 136 | 2021-07-11T11:26:54.000Z | 2022-03-31T02:45:34.000Z | mmdet/ops/point_sample.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 84 | 2021-05-29T06:58:14.000Z | 2022-03-31T07:44:10.000Z | # Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
def normalize(grid):
    """Map grid coordinates from grid_sample's [-1, 1] range into [0, 1].

    Args:
        grid (Tensor): Grid with values in [-1, 1].

    Returns:
        Tensor: Grid with values in [0, 1].
    """
    # Multiplying by 0.5 is exactly equivalent to dividing by 2 for floats.
    return 0.5 * (grid + 1.0)
def denormalize(grid):
    """Map grid coordinates from [0, 1] back into grid_sample's [-1, 1] range.

    Args:
        grid (Tensor): Grid with values in [0, 1].

    Returns:
        Tensor: Grid with values in [-1, 1].
    """
    # Inverse of ``normalize``: scale then shift.
    return 2.0 * grid - 1.0
def generate_grid(num_grid, size, device):
    """Create a regular square grid of points in [0, 1] x [0, 1] coordinate
    space, replicated once per region.

    Args:
        num_grid (int): Number of grids to produce, one for each region.
        size (tuple(int, int)): Side sizes (h, w) of the regular grid.
        device (torch.device): Desired device of the returned tensor.

    Returns:
        torch.Tensor: Shape (num_grid, size[0]*size[1], 2), the coordinates
        of the regular grid points.
    """
    # An identity affine transform yields an evenly spaced sampling grid
    # in grid_sample's [-1, 1] convention.
    theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
    base_grid = F.affine_grid(
        theta, torch.Size((1, 1, *size)), align_corners=False)
    # Shift from [-1, 1] into the [0, 1] convention used by this module.
    base_grid = (base_grid + 1.0) / 2.0
    # Flatten the spatial dims and broadcast (no copy) over the regions.
    return base_grid.view(1, -1, 2).expand(num_grid, -1, -1)
def rel_roi_point_to_abs_img_point(rois, rel_roi_points):
    """Convert RoI-relative point coordinates into absolute image
    coordinates.

    Args:
        rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5); a fifth
            leading column is treated as a batch index and dropped.
        rel_roi_points (Tensor): Points inside each RoI, relative to the
            RoI, range (0, 1), shape (N, P, 2).

    Returns:
        Tensor: Absolute image coordinates, shape (N, P, 2).
    """
    with torch.no_grad():
        assert rel_roi_points.size(0) == rois.size(0)
        assert rois.dim() == 2
        assert rel_roi_points.dim() == 3
        assert rel_roi_points.size(2) == 2
        # Drop the batch-index column if present.
        if rois.size(1) == 5:
            rois = rois[:, 1:]
        abs_img_points = rel_roi_points.clone()
        # Scale by RoI width/height, then shift by the RoI origin.
        widths = rois[:, None, 2] - rois[:, None, 0]
        heights = rois[:, None, 3] - rois[:, None, 1]
        abs_img_points[:, :, 0] = \
            abs_img_points[:, :, 0] * widths + rois[:, None, 0]
        abs_img_points[:, :, 1] = \
            abs_img_points[:, :, 1] * heights + rois[:, None, 1]
        return abs_img_points
def abs_img_point_to_rel_img_point(abs_img_points,
                                   img_shape,
                                   spatial_scale=1.):
    """Convert absolute image coordinates into image-relative coordinates
    for sampling.

    Args:
        abs_img_points (Tensor): Absolute image coordinates, shape (N, P, 2).
        img_shape (tuple): (height, width) of the image or feature map.
        spatial_scale (float): Scale points by this factor. Default: 1.

    Returns:
        Tensor: Relative point coordinates for sampling, shape (N, P, 2).
    """
    assert isinstance(img_shape, tuple) and len(img_shape) == 2
    h, w = img_shape
    # Divide x by the width and y by the height, then apply the
    # feature-map scale in one broadcasted expression.
    wh = torch.tensor([w, h], dtype=torch.float,
                      device=abs_img_points.device).view(1, 1, 2)
    return abs_img_points / wh * spatial_scale
def rel_roi_point_to_rel_img_point(rois,
                                   rel_roi_points,
                                   img_shape,
                                   spatial_scale=1.):
    """Convert RoI-relative point coordinates into image-relative
    coordinates for sampling (composition of the two helpers above).

    Args:
        rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5).
        rel_roi_points (Tensor): Points inside each RoI, relative to the
            RoI, range (0, 1), shape (N, P, 2).
        img_shape (tuple): (height, width) of the image or feature map.
        spatial_scale (float): Scale points by this factor. Default: 1.

    Returns:
        Tensor: Image-relative point coordinates for sampling,
        shape (N, P, 2).
    """
    abs_points = rel_roi_point_to_abs_img_point(rois, rel_roi_points)
    return abs_img_point_to_rel_img_point(abs_points, img_shape,
                                          spatial_scale)
def point_sample(input, points, align_corners=False, **kwargs):
    """Wrapper around :func:`torch.nn.functional.grid_sample` that accepts
    3D point tensors and expects coordinates in [0, 1] x [0, 1] rather than
    [-1, 1].

    Args:
        input (Tensor): Feature map, shape (N, C, H, W).
        points (Tensor): Normalized point coordinates in [0, 1] x [0, 1],
            shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
        align_corners (bool): Passed through to grid_sample. Default: False.

    Returns:
        Tensor: Sampled features, shape (N, C, P) or (N, C, Hgrid, Wgrid).
    """
    squeeze_back = points.dim() == 3
    if squeeze_back:
        # grid_sample wants (N, Hgrid, Wgrid, 2); treat the P points
        # as a (P, 1) grid and undo the extra dim afterwards.
        points = points.unsqueeze(2)
    # Map [0, 1] coordinates into grid_sample's [-1, 1] convention.
    output = F.grid_sample(
        input, 2.0 * points - 1.0, align_corners=align_corners, **kwargs)
    return output.squeeze(3) if squeeze_back else output
class SimpleRoIAlign(nn.Module):
    """Simple RoI align used by PointRend, faster than the standard RoIAlign.

    RoI features are obtained by bilinearly sampling a regular grid of
    points inside each RoI directly from the feature map.
    """

    def __init__(self, out_size, spatial_scale, aligned=True):
        """
        Args:
            out_size (tuple[int]): h, w
            spatial_scale (float): scale the input boxes by this number
            aligned (bool): if False, use the legacy implementation in
                MMDetection, align_corners=True will be used in
                F.grid_sample. If True, align the results more perfectly.
        """
        super(SimpleRoIAlign, self).__init__()
        self.out_size = _pair(out_size)
        self.spatial_scale = float(spatial_scale)
        # to be consistent with other RoI ops
        self.use_torchvision = False
        self.aligned = aligned

    def forward(self, features, rois):
        """Sample grid-point features for each RoI.

        Args:
            features (Tensor): Feature maps, shape (num_imgs, C, H, W).
            rois (Tensor): RoIs with a leading batch index, shape
                (num_rois, 5).

        Returns:
            Tensor: RoI features, shape (num_rois, C, *out_size).
        """
        num_imgs = features.size(0)
        num_rois = rois.size(0)
        rel_roi_points = generate_grid(
            num_rois, self.out_size, device=rois.device)
        point_feats = []
        for batch_ind in range(num_imgs):
            # unravel batch dim
            feat = features[batch_ind].unsqueeze(0)
            inds = (rois[:, 0].long() == batch_ind)
            if inds.any():
                # feat.shape[2:] is a torch.Size, which is a tuple subclass
                rel_img_points = rel_roi_point_to_rel_img_point(
                    rois[inds], rel_roi_points[inds], feat.shape[2:],
                    self.spatial_scale).unsqueeze(0)
                point_feat = point_sample(
                    feat, rel_img_points, align_corners=not self.aligned)
                point_feat = point_feat.squeeze(0).transpose(0, 1)
                point_feats.append(point_feat)
        channels = features.size(1)
        roi_feats = torch.cat(point_feats, dim=0)
        roi_feats = roi_feats.reshape(num_rois, channels, *self.out_size)
        return roi_feats

    def __repr__(self):
        # Fixed: the format string previously left the parenthesis
        # unclosed, producing e.g. 'SimpleRoIAlign(out_size=..., spatial_scale=...'
        format_str = self.__class__.__name__
        format_str += '(out_size={}, spatial_scale={})'.format(
            self.out_size, self.spatial_scale)
        return format_str
| 34.616438 | 101 | 0.599921 | rt torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
def normalize(grid):
return (grid + 1.0) / 2.0
def denormalize(grid):
return grid * 2.0 - 1.0
def generate_grid(num_grid, size, device):
affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
grid = F.affine_grid(
affine_trans, torch.Size((1, 1, *size)), align_corners=False)
grid = normalize(grid)
return grid.view(1, -1, 2).expand(num_grid, -1, -1)
def rel_roi_point_to_abs_img_point(rois, rel_roi_points):
with torch.no_grad():
assert rel_roi_points.size(0) == rois.size(0)
assert rois.dim() == 2
assert rel_roi_points.dim() == 3
assert rel_roi_points.size(2) == 2
if rois.size(1) == 5:
rois = rois[:, 1:]
abs_img_points = rel_roi_points.clone()
abs_img_points[:, :, 0] = abs_img_points[:, :, 0] * (
rois[:, None, 2] - rois[:, None, 0])
abs_img_points[:, :, 1] = abs_img_points[:, :, 1] * (
rois[:, None, 3] - rois[:, None, 1])
abs_img_points[:, :, 0] += rois[:, None, 0]
abs_img_points[:, :, 1] += rois[:, None, 1]
return abs_img_points
def abs_img_point_to_rel_img_point(abs_img_points,
img_shape,
spatial_scale=1.):
assert isinstance(img_shape, tuple) and len(img_shape) == 2
h, w = img_shape
scale = torch.tensor([w, h],
dtype=torch.float,
device=abs_img_points.device)
scale = scale.view(1, 1, 2)
rel_img_points = abs_img_points / scale * spatial_scale
return rel_img_points
def rel_roi_point_to_rel_img_point(rois,
rel_roi_points,
img_shape,
spatial_scale=1.):
abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)
rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img_shape,
spatial_scale)
return rel_img_point
def point_sample(input, points, align_corners=False, **kwargs):
add_dim = False
if points.dim() == 3:
add_dim = True
points = points.unsqueeze(2)
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
class SimpleRoIAlign(nn.Module):
def __init__(self, out_size, spatial_scale, aligned=True):
super(SimpleRoIAlign, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = float(spatial_scale)
self.use_torchvision = False
self.aligned = aligned
def forward(self, features, rois):
num_imgs = features.size(0)
num_rois = rois.size(0)
rel_roi_points = generate_grid(
num_rois, self.out_size, device=rois.device)
point_feats = []
for batch_ind in range(num_imgs):
feat = features[batch_ind].unsqueeze(0)
inds = (rois[:, 0].long() == batch_ind)
if inds.any():
rel_img_points = rel_roi_point_to_rel_img_point(
rois[inds], rel_roi_points[inds], feat.shape[2:],
self.spatial_scale).unsqueeze(0)
point_feat = point_sample(
feat, rel_img_points, align_corners=not self.aligned)
point_feat = point_feat.squeeze(0).transpose(0, 1)
point_feats.append(point_feat)
channels = features.size(1)
roi_feats = torch.cat(point_feats, dim=0)
roi_feats = roi_feats.reshape(num_rois, channels, *self.out_size)
return roi_feats
def __repr__(self):
format_str = self.__class__.__name__
format_str += '(out_size={}, spatial_scale={}'.format(
self.out_size, self.spatial_scale)
return format_str
| true | true |
f739bcd914e9dfcfeea5216d022fb72a6c908765 | 1,526 | py | Python | prediction/bimod/qglc/cactus_ccs_comp.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | [
"MIT"
] | 3 | 2021-05-17T20:19:41.000Z | 2022-02-01T21:43:30.000Z | prediction/bimod/qglc/cactus_ccs_comp.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | [
"MIT"
] | null | null | null | prediction/bimod/qglc/cactus_ccs_comp.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | [
"MIT"
] | null | null | null | #!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
"""
"""
from matplotlib import pyplot as plt
import numpy as np
# set up plot fonts
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Helvetica', 'Arial']
rcParams['font.size'] = 11
# common settings for the same style across plots
f_size = (3.5, 4)
bs = {
    'linewidth': 1., 'align': 'center', 'width': 0.75, 'capstyle': 'round', 'capsize': 6,
    'error_kw': {
        'elinewidth': 1., 'ecolor': 'k'
    }
}
fig = plt.figure(figsize=f_size)
ax = fig.add_subplot(111)
# reference CCS values shown as dotted horizontal lines
ax.axhline(202.8, c='k', ls=':', lw=1.5)
ax.axhline(209.5, c='k', ls=':', lw=1.5)
labels = ['7', '5', '3', "3'", "4'"]
x = [3, 4, 5, 2, 1]
# one CCS value per bar, loaded from disk (order matches x/labels/ec)
ccs = np.loadtxt('cactus_y.txt')
ec = ['orchid', 'yellow', 'lightpink', 'royalblue', 'darkorange']
# draw each bar twice: once filled with its color, once as a plain outline
for x_, ccs_, ec_ in zip(x, ccs, ec):
    ax.bar(x_, ccs_, edgecolor=ec_, ecolor=ec_, color=ec_, fill=True, **bs)
    ax.bar(x_, ccs_, fill=False, **bs)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels(labels, fontstyle='italic', fontsize=14)
ax.set_ylim([195, 215])
for d in ['top', 'right']:
    ax.spines[d].set_visible(False)
ax.set_ylabel(r'CCS ($\AA^2$)')
# apply the layout BEFORE saving; calling tight_layout() after savefig()
# (as the original did) has no effect on the written file
plt.tight_layout()
# save figure
png = 'qglc_cactus_comp.png'
plt.savefig(png, dpi=400, bbox_inches='tight')
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Helvetica', 'Arial']
rcParams['font.size'] = 11
f_size = (3.5, 4)
bs = {
'linewidth': 1., 'align': 'center', 'width': 0.75, 'capstyle': 'round', 'capsize': 6,
'error_kw': {
'elinewidth': 1., 'ecolor': 'k'
}
}
fig = plt.figure(figsize=f_size)
ax = fig.add_subplot(111)
ax.axhline(202.8, c='k', ls=':', lw=1.5)
ax.axhline(209.5, c='k', ls=':', lw=1.5)
labels = ['7', '5', '3', "3'", "4'"]
x = [3, 4, 5, 2, 1]
ccs = np.loadtxt('cactus_y.txt')
ec = ['orchid', 'yellow', 'lightpink', 'royalblue', 'darkorange']
for x_, ccs_, ec_ in zip(x, ccs, ec):
ax.bar(x_, ccs_, edgecolor=ec_, ecolor=ec_, color=ec_, fill=True, **bs)
ax.bar(x_, ccs_, fill=False, **bs)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels(labels, fontstyle='italic', fontsize=14)
ax.set_ylim([195, 215])
for d in ['top', 'right']:
ax.spines[d].set_visible(False)
ax.set_ylabel(r'CCS ($\AA^2$)')
png = 'qglc_cactus_comp.png'
plt.savefig(png, dpi=400, bbox_inches='tight')
plt.tight_layout()
| true | true |
f739be22973bf9b06e33d5e55a97e26be3aff193 | 2,574 | py | Python | src/primaires/joueur/commandes/decrire/__init__.py | vlegoff/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 14 | 2015-08-21T19:15:21.000Z | 2017-11-26T13:59:17.000Z | src/primaires/joueur/commandes/decrire/__init__.py | vincent-lg/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 20 | 2015-09-29T20:50:45.000Z | 2018-06-21T12:58:30.000Z | src/primaires/joueur/commandes/decrire/__init__.py | vlegoff/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 3 | 2015-05-02T19:42:03.000Z | 2018-09-06T10:55:00.000Z | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 CORTIER Benoît
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'decrire'"""
from primaires.interpreteur.commande.commande import Commande
class CmdDecrire(Commande):

    """The 'decrire' (describe) command.

    Opens an editor in which a player can edit their own description.
    """

    def __init__(self):
        """Build the command and its (French) in-game help texts."""
        Commande.__init__(self, "décrire", "describe")
        self.groupe = "joueur"
        self.aide_courte = "Ouvre un éditeur pour se décrire."
        self.aide_longue = (
            "Cette commande permet de manipuler votre description. "
            "Elle ouvre un éditeur dans lequel vous pouvez modifier "
            "cette description. La description doit d'abord être validée "
            "par un administrateur avant d'être visible à tous.")

    def interpreter(self, personnage, dic_masques):
        """Interpret the command: open the description editor."""
        desc_editor = type(self).importeur.interpreteur.construire_editeur(
            "descedit", personnage, personnage)
        personnage.contextes.ajouter(desc_editor)
        desc_editor.actualiser()
| 44.37931 | 79 | 0.731546 |
from primaires.interpreteur.commande.commande import Commande
class CmdDecrire(Commande):
def __init__(self):
Commande.__init__(self, "décrire", "describe")
self.groupe = "joueur"
self.aide_courte = "Ouvre un éditeur pour se décrire."
self.aide_longue = \
"Cette commande permet de manipuler votre description. " \
"Elle ouvre un éditeur dans lequel vous pouvez modifier " \
"cette description. La description doit d'abord être validée " \
"par un administrateur avant d'être visible à tous."
def interpreter(self, personnage, dic_masques):
editeur = type(self).importeur.interpreteur.construire_editeur(
"descedit", personnage, personnage)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| true | true |
f739bea2b48f57cf60765df43c24f03f3ffad921 | 8,874 | py | Python | venv/Lib/site-packages/ipyparallel/apps/baseapp.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/ipyparallel/apps/baseapp.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/ipyparallel/apps/baseapp.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
The Base Application class for ipyparallel apps
"""
import os
import logging
import re
import sys
from traitlets.config.application import catch_config_error, LevelFormatter
from IPython.core import release
from IPython.core.crashhandler import CrashHandler
from IPython.core.application import (
BaseIPythonApplication,
base_aliases as base_ip_aliases,
base_flags as base_ip_flags
)
from IPython.utils.path import expand_path
from IPython.utils.process import check_pid
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
from .._version import __version__
from traitlets import Unicode, Bool, Instance, Dict, observe
#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------
class PIDFileError(Exception):
    """Raised when a pid file is malformed, missing, or already exists."""
    pass
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class ParallelCrashHandler(CrashHandler):
    """sys.excepthook for IPython itself, leaves a detailed report on disk."""

    def __init__(self, app):
        """Configure the generic CrashHandler with ipyparallel contacts."""
        super(ParallelCrashHandler, self).__init__(
            app,
            release.authors['Min'][0],    # contact name
            release.author_email,         # contact email
            'https://github.com/ipython/ipython/issues',  # bug tracker
        )
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
# Command-line aliases/flags shared by all ipyparallel applications:
# start from the IPython base set and extend with parallel-specific options.
base_aliases = {}
base_aliases.update(base_ip_aliases)
base_aliases.update({
    'work-dir' : 'BaseParallelApplication.work_dir',
    'log-to-file' : 'BaseParallelApplication.log_to_file',
    'clean-logs' : 'BaseParallelApplication.clean_logs',
    'log-url' : 'BaseParallelApplication.log_url',
    'cluster-id' : 'BaseParallelApplication.cluster_id',
})
# Flags map a switch name to (config dict, help string).
base_flags = {
    'log-to-file' : (
        {'BaseParallelApplication' : {'log_to_file' : True}},
        "send log output to a file"
    )
}
base_flags.update(base_ip_flags)
class BaseParallelApplication(BaseIPythonApplication):
    """The base Application for ipyparallel apps

    Primary extensions to BaseIPythonApplication:

    * work_dir
    * remote logging via pyzmq
    * IOLoop instance
    """

    version = __version__
    crash_handler_class = ParallelCrashHandler

    def _log_level_default(self):
        # temporarily override default_log_level to INFO
        return logging.INFO

    def _log_format_default(self):
        """override default log format to include time"""
        return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s"

    work_dir = Unicode(py3compat.getcwd(), config=True,
        help='Set the working dir for the process.'
    )

    @observe('work_dir')
    def _work_dir_changed(self, change):
        # always store an absolute, expanded path
        self.work_dir = unicode_type(expand_path(change['new']))

    log_to_file = Bool(config=True,
        help="whether to log to a file")

    clean_logs = Bool(False, config=True,
        help="whether to cleanup old logfiles before starting")

    log_url = Unicode('', config=True,
        help="The ZMQ URL of the iplogger to aggregate logging.")

    cluster_id = Unicode('', config=True,
        help="""String id to add to runtime files, to prevent name collisions when
        using multiple clusters with a single profile simultaneously.

        When set, files will be named like: 'ipcontroller-<cluster_id>-engine.json'

        Since this is text inserted into filenames, typical recommendations apply:
        Simple character strings are ideal, and spaces are not recommended (but should
        generally work).
        """
    )

    @observe('cluster_id')
    def _cluster_id_changed(self, change):
        # Fixed: rebuild the name from scratch on every change. The original
        # used ``self.name +=``, which appended '<class-name>-<id>' onto the
        # previous name each time the trait changed.
        if change['new']:
            self.name = '{}-{}'.format(self.__class__.name, change['new'])
        else:
            self.name = self.__class__.name

    def _config_files_default(self):
        return ['ipcontroller_config.py', 'ipengine_config.py', 'ipcluster_config.py']

    loop = Instance('tornado.ioloop.IOLoop')

    def _loop_default(self):
        from ipyparallel.util import ioloop
        return ioloop.IOLoop.current()

    aliases = Dict(base_aliases)
    flags = Dict(base_flags)

    @catch_config_error
    def initialize(self, argv=None):
        """initialize the app"""
        super(BaseParallelApplication, self).initialize(argv)
        self.to_work_dir()
        self.reinit_logging()

    def to_work_dir(self):
        """chdir to ``self.work_dir`` if it is not already the cwd."""
        wd = self.work_dir
        if unicode_type(wd) != py3compat.getcwd():
            os.chdir(wd)
            self.log.info("Changing to working dir: %s" % wd)
        # This is the working dir by now.
        sys.path.insert(0, '')

    def reinit_logging(self):
        """Re-point logging at a per-pid logfile if ``log_to_file`` is set."""
        # Remove old log files
        log_dir = self.profile_dir.log_dir
        if self.clean_logs:
            for f in os.listdir(log_dir):
                if re.match(r'%s-\d+\.(log|err|out)' % self.name, f):
                    try:
                        os.remove(os.path.join(log_dir, f))
                    except (OSError, IOError):
                        # probably just conflict from sibling process
                        # already removing it
                        pass
        if self.log_to_file:
            # Start logging to the new log file
            log_filename = self.name + u'-' + str(os.getpid()) + u'.log'
            logfile = os.path.join(log_dir, log_filename)
            open_log_file = open(logfile, 'w')
        else:
            open_log_file = None
        if open_log_file is not None:
            # replace all existing handlers with one writing to the file
            while self.log.handlers:
                self.log.removeHandler(self.log.handlers[0])
            self._log_handler = logging.StreamHandler(open_log_file)
            self.log.addHandler(self._log_handler)
        else:
            self._log_handler = self.log.handlers[0]
        # Add timestamps to log format:
        self._log_formatter = LevelFormatter(self.log_format,
                                             datefmt=self.log_datefmt)
        self._log_handler.setFormatter(self._log_formatter)
        # do not propagate log messages to root logger
        # ipcluster app will sometimes print duplicate messages during shutdown
        # if this is 1 (default):
        self.log.propagate = False

    def write_pid_file(self, overwrite=False):
        """Create a .pid file in the pid_dir with my pid.

        This must be called after pre_construct, which sets `self.pid_dir`.
        This raises :exc:`PIDFileError` if the pid file exists already.
        """
        pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
        if os.path.isfile(pid_file):
            pid = self.get_pid_from_file()
            if not overwrite:
                raise PIDFileError(
                    'The pid file [%s] already exists. \nThis could mean that this '
                    'server is already running with [pid=%s].' % (pid_file, pid)
                )
        with open(pid_file, 'w') as f:
            self.log.info("Creating pid file: %s" % pid_file)
            f.write(repr(os.getpid())+'\n')

    def remove_pid_file(self):
        """Remove the pid file.

        This should be called at shutdown by registering a callback with
        :func:`reactor.addSystemEventTrigger`. This needs to return
        ``None``.
        """
        pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
        if os.path.isfile(pid_file):
            try:
                self.log.info("Removing pid file: %s" % pid_file)
                os.remove(pid_file)
            except OSError:
                # narrowed from a bare ``except``: only filesystem errors
                # are expected from os.remove; anything else should surface
                self.log.warn("Error removing the pid file: %s" % pid_file)

    def get_pid_from_file(self):
        """Get the pid from the pid file.

        If the pid file doesn't exist a :exc:`PIDFileError` is raised.
        """
        pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
        if os.path.isfile(pid_file):
            with open(pid_file, 'r') as f:
                s = f.read().strip()
                try:
                    pid = int(s)
                except ValueError:
                    # narrowed from a bare ``except``: int() raises ValueError
                    raise PIDFileError("invalid pid file: %s (contents: %r)"%(pid_file, s))
                return pid
        else:
            raise PIDFileError('pid file not found: %s' % pid_file)

    def check_pid(self, pid):
        """Best-effort check whether *pid* is a running process.

        Returns True when the check itself fails, erring on the side of
        assuming the process exists.
        """
        try:
            return check_pid(pid)
        except Exception:
            self.log.warn(
                "Could not determine whether pid %i is running. "
                " Making the likely assumption that it is."%pid
            )
            return True
| 35.354582 | 91 | 0.587785 |
import os
import logging
import re
import sys
from traitlets.config.application import catch_config_error, LevelFormatter
from IPython.core import release
from IPython.core.crashhandler import CrashHandler
from IPython.core.application import (
BaseIPythonApplication,
base_aliases as base_ip_aliases,
base_flags as base_ip_flags
)
from IPython.utils.path import expand_path
from IPython.utils.process import check_pid
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
from .._version import __version__
from traitlets import Unicode, Bool, Instance, Dict, observe
class PIDFileError(Exception):
pass
class ParallelCrashHandler(CrashHandler):
def __init__(self, app):
contact_name = release.authors['Min'][0]
contact_email = release.author_email
bug_tracker = 'https://github.com/ipython/ipython/issues'
super(ParallelCrashHandler,self).__init__(
app, contact_name, contact_email, bug_tracker
)
base_aliases = {}
base_aliases.update(base_ip_aliases)
base_aliases.update({
'work-dir' : 'BaseParallelApplication.work_dir',
'log-to-file' : 'BaseParallelApplication.log_to_file',
'clean-logs' : 'BaseParallelApplication.clean_logs',
'log-url' : 'BaseParallelApplication.log_url',
'cluster-id' : 'BaseParallelApplication.cluster_id',
})
base_flags = {
'log-to-file' : (
{'BaseParallelApplication' : {'log_to_file' : True}},
"send log output to a file"
)
}
base_flags.update(base_ip_flags)
class BaseParallelApplication(BaseIPythonApplication):
version = __version__
crash_handler_class = ParallelCrashHandler
def _log_level_default(self):
return logging.INFO
def _log_format_default(self):
return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s"
work_dir = Unicode(py3compat.getcwd(), config=True,
help='Set the working dir for the process.'
)
@observe('work_dir')
def _work_dir_changed(self, change):
self.work_dir = unicode_type(expand_path(change['new']))
log_to_file = Bool(config=True,
help="whether to log to a file")
clean_logs = Bool(False, config=True,
help="whether to cleanup old logfiles before starting")
log_url = Unicode('', config=True,
help="The ZMQ URL of the iplogger to aggregate logging.")
cluster_id = Unicode('', config=True,
help="""String id to add to runtime files, to prevent name collisions when
using multiple clusters with a single profile simultaneously.
When set, files will be named like: 'ipcontroller-<cluster_id>-engine.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but should
generally work).
"""
)
@observe('cluster_id')
def _cluster_id_changed(self, change):
if change['new']:
self.name += '{}-{}'.format(self.__class__.name, change['new'])
else:
self.name = self.__class__.name
def _config_files_default(self):
return ['ipcontroller_config.py', 'ipengine_config.py', 'ipcluster_config.py']
loop = Instance('tornado.ioloop.IOLoop')
def _loop_default(self):
from ipyparallel.util import ioloop
return ioloop.IOLoop.current()
aliases = Dict(base_aliases)
flags = Dict(base_flags)
@catch_config_error
def initialize(self, argv=None):
super(BaseParallelApplication, self).initialize(argv)
self.to_work_dir()
self.reinit_logging()
def to_work_dir(self):
wd = self.work_dir
if unicode_type(wd) != py3compat.getcwd():
os.chdir(wd)
self.log.info("Changing to working dir: %s" % wd)
sys.path.insert(0, '')
def reinit_logging(self):
log_dir = self.profile_dir.log_dir
if self.clean_logs:
for f in os.listdir(log_dir):
if re.match(r'%s-\d+\.(log|err|out)' % self.name, f):
try:
os.remove(os.path.join(log_dir, f))
except (OSError, IOError):
pass
if self.log_to_file:
log_filename = self.name + u'-' + str(os.getpid()) + u'.log'
logfile = os.path.join(log_dir, log_filename)
open_log_file = open(logfile, 'w')
else:
open_log_file = None
if open_log_file is not None:
while self.log.handlers:
self.log.removeHandler(self.log.handlers[0])
self._log_handler = logging.StreamHandler(open_log_file)
self.log.addHandler(self._log_handler)
else:
self._log_handler = self.log.handlers[0]
self._log_formatter = LevelFormatter(self.log_format,
datefmt=self.log_datefmt)
self._log_handler.setFormatter(self._log_formatter)
self.log.propagate = False
def write_pid_file(self, overwrite=False):
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
pid = self.get_pid_from_file()
if not overwrite:
raise PIDFileError(
'The pid file [%s] already exists. \nThis could mean that this '
'server is already running with [pid=%s].' % (pid_file, pid)
)
with open(pid_file, 'w') as f:
self.log.info("Creating pid file: %s" % pid_file)
f.write(repr(os.getpid())+'\n')
def remove_pid_file(self):
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
try:
self.log.info("Removing pid file: %s" % pid_file)
os.remove(pid_file)
except:
self.log.warn("Error removing the pid file: %s" % pid_file)
def get_pid_from_file(self):
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
with open(pid_file, 'r') as f:
s = f.read().strip()
try:
pid = int(s)
except:
raise PIDFileError("invalid pid file: %s (contents: %r)"%(pid_file, s))
return pid
else:
raise PIDFileError('pid file not found: %s' % pid_file)
def check_pid(self, pid):
try:
return check_pid(pid)
except Exception:
self.log.warn(
"Could not determine whether pid %i is running. "
" Making the likely assumption that it is."%pid
)
return True
| true | true |
f739bf1ddfe0951926c6f007d89fed6ea491e3cf | 11,960 | py | Python | main.py | phwissmann/TV7_EPG_Parser | 20e152feb49dcfe68c194ad858f3f33c2b07ea45 | [
"MIT"
] | null | null | null | main.py | phwissmann/TV7_EPG_Parser | 20e152feb49dcfe68c194ad858f3f33c2b07ea45 | [
"MIT"
] | null | null | null | main.py | phwissmann/TV7_EPG_Parser | 20e152feb49dcfe68c194ad858f3f33c2b07ea45 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
import re
import datetime
import html
import json
from epg_sources.tele import tele
from epg_sources.teleboy import teleboy
from icon_sources.tele import tele as teleicon
from icon_sources.teleboy import teleboy as teleboyicon
class channel_item:
    """Annotation-only description of a TV channel entry.

    NOTE(review): this class is never instantiated in this module; the code
    below passes plain dicts carrying the same keys.
    """
    id: str  # normalized channel identifier (see gen_channel_id_from_name)
    lang: str  # language code, e.g. "de"
    display_name: str  # human-readable channel name
class programm_item:
    """Annotation-only description of an EPG programme entry.

    NOTE(review): never instantiated here; the matchers build plain dicts
    with (a subset of) these keys.
    """
    start: datetime.datetime  # programme start time
    stop: datetime.datetime  # programme end time
    channel: str  # normalized channel id
    icon: str  # programme image URL
    title: str
    country: str
    desc: str
    sub_title: str
    credits: dict  # cast/crew, e.g. {"actors": [...], "director": ...}
    category: str
    episode_num: str  # e.g. "S2 E5"
    date: int  # production year
    length: int  # duration in seconds
def __main__():
    """Fetch the Init7 channel list, merge EPG and icon data from tele.ch
    and teleboy.ch, and write the combined XMLTV files.

    Side effects: network requests plus three files written to the current
    working directory (tv7_teleboy_epg.xml, tv7_tele_epg.xml, tv7_epg.xml).
    """
    def write_xmltv(path, body_xml):
        # All three output files share the same XML declaration and <tv>
        # wrapper; this helper removes the triplicated write code.
        with open(path, 'w+') as w:
            w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
                    body_xml + "</tv>")

    print("[*] Getting/parsing Init7 tvchannels.m3u playlist")
    channels = prepare_channel_list(get_channel_list())

    print("[*] Getting EPG and icons data from teleboy.ch")
    teleboy_raw = teleboy.get_epg_by_duration(7*24*60)
    teleboy_icons = teleboyicon.get_images(teleboy_raw)
    teleboy_icons_matched = match_icons(
        channels, teleboy_icons, './mappings/teleboy.json')
    teleboy_epg = match_teleboy_epg(channels, teleboy_raw)
    print("[✓] Matched " +
          str(len(teleboy_icons_matched)) + " teleboy.ch icons")

    print("[*] Getting icons data from tele.ch")
    tele_icons = teleicon.get_images()
    tele_icons_matched = match_icons(
        channels, tele_icons, './mappings/tele.json')
    print("[✓] Matched " + str(len(tele_icons_matched)) + " tele.ch icons")

    print("[*] Getting EPG data from tele.ch")
    tele_raw = tele.get_epg_by_duration(7*24*60)
    tele_epg = match_tele_epg(channels, tele_raw)

    # The channel definitions are shared by all three output files; teleboy
    # icons win on conflict (dict-merge order below).
    all_icons = {**tele_icons_matched, **teleboy_icons_matched}
    print("[✓] Total " + str(len(all_icons)) + " icons")
    channels_xmltv = channels_to_xmltv(channels, all_icons)

    write_xmltv('tv7_teleboy_epg.xml',
                channels_xmltv + programms_to_xmltv(teleboy_epg))
    write_xmltv('tv7_tele_epg.xml',
                channels_xmltv + programms_to_xmltv(tele_epg))

    # Combined file: tele.ch programmes first, then teleboy.ch, matching
    # the original output order.
    full_epg = tele_epg + teleboy_epg
    write_xmltv('tv7_epg.xml', channels_xmltv + programms_to_xmltv(full_epg))
def get_channel_list():
    """Download Init7's tvchannels.m3u playlist and return the raw channel
    names (one entry per ``#EXTINF`` line)."""
    playlist = requests.get("https://api.init7.net/tvchannels.m3u").text
    # Strip the stream URLs, all newlines and the playlist header so that
    # only the channel names between the #EXTINF markers remain.
    playlist = re.sub(r"udp:\/\/.+", "", playlist)
    for token in ("\n", "#EXTM3U"):
        playlist = playlist.replace(token, "")
    return playlist.split("#EXTINF:-1,")
def prepare_channel_list(channel_list):
    """Turn raw channel names into channel dicts.

    Each entry carries ``display_name``, ``id`` and ``lang``.  The id is
    derived via gen_channel_id_from_name() so the normalization stays
    consistent with the EPG/icon matchers (previously the same replace
    chain was duplicated inline here).
    """
    return [
        {
            "display_name": name,
            "id": gen_channel_id_from_name(name),
            "lang": "de",
        }
        for name in channel_list
    ]
def gen_channel_id_from_name(channel_name):
    """Normalize a channel name into the id used for matching: lowercase,
    with "hd"/"schweiz"/"ch" markers, parentheses and spaces removed."""
    normalized = channel_name.lower()
    # Replacement order matters and matches the original chain exactly.
    for fragment in ("hd", "schweiz", "ch", "(", ")", " "):
        normalized = normalized.replace(fragment, "")
    return normalized
def find_channel_by_id(id, channel_list):
    """Return True if any channel dict in *channel_list* has the given id."""
    return any(channel["id"] == id for channel in channel_list)
def match_tele_epg(channel_list, tele_epg):
    """Match tele.ch EPG entries to the Init7 channel list.

    Returns programme dicts with mandatory keys start/stop/channel/icon/
    title plus optional sub_title, country, desc, credits, category,
    episode_num and date, for every programme whose channel id maps to an
    Init7 channel.
    """
    print("[*] Matching tele.ch EPG data (" + str(len(tele_epg)) +
          " programms to " + str(len(channel_list)) + " channels)")
    # Manual id overrides for channels whose normalized tele.ch name does
    # not equal the normalized Init7 name.  Use a context manager so the
    # file handle is closed (previously leaked via json.loads(open(...))).
    with open('./mappings/tele.json', 'r') as mapping_file:
        mapping = json.load(mapping_file)
    programms = []
    matched_channels = set()
    for programm in tele_epg:
        channel_id = gen_channel_id_from_name(programm["channelLong"])
        if channel_id in mapping:
            channel_id = mapping[channel_id]
        if find_channel_by_id(channel_id, channel_list):
            matched_channels.add(channel_id)
            programm_matched = {
                "start": programm["availabilityStartTime"],
                "stop": programm["availabilityEndTime"],
                "channel": channel_id,
                "icon": programm["image"],
                "title": programm["title"],
            }
            if "subtitle" in programm and programm["subtitle"]:
                programm_matched["sub_title"] = programm["subtitle"]
            if "productionCountry" in programm and programm["productionCountry"]:
                programm_matched["country"] = programm["productionCountry"]
            if "synopsis" in programm and programm["synopsis"]:
                programm_matched["desc"] = programm["synopsis"]
            if "persons" in programm and programm["persons"]:
                programm_matched["credits"] = programm["persons"]
                # tele.ch calls the actor list "cast"; rename it to the
                # "actors" key expected by the XMLTV serializer.
                if "cast" in programm["persons"] and programm["persons"]["cast"]:
                    programm_matched["credits"]["actors"] = programm["persons"]["cast"]
                    del programm_matched["credits"]["cast"]
            if "category" in programm and programm["category"]:
                programm_matched["category"] = programm["category"]
            if "episode" in programm and "season" in programm and programm["episode"] and programm["season"]:
                programm_matched["episode_num"] = "S" + \
                    str(programm["season"]) + " E" + str(programm["episode"])
            elif "episode" in programm and programm["episode"]:
                programm_matched["episode_num"] = programm["episode"]
            if "productionYearFirst" in programm and programm["productionYearFirst"]:
                programm_matched["date"] = programm["productionYearFirst"]
            programms.append(programm_matched)
    print("[✓] Matched " + str(len(matched_channels)) + " tele.ch channels")
    return programms
def match_icons(channel_list, icons, mapping):
    """Match scraped channel icons to Init7 channel ids.

    *mapping* is the path of a JSON file with manual id overrides.
    Returns a dict mapping channel id -> icon URL.
    """
    print("[*] Matching channel icons (" + str(len(icons)) +
          " icons to " + str(len(channel_list)) + " channels)")
    # Context manager closes the handle (previously leaked via
    # json.loads(open(...).read())).
    with open(mapping, 'r') as mapping_file:
        mapping = json.load(mapping_file)
    icons_matched = {}
    for icon in icons:
        channel_id = gen_channel_id_from_name(icon['name'])
        if channel_id in mapping:
            channel_id = mapping[channel_id]
        if find_channel_by_id(channel_id, channel_list):
            icons_matched[channel_id] = icon['src']
    return icons_matched
def match_teleboy_epg(channel_list, teleboy_epg):
    """Match teleboy.ch EPG entries to the Init7 channel list.

    Same output layout as match_tele_epg(): programme dicts with
    start/stop/channel/icon/title plus optional sub_title, country, desc,
    episode_num and date.
    """
    print("[*] Matching teleboy.ch EPG data (" + str(len(teleboy_epg)) +
          " programms to " + str(len(channel_list)) + " channels)")
    # Manual id overrides for channels whose normalized teleboy.ch name
    # does not equal the normalized Init7 name.  Context manager closes the
    # handle (previously leaked via json.loads(open(...))).
    with open('./mappings/teleboy.json', 'r') as mapping_file:
        mapping = json.load(mapping_file)
    programms = []
    matched_channels = set()
    for programm in teleboy_epg:
        channel_id = gen_channel_id_from_name(programm["station"])
        if channel_id in mapping:
            channel_id = mapping[channel_id]
        if find_channel_by_id(channel_id, channel_list):
            matched_channels.add(channel_id)
            programm_matched = {
                "start": programm["begin"],
                "stop": programm["end"],
                "channel": channel_id,
                "icon": programm["image"],
                "title": programm["title"],
            }
            if "subtitle" in programm and programm["subtitle"]:
                programm_matched["sub_title"] = programm["subtitle"]
            if "country" in programm and programm["country"]:
                programm_matched["country"] = programm["country"]
            if "desc" in programm and programm["desc"]:
                programm_matched["desc"] = programm["desc"]
            if "episode_num" in programm and "season_num" in programm and programm["episode_num"] and programm["season_num"]:
                programm_matched["episode_num"] = "S" + \
                    str(programm["season_num"]) + " E" + \
                    str(programm["episode_num"])
            elif "episode_num" in programm and programm["episode_num"]:
                programm_matched["episode_num"] = programm["episode_num"]
            if "year" in programm and programm["year"]:
                programm_matched["date"] = programm["year"]
            programms.append(programm_matched)
    print("[✓] Matched " + str(len(matched_channels)) + " teleboy.ch channels")
    return programms
def programms_to_xmltv(programms):
    """Serialize matched programme dicts into XMLTV <programme> elements.

    Expects the layout produced by match_tele_epg()/match_teleboy_epg():
    mandatory keys start/stop (datetime), channel, icon, title; all other
    keys are optional.
    """
    print("[*] Generating XML for " + str(len(programms)) + " programms")
    programms_xml = ""
    for programm in programms:
        programm_xml = ""
        programm_xml = programm_xml + "<programme start=\""+programm["start"].strftime(
            "%Y%m%d%H%M%S %z")+"\" stop=\""+programm["stop"].strftime("%Y%m%d%H%M%S %z")+"\" channel=\""+programm["channel"]+"\">"
        programm_xml = programm_xml + "<icon src=\""+programm["icon"]+"\" />"
        programm_xml = programm_xml + "<title>" + \
            html.escape(programm["title"] or "")+"</title>"
        if "sub_title" in programm:
            programm_xml = programm_xml + "<sub-title>" + \
                html.escape(programm["sub_title"] or "")+"</sub-title>"
        if "country" in programm:
            programm_xml = programm_xml + "<country>" + \
                html.escape(programm["country"] or "")+"</country>"
        if "category" in programm:
            programm_xml = programm_xml + "<category lang=\"de\">" + \
                html.escape(programm["category"] or "")+"</category>"
        if "desc" in programm:
            programm_xml = programm_xml + "<desc lang=\"de\">" + \
                html.escape(programm["desc"] or "")+"</desc>"
        # BUG FIX: the matchers store cast/crew under "credits", but this
        # serializer used to test for "persons", so credits were never
        # emitted.  Credit names are now also HTML-escaped.
        if "credits" in programm:
            programm_xml = programm_xml + "<credits>"
            for attrib in programm["credits"]:
                if attrib == "actors":
                    for actor in programm["credits"]["actors"]:
                        programm_xml = programm_xml + "<actor>" + \
                            html.escape(actor) + "</actor>"
                else:
                    programm_xml = programm_xml + "<"+attrib+">" + \
                        html.escape(programm["credits"][attrib]) + "</"+attrib+">"
            programm_xml = programm_xml + "</credits>"
        # BUG FIX: the dict key is "episode_num" (underscore); the old check
        # for "episode-num" could never match.  str() guards against the
        # tele.ch matcher storing a bare integer episode number.
        if "episode_num" in programm:
            programm_xml = programm_xml + "<episode-num>" + \
                str(programm["episode_num"])+"</episode-num>"
        if "date" in programm:
            programm_xml = programm_xml + "<date>" + \
                str(programm["date"])+"</date>"
        # BUG FIX: the old check for "durationSeconds" never matched the
        # "duration" key it then read.
        if "duration" in programm:
            programm_xml = programm_xml + "<length>" + \
                str(programm["duration"])+"</length>"
        programm_xml = programm_xml + "</programme>"
        programms_xml = programms_xml + programm_xml
    return programms_xml
def channels_to_xmltv(channel_list, icons):
    """Serialize channel dicts into XMLTV <channel> elements, attaching an
    <icon> element where one was matched for the channel id."""
    print("[*] Generating XML for " + str(len(channel_list)) + " channels")
    fragments = []
    for channel in channel_list:
        parts = ["<channel id=\"" + channel["id"] + "\">"]
        # The same display name is declared for all three Swiss languages.
        for lang in ("de", "fr", "it"):
            parts.append("<display-name lang=\"" + lang + "\">" +
                         channel["display_name"] + "</display-name>")
        if channel['id'] in icons:
            parts.append("<icon src=\"" + icons[channel['id']] + "\" />")
        parts.append("</channel>")
        fragments.append("".join(parts))
    return "".join(fragments)
if __name__ == "__main__":
    # Guard so that importing this module (e.g. for testing) does not kick
    # off the full network scrape and file generation.
    __main__()
# programm.availabilityStartTime.strftime("%Y%m%d%H%M%S %z"),
| 37.492163 | 144 | 0.596237 |
import requests
import re
import datetime
import html
import json
from epg_sources.tele import tele
from epg_sources.teleboy import teleboy
from icon_sources.tele import tele as teleicon
from icon_sources.teleboy import teleboy as teleboyicon
class channel_item:
id: str
lang: str
display_name: str
class programm_item:
start: datetime
stop: datetime
channel: str
icon: str
title: str
country: str
desc: str
sub_title: str
credits: dict
category: str
episode_num: str
date: int
length: int
def __main__():
print("[*] Getting/parsing Init7 tvchannels.m3u playlist")
channels = get_channel_list()
channels = prepare_channel_list(channels)
print("[*] Getting EPG and icons data from teleboy.ch")
teleboy_raw = teleboy.get_epg_by_duration(7*24*60)
teleboy_icons = teleboyicon.get_images(teleboy_raw)
teleboy_icons_matched = match_icons(
channels, teleboy_icons, './mappings/teleboy.json')
teleboy_epg = match_teleboy_epg(channels, teleboy_raw)
print("[✓] Matched " +
str(len(teleboy_icons_matched)) + " teleboy.ch icons")
print("[*] Getting icons data from tele.ch")
tele_icons = teleicon.get_images()
tele_icons_matched = match_icons(
channels, tele_icons, './mappings/tele.json')
print("[✓] Matched " + str(len(tele_icons_matched)) + " tele.ch icons")
print("[*] Getting EPG data from tele.ch")
tele_raw = tele.get_epg_by_duration(7*24*60)
tele_epg = match_tele_epg(channels, tele_raw)
all_icons = {**tele_icons_matched, **teleboy_icons_matched}
print("[✓] Total " + str(len(all_icons)) + " icons")
channels_xmltv = channels_to_xmltv(channels, all_icons)
with open('tv7_teleboy_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_to_xmltv(teleboy_epg) + "</tv>")
with open('tv7_tele_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_to_xmltv(tele_epg) + "</tv>")
full_epg = []
full_epg.extend(tele_epg)
full_epg.extend(teleboy_epg)
programms_xmltv = programms_to_xmltv(full_epg)
with open('tv7_epg.xml', 'w+') as w:
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?><tv>" +
channels_xmltv + programms_xmltv + "</tv>")
def get_channel_list():
tv7channel_list = requests.get("https://api.init7.net/tvchannels.m3u").text
tv7channel_list = re.sub(r"udp:\/\/.+", "", tv7channel_list)
tv7channel_list = tv7channel_list.replace("\n", "")
tv7channel_list = tv7channel_list.replace("#EXTM3U", "")
tv7channel_list = tv7channel_list.split("#EXTINF:-1,")
return tv7channel_list
def prepare_channel_list(channel_list):
prepared_list = []
for channel in channel_list:
prepared_list.append({
"display_name": channel,
"id": channel.lower().replace("hd", "").replace("schweiz", "").replace("ch", "").replace("(", "").replace(")", "").replace(" ", ""),
"lang": "de"
})
return prepared_list
def gen_channel_id_from_name(channel_name):
return channel_name.lower().replace("hd", "").replace("schweiz", "").replace("ch", "").replace("(", "").replace(")", "").replace(" ", "")
def find_channel_by_id(id, channel_list):
for channel in channel_list:
if id == channel["id"]:
return True
return False
def match_tele_epg(channel_list, tele_epg):
print("[*] Matching tele.ch EPG data (" + str(len(tele_epg)) +
" programms to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open('./mappings/tele.json', 'r').read())
programms = []
matched_channels = set()
for programm in tele_epg:
channel_id = gen_channel_id_from_name(programm["channelLong"])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
matched_channels.add(channel_id)
programm_matched = {
"start": programm["availabilityStartTime"],
"stop": programm["availabilityEndTime"],
"channel": channel_id,
"icon": programm["image"],
"title": programm["title"],
}
if "subtitle" in programm and programm["subtitle"]:
programm_matched["sub_title"] = programm["subtitle"]
if "productionCountry" in programm and programm["productionCountry"]:
programm_matched["country"] = programm["productionCountry"]
if "synopsis" in programm and programm["synopsis"]:
programm_matched["desc"] = programm["synopsis"]
if "persons" in programm and programm["persons"]:
programm_matched["credits"] = programm["persons"]
if "cast" in programm["persons"] and programm["persons"]["cast"]:
programm_matched["credits"]["actors"] = programm["persons"]["cast"]
del programm_matched["credits"]["cast"]
if "category" in programm and programm["category"]:
programm_matched["category"] = programm["category"]
if "episode" in programm and "season" in programm and programm["episode"] and programm["season"]:
programm_matched["episode_num"] = "S" + \
str(programm["season"]) + " E" + str(programm["episode"])
elif "episode" in programm and programm["episode"]:
programm_matched["episode_num"] = programm["episode"]
if "productionYearFirst" in programm and programm["productionYearFirst"]:
programm_matched["date"] = programm["productionYearFirst"]
programms.append(programm_matched)
print("[✓] Matched " + str(len(matched_channels)) + " tele.ch channels")
return programms
def match_icons(channel_list, icons, mapping):
print("[*] Matching channel icons (" + str(len(icons)) +
" icons to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open(mapping, 'r').read())
icons_matched = {}
for icon in icons:
channel_id = gen_channel_id_from_name(icon['name'])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
icons_matched[channel_id] = icon['src']
return icons_matched
def match_teleboy_epg(channel_list, teleboy_epg):
print("[*] Matching teleboy.ch EPG data (" + str(len(teleboy_epg)) +
" programms to " + str(len(channel_list)) + " channels)")
mapping = json.loads(open('./mappings/teleboy.json', 'r').read())
programms = []
matched_channels = set()
for programm in teleboy_epg:
channel_id = gen_channel_id_from_name(programm["station"])
if channel_id in mapping:
channel_id = mapping[channel_id]
if find_channel_by_id(channel_id, channel_list):
matched_channels.add(channel_id)
programm_matched = {
"start": programm["begin"],
"stop": programm["end"],
"channel": channel_id,
"icon": programm["image"],
"title": programm["title"],
}
if "subtitle" in programm and programm["subtitle"]:
programm_matched["sub_title"] = programm["subtitle"]
if "country" in programm and programm["country"]:
programm_matched["country"] = programm["country"]
if "desc" in programm and programm["desc"]:
programm_matched["desc"] = programm["desc"]
if "episode_num" in programm and "season_num" in programm and programm["episode_num"] and programm["season_num"]:
programm_matched["episode_num"] = "S" + \
str(programm["season_num"]) + " E" + \
str(programm["episode_num"])
elif "episode_num" in programm and programm["episode_num"]:
programm_matched["episode_num"] = programm["episode_num"]
if "year" in programm and programm["year"]:
programm_matched["date"] = programm["year"]
programms.append(programm_matched)
print("[✓] Matched " + str(len(matched_channels)) + " teleboy.ch channels")
return programms
def programms_to_xmltv(programms):
print("[*] Generating XML for " + str(len(programms)) + " programms")
programms_xml = ""
for programm in programms:
programm_xml = ""
programm_xml = programm_xml + "<programme start=\""+programm["start"].strftime(
"%Y%m%d%H%M%S %z")+"\" stop=\""+programm["stop"].strftime("%Y%m%d%H%M%S %z")+"\" channel=\""+programm["channel"]+"\">"
programm_xml = programm_xml + "<icon src=\""+programm["icon"]+"\" />"
programm_xml = programm_xml + "<title>" + \
html.escape(programm["title"] or "")+"</title>"
if "sub_title" in programm:
programm_xml = programm_xml + "<sub-title>" + \
html.escape(programm["sub_title"] or "")+"</sub-title>"
if "country" in programm:
programm_xml = programm_xml + "<country>" + \
html.escape(programm["country"] or "")+"</country>"
if "category" in programm:
programm_xml = programm_xml + "<category lang=\"de\">" + \
html.escape(programm["category"] or "")+"</category>"
if "desc" in programm:
programm_xml = programm_xml + "<desc lang=\"de\">" + \
html.escape(programm["desc"] or "")+"</desc>"
if "persons" in programm:
programm_xml = programm_xml + "<credits>"
for attrib in programm["persons"]:
if attrib == "actors":
for actor in programm["persons"]["actors"]:
programm_xml = programm_xml + "<actor>" + actor + "</actor>"
else:
programm_xml = programm_xml + "<"+attrib+">" + \
programm["persons"][attrib] + "</"+attrib+">"
programm_xml = programm_xml + "</credits>"
if "episode-num" in programm:
programm_xml = programm_xml + "<episode-num>" + \
programm["episode_num"]+"</episode-num>"
if "date" in programm:
programm_xml = programm_xml + "<date>" + \
str(programm["date"])+"</date>"
if "durationSeconds" in programm:
programm_xml = programm_xml + "<length>" + \
str(programm["duration"])+"</length>"
programm_xml = programm_xml + "</programme>"
programms_xml = programms_xml + programm_xml
return programms_xml
def channels_to_xmltv(channel_list, icons):
print("[*] Generating XML for " + str(len(channel_list)) + " channels")
channels_xml = ""
for channel in channel_list:
channel_xml = "<channel id=\"" + channel["id"] + "\">"
channel_xml = channel_xml + "<display-name lang=\"de\">" + \
channel["display_name"] + "</display-name>"
channel_xml = channel_xml + "<display-name lang=\"fr\">" + \
channel["display_name"] + "</display-name>"
channel_xml = channel_xml + "<display-name lang=\"it\">" + \
channel["display_name"] + "</display-name>"
if channel['id'] in icons:
channel_xml = channel_xml + "<icon src=\"" + \
icons[channel['id']] + "\" />"
channel_xml = channel_xml + "</channel>"
channels_xml = channels_xml + channel_xml
return channels_xml
__main__()
| true | true |
f739bf459e1c6334962155b8a47c9de38d1871d0 | 711 | py | Python | bot/about.py | alesanmed/YourEnglishTeacher_Bot | eafee609484bb78336bc5279d2e7990099b682c7 | [
"Unlicense"
] | 1 | 2021-06-21T15:42:54.000Z | 2021-06-21T15:42:54.000Z | bot/about.py | alesanmed-bots/YourEnglishTeacher_Bot | eafee609484bb78336bc5279d2e7990099b682c7 | [
"Unlicense"
] | 3 | 2018-12-09T09:46:41.000Z | 2018-12-12T17:54:45.000Z | bot/about.py | alesanmed-bots/YourEnglishTeacher_Bot | eafee609484bb78336bc5279d2e7990099b682c7 | [
"Unlicense"
] | null | null | null | # encoding: utf-8
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from connectors import users
from telegram.ext import CommandHandler
def main(dispatcher):
    """Register the /about command handler on the given dispatcher."""
    dispatcher.add_handler(CommandHandler('about', __about))
def __about(bot, update):
    """Reply to the /about command with developer and contribution info
    (HTML formatted).  *bot* is unused but required by the handler API."""
    text = ('This bot has been developed by @alesanmed. '
            'If you are a user and have any problem, you '
            'can contact him for resolving it. If you are '
            'a developer and want to contribute to the bot, '
            'please refer to the bot '
            '<a href="https://github.com/alesanmed/YourEnglishTeacher_Bot">GitHub repository</a>.')
    update.message.reply_html(text)
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from connectors import users
from telegram.ext import CommandHandler
def main(dispatcher):
about_handler = CommandHandler('about', __about)
dispatcher.add_handler(about_handler)
def __about(bot, update):
update.message.reply_html(
('This bot has been developed by @alesanmed. '
'If you are a user and have any problem, you '
'can contact him for resolving it. If you are '
'a developer and want to contribute to the bot, '
'please refer to the bot '
'<a href="https://github.com/alesanmed/YourEnglishTeacher_Bot">GitHub repository</a>.')
) | true | true |
f739c0031b2456d39e32191b69fe6f9348b018ec | 7,912 | py | Python | nipype/interfaces/ants/utils.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/ants/utils.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/ants/utils.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | [
"BSD-3-Clause"
] | null | null | null | """ANTS Apply Transforms interface
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from .base import ANTSCommand, ANTSCommandInputSpec
from ..base import (TraitedSpec, File, traits,
isdefined)
from ...utils.filemanip import split_filename
from nipype.interfaces.base import InputMultiPath
class AverageAffineTransformInputSpec(ANTSCommandInputSpec):
    """Positional inputs for the ``AverageAffineTransform`` command line."""
    dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)')
    output_affine_transform = File(argstr='%s', mandatory=True, position=1, desc='Outputfname.txt: the name of the resulting transform.')
    transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
                                position=3, desc=('transforms to average'))
class AverageAffineTransformOutputSpec(TraitedSpec):
    """Outputs of ``AverageAffineTransform``."""
    affine_transform = File(exists=True, desc='average transform file')
class AverageAffineTransform(ANTSCommand):
    """Wraps the ANTS ``AverageAffineTransform`` tool, which averages a set
    of affine transform files into a single transform.

    Examples
    --------
    >>> from nipype.interfaces.ants import AverageAffineTransform
    >>> avg = AverageAffineTransform()
    >>> avg.inputs.dimension = 3
    >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat']
    >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat'
    >>> avg.cmdline
    'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat'
    """
    _cmd = 'AverageAffineTransform'
    input_spec = AverageAffineTransformInputSpec
    output_spec = AverageAffineTransformOutputSpec

    # NOTE: a previous ``_format_arg`` override only delegated to the parent
    # implementation and was removed as redundant.

    def _list_outputs(self):
        """Resolve the output transform to an absolute path."""
        outputs = self._outputs().get()
        outputs['affine_transform'] = os.path.abspath(
            self.inputs.output_affine_transform)
        return outputs
class AverageImagesInputSpec(ANTSCommandInputSpec):
    """Positional inputs for the ``AverageImages`` command line."""
    dimension = traits.Enum(3, 2, argstr='%d', mandatory=True,
                            position=0, desc='image dimension (2 or 3)')
    output_average_image = File("average.nii", argstr='%s', position=1, desc='the name of the resulting image.', usedefault=True, hash_files=False)
    normalize = traits.Bool(argstr="%d", mandatory=True, position=2, desc='Normalize: if true, the 2nd image' +
                            'is divided by its mean. This will select the largest image to average into.')
    images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=3, desc=('image to apply transformation to (generally a coregistered functional)'))
class AverageImagesOutputSpec(TraitedSpec):
    """Outputs of ``AverageImages``."""
    output_average_image = File(exists=True, desc='average image file')
class AverageImages(ANTSCommand):
    """Wraps the ANTS ``AverageImages`` tool, which averages a set of
    images (optionally normalizing by the second image's mean).

    Examples
    --------
    >>> from nipype.interfaces.ants import AverageImages
    >>> avg = AverageImages()
    >>> avg.inputs.dimension = 3
    >>> avg.inputs.output_average_image = "average.nii.gz"
    >>> avg.inputs.normalize = True
    >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii']
    >>> avg.cmdline
    'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii'
    """
    _cmd = 'AverageImages'
    input_spec = AverageImagesInputSpec
    output_spec = AverageImagesOutputSpec

    # NOTE: a previous ``_format_arg`` override only delegated to the parent
    # implementation and was removed as redundant.

    def _list_outputs(self):
        """Resolve the output average image to a real (symlink-free) path."""
        outputs = self._outputs().get()
        outputs['output_average_image'] = os.path.realpath(
            self.inputs.output_average_image)
        return outputs
class MultiplyImagesInputSpec(ANTSCommandInputSpec):
    """Positional inputs for the ``MultiplyImages`` command line."""
    dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)')
    first_input = File(
        argstr='%s', exists=True, mandatory=True, position=1, desc='image 1')
    second_input = traits.Either(File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2, desc='image 2 or multiplication weight')
    output_product_image = File(argstr='%s', mandatory=True, position=3, desc='Outputfname.nii.gz: the name of the resulting image.')
class MultiplyImagesOutputSpec(TraitedSpec):
    """Outputs of ``MultiplyImages``."""
    output_product_image = File(exists=True, desc='average image file')
class MultiplyImages(ANTSCommand):
    """Wraps the ANTS ``MultiplyImages`` tool, which multiplies an image by
    another image or by a scalar weight.

    Examples
    --------
    >>> from nipype.interfaces.ants import MultiplyImages
    >>> test = MultiplyImages()
    >>> test.inputs.dimension = 3
    >>> test.inputs.first_input = 'moving2.nii'
    >>> test.inputs.second_input = 0.25
    >>> test.inputs.output_product_image = "out.nii"
    >>> test.cmdline
    'MultiplyImages 3 moving2.nii 0.25 out.nii'
    """
    _cmd = 'MultiplyImages'
    input_spec = MultiplyImagesInputSpec
    output_spec = MultiplyImagesOutputSpec

    # NOTE: a previous ``_format_arg`` override only delegated to the parent
    # implementation and was removed as redundant.

    def _list_outputs(self):
        """Resolve the output product image to an absolute path."""
        outputs = self._outputs().get()
        outputs['output_product_image'] = os.path.abspath(
            self.inputs.output_product_image)
        return outputs
class JacobianDeterminantInputSpec(ANTSCommandInputSpec):
    """Positional inputs for the ``ANTSJacobian`` command line."""
    dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True,
                            position=0, desc='image dimension (2 or 3)')
    warp_file = File(argstr='%s', exists=True, mandatory=True,
                     position=1, desc='input warp file')
    output_prefix = File(argstr='%s', genfile=True, hash_files=False,
                         position=2,
                         desc=('prefix of the output image filename: '
                               'PREFIX(log)jacobian.nii.gz'))
    use_log = traits.Enum(0, 1, argstr='%d', position=3,
                          desc='log transform the jacobian determinant')
    template_mask = File(argstr='%s', exists=True, position=4,
                         desc='template mask to adjust for head size')
    norm_by_total = traits.Enum(0, 1, argstr='%d', position=5,
                                desc=('normalize jacobian by total in mask to '
                                      'adjust for head size'))
    projection_vector = traits.List(traits.Float(), argstr='%s', sep='x',
                                    position=6,
                                    desc='vector to project warp against')
class JacobianDeterminantOutputSpec(TraitedSpec):
    """Outputs of ``ANTSJacobian``."""
    jacobian_image = File(exists=True, desc='(log transformed) jacobian image')
class JacobianDeterminant(ANTSCommand):
    """Wraps the ANTS ``ANTSJacobian`` tool, which computes the (optionally
    log-transformed) jacobian determinant image of a warp field.

    Examples
    --------
    >>> from nipype.interfaces.ants import JacobianDeterminant
    >>> jacobian = JacobianDeterminant()
    >>> jacobian.inputs.dimension = 3
    >>> jacobian.inputs.warp_file = 'ants_Warp.nii.gz'
    >>> jacobian.inputs.output_prefix = 'Sub001_'
    >>> jacobian.inputs.use_log = 1
    >>> jacobian.cmdline
    'ANTSJacobian 3 ants_Warp.nii.gz Sub001_ 1'
    """
    _cmd = 'ANTSJacobian'
    input_spec = JacobianDeterminantInputSpec
    output_spec = JacobianDeterminantOutputSpec

    def _gen_filename(self, name):
        """Default ``output_prefix`` to ``<warp basename>_`` when unset."""
        if name != 'output_prefix':
            return None
        prefix = self.inputs.output_prefix
        if isdefined(prefix):
            return prefix
        _, base, ext = split_filename(self.inputs.warp_file)
        return base + '_'

    def _list_outputs(self):
        """Report the (log-)jacobian image path according to ``use_log``."""
        outputs = self._outputs().get()
        suffix = 'logjacobian.nii.gz' if self.inputs.use_log == 1 else 'jacobian.nii.gz'
        outputs['jacobian_image'] = os.path.abspath(
            self._gen_filename('output_prefix') + suffix)
        return outputs
| 40.574359 | 168 | 0.652679 | import os
from .base import ANTSCommand, ANTSCommandInputSpec
from ..base import (TraitedSpec, File, traits,
isdefined)
from ...utils.filemanip import split_filename
from nipype.interfaces.base import InputMultiPath
class AverageAffineTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)')
output_affine_transform = File(argstr='%s', mandatory=True, position=1, desc='Outputfname.txt: the name of the resulting transform.')
transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
position=3, desc=('transforms to average'))
class AverageAffineTransformOutputSpec(TraitedSpec):
affine_transform = File(exists=True, desc='average transform file')
class AverageAffineTransform(ANTSCommand):
_cmd = 'AverageAffineTransform'
input_spec = AverageAffineTransformInputSpec
output_spec = AverageAffineTransformOutputSpec
def _format_arg(self, opt, spec, val):
return super(AverageAffineTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['affine_transform'] = os.path.abspath(
self.inputs.output_affine_transform)
return outputs
class AverageImagesInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', mandatory=True,
position=0, desc='image dimension (2 or 3)')
output_average_image = File("average.nii", argstr='%s', position=1, desc='the name of the resulting image.', usedefault=True, hash_files=False)
normalize = traits.Bool(argstr="%d", mandatory=True, position=2, desc='Normalize: if true, the 2nd image' +
'is divided by its mean. This will select the largest image to average into.')
images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=3, desc=('image to apply transformation to (generally a coregistered functional)'))
class AverageImagesOutputSpec(TraitedSpec):
output_average_image = File(exists=True, desc='average image file')
class AverageImages(ANTSCommand):
_cmd = 'AverageImages'
input_spec = AverageImagesInputSpec
output_spec = AverageImagesOutputSpec
def _format_arg(self, opt, spec, val):
return super(AverageImages, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_average_image'] = os.path.realpath(
self.inputs.output_average_image)
return outputs
class MultiplyImagesInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)')
first_input = File(
argstr='%s', exists=True, mandatory=True, position=1, desc='image 1')
second_input = traits.Either(File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2, desc='image 2 or multiplication weight')
output_product_image = File(argstr='%s', mandatory=True, position=3, desc='Outputfname.nii.gz: the name of the resulting image.')
class MultiplyImagesOutputSpec(TraitedSpec):
output_product_image = File(exists=True, desc='average image file')
class MultiplyImages(ANTSCommand):
_cmd = 'MultiplyImages'
input_spec = MultiplyImagesInputSpec
output_spec = MultiplyImagesOutputSpec
def _format_arg(self, opt, spec, val):
return super(MultiplyImages, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_product_image'] = os.path.abspath(
self.inputs.output_product_image)
return outputs
class JacobianDeterminantInputSpec(ANTSCommandInputSpec):
    # Positional arguments of the ``ANTSJacobian`` command line, in order.
    dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True,
                            position=0, desc='image dimension (2 or 3)')
    warp_file = File(argstr='%s', exists=True, mandatory=True,
                     position=1, desc='input warp file')
    # Auto-generated from the warp filename when unset
    # (see JacobianDeterminant._gen_filename).
    output_prefix = File(argstr='%s', genfile=True, hash_files=False,
                         position=2,
                         desc=('prefix of the output image filename: '
                               'PREFIX(log)jacobian.nii.gz'))
    use_log = traits.Enum(0, 1, argstr='%d', position=3,
                          desc='log transform the jacobian determinant')
    template_mask = File(argstr='%s', exists=True, position=4,
                         desc='template mask to adjust for head size')
    norm_by_total = traits.Enum(0, 1, argstr='%d', position=5,
                                desc=('normalize jacobian by total in mask to '
                                      'adjust for head size'))
    projection_vector = traits.List(traits.Float(), argstr='%s', sep='x',
                                    position=6,
                                    desc='vector to project warp against')
class JacobianDeterminantOutputSpec(TraitedSpec):
    # PREFIXjacobian.nii.gz, or PREFIXlogjacobian.nii.gz when use_log == 1.
    jacobian_image = File(exists=True, desc='(log transformed) jacobian image')
class JacobianDeterminant(ANTSCommand):
    """Interface to ``ANTSJacobian``: computes the (log-transformed)
    jacobian determinant image of a deformation field."""

    _cmd = 'ANTSJacobian'
    input_spec = JacobianDeterminantInputSpec
    output_spec = JacobianDeterminantOutputSpec

    def _gen_filename(self, name):
        # Only the output prefix is auto-generated.
        if name != 'output_prefix':
            return None
        output = self.inputs.output_prefix
        if not isdefined(output):
            # Default: derive the prefix from the warp filename.
            _, base, _ = split_filename(self.inputs.warp_file)
            output = base + '_'
        return output

    def _list_outputs(self):
        outputs = self._outputs().get()
        # ANTSJacobian appends 'logjacobian' or 'jacobian' to the prefix
        # depending on whether the log transform was requested.
        suffix = 'logjacobian.nii.gz' if self.inputs.use_log == 1 \
            else 'jacobian.nii.gz'
        outputs['jacobian_image'] = os.path.abspath(
            self._gen_filename('output_prefix') + suffix)
        return outputs
| true | true |
f739c131ee53f3aa4246f208bf0957e5d21c1c45 | 19,982 | py | Python | scarlet/display.py | Xudewang/scarlet | 1d2a1806038cda8ac96e4c766a5cfa0b8ae5c1b7 | [
"MIT"
] | 1 | 2021-06-02T07:05:42.000Z | 2021-06-02T07:05:42.000Z | scarlet/display.py | Xudewang/scarlet | 1d2a1806038cda8ac96e4c766a5cfa0b8ae5c1b7 | [
"MIT"
] | null | null | null | scarlet/display.py | Xudewang/scarlet | 1d2a1806038cda8ac96e4c766a5cfa0b8ae5c1b7 | [
"MIT"
] | null | null | null | import numpy as np
from astropy.visualization.lupton_rgb import LinearMapping, AsinhMapping
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import MaxNLocator
from .bbox import Box
from .component import Component
def channels_to_rgb(channels):
    """Get the linear mapping of multiple channels to RGB channels

    Assumes the channels are ordered by wavelength, shortest first, and
    distributes them over R, G and B so that every channel contributes a
    roughly equal total weight. Signal-to-noise differences between channels
    and human color perception are not taken into account.

    Parameters
    ----------
    channels: int in range(0,7)
        Number of channels

    Returns
    -------
    array (3, channels) to map onto RGB
    """
    assert channels in range(
        0, 8
    ), "No mapping has been implemented for more than {} channels".format(channels)

    if channels == 0:
        # Degenerate case: nothing to map.
        return np.zeros((3, 0))

    two_thirds = 2 / 3.
    # Hand-tuned RGB weights per channel count (rows: R, G, B) together with
    # the overall normalization each table is divided by.
    weight_tables = {
        1: ([[1], [1], [1]], 1),
        2: ([[0, 0.667],
             [0.333, 0.333],
             [0.667, 0]], 0.667),
        3: ([[0, 0, 1],
             [0, 1, 0],
             [1, 0, 0]], 1),
        4: ([[0, 0, 0.333, 1],
             [0, 0.667, 0.667, 0],
             [1, 0.333, 0, 0]], 1.333),
        5: ([[0, 0, 0, 0.667, 1],
             [0, 0.333, 1, 0.333, 0],
             [1, 0.667, 0, 0, 0]], 1.667),
        6: ([[0, 0, 0, 0.333, 0.667, 1],
             [0, 0.333, 0.667, 0.667, 0.333, 0],
             [1, 0.667, 0.333, 0, 0, 0]], 2),
        7: ([[0, 0, 0, 0.333, 0.667, 1, two_thirds],
             [0, 0.333, 0.667, 0.667, 0.333, 0, two_thirds],
             [1, 0.667, 0.333, 0, 0, 0, two_thirds]], 2),
    }
    weights, normalization = weight_tables[channels]
    return np.asarray(weights, dtype=float) / normalization
class LinearPercentileNorm(LinearMapping):
    def __init__(self, img, percentiles=[1, 99]):
        """Linear norm spanning a percentile range of ``img``.

        Pixel values at or below the lower percentile map to zero; values
        at or above the upper percentile saturate.

        Parameters
        ----------
        img: array_like
            Image to normalize
        percentiles: array_like, default=[1,99]
            Lower and upper percentile of ``img`` to span.
        """
        assert len(percentiles) == 2
        lo, hi = np.percentile(img, percentiles)
        super().__init__(minimum=lo, maximum=hi)
class AsinhPercentileNorm(AsinhMapping):
    def __init__(self, img, percentiles=[1, 99]):
        """Asinh norm spanning a percentile range of ``img``.

        Pixel values at or below the lower percentile map to zero; values
        at or above the upper percentile saturate.

        Parameters
        ----------
        img: array_like
            Image to normalize
        percentiles: array_like, default=[1,99]
            Lower and upper percentile of ``img`` to span.
        """
        assert len(percentiles) == 2
        lo, hi = np.percentile(img, percentiles)
        # Pick the softening parameter assuming a flat spectrum at the upper
        # cut: beta solves stretch = beta * sinh(1).
        span = hi - lo
        super().__init__(minimum=lo, stretch=span, Q=span / np.sinh(1))
def img_to_3channel(img, channel_map=None, fill_value=0):
    """Convert multi-band image cube into 3 RGB channels

    Parameters
    ----------
    img: array_like
        This should be an array with dimensions (channels, height, width),
        or (height, width) for a single-channel image.
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    fill_value: float, default=`0`
        Value to use for any masked pixels.

    Returns
    -------
    RGB: numpy array with dtype float
    """
    # expand single img into cube
    assert len(img.shape) in [2, 3]
    if len(img.shape) == 2:
        ny, nx = img.shape
        img_ = img.reshape(1, ny, nx)
    else:
        img_ = img
    C = len(img_)

    # filterWeights: channel x band
    if channel_map is None:
        channel_map = channels_to_rgb(C)
    else:
        # Bug fix: compare against the channel count C, not len(img), which
        # is the image *height* when a 2D image was passed.
        assert channel_map.shape == (3, C)

    # map channels onto RGB channels
    _, ny, nx = img_.shape
    rgb = np.dot(channel_map, img_.reshape(C, -1)).reshape(3, ny, nx)

    if hasattr(rgb, "mask"):
        rgb = rgb.filled(fill_value)

    return rgb
def img_to_rgb(img, channel_map=None, fill_value=0, norm=None, mask=None):
    """Convert images to normalized RGB.

    If normalized values are outside of the range [0..255], they will be
    truncated such as to preserve the corresponding color.

    Parameters
    ----------
    img: array_like
        This should be an array with dimensions (channels, height, width).
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    fill_value: float, default=`0`
        Value to use for any masked pixels.
    norm: `scarlet.display.Norm`, default `None`
        Norm to use for mapping in the allowed range [0..255]. If `norm=None`,
        `scarlet.display.LinearPercentileNorm` will be used.
    mask: array_like
        A [0,1] binary mask to apply over the top of the image,
        where pixels with mask==1 are masked out.

    Returns
    -------
    rgb: numpy array with dimensions (3, height, width) and dtype uint8
    """
    # Bug fix: forward fill_value so masked pixels are actually filled with
    # it (it was previously accepted but silently ignored).
    RGB = img_to_3channel(img, channel_map=channel_map, fill_value=fill_value)
    if norm is None:
        norm = LinearMapping(image=RGB)
    rgb = norm.make_rgb_image(*RGB)
    if mask is not None:
        # Append an alpha channel that hides masked pixels.
        rgb = np.dstack([rgb, ~mask * 255])
    return rgb
# Edge length (in inches) of one matplotlib panel in the figures below.
panel_size = 4.0
def show_likelihood(blend, figsize=None, **kwargs):
    """Plot the log-likelihood history of a blend fit.

    ``kwargs`` are forwarded to ``Axes.plot``. Returns the matplotlib figure.
    """
    figure, axis = plt.subplots(1, 1, figsize=figsize)
    axis.plot(blend.log_likelihood, **kwargs)
    axis.set_xlabel("Iteration")
    # Iterations are integers; avoid fractional ticks.
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    axis.set_ylabel("log-Likelihood")
    return figure
def show_observation(
    observation,
    norm=None,
    channel_map=None,
    sky_coords=None,
    show_psf=False,
    add_labels=True,
    figsize=None,
):
    """Plot observation in standardized form.

    Parameters
    ----------
    observation: `~scarlet.Observation`
        The observation to display.
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    sky_coords: list of source centers
        Required when ``add_labels`` is True; each center is annotated with
        its index in this list.
    show_psf: bool
        Whether the observation PSF is shown in a second panel.
    add_labels: bool
        Whether source centers from ``sky_coords`` are labeled.
    figsize: matplotlib figsize argument

    Returns
    -------
    matplotlib figure
    """
    panels = 1 if show_psf is False else 2
    if figsize is None:
        figsize = (panel_size * panels, panel_size)
    fig, ax = plt.subplots(1, panels, figsize=figsize)
    # With a single panel, plt.subplots returns a bare Axes; normalize
    # to a sequence so indexing below works either way.
    if not hasattr(ax, "__iter__"):
        ax = (ax,)

    # Mask any pixels with zero weight in all bands
    mask = np.sum(observation.weights, axis=0) == 0
    # if there are no masked pixels, do not use a mask
    if np.all(mask == 0):
        mask = None

    panel = 0
    extent = get_extent(observation.bbox)
    ax[panel].imshow(
        img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),
        extent=extent,
        origin="lower",
    )
    ax[panel].set_title("Observation")

    if add_labels:
        assert sky_coords is not None, "Provide sky_coords for labeled objects"
        for k, center in enumerate(sky_coords):
            center_ = observation.get_pixel(center)
            # White labels on multi-channel images, red on single-channel.
            color = "w" if observation.C > 1 else "r"
            ax[panel].text(*center_[::-1], k, color=color, ha="center", va="center")

    panel += 1
    if show_psf:
        psf_image = np.zeros(observation.data.shape)

        if observation.psf is not None:
            psf_model = observation.psf.get_model()
            # make PSF as bright as the brightest pixel of the observation
            psf_model *= (
                observation.data.mean(axis=0).max() / psf_model.mean(axis=0).max()
            )
            # insert into middle of "blank" observation
            full_box = Box(psf_image.shape)
            shift = tuple(
                psf_image.shape[c] // 2 - psf_model.shape[c] // 2
                for c in range(full_box.D)
            )
            model_box = Box(psf_model.shape) + shift
            model_box.insert_into(psf_image, psf_model)

        ax[panel].imshow(img_to_rgb(psf_image, norm=norm), origin="lower")
        ax[panel].set_title("PSF")

    fig.tight_layout()
    return fig
def show_scene(
    sources,
    observation=None,
    norm=None,
    channel_map=None,
    show_model=True,
    show_observed=False,
    show_rendered=False,
    show_residual=False,
    add_labels=True,
    add_boxes=False,
    figsize=None,
    linear=True,
):
    """Plot all sources to recreate the scene.

    The function provides a fast way of evaluating the quality of the entire
    model, i.e. the combination of all sources that seek to fit the
    observation.

    Parameters
    ----------
    sources: list of source models
    observation: `~scarlet.Observation`
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    show_model: bool
        Whether the model is shown in the model frame
    show_observed: bool
        Whether the observation is shown
    show_rendered: bool
        Whether the model, rendered to match the observation, is shown
    show_residual: bool
        Whether the residuals between rendered model and observation is shown
    add_labels: bool
        Whether each source is labeled with its numerical index in the source list
    add_boxes: bool
        Whether each source box is shown
    figsize: matplotlib figsize argument
    linear: bool
        Whether or not to display the scene in a single line (`True`) or
        on multiple lines (`False`).

    Returns
    -------
    matplotlib figure
    """
    if show_observed or show_rendered or show_residual:
        assert (
            observation is not None
        ), "Provide matched observation to show observed frame"

    panels = sum((show_model, show_observed, show_rendered, show_residual))
    if linear:
        if figsize is None:
            figsize = (panel_size * panels, panel_size)
        fig, ax = plt.subplots(1, panels, figsize=figsize)
    else:
        # Arrange the panels on two rows.
        columns = int(np.ceil(panels / 2))
        if figsize is None:
            figsize = (panel_size * columns, panel_size * 2)
        fig = plt.figure(figsize=figsize)
        ax = [fig.add_subplot(2, columns, n + 1) for n in range(panels)]
    if not hasattr(ax, "__iter__"):
        ax = (ax,)

    if observation is not None:
        # Mask any pixels with zero weight in all bands
        mask = np.sum(observation.weights, axis=0) == 0
        # if there are no masked pixels, do not use a mask
        if np.all(mask == 0):
            mask = None

    # Accumulate all sources in the model frame.
    model_frame = sources[0].frame
    model = np.zeros(model_frame.shape)
    for src in sources:
        model += src.get_model(frame=model_frame)

    panel = 0
    if show_model:
        extent = get_extent(model_frame.bbox)
        ax[panel].imshow(
            img_to_rgb(model, norm=norm, channel_map=channel_map),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Model")
        panel += 1

    if show_rendered or show_residual:
        model = observation.render(model)
        extent = get_extent(observation.bbox)

    if show_rendered:
        ax[panel].imshow(
            img_to_rgb(model, norm=norm, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Model Rendered")
        panel += 1

    if show_observed:
        # Bug fix: derive the extent from the observation box here instead of
        # reusing whatever an earlier panel happened to set. Previously this
        # raised NameError when only the observation panel was requested, and
        # used the model-frame extent when only the model panel preceded it.
        extent = get_extent(observation.bbox)
        ax[panel].imshow(
            img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Observation")
        panel += 1

    if show_residual:
        residual = observation.data - model
        # Residuals get their own symmetric-ish linear norm.
        norm_ = LinearPercentileNorm(residual)
        ax[panel].imshow(
            img_to_rgb(residual, norm=norm_, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Residual")
        panel += 1

    for k, src in enumerate(sources):
        if add_boxes:
            panel = 0
            box_kwargs = {"facecolor": "none", "edgecolor": "w", "lw": 0.5}
            if show_model:
                # Source box in the model frame.
                extent = get_extent(src.bbox)
                rect = Rectangle(
                    (extent[0], extent[2]),
                    extent[1] - extent[0],
                    extent[3] - extent[2],
                    **box_kwargs
                )
                ax[panel].add_artist(rect)
            panel = 1
            if observation is not None:
                # Project the source box corners into the observation frame.
                start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]
                points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))
                coords = [
                    observation.get_pixel(model_frame.get_sky_coord(p)) for p in points
                ]
                for panel in range(panel, panels):
                    poly = Polygon(coords, closed=True, **box_kwargs)
                    ax[panel].add_artist(poly)

        if add_labels and hasattr(src, "center") and src.center is not None:
            center = src.center
            panel = 0
            if show_model:
                ax[panel].text(*center[::-1], k, color="w", ha="center", va="center")
            panel = 1
            if observation is not None:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                for panel in range(panel, panels):
                    ax[panel].text(
                        *center_[::-1], k, color="w", ha="center", va="center"
                    )

    fig.tight_layout()
    return fig
def get_extent(bbox):
    """Return the matplotlib ``imshow`` extent [left, right, bottom, top]
    of the last two (y, x) dimensions of ``bbox``."""
    (y0, x0), (y1, x1) = bbox.start[-2:], bbox.stop[-2:]
    return [x0, x1, y0, y1]
def show_sources(
    sources,
    observation=None,
    norm=None,
    channel_map=None,
    show_model=True,
    show_observed=False,
    show_rendered=False,
    show_spectrum=True,
    figsize=None,
    model_mask=None,
    add_markers=True,
    add_boxes=False,
):
    """Plot each source individually.

    The function provides a more detailed inspection of every source in the list.

    Parameters
    ----------
    sources: list of source models
    observation: `~scarlet.Observation`
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    show_model: bool
        Whether the model is shown in the model frame
    show_observed: bool
        Whether the observation is shown
    show_rendered: bool
        Whether the model, rendered to match the observation, is shown
    show_spectrum: bool
        Whether source spectrum is shown.
        For multi-component sources, spectra are shown separately.
    figsize: matplotlib figsize argument
    model_mask: array
        Mask used to hide pixels in the model only.
    add_markers: bool
        Whether or not to mark the centers of the sources
        with their source number.
    add_boxes: bool
        Whether source boxes are shown

    Returns
    -------
    matplotlib figure
    """
    if show_observed or show_rendered:
        assert (
            observation is not None
        ), "Provide matched observation to show observed frame"

    panels = sum((show_model, show_observed, show_rendered, show_spectrum))
    if figsize is None:
        figsize = (panel_size * panels, panel_size * len(list(sources)))

    fig, ax = plt.subplots(len(list(sources)), panels, figsize=figsize, squeeze=False)

    marker_kwargs = {"mew": 1, "ms": 10}
    box_kwargs = {"facecolor": "none", "edgecolor": "w", "lw": 0.5}

    for k, src in enumerate(sources):
        model_frame = src.frame

        if hasattr(src, "center") and src.center is not None:
            center = np.array(src.center)[::-1]
        else:
            center = None

        if add_boxes:
            # Project the source box corners into the observation frame.
            start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]
            points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))
            box_coords = [
                observation.get_pixel(model_frame.get_sky_coord(p)) for p in points
            ]

        # model in its bbox
        panel = 0
        model = src.get_model()

        if show_model:
            # Show the unrendered model in its bbox
            extent = get_extent(src.bbox)
            ax[k][panel].imshow(
                img_to_rgb(model, norm=norm, channel_map=channel_map, mask=model_mask),
                extent=extent,
                origin="lower",
            )
            ax[k][panel].set_title("Model Source {}".format(k))
            if center is not None and add_markers:
                ax[k][panel].plot(*center, "wx", **marker_kwargs)
            panel += 1

        # model in observation frame
        if show_rendered:
            # Center and show the rendered model
            model_ = src.get_model(frame=model_frame)
            model_ = observation.render(model_)
            extent = get_extent(observation.bbox)
            ax[k][panel].imshow(
                img_to_rgb(model_, norm=norm, channel_map=channel_map),
                extent=extent,
                origin="lower",
            )
            ax[k][panel].set_title("Model Source {} Rendered".format(k))
            if center is not None and add_markers:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                ax[k][panel].plot(*center_, "wx", **marker_kwargs)
            if add_boxes:
                poly = Polygon(box_coords, closed=True, **box_kwargs)
                ax[k][panel].add_artist(poly)
            panel += 1

        if show_observed:
            # Bug fix: always derive the extent from the observation box here.
            # Previously this branch reused whichever `extent` an earlier
            # panel had set -- the *source* box when show_rendered=False.
            extent = get_extent(observation.bbox)
            _images = observation.data
            ax[k][panel].imshow(
                img_to_rgb(_images, norm=norm, channel_map=channel_map),
                extent=extent,
                origin="lower",
            )
            # Bug fix: dropped a pointless `.format(k)` on a literal without
            # placeholders; the rendered title text is unchanged.
            ax[k][panel].set_title("Observation")
            if center is not None and add_markers:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                ax[k][panel].plot(*center_, "wx", **marker_kwargs)
            if add_boxes:
                poly = Polygon(box_coords, closed=True, **box_kwargs)
                ax[k][panel].add_artist(poly)
            panel += 1

        if show_spectrum:
            # needs to be evaluated in the source box to prevent truncation
            if hasattr(src, "__iter__") and isinstance(src[0], Component):
                spectra = []
                for component in src:
                    model_ = component.get_model()
                    spectra.append(model_.sum(axis=(1, 2)))
            else:
                spectra = [model.sum(axis=(1, 2))]

            for spectrum in spectra:
                ax[k][panel].plot(spectrum)
            ax[k][panel].set_xticks(range(len(spectrum)))
            if hasattr(src.frame, "channels") and src.frame.channels is not None:
                ax[k][panel].set_xticklabels(src.frame.channels)
            ax[k][panel].set_title("Spectrum")
            ax[k][panel].set_xlabel("Channel")
            ax[k][panel].set_ylabel("Intensity")

    fig.tight_layout()
    return fig
| 34.274443 | 88 | 0.591232 | import numpy as np
from astropy.visualization.lupton_rgb import LinearMapping, AsinhMapping
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import MaxNLocator
from .bbox import Box
from .component import Component
def channels_to_rgb(channels):
    """Get the linear mapping of multiple channels to RGB channels

    Assumes the channels are ordered by wavelength, shortest first, and
    distributes them over R, G and B so that every channel contributes a
    roughly equal total weight. Signal-to-noise differences between channels
    and human color perception are not taken into account.

    Parameters
    ----------
    channels: int in range(0,7)
        Number of channels

    Returns
    -------
    array (3, channels) to map onto RGB
    """
    assert channels in range(
        0, 8
    ), "No mapping has been implemented for more than {} channels".format(channels)

    if channels == 0:
        # Degenerate case: nothing to map.
        return np.zeros((3, 0))

    two_thirds = 2 / 3.
    # Hand-tuned RGB weights per channel count (rows: R, G, B) together with
    # the overall normalization each table is divided by.
    weight_tables = {
        1: ([[1], [1], [1]], 1),
        2: ([[0, 0.667],
             [0.333, 0.333],
             [0.667, 0]], 0.667),
        3: ([[0, 0, 1],
             [0, 1, 0],
             [1, 0, 0]], 1),
        4: ([[0, 0, 0.333, 1],
             [0, 0.667, 0.667, 0],
             [1, 0.333, 0, 0]], 1.333),
        5: ([[0, 0, 0, 0.667, 1],
             [0, 0.333, 1, 0.333, 0],
             [1, 0.667, 0, 0, 0]], 1.667),
        6: ([[0, 0, 0, 0.333, 0.667, 1],
             [0, 0.333, 0.667, 0.667, 0.333, 0],
             [1, 0.667, 0.333, 0, 0, 0]], 2),
        7: ([[0, 0, 0, 0.333, 0.667, 1, two_thirds],
             [0, 0.333, 0.667, 0.667, 0.333, 0, two_thirds],
             [1, 0.667, 0.333, 0, 0, 0, two_thirds]], 2),
    }
    weights, normalization = weight_tables[channels]
    return np.asarray(weights, dtype=float) / normalization
class LinearPercentileNorm(LinearMapping):
    def __init__(self, img, percentiles=[1, 99]):
        """Create norm that is linear between lower and upper percentile of img

        Parameters
        ----------
        img: array_like
            Image to normalize
        percentiles: array_like, default=[1,99]
            Lower and upper percentile to consider. Pixel values below will be
            set to zero, above to saturated.
        """
        assert len(percentiles) == 2
        vmin, vmax = np.percentile(img, percentiles)
        super().__init__(minimum=vmin, maximum=vmax)
class AsinhPercentileNorm(AsinhMapping):
    def __init__(self, img, percentiles=[1, 99]):
        """Create norm that is asinh between lower and upper percentile of img

        Parameters
        ----------
        img: array_like
            Image to normalize
        percentiles: array_like, default=[1,99]
            Lower and upper percentile to consider. Pixel values below will be
            set to zero, above to saturated.
        """
        assert len(percentiles) == 2
        vmin, vmax = np.percentile(img, percentiles)
        # solution for beta assumes a flat spectrum at vmax
        stretch = vmax - vmin
        beta = stretch / np.sinh(1)
        super().__init__(minimum=vmin, stretch=stretch, Q=beta)
def img_to_3channel(img, channel_map=None, fill_value=0):
    """Convert multi-band image cube into 3 RGB channels

    Parameters
    ----------
    img: array_like
        This should be an array with dimensions (channels, height, width),
        or (height, width) for a single-channel image.
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    fill_value: float, default=`0`
        Value to use for any masked pixels.

    Returns
    -------
    RGB: numpy array with dtype float
    """
    # expand single img into cube
    assert len(img.shape) in [2, 3]
    if len(img.shape) == 2:
        ny, nx = img.shape
        img_ = img.reshape(1, ny, nx)
    else:
        img_ = img
    C = len(img_)

    if channel_map is None:
        channel_map = channels_to_rgb(C)
    else:
        # Bug fix: compare against the channel count C, not len(img), which
        # is the image *height* when a 2D image was passed.
        assert channel_map.shape == (3, C)

    # map channels onto RGB channels
    _, ny, nx = img_.shape
    rgb = np.dot(channel_map, img_.reshape(C, -1)).reshape(3, ny, nx)

    if hasattr(rgb, "mask"):
        rgb = rgb.filled(fill_value)

    return rgb
def img_to_rgb(img, channel_map=None, fill_value=0, norm=None, mask=None):
    """Convert images to normalized RGB.

    If normalized values are outside of the range [0..255], they will be
    truncated such as to preserve the corresponding color.

    Parameters
    ----------
    img: array_like
        This should be an array with dimensions (channels, height, width).
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    fill_value: float, default=`0`
        Value to use for any masked pixels.
    norm: `scarlet.display.Norm`, default `None`
        Norm to use for mapping in the allowed range [0..255]. If `norm=None`,
        `scarlet.display.LinearPercentileNorm` will be used.
    mask: array_like
        A [0,1] binary mask to apply over the top of the image,
        where pixels with mask==1 are masked out.

    Returns
    -------
    rgb: numpy array with dimensions (3, height, width) and dtype uint8
    """
    # Bug fix: forward fill_value so masked pixels are actually filled with
    # it (it was previously accepted but silently ignored).
    RGB = img_to_3channel(img, channel_map=channel_map, fill_value=fill_value)
    if norm is None:
        norm = LinearMapping(image=RGB)
    rgb = norm.make_rgb_image(*RGB)
    if mask is not None:
        # Append an alpha channel that hides masked pixels.
        rgb = np.dstack([rgb, ~mask * 255])
    return rgb
# Edge length (in inches) of one matplotlib panel in the figures below.
panel_size = 4.0
def show_likelihood(blend, figsize=None, **kwargs):
    """Plot the log-likelihood history of a blend fit.

    ``kwargs`` are forwarded to ``Axes.plot``. Returns the matplotlib figure.
    """
    figure, axis = plt.subplots(1, 1, figsize=figsize)
    axis.plot(blend.log_likelihood, **kwargs)
    axis.set_xlabel("Iteration")
    # Iterations are integers; avoid fractional ticks.
    axis.xaxis.set_major_locator(MaxNLocator(integer=True))
    axis.set_ylabel("log-Likelihood")
    return figure
def show_observation(
    observation,
    norm=None,
    channel_map=None,
    sky_coords=None,
    show_psf=False,
    add_labels=True,
    figsize=None,
):
    """Plot observation in standardized form.

    Parameters
    ----------
    observation: `~scarlet.Observation`
        The observation to display.
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    sky_coords: list of source centers
        Required when ``add_labels`` is True; each center is annotated with
        its index in this list.
    show_psf: bool
        Whether the observation PSF is shown in a second panel.
    add_labels: bool
        Whether source centers from ``sky_coords`` are labeled.
    figsize: matplotlib figsize argument

    Returns
    -------
    matplotlib figure
    """
    panels = 1 if show_psf is False else 2
    if figsize is None:
        figsize = (panel_size * panels, panel_size)
    fig, ax = plt.subplots(1, panels, figsize=figsize)
    # With a single panel, plt.subplots returns a bare Axes; normalize
    # to a sequence so indexing below works either way.
    if not hasattr(ax, "__iter__"):
        ax = (ax,)
    # Mask any pixels with zero weight in all bands;
    # if there are no masked pixels, do not use a mask.
    mask = np.sum(observation.weights, axis=0) == 0
    if np.all(mask == 0):
        mask = None
    panel = 0
    extent = get_extent(observation.bbox)
    ax[panel].imshow(
        img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),
        extent=extent,
        origin="lower",
    )
    ax[panel].set_title("Observation")
    if add_labels:
        assert sky_coords is not None, "Provide sky_coords for labeled objects"
        for k, center in enumerate(sky_coords):
            center_ = observation.get_pixel(center)
            # White labels on multi-channel images, red on single-channel.
            color = "w" if observation.C > 1 else "r"
            ax[panel].text(*center_[::-1], k, color=color, ha="center", va="center")
    panel += 1
    if show_psf:
        psf_image = np.zeros(observation.data.shape)
        if observation.psf is not None:
            psf_model = observation.psf.get_model()
            # Make the PSF as bright as the brightest pixel of the observation.
            psf_model *= (
                observation.data.mean(axis=0).max() / psf_model.mean(axis=0).max()
            )
            # Insert the PSF into the middle of a "blank" observation.
            full_box = Box(psf_image.shape)
            shift = tuple(
                psf_image.shape[c] // 2 - psf_model.shape[c] // 2
                for c in range(full_box.D)
            )
            model_box = Box(psf_model.shape) + shift
            model_box.insert_into(psf_image, psf_model)
        ax[panel].imshow(img_to_rgb(psf_image, norm=norm), origin="lower")
        ax[panel].set_title("PSF")
    fig.tight_layout()
    return fig
def show_scene(
    sources,
    observation=None,
    norm=None,
    channel_map=None,
    show_model=True,
    show_observed=False,
    show_rendered=False,
    show_residual=False,
    add_labels=True,
    add_boxes=False,
    figsize=None,
    linear=True,
):
    """Plot all sources to recreate the scene.

    The function provides a fast way of evaluating the quality of the entire
    model, i.e. the combination of all sources that seek to fit the
    observation.

    Parameters
    ----------
    sources: list of source models
    observation: `~scarlet.Observation`
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    show_model: bool
        Whether the model is shown in the model frame
    show_observed: bool
        Whether the observation is shown
    show_rendered: bool
        Whether the model, rendered to match the observation, is shown
    show_residual: bool
        Whether the residuals between rendered model and observation is shown
    add_labels: bool
        Whether each source is labeled with its numerical index in the source list
    add_boxes: bool
        Whether each source box is shown
    figsize: matplotlib figsize argument
    linear: bool
        Whether or not to display the scene in a single line (`True`) or
        on multiple lines (`False`).

    Returns
    -------
    matplotlib figure
    """
    if show_observed or show_rendered or show_residual:
        assert (
            observation is not None
        ), "Provide matched observation to show observed frame"

    panels = sum((show_model, show_observed, show_rendered, show_residual))
    if linear:
        if figsize is None:
            figsize = (panel_size * panels, panel_size)
        fig, ax = plt.subplots(1, panels, figsize=figsize)
    else:
        # Arrange the panels on two rows.
        columns = int(np.ceil(panels / 2))
        if figsize is None:
            figsize = (panel_size * columns, panel_size * 2)
        fig = plt.figure(figsize=figsize)
        ax = [fig.add_subplot(2, columns, n + 1) for n in range(panels)]
    if not hasattr(ax, "__iter__"):
        ax = (ax,)

    if observation is not None:
        # Mask any pixels with zero weight in all bands;
        # if there are no masked pixels, do not use a mask.
        mask = np.sum(observation.weights, axis=0) == 0
        if np.all(mask == 0):
            mask = None

    # Accumulate all sources in the model frame.
    model_frame = sources[0].frame
    model = np.zeros(model_frame.shape)
    for src in sources:
        model += src.get_model(frame=model_frame)

    panel = 0
    if show_model:
        extent = get_extent(model_frame.bbox)
        ax[panel].imshow(
            img_to_rgb(model, norm=norm, channel_map=channel_map),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Model")
        panel += 1

    if show_rendered or show_residual:
        model = observation.render(model)
        extent = get_extent(observation.bbox)

    if show_rendered:
        ax[panel].imshow(
            img_to_rgb(model, norm=norm, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Model Rendered")
        panel += 1

    if show_observed:
        # Bug fix: derive the extent from the observation box here instead of
        # reusing whatever an earlier panel happened to set. Previously this
        # raised NameError when only the observation panel was requested, and
        # used the model-frame extent when only the model panel preceded it.
        extent = get_extent(observation.bbox)
        ax[panel].imshow(
            img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Observation")
        panel += 1

    if show_residual:
        residual = observation.data - model
        # Residuals get their own linear norm.
        norm_ = LinearPercentileNorm(residual)
        ax[panel].imshow(
            img_to_rgb(residual, norm=norm_, channel_map=channel_map, mask=mask),
            extent=extent,
            origin="lower",
        )
        ax[panel].set_title("Residual")
        panel += 1

    for k, src in enumerate(sources):
        if add_boxes:
            panel = 0
            box_kwargs = {"facecolor": "none", "edgecolor": "w", "lw": 0.5}
            if show_model:
                # Source box in the model frame.
                extent = get_extent(src.bbox)
                rect = Rectangle(
                    (extent[0], extent[2]),
                    extent[1] - extent[0],
                    extent[3] - extent[2],
                    **box_kwargs
                )
                ax[panel].add_artist(rect)
            panel = 1
            if observation is not None:
                # Project the source box corners into the observation frame.
                start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]
                points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))
                coords = [
                    observation.get_pixel(model_frame.get_sky_coord(p)) for p in points
                ]
                for panel in range(panel, panels):
                    poly = Polygon(coords, closed=True, **box_kwargs)
                    ax[panel].add_artist(poly)

        if add_labels and hasattr(src, "center") and src.center is not None:
            center = src.center
            panel = 0
            if show_model:
                ax[panel].text(*center[::-1], k, color="w", ha="center", va="center")
            panel = 1
            if observation is not None:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                for panel in range(panel, panels):
                    ax[panel].text(
                        *center_[::-1], k, color="w", ha="center", va="center"
                    )

    fig.tight_layout()
    return fig
def get_extent(bbox):
    """Return the matplotlib ``imshow`` extent [left, right, bottom, top]
    of the last two (y, x) dimensions of ``bbox``."""
    (y0, x0), (y1, x1) = bbox.start[-2:], bbox.stop[-2:]
    return [x0, x1, y0, y1]
def show_sources(
    sources,
    observation=None,
    norm=None,
    channel_map=None,
    show_model=True,
    show_observed=False,
    show_rendered=False,
    show_spectrum=True,
    figsize=None,
    model_mask=None,
    add_markers=True,
    add_boxes=False,
):
    """Plot each source individually.

    The function provides a more detailed inspection of every source in the list.

    Parameters
    ----------
    sources: list of source models
    observation: `~scarlet.Observation`
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    show_model: bool
        Whether the model is shown in the model frame
    show_observed: bool
        Whether the observation is shown
    show_rendered: bool
        Whether the model, rendered to match the observation, is shown
    show_spectrum: bool
        Whether source spectrum is shown.
        For multi-component sources, spectra are shown separately.
    figsize: matplotlib figsize argument
    model_mask: array
        Mask used to hide pixels in the model only.
    add_markers: bool
        Whether or not to mark the centers of the sources
        with their source number.
    add_boxes: bool
        Whether source boxes are shown

    Returns
    -------
    matplotlib figure
    """
    if show_observed or show_rendered:
        assert (
            observation is not None
        ), "Provide matched observation to show observed frame"

    panels = sum((show_model, show_observed, show_rendered, show_spectrum))
    if figsize is None:
        figsize = (panel_size * panels, panel_size * len(list(sources)))

    fig, ax = plt.subplots(len(list(sources)), panels, figsize=figsize, squeeze=False)

    marker_kwargs = {"mew": 1, "ms": 10}
    box_kwargs = {"facecolor": "none", "edgecolor": "w", "lw": 0.5}

    for k, src in enumerate(sources):
        model_frame = src.frame

        if hasattr(src, "center") and src.center is not None:
            center = np.array(src.center)[::-1]
        else:
            center = None

        if add_boxes:
            # Project the source box corners into the observation frame.
            start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]
            points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))
            box_coords = [
                observation.get_pixel(model_frame.get_sky_coord(p)) for p in points
            ]

        # model in its bbox
        panel = 0
        model = src.get_model()

        if show_model:
            # Show the unrendered model in its bbox
            extent = get_extent(src.bbox)
            ax[k][panel].imshow(
                img_to_rgb(model, norm=norm, channel_map=channel_map, mask=model_mask),
                extent=extent,
                origin="lower",
            )
            ax[k][panel].set_title("Model Source {}".format(k))
            if center is not None and add_markers:
                ax[k][panel].plot(*center, "wx", **marker_kwargs)
            panel += 1

        # model in observation frame
        if show_rendered:
            # Center and show the rendered model
            model_ = src.get_model(frame=model_frame)
            model_ = observation.render(model_)
            extent = get_extent(observation.bbox)
            ax[k][panel].imshow(
                img_to_rgb(model_, norm=norm, channel_map=channel_map),
                extent=extent,
                origin="lower",
            )
            ax[k][panel].set_title("Model Source {} Rendered".format(k))
            if center is not None and add_markers:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                ax[k][panel].plot(*center_, "wx", **marker_kwargs)
            if add_boxes:
                poly = Polygon(box_coords, closed=True, **box_kwargs)
                ax[k][panel].add_artist(poly)
            panel += 1

        if show_observed:
            # Bug fix: always derive the extent from the observation box here.
            # Previously this branch reused whichever `extent` an earlier
            # panel had set -- the *source* box when show_rendered=False.
            extent = get_extent(observation.bbox)
            _images = observation.data
            ax[k][panel].imshow(
                img_to_rgb(_images, norm=norm, channel_map=channel_map),
                extent=extent,
                origin="lower",
            )
            # Bug fix: dropped a pointless `.format(k)` on a literal without
            # placeholders; the rendered title text is unchanged.
            ax[k][panel].set_title("Observation")
            if center is not None and add_markers:
                center_ = observation.get_pixel(model_frame.get_sky_coord(center))
                ax[k][panel].plot(*center_, "wx", **marker_kwargs)
            if add_boxes:
                poly = Polygon(box_coords, closed=True, **box_kwargs)
                ax[k][panel].add_artist(poly)
            panel += 1

        if show_spectrum:
            # needs to be evaluated in the source box to prevent truncation
            if hasattr(src, "__iter__") and isinstance(src[0], Component):
                spectra = []
                for component in src:
                    model_ = component.get_model()
                    spectra.append(model_.sum(axis=(1, 2)))
            else:
                spectra = [model.sum(axis=(1, 2))]

            for spectrum in spectra:
                ax[k][panel].plot(spectrum)
            ax[k][panel].set_xticks(range(len(spectrum)))
            if hasattr(src.frame, "channels") and src.frame.channels is not None:
                ax[k][panel].set_xticklabels(src.frame.channels)
            ax[k][panel].set_title("Spectrum")
            ax[k][panel].set_xlabel("Channel")
            ax[k][panel].set_ylabel("Intensity")

    fig.tight_layout()
    return fig
| true | true |
f739c1ff940a1c8a4d27ed70b89d0437b211ebb7 | 23,006 | py | Python | assets/src/ba_data/python/bastd/ui/watch.py | MalTarDesigns/ballistica | c38ae5c39b3cc7985be166a959245ca060d3bf31 | [
"MIT"
] | 2 | 2020-07-02T22:18:58.000Z | 2020-07-02T22:19:49.000Z | assets/src/ba_data/python/bastd/ui/watch.py | MalTarDesigns/ballistica | c38ae5c39b3cc7985be166a959245ca060d3bf31 | [
"MIT"
] | null | null | null | assets/src/ba_data/python/bastd/ui/watch.py | MalTarDesigns/ballistica | c38ae5c39b3cc7985be166a959245ca060d3bf31 | [
"MIT"
] | null | null | null | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Provides UI functionality for watching replays."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, cast
import _ba
import ba
if TYPE_CHECKING:
from typing import Any, Optional, Tuple, Dict
class WatchWindow(ba.Window):
    """Window for watching replays."""

    def __init__(self,
                 transition: Optional[str] = 'in_right',
                 origin_widget: Optional[ba.Widget] = None):
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        from bastd.ui import tabs
        ba.set_analytics_screen('Watch Window')
        scale_origin: Optional[Tuple[float, float]]
        # When launched from a specific widget, scale in/out of that
        # widget's on-screen position instead of sliding from the side.
        if origin_widget is not None:
            self._transition_out = 'out_scale'
            scale_origin = origin_widget.get_screen_space_center()
            transition = 'in_scale'
        else:
            self._transition_out = 'out_right'
            scale_origin = None
        ba.app.main_window = 'Watch'
        self._tab_data: Dict[str, Any] = {}
        self._my_replays_scroll_width: Optional[float] = None
        self._my_replays_watch_replay_button: Optional[ba.Widget] = None
        self._scrollwidget: Optional[ba.Widget] = None
        self._columnwidget: Optional[ba.Widget] = None
        self._my_replay_selected: Optional[str] = None
        self._my_replays_rename_window: Optional[ba.Widget] = None
        self._my_replay_rename_text: Optional[ba.Widget] = None
        self._r = 'watchWindow'
        # Window geometry depends on the UI scale (small/med/large).
        self._width = 1240 if ba.app.small_ui else 1040
        x_inset = 100 if ba.app.small_ui else 0
        self._height = (578
                        if ba.app.small_ui else 670 if ba.app.med_ui else 800)
        self._current_tab: Optional[str] = None
        extra_top = 20 if ba.app.small_ui else 0
        super().__init__(root_widget=ba.containerwidget(
            size=(self._width, self._height + extra_top),
            transition=transition,
            toolbar_visibility='menu_minimal',
            scale_origin_stack_offset=scale_origin,
            scale=(1.3 if ba.app.small_ui else 0.97 if ba.app.med_ui else 0.8),
            stack_offset=(0, -10) if ba.app.small_ui else (
                0, 15) if ba.app.med_ui else (0, 0)))
        # With small UI plus toolbars the system back button is used,
        # so we create no dedicated back-button widget.
        if ba.app.small_ui and ba.app.toolbars:
            ba.containerwidget(edit=self._root_widget,
                               on_cancel_call=self._back)
            self._back_button = None
        else:
            self._back_button = btn = ba.buttonwidget(
                parent=self._root_widget,
                autoselect=True,
                position=(70 + x_inset, self._height - 74),
                size=(140, 60),
                scale=1.1,
                label=ba.Lstr(resource='backText'),
                button_type='back',
                on_activate_call=self._back)
            ba.containerwidget(edit=self._root_widget, cancel_button=btn)
            ba.buttonwidget(edit=btn,
                            button_type='backSmall',
                            size=(60, 60),
                            label=ba.charstr(ba.SpecialChar.BACK))
        ba.textwidget(parent=self._root_widget,
                      position=(self._width * 0.5, self._height - 38),
                      size=(0, 0),
                      color=ba.app.title_color,
                      scale=1.5,
                      h_align='center',
                      v_align='center',
                      text=ba.Lstr(resource=self._r + '.titleText'),
                      maxwidth=400)
        tabs_def = [('my_replays',
                     ba.Lstr(resource=self._r + '.myReplaysText'))]
        scroll_buffer_h = 130 + 2 * x_inset
        tab_buffer_h = 750 + 2 * x_inset
        self._tab_buttons = tabs.create_tab_buttons(
            self._root_widget,
            tabs_def,
            pos=(tab_buffer_h * 0.5, self._height - 130),
            size=(self._width - tab_buffer_h, 50),
            on_select_call=self._set_tab)
        # Wire directional navigation into the global toolbar widgets.
        if ba.app.toolbars:
            ba.widget(edit=self._tab_buttons[tabs_def[-1][0]],
                      right_widget=_ba.get_special_widget('party_button'))
            if ba.app.small_ui:
                bbtn = _ba.get_special_widget('back_button')
                ba.widget(edit=self._tab_buttons[tabs_def[0][0]],
                          up_widget=bbtn,
                          left_widget=bbtn)
        self._scroll_width = self._width - scroll_buffer_h
        self._scroll_height = self._height - 180
        # not actually using a scroll widget anymore; just an image
        scroll_left = (self._width - self._scroll_width) * 0.5
        scroll_bottom = self._height - self._scroll_height - 79 - 48
        buffer_h = 10
        buffer_v = 4
        ba.imagewidget(parent=self._root_widget,
                       position=(scroll_left - buffer_h,
                                 scroll_bottom - buffer_v),
                       size=(self._scroll_width + 2 * buffer_h,
                             self._scroll_height + 2 * buffer_v),
                       texture=ba.gettexture('scrollWidget'),
                       model_transparent=ba.getmodel('softEdgeOutside'))
        self._tab_container: Optional[ba.Widget] = None
        self._restore_state()

    def _set_tab(self, tab: str) -> None:
        """Make ``tab`` the active tab, rebuilding its contents."""
        # pylint: disable=too-many-locals
        from bastd.ui import tabs
        if self._current_tab == tab:
            return
        self._current_tab = tab

        # We wanna preserve our current tab between runs.
        cfg = ba.app.config
        cfg['Watch Tab'] = tab
        cfg.commit()

        # Update tab colors based on which is selected.
        tabs.update_tab_button_colors(self._tab_buttons, tab)
        if self._tab_container:
            self._tab_container.delete()
        scroll_left = (self._width - self._scroll_width) * 0.5
        scroll_bottom = self._height - self._scroll_height - 79 - 48

        # A place where tabs can store data to get cleared when
        # switching to a different tab
        self._tab_data = {}
        if tab == 'my_replays':
            c_width = self._scroll_width
            c_height = self._scroll_height - 20
            sub_scroll_height = c_height - 63
            self._my_replays_scroll_width = sub_scroll_width = (
                680 if ba.app.small_ui else 640)
            self._tab_container = cnt = ba.containerwidget(
                parent=self._root_widget,
                position=(scroll_left, scroll_bottom +
                          (self._scroll_height - c_height) * 0.5),
                size=(c_width, c_height),
                background=False,
                selection_loop_to_parent=True)
            v = c_height - 30
            ba.textwidget(parent=cnt,
                          position=(c_width * 0.5, v),
                          color=(0.6, 1.0, 0.6),
                          scale=0.7,
                          size=(0, 0),
                          maxwidth=c_width * 0.9,
                          h_align='center',
                          v_align='center',
                          text=ba.Lstr(
                              resource='replayRenameWarningText',
                              subs=[('${REPLAY}',
                                     ba.Lstr(resource='replayNameDefaultText'))
                                    ]))
            b_width = 140 if ba.app.small_ui else 178
            b_height = (107
                        if ba.app.small_ui else 142 if ba.app.med_ui else 190)
            b_space_extra = (0 if ba.app.small_ui else
                             -2 if ba.app.med_ui else -5)
            b_color = (0.6, 0.53, 0.63)
            b_textcolor = (0.75, 0.7, 0.8)
            btnv = c_height - (48 if ba.app.small_ui else
                               45 if ba.app.med_ui else 40) - b_height
            btnh = 40 if ba.app.small_ui else 40
            smlh = 190 if ba.app.small_ui else 225
            tscl = 1.0 if ba.app.small_ui else 1.2
            # Action buttons (watch/rename/delete) stacked down the left.
            self._my_replays_watch_replay_button = btn1 = ba.buttonwidget(
                parent=cnt,
                size=(b_width, b_height),
                position=(btnh, btnv),
                button_type='square',
                color=b_color,
                textcolor=b_textcolor,
                on_activate_call=self._on_my_replay_play_press,
                text_scale=tscl,
                label=ba.Lstr(resource=self._r + '.watchReplayButtonText'),
                autoselect=True)
            ba.widget(edit=btn1, up_widget=self._tab_buttons[tab])
            if ba.app.small_ui and ba.app.toolbars:
                ba.widget(edit=btn1,
                          left_widget=_ba.get_special_widget('back_button'))
            btnv -= b_height + b_space_extra
            ba.buttonwidget(parent=cnt,
                            size=(b_width, b_height),
                            position=(btnh, btnv),
                            button_type='square',
                            color=b_color,
                            textcolor=b_textcolor,
                            on_activate_call=self._on_my_replay_rename_press,
                            text_scale=tscl,
                            label=ba.Lstr(resource=self._r +
                                          '.renameReplayButtonText'),
                            autoselect=True)
            btnv -= b_height + b_space_extra
            ba.buttonwidget(parent=cnt,
                            size=(b_width, b_height),
                            position=(btnh, btnv),
                            button_type='square',
                            color=b_color,
                            textcolor=b_textcolor,
                            on_activate_call=self._on_my_replay_delete_press,
                            text_scale=tscl,
                            label=ba.Lstr(resource=self._r +
                                          '.deleteReplayButtonText'),
                            autoselect=True)
            v -= sub_scroll_height + 23
            self._scrollwidget = scrlw = ba.scrollwidget(
                parent=cnt,
                position=(smlh, v),
                size=(sub_scroll_width, sub_scroll_height))
            ba.containerwidget(edit=cnt, selected_child=scrlw)
            self._columnwidget = ba.columnwidget(parent=scrlw, left_border=10)
            ba.widget(edit=scrlw,
                      autoselect=True,
                      left_widget=btn1,
                      up_widget=self._tab_buttons[tab])
            ba.widget(edit=self._tab_buttons[tab], down_widget=scrlw)
            self._my_replay_selected = None
            self._refresh_my_replays()

    def _no_replay_selected_error(self) -> None:
        """Flash an error telling the user no replay is selected."""
        ba.screenmessage(ba.Lstr(resource=self._r +
                                 '.noReplaySelectedErrorText'),
                         color=(1, 0, 0))
        ba.playsound(ba.getsound('error'))

    def _on_my_replay_play_press(self) -> None:
        """Fade out and launch a replay session for the selection."""
        if self._my_replay_selected is None:
            self._no_replay_selected_error()
            return
        _ba.increment_analytics_count('Replay watch')

        def do_it() -> None:
            # Runs after the fade-out completes (see fade_screen below).
            try:
                # Reset to normal speed.
                _ba.set_replay_speed_exponent(0)
                _ba.fade_screen(True)
                assert self._my_replay_selected is not None
                _ba.new_replay_session(_ba.get_replays_dir() + '/' +
                                       self._my_replay_selected)
            except Exception:
                ba.print_exception('Error running replay session.')

                # Drop back into a fresh main menu session
                # in case we half-launched or something.
                from bastd import mainmenu
                _ba.new_host_session(mainmenu.MainMenuSession)

        _ba.fade_screen(False, endcall=ba.Call(ba.pushcall, do_it))
        ba.containerwidget(edit=self._root_widget, transition='out_left')

    def _on_my_replay_rename_press(self) -> None:
        """Pop up a dialog for renaming the selected replay."""
        if self._my_replay_selected is None:
            self._no_replay_selected_error()
            return
        c_width = 600
        c_height = 250
        self._my_replays_rename_window = cnt = ba.containerwidget(
            scale=1.8 if ba.app.small_ui else 1.55 if ba.app.med_ui else 1.0,
            size=(c_width, c_height),
            transition='in_scale')
        dname = self._get_replay_display_name(self._my_replay_selected)
        ba.textwidget(parent=cnt,
                      size=(0, 0),
                      h_align='center',
                      v_align='center',
                      text=ba.Lstr(resource=self._r + '.renameReplayText',
                                   subs=[('${REPLAY}', dname)]),
                      maxwidth=c_width * 0.8,
                      position=(c_width * 0.5, c_height - 60))
        self._my_replay_rename_text = txt = ba.textwidget(
            parent=cnt,
            size=(c_width * 0.8, 40),
            h_align='left',
            v_align='center',
            text=dname,
            editable=True,
            description=ba.Lstr(resource=self._r + '.replayNameText'),
            position=(c_width * 0.1, c_height - 140),
            autoselect=True,
            maxwidth=c_width * 0.7,
            max_chars=200)
        cbtn = ba.buttonwidget(
            parent=cnt,
            label=ba.Lstr(resource='cancelText'),
            on_activate_call=ba.Call(
                lambda c: ba.containerwidget(edit=c, transition='out_scale'),
                cnt),
            size=(180, 60),
            position=(30, 30),
            autoselect=True)
        okb = ba.buttonwidget(parent=cnt,
                              label=ba.Lstr(resource=self._r + '.renameText'),
                              size=(180, 60),
                              position=(c_width - 230, 30),
                              on_activate_call=ba.Call(
                                  self._rename_my_replay,
                                  self._my_replay_selected),
                              autoselect=True)
        ba.widget(edit=cbtn, right_widget=okb)
        ba.widget(edit=okb, left_widget=cbtn)
        ba.textwidget(edit=txt, on_return_press_call=okb.activate)
        ba.containerwidget(edit=cnt, cancel_button=cbtn, start_button=okb)

    def _rename_my_replay(self, replay: str) -> None:
        """Rename ``replay`` on disk to the name typed in the dialog."""
        new_name = None
        try:
            if not self._my_replay_rename_text:
                return
            new_name_raw = cast(
                str, ba.textwidget(query=self._my_replay_rename_text))
            new_name = new_name_raw + '.brp'

            # ignore attempts to change it to what it already is
            # (or what it looks like to the user)
            if (replay != new_name
                    and self._get_replay_display_name(replay) != new_name_raw):
                old_name_full = (_ba.get_replays_dir() + '/' +
                                 replay).encode('utf-8')
                new_name_full = (_ba.get_replays_dir() + '/' +
                                 new_name).encode('utf-8')
                # false alarm; ba.textwidget can return non-None val
                # pylint: disable=unsupported-membership-test
                if os.path.exists(new_name_full):
                    ba.playsound(ba.getsound('error'))
                    ba.screenmessage(
                        ba.Lstr(resource=self._r +
                                '.replayRenameErrorAlreadyExistsText'),
                        color=(1, 0, 0))
                elif any(char in new_name_raw for char in ['/', '\\', ':']):
                    ba.playsound(ba.getsound('error'))
                    ba.screenmessage(ba.Lstr(resource=self._r +
                                             '.replayRenameErrorInvalidName'),
                                     color=(1, 0, 0))
                else:
                    _ba.increment_analytics_count('Replay rename')
                    os.rename(old_name_full, new_name_full)
                    self._refresh_my_replays()
                    ba.playsound(ba.getsound('gunCocking'))
        except Exception:
            ba.print_exception(
                f"Error renaming replay '{replay}' to '{new_name}'.")
            ba.playsound(ba.getsound('error'))
            ba.screenmessage(
                ba.Lstr(resource=self._r + '.replayRenameErrorText'),
                color=(1, 0, 0),
            )
        ba.containerwidget(edit=self._my_replays_rename_window,
                           transition='out_scale')

    def _on_my_replay_delete_press(self) -> None:
        """Ask for confirmation before deleting the selected replay."""
        from bastd.ui import confirm
        if self._my_replay_selected is None:
            self._no_replay_selected_error()
            return
        confirm.ConfirmWindow(
            ba.Lstr(resource=self._r + '.deleteConfirmText',
                    subs=[('${REPLAY}',
                           self._get_replay_display_name(
                               self._my_replay_selected))]),
            ba.Call(self._delete_replay, self._my_replay_selected), 450, 150)

    def _get_replay_display_name(self, replay: str) -> str:
        """Return the user-facing name for a replay file name."""
        # Strip the '.brp' extension and localize the auto-saved replay.
        if replay.endswith('.brp'):
            replay = replay[:-4]
        if replay == '__lastReplay':
            return ba.Lstr(resource='replayNameDefaultText').evaluate()
        return replay

    def _delete_replay(self, replay: str) -> None:
        """Remove ``replay`` from disk and refresh the list."""
        try:
            _ba.increment_analytics_count('Replay delete')
            os.remove((_ba.get_replays_dir() + '/' + replay).encode('utf-8'))
            self._refresh_my_replays()
            ba.playsound(ba.getsound('shieldDown'))
            if replay == self._my_replay_selected:
                self._my_replay_selected = None
        except Exception:
            ba.print_exception(f"Error deleting replay '{replay}'.")
            ba.playsound(ba.getsound('error'))
            ba.screenmessage(
                ba.Lstr(resource=self._r + '.replayDeleteErrorText'),
                color=(1, 0, 0),
            )

    def _on_my_replay_select(self, replay: str) -> None:
        """Record ``replay`` as the currently-selected list entry."""
        self._my_replay_selected = replay

    def _refresh_my_replays(self) -> None:
        """Rebuild the replay list from the on-disk replays dir."""
        assert self._columnwidget is not None
        for child in self._columnwidget.get_children():
            child.delete()
        t_scale = 1.6
        try:
            names = os.listdir(_ba.get_replays_dir())

            # ignore random other files in there..
            names = [n for n in names if n.endswith('.brp')]
            names.sort(key=lambda x: x.lower())
        except Exception:
            ba.print_exception('Error listing replays dir.')
            names = []
        assert self._my_replays_scroll_width is not None
        assert self._my_replays_watch_replay_button is not None
        for i, name in enumerate(names):
            txt = ba.textwidget(
                parent=self._columnwidget,
                size=(self._my_replays_scroll_width / t_scale, 30),
                selectable=True,
                color=(1.0, 1, 0.4) if name == '__lastReplay.brp' else
                (1, 1, 1),
                always_highlight=True,
                on_select_call=ba.Call(self._on_my_replay_select, name),
                on_activate_call=self._my_replays_watch_replay_button.activate,
                text=self._get_replay_display_name(name),
                h_align='left',
                v_align='center',
                corner_scale=t_scale,
                maxwidth=(self._my_replays_scroll_width / t_scale) * 0.93)
            if i == 0:
                ba.widget(edit=txt, up_widget=self._tab_buttons['my_replays'])

    def _save_state(self) -> None:
        """Stash the current selection/tab so it survives window rebuilds."""
        try:
            sel = self._root_widget.get_selected_child()
            if sel == self._back_button:
                sel_name = 'Back'
            elif sel in list(self._tab_buttons.values()):
                sel_name = 'Tab:' + list(self._tab_buttons.keys())[list(
                    self._tab_buttons.values()).index(sel)]
            elif sel == self._tab_container:
                sel_name = 'TabContainer'
            else:
                raise ValueError(f'unrecognized selection {sel}')
            ba.app.window_states[self.__class__.__name__] = {
                'sel_name': sel_name,
                'tab': self._current_tab
            }
        except Exception:
            ba.print_exception(f'Error saving state for {self}.')

    def _restore_state(self) -> None:
        """Re-apply the selection/tab previously stored by _save_state."""
        try:
            sel_name = ba.app.window_states.get(self.__class__.__name__,
                                                {}).get('sel_name')
            current_tab = ba.app.config.get('Watch Tab')
            # Fall back to the default tab for missing/unknown values.
            if current_tab is None or current_tab not in self._tab_buttons:
                current_tab = 'my_replays'
            self._set_tab(current_tab)
            if sel_name == 'Back':
                sel = self._back_button
            elif sel_name == 'TabContainer':
                sel = self._tab_container
            elif isinstance(sel_name, str) and sel_name.startswith('Tab:'):
                sel = self._tab_buttons[sel_name.split(':')[-1]]
            else:
                if self._tab_container is not None:
                    sel = self._tab_container
                else:
                    sel = self._tab_buttons[current_tab]
            ba.containerwidget(edit=self._root_widget, selected_child=sel)
        except Exception:
            ba.print_exception(f'Error restoring state for {self}.')

    def _back(self) -> None:
        """Save state and transition back to the main menu window."""
        from bastd.ui import mainmenu
        self._save_state()
        ba.containerwidget(edit=self._root_widget,
                           transition=self._transition_out)
        ba.app.main_menu_window = (mainmenu.MainMenuWindow(
            transition='in_left').get_root_widget())
| 43.737643 | 79 | 0.541815 |
from __future__ import annotations
import os
from typing import TYPE_CHECKING, cast
import _ba
import ba
if TYPE_CHECKING:
from typing import Any, Optional, Tuple, Dict
class WatchWindow(ba.Window):
def __init__(self,
transition: Optional[str] = 'in_right',
origin_widget: ba.Widget = None):
from bastd.ui import tabs
ba.set_analytics_screen('Watch Window')
scale_origin: Optional[Tuple[float, float]]
if origin_widget is not None:
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
transition = 'in_scale'
else:
self._transition_out = 'out_right'
scale_origin = None
ba.app.main_window = 'Watch'
self._tab_data: Dict[str, Any] = {}
self._my_replays_scroll_width: Optional[float] = None
self._my_replays_watch_replay_button: Optional[ba.Widget] = None
self._scrollwidget: Optional[ba.Widget] = None
self._columnwidget: Optional[ba.Widget] = None
self._my_replay_selected: Optional[str] = None
self._my_replays_rename_window: Optional[ba.Widget] = None
self._my_replay_rename_text: Optional[ba.Widget] = None
self._r = 'watchWindow'
self._width = 1240 if ba.app.small_ui else 1040
x_inset = 100 if ba.app.small_ui else 0
self._height = (578
if ba.app.small_ui else 670 if ba.app.med_ui else 800)
self._current_tab: Optional[str] = None
extra_top = 20 if ba.app.small_ui else 0
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height + extra_top),
transition=transition,
toolbar_visibility='menu_minimal',
scale_origin_stack_offset=scale_origin,
scale=(1.3 if ba.app.small_ui else 0.97 if ba.app.med_ui else 0.8),
stack_offset=(0, -10) if ba.app.small_ui else (
0, 15) if ba.app.med_ui else (0, 0)))
if ba.app.small_ui and ba.app.toolbars:
ba.containerwidget(edit=self._root_widget,
on_cancel_call=self._back)
self._back_button = None
else:
self._back_button = btn = ba.buttonwidget(
parent=self._root_widget,
autoselect=True,
position=(70 + x_inset, self._height - 74),
size=(140, 60),
scale=1.1,
label=ba.Lstr(resource='backText'),
button_type='back',
on_activate_call=self._back)
ba.containerwidget(edit=self._root_widget, cancel_button=btn)
ba.buttonwidget(edit=btn,
button_type='backSmall',
size=(60, 60),
label=ba.charstr(ba.SpecialChar.BACK))
ba.textwidget(parent=self._root_widget,
position=(self._width * 0.5, self._height - 38),
size=(0, 0),
color=ba.app.title_color,
scale=1.5,
h_align='center',
v_align='center',
text=ba.Lstr(resource=self._r + '.titleText'),
maxwidth=400)
tabs_def = [('my_replays',
ba.Lstr(resource=self._r + '.myReplaysText'))]
scroll_buffer_h = 130 + 2 * x_inset
tab_buffer_h = 750 + 2 * x_inset
self._tab_buttons = tabs.create_tab_buttons(
self._root_widget,
tabs_def,
pos=(tab_buffer_h * 0.5, self._height - 130),
size=(self._width - tab_buffer_h, 50),
on_select_call=self._set_tab)
if ba.app.toolbars:
ba.widget(edit=self._tab_buttons[tabs_def[-1][0]],
right_widget=_ba.get_special_widget('party_button'))
if ba.app.small_ui:
bbtn = _ba.get_special_widget('back_button')
ba.widget(edit=self._tab_buttons[tabs_def[0][0]],
up_widget=bbtn,
left_widget=bbtn)
self._scroll_width = self._width - scroll_buffer_h
self._scroll_height = self._height - 180
scroll_left = (self._width - self._scroll_width) * 0.5
scroll_bottom = self._height - self._scroll_height - 79 - 48
buffer_h = 10
buffer_v = 4
ba.imagewidget(parent=self._root_widget,
position=(scroll_left - buffer_h,
scroll_bottom - buffer_v),
size=(self._scroll_width + 2 * buffer_h,
self._scroll_height + 2 * buffer_v),
texture=ba.gettexture('scrollWidget'),
model_transparent=ba.getmodel('softEdgeOutside'))
self._tab_container: Optional[ba.Widget] = None
self._restore_state()
def _set_tab(self, tab: str) -> None:
from bastd.ui import tabs
if self._current_tab == tab:
return
self._current_tab = tab
cfg = ba.app.config
cfg['Watch Tab'] = tab
cfg.commit()
tabs.update_tab_button_colors(self._tab_buttons, tab)
if self._tab_container:
self._tab_container.delete()
scroll_left = (self._width - self._scroll_width) * 0.5
scroll_bottom = self._height - self._scroll_height - 79 - 48
self._tab_data = {}
if tab == 'my_replays':
c_width = self._scroll_width
c_height = self._scroll_height - 20
sub_scroll_height = c_height - 63
self._my_replays_scroll_width = sub_scroll_width = (
680 if ba.app.small_ui else 640)
self._tab_container = cnt = ba.containerwidget(
parent=self._root_widget,
position=(scroll_left, scroll_bottom +
(self._scroll_height - c_height) * 0.5),
size=(c_width, c_height),
background=False,
selection_loop_to_parent=True)
v = c_height - 30
ba.textwidget(parent=cnt,
position=(c_width * 0.5, v),
color=(0.6, 1.0, 0.6),
scale=0.7,
size=(0, 0),
maxwidth=c_width * 0.9,
h_align='center',
v_align='center',
text=ba.Lstr(
resource='replayRenameWarningText',
subs=[('${REPLAY}',
ba.Lstr(resource='replayNameDefaultText'))
]))
b_width = 140 if ba.app.small_ui else 178
b_height = (107
if ba.app.small_ui else 142 if ba.app.med_ui else 190)
b_space_extra = (0 if ba.app.small_ui else
-2 if ba.app.med_ui else -5)
b_color = (0.6, 0.53, 0.63)
b_textcolor = (0.75, 0.7, 0.8)
btnv = c_height - (48 if ba.app.small_ui else
45 if ba.app.med_ui else 40) - b_height
btnh = 40 if ba.app.small_ui else 40
smlh = 190 if ba.app.small_ui else 225
tscl = 1.0 if ba.app.small_ui else 1.2
self._my_replays_watch_replay_button = btn1 = ba.buttonwidget(
parent=cnt,
size=(b_width, b_height),
position=(btnh, btnv),
button_type='square',
color=b_color,
textcolor=b_textcolor,
on_activate_call=self._on_my_replay_play_press,
text_scale=tscl,
label=ba.Lstr(resource=self._r + '.watchReplayButtonText'),
autoselect=True)
ba.widget(edit=btn1, up_widget=self._tab_buttons[tab])
if ba.app.small_ui and ba.app.toolbars:
ba.widget(edit=btn1,
left_widget=_ba.get_special_widget('back_button'))
btnv -= b_height + b_space_extra
ba.buttonwidget(parent=cnt,
size=(b_width, b_height),
position=(btnh, btnv),
button_type='square',
color=b_color,
textcolor=b_textcolor,
on_activate_call=self._on_my_replay_rename_press,
text_scale=tscl,
label=ba.Lstr(resource=self._r +
'.renameReplayButtonText'),
autoselect=True)
btnv -= b_height + b_space_extra
ba.buttonwidget(parent=cnt,
size=(b_width, b_height),
position=(btnh, btnv),
button_type='square',
color=b_color,
textcolor=b_textcolor,
on_activate_call=self._on_my_replay_delete_press,
text_scale=tscl,
label=ba.Lstr(resource=self._r +
'.deleteReplayButtonText'),
autoselect=True)
v -= sub_scroll_height + 23
self._scrollwidget = scrlw = ba.scrollwidget(
parent=cnt,
position=(smlh, v),
size=(sub_scroll_width, sub_scroll_height))
ba.containerwidget(edit=cnt, selected_child=scrlw)
self._columnwidget = ba.columnwidget(parent=scrlw, left_border=10)
ba.widget(edit=scrlw,
autoselect=True,
left_widget=btn1,
up_widget=self._tab_buttons[tab])
ba.widget(edit=self._tab_buttons[tab], down_widget=scrlw)
self._my_replay_selected = None
self._refresh_my_replays()
def _no_replay_selected_error(self) -> None:
ba.screenmessage(ba.Lstr(resource=self._r +
'.noReplaySelectedErrorText'),
color=(1, 0, 0))
ba.playsound(ba.getsound('error'))
def _on_my_replay_play_press(self) -> None:
if self._my_replay_selected is None:
self._no_replay_selected_error()
return
_ba.increment_analytics_count('Replay watch')
def do_it() -> None:
try:
_ba.set_replay_speed_exponent(0)
_ba.fade_screen(True)
assert self._my_replay_selected is not None
_ba.new_replay_session(_ba.get_replays_dir() + '/' +
self._my_replay_selected)
except Exception:
ba.print_exception('Error running replay session.')
from bastd import mainmenu
_ba.new_host_session(mainmenu.MainMenuSession)
_ba.fade_screen(False, endcall=ba.Call(ba.pushcall, do_it))
ba.containerwidget(edit=self._root_widget, transition='out_left')
def _on_my_replay_rename_press(self) -> None:
if self._my_replay_selected is None:
self._no_replay_selected_error()
return
c_width = 600
c_height = 250
self._my_replays_rename_window = cnt = ba.containerwidget(
scale=1.8 if ba.app.small_ui else 1.55 if ba.app.med_ui else 1.0,
size=(c_width, c_height),
transition='in_scale')
dname = self._get_replay_display_name(self._my_replay_selected)
ba.textwidget(parent=cnt,
size=(0, 0),
h_align='center',
v_align='center',
text=ba.Lstr(resource=self._r + '.renameReplayText',
subs=[('${REPLAY}', dname)]),
maxwidth=c_width * 0.8,
position=(c_width * 0.5, c_height - 60))
self._my_replay_rename_text = txt = ba.textwidget(
parent=cnt,
size=(c_width * 0.8, 40),
h_align='left',
v_align='center',
text=dname,
editable=True,
description=ba.Lstr(resource=self._r + '.replayNameText'),
position=(c_width * 0.1, c_height - 140),
autoselect=True,
maxwidth=c_width * 0.7,
max_chars=200)
cbtn = ba.buttonwidget(
parent=cnt,
label=ba.Lstr(resource='cancelText'),
on_activate_call=ba.Call(
lambda c: ba.containerwidget(edit=c, transition='out_scale'),
cnt),
size=(180, 60),
position=(30, 30),
autoselect=True)
okb = ba.buttonwidget(parent=cnt,
label=ba.Lstr(resource=self._r + '.renameText'),
size=(180, 60),
position=(c_width - 230, 30),
on_activate_call=ba.Call(
self._rename_my_replay,
self._my_replay_selected),
autoselect=True)
ba.widget(edit=cbtn, right_widget=okb)
ba.widget(edit=okb, left_widget=cbtn)
ba.textwidget(edit=txt, on_return_press_call=okb.activate)
ba.containerwidget(edit=cnt, cancel_button=cbtn, start_button=okb)
def _rename_my_replay(self, replay: str) -> None:
new_name = None
try:
if not self._my_replay_rename_text:
return
new_name_raw = cast(
str, ba.textwidget(query=self._my_replay_rename_text))
new_name = new_name_raw + '.brp'
if (replay != new_name
and self._get_replay_display_name(replay) != new_name_raw):
old_name_full = (_ba.get_replays_dir() + '/' +
replay).encode('utf-8')
new_name_full = (_ba.get_replays_dir() + '/' +
new_name).encode('utf-8')
if os.path.exists(new_name_full):
ba.playsound(ba.getsound('error'))
ba.screenmessage(
ba.Lstr(resource=self._r +
'.replayRenameErrorAlreadyExistsText'),
color=(1, 0, 0))
elif any(char in new_name_raw for char in ['/', '\\', ':']):
ba.playsound(ba.getsound('error'))
ba.screenmessage(ba.Lstr(resource=self._r +
'.replayRenameErrorInvalidName'),
color=(1, 0, 0))
else:
_ba.increment_analytics_count('Replay rename')
os.rename(old_name_full, new_name_full)
self._refresh_my_replays()
ba.playsound(ba.getsound('gunCocking'))
except Exception:
ba.print_exception(
f"Error renaming replay '{replay}' to '{new_name}'.")
ba.playsound(ba.getsound('error'))
ba.screenmessage(
ba.Lstr(resource=self._r + '.replayRenameErrorText'),
color=(1, 0, 0),
)
ba.containerwidget(edit=self._my_replays_rename_window,
transition='out_scale')
def _on_my_replay_delete_press(self) -> None:
from bastd.ui import confirm
if self._my_replay_selected is None:
self._no_replay_selected_error()
return
confirm.ConfirmWindow(
ba.Lstr(resource=self._r + '.deleteConfirmText',
subs=[('${REPLAY}',
self._get_replay_display_name(
self._my_replay_selected))]),
ba.Call(self._delete_replay, self._my_replay_selected), 450, 150)
def _get_replay_display_name(self, replay: str) -> str:
if replay.endswith('.brp'):
replay = replay[:-4]
if replay == '__lastReplay':
return ba.Lstr(resource='replayNameDefaultText').evaluate()
return replay
def _delete_replay(self, replay: str) -> None:
try:
_ba.increment_analytics_count('Replay delete')
os.remove((_ba.get_replays_dir() + '/' + replay).encode('utf-8'))
self._refresh_my_replays()
ba.playsound(ba.getsound('shieldDown'))
if replay == self._my_replay_selected:
self._my_replay_selected = None
except Exception:
ba.print_exception(f"Error deleting replay '{replay}'.")
ba.playsound(ba.getsound('error'))
ba.screenmessage(
ba.Lstr(resource=self._r + '.replayDeleteErrorText'),
color=(1, 0, 0),
)
def _on_my_replay_select(self, replay: str) -> None:
self._my_replay_selected = replay
def _refresh_my_replays(self) -> None:
assert self._columnwidget is not None
for child in self._columnwidget.get_children():
child.delete()
t_scale = 1.6
try:
names = os.listdir(_ba.get_replays_dir())
names = [n for n in names if n.endswith('.brp')]
names.sort(key=lambda x: x.lower())
except Exception:
ba.print_exception('Error listing replays dir.')
names = []
assert self._my_replays_scroll_width is not None
assert self._my_replays_watch_replay_button is not None
for i, name in enumerate(names):
txt = ba.textwidget(
parent=self._columnwidget,
size=(self._my_replays_scroll_width / t_scale, 30),
selectable=True,
color=(1.0, 1, 0.4) if name == '__lastReplay.brp' else
(1, 1, 1),
always_highlight=True,
on_select_call=ba.Call(self._on_my_replay_select, name),
on_activate_call=self._my_replays_watch_replay_button.activate,
text=self._get_replay_display_name(name),
h_align='left',
v_align='center',
corner_scale=t_scale,
maxwidth=(self._my_replays_scroll_width / t_scale) * 0.93)
if i == 0:
ba.widget(edit=txt, up_widget=self._tab_buttons['my_replays'])
def _save_state(self) -> None:
try:
sel = self._root_widget.get_selected_child()
if sel == self._back_button:
sel_name = 'Back'
elif sel in list(self._tab_buttons.values()):
sel_name = 'Tab:' + list(self._tab_buttons.keys())[list(
self._tab_buttons.values()).index(sel)]
elif sel == self._tab_container:
sel_name = 'TabContainer'
else:
raise ValueError(f'unrecognized selection {sel}')
ba.app.window_states[self.__class__.__name__] = {
'sel_name': sel_name,
'tab': self._current_tab
}
except Exception:
ba.print_exception(f'Error saving state for {self}.')
def _restore_state(self) -> None:
try:
sel_name = ba.app.window_states.get(self.__class__.__name__,
{}).get('sel_name')
current_tab = ba.app.config.get('Watch Tab')
if current_tab is None or current_tab not in self._tab_buttons:
current_tab = 'my_replays'
self._set_tab(current_tab)
if sel_name == 'Back':
sel = self._back_button
elif sel_name == 'TabContainer':
sel = self._tab_container
elif isinstance(sel_name, str) and sel_name.startswith('Tab:'):
sel = self._tab_buttons[sel_name.split(':')[-1]]
else:
if self._tab_container is not None:
sel = self._tab_container
else:
sel = self._tab_buttons[current_tab]
ba.containerwidget(edit=self._root_widget, selected_child=sel)
except Exception:
ba.print_exception(f'Error restoring state for {self}.')
def _back(self) -> None:
from bastd.ui import mainmenu
self._save_state()
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
ba.app.main_menu_window = (mainmenu.MainMenuWindow(
transition='in_left').get_root_widget())
| true | true |
f739c29117398c7733bfa2df2adfe0157bd1a664 | 3,354 | py | Python | templation/models.py | qdqmedia/django-templation | 30f2e71f317bf635ace8cce119aface3fe89e876 | [
"BSD-3-Clause"
] | 7 | 2015-12-14T23:22:12.000Z | 2021-04-15T20:24:23.000Z | templation/models.py | qdqmedia/django-templation | 30f2e71f317bf635ace8cce119aface3fe89e876 | [
"BSD-3-Clause"
] | null | null | null | templation/models.py | qdqmedia/django-templation | 30f2e71f317bf635ace8cce119aface3fe89e876 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import shutil
import hmac
import hashlib
from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from .settings import DAV_ROOT, PROVIDER_NAME, BOILERPLATE_INITIALIZER, \
get_resource_model, get_resource_access_model, BOILERPLATE_FOLDER, \
import_from_path, SECRET_KEY
class ResourceAccessManager(models.Manager):
    """Manager adding a shortcut for querying validated resources."""

    def filter_validated(self, *args, **kwargs):
        """Like ``filter`` but restricted to validated resource pointers."""
        return self.filter(*args,
                           resource_pointer__is_validated=True,
                           **kwargs)
class ResourcePointer(models.Model):
    # Wraps the project-configured resource model so templation can track
    # whether the resource's templates have been validated.
    resource = models.OneToOneField(get_resource_model())
    is_validated = models.BooleanField(default=False)
class AbstractResourceAccess(models.Model):
    """Grants a user access to a resource's WebDAV storage area."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    resource_pointer = models.ForeignKey(ResourcePointer)

    objects = ResourceAccessManager()

    class Meta:
        abstract = True
        verbose_name = _('ResourceAccess')
        verbose_name_plural = _('ResourceAccesses')
        unique_together = ('user', 'resource_pointer')

    def get_absolute_url(self):
        """Returns the WebDav path for this resource."""
        return os.path.join('/' + PROVIDER_NAME,
                            str(self.resource_pointer.resource.id)) + '/'

    def get_path(self, append=None):
        """Return the on-disk DAV path for this resource.

        ``append`` optionally names a sub-path; a trailing slash is
        enforced so the result always denotes a directory.
        """
        # Bug fix: os.path.join() rejects None components, so a bare
        # get_path() call used to raise TypeError instead of returning
        # the resource's root directory.
        if append is None:
            append = ''
        elif not append.endswith('/'):
            append += '/'
        return os.path.join(DAV_ROOT, str(self.resource_pointer.resource.id),
                            append)

    def get_access_token(self):
        """Return an HMAC-SHA1 token derived from the resource id."""
        # NOTE(review): hmac.new() requires bytes keys/messages on py3;
        # this assumes a py2-style str SECRET_KEY — verify in settings.
        return hmac.new(SECRET_KEY, str(self.resource_pointer.resource.id),
                        hashlib.sha1).hexdigest()

    def validate_access_token(self, token):
        """Return True when ``token`` matches the expected access token."""
        return self.get_access_token() == token
class ResourceAccess(AbstractResourceAccess):
    """Default concrete Resource Access model.

    Used unless the project overrides it via the
    TEMPLATION_RESOURCE_ACCESS_MODEL setting.
    """
def copy_boilerplate_folder(user_dir):
    """
    Default behavior to initialize the webdav folder. copy the resources from
    `settings.TEMPLATION_BOILERPLATE_FOLDER` to the newly created folder.
    Overridable function with `settings.TEMPLATION_BOILERPLATE_INITIALIZER`.
    """
    if not BOILERPLATE_FOLDER:
        # No boilerplate configured; leave the folder empty.
        return
    if not os.path.isdir(BOILERPLATE_FOLDER):  # pragma no cover
        raise ValueError(
            '{0} is not a valid directory'.format(BOILERPLATE_FOLDER)
        )
    # copytree insists on creating the destination itself, so remove
    # the pre-made directory first.
    shutil.rmtree(user_dir)
    shutil.copytree(BOILERPLATE_FOLDER, user_dir)
def create_resource_access(sender, instance, created, **kwargs):
    """post_save handler: provision the WebDAV folder for a new access.

    Creates the per-resource directory under DAV_ROOT and runs the
    configured boilerplate initializer.  A directory that already
    exists is tolerated so repeated saves stay idempotent.
    """
    if not created:
        return
    user_dir = os.path.join(
        DAV_ROOT, str(instance.resource_pointer.resource.id)
    )
    try:
        # create in case neither folder or initializer are defined.
        os.makedirs(user_dir)
    except OSError as e:  # pragma no cover
        # errno.EEXIST replaces the magic number 17; any other failure
        # (permissions, bad path, ...) still propagates.
        if e.errno != errno.EEXIST:
            raise
    else:
        # Only initialize a freshly-created directory; errors from the
        # initializer itself are no longer masked by the EEXIST check.
        import_from_path(BOILERPLATE_INITIALIZER)(user_dir)
# When a custom ResourceAccess model is used, you also have to connect the
# signal with your custom model.
# Only the bundled default model is wired up automatically here; see
# TEMPLATION_RESOURCE_ACCESS_MODEL / get_resource_access_model().
if not getattr(settings, 'TEMPLATION_RESOURCE_ACCESS_MODEL', False):
    post_save.connect(create_resource_access,
                      sender=get_resource_access_model())
| 34.22449 | 77 | 0.684258 |
from __future__ import absolute_import
import os
import shutil
import hmac
import hashlib
from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from .settings import DAV_ROOT, PROVIDER_NAME, BOILERPLATE_INITIALIZER, \
get_resource_model, get_resource_access_model, BOILERPLATE_FOLDER, \
import_from_path, SECRET_KEY
class ResourceAccessManager(models.Manager):
    """Manager adding a shortcut for validated resource pointers."""

    def filter_validated(self, *args, **kwargs):
        """Like ``filter``, restricted to rows whose pointer is validated."""
        return self.filter(resource_pointer__is_validated=True,
                           *args, **kwargs)
class ResourcePointer(models.Model):
    """One-to-one pointer to a project resource, with a validation flag."""

    # The project-configured resource model (see get_resource_model).
    resource = models.OneToOneField(get_resource_model())
    # Rows start unvalidated; filter_validated() keys off this flag.
    is_validated = models.BooleanField(default=False)
class AbstractResourceAccess(models.Model):
    """Abstract base linking a user to a resource pointer for WebDAV access."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    resource_pointer = models.ForeignKey(ResourcePointer)
    objects = ResourceAccessManager()
    class Meta:
        abstract = True
        verbose_name = _('ResourceAccess')
        verbose_name_plural = _('ResourceAccesses')
        # One access row per (user, resource pointer) pair.
        unique_together = ('user', 'resource_pointer')
    def get_absolute_url(self):
        """Return the WebDAV URL path for this resource."""
        return os.path.join('/' + PROVIDER_NAME,
                            str(self.resource_pointer.resource.id)) + '/'
    def get_path(self, append=None):
        """Return the filesystem path of the resource's DAV folder."""
        # NOTE(review): with the default append=None, os.path.join receives
        # None and raises TypeError — callers must always pass a string.
        if append and not append.endswith('/'):
            append += '/'
        return os.path.join(DAV_ROOT, str(self.resource_pointer.resource.id),
                            append)
    def get_access_token(self):
        """Return an HMAC-SHA1 token derived from SECRET_KEY and the id."""
        return hmac.new(SECRET_KEY, str(self.resource_pointer.resource.id),
                        hashlib.sha1).hexdigest()
    def validate_access_token(self, token):
        """Return True when *token* matches the expected access token."""
        return self.get_access_token() == token
class ResourceAccess(AbstractResourceAccess):
def copy_boilerplate_folder(user_dir):
    """Copy the configured boilerplate folder into *user_dir*.

    No-op when no boilerplate folder is configured; raises ValueError when
    the configured path is not a directory.
    """
    if os.path.isdir(BOILERPLATE_FOLDER):
        # copytree must create the target directory itself.
        shutil.rmtree(user_dir)
        shutil.copytree(BOILERPLATE_FOLDER, user_dir)
    elif BOILERPLATE_FOLDER:
        raise ValueError(
            '{0} is not a valid directory'.format(BOILERPLATE_FOLDER)
        )
def create_resource_access(sender, instance, created, **kwargs):
    """post_save handler: create and initialize the WebDAV folder."""
    if created:
        try:
            user_dir = os.path.join(
                DAV_ROOT, str(instance.resource_pointer.resource.id)
            )
            os.makedirs(user_dir)
            import_from_path(BOILERPLATE_INITIALIZER)(user_dir)
        except OSError as e:
            # 17 == errno.EEXIST: the folder is already there, which is fine.
            if e.errno != 17:
                raise
# Connect the handler only for the default model; projects with a custom
# TEMPLATION_RESOURCE_ACCESS_MODEL must connect the signal themselves.
if not getattr(settings, 'TEMPLATION_RESOURCE_ACCESS_MODEL', False):
    post_save.connect(create_resource_access,
                      sender=get_resource_access_model())
| true | true |
f739c30aa08b79466c9ab4a61de9c8cd42f10b2e | 24,586 | py | Python | engineio/asyncio_client.py | dominikWin/python-engineio | 3c221aaec7173a046ca42f6aff9be0915cf92237 | [
"MIT"
] | null | null | null | engineio/asyncio_client.py | dominikWin/python-engineio | 3c221aaec7173a046ca42f6aff9be0915cf92237 | [
"MIT"
] | null | null | null | engineio/asyncio_client.py | dominikWin/python-engineio | 3c221aaec7173a046ca42f6aff9be0915cf92237 | [
"MIT"
] | null | null | null | import asyncio
import ssl
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
import six
try:
import websockets
except ImportError: # pragma: no cover
websockets = None
from . import client
from . import exceptions
from . import packet
from . import payload
class AsyncClient(client.Client):
    """An Engine.IO client for asyncio.

    This class implements a fully compliant Engine.IO web client with support
    for websocket and long-polling transports, compatible with the asyncio
    framework on Python 3.5 or newer.

    :param logger: To enable logging set to ``True`` or pass a logger object
                   to use. To disable logging set to ``False``. The default
                   is ``False``.
    :param json: An alternative json module to use for encoding and decoding
                 packets. Custom json modules must have ``dumps`` and
                 ``loads`` functions that are compatible with the standard
                 library versions.
    :param request_timeout: A timeout in seconds for requests. The default is
                            5 seconds.
    :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
                       skip SSL certificate verification, allowing
                       connections to servers with self signed certificates.
                       The default is ``True``.
    """
    def is_asyncio_based(self):
        # Distinguishes this asyncio client from the threaded base client.
        return True
async def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Note: this method is a coroutine.
Example usage::
eio = engineio.Client()
await eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.text_type):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return await getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
async def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
"""
if self.read_loop_task:
await self.read_loop_task
async def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
Note: this method is a coroutine.
"""
await self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
    async def disconnect(self, abort=False):
        """Disconnect from the server.

        :param abort: If set to ``True``, do not wait for background tasks
                      associated with the connection to end.

        Note: this method is a coroutine.
        """
        if self.state == 'connected':
            # Send CLOSE, then wake the write loop with a None sentinel.
            await self._send_packet(packet.Packet(packet.CLOSE))
            await self.queue.put(None)
            self.state = 'disconnecting'
            await self._trigger_event('disconnect', run_async=False)
            if self.current_transport == 'websocket':
                await self.ws.close()
            if not abort:
                # Let the read loop drain and finish cleanly.
                await self.read_loop_task
        self.state = 'disconnected'
        try:
            client.connected_clients.remove(self)
        except ValueError:  # pragma: no cover
            pass
        self._reset()
    def start_background_task(self, target, *args, **kwargs):
        """Start a background task.

        This is a utility function that applications can use to start a
        background task.

        :param target: the target coroutine function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.

        Returns the ``asyncio`` future/task object wrapping the scheduled
        coroutine. Note: despite the async context, this is a regular
        function, not a coroutine — the task is scheduled immediately.
        """
        return asyncio.ensure_future(target(*args, **kwargs))
    async def sleep(self, seconds=0):
        """Sleep for the requested amount of time.

        Note: this method is a coroutine.
        """
        return await asyncio.sleep(seconds)
def create_queue(self):
"""Create a queue object."""
q = asyncio.Queue()
q.Empty = asyncio.QueueEmpty
return q
    def create_event(self):
        """Create an event object (asyncio counterpart of threading.Event)."""
        return asyncio.Event()
    def _reset(self):
        # Close the aiohttp session in the background (this method is not a
        # coroutine, so the close cannot be awaited here), then let the base
        # class clear the per-connection state.
        if self.http:  # pragma: no cover
            asyncio.ensure_future(self.http.close())
        super()._reset()
    async def _connect_polling(self, url, headers, engineio_path):
        """Establish a long-polling connection to the Engine.IO server."""
        if aiohttp is None:  # pragma: no cover
            self.logger.error('aiohttp not installed -- cannot make HTTP '
                              'requests!')
            return
        self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
        self.logger.info('Attempting polling connection to ' + self.base_url)
        r = await self._send_request(
            'GET', self.base_url + self._get_url_timestamp(), headers=headers,
            timeout=self.request_timeout)
        if r is None:
            self._reset()
            raise exceptions.ConnectionError(
                'Connection refused by the server')
        if r.status < 200 or r.status >= 300:
            raise exceptions.ConnectionError(
                'Unexpected status code {} in server response'.format(
                    r.status))
        try:
            p = payload.Payload(encoded_payload=await r.read())
        except ValueError:
            six.raise_from(exceptions.ConnectionError(
                'Unexpected response from server'), None)
        # The first packet of the handshake payload must be OPEN and carries
        # the session parameters (sid, upgrades, ping intervals).
        open_packet = p.packets[0]
        if open_packet.packet_type != packet.OPEN:
            raise exceptions.ConnectionError(
                'OPEN packet not returned by server')
        self.logger.info(
            'Polling connection accepted with ' + str(open_packet.data))
        self.sid = open_packet.data['sid']
        self.upgrades = open_packet.data['upgrades']
        self.ping_interval = open_packet.data['pingInterval'] / 1000.0
        self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
        self.current_transport = 'polling'
        self.base_url += '&sid=' + self.sid
        self.state = 'connected'
        client.connected_clients.append(self)
        await self._trigger_event('connect', run_async=False)
        # Any packets that arrived bundled with the handshake are delivered.
        for pkt in p.packets[1:]:
            await self._receive_packet(pkt)
        if 'websocket' in self.upgrades and 'websocket' in self.transports:
            # attempt to upgrade to websocket
            if await self._connect_websocket(url, headers, engineio_path):
                # upgrade to websocket succeeded, we're done here
                return
        self.ping_loop_task = self.start_background_task(self._ping_loop)
        self.write_loop_task = self.start_background_task(self._write_loop)
        self.read_loop_task = self.start_background_task(
            self._read_loop_polling)
    async def _connect_websocket(self, url, headers, engineio_path):
        """Establish or upgrade to a WebSocket connection with the server."""
        if websockets is None:  # pragma: no cover
            self.logger.error('websockets package not installed')
            return False
        websocket_url = self._get_engineio_url(url, engineio_path,
                                               'websocket')
        if self.sid:
            # A polling connection already exists: this is an upgrade.
            self.logger.info(
                'Attempting WebSocket upgrade to ' + websocket_url)
            upgrade = True
            websocket_url += '&sid=' + self.sid
        else:
            upgrade = False
            self.base_url = websocket_url
            self.logger.info(
                'Attempting WebSocket connection to ' + websocket_url)
        # get the cookies from the long-polling connection so that they can
        # also be sent the the WebSocket route
        cookies = None
        if self.http:
            cookies = '; '.join(["{}={}".format(cookie.key, cookie.value)
                                 for cookie in self.http._cookie_jar])
            headers = headers.copy()
            headers['Cookie'] = cookies
        try:
            if not self.ssl_verify:
                # Build a permissive SSL context when verification is off.
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                ws = await websockets.connect(
                    websocket_url + self._get_url_timestamp(),
                    extra_headers=headers, ssl=ssl_context)
            else:
                ws = await websockets.connect(
                    websocket_url + self._get_url_timestamp(),
                    extra_headers=headers)
        except (websockets.exceptions.InvalidURI,
                websockets.exceptions.InvalidHandshake,
                OSError):
            if upgrade:
                # A failed upgrade is non-fatal; polling keeps working.
                self.logger.warning(
                    'WebSocket upgrade failed: connection error')
                return False
            else:
                raise exceptions.ConnectionError('Connection error')
        if upgrade:
            # Probe the new transport before switching over: PING "probe",
            # expect PONG "probe", then confirm with an UPGRADE packet.
            p = packet.Packet(packet.PING, data='probe').encode(
                always_bytes=False)
            try:
                await ws.send(p)
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected send exception: %s',
                    str(e))
                return False
            try:
                p = await ws.recv()
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected recv exception: %s',
                    str(e))
                return False
            pkt = packet.Packet(encoded_packet=p)
            if pkt.packet_type != packet.PONG or pkt.data != 'probe':
                self.logger.warning(
                    'WebSocket upgrade failed: no PONG packet')
                return False
            p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
            try:
                await ws.send(p)
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected send exception: %s',
                    str(e))
                return False
            self.current_transport = 'websocket'
            if self.http:  # pragma: no cover
                await self.http.close()
            self.logger.info('WebSocket upgrade was successful')
        else:
            # Direct websocket connection: wait for the server's OPEN packet
            # carrying the session parameters.
            try:
                p = await ws.recv()
            except Exception as e:  # pragma: no cover
                raise exceptions.ConnectionError(
                    'Unexpected recv exception: ' + str(e))
            open_packet = packet.Packet(encoded_packet=p)
            if open_packet.packet_type != packet.OPEN:
                raise exceptions.ConnectionError('no OPEN packet')
            self.logger.info(
                'WebSocket connection accepted with ' + str(open_packet.data))
            self.sid = open_packet.data['sid']
            self.upgrades = open_packet.data['upgrades']
            self.ping_interval = open_packet.data['pingInterval'] / 1000.0
            self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
            self.current_transport = 'websocket'
            self.state = 'connected'
            client.connected_clients.append(self)
            await self._trigger_event('connect', run_async=False)
        self.ws = ws
        # Start the background tasks that service this connection.
        self.ping_loop_task = self.start_background_task(self._ping_loop)
        self.write_loop_task = self.start_background_task(self._write_loop)
        self.read_loop_task = self.start_background_task(
            self._read_loop_websocket)
        return True
    async def _receive_packet(self, pkt):
        """Handle incoming packets from the server."""
        packet_name = packet.packet_names[pkt.packet_type] \
            if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
        self.logger.info(
            'Received packet %s data %s', packet_name,
            pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
        if pkt.packet_type == packet.MESSAGE:
            # Deliver asynchronously so a slow handler cannot block reads.
            await self._trigger_event('message', pkt.data, run_async=True)
        elif pkt.packet_type == packet.PONG:
            # Flag consumed by _ping_loop's liveness check.
            self.pong_received = True
        elif pkt.packet_type == packet.CLOSE:
            await self.disconnect(abort=True)
        elif pkt.packet_type == packet.NOOP:
            pass
        else:
            self.logger.error('Received unexpected packet of type %s',
                              pkt.packet_type)
async def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
await self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
    async def _send_request(
            self, method, url, headers=None, body=None,
            timeout=None):  # pragma: no cover
        """Issue an HTTP request; return the response or None on failure."""
        # Lazily (re)create the aiohttp session.
        if self.http is None or self.http.closed:
            self.http = aiohttp.ClientSession()
        http_method = getattr(self.http, method.lower())
        try:
            if not self.ssl_verify:
                return await http_method(
                    url, headers=headers, data=body,
                    timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
            else:
                return await http_method(
                    url, headers=headers, data=body,
                    timeout=aiohttp.ClientTimeout(total=timeout))
        except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
            # Falls through to an implicit return of None, which callers
            # treat as "connection refused".
            self.logger.info('HTTP %s request to %s failed with error %s.',
                             method, url, exc)
async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret
    async def _ping_loop(self):
        """This background task sends a PING to the server at the requested
        interval.

        Aborts the connection when the previous PING was never answered
        with a PONG (see _receive_packet, which sets pong_received).
        """
        self.pong_received = True
        self.ping_loop_event.clear()
        while self.state == 'connected':
            if not self.pong_received:
                self.logger.info(
                    'PONG response has not been received, aborting')
                if self.ws:
                    await self.ws.close()
                # Wake the write loop with the shutdown sentinel.
                await self.queue.put(None)
                break
            self.pong_received = False
            await self._send_packet(packet.Packet(packet.PING))
            try:
                # The event lets the read loops interrupt the sleep early
                # during shutdown.
                await asyncio.wait_for(self.ping_loop_event.wait(),
                                       self.ping_interval)
            except (asyncio.TimeoutError,
                    asyncio.CancelledError):  # pragma: no cover
                pass
        self.logger.info('Exiting ping task')
    async def _read_loop_polling(self):
        """Read packets by polling the Engine.IO server."""
        while self.state == 'connected':
            self.logger.info(
                'Sending polling GET request to ' + self.base_url)
            r = await self._send_request(
                'GET', self.base_url + self._get_url_timestamp(),
                timeout=max(self.ping_interval, self.ping_timeout) + 5)
            if r is None:
                self.logger.warning(
                    'Connection refused by the server, aborting')
                await self.queue.put(None)
                break
            if r.status < 200 or r.status >= 300:
                self.logger.warning('Unexpected status code %s in server '
                                    'response, aborting', r.status)
                await self.queue.put(None)
                break
            try:
                p = payload.Payload(encoded_payload=await r.read())
            except ValueError:
                self.logger.warning(
                    'Unexpected packet from server, aborting')
                await self.queue.put(None)
                break
            for pkt in p.packets:
                await self._receive_packet(pkt)
        # Orderly shutdown: wait for the sibling background tasks, then
        # fire 'disconnect' if the break happened while still connected.
        self.logger.info('Waiting for write loop task to end')
        await self.write_loop_task
        self.logger.info('Waiting for ping loop task to end')
        self.ping_loop_event.set()
        await self.ping_loop_task
        if self.state == 'connected':
            await self._trigger_event('disconnect', run_async=False)
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
            self._reset()
        self.logger.info('Exiting read loop task')
    async def _read_loop_websocket(self):
        """Read packets from the Engine.IO WebSocket connection."""
        while self.state == 'connected':
            p = None
            try:
                p = await self.ws.recv()
            except websockets.exceptions.ConnectionClosed:
                self.logger.info(
                    'Read loop: WebSocket connection was closed, aborting')
                await self.queue.put(None)
                break
            except Exception as e:
                self.logger.info(
                    'Unexpected error "%s", aborting', str(e))
                await self.queue.put(None)
                break
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            await self._receive_packet(pkt)
        # Orderly shutdown mirrors _read_loop_polling: drain sibling tasks,
        # then fire 'disconnect' if the break happened while connected.
        self.logger.info('Waiting for write loop task to end')
        await self.write_loop_task
        self.logger.info('Waiting for ping loop task to end')
        self.ping_loop_event.set()
        await self.ping_loop_task
        if self.state == 'connected':
            await self._trigger_event('disconnect', run_async=False)
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
            self._reset()
        self.logger.info('Exiting read loop task')
    async def _write_loop(self):
        """This background task sends packages to the server as they are
        pushed to the send queue.

        A ``None`` item in the queue is the shutdown sentinel.
        """
        while self.state == 'connected':
            # to simplify the timeout handling, use the maximum of the
            # ping interval and ping timeout as timeout, with an extra 5
            # seconds grace period
            timeout = max(self.ping_interval, self.ping_timeout) + 5
            packets = None
            try:
                packets = [await asyncio.wait_for(self.queue.get(), timeout)]
            except (self.queue.Empty, asyncio.TimeoutError,
                    asyncio.CancelledError):
                self.logger.error('packet queue is empty, aborting')
                break
            if packets == [None]:
                self.queue.task_done()
                packets = []
            else:
                # Batch any further queued packets into the same request.
                while True:
                    try:
                        packets.append(self.queue.get_nowait())
                    except self.queue.Empty:
                        break
                    if packets[-1] is None:
                        packets = packets[:-1]
                        self.queue.task_done()
                        break
            if not packets:
                # empty packet list returned -> connection closed
                break
            if self.current_transport == 'polling':
                p = payload.Payload(packets=packets)
                r = await self._send_request(
                    'POST', self.base_url, body=p.encode(),
                    headers={'Content-Type': 'application/octet-stream'},
                    timeout=self.request_timeout)
                for pkt in packets:
                    self.queue.task_done()
                if r is None:
                    self.logger.warning(
                        'Connection refused by the server, aborting')
                    break
                if r.status < 200 or r.status >= 300:
                    self.logger.warning('Unexpected status code %s in server '
                                        'response, aborting', r.status)
                    self._reset()
                    break
            else:
                # websocket
                try:
                    for pkt in packets:
                        await self.ws.send(pkt.encode(always_bytes=False))
                        self.queue.task_done()
                except websockets.exceptions.ConnectionClosed:
                    self.logger.info(
                        'Write loop: WebSocket connection was closed, '
                        'aborting')
                    break
        self.logger.info('Exiting write loop task')
| 41.884157 | 79 | 0.550069 | import asyncio
import ssl
try:
import aiohttp
except ImportError:
aiohttp = None
import six
try:
import websockets
except ImportError:
websockets = None
from . import client
from . import exceptions
from . import packet
from . import payload
class AsyncClient(client.Client):
def is_asyncio_based(self):
return True
async def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.text_type):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return await getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
async def wait(self):
if self.read_loop_task:
await self.read_loop_task
async def send(self, data, binary=None):
await self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
async def disconnect(self, abort=False):
if self.state == 'connected':
await self._send_packet(packet.Packet(packet.CLOSE))
await self.queue.put(None)
self.state = 'disconnecting'
await self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
await self.ws.close()
if not abort:
await self.read_loop_task
self.state = 'disconnected'
try:
client.connected_clients.remove(self)
except ValueError:
pass
self._reset()
def start_background_task(self, target, *args, **kwargs):
return asyncio.ensure_future(target(*args, **kwargs))
async def sleep(self, seconds=0):
return await asyncio.sleep(seconds)
def create_queue(self):
q = asyncio.Queue()
q.Empty = asyncio.QueueEmpty
return q
def create_event(self):
return asyncio.Event()
def _reset(self):
if self.http:
asyncio.ensure_future(self.http.close())
super()._reset()
async def _connect_polling(self, url, headers, engineio_path):
if aiohttp is None:
self.logger.error('aiohttp not installed -- cannot make HTTP '
'requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status < 200 or r.status >= 300:
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status))
try:
p = payload.Payload(encoded_payload=await r.read())
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
await self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
if await self._connect_websocket(url, headers, engineio_path):
return
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
async def _connect_websocket(self, url, headers, engineio_path):
if websockets is None: # pragma: no cover
self.logger.error('websockets package not installed')
return False
websocket_url = self._get_engineio_url(url, engineio_path,
'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get the cookies from the long-polling connection so that they can
# also be sent the the WebSocket route
cookies = None
if self.http:
cookies = '; '.join(["{}={}".format(cookie.key, cookie.value)
for cookie in self.http._cookie_jar])
headers = headers.copy()
headers['Cookie'] = cookies
try:
if not self.ssl_verify:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
ws = await websockets.connect(
websocket_url + self._get_url_timestamp(),
extra_headers=headers, ssl=ssl_context)
else:
ws = await websockets.connect(
websocket_url + self._get_url_timestamp(),
extra_headers=headers)
except (websockets.exceptions.InvalidURI,
websockets.exceptions.InvalidHandshake,
OSError):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode(
always_bytes=False)
try:
await ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = await ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
try:
await ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
if self.http: # pragma: no cover
await self.http.close()
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = await ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
async def _receive_packet(self, pkt):
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
await self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
async def _send_packet(self, pkt):
if self.state != 'connected':
return
await self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
async def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None or self.http.closed:
self.http = aiohttp.ClientSession()
http_method = getattr(self.http, method.lower())
try:
if not self.ssl_verify:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
else:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout))
except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
async def _trigger_event(self, event, *args, **kwargs):
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret
async def _ping_loop(self):
self.pong_received = True
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
await self.ws.close()
await self.queue.put(None)
break
self.pong_received = False
await self._send_packet(packet.Packet(packet.PING))
try:
await asyncio.wait_for(self.ping_loop_event.wait(),
self.ping_interval)
except (asyncio.TimeoutError,
asyncio.CancelledError): # pragma: no cover
pass
self.logger.info('Exiting ping task')
async def _read_loop_polling(self):
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
await self.queue.put(None)
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
await self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=await r.read())
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
await self.queue.put(None)
break
for pkt in p.packets:
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
self.logger.info('Waiting for ping loop task to end')
self.ping_loop_event.set()
await self.ping_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
    async def _read_loop_websocket(self):
        """Receive packets from the server over the WebSocket connection.

        Runs until the WebSocket closes or an unexpected error occurs,
        then shuts down the write and ping loop tasks and, if the
        disconnect was not client-initiated, fires the 'disconnect' event
        and resets the client state.
        """
        while self.state == 'connected':
            p = None
            try:
                p = await self.ws.recv()
            except websockets.exceptions.ConnectionClosed:
                self.logger.info(
                    'Read loop: WebSocket connection was closed, aborting')
                # None on the queue tells the write loop to exit
                await self.queue.put(None)
                break
            except Exception as e:
                self.logger.info(
                    'Unexpected error "%s", aborting', str(e))
                await self.queue.put(None)
                break
            # normalize text frames to bytes before packet decoding
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            await self._receive_packet(pkt)

        # orderly shutdown: drain the write loop, wake and await the ping
        # loop, then clean up if the server side initiated the disconnect
        self.logger.info('Waiting for write loop task to end')
        await self.write_loop_task
        self.logger.info('Waiting for ping loop task to end')
        self.ping_loop_event.set()
        await self.ping_loop_task
        if self.state == 'connected':
            await self._trigger_event('disconnect', run_async=False)
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
            self._reset()
        self.logger.info('Exiting read loop task')
    async def _write_loop(self):
        """Send queued packets to the server over the current transport.

        Blocks on the packet queue, batches any additional packets that
        are immediately available, and transmits the batch via a polling
        POST or individual WebSocket sends. A ``None`` entry on the queue
        is the shutdown sentinel pushed by the read/ping loops.
        """
        while self.state == 'connected':
            # to simplify the timeout handling, use the maximum of the
            # ping interval and ping timeout as timeout, with an extra 5
            # seconds grace period
            timeout = max(self.ping_interval, self.ping_timeout) + 5
            packets = None
            try:
                packets = [await asyncio.wait_for(self.queue.get(), timeout)]
            except (self.queue.Empty, asyncio.TimeoutError,
                    asyncio.CancelledError):
                self.logger.error('packet queue is empty, aborting')
                break
            if packets == [None]:
                self.queue.task_done()
                packets = []
            else:
                # opportunistically batch everything already queued; stop
                # early if the shutdown sentinel is encountered
                while True:
                    try:
                        packets.append(self.queue.get_nowait())
                    except self.queue.Empty:
                        break
                    if packets[-1] is None:
                        packets = packets[:-1]
                        self.queue.task_done()
                        break
            if not packets:
                # empty packet list returned -> connection closed
                break
            if self.current_transport == 'polling':
                p = payload.Payload(packets=packets)
                r = await self._send_request(
                    'POST', self.base_url, body=p.encode(),
                    headers={'Content-Type': 'application/octet-stream'},
                    timeout=self.request_timeout)
                # mark every batched packet done regardless of outcome
                for pkt in packets:
                    self.queue.task_done()
                if r is None:
                    self.logger.warning(
                        'Connection refused by the server, aborting')
                    break
                if r.status < 200 or r.status >= 300:
                    self.logger.warning('Unexpected status code %s in server '
                                        'response, aborting', r.status)
                    self._reset()
                    break
            else:
                # websocket
                try:
                    for pkt in packets:
                        await self.ws.send(pkt.encode(always_bytes=False))
                        self.queue.task_done()
                except websockets.exceptions.ConnectionClosed:
                    self.logger.info(
                        'Write loop: WebSocket connection was closed, '
                        'aborting')
                    break
        self.logger.info('Exiting write loop task')
| true | true |
f739c35c1e0492971857838c78089425e5b5139e | 300 | py | Python | source/astroNS/links/predicates/regex.py | pyastroNS/astroNS | 35687267179467e4cb7ea59ac119c5f0f182107f | [
"MIT"
] | null | null | null | source/astroNS/links/predicates/regex.py | pyastroNS/astroNS | 35687267179467e4cb7ea59ac119c5f0f182107f | [
"MIT"
] | null | null | null | source/astroNS/links/predicates/regex.py | pyastroNS/astroNS | 35687267179467e4cb7ea59ac119c5f0f182107f | [
"MIT"
] | null | null | null | """ Regular expression match """
import re
pattern = re.compile("(.*) regex '(.*)'")
def fn(groups, lsv_fn):
    """Build a predicate that regex-searches a record's field value.

    Parameters
    ----------
    groups : tuple
        the ``(field, regex)`` pair captured by the module-level ``pattern``
    lsv_fn : callable
        accessor invoked as ``lsv_fn(data, field)`` to extract the value

    Returns
    -------
    callable
        ``data -> bool``: True when the regex matches anywhere in the
        stringified field value
    """
    # name the captured regex 'expr' to avoid shadowing the module-level
    # compiled 'pattern'
    field, expr = groups
    route_regex = re.compile(expr)  # compile once, reuse for every record
    # 'is not None' instead of '!= None': identity test is the idiomatic
    # (and slightly cheaper) way to check for a match object
    return lambda data: route_regex.search(str(lsv_fn(data, field))) is not None
| 25 | 76 | 0.663333 | import re
pattern = re.compile("(.*) regex '(.*)'")
def fn(groups, lsv_fn):
    """Return a predicate that matches a record's field value against a regex."""
    target_field, expression = groups
    compiled = re.compile(expression)

    def predicate(data):
        value = str(lsv_fn(data, target_field))
        return compiled.search(value) is not None

    return predicate
| true | true |
f739c503de6c40e89ddda501b851f1192c57c3a3 | 27,781 | py | Python | molpal/objectives/pyscreener/docking/base.py | ashuein/molpal | 1e17a0c406516ceaeaf273a6983d06206bcfe76f | [
"MIT"
] | 1 | 2021-01-12T11:51:26.000Z | 2021-01-12T11:51:26.000Z | molpal/objectives/pyscreener/docking/base.py | ashuein/molpal | 1e17a0c406516ceaeaf273a6983d06206bcfe76f | [
"MIT"
] | null | null | null | molpal/objectives/pyscreener/docking/base.py | ashuein/molpal | 1e17a0c406516ceaeaf273a6983d06206bcfe76f | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from concurrent.futures import Executor
import csv
from functools import partial
from itertools import chain
from math import ceil, exp, log10
import os
from pathlib import Path
import timeit
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Type
from rdkit import Chem
from tqdm import tqdm
from molpal.objectives.pyscreener.preprocessing import pdbfix
class Screener(ABC):
"""A Screener conducts virtual screens against an ensemble of receptors.
Classes that implement the Screener interface are responsible for
defining the following methods:
prepare_receptor
prepare_from_smi
prepare_from_file
run_docking
parse_ligand_results
This is an abstract base class and cannot be instantiated.
Attributes
----------
repeats : int
the number of times each docking run will be repeated
score_mode : str
the mode used to calculate a score for an individual docking run given
multiple output scored conformations
receptor_score_mode : str
the mode used to calculate an overall score for a single receptor
given repeated docking runs against that receptor
ensemble_score_mode : str
the mode used to calculate an overall score for an ensemble of receptors
given multiple receptors in an ensemble
distributed : bool
True if the computation will parallelized over a distributed setup.
False if the computation will parallelized over a local setup
num_workers : int
the number of worker processes to initialize when
distributing computation
ncpu : int
the number of cores allocated to each worker process
path : os.PathLike
the path under which input and output folders will be placed
in_path : os.PathLike
the path under which all prepared input files will be placed
out_path : os.PathLike
the path under which all generated output will be placed
verbose : int
the level of output this Screener should output
Parameters
----------
repeats : int (Default = 1)
score_mode : str (Default = 'best')
receptor_score_mode : str (Default = 'best')
ensemble_score_mode : str (Default = 'best')
distributed : bool (Default = False)
num_workers : int (Default = -1)
ncpu : int (Default = 1)
path : Union[str, os.PathLike] (Default = '.')
verbose : int (Default = 0)
**kwargs
additional and unused keyword arguments
"""
def __init__(self, receptors: Optional[Sequence[str]] = None,
pdbids: Optional[Sequence[str]] = None,
repeats: int = 1, score_mode: str = 'best',
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best',
distributed: bool = False,
num_workers: int = -1, ncpu: int = 1,
path: str = '.', verbose: int = 0, **kwargs):
self.path = Path(path)
receptors = receptors or []
if pdbids:
receptors.extend((
pdbfix.pdbfix(pdbid=pdbid, path=self.in_path)
for pdbid in pdbids
))
if len(receptors) == 0:
raise ValueError('No receptors or PDBids provided!')
self.receptors = receptors
self.repeats = repeats
self.score_mode = score_mode
self.receptor_score_mode = receptor_score_mode
self.ensemble_score_mode = ensemble_score_mode
self.distributed = distributed
self.num_workers = num_workers
self.ncpu = ncpu
self.verbose = verbose
self.num_docked_ligands = 0
    def __len__(self) -> int:
        """The number of ligands this screener has simulated"""
        # incremented by dock_ensemble() after each batch completes
        return self.num_docked_ligands
    def __call__(self, *args, **kwargs) -> Dict[str, Optional[float]]:
        """Alias for dock(): calling the screener performs a virtual screen."""
        return self.dock(*args, **kwargs)
    @property
    def path(self) -> Tuple[os.PathLike, os.PathLike]:
        """The (in_path, out_path) pair of this Screener.

        NOTE: the getter returns a 2-tuple, while the setter accepts a
        single root path under which both directories are created.
        """
        return self.__in_path, self.__out_path

    @path.setter
    def path(self, path: str):
        """set both the in_path and out_path under the input path"""
        path = Path(path)
        self.in_path = path / 'inputs'
        self.out_path = path / 'outputs'

    @property
    def in_path(self) -> os.PathLike:
        """The directory under which prepared input files are placed."""
        return self.__in_path

    @in_path.setter
    def in_path(self, path: str):
        # setting the path eagerly creates the directory (side effect)
        path = Path(path)
        if not path.is_dir():
            path.mkdir(parents=True)
        self.__in_path = path

    @property
    def out_path(self) -> os.PathLike:
        """The directory under which all generated output is placed."""
        return self.__out_path

    @out_path.setter
    def out_path(self, path: str):
        # setting the path eagerly creates the directory (side effect)
        path = Path(path)
        if not path.is_dir():
            path.mkdir(parents=True)
        self.__out_path = path
def dock(self, *smis_or_files: Iterable,
full_results: bool = False,
**kwargs) -> Dict[str, Optional[float]]:
"""dock the ligands contained in sources
NOTE: the star operator, *, in the function signature.
If intending to pass multiple filepaths as an iterable, first
unpack the iterable in the function call by prepending a *.
If passing multiple SMILES strings, either option is acceptable,
but it is much more efficient to NOT unpack the iterable.
Parameters
----------
smis_or_files: Iterable
an iterable of ligand sources, where each ligand source may be
one of the following:
- a ligand supply file,
- a list of SMILES strings
- a single SMILES string
**kwargs
keyword arguments to pass to the appropriate prepare_from_*
function(s)
Returns
-------
d_smi_score : Dict[str, Optional[float]]
a dictionary mapping SMILES string to the best score among the
corresponding ligands. (None if all corresponding ligands
failed failed to dock)
records : List[Dict]
a list of dictionaries containing the record of every single
docking run performed. Each dictionary contains the following keys:
- smiles: the ligand's SMILES string
- name: the name of the ligand
- in: the filename of the input ligand file
- out: the filename of the output docked ligand file
- log: the filename of the output log file
- score: the ligand's docking score
"""
recordsss = self.dock_ensemble(*smis_or_files, **kwargs)
smis_scores = []
for ligand_results in recordsss:
smi = ligand_results[0][0]['smiles']
score = self.calc_ligand_score(
ligand_results, self.receptor_score_mode,
self.ensemble_score_mode
)
smis_scores.append((smi, score))
d_smi_score = {}
for smi_score in smis_scores:
smi, score = smi_score
if smi not in d_smi_score:
d_smi_score[smi] = score
elif score is None:
continue
else:
curr_score = d_smi_score[smi]
if curr_score is None:
d_smi_score[smi] = score
else:
d_smi_score[smi] = min(d_smi_score[smi], score)
if full_results:
return d_smi_score, list(chain(*list(chain(*recordsss))))
return d_smi_score
def dock_ensemble(self, *smis_or_files: Iterable,
**kwargs) -> List[List[List[Dict]]]:
"""Run the docking program with the ligands contained in *smis_or_files
NOTE: the zip operator, *, in the function signature. If intending to
pass multiple filepaths as an iterable, first unpack the iterable
in the function call by prepending a *
Parameters
----------
smis_or_files: Iterable
an iterable of ligand sources, where each ligand source may be
one of the following:
- a ligand supply file
- a list of SMILES strings
- a single SMILES string
**kwargs
keyword arguments to pass to the appropriate prepare_from_*
function(s)
Returns
-------
recordsss : List[List[List[Dict]]]
an NxMxO list of dictionaries where each dictionary is a record of an individual docking run and:
- N is the number of total ligands that will be docked
- M is the number of receptors each ligand is docked against
- O is the number of times each docking run is repeated.
Each dictionary contains the following keys:
- smiles: the ligand's SMILES string
- name: the name of the ligand
- in: the filename of the input ligand file
- out: the filename of the output docked ligand file
- log: the filename of the output log file
- score: the ligand's docking score
"""
begin = timeit.default_timer()
ligands = self.prepare_ligands(*smis_or_files, **kwargs)
recordsss = self.run_docking(ligands)
self.num_docked_ligands += len(recordsss)
total = timeit.default_timer() - begin
mins, secs = divmod(int(total), 60)
hrs, mins = divmod(mins, 60)
if self.verbose > 0 and len(recordsss) > 0:
print(f' Time to dock {len(recordsss)} ligands:',
f'{hrs:d}h {mins:d}m {secs:d}s ' +
f'({total/len(recordsss):0.3f} s/ligand)', flush=True)
return recordsss
    @abstractmethod
    def run_docking(self, ligands: Sequence[Tuple[str, str]]
                    ) -> List[List[List[Dict]]]:
        """Run the docking simulations for the input ligands

        Parameters
        ----------
        ligands : Sequence[Tuple[str, str]]
            a sequence of tuples containing a ligand's SMILES string and the
            filepath of the corresponding input file

        Returns
        -------
        List[List[List[Dict]]]
            an NxMxO list of dictionaries where each individual dictionary
            is a record of an individual docking run and
            N is the number of ligands contained in the ligand sources
            M is the number of receptors in the ensemble against which each
                ligand should be docked
            O is the number of times each docking run should be repeated

            NOTE: the records contain a 'score' that is None for each entry
                as the log/out files must first be parsed to obtain the value
        """
    @staticmethod
    @abstractmethod
    def parse_ligand_results(recs_reps: List[List[Dict]],
                             score_mode: str = 'best') -> List[List[Dict]]:
        """Parse the results of the docking simulations for a single ligand

        Parameters
        ----------
        recs_reps : List[List[Dict]]
            an MxO list of list of dictionaries where each individual
            dictionary is a record of an individual docking run and
            M is the number of receptors in the ensemble against which each
                ligand should be docked
            O is the number of times each docking run should be repeated
        score_mode : str (Default = 'best')
            the mode used to calculate a score for an individual docking
            run given multiple output scored conformations

        Returns
        -------
        recs_reps : List[List[Dict]]
            the same List as the input argument, but with the 'score' key of
            each record updated to reflect the desired score parsed
            from each docking run
        """
    @property
    def receptors(self):
        """The prepared receptor files this screener docks against."""
        return self.__receptors

    @receptors.setter
    def receptors(self, receptors):
        # prepare every receptor, then discard those whose preparation
        # failed (prepare_receptor returned None)
        receptors = [self.prepare_receptor(receptor) for receptor in receptors]
        receptors = [receptor for receptor in receptors if receptor is not None]
        if len(receptors) == 0:
            raise RuntimeError('Preparation failed for all receptors!')
        self.__receptors = receptors
    @abstractmethod
    def prepare_receptor(self, *args, **kwargs):
        """Prepare a receptor input file for the docking software.

        Implementations should return the prepared file, or None on
        failure (the ``receptors`` setter filters out None values).
        """

    @staticmethod
    @abstractmethod
    def prepare_from_smi(*args, **kwargs):
        """Prepare a ligand input file from a SMILES string.

        Implementations should return a falsy value on failure
        (prepare_from_smis filters falsy results out).
        """

    @staticmethod
    @abstractmethod
    def prepare_from_file(*args, **kwargs):
        """Prepare a ligand input file from an input file."""
def prepare_ligands(self, *smis_or_files,
path: Optional[str] = None, **kwargs):
path = path or self.in_path
return list(chain(*(
self._prepare_ligands(source, i+len(self), path, **kwargs)
for i, source in enumerate(smis_or_files)
)))
def _prepare_ligands(self, source, i: int,
path: Optional[str] = None, **kwargs):
if isinstance(source, str):
p_ligand = Path(source)
if not p_ligand.exists():
return [self.prepare_from_smi(source, f'ligand_{i}', path)]
if p_ligand.suffix == '.csv':
return self.prepare_from_csv(source, **kwargs)
if p_ligand.suffix == '.smi':
return self.prepare_from_supply(source, **kwargs)
if p_ligand.suffix == '.sdf':
if kwargs['use_3d']:
return self.prepare_from_file(source, path=path,
**kwargs)
else:
return self.prepare_from_supply(source, **kwargs)
return self.prepare_from_file(source, path=path, **kwargs)
if isinstance(source, Sequence):
return self.prepare_from_smis(source, **kwargs)
raise TypeError('Arg "source" must be of type str or ',
f'Sequence[str]. Got: {type(source)}')
    def prepare_from_smis(self, smis: Sequence[str],
                          names: Optional[Sequence[str]] = None,
                          start: int = 0, nconvert: Optional[int] = None,
                          **kwargs) -> List[Tuple]:
        """Convert the list of SMILES strings to their corresponding input files

        Parameters
        ----------
        smis : Sequence[str]
            a sequence of SMILES strings
        names : Optional[Sequence[str]] (Default = None)
            a parallel sequence of names for each ligand; autogenerated,
            zero-padded 'ligand_<i>' names are used when None
        start : int (Default = 0)
            the index at which to start ligand preparation
        nconvert : Optional[int] (Default = None)
            the number of ligands to convert. If None, convert all ligands
        **kwargs
            additional and unused keyword arguments

        Returns
        -------
        ligands : List[Tuple]
            a list of tuples containing a ligand's SMILES string and the
            filepath of the corresponding input file; ligands whose
            preparation failed (falsy result) are dropped
        """
        begin = timeit.default_timer()

        stop = min(len(smis), start+nconvert) if nconvert else len(smis)

        if names is None:
            # pad generated names to a uniform width so they sort correctly
            # NOTE(review): len(smis) == 0 would make log10 raise here —
            # confirm callers never pass an empty sequence
            width = ceil(log10(len(smis))) + 1
            names = (f'ligand_{i:0{width}}' for i in range(start, stop))
        else:
            # could theoretically handle empty strings
            names = names[start:stop]
        smis = smis[start:stop]
        # one in_path argument per ligand for the parallel map below
        paths = (self.in_path for _ in range(len(smis)))

        CHUNKSIZE = 4
        with self.Pool(self.distributed, self.num_workers,
                       self.ncpu, True) as client:
            ligands = client.map(self.prepare_from_smi, smis, names, paths,
                                 chunksize=CHUNKSIZE)
            # consume inside the with-block so the pool stays alive;
            # falsy results (failed preparations) are filtered out
            ligands = [
                ligand for ligand in tqdm(
                    ligands, total=len(smis), desc='Preparing ligands',
                    unit='ligand', smoothing=0.
                ) if ligand
            ]

        total = timeit.default_timer() - begin
        if self.verbose > 1:
            m, s = divmod(int(total), 60)
            h, m = divmod(m, 60)
            if len(ligands) > 0:
                print(f'  Time to prepare {len(ligands)} ligands: ',
                      f'{h}h {m}m {s}s ({total/len(ligands):0.4f} s/ligand)',
                      flush=True)

        return ligands
def prepare_from_csv(self, csv_filename: str, title_line: bool = True,
smiles_col: int = 0, name_col: Optional[int] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
"""Prepare the input files corresponding to the SMILES strings
contained in a CSV file
Parameters
----------
csv_filename : str
the filename of the CSV file containing the ligands to convert
title_line : bool (Default = True)
does the CSV file contain a title line?
smiles_col : int (Default = 0)
the column containing the SMILES strings
name_col : Optional[int] (Default = None)
the column containing the molecule name
start : int (Default = 0)
the index at which to start conversion
nconvert : Optional[int] (Default = None)
the number of ligands to convert. If None, convert all molecules
**kwargs
additional and unused keyword arguments
Returns
-------
ligands : List[Tuple]
a list of tuples containing a ligand's SMILES string and the
filepath of the corresponding input file. Files are named
<compound_id>.<suffix> if compound_id property exists in the
original supply file. Otherwise, they are named:
lig0.<suffix>, lig1.<suffix>, ...
"""
with open(csv_filename) as fid:
reader = csv.reader(fid)
if title_line:
next(reader)
if name_col is None:
smis = [row[smiles_col] for row in reader]
names = None
else:
smis_names = [(row[smiles_col], row[name_col])
for row in reader]
smis, names = zip(*smis_names)
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
def prepare_from_supply(self, supply: str,
id_prop_name: Optional[str] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
"""Prepare the input files corresponding to the molecules contained in
a molecular supply file
Parameters
----------
supply : str
the filename of the SDF or SMI file containing
the ligands to convert
id_prop_name : Optional[str]
the name of the property containing the ID, if one exists
(e.g., "CatalogID", "Chemspace_ID", "Name", etc...)
start : int (Default = 0)
the index at which to start ligand conversion
nconvert : Optional[int] (Default = None)
the number of ligands to convert. If None, convert all molecules
**kwargs
additional and unused keyword arguments
Returns
-------
ligands : List[Tuple[str, str]]
a list of tuples containing a ligand's SMILES string and the
filepath of the corresponding input file. Files are named
<compound_id>.<suffix> if compound_id property exists in the
original supply file. Otherwise, they are named:
lig0.<suffix>, lig1.<suffix>, ...
"""
p_supply = Path(supply)
if p_supply.suffix == '.sdf':
mols = Chem.SDMolSupplier(supply)
elif p_supply.suffix == '.smi':
mols = Chem.SmilesMolSupplier(supply)
else:
raise ValueError(
f'input file: "{supply}" does not have .sdf or .smi extension')
smis = []
names = None
if id_prop_name:
names = []
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
names.append(mol.GetProp(id_prop_name))
else:
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
@staticmethod
def calc_ligand_score(ligand_results: List[List[Dict]],
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best') -> Optional[float]:
"""Calculate the overall score of a ligand given all of its docking
runs
Parameters
----------
ligand_results : List[List[Dict]]
an MxO list of list of dictionaries where each individual dictionary is a record of an individual docking run and
M is the number of receptors the ligand was docked against
O is the number of times each docking run was repeated
receptor_score_mode : str (Default = 'best')
the mode used to calculate the overall score for a given receptor
pose with multiple, repeated runs
ensemble_score_mode : str (Default = 'best')
the mode used to calculate the overall score for a given ensemble
of receptors
Returns
-------
ensemble_score : Optional[float]
the overall score of a ligand's ensemble docking. None if no such
score was calculable
See also
--------
calc_score
for documentation on possible values for receptor_score_mode
and ensemble_score_mode
"""
receptor_scores = []
for receptor in ligand_results:
successful_rep_scores = [
repeat['score']
for repeat in receptor if repeat['score'] is not None
]
if successful_rep_scores:
receptor_scores.append(Screener.calc_score(
successful_rep_scores, receptor_score_mode
))
receptor_scores = [score for score in receptor_scores]
if receptor_scores:
ensemble_score = Screener.calc_score(
receptor_scores, ensemble_score_mode)
else:
ensemble_score = None
return ensemble_score
@staticmethod
def calc_score(scores: Sequence[float], score_mode: str = 'best') -> float:
"""Calculate an overall score from a sequence of scores
Parameters
----------
scores : Sequence[float]
score_mode : str (Default = 'best')
the method used to calculate the overall score
Choices:
'best' - return the top score
'avg' - return the average of the scores
'boltzmann' - return the boltzmann average of the scores
Returns
-------
score : float
"""
scores = sorted(scores)
if score_mode in ('best', 'top'):
score = scores[0]
elif score_mode in ('avg', 'mean'):
score = sum(score for score in scores) / len(scores)
elif score_mode == 'boltzmann':
Z = sum(exp(-score) for score in scores)
score = sum(score * exp(-score) / Z for score in scores)
return score
    @staticmethod
    def Pool(distributed: bool = False, num_workers: int = -1, ncpu: int = 1,
             all_cores: bool = False) -> Type[Executor]:
        """build a process pool to parallelize computation over

        Parameters
        ----------
        distributed : bool (Default = False)
            whether to return a distributed or a local process pool
        num_workers : int (Default = -1)
            if distributed is True, then this argument is ignored. If False,
            then it should be equal to the total number of worker processes
            desired. Using a value of -1 will spawn as many worker processes
            as cores available on this machine.
            NOTE: this is usually not a good idea and it's much better to
                  specify the number of processes explicitly.
        ncpu : int (Default = 1)
            if distributed is True, then this argument should be the number of
            cores allocated to each worker. if False, then this should be the
            number of cores that is desired to be allocated to each worker.
            NOTE: this is an implicit argument because Screener.dock() will
                  make subprocess calls to progams that themselves can utilize
                  multiple cores. It will not actually assign <ncpu> cores to
                  each worker process.
        all_cores : bool (Default = False)
            whether to initialize as many processes as cores available
            (= num_workers * ncpu).

        Returns
        -------
        Executor
            the initialized process pool

        Notes
        -----
        in some cases, as shown in the examples below, the values specified for
        num_workers and ncpu will be inconsequential. Regardless, it is good
        practice for this function to always be called the same way, with only
        all_cores changing, depending on the context in which the initialized
        Executor will be used

        Ex. 1
        -----
        Given: a single machine with 16 cores, screening using vina-type
        docking software (via the docking.Vina class)
        the function should be called with distributed=False, all_cores=False,
        and both num_workers and ncpu should be specified such that the product
        of the two is equal to 16.
        Choices: (1, 16), (2, 8), (4, 4), (8, 2), and (16, 1). You will often
        have to determine the optimal values empirically.

        Ex. 2
        -----
        Given: a cluster of machines where you've requested resources for 8
        tasks with 2 cores each. The software was then initialized with
        8 separate MPI processes and screening using vina-type docking
        software is to be performed.
        the function should be called with distributed=True and all_cores=False
        (neither num_workers or ncpu needs to be specified)

        Ex. 3
        -----
        Given: a single machine with 16 cores, and pure python code is to be
        executed in parallel
        the function should be called with distributed=False, all_cores=True,
        and both num_workers and ncpu should be specified such that the product
        of the two is equal to 16.
        Choices: see Ex. 1
        """
        if distributed:
            # mpi4py is imported lazily so it is only required in
            # distributed mode; worker count comes from the MPI world size
            from mpi4py import MPI
            from mpi4py.futures import MPIPoolExecutor as Pool

            num_workers = MPI.COMM_WORLD.size
        else:
            from concurrent.futures import ProcessPoolExecutor as Pool
            if num_workers == -1:
                try:
                    # respects CPU affinity masks where available (Linux)
                    num_workers = len(os.sched_getaffinity(0))
                except AttributeError:
                    num_workers = os.cpu_count()

        # NOTE: applies in both the distributed and local branches
        if all_cores:
            num_workers *= ncpu

        return Pool(max_workers=num_workers)
| 38.854545 | 127 | 0.580685 | from abc import ABC, abstractmethod
from concurrent.futures import Executor
import csv
from functools import partial
from itertools import chain
from math import ceil, exp, log10
import os
from pathlib import Path
import timeit
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Type
from rdkit import Chem
from tqdm import tqdm
from molpal.objectives.pyscreener.preprocessing import pdbfix
class Screener(ABC):
def __init__(self, receptors: Optional[Sequence[str]] = None,
pdbids: Optional[Sequence[str]] = None,
repeats: int = 1, score_mode: str = 'best',
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best',
distributed: bool = False,
num_workers: int = -1, ncpu: int = 1,
path: str = '.', verbose: int = 0, **kwargs):
self.path = Path(path)
receptors = receptors or []
if pdbids:
receptors.extend((
pdbfix.pdbfix(pdbid=pdbid, path=self.in_path)
for pdbid in pdbids
))
if len(receptors) == 0:
raise ValueError('No receptors or PDBids provided!')
self.receptors = receptors
self.repeats = repeats
self.score_mode = score_mode
self.receptor_score_mode = receptor_score_mode
self.ensemble_score_mode = ensemble_score_mode
self.distributed = distributed
self.num_workers = num_workers
self.ncpu = ncpu
self.verbose = verbose
self.num_docked_ligands = 0
def __len__(self) -> int:
return self.num_docked_ligands
def __call__(self, *args, **kwargs) -> Dict[str, Optional[float]]:
return self.dock(*args, **kwargs)
@property
def path(self) -> Tuple[os.PathLike, os.PathLike]:
return self.__in_path, self.__out_path
@path.setter
def path(self, path: str):
path = Path(path)
self.in_path = path / 'inputs'
self.out_path = path / 'outputs'
@property
def in_path(self) -> os.PathLike:
return self.__in_path
@in_path.setter
def in_path(self, path: str):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True)
self.__in_path = path
@property
def out_path(self) -> os.PathLike:
return self.__out_path
@out_path.setter
def out_path(self, path: str):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True)
self.__out_path = path
def dock(self, *smis_or_files: Iterable,
full_results: bool = False,
**kwargs) -> Dict[str, Optional[float]]:
recordsss = self.dock_ensemble(*smis_or_files, **kwargs)
smis_scores = []
for ligand_results in recordsss:
smi = ligand_results[0][0]['smiles']
score = self.calc_ligand_score(
ligand_results, self.receptor_score_mode,
self.ensemble_score_mode
)
smis_scores.append((smi, score))
d_smi_score = {}
for smi_score in smis_scores:
smi, score = smi_score
if smi not in d_smi_score:
d_smi_score[smi] = score
elif score is None:
continue
else:
curr_score = d_smi_score[smi]
if curr_score is None:
d_smi_score[smi] = score
else:
d_smi_score[smi] = min(d_smi_score[smi], score)
if full_results:
return d_smi_score, list(chain(*list(chain(*recordsss))))
return d_smi_score
def dock_ensemble(self, *smis_or_files: Iterable,
**kwargs) -> List[List[List[Dict]]]:
begin = timeit.default_timer()
ligands = self.prepare_ligands(*smis_or_files, **kwargs)
recordsss = self.run_docking(ligands)
self.num_docked_ligands += len(recordsss)
total = timeit.default_timer() - begin
mins, secs = divmod(int(total), 60)
hrs, mins = divmod(mins, 60)
if self.verbose > 0 and len(recordsss) > 0:
print(f' Time to dock {len(recordsss)} ligands:',
f'{hrs:d}h {mins:d}m {secs:d}s ' +
f'({total/len(recordsss):0.3f} s/ligand)', flush=True)
return recordsss
@abstractmethod
def run_docking(self, ligands: Sequence[Tuple[str, str]]
) -> List[List[List[Dict]]]:
@staticmethod
@abstractmethod
def parse_ligand_results(recs_reps: List[List[Dict]],
score_mode: str = 'best') -> List[List[Dict]]:
@property
def receptors(self):
return self.__receptors
@receptors.setter
def receptors(self, receptors):
receptors = [self.prepare_receptor(receptor) for receptor in receptors]
receptors = [receptor for receptor in receptors if receptor is not None]
if len(receptors) == 0:
raise RuntimeError('Preparation failed for all receptors!')
self.__receptors = receptors
@abstractmethod
def prepare_receptor(self, *args, **kwargs):
@staticmethod
@abstractmethod
def prepare_from_smi(*args, **kwargs):
@staticmethod
@abstractmethod
def prepare_from_file(*args, **kwargs):
def prepare_ligands(self, *smis_or_files,
path: Optional[str] = None, **kwargs):
path = path or self.in_path
return list(chain(*(
self._prepare_ligands(source, i+len(self), path, **kwargs)
for i, source in enumerate(smis_or_files)
)))
def _prepare_ligands(self, source, i: int,
path: Optional[str] = None, **kwargs):
if isinstance(source, str):
p_ligand = Path(source)
if not p_ligand.exists():
return [self.prepare_from_smi(source, f'ligand_{i}', path)]
if p_ligand.suffix == '.csv':
return self.prepare_from_csv(source, **kwargs)
if p_ligand.suffix == '.smi':
return self.prepare_from_supply(source, **kwargs)
if p_ligand.suffix == '.sdf':
if kwargs['use_3d']:
return self.prepare_from_file(source, path=path,
**kwargs)
else:
return self.prepare_from_supply(source, **kwargs)
return self.prepare_from_file(source, path=path, **kwargs)
if isinstance(source, Sequence):
return self.prepare_from_smis(source, **kwargs)
raise TypeError('Arg "source" must be of type str or ',
f'Sequence[str]. Got: {type(source)}')
def prepare_from_smis(self, smis: Sequence[str],
names: Optional[Sequence[str]] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
begin = timeit.default_timer()
stop = min(len(smis), start+nconvert) if nconvert else len(smis)
if names is None:
width = ceil(log10(len(smis))) + 1
names = (f'ligand_{i:0{width}}' for i in range(start, stop))
else:
names = names[start:stop]
smis = smis[start:stop]
paths = (self.in_path for _ in range(len(smis)))
CHUNKSIZE = 4
with self.Pool(self.distributed, self.num_workers,
self.ncpu, True) as client:
ligands = client.map(self.prepare_from_smi, smis, names, paths,
chunksize=CHUNKSIZE)
ligands = [
ligand for ligand in tqdm(
ligands, total=len(smis), desc='Preparing ligands',
unit='ligand', smoothing=0.
) if ligand
]
total = timeit.default_timer() - begin
if self.verbose > 1:
m, s = divmod(int(total), 60)
h, m = divmod(m, 60)
if len(ligands) > 0:
print(f' Time to prepare {len(ligands)} ligands: ',
f'{h}h {m}m {s}s ({total/len(ligands):0.4f} s/ligand)',
flush=True)
return ligands
def prepare_from_csv(self, csv_filename: str, title_line: bool = True,
smiles_col: int = 0, name_col: Optional[int] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
with open(csv_filename) as fid:
reader = csv.reader(fid)
if title_line:
next(reader)
if name_col is None:
smis = [row[smiles_col] for row in reader]
names = None
else:
smis_names = [(row[smiles_col], row[name_col])
for row in reader]
smis, names = zip(*smis_names)
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
def prepare_from_supply(self, supply: str,
id_prop_name: Optional[str] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
p_supply = Path(supply)
if p_supply.suffix == '.sdf':
mols = Chem.SDMolSupplier(supply)
elif p_supply.suffix == '.smi':
mols = Chem.SmilesMolSupplier(supply)
else:
raise ValueError(
f'input file: "{supply}" does not have .sdf or .smi extension')
smis = []
names = None
if id_prop_name:
names = []
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
names.append(mol.GetProp(id_prop_name))
else:
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
@staticmethod
def calc_ligand_score(ligand_results: List[List[Dict]],
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best') -> Optional[float]:
receptor_scores = []
for receptor in ligand_results:
successful_rep_scores = [
repeat['score']
for repeat in receptor if repeat['score'] is not None
]
if successful_rep_scores:
receptor_scores.append(Screener.calc_score(
successful_rep_scores, receptor_score_mode
))
receptor_scores = [score for score in receptor_scores]
if receptor_scores:
ensemble_score = Screener.calc_score(
receptor_scores, ensemble_score_mode)
else:
ensemble_score = None
return ensemble_score
@staticmethod
def calc_score(scores: Sequence[float], score_mode: str = 'best') -> float:
scores = sorted(scores)
if score_mode in ('best', 'top'):
score = scores[0]
elif score_mode in ('avg', 'mean'):
score = sum(score for score in scores) / len(scores)
elif score_mode == 'boltzmann':
Z = sum(exp(-score) for score in scores)
score = sum(score * exp(-score) / Z for score in scores)
return score
@staticmethod
def Pool(distributed: bool = False, num_workers: int = -1, ncpu: int = 1,
all_cores: bool = False) -> Type[Executor]:
if distributed:
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor as Pool
num_workers = MPI.COMM_WORLD.size
else:
from concurrent.futures import ProcessPoolExecutor as Pool
if num_workers == -1:
try:
num_workers = len(os.sched_getaffinity(0))
except AttributeError:
num_workers = os.cpu_count()
if all_cores:
num_workers *= ncpu
return Pool(max_workers=num_workers)
| true | true |
f739c519bdff3ea0513a876d0a9b7c47e6b5da68 | 3,963 | py | Python | ClassifierCode/classifier_builder.py | annaw3558/BankNoteClassifier | 18605b44f949a84bf78b43dec741ca003d17209c | [
"MIT"
] | null | null | null | ClassifierCode/classifier_builder.py | annaw3558/BankNoteClassifier | 18605b44f949a84bf78b43dec741ca003d17209c | [
"MIT"
] | null | null | null | ClassifierCode/classifier_builder.py | annaw3558/BankNoteClassifier | 18605b44f949a84bf78b43dec741ca003d17209c | [
"MIT"
] | null | null | null | '''This program contains the functions averageFinder and midpointFinder. AverageData calculates the averages of the
"columns" of a list of numbers (a list of lists of numbers) for real and fake samples (separately) and midpointFinder
finds the midpoint between the real and fake averages.
Data is either given from the test case or from user input, which is run through incomingData.
Assignment 2: classifier_builder
Name: Anna Wood
Student Number: 20091785
NetID: 17aaw2'''
def averageFinder(sample_data):
    '''Compute per-attribute averages separately for real and fake samples.

    Replaces the original four-pass scan whose row count was accumulated as
    +1/4 per column visit (fragile float bookkeeping) with a single pass and
    integer counts. Output values are identical.

    input: list of 5-item rows [a0, a1, a2, a3, label]; label 1 = real,
           label 0 = counterfeit
    output: (real_avgs, counter_avgs) — two 4-item lists of averages,
            each rounded to 3 decimal places
    note - 1 IS REAL 0 IS COUNTERFEIT; raises ZeroDivisionError if either
           class is absent (same as the original behaviour)'''
    real_sums = [0] * 4
    counter_sums = [0] * 4
    real_count = 0
    counter_count = 0

    for row in sample_data:  # single pass over all samples
        if row[4] == 1:
            real_count += 1
            for i in range(4):
                real_sums[i] += row[i]
        elif row[4] == 0:
            counter_count += 1
            for i in range(4):
                counter_sums[i] += row[i]

    # divide each column sum by its class size; round to 3 decimals
    real_avgs = [round(s / real_count, 3) for s in real_sums]
    counter_avgs = [round(s / counter_count, 3) for s in counter_sums]
    return real_avgs, counter_avgs
def midpointFinder(real_avgs, counter_avgs):
    '''Find the midpoint between the real and fake averages, per attribute.

    input: 4-item lists of real and counterfeit averages
    output: 4-item list of midpoints, each rounded to 3 decimal places'''
    return [round((real_avgs[i] + counter_avgs[i]) / 2, 3) for i in range(4)]
def incomingData(training_data):
    '''Entry point used by the main interface.

    input: labelled training data (list of 5-item rows)
    output: classifier midpoints (returned to the main interface)'''
    real_avgs, counter_avgs = averageFinder(training_data)
    return midpointFinder(real_avgs, counter_avgs)
if __name__ == '__main__':
    # Smoke test: four counterfeit rows (5th item 0) and three real rows
    # (5th item 1), with expected values worked out by hand.
    sample_data_main = [[ 3, 8, -2, 0, 0], [4, 8, -2, -1,0],[3, -2, 1, 0, 0], [2, 1, 0, -2, 0], # fake samples (5th item 0)
                        [0, 3, -3, -2, 1], [-3, 3, 0, -3, 1],
                        [-6, 7, 0, -3, 1] ] # real samples (5th item is 1)
    real_avgs , counter_avgs = averageFinder(sample_data_main)
    midpoints = midpointFinder(real_avgs, counter_avgs)
    print('real averages (test case)',real_avgs, 'should be -3 , 4.333, -1. -2.667')
    print('counter averages (test case)',counter_avgs, 'should be 3, 3.75, -0.75, -0.75')
    print('midpoints (test case)', midpoints, 'should be 0, 4.041 ish, -0.875, -1.708')
| 43.076087 | 123 | 0.685592 |
def averageFinder(sample_data):
    """Compute per-attribute averages for real (label 1) and counterfeit
    (label 0) samples.

    :param sample_data: list of 5-item rows [a0, a1, a2, a3, label]
    :return: (real_avgs, counter_avgs) — two 4-item lists, rounded to 3 decimals
    """
    real_avgs_counter = 0
    counter_avgs_counter = 0
    real_avgs = []
    counter_avgs = []
    avg_len_real = 0
    indx = 0
    while indx < 4:  # one full pass over the data per attribute column
        for i in range(0,len(sample_data)):
            if sample_data[i][4] == 1:
                real_avgs_counter += sample_data[i][indx]
                # each real row is visited once per column, so +1/4 per visit
                # totals exactly +1 per row (0.25 is exact in binary floats)
                avg_len_real = avg_len_real + 1 /4
            elif sample_data[i][4] == 0:
                counter_avgs_counter += sample_data[i][indx]
        real_avgs.append(real_avgs_counter)
        counter_avgs.append(counter_avgs_counter)
        real_avgs_counter = 0
        counter_avgs_counter = 0
        indx += 1
    # counterfeit row count = total rows - real rows
    avg_len_counter = len(sample_data) - avg_len_real
    for i in range(0, 4):
        # divide each column sum by its class size; round to 3 decimals
        real_avgs[i] = round((real_avgs[i] / avg_len_real), 3)
        counter_avgs[i] = round((counter_avgs[i] / avg_len_counter), 3)
    return real_avgs, counter_avgs
def midpointFinder(real_avgs, counter_avgs):
    """Return the per-attribute midpoints between the real and counterfeit
    averages, each rounded to 3 decimal places."""
    midpoints = []
    for i in range(0,4):
        midpoint = (real_avgs[i] + counter_avgs[i]) / 2
        midpoints.append(round(midpoint,3))
    return midpoints
def incomingData(training_data):
    """Entry point for the main interface: build classifier midpoints from
    labelled training data."""
    real_avgs, counter_avgs = averageFinder(training_data)
    midpoints = midpointFinder(real_avgs, counter_avgs)
    return midpoints
if __name__ == '__main__':
sample_data_main = [[ 3, 8, -2, 0, 0], [4, 8, -2, -1,0],[3, -2, 1, 0, 0], [2, 1, 0, -2, 0],
[0, 3, -3, -2, 1], [-3, 3, 0, -3, 1],
[-6, 7, 0, -3, 1] ]
real_avgs , counter_avgs = averageFinder(sample_data_main)
midpoints = midpointFinder(real_avgs, counter_avgs)
print('real averages (test case)',real_avgs, 'should be -3 , 4.333, -1. -2.667')
print('counter averages (test case)',counter_avgs, 'should be 3, 3.75, -0.75, -0.75')
print('midpoints (test case)', midpoints, 'should be 0, 4.041 ish, -0.875, -1.708')
| true | true |
f739c60c96f276337c40ba0361f5ed79dc59c86c | 327 | py | Python | src/grpc_interceptor/__init__.py | dan-hipschman-od/grpc-interceptor | b01d7ab47af999d519e5899488f0930a232c30ba | [
"MIT"
] | null | null | null | src/grpc_interceptor/__init__.py | dan-hipschman-od/grpc-interceptor | b01d7ab47af999d519e5899488f0930a232c30ba | [
"MIT"
] | null | null | null | src/grpc_interceptor/__init__.py | dan-hipschman-od/grpc-interceptor | b01d7ab47af999d519e5899488f0930a232c30ba | [
"MIT"
] | null | null | null | """Simplified Python gRPC interceptors."""
from grpc_interceptor.exception_to_status import ExceptionToStatusInterceptor
from grpc_interceptor.server import MethodName, parse_method_name, ServerInterceptor
__all__ = [
"ExceptionToStatusInterceptor",
"MethodName",
"parse_method_name",
"ServerInterceptor",
]
| 25.153846 | 84 | 0.795107 |
from grpc_interceptor.exception_to_status import ExceptionToStatusInterceptor
from grpc_interceptor.server import MethodName, parse_method_name, ServerInterceptor
__all__ = [
"ExceptionToStatusInterceptor",
"MethodName",
"parse_method_name",
"ServerInterceptor",
]
| true | true |
f739c6607eddcef200a3763c3b00bdb60213b3d6 | 1,867 | py | Python | watdo.py | KeyboardFire/watdo-today | 81df04ae526832d5ebea9d657140edf4001938dd | [
"MIT"
] | 2 | 2015-02-02T09:53:40.000Z | 2015-02-05T23:52:41.000Z | watdo.py | KeyboardFire/watdo-today | 81df04ae526832d5ebea9d657140edf4001938dd | [
"MIT"
] | null | null | null | watdo.py | KeyboardFire/watdo-today | 81df04ae526832d5ebea9d657140edf4001938dd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from html.parser import HTMLParser
import urllib.request
import webbrowser
import datetime
LANG = 'en'
TMPFILE = '/tmp/watdo.html'
class Parser(HTMLParser):
    """Extracts the section of a Wikipedia day page that follows the
    'Holidays_and_observances' heading, up to the next <h2>.

    After feed(), `result` holds the re-serialized HTML of that section.
    """

    def __init__(self):
        super(Parser, self).__init__()
        self.handle_next_h2 = False   # target span seen; waiting for its </h2>
        self.handle_until_h2 = False  # copying content until the next <h2>
        self.result = ''

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.handle_until_h2:
            if tag == 'h2':
                # the next section begins: stop copying
                self.handle_until_h2 = False
            else:
                # re-serialize the tag, repr-quoting attribute values
                serialized = ' '.join('%s=%r' % (name, attrs[name]) for name in attrs)
                self.result += '<%s %s>' % (tag, serialized) if serialized else '<%s>' % tag
        elif attrs.get('id') == 'Holidays_and_observances':
            self.handle_next_h2 = True

    def handle_endtag(self, tag):
        if self.handle_until_h2:
            self.result += '</%s>' % tag
        elif self.handle_next_h2 and tag == 'h2':
            self.handle_next_h2 = False
            self.handle_until_h2 = True

    def handle_data(self, data):
        if self.handle_until_h2:
            self.result += data.replace('\\n', '\n')
# Fetch today's Wikipedia date page and extract the holidays section.
parser = Parser()
today = datetime.date.today().strftime('%B_%d')
# NOTE(review): read() returns bytes; wrapping it in str() yields "b'...'"
# with escaped newlines, which handle_data later unescapes. Fragile but
# functional — decoding the bytes explicitly would be cleaner.
parser.feed(str(urllib.request.urlopen('http://%s.wikipedia.org/wiki/%s' % (LANG,today)).read()))
with open(TMPFILE, 'w') as f:
    # Write a minimal HTML page wrapping the extracted section; the <base>
    # tag makes the page's relative Wikipedia links resolve correctly.
    print('''<html lang='%s'>
<head>
<title>watdo</title>
<meta charset='utf-8' />
<base href='http://%s.wikipedia.org/' />
<style>
html { font-family: sans-serif; }
h1 { margin: 0px; font-size: 1.5em; }
</style>
</head>
<body>
<h1>wat do?</h1>
%s
</body>
</html>''' % (LANG, LANG, parser.result), file=f)
# Show the generated page in the default browser.
webbrowser.open(TMPFILE)
| 30.112903 | 97 | 0.53669 |
from html.parser import HTMLParser
import urllib.request
import webbrowser
import datetime
LANG = 'en'
TMPFILE = '/tmp/watdo.html'
class Parser(HTMLParser):
    """Extracts the HTML between the 'Holidays_and_observances' heading's
    closing </h2> and the next <h2>, re-serializing it into `result`."""

    def __init__(self):
        super(Parser, self).__init__()
        self.handle_next_h2 = False   # target span seen; waiting for its </h2>
        self.handle_until_h2 = False  # copying content until the next <h2>
        self.result = ''

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.handle_until_h2:
            if tag == 'h2':
                # the next section begins: stop copying
                self.handle_until_h2 = False
            else:
                # re-serialize the tag with its attributes (repr-quoted values)
                astr = ' '.join(map(lambda k: '%s=%r' % (k, attrs[k]), attrs))
                self.result += '<%s %s>' % (tag, astr) if astr else '<%s>' % tag
        else:
            if 'id' in attrs and attrs['id'] == 'Holidays_and_observances':
                self.handle_next_h2 = True

    def handle_endtag(self, tag):
        if self.handle_until_h2:
            self.result += '</%s>' % tag
        elif self.handle_next_h2 and tag == 'h2':
            self.handle_next_h2 = False
            self.handle_until_h2 = True

    def handle_data(self, data):
        if self.handle_until_h2:
            # undo literal "\n" escapes introduced upstream by str(bytes)
            self.result += data.replace('\\n', '\n')
parser = Parser()
today = datetime.date.today().strftime('%B_%d')
parser.feed(str(urllib.request.urlopen('http://%s.wikipedia.org/wiki/%s' % (LANG,today)).read()))
with open(TMPFILE, 'w') as f:
print('''<html lang='%s'>
<head>
<title>watdo</title>
<meta charset='utf-8' />
<base href='http://%s.wikipedia.org/' />
<style>
html { font-family: sans-serif; }
h1 { margin: 0px; font-size: 1.5em; }
</style>
</head>
<body>
<h1>wat do?</h1>
%s
</body>
</html>''' % (LANG, LANG, parser.result), file=f)
webbrowser.open(TMPFILE)
| true | true |
f739c66d3f0e41d16f5064b1dfd3208c2ce3d1c5 | 378 | py | Python | ModuloTest.py | cohadar/java-snippets | d55baabed6fca9faa2b60cf0b58fa64b651b450d | [
"MIT"
] | null | null | null | ModuloTest.py | cohadar/java-snippets | d55baabed6fca9faa2b60cf0b58fa64b651b450d | [
"MIT"
] | null | null | null | ModuloTest.py | cohadar/java-snippets | d55baabed6fca9faa2b60cf0b58fa64b651b450d | [
"MIT"
] | null | null | null | from __future__ import division
import random
import unittest
class ModuloTest(unittest.TestCase):
    """Property test: Python guarantees a == (a // b) * b + a % b."""

    def test_negmod(self):
        # Check the division/modulo identity for random (possibly negative)
        # pairs. Fixed: the original used Python-2-only `xrange`, which
        # raises NameError on Python 3; `range` behaves identically here.
        for _ in range(10000):
            a = random.randint(-10000, 10000)
            b = random.randint(-10000, 10000)
            if b == 0:
                b = random.randint(1, 10000)  # avoid division by zero
            q = a // b
            r = a % b
            self.assertEqual(a, q * b + r)
if __name__ == '__main__':
unittest.main()
| 19.894737 | 36 | 0.653439 | from __future__ import division
import random
import unittest
class ModuloTest(unittest.TestCase):
    """Property test of the Python division/modulo identity
    a == (a // b) * b + a % b."""

    def test_negmod(self):
        # NOTE(review): xrange is Python 2 only — this raises NameError on
        # Python 3 and should be `range`.
        for _ in xrange(10000):
            a = random.randint(-10000, 10000)
            b = random.randint(-10000, 10000)
            if b == 0:
                b = random.randint(1, 10000)  # avoid division by zero
            q = a // b
            r = a % b
            self.assertEqual(a, q * b + r)
if __name__ == '__main__':
unittest.main()
| true | true |
f739c6b0f9b7f0cc1b3831a9bac8a6add061d250 | 25,211 | py | Python | metasync/dropbox/dropbox.py | dstarikov/metavault | 1933cc6cd828ee9c594a45a78238a9a319de0143 | [
"MIT"
] | 1 | 2019-05-28T15:59:35.000Z | 2019-05-28T15:59:35.000Z | metasync/dropbox/dropbox.py | dstarikov/metavault | 1933cc6cd828ee9c594a45a78238a9a319de0143 | [
"MIT"
] | null | null | null | metasync/dropbox/dropbox.py | dstarikov/metavault | 1933cc6cd828ee9c594a45a78238a9a319de0143 | [
"MIT"
] | null | null | null | __all__ = [
'Dropbox',
'DropboxTeam',
'create_session',
]
# This should always be 0.0.0 in master. Only update this after tagging
# before release.
__version__ = '0.0.0'
import contextlib
import json
import logging
import random
import time
import requests
import six
from . import files, stone_serializers
from .auth import (
AuthError_validator,
RateLimitError_validator,
)
from .common import (
PathRoot,
PathRoot_validator,
PathRootError_validator
)
from .base import DropboxBase
from .base_team import DropboxTeamBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
PathRootError,
InternalServerError,
RateLimitError,
)
from .session import (
API_HOST,
API_CONTENT_HOST,
API_NOTIFICATION_HOST,
HOST_API,
HOST_CONTENT,
HOST_NOTIFY,
pinned_session,
)
PATH_ROOT_HEADER = 'Dropbox-API-Path-Root'
HTTP_STATUS_INVALID_PATH_ROOT = 422
class RouteResult(object):
    """The successful result of a call to a route."""

    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        # obj_result: serialized-JSON route result (sans binary payload)
        self.obj_result = obj_result
        # http_resp: raw response for streaming download-style payloads,
        # or None for RPC/upload-style routes
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route."""

    def __init__(self, request_id, obj_result):
        """
        :param str request_id: Identifier that can be shared with Dropbox
            Support to pinpoint the exact request that returned an error.
        :param str obj_result: The result of the route, not including any
            binary payload portion.
        """
        self.obj_result = obj_result
        self.request_id = request_id
def create_session(max_connections=8, proxies=None):
    """
    Creates a session object that can be shared by multiple :class:`Dropbox`
    and :class:`DropboxTeam` instances, so they reuse one connection pool and
    one set of proxy parameters.

    :param int max_connections: Maximum connection pool size.
    :param dict proxies: See the `requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
        for more details.
    :rtype: :class:`requests.sessions.Session`. `See the requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#session-objects>`_
        for more details.
    """
    # One pool per unique hostname suffices, hence a single shared session.
    shared_session = pinned_session(pool_maxsize=max_connections)
    if proxies:
        shared_session.proxies = proxies
    return shared_session
class _DropboxTransport(object):
    """
    Responsible for implementing the wire protocol for making requests to the
    Dropbox API.
    """

    # API version segment used in every route URL.
    _API_VERSION = '2'

    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    _ROUTE_STYLE_DOWNLOAD = 'download'

    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    _ROUTE_STYLE_UPLOAD = 'upload'

    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    _ROUTE_STYLE_RPC = 'rpc'

    # This is the default longest time we'll block on receiving data from the server
    _DEFAULT_TIMEOUT = 30
    def __init__(self,
                 oauth2_access_token,
                 max_retries_on_error=4,
                 max_retries_on_rate_limit=None,
                 user_agent=None,
                 session=None,
                 headers=None,
                 timeout=_DEFAULT_TIMEOUT):
        """
        :param str oauth2_access_token: OAuth2 access token for making client
            requests.
        :param int max_retries_on_error: On 5xx errors, the number of times to
            retry.
        :param Optional[int] max_retries_on_rate_limit: On 429 errors, the
            number of times to retry. If `None`, always retries.
        :param str user_agent: The user agent to use when making requests. This
            helps us identify requests coming from your application. We
            recommend you use the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonSDKv2/__version__" to the user_agent.
        :param session: If not provided, a new session (connection pool) is
            created. To share a session across multiple clients, use
            :func:`create_session`.
        :type session: :class:`requests.sessions.Session`
        :param dict headers: Additional headers to add to requests.
        :param Optional[float] timeout: Maximum duration in seconds that
            client will wait for any single packet from the
            server. After the timeout the client will give up on
            connection. If `None`, client will wait forever. Defaults
            to 30 seconds.
        """
        assert len(oauth2_access_token) > 0, \
            'OAuth2 access token cannot be empty.'
        assert headers is None or isinstance(headers, dict), \
            'Expected dict, got %r' % headers
        self._oauth2_access_token = oauth2_access_token

        self._max_retries_on_error = max_retries_on_error
        self._max_retries_on_rate_limit = max_retries_on_rate_limit
        if session:
            assert isinstance(session, requests.sessions.Session), \
                'Expected requests.sessions.Session, got %r' % session
            self._session = session
        else:
            self._session = create_session()
        self._headers = headers

        base_user_agent = 'OfficialDropboxPythonSDKv2/' + __version__
        if user_agent:
            # Keep the caller's raw user agent so derived clients can rebuild
            # the combined value without doubling the SDK suffix.
            self._raw_user_agent = user_agent
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._raw_user_agent = None
            self._user_agent = base_user_agent

        self._logger = logging.getLogger('dropbox')

        # Maps the symbolic host names used by routes to fully qualified
        # API hostnames.
        self._host_map = {HOST_API: API_HOST,
                          HOST_CONTENT: API_CONTENT_HOST,
                          HOST_NOTIFY: API_NOTIFICATION_HOST}

        self._timeout = timeout
def clone(
self,
oauth2_access_token=None,
max_retries_on_error=None,
max_retries_on_rate_limit=None,
user_agent=None,
session=None,
headers=None,
timeout=None):
"""
Creates a new copy of the Dropbox client with the same defaults unless modified by
arguments to clone()
See constructor for original parameter descriptions.
:return: New instance of Dropbox clent
:rtype: Dropbox
"""
return self.__class__(
oauth2_access_token or self._oauth2_access_token,
max_retries_on_error or self._max_retries_on_error,
max_retries_on_rate_limit or self._max_retries_on_rate_limit,
user_agent or self._user_agent,
session or self._session,
headers or self._headers,
timeout or self._timeout
)
    def request(self,
                route,
                namespace,
                request_arg,
                request_binary,
                timeout=None):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.

        :param route: The route to make the request to.
        :type route: :class:`.datatypes.stone_base.Route`
        :param str namespace: The namespace of the route; prepended to the
            route name to build the request path.
        :param request_arg: Argument for the route that conforms to the
            validator specified by route.arg_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :param Optional[float] timeout: Maximum duration in seconds
            that client will wait for any single packet from the
            server. After the timeout the client will give up on
            connection. If `None`, will use default timeout set on
            Dropbox object. Defaults to `None`.
        :return: The route's result; for download-style routes, a tuple of
            (result, HTTP response).
        :raises ApiError: if the route returned an error result.
        """
        # The target host and style are declared on the route itself.
        host = route.attrs['host'] or 'api'
        route_name = namespace + '/' + route.name
        if route.version > 1:
            route_name += '_v{}'.format(route.version)
        route_style = route.attrs['style'] or 'rpc'

        serialized_arg = stone_serializers.json_encode(route.arg_type,
                                                       request_arg)

        if (timeout is None and
                route == files.list_folder_longpoll):
            # The client normally sends a timeout value to the
            # longpoll route. The server will respond after
            # <timeout> + random(0, 90) seconds. We increase the
            # socket timeout to the longpoll timeout value plus 90
            # seconds so that we don't cut the server response short
            # due to a shorter socket timeout.
            # NB: This is done here because base.py is auto-generated
            timeout = request_arg.timeout + 90

        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary,
                                                  timeout=timeout)

        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = route.result_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            # Error results wrap the route error in an envelope that may also
            # carry localized user-facing text.
            returned_data_type = route.error_type
            obj = decoded_obj_result['error']
            user_message = decoded_obj_result.get('user_message')
            user_message_text = user_message and user_message.get('text')
            user_message_locale = user_message and user_message.get('locale')
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))

        deserialized_result = stone_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)

        if isinstance(res, RouteErrorResult):
            raise ApiError(res.request_id,
                           deserialized_result,
                           user_message_text,
                           user_message_locale)
        elif route_style == self._ROUTE_STYLE_DOWNLOAD:
            # Download routes also hand back the raw response so the caller
            # can stream the binary body.
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
def request_json_object(self,
host,
route_name,
route_style,
request_arg,
request_binary,
timeout=None):
"""
Makes a request to the Dropbox API, taking a JSON-serializable Python
object as an argument, and returning one as a response.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:param str request_arg: A JSON-serializable Python object representing
the argument for the route.
:param Optional[bytes] request_binary: Bytes representing the binary
payload. Use None if there is no binary payload.
:param Optional[float] timeout: Maximum duration in seconds
that client will wait for any single packet from the
server. After the timeout the client will give up on
connection. If `None`, will use default timeout set on
Dropbox object. Defaults to `None`.
:return: The route's result as a JSON-serializable Python object.
"""
serialized_arg = json.dumps(request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary,
timeout=timeout)
# This can throw a ValueError if the result is not deserializable,
# but that would be completely unexpected.
deserialized_result = json.loads(res.obj_result)
if isinstance(res, RouteResult) and res.http_resp is not None:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary,
                                       timeout=None):
        """
        See :meth:`request_json_object` for description of parameters.

        Retries 5xx responses up to max_retries_on_error times with jittered
        exponential backoff, and 429 responses (honoring the server-supplied
        backoff when present) up to max_retries_on_rate_limit times, or
        forever if that limit is None.

        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        rate_limit_errors = 0
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary,
                                                timeout=timeout)
            except InternalServerError as e:
                attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    self._logger.info(
                        'HttpError status_code=%s: Retrying in %.1f seconds',
                        e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
            except RateLimitError as e:
                rate_limit_errors += 1
                if (self._max_retries_on_rate_limit is None or
                        self._max_retries_on_rate_limit >= rate_limit_errors):
                    # Set default backoff to 5 seconds.
                    backoff = e.backoff if e.backoff is not None else 5.0
                    self._logger.info(
                        'Ratelimit: Retrying in %.1f seconds.', backoff)
                    time.sleep(backoff)
                else:
                    raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary,
                            timeout=None):
        """
        See :meth:`request_json_string_with_retry` for description of
        parameters.

        Performs a single HTTP POST (no retries) and maps the response status
        to either a Route(Error)Result or a typed exception.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)

        # if not isinstance(request_binary, (six.binary_type, type(None))):
        #     # Disallow streams and file-like objects even though the underlying
        #     # requests library supports them. This is to prevent incorrect
        #     # behavior when a non-rewindable stream is read from, but the
        #     # request fails and needs to be re-tried at a later time.
        #     raise TypeError('expected request_binary as binary type, got %s' %
        #                     type(request_binary))
        # NOTE(review): the check above is disabled in this fork, which allows
        # streaming/file-like request bodies — confirm that retrying such a
        # request after a partial read is actually safe here.

        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)

        headers = {'User-Agent': self._user_agent}
        if host != HOST_NOTIFY:
            # Notification endpoints are unauthenticated; everything else
            # carries the bearer token plus any caller-supplied headers.
            headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
            if self._headers:
                headers.update(self._headers)

        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False

        if route_style == self._ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self._ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self._ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)

        if timeout is None:
            timeout = self._timeout

        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               timeout=timeout,
                               )

        request_id = r.headers.get('x-dropbox-request-id')
        if r.status_code >= 500:
            raise InternalServerError(request_id, r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(request_id, r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            err = stone_serializers.json_compat_obj_decode(
                AuthError_validator, r.json()['error'])
            raise AuthError(request_id, err)
        elif r.status_code == HTTP_STATUS_INVALID_PATH_ROOT:
            err = stone_serializers.json_compat_obj_decode(
                PathRootError_validator, r.json()['error'])
            raise PathRootError(request_id, err)
        elif r.status_code == 429:
            err = None
            if r.headers.get('content-type') == 'application/json':
                err = stone_serializers.json_compat_obj_decode(
                    RateLimitError_validator, r.json()['error'])
                retry_after = err.retry_after
            else:
                # Fall back to the standard Retry-After header, if any.
                retry_after_str = r.headers.get('retry-after')
                if retry_after_str is not None:
                    retry_after = int(retry_after_str)
                else:
                    retry_after = None
            raise RateLimitError(request_id, err, retry_after)
        elif 200 <= r.status_code <= 299:
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                # Download routes return the JSON result in a header; the
                # body is the binary payload.
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            # Business-logic errors: the body is the serialized route error.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(request_id, raw_resp)
        else:
            raise HttpError(request_id, r.status_code, r.text)
def _get_route_url(self, hostname, route_name):
"""Returns the URL of the route.
:param str hostname: Hostname to make the request to.
:param str route_name: Name of the route.
:rtype: str
"""
return 'https://{hostname}/{version}/{route_name}'.format(
hostname=hostname,
version=Dropbox._API_VERSION,
route_name=route_name,
)
def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
"""
Saves the body of an HTTP response to a file.
:param str download_path: Local path to save data to.
:param http_resp: The HTTP response whose body will be saved.
:type http_resp: :class:`requests.models.Response`
:rtype: None
"""
with open(download_path, 'wb') as f:
with contextlib.closing(http_resp):
for c in http_resp.iter_content(chunksize):
f.write(c)
def with_path_root(self, path_root):
"""
Creates a clone of the Dropbox instance with the Dropbox-API-Path-Root header
as the appropriate serialized instance of PathRoot.
For more information, see
https://www.dropbox.com/developers/reference/namespace-guide#pathrootmodes
:param PathRoot path_root: instance of PathRoot to serialize into the headers field
:return: A :class: `Dropbox`
:rtype: Dropbox
"""
if not isinstance(path_root, PathRoot):
raise ValueError("path_root must be an instance of PathRoot")
return self.clone(
headers={
PATH_ROOT_HEADER: stone_serializers.json_encode(PathRoot_validator, path_root)
}
)
class Dropbox(_DropboxTransport, DropboxBase):
    """
    Use this class to make requests to the Dropbox API using a user's access
    token. Methods of this class are meant to act on the corresponding user's
    Dropbox.
    """
    # All behavior comes from _DropboxTransport (wire protocol) and the
    # generated DropboxBase (route methods).
    pass
class DropboxTeam(_DropboxTransport, DropboxTeamBase):
    """
    Use this class to make requests to the Dropbox API using a team's access
    token. Methods of this class are meant to act on the team, but there is
    also an :meth:`as_user` method for assuming a team member's identity.
    """

    def as_admin(self, team_member_id):
        """
        Allows a team credential to assume the identity of an administrator on
        the team and perform operations on any team-owned content.

        :param str team_member_id: team member id of administrator to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this admin of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            'Dropbox-API-Select-Admin', team_member_id)

    def as_user(self, team_member_id):
        """
        Allows a team credential to assume the identity of a member of the
        team.

        :param str team_member_id: team member id of team member to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this member of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            'Dropbox-API-Select-User', team_member_id)

    def _get_dropbox_client_with_select_header(self, select_header_name,
                                               team_member_id):
        """
        Get a Dropbox client whose headers select the given team member.

        :param str select_header_name: Header name used to select users
        :param str team_member_id: team member id of team member to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of a member or admin of the team
        :rtype: Dropbox
        """
        headers = dict(self._headers) if self._headers else {}
        headers[select_header_name] = team_member_id
        return Dropbox(
            self._oauth2_access_token,
            max_retries_on_error=self._max_retries_on_error,
            max_retries_on_rate_limit=self._max_retries_on_rate_limit,
            timeout=self._timeout,
            user_agent=self._raw_user_agent,
            session=self._session,
            headers=headers,
        )
| 41.329508 | 94 | 0.589822 | __all__ = [
'Dropbox',
'DropboxTeam',
'create_session',
]
__version__ = '0.0.0'
import contextlib
import json
import logging
import random
import time
import requests
import six
from . import files, stone_serializers
from .auth import (
AuthError_validator,
RateLimitError_validator,
)
from .common import (
PathRoot,
PathRoot_validator,
PathRootError_validator
)
from .base import DropboxBase
from .base_team import DropboxTeamBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
PathRootError,
InternalServerError,
RateLimitError,
)
from .session import (
API_HOST,
API_CONTENT_HOST,
API_NOTIFICATION_HOST,
HOST_API,
HOST_CONTENT,
HOST_NOTIFY,
pinned_session,
)
PATH_ROOT_HEADER = 'Dropbox-API-Path-Root'
HTTP_STATUS_INVALID_PATH_ROOT = 422
class RouteResult(object):
def __init__(self, obj_result, http_resp=None):
assert isinstance(obj_result, six.string_types), \
'obj_result: expected string, got %r' % type(obj_result)
if http_resp is not None:
assert isinstance(http_resp, requests.models.Response), \
'http_resp: expected requests.models.Response, got %r' % \
type(http_resp)
self.obj_result = obj_result
self.http_resp = http_resp
class RouteErrorResult(object):
def __init__(self, request_id, obj_result):
self.request_id = request_id
self.obj_result = obj_result
def create_session(max_connections=8, proxies=None):
session = pinned_session(pool_maxsize=max_connections)
if proxies:
session.proxies = proxies
return session
class _DropboxTransport(object):
_API_VERSION = '2'
_ROUTE_STYLE_DOWNLOAD = 'download'
_ROUTE_STYLE_UPLOAD = 'upload'
_ROUTE_STYLE_RPC = 'rpc'
_DEFAULT_TIMEOUT = 30
def __init__(self,
oauth2_access_token,
max_retries_on_error=4,
max_retries_on_rate_limit=None,
user_agent=None,
session=None,
headers=None,
timeout=_DEFAULT_TIMEOUT):
assert len(oauth2_access_token) > 0, \
'OAuth2 access token cannot be empty.'
assert headers is None or isinstance(headers, dict), \
'Expected dict, got %r' % headers
self._oauth2_access_token = oauth2_access_token
self._max_retries_on_error = max_retries_on_error
self._max_retries_on_rate_limit = max_retries_on_rate_limit
if session:
assert isinstance(session, requests.sessions.Session), \
'Expected requests.sessions.Session, got %r' % session
self._session = session
else:
self._session = create_session()
self._headers = headers
base_user_agent = 'OfficialDropboxPythonSDKv2/' + __version__
if user_agent:
self._raw_user_agent = user_agent
self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
else:
self._raw_user_agent = None
self._user_agent = base_user_agent
self._logger = logging.getLogger('dropbox')
self._host_map = {HOST_API: API_HOST,
HOST_CONTENT: API_CONTENT_HOST,
HOST_NOTIFY: API_NOTIFICATION_HOST}
self._timeout = timeout
def clone(
self,
oauth2_access_token=None,
max_retries_on_error=None,
max_retries_on_rate_limit=None,
user_agent=None,
session=None,
headers=None,
timeout=None):
return self.__class__(
oauth2_access_token or self._oauth2_access_token,
max_retries_on_error or self._max_retries_on_error,
max_retries_on_rate_limit or self._max_retries_on_rate_limit,
user_agent or self._user_agent,
session or self._session,
headers or self._headers,
timeout or self._timeout
)
def request(self,
route,
namespace,
request_arg,
request_binary,
timeout=None):
host = route.attrs['host'] or 'api'
route_name = namespace + '/' + route.name
if route.version > 1:
route_name += '_v{}'.format(route.version)
route_style = route.attrs['style'] or 'rpc'
serialized_arg = stone_serializers.json_encode(route.arg_type,
request_arg)
if (timeout is None and
route == files.list_folder_longpoll):
# The client normally sends a timeout value to the
# longpoll route. The server will respond after
# <timeout> + random(0, 90) seconds. We increase the
# socket timeout to the longpoll timeout value plus 90
# seconds so that we don't cut the server response short
timeout = request_arg.timeout + 90
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary,
timeout=timeout)
decoded_obj_result = json.loads(res.obj_result)
if isinstance(res, RouteResult):
returned_data_type = route.result_type
obj = decoded_obj_result
elif isinstance(res, RouteErrorResult):
returned_data_type = route.error_type
obj = decoded_obj_result['error']
user_message = decoded_obj_result.get('user_message')
user_message_text = user_message and user_message.get('text')
user_message_locale = user_message and user_message.get('locale')
else:
raise AssertionError('Expected RouteResult or RouteErrorResult, '
'but res is %s' % type(res))
deserialized_result = stone_serializers.json_compat_obj_decode(
returned_data_type, obj, strict=False)
if isinstance(res, RouteErrorResult):
raise ApiError(res.request_id,
deserialized_result,
user_message_text,
user_message_locale)
elif route_style == self._ROUTE_STYLE_DOWNLOAD:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_object(self,
host,
route_name,
route_style,
request_arg,
request_binary,
timeout=None):
serialized_arg = json.dumps(request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary,
timeout=timeout)
deserialized_result = json.loads(res.obj_result)
if isinstance(res, RouteResult) and res.http_resp is not None:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_string_with_retry(self,
host,
route_name,
route_style,
request_json_arg,
request_binary,
timeout=None):
attempt = 0
rate_limit_errors = 0
while True:
self._logger.info('Request to %s', route_name)
try:
return self.request_json_string(host,
route_name,
route_style,
request_json_arg,
request_binary,
timeout=timeout)
except InternalServerError as e:
attempt += 1
if attempt <= self._max_retries_on_error:
backoff = 2**attempt * random.random()
self._logger.info(
'HttpError status_code=%s: Retrying in %.1f seconds',
e.status_code, backoff)
time.sleep(backoff)
else:
raise
except RateLimitError as e:
rate_limit_errors += 1
if (self._max_retries_on_rate_limit is None or
self._max_retries_on_rate_limit >= rate_limit_errors):
backoff = e.backoff if e.backoff is not None else 5.0
self._logger.info(
'Ratelimit: Retrying in %.1f seconds.', backoff)
time.sleep(backoff)
else:
raise
def request_json_string(self,
host,
func_name,
route_style,
request_json_arg,
request_binary,
timeout=None):
if host not in self._host_map:
raise ValueError('Unknown value for host: %r' % host)
headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
if self._headers:
headers.update(self._headers)
body = None
stream = False
if route_style == self._ROUTE_STYLE_RPC:
headers['Content-Type'] = 'application/json'
body = request_json_arg
elif route_style == self._ROUTE_STYLE_DOWNLOAD:
headers['Dropbox-API-Arg'] = request_json_arg
stream = True
elif route_style == self._ROUTE_STYLE_UPLOAD:
headers['Content-Type'] = 'application/octet-stream'
headers['Dropbox-API-Arg'] = request_json_arg
body = request_binary
else:
raise ValueError('Unknown operation style: %r' % route_style)
if timeout is None:
timeout = self._timeout
r = self._session.post(url,
headers=headers,
data=body,
stream=stream,
verify=True,
timeout=timeout,
)
request_id = r.headers.get('x-dropbox-request-id')
if r.status_code >= 500:
raise InternalServerError(request_id, r.status_code, r.text)
elif r.status_code == 400:
raise BadInputError(request_id, r.text)
elif r.status_code == 401:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
err = stone_serializers.json_compat_obj_decode(
AuthError_validator, r.json()['error'])
raise AuthError(request_id, err)
elif r.status_code == HTTP_STATUS_INVALID_PATH_ROOT:
err = stone_serializers.json_compat_obj_decode(
PathRootError_validator, r.json()['error'])
raise PathRootError(request_id, err)
elif r.status_code == 429:
err = None
if r.headers.get('content-type') == 'application/json':
err = stone_serializers.json_compat_obj_decode(
RateLimitError_validator, r.json()['error'])
retry_after = err.retry_after
else:
retry_after_str = r.headers.get('retry-after')
if retry_after_str is not None:
retry_after = int(retry_after_str)
else:
retry_after = None
raise RateLimitError(request_id, err, retry_after)
elif 200 <= r.status_code <= 299:
if route_style == self._ROUTE_STYLE_DOWNLOAD:
raw_resp = r.headers['dropbox-api-result']
else:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
raw_resp = r.content.decode('utf-8')
if route_style == self._ROUTE_STYLE_DOWNLOAD:
return RouteResult(raw_resp, r)
else:
return RouteResult(raw_resp)
elif r.status_code in (403, 404, 409):
raw_resp = r.content.decode('utf-8')
return RouteErrorResult(request_id, raw_resp)
else:
raise HttpError(request_id, r.status_code, r.text)
def _get_route_url(self, hostname, route_name):
return 'https://{hostname}/{version}/{route_name}'.format(
hostname=hostname,
version=Dropbox._API_VERSION,
route_name=route_name,
)
def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
with open(download_path, 'wb') as f:
with contextlib.closing(http_resp):
for c in http_resp.iter_content(chunksize):
f.write(c)
def with_path_root(self, path_root):
if not isinstance(path_root, PathRoot):
raise ValueError("path_root must be an instance of PathRoot")
return self.clone(
headers={
PATH_ROOT_HEADER: stone_serializers.json_encode(PathRoot_validator, path_root)
}
)
class Dropbox(_DropboxTransport, DropboxBase):
pass
class DropboxTeam(_DropboxTransport, DropboxTeamBase):
def as_admin(self, team_member_id):
return self._get_dropbox_client_with_select_header('Dropbox-API-Select-Admin',
team_member_id)
def as_user(self, team_member_id):
return self._get_dropbox_client_with_select_header('Dropbox-API-Select-User',
team_member_id)
def _get_dropbox_client_with_select_header(self, select_header_name, team_member_id):
new_headers = self._headers.copy() if self._headers else {}
new_headers[select_header_name] = team_member_id
return Dropbox(
self._oauth2_access_token,
max_retries_on_error=self._max_retries_on_error,
max_retries_on_rate_limit=self._max_retries_on_rate_limit,
timeout=self._timeout,
user_agent=self._raw_user_agent,
session=self._session,
headers=new_headers,
)
| true | true |
f739c889b4f980cb45ac1e10f79885ee3a23b3b7 | 224 | py | Python | test/TestFizzBuzz.py | ystromm/rx-lab-py | b3af3f4a56a41d0284c61813ad7d9951f3538dd1 | [
"Apache-2.0"
] | null | null | null | test/TestFizzBuzz.py | ystromm/rx-lab-py | b3af3f4a56a41d0284c61813ad7d9951f3538dd1 | [
"Apache-2.0"
] | 1 | 2021-02-09T08:11:09.000Z | 2021-02-09T08:11:09.000Z | test/TestFizzBuzz.py | ystromm/rx-lab-py | b3af3f4a56a41d0284c61813ad7d9951f3538dd1 | [
"Apache-2.0"
] | null | null | null | import unittest
from src.fizzbuzz import FizzBuzz
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(FizzBuzz().fixxBuzz(), "1 2")
if __name__ == '__main__':
unittest.main()
| 17.230769 | 54 | 0.705357 | import unittest
from src.fizzbuzz import FizzBuzz
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(FizzBuzz().fixxBuzz(), "1 2")
if __name__ == '__main__':
unittest.main()
| true | true |
f739c9c9ba6cf4dc0888a2f41048bb55910c7c84 | 2,416 | py | Python | NLP_Offensive_Language_Detection/data_process_w2v.py | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | null | null | null | NLP_Offensive_Language_Detection/data_process_w2v.py | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | null | null | null | NLP_Offensive_Language_Detection/data_process_w2v.py | quantumiracle/Course_Code | 5dffd874b33af983fc309e064cc3eaeebbebbae4 | [
"MIT"
] | 1 | 2021-06-12T16:06:10.000Z | 2021-06-12T16:06:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/15 17:36
# @Author : Kang
# @Site :
# @File : data_process.py
# @Software: PyCharm
import pandas as pd
from gensim.models import Word2Vec
from sklearn.decomposition import IncrementalPCA # inital reduction
from sklearn.manifold import TSNE # final reduction
import numpy as np # array handling
from data_process import DataHandle
# from plotly.offline import init_notebook_mode, iplot, plot
# import plotly.graph_objs as go
# def reduce_dimensions(model, plot_in_notebook = False):
#
# num_dimensions = 2 # final num dimensions (2D, 3D, etc)
#
# vectors = [] # positions in vector space
# labels = [] # keep track of words to label our data again later
# for word in model.wv.vocab:
# vectors.append(model[word])
# labels.append(word)
#
#
# # convert both lists into numpy vectors for reduction
# vectors = np.asarray(vectors)
# labels = np.asarray(labels)
#
# # reduce using t-SNE
# vectors = np.asarray(vectors)
# tsne = TSNE(n_components=num_dimensions, random_state=0)
# vectors = tsne.fit_transform(vectors)
#
# x_vals = [v[0] for v in vectors]
# y_vals = [v[1] for v in vectors]
#
# # Create a trace
# trace = go.Scatter(
# x=x_vals,
# y=y_vals,
# mode='text',
# text=labels
# )
#
# data = [trace]
#
# if plot_in_notebook:
# init_notebook_mode(connected=True)
# iplot(data, filename='word-embedding-plot')
# else:
# plot(data, filename='word-embedding-plot.html')
if __name__ == '__main__':
    # Build the tokenized corpus via the project's preprocessing pipeline.
    ex = DataHandle()
    # print(ex.tokenized_corpus)
    # print(ex.vocabulary)
    # print(ex.word2idx)
    sentences = ex.tokenized_corpus
    '''https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/word2vec.ipynb'''
    # Train 20-dimensional vectors; min_count=1 keeps every token in the vocab.
    model = Word2Vec(sentences,min_count=1, window=5, size=20)
    # model.build_vocab(sentences) # prepare the model vocabulary
    # NOTE(review): passing `sentences` to Word2Vec() already trains the model
    # on construction, so this call trains a second pass over the same corpus
    # -- confirm the extra epochs are intended.  `model.iter` is the old alias
    # of `model.epochs` (deprecated in newer gensim releases).
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter) # train word vectors
    # plot
    # reduce_dimensions(model)
    # Sanity checks: mean magnitude of one word vector, a similarity score,
    # and the classic king - man + woman analogy.
    # NOTE(review): `model[word]` / `model.similarity` / `model.most_similar`
    # are deprecated in gensim >= 4; the `model.wv` equivalents are the
    # stable API -- verify against the pinned gensim version.
    print(np.mean(abs(model['comput'])))
    print(model.similarity('woman', 'man'))
    print(model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1))
    print(type(model))
| 30.974359 | 102 | 0.636175 |
import pandas as pd
from gensim.models import Word2Vec
from sklearn.decomposition import IncrementalPCA
from sklearn.manifold import TSNE
import numpy as np
from data_process import DataHandle
ze=20)
tal_examples=model.corpus_count, epochs=model.iter)
print(np.mean(abs(model['comput'])))
print(model.similarity('woman', 'man'))
print(model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1))
print(type(model))
| true | true |
f739ca0d6c6e60b74cbca4374745a74be5044347 | 10,648 | py | Python | IPython/core/completerlib.py | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | 1 | 2018-09-24T13:45:40.000Z | 2018-09-24T13:45:40.000Z | IPython/core/completerlib.py | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/core/completerlib.py | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | 1 | 2017-02-03T06:22:33.000Z | 2017-02-03T06:22:33.000Z | """Implementations for various useful completers.
These are all loaded by default by IPython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import glob
import inspect
import os
import re
import sys
# Third-party imports
from time import time
from zipimport import zipimporter
# Our own imports
from IPython.core.completer import expand_user, compress_user
from IPython.core.error import TryNext
from IPython.utils import py3compat
from IPython.utils._process_common import arg_split
# FIXME: this should be pulled in with the right call via the component system
from IPython.core.ipapi import get as get_ipython
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Time in seconds after which the rootmodules will be stored permanently in the
# ipython ip.db database (kept in the user's .ipython dir).
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'.*(\.so|\.py[cod]?)$')
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.

    *path* may be a directory (searched for importable ``.py``/``.so`` files
    and for packages containing ``__init__.py``) or a ``.egg`` zip archive.
    An empty string means the current directory.  Anything unreadable yields
    an empty list.
    """
    # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
    if path == '':
        path = '.'

    if os.path.isdir(path):
        folder_list = os.listdir(path)
    elif path.endswith('.egg'):
        try:
            folder_list = [f for f in zipimporter(path)._files]
        except Exception:
            # Unreadable/corrupt egg: offer no completions rather than crash.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            folder_list = []
    else:
        folder_list = []

    if not folder_list:
        return []

    # A few local constants to be used in loops below
    isfile = os.path.isfile
    pjoin = os.path.join
    basename = os.path.basename

    def is_importable_file(path):
        """Returns True if the provided path is a valid importable module"""
        name, _ = os.path.splitext(path)
        return import_re.match(path) and py3compat.isidentifier(name)

    # Now find actual path matches for packages or modules
    folder_list = [p for p in folder_list
                   if isfile(pjoin(path, p, '__init__.py'))
                   or is_importable_file(p)]

    # Strip the extension so 'foo.py' completes as 'foo'.
    return [basename(p).split('.')[0] for p in folder_list]
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.

    The result is cached in ``ip.db['rootmodules']`` whenever the scan takes
    longer than TIMEOUT_STORAGE seconds; the scan is abandoned entirely (and
    an empty list cached) after TIMEOUT_GIVEUP seconds.
    """
    ip = get_ipython()

    # Fast path: a previous slow scan stored its result in the user db.
    if 'rootmodules' in ip.db:
        return ip.db['rootmodules']
    t = time()
    store = False
    modules = list(sys.builtin_module_names)
    for path in sys.path:
        modules += module_list(path)
        # Once the scan proves slow, warn the user and remember to cache.
        if time() - t >= TIMEOUT_STORAGE and not store:
            store = True
            print("\nCaching the list of root modules, please wait!")
            print("(This will only be done once - type '%rehashx' to "
                  "reset cache!)\n")
            sys.stdout.flush()
        if time() - t > TIMEOUT_GIVEUP:
            print("This is taking too long, we give up.\n")
            # NOTE(review): caching [] here means later sessions see no root
            # modules at all until %rehashx clears the entry.
            ip.db['rootmodules'] = []
            return []

    # De-duplicate and drop the '__init__' pseudo-module.
    modules = set(modules)
    if '__init__' in modules:
        modules.remove('__init__')
    modules = list(modules)
    if store:
        ip.db['rootmodules'] = modules
    return modules
def is_importable(module, attr, only_modules):
    """Decide whether *attr* of *module* should be offered as a completion.

    When *only_modules* is true, only attributes that are themselves modules
    qualify; otherwise everything except dunder names does.
    """
    if only_modules:
        return inspect.ismodule(getattr(module, attr))
    # Hide dunder attributes such as __name__ / __file__.
    is_dunder = attr.startswith('__') and attr.endswith('__')
    return not is_dunder
def try_import(mod, only_modules=False):
    """Import dotted module name *mod* and return its completion candidates.

    Candidates are the module's attributes (filtered through
    :func:`is_importable`), any names listed in ``__all__``, and -- for
    packages -- the modules found in the package directory.  Returns ``[]``
    if the import fails for any reason.
    """
    try:
        m = __import__(mod)
    except Exception:
        # Completion must never blow up because a module fails to import.
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit.)
        return []

    # __import__('a.b.c') returns package 'a'; walk down to the leaf module.
    mods = mod.split('.')
    for module in mods[1:]:
        m = getattr(m, module)

    # A package's module object has __init__ in its __file__.
    m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__

    completions = []
    if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
        completions.extend([attr for attr in dir(m) if
                            is_importable(m, attr, only_modules)])

    completions.extend(getattr(m, '__all__', []))
    if m_is_init:
        # Packages also complete to the modules living in their directory.
        completions.extend(module_list(os.path.dirname(m.__file__)))
    completions = set(completions)
    if '__init__' in completions:
        completions.remove('__init__')
    return list(completions)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.
    Takes either a list of completions, or all completions in string (that will
    be split on whitespace).
    Example::
        [d:\ipython]|1> import ipy_completers
        [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
        [d:\ipython]|3> foo b<TAB>
        bar baz
        [d:\ipython]|3> foo ba
    """
    # NOTE(review): ``basestring`` is Python 2 only, consistent with the rest
    # of this module (print_function import, iterkeys usage elsewhere).
    if isinstance(completions, basestring):
        completions = completions.split()
    def do_complete(self, event):
        # Always offer the same fixed word list, regardless of the event.
        return completions
    get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
def module_completion(line):
    """
    Returns a list containing the completion possibilities for an import line.

    The line looks like this :

    'import xml.d'
    'from xml.dom import'
    """
    words = line.split(' ')
    nwords = len(words)

    # 'from whatever <tab>' -> suggest the 'import ' keyword itself.
    if words[0] == 'from' and nwords == 3:
        return ['import ']

    # 'import xy<tab>' or 'from xy<tab>': complete (sub)module names.
    if words[0] in ('import', 'from') and nwords < 3:
        if nwords == 1:
            return get_root_modules()
        dotted = words[1].split('.')
        if len(dotted) < 2:
            return get_root_modules()
        parent = '.'.join(dotted[:-1])
        return ['.'.join(dotted[:-1] + [child])
                for child in try_import(parent, True)]

    # 'from xyz import abc<tab>': complete names inside the module.
    if words[0] == 'from' and nwords >= 3:
        return try_import(words[1])
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self, event):
    """Give completions after user has typed 'import ...' or 'from ...'"""
    # Works on every supported Python version by scanning the sys.path
    # folders directly instead of using pkgutil.walk_packages(), which
    # imports *everything* on sys.path -- slow and full of side effects.
    return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
# completers, that is currently reimplemented in each.
def magic_run_completer(self, event):
    """Complete files that end in .py or .ipy for the %run command.
    """
    comps = arg_split(event.line, strict=False)
    # Last argument on the line (quotes stripped), or '' if only %run typed.
    relpath = (len(comps) > 1 and comps[-1] or '').strip("'\"")
    #print("\nev=", event) # dbg
    #print("rp=", relpath) # dbg
    #print('comps=', comps) # dbg
    lglob = glob.glob
    isdir = os.path.isdir
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    # Directories are always offered (with a trailing slash) so the user can
    # keep drilling down.
    dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    # NOTE(review): on Python 3 ``filter`` returns an always-truthy iterator,
    # which would make this branch unconditional -- fine for the Python 2
    # interpreter this module targets.
    if filter(magic_run_re.match, comps):
        pys = [f.replace('\\','/') for f in lglob('*')]
    else:
        pys = [f.replace('\\','/')
               for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
               lglob(relpath + '*.pyw')]
    #print('run comp:', dirs+pys) # dbg
    return [compress_user(p, tilde_expand, tilde_val) for p in dirs+pys]
def cd_completer(self, event):
    """Completer function for cd, which only returns directories.

    Also handles the special forms ``cd -b <bookmark>``, ``cd -<n>``
    (directory-history index) and ``cd --<name>`` (history by basename).
    """
    ip = get_ipython()
    relpath = event.symbol
    #print(event) # dbg
    # 'cd -b <tab>': complete bookmark names only.
    if event.line.endswith('-b') or ' -b ' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', None)
        if bkms:
            return bkms.keys()
        else:
            return []
    # 'cd -<tab>': offer numbered entries from the directory history.
    if event.symbol == '-':
        # Zero-pad the index so the entries line up, e.g. '-07 [/tmp]'.
        width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
        # jump in directory history by number
        fmt = '-%0' + width_dh +'d [%s]'
        ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
        if len(ents) > 1:
            return ents
        return []
    # 'cd --<tab>': complete history entries by their basename.
    if event.symbol.startswith('--'):
        return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
    # Expand ~ in path and normalize directory separators.
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    relpath = relpath.replace('\\','/')
    found = []
    for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
              if os.path.isdir(f)]:
        if ' ' in d:
            # we don't want to deal with any of that, complex code
            # for this is elsewhere
            raise TryNext
        found.append(d)
    if not found:
        if os.path.isdir(relpath):
            return [compress_user(relpath, tilde_expand, tilde_val)]
        # if no completions so far, try bookmarks
        # NOTE(review): dict.iterkeys() is Python 2 only, consistent with the
        # rest of this module.
        bks = self.db.get('bookmarks',{}).iterkeys()
        bkmatches = [s for s in bks if s.startswith(event.symbol)]
        if bkmatches:
            return bkmatches
        raise TryNext
    return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
    """A completer for %reset magic"""
    # %reset accepts the -f/-s flags plus these named namespaces.
    return ['-f', '-s', 'in', 'out', 'array', 'dhist']
| 32.266667 | 79 | 0.567243 |
from __future__ import print_function
import glob
import inspect
import os
import re
import sys
from time import time
from zipimport import zipimporter
from IPython.core.completer import expand_user, compress_user
from IPython.core.error import TryNext
from IPython.utils import py3compat
from IPython.utils._process_common import arg_split
from IPython.core.ipapi import get as get_ipython
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'.*(\.so|\.py[cod]?)$')
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
# sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
if path == '':
path = '.'
if os.path.isdir(path):
folder_list = os.listdir(path)
elif path.endswith('.egg'):
try:
folder_list = [f for f in zipimporter(path)._files]
except:
folder_list = []
else:
folder_list = []
if not folder_list:
return []
# A few local constants to be used in loops below
isfile = os.path.isfile
pjoin = os.path.join
basename = os.path.basename
def is_importable_file(path):
name, extension = os.path.splitext( path )
return import_re.match(path) and py3compat.isidentifier(name)
# Now find actual path matches for packages or modules
folder_list = [p for p in folder_list
if isfile(pjoin(path, p,'__init__.py'))
or is_importable_file(p) ]
return [basename(p).split('.')[0] for p in folder_list]
def get_root_modules():
ip = get_ipython()
if 'rootmodules' in ip.db:
return ip.db['rootmodules']
t = time()
store = False
modules = list(sys.builtin_module_names)
for path in sys.path:
modules += module_list(path)
if time() - t >= TIMEOUT_STORAGE and not store:
store = True
print("\nCaching the list of root modules, please wait!")
print("(This will only be done once - type '%rehashx' to "
"reset cache!)\n")
sys.stdout.flush()
if time() - t > TIMEOUT_GIVEUP:
print("This is taking too long, we give up.\n")
ip.db['rootmodules'] = []
return []
modules = set(modules)
if '__init__' in modules:
modules.remove('__init__')
modules = list(modules)
if store:
ip.db['rootmodules'] = modules
return modules
def is_importable(module, attr, only_modules):
if only_modules:
return inspect.ismodule(getattr(module, attr))
else:
return not(attr[:2] == '__' and attr[-2:] == '__')
def try_import(mod, only_modules=False):
try:
m = __import__(mod)
except:
return []
mods = mod.split('.')
for module in mods[1:]:
m = getattr(m, module)
m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
completions = []
if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
completions.extend( [attr for attr in dir(m) if
is_importable(m, attr, only_modules)])
completions.extend(getattr(m, '__all__', []))
if m_is_init:
completions.extend(module_list(os.path.dirname(m.__file__)))
completions = set(completions)
if '__init__' in completions:
completions.remove('__init__')
return list(completions)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
if isinstance(completions, basestring):
completions = completions.split()
def do_complete(self, event):
return completions
get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
def module_completion(line):
words = line.split(' ')
nwords = len(words)
# from whatever <tab> -> 'import '
if nwords == 3 and words[0] == 'from':
return ['import ']
# 'from xy<tab>' or 'import xy<tab>'
if nwords < 3 and (words[0] in ['import','from']) :
if nwords == 1:
return get_root_modules()
mod = words[1].split('.')
if len(mod) < 2:
return get_root_modules()
completion_list = try_import('.'.join(mod[:-1]), True)
return ['.'.join(mod[:-1] + [el]) for el in completion_list]
# 'from xyz import abc<tab>'
if nwords >= 3 and words[0] == 'from':
mod = words[1]
return try_import(mod)
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self,event):
# This works in all versions of python. While 2.5 has
# pkgutil.walk_packages(), that particular routine is fairly dangerous,
# since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
# of possibly problematic side effects.
# This search the folders in the sys.path for available modules.
return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
def magic_run_completer(self, event):
comps = arg_split(event.line, strict=False)
relpath = (len(comps) > 1 and comps[-1] or '').strip("'\"")
#print("\nev=", event) # dbg
#print("rp=", relpath) # dbg
#print('comps=', comps) # dbg
lglob = glob.glob
isdir = os.path.isdir
relpath, tilde_expand, tilde_val = expand_user(relpath)
dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
# Find if the user has already typed the first filename, after which we
# should complete on all files, since after the first one other files may
# be arguments to the input script.
if filter(magic_run_re.match, comps):
pys = [f.replace('\\','/') for f in lglob('*')]
else:
pys = [f.replace('\\','/')
for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
lglob(relpath + '*.pyw')]
#print('run comp:', dirs+pys) # dbg
return [compress_user(p, tilde_expand, tilde_val) for p in dirs+pys]
def cd_completer(self, event):
ip = get_ipython()
relpath = event.symbol
#print(event) # dbg
if event.line.endswith('-b') or ' -b ' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', None)
if bkms:
return bkms.keys()
else:
return []
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh +'d [%s]'
ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
if len(ents) > 1:
return ents
return []
if event.symbol.startswith('--'):
return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
# Expand ~ in path and normalize directory separators.
relpath, tilde_expand, tilde_val = expand_user(relpath)
relpath = relpath.replace('\\','/')
found = []
for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
if os.path.isdir(f)]:
if ' ' in d:
# we don't want to deal with any of that, complex code
# for this is elsewhere
raise TryNext
found.append(d)
if not found:
if os.path.isdir(relpath):
return [compress_user(relpath, tilde_expand, tilde_val)]
# if no completions so far, try bookmarks
bks = self.db.get('bookmarks',{}).iterkeys()
bkmatches = [s for s in bks if s.startswith(event.symbol)]
if bkmatches:
return bkmatches
raise TryNext
return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
    """Tab-completion hook for %reset: its flags plus the resettable namespaces."""
    return ['-f', '-s', 'in', 'out', 'array', 'dhist']
| true | true |
f739cb9a8109ac9db8b44e9efd16bee1d1bf51d5 | 7,988 | py | Python | qa/rpc-tests/txn_clone.py | Ankh-Trust/credit-core | fa6fd67bdc9846cc40ee24f9a64e610e634b9356 | [
"MIT"
] | 2 | 2019-10-31T11:56:31.000Z | 2019-11-02T08:48:45.000Z | qa/rpc-tests/txn_clone.py | Ankh-fdn/credit-core | fa6fd67bdc9846cc40ee24f9a64e610e634b9356 | [
"MIT"
] | 2 | 2019-11-22T18:49:20.000Z | 2020-10-06T11:44:46.000Z | qa/rpc-tests/txn_clone.py | Ankh-fdn/credit-core | fa6fd67bdc9846cc40ee24f9a64e610e634b9356 | [
"MIT"
] | 1 | 2020-06-09T16:15:27.000Z | 2020-06-09T16:15:27.000Z | #!/usr/bin/env python2
# Copyright (c) 2016-2017 The Duality Blockchain Solutions developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import CreditTestFramework
from test_framework.util import *
class TxnMallTest(CreditTestFramework):
    """Regression test: wallet accounting must remain correct when a
    transaction is replaced in the chain by an equivalent (malleated)
    clone of itself."""
    def add_options(self, parser):
        # --mineblock first confirms tx1/tx2 on node0's side of the split.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)
    def run_test(self):
        """Clone tx1 via three raw-hex manipulations plus a different
        sighash type, mine the clone on the other half of the split
        network, then verify balances and confirmation counts after
        the chains rejoin."""
        # All nodes should start with 12,500 0AC:
        starting_balance = 12500
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
        # 3 hex manipulations on the clone are required
        # manipulation 1. sequence is at version+#inputs+input+sigstub
        posseq = 2*(4+1+36+1)
        seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
        # Write the sequence back in little-endian byte order.
        clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
        # manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 400 0AC serialized is 00902f5009000000
        pos0 = 2*(4+1+36+1+4+1)
        hex400 = "00902f5009000000"
        # value (8 bytes) + script length byte + script bytes, all in hex chars.
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
            rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # manipulation 3. locktime is after outputs
        poslt = pos0 + 2 * output_len
        ltbe = '%08x' % rawtx1["locktime"]
        clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 500 0AC for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 500
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1) # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations: tx1 was reorged out (conflicted),
        # its clone is in the chain instead.
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 1000 0AC for 2 matured,
        # less possible orphaned matured subsidy
        expected += 1000
        if (self.options.mine_block):
            expected -= 500
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)
        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                                                                - 12190
                                                                + fund_foo_tx["fee"]
                                                                - 290
                                                                + fund_bar_tx["fee"]
                                                                + 1000)
        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
# Standard test-harness entry point: run this regression test as a script.
if __name__ == '__main__':
    TxnMallTest().main()
| 48.412121 | 115 | 0.610666 |
from test_framework.test_framework import CreditTestFramework
from test_framework.util import *
class TxnMallTest(CreditTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("")
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
node1_address = self.nodes[1].getnewaddress("from0")
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
# 3 hex manipulations on the clone are required
# manipulation 1. sequence is at version+#inputs+input+sigstub
posseq = 2*(4+1+36+1)
seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
# manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 0AC serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# manipulation 3. locktime is after outputs
poslt = pos0 + 2 * output_len
ltbe = '%08x' % rawtx1["locktime"]
clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
self.nodes[2].generate(1)
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1)
sync_blocks(self.nodes)
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 12190
+ fund_foo_tx["fee"]
- 290
+ fund_bar_tx["fee"]
+ 1000)
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| true | true |
f739cbfd641bb2dc93074bcc2bad8c1b8fe799cd | 930 | py | Python | docs/jnpr_healthbot_swagger/test/test_rule_schema_sensor.py | dmontagner/healthbot-py-client | 0952e0a9e7ed63c9fe84879f40407c3327735252 | [
"Apache-2.0"
] | 10 | 2019-10-23T12:54:37.000Z | 2022-02-07T19:24:30.000Z | docs/jnpr_healthbot_swagger/test/test_rule_schema_sensor.py | dmontagner/healthbot-py-client | 0952e0a9e7ed63c9fe84879f40407c3327735252 | [
"Apache-2.0"
] | 5 | 2019-09-30T04:29:25.000Z | 2022-02-16T12:21:06.000Z | docs/jnpr_healthbot_swagger/test/test_rule_schema_sensor.py | dmontagner/healthbot-py-client | 0952e0a9e7ed63c9fe84879f40407c3327735252 | [
"Apache-2.0"
] | 4 | 2019-09-30T01:17:48.000Z | 2020-08-25T07:27:54.000Z | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rule_schema_sensor import RuleSchemaSensor # noqa: E501
from swagger_client.rest import ApiException
class TestRuleSchemaSensor(unittest.TestCase):
    """Unit-test scaffolding for the generated RuleSchemaSensor model."""

    def setUp(self):
        """No fixtures required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testRuleSchemaSensor(self):
        """Placeholder test for RuleSchemaSensor construction."""
        # FIXME: build a RuleSchemaSensor with example values for its
        # mandatory attributes, e.g.
        # swagger_client.models.rule_schema_sensor.RuleSchemaSensor()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 22.682927 | 91 | 0.716129 |
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rule_schema_sensor import RuleSchemaSensor
from swagger_client.rest import ApiException
class TestRuleSchemaSensor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testRuleSchemaSensor(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f739cc4c3b82647f58723e96e7dbbe791d8ff0db | 1,393 | py | Python | DownloadVGG_Faces.py | jfrancis71/TensorFlowApps | a9c61e2d5146c02715748221f51c656143b51b02 | [
"MIT"
] | null | null | null | DownloadVGG_Faces.py | jfrancis71/TensorFlowApps | a9c61e2d5146c02715748221f51c656143b51b02 | [
"MIT"
] | null | null | null | DownloadVGG_Faces.py | jfrancis71/TensorFlowApps | a9c61e2d5146c02715748221f51c656143b51b02 | [
"MIT"
] | null | null | null | import os
from PIL import Image
import urllib.request as ur
import urllib.request
from io import BytesIO
import requests
import csv
import h5py
import numpy as np
import argparse
def retrieve_patch(rec):
    """Download the image at rec[1], crop the box given by rec[2:6],
    and return it as a 32x32 greyscale uint8 array."""
    reply = requests.get(rec[1], timeout=10)
    image = Image.open(BytesIO(reply.content))
    left, top, right, bottom = (float(v) for v in rec[2:6])
    patch = image.crop((left, top, right, bottom)).resize((32, 32)).convert('L')
    return np.asarray(patch, dtype=np.uint8)
def retrieve_celeb(filename):
    """Read one celebrity's VGG-Faces index file and return the list of
    32x32 face patches that could be downloaded.

    Each row is space-separated; column 0 is the image id and column 8 a
    curation flag ('1' = keep).  Rows whose download/crop fails with an
    IOError are skipped silently.
    """
    pts = []
    # Fix: the original opened the file without ever closing it; 'with'
    # guarantees the handle is released even if a row raises.
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ')
        for row in reader:
            print( "image = ", row[0] )
            if row[8] != '1':
                continue
            try:
                pts.append(retrieve_patch(row))
            except IOError:
                # Bad URL / corrupt image: best-effort, skip this row.
                continue
    return pts
#Parsing the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-folder",
	help="folder for the HDF5 file and subfolder files")
args = parser.parse_args()
# Each file under <folder>/files is one celebrity's index of image URLs.
content_list = os.listdir( os.path.join( args.folder, "files") )
celebs = []
# Only the first 100 celebrities are downloaded.
for celeb in content_list[0:100]:
   print( "Celeb", celeb )
   pts = retrieve_celeb( os.path.join( args.folder, "files", celeb ) )
   celebs = celebs + pts
# Store every collected 32x32 patch in a single HDF5 dataset.
file = h5py.File( os.path.join( args.folder, "dataset.hdf5" ), 'w')
dset = file.create_dataset("/patches", data = celebs )
file.close()
| 27.313725 | 113 | 0.658291 | import os
from PIL import Image
import urllib.request as ur
import urllib.request
from io import BytesIO
import requests
import csv
import h5py
import numpy as np
import argparse
def retrieve_patch( rec ):
response = requests.get( rec[1], timeout=10 )
file = BytesIO( response.content )
img = Image.open( file )
ptch = img.crop( ( float(rec[2]),float(rec[3]),float(rec[4]), float(rec[5])) ).resize( (32,32) ).convert('L')
return np.asarray( ptch, dtype=np.uint8 )
def retrieve_celeb( filename ):
csvfile = open( filename, 'r')
reader = csv.reader(csvfile, delimiter=' ')
pts = []
for row in reader:
print( "image = ", row[0] )
if ( row[8] != '1' ):
continue
try:
pt = retrieve_patch( row )
pts.append( pt )
except IOError as e:
continue
return pts
parser = argparse.ArgumentParser()
parser.add_argument("-folder",
help="folder for the HDF5 file and subfolder files")
args = parser.parse_args()
content_list = os.listdir( os.path.join( args.folder, "files") )
celebs = []
for celeb in content_list[0:100]:
print( "Celeb", celeb )
pts = retrieve_celeb( os.path.join( args.folder, "files", celeb ) )
celebs = celebs + pts
file = h5py.File( os.path.join( args.folder, "dataset.hdf5" ), 'w')
dset = file.create_dataset("/patches", data = celebs )
file.close()
| true | true |
f739cc855e41b9fc0b747c1440357d1a0e856cde | 14,079 | bzl | Python | base/bazel/maven.bzl | qiangxu1996/vmtrace | 3411c5436d0d34e6e2b84cbf0e9395ac8c55c9d4 | [
"Apache-2.0"
] | 1 | 2020-10-04T19:30:22.000Z | 2020-10-04T19:30:22.000Z | base/bazel/maven.bzl | qiangxu1996/vmtrace | 3411c5436d0d34e6e2b84cbf0e9395ac8c55c9d4 | [
"Apache-2.0"
] | null | null | null | base/bazel/maven.bzl | qiangxu1996/vmtrace | 3411c5436d0d34e6e2b84cbf0e9395ac8c55c9d4 | [
"Apache-2.0"
] | 2 | 2020-10-04T19:30:24.000Z | 2020-11-04T05:58:17.000Z | load(":functions.bzl", "create_option_file", "explicit_target")
def _maven_pom_impl(ctx):
    """Generates the pom file for this artifact and returns a `maven`
    provider carrying the pom+jar closure of its parent chain and of its
    dependencies, so maven_repo can assemble a complete repository."""
    # Contains both *.jar and *.aar files.
    jars = depset()
    # classified jars. Sources are in clsjars["sources"]
    clsjars = {} # classifier -> depset(jars)
    clsjars["sources"] = depset()
    if ctx.attr.library:
        if ctx.attr.file or ctx.attr.classified_files:
            fail("Cannot set both file and library for a maven_pom.")
        jars = depset([jar.class_jar for jar in ctx.attr.library.java.outputs.jars], transitive = [jars])
        clsjars["sources"] = depset(ctx.attr.library.java.source_jars.to_list(), transitive = [clsjars["sources"]])
        for classifier, library in zip(ctx.attr.classifiers, ctx.attr.classified_libraries):
            if classifier not in clsjars:
                clsjars[classifier] = depset()
            clsjars[classifier] = depset(direct = [jar.class_jar for jar in library.java.outputs.jars], transitive = [clsjars[classifier]])
    if ctx.attr.file:
        if ctx.attr.library or ctx.attr.classified_libraries:
            fail("Cannot set both file and library for a maven_pom.")
        jars = depset(transitive = [ctx.attr.file.files, jars])
    if ctx.attr.classified_files:
        for classifier, file in zip(ctx.attr.classifiers, ctx.attr.classified_files):
            if classifier not in clsjars:
                clsjars[classifier] = depset()
            clsjars[classifier] = depset(transitive = [file.files, clsjars[classifier]])
    if ctx.attr.properties and ctx.files.properties_files:
        fail("Cannot set both properties and properties_files for a maven_pom.")
    # pom -> jar maps for the parent chain and for the dependency closure;
    # poms use postorder so dependencies precede their dependents.
    parent_poms = depset([], order = "postorder")
    parent_jars = {}
    parent_clsjars = {} # pom -> classifier -> depset(jars)
    deps_poms = depset([], order = "postorder")
    deps_jars = {}
    deps_clsjars = {} # pom -> classifier -> depset(jars)
    # Transitive deps through the parent attribute
    if ctx.attr.parent:
        parent_poms = depset(transitive = [ctx.attr.parent.maven.parent.poms, parent_poms], order = "postorder")
        parent_poms = depset(transitive = [ctx.attr.parent.maven.deps.poms, parent_poms], order = "postorder")
        parent_poms = depset(direct = [ctx.file.parent], transitive = [parent_poms], order = "postorder")
        parent_jars.update(ctx.attr.parent.maven.parent.jars)
        parent_jars.update(ctx.attr.parent.maven.deps.jars)
        parent_jars[ctx.file.parent] = ctx.attr.parent.maven.jars
        parent_clsjars.update(ctx.attr.parent.maven.parent.clsjars)
        parent_clsjars.update(ctx.attr.parent.maven.deps.clsjars)
        parent_clsjars[ctx.file.parent] = ctx.attr.parent.maven.clsjars
    elif hasattr(ctx.attr.source, "maven"):
        # No explicit parent: inherit the parent closure from the source pom.
        parent_poms = ctx.attr.source.maven.parent.poms
        parent_jars = ctx.attr.source.maven.parent.jars
        parent_clsjars = ctx.attr.source.maven.parent.clsjars
    # Transitive deps through deps
    if ctx.attr.deps:
        for label in ctx.attr.deps:
            deps_poms = depset(transitive = [label.maven.parent.poms, deps_poms], order = "postorder")
            deps_poms = depset(transitive = [label.maven.deps.poms, deps_poms], order = "postorder")
            deps_poms = depset(direct = [label.maven.pom], transitive = [deps_poms], order = "postorder")
            deps_jars.update(label.maven.parent.jars)
            deps_jars.update(label.maven.deps.jars)
            deps_jars[label.maven.pom] = label.maven.jars
            deps_clsjars.update(label.maven.parent.clsjars)
            deps_clsjars.update(label.maven.deps.clsjars)
            deps_clsjars[label.maven.pom] = label.maven.clsjars
    elif hasattr(ctx.attr.source, "maven"):
        deps_poms = ctx.attr.source.maven.deps.poms
        deps_jars = ctx.attr.source.maven.deps.jars
        deps_clsjars = ctx.attr.source.maven.deps.clsjars
    # Build the pom_generator command line.
    inputs = []
    args = []
    # Input file to take as base
    if ctx.file.source:
        args += ["-i", ctx.file.source.path]
        inputs += [ctx.file.source]
    # Output file
    args += ["-o", ctx.outputs.pom.path]
    args += ["-x"] if ctx.attr.export_pom else []
    # Overrides
    if ctx.attr.group:
        args += ["--group", ctx.attr.group]
    if ctx.attr.artifact:
        args += ["--artifact", ctx.attr.artifact]
    if ctx.attr.version:
        args += ["--version", ctx.attr.version]
    if ctx.attr.properties:
        args += ["--properties", ctx.file.properties.path]
        inputs += [ctx.file.properties]
    if ctx.files.properties_files:
        args += ["--properties", ":".join([file.path for file in ctx.files.properties_files])]
        inputs += ctx.files.properties_files
    if ctx.attr.version_property:
        args += ["--version_property", ctx.attr.version_property]
    # Exclusions
    for (dependency, exclusions) in ctx.attr.exclusions.items():
        args += ["--exclusion", dependency, ",".join([e for e in exclusions])]
    args += ["--deps", ":".join([dep.path for dep in ctx.files.deps])]
    inputs += ctx.files.deps
    ctx.actions.run(
        mnemonic = "GenPom",
        inputs = inputs,
        outputs = [ctx.outputs.pom],
        arguments = args,
        executable = ctx.executable._pom,
    )
    # The `maven` provider consumed by other maven_pom targets and maven_repo.
    return struct(maven = struct(
        parent = struct(
            poms = parent_poms,
            jars = parent_jars,
            clsjars = parent_clsjars,
        ),
        deps = struct(
            poms = deps_poms,
            jars = deps_jars,
            clsjars = deps_clsjars,
        ),
        pom = ctx.outputs.pom,
        jars = jars,
        clsjars = clsjars,
    ))
# Rule producing "%{name}.pom" plus a `maven` provider describing the
# artifact's jars and its transitive pom closure.
maven_pom = rule(
    attrs = {
        # Other maven_pom targets this artifact depends on.
        "deps": attr.label_list(),
        # java_library backing this pom (mutually exclusive with "file").
        "library": attr.label(
            allow_files = True,
        ),
        # If set, re-export the named target's pom instead of this one.
        "export_pom": attr.label(),
        # Maven classifiers, matched index-wise with classified_libraries
        # and classified_files.
        "classifiers": attr.string_list(
            default = [],
        ),
        "classified_libraries": attr.label_list(
            allow_files = True,
            default = [],
        ),
        # Prebuilt artifact file (e.g. an .aar) instead of a library.
        "file": attr.label(
            allow_files = True,
        ),
        "classified_files": attr.label_list(
            allow_files = True,
            default = [],
        ),
        # Maven coordinate overrides applied by the pom generator.
        "group": attr.string(),
        "version": attr.string(),
        "artifact": attr.string(),
        # Template pom used as the base for generation.
        "source": attr.label(
            allow_single_file = True,
        ),
        # Properties substituted into the pom (single file or list;
        # mutually exclusive, enforced in the implementation).
        "properties": attr.label(
            allow_single_file = True,
        ),
        "properties_files": attr.label_list(
            allow_files = True,
            default = [],
        ),
        "version_property": attr.string(),
        # Parent pom of this artifact.
        "parent": attr.label(
            allow_single_file = True,
        ),
        # dependency -> list of maven exclusions to record for it.
        "exclusions": attr.string_list_dict(),
        # The pom_generator tool (private).
        "_pom": attr.label(
            executable = True,
            cfg = "host",
            default = Label("//tools/base/bazel:pom_generator"),
            allow_files = True,
        ),
    },
    outputs = {
        "pom": "%{name}.pom",
    },
    implementation = _maven_pom_impl,
)
# A java library that can be used in a maven_repo rule.
#
# Usage:
# maven_java_library(
# name = "name",
# deps = A list of maven_java_library or maven_java_import dependencies
# # all java_library attributes
# info = A maven coordinate for this artifact as a string
# )
def maven_java_library(
        name,
        deps = None,
        runtime_deps = None,
        exclusions = None,
        export_artifact = None,
        srcs = None,
        resources = [],
        exports = None,
        pom = None,
        visibility = None,
        **kwargs):
    """Declares a java_library plus the "<name>_maven" pom target describing it.

    Arguments mirror java_library; in addition `pom` is the template pom
    file, `exclusions` maps a dependency to the maven exclusions to record
    for it, and `export_artifact` re-exports another target's artifact
    instead of building one from `srcs`.
    """
    if srcs and export_artifact:
        # Fixed typo in the error message ("Ony" -> "Only").
        fail("Only one of [srcs, export_artifact] can be used at a time")
    if export_artifact and pom:
        fail("If export_artifact is specified, the maven information cannot be changed.")
    java_exports = exports + [export_artifact] if export_artifact else exports
    native.java_library(
        name = name,
        deps = deps,
        runtime_deps = runtime_deps,
        srcs = srcs,
        # License files always travel with the jar.
        resources = native.glob(["NOTICE", "LICENSE"]) + resources,
        exports = java_exports,
        visibility = visibility,
        **kwargs
    )
    # TODO: Properly exclude libraries from the pom instead of using _neverlink hacks.
    maven_deps = (deps or []) + (exports or []) + (runtime_deps or [])
    maven_pom(
        name = name + "_maven",
        deps = [explicit_target(dep) + "_maven" for dep in maven_deps if not dep.endswith("_neverlink")] if maven_deps else None,
        exclusions = exclusions,
        library = export_artifact if export_artifact else name,
        visibility = visibility,
        source = explicit_target(export_artifact) + "_maven" if export_artifact else pom,
        export_pom = explicit_target(export_artifact) + "_maven" if export_artifact else None,
    )
def _import_with_license_impl(ctx):
    """Re-exports `dep`'s providers and attaches a `notice` struct naming
    every jar (basename without extension) covered by the license file."""
    dep = ctx.attr.dep
    stems = []
    for jar in dep[DefaultInfo].files.to_list():
        stem = jar.basename
        if jar.extension:
            # Drop the trailing ".<extension>" from the basename.
            stem = stem[:-(len(jar.extension) + 1)]
        stems.append(stem)
    return struct(
        providers = [dep[JavaInfo], dep[DefaultInfo]],
        java = dep.java,
        notice = struct(
            file = ctx.attr.notice,
            name = ",".join(stems),
        ),
    )
# Forwards a java target's providers while pairing it with the NOTICE file
# that licenses its jars (see _import_with_license_impl).
import_with_license = rule(
    implementation = _import_with_license_impl,
    attrs = {
        # The java_import (or similar) target being wrapped.
        "dep": attr.label(),
        # License/NOTICE file covering dep's jars.
        "notice": attr.label(allow_files = True),
    },
)
# A java_import rule extended with pom and parent attributes for maven libraries.
def maven_java_import(name, pom, classifiers = [], visibility = None, jars = [], **kwargs):
    """Imports prebuilt jars (plus optional "<classifier>-" variants)
    together with their NOTICE file and the "<name>_maven" pom target."""
    native.java_import(
        name = name + "_import",
        jars = jars,
        **kwargs
    )
    # Wrap the import so license tracking (NOTICE) travels with it.
    import_with_license(
        name = name,
        visibility = visibility,
        dep = name + "_import",
        notice = "NOTICE",
        tags = ["require_license"],
    )
    classified_libraries = []
    # One extra java_import per classifier, e.g. "sources-foo" importing
    # "foo-<version>-sources.jar".
    for classifier in classifiers:
        native.java_import(
            name = classifier + "-" + name,
            visibility = visibility,
            jars = [jar.replace(".jar", "-" + classifier + ".jar") for jar in jars],
            **kwargs
        )
        classified_libraries += [classifier + "-" + name]
    maven_pom(
        name = name + "_maven",
        library = name,
        classifiers = classifiers,
        classified_libraries = classified_libraries,
        visibility = visibility,
        source = pom,
    )
def maven_aar(name, aar, pom, visibility = None):
    """Exposes a prebuilt .aar file together with its "<name>_maven" pom target."""
    native.filegroup(
        name = name,
        srcs = [aar],
        visibility = visibility,
    )
    maven_pom(
        name = name + "_maven",
        file = aar,
        visibility = visibility,
        source = pom,
    )
def _maven_repo_impl(ctx):
    """Zips a maven repository containing the given artifacts, their parent
    poms and their transitive dependencies.

    Walks every artifact's `maven` provider, queueing each pom with its
    jars (and classified jars), deduplicating by pom, and hands the whole
    list to the repo_builder tool via an option file.
    """
    include_sources = ctx.attr.include_sources
    seen = {}  # pom file -> True once its files have been queued
    inputs = []  # files the zip action depends on
    args = []  # pom/jar paths handed to repo_builder
    for artifact in ctx.attr.artifacts:
        if not seen.get(artifact.maven.pom):
            # Parent poms (and their jars) first, so the repo is complete.
            for pom in artifact.maven.parent.poms.to_list():
                jars = artifact.maven.parent.jars[pom]
                if not seen.get(pom):
                    inputs += [pom] + jars.to_list()
                    args += [pom.path] + [jar.path for jar in jars.to_list()]
                    clsjars = artifact.maven.parent.clsjars[pom]
                    for classifier in clsjars:
                        inputs += clsjars[classifier].to_list()
                        args += [jar.path + ":" + classifier for jar in clsjars[classifier].to_list()]
                    seen[pom] = True
            # The artifact itself, with all of its classified jars.
            inputs += [artifact.maven.pom] + artifact.maven.jars.to_list()
            args += [artifact.maven.pom.path] + [jar.path for jar in artifact.maven.jars.to_list()]
            for classifier in artifact.maven.clsjars:
                inputs += artifact.maven.clsjars[classifier].to_list()
                args += [jar.path + ":" + classifier for jar in artifact.maven.clsjars[classifier].to_list()]
            seen[artifact.maven.pom] = True
            # Transitive dependencies.
            for pom in artifact.maven.deps.poms.to_list():
                jars = artifact.maven.deps.jars[pom]
                if not seen.get(pom):
                    inputs += [pom] + jars.to_list()
                    args += [pom.path] + [jar.path for jar in jars.to_list()]
                    if include_sources:
                        # Bug fix: this branch used to read a stale `classifier`
                        # variable leaked from the loop above (wrong classifier,
                        # possible lookup failure). include_sources asks for
                        # source jars, so add the "sources" classifier here.
                        # NOTE(review): confirm no other classifiers are wanted.
                        clsjars = artifact.maven.deps.clsjars[pom]
                        inputs += clsjars["sources"].to_list()
                        args += [jar.path + ":sources" for jar in clsjars["sources"].to_list()]
                    seen[pom] = True
    # Pass the argument list via a file to avoid command-line length limits.
    option_file = create_option_file(
        ctx,
        ctx.outputs.repo.path + ".lst",
        "\n".join(args),
    )
    ctx.actions.run(
        inputs = inputs + [option_file],
        outputs = [ctx.outputs.repo],
        mnemonic = "mavenrepo",
        executable = ctx.executable._repo,
        arguments = [ctx.outputs.repo.path, "@" + option_file.path],
    )
# Rule producing "%{name}.zip" with the collected maven repository.
_maven_repo = rule(
    attrs = {
        # maven_pom targets (carrying `maven` providers) to include.
        "artifacts": attr.label_list(),
        # Also bundle source jars of transitive dependencies.
        "include_sources": attr.bool(),
        # The repo_builder tool (private).
        "_repo": attr.label(
            executable = True,
            cfg = "host",
            default = Label("//tools/base/bazel:repo_builder"),
            allow_files = True,
        ),
    },
    outputs = {
        "repo": "%{name}.zip",
    },
    implementation = _maven_repo_impl,
)
# Creates a maven repo with the given artifacts and all their transitive
# dependencies.
#
# Usage:
# maven_repo(
# name = The name of the rule. The output of the rule will be ${name}.zip.
# artifacts = A list of all maven_java_libraries to add to the repo.
# include_sources = Add source jars to the repo as well (useful for tests).
# )
def maven_repo(artifacts = [], include_sources = False, **kwargs):
    """Macro wrapper around _maven_repo: expands each artifact label to its
    generated "<label>_maven" pom target before invoking the rule."""
    _maven_repo(
        artifacts = [explicit_target(artifact) + "_maven" for artifact in artifacts],
        include_sources = include_sources,
        **kwargs
    )
| 35.643038 | 139 | 0.589104 | load(":functions.bzl", "create_option_file", "explicit_target")
def _maven_pom_impl(ctx):
jars = depset()
clsjars = {}
clsjars["sources"] = depset()
if ctx.attr.library:
if ctx.attr.file or ctx.attr.classified_files:
fail("Cannot set both file and library for a maven_pom.")
jars = depset([jar.class_jar for jar in ctx.attr.library.java.outputs.jars], transitive = [jars])
clsjars["sources"] = depset(ctx.attr.library.java.source_jars.to_list(), transitive = [clsjars["sources"]])
for classifier, library in zip(ctx.attr.classifiers, ctx.attr.classified_libraries):
if classifier not in clsjars:
clsjars[classifier] = depset()
clsjars[classifier] = depset(direct = [jar.class_jar for jar in library.java.outputs.jars], transitive = [clsjars[classifier]])
if ctx.attr.file:
if ctx.attr.library or ctx.attr.classified_libraries:
fail("Cannot set both file and library for a maven_pom.")
jars = depset(transitive = [ctx.attr.file.files, jars])
if ctx.attr.classified_files:
for classifier, file in zip(ctx.attr.classifiers, ctx.attr.classified_files):
if classifier not in clsjars:
clsjars[classifier] = depset()
clsjars[classifier] = depset(transitive = [file.files, clsjars[classifier]])
if ctx.attr.properties and ctx.files.properties_files:
fail("Cannot set both properties and properties_files for a maven_pom.")
parent_poms = depset([], order = "postorder")
parent_jars = {}
parent_clsjars = {}
deps_poms = depset([], order = "postorder")
deps_jars = {}
deps_clsjars = {}
if ctx.attr.parent:
parent_poms = depset(transitive = [ctx.attr.parent.maven.parent.poms, parent_poms], order = "postorder")
parent_poms = depset(transitive = [ctx.attr.parent.maven.deps.poms, parent_poms], order = "postorder")
parent_poms = depset(direct = [ctx.file.parent], transitive = [parent_poms], order = "postorder")
parent_jars.update(ctx.attr.parent.maven.parent.jars)
parent_jars.update(ctx.attr.parent.maven.deps.jars)
parent_jars[ctx.file.parent] = ctx.attr.parent.maven.jars
parent_clsjars.update(ctx.attr.parent.maven.parent.clsjars)
parent_clsjars.update(ctx.attr.parent.maven.deps.clsjars)
parent_clsjars[ctx.file.parent] = ctx.attr.parent.maven.clsjars
elif hasattr(ctx.attr.source, "maven"):
parent_poms = ctx.attr.source.maven.parent.poms
parent_jars = ctx.attr.source.maven.parent.jars
parent_clsjars = ctx.attr.source.maven.parent.clsjars
if ctx.attr.deps:
for label in ctx.attr.deps:
deps_poms = depset(transitive = [label.maven.parent.poms, deps_poms], order = "postorder")
deps_poms = depset(transitive = [label.maven.deps.poms, deps_poms], order = "postorder")
deps_poms = depset(direct = [label.maven.pom], transitive = [deps_poms], order = "postorder")
deps_jars.update(label.maven.parent.jars)
deps_jars.update(label.maven.deps.jars)
deps_jars[label.maven.pom] = label.maven.jars
deps_clsjars.update(label.maven.parent.clsjars)
deps_clsjars.update(label.maven.deps.clsjars)
deps_clsjars[label.maven.pom] = label.maven.clsjars
elif hasattr(ctx.attr.source, "maven"):
deps_poms = ctx.attr.source.maven.deps.poms
deps_jars = ctx.attr.source.maven.deps.jars
deps_clsjars = ctx.attr.source.maven.deps.clsjars
inputs = []
args = []
if ctx.file.source:
args += ["-i", ctx.file.source.path]
inputs += [ctx.file.source]
args += ["-o", ctx.outputs.pom.path]
args += ["-x"] if ctx.attr.export_pom else []
if ctx.attr.group:
args += ["--group", ctx.attr.group]
if ctx.attr.artifact:
args += ["--artifact", ctx.attr.artifact]
if ctx.attr.version:
args += ["--version", ctx.attr.version]
if ctx.attr.properties:
args += ["--properties", ctx.file.properties.path]
inputs += [ctx.file.properties]
if ctx.files.properties_files:
args += ["--properties", ":".join([file.path for file in ctx.files.properties_files])]
inputs += ctx.files.properties_files
if ctx.attr.version_property:
args += ["--version_property", ctx.attr.version_property]
for (dependency, exclusions) in ctx.attr.exclusions.items():
args += ["--exclusion", dependency, ",".join([e for e in exclusions])]
args += ["--deps", ":".join([dep.path for dep in ctx.files.deps])]
inputs += ctx.files.deps
ctx.actions.run(
mnemonic = "GenPom",
inputs = inputs,
outputs = [ctx.outputs.pom],
arguments = args,
executable = ctx.executable._pom,
)
return struct(maven = struct(
parent = struct(
poms = parent_poms,
jars = parent_jars,
clsjars = parent_clsjars,
),
deps = struct(
poms = deps_poms,
jars = deps_jars,
clsjars = deps_clsjars,
),
pom = ctx.outputs.pom,
jars = jars,
clsjars = clsjars,
))
maven_pom = rule(
attrs = {
"deps": attr.label_list(),
"library": attr.label(
allow_files = True,
),
"export_pom": attr.label(),
"classifiers": attr.string_list(
default = [],
),
"classified_libraries": attr.label_list(
allow_files = True,
default = [],
),
"file": attr.label(
allow_files = True,
),
"classified_files": attr.label_list(
allow_files = True,
default = [],
),
"group": attr.string(),
"version": attr.string(),
"artifact": attr.string(),
"source": attr.label(
allow_single_file = True,
),
"properties": attr.label(
allow_single_file = True,
),
"properties_files": attr.label_list(
allow_files = True,
default = [],
),
"version_property": attr.string(),
"parent": attr.label(
allow_single_file = True,
),
"exclusions": attr.string_list_dict(),
"_pom": attr.label(
executable = True,
cfg = "host",
default = Label("//tools/base/bazel:pom_generator"),
allow_files = True,
),
},
outputs = {
"pom": "%{name}.pom",
},
implementation = _maven_pom_impl,
)
name,
deps = None,
runtime_deps = None,
exclusions = None,
export_artifact = None,
srcs = None,
resources = [],
exports = None,
pom = None,
visibility = None,
**kwargs):
if srcs and export_artifact:
fail("Ony one of [srcs, export_artifact] can be used at a time")
if export_artifact and pom:
fail("If export_artifact is specified, the maven information cannot be changed.")
java_exports = exports + [export_artifact] if export_artifact else exports
native.java_library(
name = name,
deps = deps,
runtime_deps = runtime_deps,
srcs = srcs,
resources = native.glob(["NOTICE", "LICENSE"]) + resources,
exports = java_exports,
visibility = visibility,
**kwargs
)
maven_deps = (deps or []) + (exports or []) + (runtime_deps or [])
maven_pom(
name = name + "_maven",
deps = [explicit_target(dep) + "_maven" for dep in maven_deps if not dep.endswith("_neverlink")] if maven_deps else None,
exclusions = exclusions,
library = export_artifact if export_artifact else name,
visibility = visibility,
source = explicit_target(export_artifact) + "_maven" if export_artifact else pom,
export_pom = explicit_target(export_artifact) + "_maven" if export_artifact else None,
)
def _import_with_license_impl(ctx):
names = []
for jar in ctx.attr.dep[DefaultInfo].files.to_list():
name = jar.basename
if jar.extension:
name = jar.basename[:-len(jar.extension) - 1]
names.append(name)
return struct(
providers = [ctx.attr.dep[JavaInfo], ctx.attr.dep[DefaultInfo]],
java = ctx.attr.dep.java,
notice = struct(
file = ctx.attr.notice,
name = ",".join(names),
),
)
import_with_license = rule(
implementation = _import_with_license_impl,
attrs = {
"dep": attr.label(),
"notice": attr.label(allow_files = True),
},
)
def maven_java_import(name, pom, classifiers = [], visibility = None, jars = [], **kwargs):
native.java_import(
name = name + "_import",
jars = jars,
**kwargs
)
import_with_license(
name = name,
visibility = visibility,
dep = name + "_import",
notice = "NOTICE",
tags = ["require_license"],
)
classified_libraries = []
for classifier in classifiers:
native.java_import(
name = classifier + "-" + name,
visibility = visibility,
jars = [jar.replace(".jar", "-" + classifier + ".jar") for jar in jars],
**kwargs
)
classified_libraries += [classifier + "-" + name]
maven_pom(
name = name + "_maven",
library = name,
classifiers = classifiers,
classified_libraries = classified_libraries,
visibility = visibility,
source = pom,
)
def maven_aar(name, aar, pom, visibility = None):
native.filegroup(
name = name,
srcs = [aar],
visibility = visibility,
)
maven_pom(
name = name + "_maven",
file = aar,
visibility = visibility,
source = pom,
)
def _maven_repo_impl(ctx):
include_sources = ctx.attr.include_sources
seen = {}
inputs = []
args = []
for artifact in ctx.attr.artifacts:
if not seen.get(artifact.maven.pom):
for pom in artifact.maven.parent.poms.to_list():
jars = artifact.maven.parent.jars[pom]
if not seen.get(pom):
inputs += [pom] + jars.to_list()
args += [pom.path] + [jar.path for jar in jars.to_list()]
clsjars = artifact.maven.parent.clsjars[pom]
for classifier in clsjars:
inputs += clsjars[classifier].to_list()
args += [jar.path + ":" + classifier for jar in clsjars[classifier].to_list()]
seen[pom] = True
inputs += [artifact.maven.pom] + artifact.maven.jars.to_list()
args += [artifact.maven.pom.path] + [jar.path for jar in artifact.maven.jars.to_list()]
for classifier in artifact.maven.clsjars:
inputs += artifact.maven.clsjars[classifier].to_list()
args += [jar.path + ":" + classifier for jar in artifact.maven.clsjars[classifier].to_list()]
seen[artifact.maven.pom] = True
for pom in artifact.maven.deps.poms.to_list():
jars = artifact.maven.deps.jars[pom]
if not seen.get(pom):
inputs += [pom] + jars.to_list()
args += [pom.path] + [jar.path for jar in jars.to_list()]
if include_sources:
clsjars = artifact.maven.deps.clsjars[pom]
inputs += clsjars[classifier].to_list()
args += [jar.path + ":" + classifier for jar in clsjars[classifier].to_list()]
seen[pom] = True
option_file = create_option_file(
ctx,
ctx.outputs.repo.path + ".lst",
"\n".join(args),
)
ctx.actions.run(
inputs = inputs + [option_file],
outputs = [ctx.outputs.repo],
mnemonic = "mavenrepo",
executable = ctx.executable._repo,
arguments = [ctx.outputs.repo.path, "@" + option_file.path],
)
_maven_repo = rule(
attrs = {
"artifacts": attr.label_list(),
"include_sources": attr.bool(),
"_repo": attr.label(
executable = True,
cfg = "host",
default = Label("//tools/base/bazel:repo_builder"),
allow_files = True,
),
},
outputs = {
"repo": "%{name}.zip",
},
implementation = _maven_repo_impl,
)
def maven_repo(artifacts = [], include_sources = False, **kwargs):
_maven_repo(
artifacts = [explicit_target(artifact) + "_maven" for artifact in artifacts],
include_sources = include_sources,
**kwargs
)
| true | true |
f739ccecc4d7f462ed4db6d941d6bd429b68801a | 609 | py | Python | mc2/templatetags/mc2_tags.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 4 | 2016-03-09T00:51:17.000Z | 2017-10-05T23:54:00.000Z | mc2/templatetags/mc2_tags.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 131 | 2015-11-19T16:45:23.000Z | 2018-07-24T09:36:08.000Z | mc2/templatetags/mc2_tags.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 2 | 2016-07-30T15:36:23.000Z | 2017-09-18T12:40:11.000Z | from django import template
from django_gravatar.templatetags.gravatar import gravatar_url
# Get template.Library instance
register = template.Library()
# enables the use of the gravatar_url as an assignment tag
register.assignment_tag(gravatar_url)
@register.simple_tag(takes_context=True)
def display_name(context):
    """Render the current user's full name, or the username when no name is set."""
    user = context['user']
    parts = [user.first_name, user.last_name]
    full_name = ' '.join(parts).strip()
    if full_name:
        return full_name
    return user.username
@register.filter
def multiply(value, factor):
    """Template filter returning ``value * factor``.

    Falls back to 0 when the operands cannot be multiplied (e.g. a
    non-numeric value coming out of the template context).
    """
    try:
        return value * factor
    except Exception:  # noqa -- best-effort filter: any failure renders as 0
        # The original bare ``except:`` also trapped SystemExit and
        # KeyboardInterrupt; catching Exception keeps the same
        # "never raise inside a template" behaviour without swallowing
        # interpreter-control exceptions.
        return 0
| 24.36 | 67 | 0.740558 | from django import template
from django_gravatar.templatetags.gravatar import gravatar_url
register = template.Library()
register.assignment_tag(gravatar_url)
@register.simple_tag(takes_context=True)
def display_name(context):
user = context['user']
full_name = ' '.join([user.first_name, user.last_name]).strip()
return full_name if full_name else user.username
@register.filter
def multiply(value, factor):
try:
return value * factor
except:
pass
return 0
| true | true |
f739ccf68584db68a8fa56e7d6e045a55b978fe7 | 1,558 | py | Python | homeassistant/components/fritz/const.py | ysf/core | 7c2f6a43485b126719b8df7edb4d9105193bb52b | [
"Apache-2.0"
] | null | null | null | homeassistant/components/fritz/const.py | ysf/core | 7c2f6a43485b126719b8df7edb4d9105193bb52b | [
"Apache-2.0"
] | 1 | 2022-03-31T06:33:02.000Z | 2022-03-31T06:33:02.000Z | homeassistant/components/fritz/const.py | AjayShanker-geek/core | 2242b023d383c5e8debfa813abe0871de4d30b06 | [
"Apache-2.0"
] | null | null | null | """Constants for the FRITZ!Box Tools integration."""
from typing import Literal
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzInternalError,
FritzLookUpError,
FritzServiceError,
)
from homeassistant.backports.enum import StrEnum
from homeassistant.const import Platform
class MeshRoles(StrEnum):
    """Available Mesh roles."""
    NONE = "none"  # device does not participate in a mesh
    MASTER = "master"  # presumably the coordinating mesh router -- confirm against mesh topology code
    SLAVE = "slave"  # presumably a repeater/client controlled by a master -- confirm
DOMAIN = "fritz"
# Entity platforms set up by this integration.
PLATFORMS = [
    Platform.BUTTON,
    Platform.BINARY_SENSOR,
    Platform.DEVICE_TRACKER,
    Platform.SENSOR,
    Platform.SWITCH,
]
# Config option toggling the legacy device-discovery behaviour.
CONF_OLD_DISCOVERY = "old_discovery"
DEFAULT_CONF_OLD_DISCOVERY = False
# Key under which shared integration data is stored (presumably in hass.data).
DATA_FRITZ = "fritz_data"
DSL_CONNECTION: Literal["dsl"] = "dsl"
DEFAULT_DEVICE_NAME = "Unknown device"
# 192.168.178.1:49000 -- the customary FRITZ!Box default address and
# TR-064 SOAP port; TODO confirm against fritzconnection defaults.
DEFAULT_HOST = "192.168.178.1"
DEFAULT_PORT = 49000
DEFAULT_USERNAME = ""
# Error codes surfaced to the UI (e.g. by the config flow).
ERROR_AUTH_INVALID = "invalid_auth"
ERROR_CANNOT_CONNECT = "cannot_connect"
ERROR_UPNP_NOT_CONFIGURED = "upnp_not_configured"
ERROR_UNKNOWN = "unknown_error"
FRITZ_SERVICES = "fritz_services"
# Service names registered by the integration.
SERVICE_REBOOT = "reboot"
SERVICE_RECONNECT = "reconnect"
SERVICE_CLEANUP = "cleanup"
SERVICE_SET_GUEST_WIFI_PW = "set_guest_wifi_password"
# Identifiers for the switch entity flavours.
SWITCH_TYPE_DEFLECTION = "CallDeflection"
SWITCH_TYPE_PORTFORWARD = "PortForward"
SWITCH_TYPE_WIFINETWORK = "WiFiNetwork"
# Tolerated uptime jitter -- presumably seconds; verify against the sensor code.
UPTIME_DEVIATION = 5
# fritzconnection exceptions treated as expected/recoverable by this integration.
FRITZ_EXCEPTIONS = (
    FritzActionError,
    FritzActionFailedError,
    FritzInternalError,
    FritzServiceError,
    FritzLookUpError,
)
# Wi-Fi interface index -> human-readable network label (index 4 presumably
# the guest network; confirm against the TR-064 WLANConfiguration services).
WIFI_STANDARD = {1: "2.4Ghz", 2: "5Ghz", 3: "5Ghz", 4: "Guest"}
| 21.342466 | 63 | 0.756098 |
from typing import Literal
from fritzconnection.core.exceptions import (
FritzActionError,
FritzActionFailedError,
FritzInternalError,
FritzLookUpError,
FritzServiceError,
)
from homeassistant.backports.enum import StrEnum
from homeassistant.const import Platform
class MeshRoles(StrEnum):
NONE = "none"
MASTER = "master"
SLAVE = "slave"
DOMAIN = "fritz"
PLATFORMS = [
Platform.BUTTON,
Platform.BINARY_SENSOR,
Platform.DEVICE_TRACKER,
Platform.SENSOR,
Platform.SWITCH,
]
CONF_OLD_DISCOVERY = "old_discovery"
DEFAULT_CONF_OLD_DISCOVERY = False
DATA_FRITZ = "fritz_data"
DSL_CONNECTION: Literal["dsl"] = "dsl"
DEFAULT_DEVICE_NAME = "Unknown device"
DEFAULT_HOST = "192.168.178.1"
DEFAULT_PORT = 49000
DEFAULT_USERNAME = ""
ERROR_AUTH_INVALID = "invalid_auth"
ERROR_CANNOT_CONNECT = "cannot_connect"
ERROR_UPNP_NOT_CONFIGURED = "upnp_not_configured"
ERROR_UNKNOWN = "unknown_error"
FRITZ_SERVICES = "fritz_services"
SERVICE_REBOOT = "reboot"
SERVICE_RECONNECT = "reconnect"
SERVICE_CLEANUP = "cleanup"
SERVICE_SET_GUEST_WIFI_PW = "set_guest_wifi_password"
SWITCH_TYPE_DEFLECTION = "CallDeflection"
SWITCH_TYPE_PORTFORWARD = "PortForward"
SWITCH_TYPE_WIFINETWORK = "WiFiNetwork"
UPTIME_DEVIATION = 5
FRITZ_EXCEPTIONS = (
FritzActionError,
FritzActionFailedError,
FritzInternalError,
FritzServiceError,
FritzLookUpError,
)
WIFI_STANDARD = {1: "2.4Ghz", 2: "5Ghz", 3: "5Ghz", 4: "Guest"}
| true | true |
f739cd2e996a820971f5c60d1e406e82b9a09bc2 | 7,404 | py | Python | homeassistant/components/timer/__init__.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | null | null | null | homeassistant/components/timer/__init__.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | null | null | null | homeassistant/components/timer/__init__.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | 1 | 2020-05-12T13:35:56.000Z | 2020-05-12T13:35:56.000Z | """Support for Timers."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_ICON, CONF_NAME, SERVICE_RELOAD
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "timer"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Duration used when the config entry does not specify one.
DEFAULT_DURATION = timedelta(0)
# State attribute names exposed via Timer.state_attributes.
ATTR_DURATION = "duration"
ATTR_REMAINING = "remaining"
CONF_DURATION = "duration"
# The three possible entity states of a timer.
STATUS_IDLE = "idle"
STATUS_ACTIVE = "active"
STATUS_PAUSED = "paused"
# Events fired on the Home Assistant bus on each state transition.
EVENT_TIMER_FINISHED = "timer.finished"
EVENT_TIMER_CANCELLED = "timer.cancelled"
EVENT_TIMER_STARTED = "timer.started"
EVENT_TIMER_RESTARTED = "timer.restarted"
EVENT_TIMER_PAUSED = "timer.paused"
# Entity service names registered in async_setup.
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_CANCEL = "cancel"
SERVICE_FINISH = "finish"
def _none_to_empty_dict(value):
if value is None:
return {}
return value
# Configuration schema: a mapping of timer slug -> optional {name, icon,
# duration}.  A bare ``my_timer:`` YAML entry arrives as None and is coerced
# to an empty dict by _none_to_empty_dict before validation.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            vol.All(
                _none_to_empty_dict,
                {
                    vol.Optional(CONF_NAME): cv.string,
                    vol.Optional(CONF_ICON): cv.icon,
                    vol.Optional(
                        CONF_DURATION, default=DEFAULT_DURATION
                    ): cv.time_period,
                },
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# The reload service accepts no parameters.
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass, config):
    """Set up the timer component: create entities and register services."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entities = await _async_process_config(hass, config)
    async def reload_service_handler(service_call):
        """Remove all timers and load new ones from config."""
        # async_prepare_reload re-reads the YAML config and removes the
        # existing entities; None means the new config failed validation.
        conf = await component.async_prepare_reload()
        if conf is None:
            return
        new_entities = await _async_process_config(hass, conf)
        if new_entities:
            await component.async_add_entities(new_entities)
    # Reload is admin-only, unlike the per-entity services below.
    homeassistant.helpers.service.async_register_admin_service(
        hass,
        DOMAIN,
        SERVICE_RELOAD,
        reload_service_handler,
        schema=RELOAD_SERVICE_SCHEMA,
    )
    # timer.start optionally overrides the configured duration.
    component.async_register_entity_service(
        SERVICE_START,
        {vol.Optional(ATTR_DURATION, default=DEFAULT_DURATION): cv.time_period},
        "async_start",
    )
    component.async_register_entity_service(SERVICE_PAUSE, {}, "async_pause")
    component.async_register_entity_service(SERVICE_CANCEL, {}, "async_cancel")
    component.async_register_entity_service(SERVICE_FINISH, {}, "async_finish")
    if entities:
        await component.async_add_entities(entities)
    return True
async def _async_process_config(hass, config):
    """Process config and create list of entities."""
    entities = []
    for object_id, cfg in config[DOMAIN].items():
        cfg = cfg or {}
        entities.append(
            Timer(
                hass,
                object_id,
                cfg.get(CONF_NAME),
                cfg.get(CONF_ICON),
                cfg[CONF_DURATION],
            )
        )
    return entities
class Timer(RestoreEntity):
    """Representation of a timer.

    A timer is IDLE until started, ACTIVE while counting down and PAUSED
    when stopped mid-countdown.  Every transition fires one of the
    ``timer.*`` events on the Home Assistant event bus.
    """

    def __init__(self, hass, object_id, name, icon, duration):
        """Initialize a timer."""
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)
        self._name = name
        self._state = STATUS_IDLE
        self._duration = duration
        # Time left on the countdown; equals the full duration while idle.
        self._remaining = self._duration
        self._icon = icon
        self._hass = hass
        # Absolute UTC datetime at which the active countdown ends.
        self._end = None
        # Unsubscribe callback for the scheduled finish trigger (None if none).
        self._listener = None

    @property
    def should_poll(self):
        """If entity should be polled."""
        # State is pushed via async_update_ha_state on every transition.
        return False

    @property
    def name(self):
        """Return name of the timer."""
        return self._name

    @property
    def icon(self):
        """Return the icon to be used for this entity."""
        return self._icon

    @property
    def state(self):
        """Return the current value of the timer."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_DURATION: str(self._duration),
            ATTR_REMAINING: str(self._remaining),
        }

    async def async_added_to_hass(self):
        """Call when entity is about to be added to Home Assistant."""
        # If not None, we got an initial value.
        # NOTE(review): _state is assigned STATUS_IDLE in __init__, so this
        # guard always returns and the restore logic below looks unreachable.
        # Also ``state.state == state`` compares the status string against
        # the State object itself -- presumably ``state.state`` alone was
        # intended.  Confirm against the RestoreEntity contract.
        if self._state is not None:
            return
        state = await self.async_get_last_state()
        self._state = state and state.state == state

    async def async_start(self, duration):
        """Start a timer.

        ``duration`` optionally overrides the configured duration; a paused
        timer restarted without one resumes from its remaining time.
        """
        if self._listener:
            # Cancel a previously scheduled finish trigger before rescheduling.
            self._listener()
            self._listener = None
        newduration = None
        if duration:
            newduration = duration
        event = EVENT_TIMER_STARTED
        if self._state == STATUS_PAUSED:
            event = EVENT_TIMER_RESTARTED
        self._state = STATUS_ACTIVE
        start = dt_util.utcnow()
        # Resume from the paused remainder unless a new duration was given.
        if self._remaining and newduration is None:
            self._end = start + self._remaining
        else:
            if newduration:
                self._duration = newduration
                self._remaining = newduration
            else:
                self._remaining = self._duration
            self._end = start + self._duration
        self._hass.bus.async_fire(event, {"entity_id": self.entity_id})
        # Schedule async_finished to run exactly when the countdown ends.
        self._listener = async_track_point_in_utc_time(
            self._hass, self.async_finished, self._end
        )
        await self.async_update_ha_state()

    async def async_pause(self):
        """Pause a running timer, remembering the remaining time."""
        if self._listener is None:
            # Not running -- nothing to pause.
            return
        self._listener()
        self._listener = None
        self._remaining = self._end - dt_util.utcnow()
        self._state = STATUS_PAUSED
        self._end = None
        self._hass.bus.async_fire(EVENT_TIMER_PAUSED, {"entity_id": self.entity_id})
        await self.async_update_ha_state()

    async def async_cancel(self):
        """Cancel a timer, discarding any remaining time."""
        if self._listener:
            self._listener()
            self._listener = None
        self._state = STATUS_IDLE
        self._end = None
        self._remaining = timedelta()
        self._hass.bus.async_fire(EVENT_TIMER_CANCELLED, {"entity_id": self.entity_id})
        await self.async_update_ha_state()

    async def async_finish(self):
        """Finish an active timer early, firing the finished event."""
        if self._state != STATUS_ACTIVE:
            return
        # NOTE(review): the scheduled trigger is dropped without being
        # unsubscribed here (unlike pause/cancel); async_finished's ACTIVE
        # check makes the stale callback a no-op, but the subscription leaks
        # until it fires -- confirm whether calling self._listener() first
        # was intended.
        self._listener = None
        self._state = STATUS_IDLE
        self._remaining = timedelta()
        self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
        await self.async_update_ha_state()

    async def async_finished(self, time):
        """Handle the scheduled countdown expiry, firing the finished event.

        Same body as async_finish but with the point-in-time callback
        signature required by async_track_point_in_utc_time.
        """
        if self._state != STATUS_ACTIVE:
            return
        self._listener = None
        self._state = STATUS_IDLE
        self._remaining = timedelta()
        self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
        await self.async_update_ha_state()
| 29.616 | 87 | 0.639519 | from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_ICON, CONF_NAME, SERVICE_RELOAD
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "timer"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
DEFAULT_DURATION = timedelta(0)
ATTR_DURATION = "duration"
ATTR_REMAINING = "remaining"
CONF_DURATION = "duration"
STATUS_IDLE = "idle"
STATUS_ACTIVE = "active"
STATUS_PAUSED = "paused"
EVENT_TIMER_FINISHED = "timer.finished"
EVENT_TIMER_CANCELLED = "timer.cancelled"
EVENT_TIMER_STARTED = "timer.started"
EVENT_TIMER_RESTARTED = "timer.restarted"
EVENT_TIMER_PAUSED = "timer.paused"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_CANCEL = "cancel"
SERVICE_FINISH = "finish"
def _none_to_empty_dict(value):
if value is None:
return {}
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.All(
_none_to_empty_dict,
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(
CONF_DURATION, default=DEFAULT_DURATION
): cv.time_period,
},
)
)
},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass, config):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = await _async_process_config(hass, config)
async def reload_service_handler(service_call):
conf = await component.async_prepare_reload()
if conf is None:
return
new_entities = await _async_process_config(hass, conf)
if new_entities:
await component.async_add_entities(new_entities)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(
SERVICE_START,
{vol.Optional(ATTR_DURATION, default=DEFAULT_DURATION): cv.time_period},
"async_start",
)
component.async_register_entity_service(SERVICE_PAUSE, {}, "async_pause")
component.async_register_entity_service(SERVICE_CANCEL, {}, "async_cancel")
component.async_register_entity_service(SERVICE_FINISH, {}, "async_finish")
if entities:
await component.async_add_entities(entities)
return True
async def _async_process_config(hass, config):
entities = []
for object_id, cfg in config[DOMAIN].items():
if not cfg:
cfg = {}
name = cfg.get(CONF_NAME)
icon = cfg.get(CONF_ICON)
duration = cfg[CONF_DURATION]
entities.append(Timer(hass, object_id, name, icon, duration))
return entities
class Timer(RestoreEntity):
def __init__(self, hass, object_id, name, icon, duration):
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._state = STATUS_IDLE
self._duration = duration
self._remaining = self._duration
self._icon = icon
self._hass = hass
self._end = None
self._listener = None
@property
def should_poll(self):
return False
@property
def name(self):
return self._name
@property
def icon(self):
return self._icon
@property
def state(self):
return self._state
@property
def state_attributes(self):
return {
ATTR_DURATION: str(self._duration),
ATTR_REMAINING: str(self._remaining),
}
async def async_added_to_hass(self):
if self._state is not None:
return
state = await self.async_get_last_state()
self._state = state and state.state == state
async def async_start(self, duration):
if self._listener:
self._listener()
self._listener = None
newduration = None
if duration:
newduration = duration
event = EVENT_TIMER_STARTED
if self._state == STATUS_PAUSED:
event = EVENT_TIMER_RESTARTED
self._state = STATUS_ACTIVE
start = dt_util.utcnow()
if self._remaining and newduration is None:
self._end = start + self._remaining
else:
if newduration:
self._duration = newduration
self._remaining = newduration
else:
self._remaining = self._duration
self._end = start + self._duration
self._hass.bus.async_fire(event, {"entity_id": self.entity_id})
self._listener = async_track_point_in_utc_time(
self._hass, self.async_finished, self._end
)
await self.async_update_ha_state()
async def async_pause(self):
if self._listener is None:
return
self._listener()
self._listener = None
self._remaining = self._end - dt_util.utcnow()
self._state = STATUS_PAUSED
self._end = None
self._hass.bus.async_fire(EVENT_TIMER_PAUSED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_cancel(self):
if self._listener:
self._listener()
self._listener = None
self._state = STATUS_IDLE
self._end = None
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_CANCELLED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_finish(self):
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_finished(self, time):
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
| true | true |
f739ce2a6e6760304b77efce2b6847bd3af75e62 | 764 | py | Python | setup.py | gpauloski/ProxyStore | 94946268a40132b03a17bae55766af59e442f8d4 | [
"Apache-2.0"
] | 2 | 2021-12-19T03:07:28.000Z | 2022-01-27T16:42:32.000Z | setup.py | gpauloski/ProxyStore | 94946268a40132b03a17bae55766af59e442f8d4 | [
"Apache-2.0"
] | 29 | 2021-04-23T20:33:03.000Z | 2022-02-25T22:45:10.000Z | setup.py | gpauloski/ProxyStore | 94946268a40132b03a17bae55766af59e442f8d4 | [
"Apache-2.0"
] | null | null | null | """Build ProxyStore package"""
import setuptools
with open('requirements.txt') as f:
install_requires = f.readlines()
with open('README.md') as f:
long_desc = f.read()
setuptools.setup(
name="ProxyStore",
version="0.3.0",
author="Greg Pauloski",
author_email="jgpauloski@uchicago.edu",
description="Python Lazy Object Proxy Interface for Distributed Stores",
long_description=long_desc,
url="https://github.com/gpauloski/ProxyStore",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=install_requires,
)
| 28.296296 | 76 | 0.679319 | import setuptools
with open('requirements.txt') as f:
install_requires = f.readlines()
with open('README.md') as f:
long_desc = f.read()
setuptools.setup(
name="ProxyStore",
version="0.3.0",
author="Greg Pauloski",
author_email="jgpauloski@uchicago.edu",
description="Python Lazy Object Proxy Interface for Distributed Stores",
long_description=long_desc,
url="https://github.com/gpauloski/ProxyStore",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=install_requires,
)
| true | true |
f739ce7a150eecb14150e155f791f03b18153cc5 | 1,612 | py | Python | configs/_base_/models/liteflownet/liteflownet_pre_M6S6.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 481 | 2021-11-16T07:04:23.000Z | 2022-03-31T22:21:21.000Z | configs/_base_/models/liteflownet/liteflownet_pre_M6S6.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 72 | 2021-11-16T12:25:55.000Z | 2022-03-28T13:10:45.000Z | configs/_base_/models/liteflownet/liteflownet_pre_M6S6.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 48 | 2021-11-16T06:48:46.000Z | 2022-03-30T12:46:40.000Z | model = dict(
type='LiteFlowNet',
encoder=dict(
type='NetC',
in_channels=3,
pyramid_levels=[
'level1', 'level2', 'level3', 'level4', 'level5', 'level6'
],
out_channels=(32, 32, 64, 96, 128, 192),
strides=(1, 2, 2, 2, 2, 2),
num_convs=(1, 3, 2, 2, 1, 1),
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None),
decoder=dict(
type='NetE',
in_channels=dict(level6=192),
corr_channels=dict(level6=49),
sin_channels=dict(level6=386),
rin_channels=dict(level6=195),
feat_channels=64,
mfeat_channels=(128, 64, 32),
sfeat_channels=(128, 64, 32),
rfeat_channels=(128, 128, 64, 64, 32, 32),
patch_size=dict(level6=3),
corr_cfg=dict(level6=dict(type='Correlation', max_displacement=3)),
warp_cfg=dict(type='Warp', align_corners=True, use_mask=True),
flow_div=20.,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
scaled_corr=False,
regularized_flow=False,
extra_training_loss=False,
flow_loss=dict(
type='MultiLevelEPE',
weights=dict(level6=0.32),
p=2,
reduction='sum'),
),
init_cfg=dict(
type='Kaiming',
nonlinearity='leaky_relu',
layer=['Conv2d', 'ConvTranspose2d'],
mode='fan_in',
bias=0),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(),
)
| 31 | 75 | 0.560174 | model = dict(
type='LiteFlowNet',
encoder=dict(
type='NetC',
in_channels=3,
pyramid_levels=[
'level1', 'level2', 'level3', 'level4', 'level5', 'level6'
],
out_channels=(32, 32, 64, 96, 128, 192),
strides=(1, 2, 2, 2, 2, 2),
num_convs=(1, 3, 2, 2, 1, 1),
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None),
decoder=dict(
type='NetE',
in_channels=dict(level6=192),
corr_channels=dict(level6=49),
sin_channels=dict(level6=386),
rin_channels=dict(level6=195),
feat_channels=64,
mfeat_channels=(128, 64, 32),
sfeat_channels=(128, 64, 32),
rfeat_channels=(128, 128, 64, 64, 32, 32),
patch_size=dict(level6=3),
corr_cfg=dict(level6=dict(type='Correlation', max_displacement=3)),
warp_cfg=dict(type='Warp', align_corners=True, use_mask=True),
flow_div=20.,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
scaled_corr=False,
regularized_flow=False,
extra_training_loss=False,
flow_loss=dict(
type='MultiLevelEPE',
weights=dict(level6=0.32),
p=2,
reduction='sum'),
),
init_cfg=dict(
type='Kaiming',
nonlinearity='leaky_relu',
layer=['Conv2d', 'ConvTranspose2d'],
mode='fan_in',
bias=0),
train_cfg=dict(),
test_cfg=dict(),
)
| true | true |
f739cec23468cb3e26a87e0ea60a3ce72d3977a0 | 12,370 | py | Python | legacy-v6.0.2/log_fortiguard/fortios_log_fortiguard_override_filter.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | legacy-v6.0.2/log_fortiguard/fortios_log_fortiguard_override_filter.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | legacy-v6.0.2/log_fortiguard/fortios_log_fortiguard_override_filter.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortiguard_override_filter
short_description: Override filters for FortiCloud in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_fortiguard feature and override_filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_fortiguard_override_filter:
description:
- Override filters for FortiCloud.
default: null
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
choices:
- enable
- disable
dlp-archive:
description:
- Enable/disable DLP archive logging.
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
choices:
- enable
- disable
filter:
description:
- FortiCloud log filter.
filter-type:
description:
- Include/exclude logs that match the filter.
choices:
- include
- exclude
forward-traffic:
description:
- Enable/disable forward traffic logging.
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
choices:
- enable
- disable
local-traffic:
description:
- Enable/disable local in or out traffic logging.
choices:
- enable
- disable
multicast-traffic:
description:
- Enable/disable multicast traffic logging.
choices:
- enable
- disable
netscan-discovery:
description:
- Enable/disable netscan discovery event logging.
choices:
netscan-vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
choices:
severity:
description:
- Lowest severity level to log.
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer-traffic:
description:
- Enable/disable sniffer traffic logging.
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override filters for FortiCloud.
fortios_log_fortiguard_override_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortiguard_override_filter:
anomaly: "enable"
dlp-archive: "enable"
dns: "enable"
filter: "<your_own_value>"
filter-type: "include"
forward-traffic: "enable"
gtp: "enable"
local-traffic: "enable"
multicast-traffic: "enable"
netscan-discovery: "<your_own_value>"
netscan-vulnerability: "<your_own_value>"
severity: "emergency"
sniffer-traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
    """Open an authenticated FortiOS API session described by *data*.

    HTTPS is used unless the caller explicitly supplied ``https: False``;
    debug output is switched on for the whole session.
    """
    api_host = data['host']
    api_user = data['username']
    api_pass = data['password']
    fos.debug('on')
    # A missing 'https' key defaults to secure transport.
    secure = data.get('https', True)
    fos.https('on' if secure else 'off')
    fos.login(api_host, api_user, api_pass)
def filter_log_fortiguard_override_filter_data(json):
    """Project *json* onto the option keys understood by the endpoint.

    Unknown keys and keys whose value is ``None`` are dropped, so the
    payload sent to FortiOS contains only explicitly provided settings.
    (The parameter is named ``json`` by the module generator and shadows
    the stdlib module inside this function only.)
    """
    option_list = ['anomaly', 'dlp-archive', 'dns',
                   'filter', 'filter-type', 'forward-traffic',
                   'gtp', 'local-traffic', 'multicast-traffic',
                   'netscan-discovery', 'netscan-vulnerability', 'severity',
                   'sniffer-traffic', 'ssh', 'voip']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def log_fortiguard_override_filter(data, fos):
    """Push the override-filter configuration to the log.fortiguard endpoint.

    Returns the raw FortiOS response dict from ``fos.set``.
    """
    vdom = data['vdom']
    payload = filter_log_fortiguard_override_filter_data(
        data['log_fortiguard_override_filter'])
    return fos.set('log.fortiguard',
                   'override-filter',
                   data=payload,
                   vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS API response represents success.

    A response counts as successful when FortiOS reports "success", or when
    a DELETE came back 404 — the object was already gone, which is the
    desired end state for a deletion. (Guard-clause form also removes the
    implicit ``or``/``and`` precedence of the original one-liner.)
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_fortiguard(data, fos):
    """Log in, apply the override-filter configuration, and log out.

    Returns a 3-tuple ``(is_error, changed, response)`` consumed by main().

    Fixes over the generated original:
    - If ``data['log_fortiguard_override_filter']`` was missing/empty, the
      original fell through to an unbound ``resp`` and crashed with an
      opaque NameError; raise an explicit ValueError instead.
    - ``fos.logout()`` is now in a ``finally`` block, so the API session is
      released even when the set() call raises.
    """
    login(data, fos)
    try:
        if not data['log_fortiguard_override_filter']:
            raise ValueError(
                "log_fortiguard_override_filter configuration is required")
        resp = log_fortiguard_override_filter(data, fos)
    finally:
        fos.logout()
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible entry point: parse module arguments and apply the config.

    Builds the argument spec mirroring the FOS 6.0.2 override-filter
    options, runs the configuration call, and reports the result via
    exit_json/fail_json.
    """
    # Argument spec: top-level connection options plus the nested
    # override-filter option dict. Empty "choices" lists for the two
    # netscan options mirror the (empty) choices in DOCUMENTATION.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "log_fortiguard_override_filter": {
            "required": False, "type": "dict",
            "options": {
                "anomaly": {"required": False, "type": "str",
                            "choices": ["enable", "disable"]},
                "dlp-archive": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "dns": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "filter": {"required": False, "type": "str"},
                "filter-type": {"required": False, "type": "str",
                                "choices": ["include", "exclude"]},
                "forward-traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "gtp": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "local-traffic": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "multicast-traffic": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "netscan-discovery": {"required": False, "type": "str",
                                      "choices": []},
                "netscan-vulnerability": {"required": False, "type": "str",
                                          "choices": []},
                "severity": {"required": False, "type": "str",
                             "choices": ["emergency", "alert", "critical",
                                         "error", "warning", "notification",
                                         "information", "debug"]},
                "sniffer-traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "ssh": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "voip": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # Import inside main() so a missing fortiosapi yields a clean
    # module failure instead of a traceback at import time.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" is the generator's boilerplate
        # message; the FortiOS response in `meta` carries the real detail.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 32.552632 | 99 | 0.536863 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortiguard_override_filter
short_description: Override filters for FortiCloud in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_fortiguard feature and override_filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_fortiguard_override_filter:
description:
- Override filters for FortiCloud.
default: null
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
choices:
- enable
- disable
dlp-archive:
description:
- Enable/disable DLP archive logging.
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
choices:
- enable
- disable
filter:
description:
- FortiCloud log filter.
filter-type:
description:
- Include/exclude logs that match the filter.
choices:
- include
- exclude
forward-traffic:
description:
- Enable/disable forward traffic logging.
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
choices:
- enable
- disable
local-traffic:
description:
- Enable/disable local in or out traffic logging.
choices:
- enable
- disable
multicast-traffic:
description:
- Enable/disable multicast traffic logging.
choices:
- enable
- disable
netscan-discovery:
description:
- Enable/disable netscan discovery event logging.
choices:
netscan-vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
choices:
severity:
description:
- Lowest severity level to log.
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer-traffic:
description:
- Enable/disable sniffer traffic logging.
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override filters for FortiCloud.
fortios_log_fortiguard_override_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortiguard_override_filter:
anomaly: "enable"
dlp-archive: "enable"
dns: "enable"
filter: "<your_own_value>"
filter-type: "include"
forward-traffic: "enable"
gtp: "enable"
local-traffic: "enable"
multicast-traffic: "enable"
netscan-discovery: "<your_own_value>"
netscan-vulnerability: "<your_own_value>"
severity: "emergency"
sniffer-traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_fortiguard_override_filter_data(json):
option_list = ['anomaly', 'dlp-archive', 'dns',
'filter', 'filter-type', 'forward-traffic',
'gtp', 'local-traffic', 'multicast-traffic',
'netscan-discovery', 'netscan-vulnerability', 'severity',
'sniffer-traffic', 'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def log_fortiguard_override_filter(data, fos):
vdom = data['vdom']
log_fortiguard_override_filter_data = data['log_fortiguard_override_filter']
filtered_data = filter_log_fortiguard_override_filter_data(log_fortiguard_override_filter_data)
return fos.set('log.fortiguard',
'override-filter',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_fortiguard(data, fos):
login(data, fos)
if data['log_fortiguard_override_filter']:
resp = log_fortiguard_override_filter(data, fos)
fos.logout()
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_fortiguard_override_filter": {
"required": False, "type": "dict",
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dlp-archive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter-type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan-discovery": {"required": False, "type": "str",
"choices": []},
"netscan-vulnerability": {"required": False, "type": "str",
"choices": []},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| true | true |
f739d016c17a42d4c6f409b32333eac0fd1cc295 | 294 | py | Python | dependencies/panda/direct/particles/__init__.py | SuperM0use24/Project-Altis | 8dec7518a4d3f902cee261fd522ebebc3c171a42 | [
"Apache-2.0"
] | null | null | null | dependencies/panda/direct/particles/__init__.py | SuperM0use24/Project-Altis | 8dec7518a4d3f902cee261fd522ebebc3c171a42 | [
"Apache-2.0"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/direct/particles/__init__.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2021-04-09T00:02:59.000Z | 2021-04-09T00:02:59.000Z | """
This package contains the high-level Python interface to the particle
system.
Also see the :mod:`panda3d.physics` module, which contains the C++
implementation of the particle system.
For more information about the particle system, see the :ref:`particle-effects`
page in the manual.
"""
| 26.727273 | 79 | 0.772109 | true | true | |
f739d0cf94f476745636cab0d4f068f6565cd7a1 | 4,166 | py | Python | 0_fast_bert/prediction.py | zaradana/Fast_BERT | 7ee96e99ba95468c29fe3542fe8071e0402ec0f6 | [
"Apache-2.0"
] | null | null | null | 0_fast_bert/prediction.py | zaradana/Fast_BERT | 7ee96e99ba95468c29fe3542fe8071e0402ec0f6 | [
"Apache-2.0"
] | null | null | null | 0_fast_bert/prediction.py | zaradana/Fast_BERT | 7ee96e99ba95468c29fe3542fe8071e0402ec0f6 | [
"Apache-2.0"
] | null | null | null | import os
import torch
from .data_cls import BertDataBunch
from .data_ner import BertNERDataBunch
from .learner_cls import BertLearner
from .learner_ner import BertNERLearner
import time
from transformers import AutoTokenizer
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
class BertClassificationPredictor(object):
    """Inference-only wrapper around a fine-tuned fast_bert classifier.

    Loads a tokenizer and a BertLearner from `model_path` at construction
    time and exposes simple predict / predict_batch calls.
    """
    def __init__(
        self,
        model_path,
        label_path,
        multi_label=False,
        model_type="bert",
        use_fast_tokenizer=True,
        do_lower_case=True,
        device=None,
    ):
        # Default to GPU when available; callers may pass an explicit
        # torch.device to override.
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.model_path = model_path
        self.label_path = label_path
        self.multi_label = multi_label
        self.model_type = model_type
        self.do_lower_case = do_lower_case
        self.device = device
        # Use auto-tokenizer loaded from the same directory as the model,
        # so vocab and weights stay in sync.
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path, use_fast=use_fast_tokenizer
        )
        self.learner = self.get_learner()
    def get_learner(self):
        """Build a BertLearner for inference only.

        The databunch is created with no train/val files (no_cache=True),
        so it supplies only labels and tokenization settings.
        """
        databunch = BertDataBunch(
            self.label_path,
            self.label_path,
            self.tokenizer,
            train_file=None,
            val_file=None,
            batch_size_per_gpu=32,
            max_seq_length=512,
            multi_gpu=False,
            multi_label=self.multi_label,
            model_type=self.model_type,
            no_cache=True,
        )
        learner = BertLearner.from_pretrained_model(
            databunch,
            self.model_path,
            metrics=[],
            device=self.device,
            logger=None,
            output_dir=None,
            warmup_steps=0,
            multi_gpu=False,
            is_fp16=False,
            multi_label=self.multi_label,
            logging_steps=0,
        )
        return learner
    def predict_batch(self, texts):
        """Classify a list of texts; delegates to the learner."""
        return self.learner.predict_batch(texts)
    def predict(self, text):
        """Classify a single text; returns the first batch result."""
        predictions = self.predict_batch([text])[0]
        return predictions
class BertNERPredictor(object):
    """Inference-only wrapper around a fine-tuned fast_bert NER model.

    Loads a tokenizer and a BertNERLearner from `model_path` at
    construction time and exposes predict / predict_batch calls.
    """
    def __init__(
        self,
        model_path,
        label_path,
        model_type="bert",
        use_fast_tokenizer=True,
        do_lower_case=True,
        device=None,
    ):
        # Default to GPU when available unless an explicit device is given.
        self.device = device if device is not None else torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model_path = model_path
        self.label_path = label_path
        self.model_type = model_type
        self.do_lower_case = do_lower_case
        # Tokenizer comes from the same directory as the weights.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path, use_fast=use_fast_tokenizer)
        self.learner = self.get_learner()
    def get_learner(self):
        """Build a BertNERLearner for inference only (no train/val data)."""
        databunch = BertNERDataBunch(
            self.label_path,
            self.tokenizer,
            train_file=None,
            val_file=None,
            batch_size_per_gpu=32,
            max_seq_length=512,
            multi_gpu=False,
            model_type=self.model_type,
            no_cache=True,
        )
        return BertNERLearner.from_pretrained_model(
            databunch,
            self.model_path,
            device=self.device,
            logger=None,
            output_dir=None,
            warmup_steps=0,
            multi_gpu=False,
            is_fp16=False,
            logging_steps=0,
        )
    def predict_batch(self, texts, group=True, exclude_entities=["O"]):
        """Run NER over each text, keeping only texts that yielded entities.

        Returns a list of {"text": ..., "results": ...} dicts. (The mutable
        default for exclude_entities is kept for interface compatibility;
        it is never mutated here.)
        """
        batch_results = []
        for sample in texts:
            entities = self.predict(
                sample, group=group, exclude_entities=exclude_entities)
            if entities:
                batch_results.append({"text": sample, "results": entities})
        return batch_results
    def predict(self, text, group=True, exclude_entities=["O"]):
        """Run NER over a single text; delegates to the learner."""
        return self.learner.predict(
            text, group=group, exclude_entities=exclude_entities)
| 27.589404 | 95 | 0.598656 | import os
import torch
from .data_cls import BertDataBunch
from .data_ner import BertNERDataBunch
from .learner_cls import BertLearner
from .learner_ner import BertNERLearner
import time
from transformers import AutoTokenizer
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
class BertClassificationPredictor(object):
def __init__(
self,
model_path,
label_path,
multi_label=False,
model_type="bert",
use_fast_tokenizer=True,
do_lower_case=True,
device=None,
):
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model_path = model_path
self.label_path = label_path
self.multi_label = multi_label
self.model_type = model_type
self.do_lower_case = do_lower_case
self.device = device
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_path, use_fast=use_fast_tokenizer
)
self.learner = self.get_learner()
def get_learner(self):
databunch = BertDataBunch(
self.label_path,
self.label_path,
self.tokenizer,
train_file=None,
val_file=None,
batch_size_per_gpu=32,
max_seq_length=512,
multi_gpu=False,
multi_label=self.multi_label,
model_type=self.model_type,
no_cache=True,
)
learner = BertLearner.from_pretrained_model(
databunch,
self.model_path,
metrics=[],
device=self.device,
logger=None,
output_dir=None,
warmup_steps=0,
multi_gpu=False,
is_fp16=False,
multi_label=self.multi_label,
logging_steps=0,
)
return learner
def predict_batch(self, texts):
return self.learner.predict_batch(texts)
def predict(self, text):
predictions = self.predict_batch([text])[0]
return predictions
class BertNERPredictor(object):
def __init__(
self,
model_path,
label_path,
model_type="bert",
use_fast_tokenizer=True,
do_lower_case=True,
device=None,
):
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model_path = model_path
self.label_path = label_path
self.model_type = model_type
self.do_lower_case = do_lower_case
self.device = device
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_path, use_fast=use_fast_tokenizer
)
self.learner = self.get_learner()
def get_learner(self):
databunch = BertNERDataBunch(
self.label_path,
self.tokenizer,
train_file=None,
val_file=None,
batch_size_per_gpu=32,
max_seq_length=512,
multi_gpu=False,
model_type=self.model_type,
no_cache=True,
)
learner = BertNERLearner.from_pretrained_model(
databunch,
self.model_path,
device=self.device,
logger=None,
output_dir=None,
warmup_steps=0,
multi_gpu=False,
is_fp16=False,
logging_steps=0,
)
return learner
def predict_batch(self, texts, group=True, exclude_entities=["O"]):
predictions = []
for text in texts:
pred = self.predict(text, group=group, exclude_entities=exclude_entities)
if pred:
predictions.append({"text": text, "results": pred})
return predictions
def predict(self, text, group=True, exclude_entities=["O"]):
predictions = self.learner.predict(
text, group=group, exclude_entities=exclude_entities
)
return predictions
| true | true |
f739d25a0e8a088137e6a240eccd7ec73bbd7535 | 2,698 | py | Python | Pythontutorials/30_copy_deepcopy.py | Asurada2015/Python-Data-Analysis-Learning-Notes | 5697c8de3e5fd6562e77195b198b2d8ff836008e | [
"MIT"
] | 2 | 2018-02-07T02:20:08.000Z | 2019-07-07T06:21:17.000Z | Pythontutorials/30_copy_deepcopy.py | Asurada2015/Mofan_demo | 5697c8de3e5fd6562e77195b198b2d8ff836008e | [
"MIT"
] | null | null | null | Pythontutorials/30_copy_deepcopy.py | Asurada2015/Mofan_demo | 5697c8de3e5fd6562e77195b198b2d8ff836008e | [
"MIT"
] | 3 | 2019-11-26T00:59:12.000Z | 2021-09-09T14:40:43.000Z | """python 中的copy分为浅复制和深复制"""
import copy
#
# a = [1, 2, 3]
# b = a # 这时候a和b指向的是硬盘中的同一片区域,用id显示的话即使id(a)==id(b)
# b[1] = 22 # 改变a中值b中值会同时被改变
# print(a) # [1, 22, 3]
# print(id(a) == id(b)) # True
#
# # deep copy
# c = copy.deepcopy(a)
# print(id(a) == id(c)) # False
# c[1] = 2
# print(a) # [1, 22, 3] 改变c中值,a中值不会改变
# a[1] = 111
# print(c) # [1, 2, 3] 改变a中值,C中值不会改变
# shallow copy
a_list = [1, 2, 3]
a_tuple = (1, 2, 3)
a1_list = [1, 2, (1, 2, 3), [1, 2, 3]]
a1_tuple = (1, 2, (1, 2, 3), [1, 2, 3])
a_shallow_list = copy.copy(a_list)
a_shallow_tuple = copy.copy(a_tuple)
a1_shallow_list = copy.copy(a1_list)
a1_shallow_tuple = copy.copy(a1_tuple)
a_deep_list = copy.deepcopy(a_list)
a_deep_tuple = copy.deepcopy(a_tuple)
a1_deep_list = copy.deepcopy(a1_list)
a1_deep_tuple = copy.deepcopy(a1_tuple)
# 比较基本可变对象,深复制和浅复制区别
# print("id of a_list", id(a_list), "id of a_shallow_list", id(a_shallow_list), "a_deep_list", id(a_deep_list))
# id of a_list 2249250705672 id of a_shallow_list 2249201900552 a_deep_list 2249201900424
# 实验表明均指向一个新的地址
# 其中不可变对象地址
# print("id of a_list[0]", id(a_list[0]), "id of a_shallow_list[0]", id(a_shallow_list[0]), "a_deep_list[0]", id(a_deep_list[0]))
# id of a_list[0] 1887096560 id of a_shallow_list[0] 1887096560 a_deep_list[0] 1887096560
# 基本可变对象中不可变对象的地址不会改变
# 比较基本不可变对象,深复制和浅复制区别
# print("id of a_tuple", id(a_tuple), "a_shallow_tuple", id(a_shallow_tuple), "a_deep_tuple", id(a_deep_tuple))
# print("id of a_tuple[0]", id(a_tuple[0]), "a_shallow_tuple[0]", id(a_shallow_tuple[0]), "a_deep_tuple[0]",
# id(a_deep_tuple[0]))
# id of a_tuple 2344837083280 a_shallow_tuple 2344837083280 a_deep_tuple 2344837083280
# id of a_tuple[0] 1885130480 a_shallow_tuple[0] 1885130480 a_deep_tuple[0] 1885130480
# 复合嵌套不可变元素的深复制和浅复制区别
# print("id of a1_tuple", id(a1_tuple), "a1_shallow_tuple", id(a1_shallow_tuple), "a1_deep_tuple", id(a1_deep_tuple))
# print("id of a1_tuple[3]", id(a1_tuple[3]), "a1_shallow_tuple[3]", id(a1_shallow_tuple[3]), "a1_deep_tuple[3]",
# id(a1_deep_tuple[3]))
# id of a1_tuple 2498218636296 a1_shallow_tuple 2498218636296 a1_deep_tuple 2498218638776
# id of a1_tuple[3] 2498267415048 a1_shallow_tuple[3] 2498267415048 a1_deep_tuple[3] 2498218716040
# 复合嵌套可变元素的深复制和浅复制区别
print("id of a1_list", id(a1_list), "id of a1_shallow_list", id(a1_shallow_list), "a1_deep_list", id(a1_deep_list))
print("id of a1_list[3]", id(a1_list[3]), "id of a1_shallow_list[3]", id(a1_shallow_list[3]), "a1_deep_list[3]",
id(a1_deep_list[3]))
# id of a1_list 1453555407752 id of a1_shallow_list 1453555447432 a1_deep_list 1453555477384
# id of a1_list[3] 1453604277640 id of a1_shallow_list[3] 1453604277640 a1_deep_list[3] 1453555448968 | 42.15625 | 129 | 0.723128 | import copy
= copy.copy(a_list)
a_shallow_tuple = copy.copy(a_tuple)
a1_shallow_list = copy.copy(a1_list)
a1_shallow_tuple = copy.copy(a1_tuple)
a_deep_list = copy.deepcopy(a_list)
a_deep_tuple = copy.deepcopy(a_tuple)
a1_deep_list = copy.deepcopy(a1_list)
a1_deep_tuple = copy.deepcopy(a1_tuple)
print("id of a1_list", id(a1_list), "id of a1_shallow_list", id(a1_shallow_list), "a1_deep_list", id(a1_deep_list))
print("id of a1_list[3]", id(a1_list[3]), "id of a1_shallow_list[3]", id(a1_shallow_list[3]), "a1_deep_list[3]",
id(a1_deep_list[3]))
| true | true |
f739d462164e17c1db876b8fdff0984c82370d5a | 3,190 | py | Python | ActivityManager/settings.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | null | null | null | ActivityManager/settings.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | 5 | 2020-06-06T01:47:24.000Z | 2022-02-10T14:42:15.000Z | ActivityManager/settings.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for ActivityManager project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded key is acceptable for local development only;
# load it from the environment (e.g. os.environ["DJANGO_SECRET_KEY"])
# before deploying.
SECRET_KEY = 'hh-h#4=^c#^#k!%$f(o84m1mt)0*5woasgpx=5---9%_cfix49'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty means requests are only accepted for localhost-style hosts while
# DEBUG is on; add real hostnames here before turning DEBUG off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'activity',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ActivityManager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ActivityManager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL="activity.UserProfile" | 25.725806 | 91 | 0.697492 |
import os
# Absolute path of the project root (two levels above this settings file),
# used to build filesystem paths such as the SQLite database location.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): SECRET_KEY is hardcoded in source. For any non-local
# deployment it should be loaded from the environment instead of committed.
SECRET_KEY = 'hh-h#4=^c#^#k!%$f(o84m1mt)0*5woasgpx=5---9%_cfix49'
# NOTE(review): DEBUG=True exposes detailed error pages; must be False in
# production, together with a populated ALLOWED_HOSTS.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'activity',
]
# Request/response processing pipeline.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Module containing the project's root URL patterns.
ROOT_URLCONF = 'ActivityManager.urls'
# Template engine configuration; templates are discovered per-app.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# WSGI callable used by application servers.
WSGI_APPLICATION = 'ActivityManager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development SQLite database stored at the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Swap the default auth user model for the project's custom profile model.
AUTH_USER_MODEL="activity.UserProfile"
f739d68103d34c6c0a9961dc179a461964781ccd | 10,202 | py | Python | Wrappers/Python/test/test_TranslateFunction.py | rijobro/CCPi-Framework | ff08216d4e6fef84659b43155c5c52484b1dc543 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/test/test_TranslateFunction.py | rijobro/CCPi-Framework | ff08216d4e6fef84659b43155c5c52484b1dc543 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/test/test_TranslateFunction.py | rijobro/CCPi-Framework | ff08216d4e6fef84659b43155c5c52484b1dc543 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).
# Copyright 2017 UKRI-STFC
# Copyright 2017 University of Manchester
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from numbers import Number

import numpy

from ccpi.framework import BlockGeometry, DataContainer, ImageGeometry
from ccpi.optimisation.functions import Function, L1Norm, ScaledFunction, \
                                        LeastSquares, L2NormSquared, \
                                        KullbackLeibler, ZeroFunction, ConstantFunction, TranslateFunction
from ccpi.optimisation.operators import Identity
''' Here we test the TranslateFunction class (``centered_at``) against the
    equivalent un-shifted functions (L2NormSquared, L1Norm, and their
    scaled variants), comparing the results of the call, gradient,
    proximal and proximal-conjugate methods.
'''
class TestFunction(unittest.TestCase):
    """Checks that ``TranslateFunction`` (built via ``centered_at``) behaves
    identically to the corresponding function constructed directly with a
    shift ``b`` (or with no shift at all).
    """

    def assertBlockDataContainerEqual(self, container1, container2):
        """Recursively assert that two BlockDataContainers hold equal data.

        Leaf items that are DataContainers are compared element-wise via
        :meth:`assertNumpyArrayEqual`; nested containers recurse.
        """
        print("assert Block Data Container Equal")
        self.assertTrue(issubclass(container1.__class__, container2.__class__))
        for col in range(container1.shape[0]):
            if issubclass(container1.get_item(col).__class__, DataContainer):
                print("Checking col ", col)
                self.assertNumpyArrayEqual(
                    container1.get_item(col).as_array(),
                    container2.get_item(col).as_array()
                )
            else:
                self.assertBlockDataContainerEqual(container1.get_item(col), container2.get_item(col))

    def assertNumpyArrayEqual(self, first, second):
        """Assert two array-likes are exactly equal, printing any mismatch."""
        res = True
        try:
            numpy.testing.assert_array_equal(first, second)
        except AssertionError as err:
            res = False
            print(err)
        self.assertTrue(res)

    def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
        """Assert two array-likes are equal up to ``decimal`` decimal places."""
        res = True
        try:
            numpy.testing.assert_array_almost_equal(first, second, decimal)
        except AssertionError as err:
            res = False
            print(err)
            print("expected " , second)
            print("actual " , first)
        self.assertTrue(res)

    def test_TranslateFunction(self):
        """Compare plain and ``centered_at`` variants of L1/L2 norm functions."""
        ig = ImageGeometry(4,4)
        tmp = ig.allocate('random_int')
        b = ig.allocate('random_int')
        scalar = 0.4
        tau = 0.05

        # Each entry of list1 must behave identically to the corresponding
        # entry of list1_shift (translated by zero or by b).
        list1 = [ L2NormSquared(), scalar * L2NormSquared(), scalar * L2NormSquared(b=b),
                  L1Norm(), scalar * L1Norm(), scalar * L1Norm(b=b)]
        list1_shift = [ L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(b),
                        L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(b)]

        out_gradient1 = ig.allocate()
        out_gradient2 = ig.allocate()
        out_proximal1 = ig.allocate()
        out_proximal2 = ig.allocate()
        out_proximal_conj1 = ig.allocate()
        out_proximal_conj2 = ig.allocate()

        for func, func_shift in zip(list1, list1_shift):
            # check call
            res1 = func(tmp)
            res2 = func_shift(tmp)
            self.assertNumpyArrayAlmostEqual(res1, res2)

            try:
                # check gradient (L1Norm raises NotImplementedError here)
                res1_gradient = func.gradient(tmp)
                res2_gradient = func_shift.gradient(tmp)
                self.assertNumpyArrayAlmostEqual(res1_gradient.as_array(), res2_gradient.as_array())

                # check gradient with preallocated output
                func.gradient(tmp, out = out_gradient1)
                func_shift.gradient(tmp, out = out_gradient2)
                self.assertNumpyArrayAlmostEqual(out_gradient1.as_array(), out_gradient2.as_array())
            except NotImplementedError:
                print('Function is not differentiable')

            # check proximal
            func.proximal(tmp, tau, out = out_proximal1)
            func_shift.proximal(tmp, tau, out = out_proximal2)
            self.assertNumpyArrayAlmostEqual(out_proximal1.as_array(), out_proximal2.as_array())

            # check proximal conjugate
            func.proximal_conjugate(tmp, tau, out = out_proximal_conj1)
            func_shift.proximal_conjugate(tmp, tau, out = out_proximal_conj2)
            # BUG FIX: the original compared out_proximal_conj1 with itself,
            # so the proximal_conjugate results were never actually verified.
            self.assertNumpyArrayAlmostEqual(out_proximal_conj1.as_array(), out_proximal_conj2.as_array())
if __name__ == '__main__':
    # Allow running this module directly: execute the translation-function
    # test without going through a unittest runner.
    # (A large block of commented-out scratch/exploration code that followed
    # here has been removed; see version control history if needed.)
    t = TestFunction()
    t.test_TranslateFunction()
from ccpi.optimisation.functions import Function, L1Norm, ScaledFunction, \
LeastSquares, L2NormSquared, \
KullbackLeibler, ZeroFunction, ConstantFunction, TranslateFunction
from ccpi.optimisation.operators import Identity
from ccpi.framework import ImageGeometry, BlockGeometry
import unittest
import numpy
from numbers import Number
class TestFunction(unittest.TestCase):
    # Verifies that TranslateFunction (built via ``centered_at``) behaves the
    # same as the corresponding function constructed directly with a shift b.
    def assertBlockDataContainerEqual(self, container1, container2):
        # Recursively compare two BlockDataContainers item by item.
        # NOTE(review): ``DataContainer`` is referenced below but is not
        # imported at the top of this file — this branch would raise
        # NameError if exercised; confirm the intended import.
        print ("assert Block Data Container Equal")
        self.assertTrue(issubclass(container1.__class__, container2.__class__))
        for col in range(container1.shape[0]):
            if issubclass(container1.get_item(col).__class__, DataContainer):
                print ("Checking col ", col)
                self.assertNumpyArrayEqual(
                    container1.get_item(col).as_array(),
                    container2.get_item(col).as_array()
                )
            else:
                # Nested BlockDataContainer: recurse.
                self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col))
    def assertNumpyArrayEqual(self, first, second):
        # Assert exact element-wise equality of two array-likes.
        res = True
        try:
            numpy.testing.assert_array_equal(first, second)
        except AssertionError as err:
            res = False
            print(err)
        self.assertTrue(res)
    def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
        # Assert element-wise equality up to ``decimal`` decimal places.
        res = True
        try:
            numpy.testing.assert_array_almost_equal(first, second, decimal)
        except AssertionError as err:
            res = False
            print(err)
            print("expected " , second)
            print("actual " , first)
        self.assertTrue(res)
    def test_TranslateFunction(self):
        # Compare plain and ``centered_at`` variants of L1/L2 norm functions
        # on call, gradient, proximal and proximal_conjugate.
        ig = ImageGeometry(4,4)
        tmp = ig.allocate('random_int')
        b = ig.allocate('random_int')
        scalar = 0.4
        tau = 0.05
        # list1[i] must behave identically to list1_shift[i]
        # (translated by zero or by b).
        list1 = [ L2NormSquared(), scalar * L2NormSquared(), scalar * L2NormSquared(b=b),
                  L1Norm(), scalar * L1Norm(), scalar * L1Norm(b=b)]
        list1_shift = [ L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(b),
                        L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(b)]
        out_gradient1 = ig.allocate()
        out_gradient2 = ig.allocate()
        out_proximal1 = ig.allocate()
        out_proximal2 = ig.allocate()
        out_proximal_conj1 = ig.allocate()
        out_proximal_conj2 = ig.allocate()
        for func, func_shift in zip(list1, list1_shift):
            # call must agree
            res1 = func(tmp)
            res2 = func_shift(tmp)
            self.assertNumpyArrayAlmostEqual(res1, res2)
            try:
                # gradient must agree (L1Norm raises NotImplementedError)
                res1_gradient = func.gradient(tmp)
                res2_gradient = func_shift.gradient(tmp)
                self.assertNumpyArrayAlmostEqual(res1_gradient.as_array(), res2_gradient.as_array())
                # gradient with preallocated output must agree
                func.gradient(tmp, out = out_gradient1)
                func_shift.gradient(tmp, out = out_gradient2)
                self.assertNumpyArrayAlmostEqual(out_gradient1.as_array(), out_gradient2.as_array())
            except NotImplementedError:
                print('Function is not differentiable')
            # proximal with preallocated output must agree
            func.proximal(tmp, tau, out = out_proximal1)
            func_shift.proximal(tmp, tau, out = out_proximal2)
            self.assertNumpyArrayAlmostEqual(out_proximal1.as_array(), out_proximal2.as_array())
            func.proximal_conjugate(tmp, tau, out = out_proximal_conj1)
            func_shift.proximal_conjugate(tmp, tau, out = out_proximal_conj2)
            # NOTE(review): this compares out_proximal_conj1 with ITSELF —
            # almost certainly a typo for out_proximal_conj2; as written the
            # proximal_conjugate results are never actually checked.
            self.assertNumpyArrayAlmostEqual(out_proximal_conj1.as_array(), out_proximal_conj1.as_array())
if __name__ == '__main__':
    # Ad-hoc entry point: run the single test directly instead of going
    # through a unittest runner.
    t = TestFunction()
    t.test_TranslateFunction()
| true | true |
f739d6991150ce33d13f2d8387af0a800604a93f | 2,626 | py | Python | fairypptx/_shape/maker/__init__.py | Sillte/fairypptx | 926d277e7692bcf1dd0d365e9c0e26e8d495bafb | [
"MIT"
] | null | null | null | fairypptx/_shape/maker/__init__.py | Sillte/fairypptx | 926d277e7692bcf1dd0d365e9c0e26e8d495bafb | [
"MIT"
] | null | null | null | fairypptx/_shape/maker/__init__.py | Sillte/fairypptx | 926d277e7692bcf1dd0d365e9c0e26e8d495bafb | [
"MIT"
] | null | null | null | from fairypptx.color import Color, make_hue_circle
from fairypptx.shape import Shape, Shapes
class PaletteMaker:
"""Make a Color Pallete.
"""
default_color = Color((25, 239, 198))
def __init__(self,
fontsize=18,
line=3,
colors=None):
self.fontsize = fontsize
self.line = line
self._prepared_colors = colors
def __call__(self, contents=None):
if contents is None:
contents = self._gen_default_content()
contents = self._to_dict(contents)
shapes = []
for key, color in contents.items():
shape = Shape.make_textbox(key)
shape.textrange.font.size = self.fontsize
shape.tighten()
shape.fill = color
shape.line = self.line
shapes.append(shape)
return Shapes(shapes)
def _to_dict(self, contents):
def _to_color(arg):
try:
color = Color(arg)
except Exception as e:
raise ValueError(f"Cannot decipher `arg`, `{arg}`.")
return color
def _to_pair(elem, index):
arg = contents[index]
if isinstance(arg, str):
colors = self.prepare_colors(len(contents), override=False)
return arg, colors[index % len(colors)]
color = _to_color(arg)
key = str(color.rgba)
value = color
return key, value
from typing import Sequence, Mapping
if isinstance(contents, Sequence):
return dict(_to_pair(elem, index) for index, elem in enumerate(contents))
elif isinstance(contents, Mapping):
return {str(key) : _to_color(value) for key, value in contents.items()}
raise ValueError()
@property
def prepared_colors(self):
"""Used for choosing the color
outside the given `contents`.
"""
if self._prepared_colors:
return self._prepared_colors
raise NotImplementedError("`_prepare_colors` must be called priorly.")
def prepare_colors(self, n_color, override=False):
if self._prepared_colors and override is False:
return self._prepared_colors
colors = make_hue_circle(self.default_color, n_color)
self._prepared_colors = colors
return colors
def _gen_default_content(self):
slide = Slide()
colors = []
for shape in slide.shapes:
color = shape.fill.color
if color:
colors.append(color)
return colors
| 32.02439 | 85 | 0.578446 | from fairypptx.color import Color, make_hue_circle
from fairypptx.shape import Shape, Shapes
class PaletteMaker:
default_color = Color((25, 239, 198))
def __init__(self,
fontsize=18,
line=3,
colors=None):
self.fontsize = fontsize
self.line = line
self._prepared_colors = colors
def __call__(self, contents=None):
if contents is None:
contents = self._gen_default_content()
contents = self._to_dict(contents)
shapes = []
for key, color in contents.items():
shape = Shape.make_textbox(key)
shape.textrange.font.size = self.fontsize
shape.tighten()
shape.fill = color
shape.line = self.line
shapes.append(shape)
return Shapes(shapes)
def _to_dict(self, contents):
def _to_color(arg):
try:
color = Color(arg)
except Exception as e:
raise ValueError(f"Cannot decipher `arg`, `{arg}`.")
return color
def _to_pair(elem, index):
arg = contents[index]
if isinstance(arg, str):
colors = self.prepare_colors(len(contents), override=False)
return arg, colors[index % len(colors)]
color = _to_color(arg)
key = str(color.rgba)
value = color
return key, value
from typing import Sequence, Mapping
if isinstance(contents, Sequence):
return dict(_to_pair(elem, index) for index, elem in enumerate(contents))
elif isinstance(contents, Mapping):
return {str(key) : _to_color(value) for key, value in contents.items()}
raise ValueError()
@property
def prepared_colors(self):
if self._prepared_colors:
return self._prepared_colors
raise NotImplementedError("`_prepare_colors` must be called priorly.")
def prepare_colors(self, n_color, override=False):
if self._prepared_colors and override is False:
return self._prepared_colors
colors = make_hue_circle(self.default_color, n_color)
self._prepared_colors = colors
return colors
def _gen_default_content(self):
slide = Slide()
colors = []
for shape in slide.shapes:
color = shape.fill.color
if color:
colors.append(color)
return colors
| true | true |
f739d6f4cb52300de2124bf6f0e7dc296031c8b5 | 10,732 | py | Python | mars/scheduler/tests/test_graph.py | pingrunhuang/mars | ae920c374e9844d7426d0cc09c0d97059dc5341c | [
"Apache-2.0"
] | 1 | 2019-09-22T16:00:48.000Z | 2019-09-22T16:00:48.000Z | mars/scheduler/tests/test_graph.py | turboFei/mars | cde691285d921add5460944764c7278e7ddec8ff | [
"Apache-2.0"
] | null | null | null | mars/scheduler/tests/test_graph.py | turboFei/mars | cde691285d921add5460944764c7278e7ddec8ff | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import uuid
import unittest
import mars.tensor as mt
from mars.cluster_info import ClusterInfoActor
from mars.scheduler import GraphActor, GraphMetaActor, ResourceActor, ChunkMetaActor, \
AssignerActor, GraphState
from mars.utils import serialize_graph, get_next_port
from mars.actors import create_actor_pool
from mars.tests.core import patch_method
class Test(unittest.TestCase):
    """Tests for GraphActor: graph preparation, chunk placement, operand
    actor creation, terminal-state bookkeeping and error handling."""

    @contextlib.contextmanager
    def prepare_graph_in_pool(self, expr, clean_io_meta=True, compose=False):
        """Build ``expr``'s graph inside a fresh single-process actor pool,
        run it through the preparation pipeline (prepare -> scan ->
        place_initial_chunks -> create_operand_actors) asserting invariants
        at each step, then yield ``(pool, graph_ref)`` to the caller.
        """
        session_id = str(uuid.uuid4())
        graph_key = str(uuid.uuid4())
        graph = expr.build_graph(compose=compose)
        serialized_graph = serialize_graph(graph)
        # Locally tiled graph, used only to cross-check the chunk count
        # produced by the actor below.
        chunked_graph = expr.build_graph(compose=compose, tiled=True)
        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            # Bootstrap the scheduler-side actors the GraphActor depends on.
            pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                              uid=ClusterInfoActor.default_name())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
            pool.create_actor(AssignerActor, uid=AssignerActor.default_name())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            # Tiling inside the actor must match the locally tiled graph.
            graph_ref.prepare_graph(compose=compose)
            fetched_graph = graph_ref.get_chunk_graph()
            self.assertIsNotNone(fetched_graph)
            self.assertEqual(len(chunked_graph), len(fetched_graph))
            # scan_node fills in per-operand optimisation metrics.
            graph_ref.scan_node()
            op_infos = graph_ref.get_operand_info()
            for n in fetched_graph:
                depth = op_infos[n.op.key]['optimize']['depth']
                self.assertIsNotNone(depth)
                successor_size = op_infos[n.op.key]['optimize']['successor_size']
                self.assertIsNotNone(successor_size)
                descendant_size = op_infos[n.op.key]['optimize']['descendant_size']
                self.assertIsNotNone(descendant_size)
            # Register two fake workers so initial chunks can be assigned.
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))
            # Every source (predecessor-less) chunk must get a target worker.
            graph_ref.place_initial_chunks()
            op_infos = graph_ref.get_operand_info()
            for n in fetched_graph:
                if fetched_graph.count_predecessors(n) != 0:
                    continue
                target_worker = op_infos[n.op.key]['target_worker']
                self.assertIsNotNone(target_worker)
            graph_ref.create_operand_actors(_clean_io_meta=clean_io_meta)
            op_infos = graph_ref.get_operand_info()
            if not clean_io_meta:
                # Rebuild the expected io_meta from the chunk graph and check
                # the actor reports the same predecessors/successors/chunks.
                orig_metas = dict()
                for n in fetched_graph:
                    try:
                        meta = orig_metas[n.op.key]
                    except KeyError:
                        meta = orig_metas[n.op.key] = dict(
                            predecessors=set(), successors=set(), input_chunks=set(), chunks=set()
                        )
                    meta['predecessors'].update([pn.op.key for pn in fetched_graph.iter_predecessors(n)])
                    meta['successors'].update([sn.op.key for sn in fetched_graph.iter_successors(n)])
                    meta['input_chunks'].update([pn.key for pn in fetched_graph.iter_predecessors(n)])
                    meta['chunks'].update([c.key for c in n.op.outputs])
                for n in fetched_graph:
                    self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)
                    io_meta = op_infos[n.op.key]['io_meta']
                    orig_io_meta = orig_metas[n.op.key]
                    self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))
                    self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))
                    self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))
                    self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))
                    self.assertEqual(op_infos[n.op.key]['output_size'], sum(ch.nbytes for ch in n.op.outputs))
            yield pool, graph_ref

    def testSimpleGraphPreparation(self, *_):
        # Element-wise add of two random tensors.
        arr = mt.random.randint(10, size=(10, 8), chunk_size=4)
        arr_add = mt.random.randint(10, size=(10, 8), chunk_size=4)
        arr2 = arr + arr_add
        with self.prepare_graph_in_pool(arr2, clean_io_meta=False):
            pass

    def testSplitPreparation(self, *_):
        # Graph with a multi-output operand (split) feeding a binary op.
        arr = mt.ones(12, chunk_size=4)
        arr_split = mt.split(arr, 2)
        arr_sum = arr_split[0] + arr_split[1]
        with self.prepare_graph_in_pool(arr_sum, clean_io_meta=False):
            pass

    def testSameKeyPreparation(self, *_):
        # Graph where the same tensor (hence same keys) is used twice.
        arr = mt.ones((5, 5), chunk_size=3)
        arr2 = mt.concatenate((arr, arr))
        with self.prepare_graph_in_pool(arr2, clean_io_meta=False):
            pass

    def testFusePreparation(self, *_):
        from mars.tensor.expressions.fuse.core import TensorFuseChunk
        arr = mt.ones((5, 5), chunk_size=3)
        arr2 = (arr + 5) * 2
        # With compose=True every chunk op should be fused into a single
        # TensorFuseChunk.
        with self.prepare_graph_in_pool(arr2, compose=True) as (pool, graph_ref):
            out_graph = graph_ref.get_chunk_graph()
            self.assertTrue(all(isinstance(v.op, TensorFuseChunk) for v in out_graph))

    def testMultipleAddPreparation(self, *_):
        import numpy as np
        import operator
        from mars.compat import reduce
        # Sum the same slice ten times: many operands sharing inputs.
        base_arr = np.random.random((100, 100))
        a = mt.array(base_arr)
        sumv = reduce(operator.add, [a[:10, :10] for _ in range(10)])
        with self.prepare_graph_in_pool(sumv):
            pass

    def testGraphTermination(self, *_):
        from mars.tensor.expressions.arithmetic.add import TensorAddConstant
        # Success path: the graph only reaches SUCCEEDED after every
        # terminal chunk is marked finished.
        arr = mt.random.random((8, 2), chunk_size=2)
        arr2 = arr + 1
        with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):
            out_graph = graph_ref.get_chunk_graph()
            for c in out_graph:
                if not isinstance(c.op, TensorAddConstant):
                    continue
                self.assertNotEqual(graph_ref.get_state(), GraphState.SUCCEEDED)
                graph_ref.mark_terminal_finished(c.op.key)
            self.assertEqual(graph_ref.get_state(), GraphState.SUCCEEDED)
        # Failure path: marking terminals as FAILED drives the graph to
        # FAILED instead.
        arr = mt.random.random((8, 2), chunk_size=2)
        arr2 = arr + 1
        with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):
            out_graph = graph_ref.get_chunk_graph()
            for c in out_graph:
                if not isinstance(c.op, TensorAddConstant):
                    continue
                self.assertNotEqual(graph_ref.get_state(), GraphState.FAILED)
                graph_ref.mark_terminal_finished(c.op.key, GraphState.FAILED)
            self.assertEqual(graph_ref.get_state(), GraphState.FAILED)

    def testErrorOnPrepare(self, *_):
        """Check failure and cancellation handling during graph execution."""
        session_id = str(uuid.uuid4())
        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                              uid=ClusterInfoActor.default_name())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
            pool.create_actor(AssignerActor, uid=AssignerActor.default_name())
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))
            # error occurred in create_operand_actors
            graph_key = str(uuid.uuid4())
            expr = mt.random.random((8, 2), chunk_size=2) + 1
            graph = expr.build_graph(compose=False)
            serialized_graph = serialize_graph(graph)
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            def _mock_raises(*_):
                raise RuntimeError
            with patch_method(GraphActor.create_operand_actors, new=_mock_raises):
                with self.assertRaises(RuntimeError):
                    graph_ref.execute_graph()
                self.assertEqual(graph_ref.get_state(), GraphState.FAILED)
            graph_ref.destroy()
            # interrupted during create_operand_actors
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            def _mock_cancels(*_):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)
            with patch_method(GraphActor.create_operand_actors, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)
            # interrupted during previous steps
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            def _mock_cancels(*_):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)
            with patch_method(GraphActor.place_initial_chunks, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)
| 46.864629 | 110 | 0.633526 |
import contextlib
import uuid
import unittest
import mars.tensor as mt
from mars.cluster_info import ClusterInfoActor
from mars.scheduler import GraphActor, GraphMetaActor, ResourceActor, ChunkMetaActor, \
AssignerActor, GraphState
from mars.utils import serialize_graph, get_next_port
from mars.actors import create_actor_pool
from mars.tests.core import patch_method
class Test(unittest.TestCase):
    @contextlib.contextmanager
    def prepare_graph_in_pool(self, expr, clean_io_meta=True, compose=False):
        """Build ``expr``'s graph in a fresh single-process actor pool and run
        it through prepare -> scan -> place_initial_chunks ->
        create_operand_actors, asserting invariants at each step, then yield
        ``(pool, graph_ref)``.
        """
        session_id = str(uuid.uuid4())
        graph_key = str(uuid.uuid4())
        graph = expr.build_graph(compose=compose)
        serialized_graph = serialize_graph(graph)
        # Locally tiled graph, used only to cross-check the actor's chunk count.
        chunked_graph = expr.build_graph(compose=compose, tiled=True)
        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            # Bootstrap the scheduler-side actors the GraphActor depends on.
            pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                              uid=ClusterInfoActor.default_name())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
            pool.create_actor(AssignerActor, uid=AssignerActor.default_name())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            # Tiling inside the actor must match the locally tiled graph.
            graph_ref.prepare_graph(compose=compose)
            fetched_graph = graph_ref.get_chunk_graph()
            self.assertIsNotNone(fetched_graph)
            self.assertEqual(len(chunked_graph), len(fetched_graph))
            # scan_node fills in per-operand optimisation metrics.
            graph_ref.scan_node()
            op_infos = graph_ref.get_operand_info()
            for n in fetched_graph:
                depth = op_infos[n.op.key]['optimize']['depth']
                self.assertIsNotNone(depth)
                successor_size = op_infos[n.op.key]['optimize']['successor_size']
                self.assertIsNotNone(successor_size)
                descendant_size = op_infos[n.op.key]['optimize']['descendant_size']
                self.assertIsNotNone(descendant_size)
            # Register two fake workers so initial chunks can be assigned.
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))
            # Every source (predecessor-less) chunk must get a target worker.
            graph_ref.place_initial_chunks()
            op_infos = graph_ref.get_operand_info()
            for n in fetched_graph:
                if fetched_graph.count_predecessors(n) != 0:
                    continue
                target_worker = op_infos[n.op.key]['target_worker']
                self.assertIsNotNone(target_worker)
            graph_ref.create_operand_actors(_clean_io_meta=clean_io_meta)
            op_infos = graph_ref.get_operand_info()
            if not clean_io_meta:
                # Rebuild expected io_meta from the chunk graph and check the
                # actor reports the same predecessors/successors/chunks.
                orig_metas = dict()
                for n in fetched_graph:
                    try:
                        meta = orig_metas[n.op.key]
                    except KeyError:
                        meta = orig_metas[n.op.key] = dict(
                            predecessors=set(), successors=set(), input_chunks=set(), chunks=set()
                        )
                    meta['predecessors'].update([pn.op.key for pn in fetched_graph.iter_predecessors(n)])
                    meta['successors'].update([sn.op.key for sn in fetched_graph.iter_successors(n)])
                    meta['input_chunks'].update([pn.key for pn in fetched_graph.iter_predecessors(n)])
                    meta['chunks'].update([c.key for c in n.op.outputs])
                for n in fetched_graph:
                    self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)
                    io_meta = op_infos[n.op.key]['io_meta']
                    orig_io_meta = orig_metas[n.op.key]
                    self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))
                    self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))
                    self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))
                    self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))
                    self.assertEqual(op_infos[n.op.key]['output_size'], sum(ch.nbytes for ch in n.op.outputs))
            yield pool, graph_ref
def testSimpleGraphPreparation(self, *_):
arr = mt.random.randint(10, size=(10, 8), chunk_size=4)
arr_add = mt.random.randint(10, size=(10, 8), chunk_size=4)
arr2 = arr + arr_add
with self.prepare_graph_in_pool(arr2, clean_io_meta=False):
pass
def testSplitPreparation(self, *_):
arr = mt.ones(12, chunk_size=4)
arr_split = mt.split(arr, 2)
arr_sum = arr_split[0] + arr_split[1]
with self.prepare_graph_in_pool(arr_sum, clean_io_meta=False):
pass
def testSameKeyPreparation(self, *_):
arr = mt.ones((5, 5), chunk_size=3)
arr2 = mt.concatenate((arr, arr))
with self.prepare_graph_in_pool(arr2, clean_io_meta=False):
pass
def testFusePreparation(self, *_):
from mars.tensor.expressions.fuse.core import TensorFuseChunk
arr = mt.ones((5, 5), chunk_size=3)
arr2 = (arr + 5) * 2
with self.prepare_graph_in_pool(arr2, compose=True) as (pool, graph_ref):
out_graph = graph_ref.get_chunk_graph()
self.assertTrue(all(isinstance(v.op, TensorFuseChunk) for v in out_graph))
def testMultipleAddPreparation(self, *_):
import numpy as np
import operator
from mars.compat import reduce
base_arr = np.random.random((100, 100))
a = mt.array(base_arr)
sumv = reduce(operator.add, [a[:10, :10] for _ in range(10)])
with self.prepare_graph_in_pool(sumv):
pass
    def testGraphTermination(self, *_):
        """The graph state must flip to SUCCEEDED/FAILED only once all
        terminal chunks have been marked accordingly."""
        from mars.tensor.expressions.arithmetic.add import TensorAddConstant
        # Success path: state stays non-SUCCEEDED until the last terminal
        # chunk is marked finished.
        arr = mt.random.random((8, 2), chunk_size=2)
        arr2 = arr + 1
        with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):
            out_graph = graph_ref.get_chunk_graph()
            for c in out_graph:
                # Only the add-constant chunks are graph terminals here.
                if not isinstance(c.op, TensorAddConstant):
                    continue
                self.assertNotEqual(graph_ref.get_state(), GraphState.SUCCEEDED)
                graph_ref.mark_terminal_finished(c.op.key)
            self.assertEqual(graph_ref.get_state(), GraphState.SUCCEEDED)
        # Failure path: marking terminals FAILED drives the graph to FAILED.
        arr = mt.random.random((8, 2), chunk_size=2)
        arr2 = arr + 1
        with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):
            out_graph = graph_ref.get_chunk_graph()
            for c in out_graph:
                if not isinstance(c.op, TensorAddConstant):
                    continue
                self.assertNotEqual(graph_ref.get_state(), GraphState.FAILED)
                graph_ref.mark_terminal_finished(c.op.key, GraphState.FAILED)
            self.assertEqual(graph_ref.get_state(), GraphState.FAILED)
    def testErrorOnPrepare(self, *_):
        """Errors or cancellation raised while preparing/executing a graph must leave
        the GraphActor in FAILED or CANCELLED state respectively."""
        session_id = str(uuid.uuid4())
        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                              uid=ClusterInfoActor.default_name())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
            pool.create_actor(AssignerActor, uid=AssignerActor.default_name())
            # register two fake workers so assignment has somewhere to go
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))
            graph_key = str(uuid.uuid4())
            expr = mt.random.random((8, 2), chunk_size=2) + 1
            graph = expr.build_graph(compose=False)
            serialized_graph = serialize_graph(graph)
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            # Case 1: an exception in create_operand_actors -> graph ends FAILED
            def _mock_raises(*_):
                raise RuntimeError
            with patch_method(GraphActor.create_operand_actors, new=_mock_raises):
                with self.assertRaises(RuntimeError):
                    graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.FAILED)
            graph_ref.destroy()
            # Case 2: state flips to CANCELLING during create_operand_actors -> CANCELLED
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            def _mock_cancels(*_):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)
            with patch_method(GraphActor.create_operand_actors, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)
            # Case 3: same cancellation, but during place_initial_chunks
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_name(session_id, graph_key))
            def _mock_cancels(*_):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)
            with patch_method(GraphActor.place_initial_chunks, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)
| true | true |
f739d719e2562269b1ab930f373f9a3766d1f2af | 2,049 | py | Python | part1/main.py | haruishi43/AR-Project-OpenCV | a33f342645945fb688c7846d7da4acbf3977a2f1 | [
"MIT"
] | null | null | null | part1/main.py | haruishi43/AR-Project-OpenCV | a33f342645945fb688c7846d7da4acbf3977a2f1 | [
"MIT"
] | null | null | null | part1/main.py | haruishi43/AR-Project-OpenCV | a33f342645945fb688c7846d7da4acbf3977a2f1 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
MIN_MATCHES = 20
def main():
    """Match webcam frames against a reference image using ORB features.

    Loads ``../data/model.jpg`` as the model image, then for every camera
    frame matches ORB descriptors against the model and displays the best
    matches. Press ``q`` to quit, ``p`` to save the current frame to
    ``gray_frame.jpg``.
    """
    # Compute model first
    model = cv2.imread('../data/model.jpg', 0)
    # BUGFIX: imread returns None on a missing/unreadable file; bail out early
    if model is None:
        print("no model features!")
        return
    # ORB keypoint detector
    orb = cv2.ORB_create()
    # create brute force matcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Compute model keypoints and its descriptors
    kp_model, des_model = orb.detectAndCompute(model, None)
    # safe guard: bf.match(None, ...) below would crash, so stop here
    if des_model is None:
        print("no model features!")
        return
    # run camera:
    cap = cv2.VideoCapture(0)
    # set to vga format
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    try:
        # capture loop
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute scene keypoints and its descriptors
            kp_frame, des_frame = orb.detectAndCompute(gray_frame, None)
            if des_frame is not None:
                # Match frame descriptors with model descriptors
                matches = bf.match(des_model, des_frame)
                # Sort them in the order of their distance
                matches = sorted(matches, key=lambda x: x.distance)
                if len(matches) > MIN_MATCHES:
                    gray_frame = cv2.drawMatches(model, kp_model, gray_frame, kp_frame,
                                                 matches[:MIN_MATCHES], 0, flags=2)
                else:
                    print("Not enough matches have been found - {} / {}".format(len(matches), MIN_MATCHES))
                # show result (original showed the matched image twice; once is enough)
                cv2.imshow('frame', gray_frame)
            else:
                print("taget has no features!")
                cv2.imshow('frame', gray_frame)
            key = cv2.waitKey(100)
            if key == ord('q'):  # exit on `q`
                break
            if key == ord('p'):  # print image
                if gray_frame.any():
                    cv2.imwrite('gray_frame.jpg', gray_frame)
    finally:
        # BUGFIX: release the camera and close windows even on error (was leaked before)
        cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
main() | 30.132353 | 118 | 0.574915 | import numpy as np
import cv2
MIN_MATCHES = 20
def main():
    """Match webcam frames against a reference image using ORB features.

    Loads ``../data/model.jpg`` as the model, matches each camera frame's ORB
    descriptors against it and displays the best matches. ``q`` quits, ``p``
    saves the current frame to ``gray_frame.jpg``.
    """
    # reference image loaded as grayscale; NOTE(review): no None check — a
    # missing file would only surface later in detectAndCompute
    model = cv2.imread('../data/model.jpg', 0)
    orb = cv2.ORB_create()
    # Hamming distance with cross-checking suits ORB's binary descriptors
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp_model, des_model = orb.detectAndCompute(model, None)
    if des_model is None:
        # NOTE(review): only warns; bf.match below would still crash on None
        print("no model features!")
    cap = cv2.VideoCapture(0)
    # request VGA resolution from the camera
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    while(True):
        ret, frame = cap.read()
        if not ret:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        kp_frame, des_frame = orb.detectAndCompute(gray_frame, None)
        if des_frame is not None:
            matches = bf.match(des_model, des_frame)
            # closest matches first
            matches = sorted(matches, key=lambda x: x.distance)
            if len(matches) > MIN_MATCHES:
                gray_frame = cv2.drawMatches(model, kp_model, gray_frame, kp_frame, matches[:MIN_MATCHES], 0, flags=2)
                cv2.imshow('frame', gray_frame)
            else:
                print("Not enough matches have been found - {} / {}".format( len(matches), MIN_MATCHES))
            cv2.imshow('frame', gray_frame)
        else:
            print("taget has no features!")
            cv2.imshow('frame', gray_frame)
        key = cv2.waitKey(100)
        if key == ord('q'):  # quit
            break
        if key == ord('p'):  # save current frame
            if gray_frame.any():
                cv2.imwrite('gray_frame.jpg', gray_frame)
if __name__ == "__main__":
main() | true | true |
f739d7b9032fdb25071a2cfc9c0e51fde44ee430 | 22 | py | Python | treebeard_admin/__init__.py | rouxcode/django-admin-addons | cd9a2b288e4d0d8ebc42a7eb504e05572d0136cc | [
"MIT"
] | null | null | null | treebeard_admin/__init__.py | rouxcode/django-admin-addons | cd9a2b288e4d0d8ebc42a7eb504e05572d0136cc | [
"MIT"
] | 1 | 2019-01-27T11:48:13.000Z | 2019-01-27T11:48:13.000Z | src/imply/__init__.py | seddonym/imply | d5354c60d4584034f4a9aa78e6ad41c2caba3795 | [
"BSD-2-Clause"
] | null | null | null | __version__ = '1.0a3'
| 11 | 21 | 0.681818 | __version__ = '1.0a3'
| true | true |
f739d8a3335f4c422d4015a204f41031d39c511b | 871 | py | Python | amolf/plotting/Histogram.py | Repythory/Libraries | d5f01267a5f396bd0d74b6291f4552bcc6790777 | [
"BSD-2-Clause"
] | null | null | null | amolf/plotting/Histogram.py | Repythory/Libraries | d5f01267a5f396bd0d74b6291f4552bcc6790777 | [
"BSD-2-Clause"
] | null | null | null | amolf/plotting/Histogram.py | Repythory/Libraries | d5f01267a5f396bd0d74b6291f4552bcc6790777 | [
"BSD-2-Clause"
] | null | null | null | def joinprob3d(ff, T1 = 2*pi):
"""
Given the 2d histogram (ff) of two datasets, this function gives back the join probability as 3d plot with the corresponding axis.
Author: Michele Monti
Args:
ff: (list) is the output of histogram2d,
ff[0] is the distribution from histogram2d to 3d plot, ff[1] is the x axis, ff[2] the y axis
Returns: the 3Dplot of the join distribution with the colorbar of the gradient!
"""
distribution = ff[0]
fig = plt.figure()
ax = fig.gca(projection = '3d')
X, Y = meshgrid(ff[2][:-1], ff[1][:-1])
Gx, Gy = gradient(distribution)
G = (Gx**2 + Gy**2) ** 0.5
N = G/G.max()
surf = ax.plot_surface(X, Y, distribution, rstride = 1, cstride = 1,
facecolors = cm.jet(N),linewidth = 0, antialiased=False, shade=False)
m = cm.ScalarMappable(cmap = cm.jet)
m.set_array(G)
plt.colorbar(m)
return plt.show()
| 27.21875 | 131 | 0.660161 | def joinprob3d(ff, T1 = 2*pi):
distribution = ff[0]
fig = plt.figure()
ax = fig.gca(projection = '3d')
X, Y = meshgrid(ff[2][:-1], ff[1][:-1])
Gx, Gy = gradient(distribution)
G = (Gx**2 + Gy**2) ** 0.5
N = G/G.max()
surf = ax.plot_surface(X, Y, distribution, rstride = 1, cstride = 1,
facecolors = cm.jet(N),linewidth = 0, antialiased=False, shade=False)
m = cm.ScalarMappable(cmap = cm.jet)
m.set_array(G)
plt.colorbar(m)
return plt.show()
| true | true |
f739d8aa611bd23754987f56fdd1d8b3709b35c2 | 3,445 | py | Python | catalyst/contrib/data/augmentor.py | and-kul/catalyst | 51428d7756e62b9b8ee5379f38e9fd576eeb36e5 | [
"Apache-2.0"
] | 2 | 2019-04-19T21:34:31.000Z | 2019-05-02T22:50:25.000Z | catalyst/contrib/data/augmentor.py | and-kul/catalyst | 51428d7756e62b9b8ee5379f38e9fd576eeb36e5 | [
"Apache-2.0"
] | 1 | 2021-01-07T16:13:45.000Z | 2021-01-21T09:27:54.000Z | catalyst/contrib/data/augmentor.py | and-kul/catalyst | 51428d7756e62b9b8ee5379f38e9fd576eeb36e5 | [
"Apache-2.0"
] | 1 | 2020-12-02T18:42:31.000Z | 2020-12-02T18:42:31.000Z | from typing import Callable, Dict, List, Union
class Augmentor:
"""Augmentation abstraction to use with data dictionaries."""
def __init__(
self,
dict_key: str,
augment_fn: Callable,
input_key: str = None,
output_key: str = None,
**kwargs,
):
"""
Augmentation abstraction to use with data dictionaries.
Args:
dict_key: key to transform
augment_fn: augmentation function to use
input_key: ``augment_fn`` input key
output_key: ``augment_fn`` output key
**kwargs: default kwargs for augmentations function
"""
self.dict_key = dict_key
self.augment_fn = augment_fn
self.input_key = input_key
self.output_key = output_key
self.kwargs = kwargs
def __call__(self, dict_: dict):
"""Applies the augmentation."""
if self.input_key is not None:
output = self.augment_fn(
**{self.input_key: dict_[self.dict_key]}, **self.kwargs
)
else:
output = self.augment_fn(dict_[self.dict_key], **self.kwargs)
if self.output_key is not None:
dict_[self.dict_key] = output[self.output_key]
else:
dict_[self.dict_key] = output
return dict_
class AugmentorCompose:
    """Applies several per-key augmentation callables and merges their outputs."""

    def __init__(self, key2augment_fn: Dict[str, Callable]):
        """
        Args:
            key2augment_fn: mapping from input key to the augmentation
                callable applied to that key
        """
        self.key2augment_fn = key2augment_fn

    def __call__(self, dictionary: dict) -> dict:
        """Return ``dictionary`` with every registered key augmented."""
        augmented = {}
        for key, fn in self.key2augment_fn.items():
            # each fn receives a one-entry dict and returns a dict of results
            augmented.update(fn({key: dictionary[key]}))
        return {**dictionary, **augmented}
class AugmentorKeys:
    """Adapter that maps data-dict keys to the keyword names of an augmentation fn."""

    def __init__(
        self,
        dict2fn_dict: Union[Dict[str, str], List[str]],
        augment_fn: Callable,
    ):
        """
        Args:
            dict2fn_dict: ``{input_key: augment_fn_key}`` mapping; a plain
                list of keys is treated as an identity mapping
            augment_fn: augmentation callable invoked with keyword arguments
        """
        if isinstance(dict2fn_dict, list):
            dict2fn_dict = {key: key for key in dict2fn_dict}
        self.dict2fn_dict = dict2fn_dict
        self.augment_fn = augment_fn

    def __call__(self, dictionary: dict) -> dict:
        """Return ``dictionary`` with the mapped keys replaced by augmented values."""
        # translate dict keys into the keyword names augment_fn expects
        fn_kwargs = {fn_key: dictionary[dict_key]
                     for dict_key, fn_key in self.dict2fn_dict.items()}
        augmented = self.augment_fn(**fn_kwargs)
        # translate augment_fn's output names back to the dict's keys
        updates = {dict_key: augmented[fn_key]
                   for dict_key, fn_key in self.dict2fn_dict.items()}
        return {**dictionary, **updates}
__all__ = ["Augmentor", "AugmentorCompose", "AugmentorKeys"]
| 28.708333 | 73 | 0.571553 | from typing import Callable, Dict, List, Union
class Augmentor:
    """Applies a single augmentation function to one key of a data dictionary."""
    def __init__(
        self,
        dict_key: str,
        augment_fn: Callable,
        input_key: str = None,
        output_key: str = None,
        **kwargs,
    ):
        """
        Args:
            dict_key: key of the dictionary entry to transform
            augment_fn: augmentation callable to apply
            input_key: keyword name for passing the value to ``augment_fn``
                (positional when ``None``)
            output_key: key to pick from ``augment_fn``'s result
                (result used as-is when ``None``)
            **kwargs: extra keyword arguments forwarded to ``augment_fn``
        """
        self.dict_key = dict_key
        self.augment_fn = augment_fn
        self.input_key = input_key
        self.output_key = output_key
        self.kwargs = kwargs
    def __call__(self, dict_: dict):
        """Transform ``dict_[dict_key]`` in place and return the dictionary."""
        if self.input_key is not None:
            output = self.augment_fn(
                **{self.input_key: dict_[self.dict_key]}, **self.kwargs
            )
        else:
            output = self.augment_fn(dict_[self.dict_key], **self.kwargs)
        if self.output_key is not None:
            dict_[self.dict_key] = output[self.output_key]
        else:
            dict_[self.dict_key] = output
        return dict_
class AugmentorCompose:
    """Applies several per-key augmentation callables and merges their outputs."""
    def __init__(self, key2augment_fn: Dict[str, Callable]):
        """
        Args:
            key2augment_fn: mapping from input key to the augmentation
                callable applied to that key
        """
        self.key2augment_fn = key2augment_fn
    def __call__(self, dictionary: dict) -> dict:
        """Return ``dictionary`` with every registered key augmented."""
        results = {}
        for key, augment_fn in self.key2augment_fn.items():
            # each fn receives a one-entry dict and returns a dict of results
            results = {**results, **augment_fn({key: dictionary[key]})}
        return {**dictionary, **results}
class AugmentorKeys:
    """Adapter that maps data-dict keys to the keyword names of an augmentation fn."""
    def __init__(
        self,
        dict2fn_dict: Union[Dict[str, str], List[str]],
        augment_fn: Callable,
    ):
        """
        Args:
            dict2fn_dict: ``{input_key: augment_fn_key}`` mapping; a plain
                list of keys is treated as an identity mapping
            augment_fn: augmentation callable invoked with keyword arguments
        """
        if isinstance(dict2fn_dict, list):
            dict2fn_dict = {key: key for key in dict2fn_dict}
        self.dict2fn_dict = dict2fn_dict
        self.augment_fn = augment_fn
    def __call__(self, dictionary: dict) -> dict:
        """Return ``dictionary`` with the mapped keys replaced by augmented values."""
        # translate dict keys into the keyword names augment_fn expects
        data = {
            fn_key: dictionary[dict_key]
            for dict_key, fn_key in self.dict2fn_dict.items()
        }
        augmented = self.augment_fn(**data)
        # translate augment_fn's output names back to the dict's keys
        results = {
            dict_key: augmented[fn_key]
            for dict_key, fn_key in self.dict2fn_dict.items()
        }
        return {**dictionary, **results}
__all__ = ["Augmentor", "AugmentorCompose", "AugmentorKeys"]
| true | true |
f739d8bc92803c90617af9225aeb07b3a67061cf | 2,092 | py | Python | Stage_Five_Python_Package/Welcome.py | ChrisVicky/CodingHomeWork2020 | b8946c1d32c3aaecb3de5cc8247a9e5a4653a778 | [
"Apache-2.0"
] | 1 | 2021-01-02T11:02:46.000Z | 2021-01-02T11:02:46.000Z | Stage_Five_Python_Package/Welcome.py | ChrisVicky/CodingHomeWork2020 | b8946c1d32c3aaecb3de5cc8247a9e5a4653a778 | [
"Apache-2.0"
] | null | null | null | Stage_Five_Python_Package/Welcome.py | ChrisVicky/CodingHomeWork2020 | b8946c1d32c3aaecb3de5cc8247a9e5a4653a778 | [
"Apache-2.0"
] | null | null | null | import FindTag
import requests
import re
def Introduction():
    """Print the program banner and a short description of what it does."""
    banner_lines = (
        "欢迎使用本程序 copyright: 刘锦帆 20级 工科班 天津大学",
        "本程序满足二次元爱好者收集二次元图片的需求,顺便增加了将图片制成头像的功能",
        "首先,通过简单的 Python 爬虫 从[k站](著名二次元图片网站) [https://konachan.net/post] 抓取并下载二次元图片",
        "然后,利用 open-CV [https://github.com/nagadomi/lbpcascade_animeface] 里的方法识别人物面部",
        "最后,将上述识别到的人物面部保存为图片,可作为 头像 使用",
    )
    for line in banner_lines:
        print(line)
def GetRequirement():
    """Interactively ask the user what to download and return the request spec.

    Returns:
        dict with keys ``url``, ``TotalNum`` and ``FolderName`` on success,
        ``Exception`` if the character lookup failed, or ``ConnectionError``
        if the chosen page cannot be reached.
    """
    url = 'https://konachan.net/post'
    FolderName = 'konachan.net'
    print("请问您希望以何种形式进行图片的下载呢?(请输入1~3的阿拉伯数字)")
    print(" 1.从k站的推荐中下载图片\n 2.输入动漫人物名称进行检索并下载\n 3.检索并下载动漫人物[雪之下雪乃]的图片")
    Status = int(input())
    if Status == 2:
        print("请问您想下载哪位动漫人物的图片呢?\n(支持中文、日文或英文输入)\n(例如:雪之下雪乃/雪之下/ゆきのした ゆきの/Yukinoshita Yukino)")
        Name = input()
        Tag = FindTag.FindPerson(Name)
        if Tag == Exception:
            return Exception
        # BUGFIX: the original test `re.match('[a~z]*', Tag) is None` could never
        # be True ('*' matches the empty string, and '[a~z]' is a wrong character
        # class), so the fallback branch was dead. Treat a tag without any latin
        # letter as "not found".
        if re.search(r'[a-z]', Tag) is None:
            print("[未检索到您要的人物,接下来将为您从k站的推荐中下载图片]")
        else:
            url = 'https://konachan.net/post?tags=' + Tag
            FolderName = Tag
    elif Status == 3:
        Name = '雪之下雪乃'
        Tag = 'yukinoshita_yukino'
        url = 'https://konachan.net/post?tags=' + Tag
        FolderName = Tag
    FolderName = '\\' + FolderName
    response = requests.get(url)
    if response.status_code == 200:
        # BUGFIX: only mention `Name` when a character was actually selected;
        # the original raised NameError for Status values outside 1-3.
        searched = Status in (2, 3) and re.search(r'[a-z]', Tag) is not None
        if searched:
            print("将从网站[%s]下载[%s]的图片" % (url, Name))
        else:
            print("将从网站[%s]下载图片" % url)
    else:
        print("网络连接异常")
        return ConnectionError
    print("请问您想下载最多多少张图片呢?\n(请输入一个阿拉伯数字)")
    TotalNum = int(input())
    print("好的,现在准备从[%s]下载[%d]张图片" % (url, TotalNum))
    return {'url': url, 'TotalNum': TotalNum, 'FolderName': FolderName}
def InternetConnectionCheck():
    """Exit the program if there is no internet connectivity (probes baidu.com)."""
    print("[检查网络状况中......]")
    try:
        requests.get('https://www.baidu.com', timeout=2)
        print("[您的网络连接正常]")
    except Exception as e:
        # no connectivity (timeout, DNS failure, ...): abort, showing the error
        exit(e)
def FeedBack():
    """Run the connectivity check, show the intro, then collect and return the
    user's download request (see ``GetRequirement`` for the return shape)."""
    InternetConnectionCheck()
    Introduction()
    Back_Get = GetRequirement()
    return Back_Get
| 31.69697 | 95 | 0.607553 | import FindTag
import requests
import re
def Introduction():
    """Print the program banner and a short description of what it does."""
    print("欢迎使用本程序 copyright: 刘锦帆 20级 工科班 天津大学")
    print("本程序满足二次元爱好者收集二次元图片的需求,顺便增加了将图片制成头像的功能")
    print("首先,通过简单的 Python 爬虫 从[k站](著名二次元图片网站) [https://konachan.net/post] 抓取并下载二次元图片")
    print("然后,利用 open-CV [https://github.com/nagadomi/lbpcascade_animeface] 里的方法识别人物面部")
    print("最后,将上述识别到的人物面部保存为图片,可作为 头像 使用")
def GetRequirement():
    """Interactively ask the user what to download and return the request spec.

    Returns:
        dict with keys ``url``, ``TotalNum`` and ``FolderName`` on success,
        ``Exception`` if the character lookup failed, or ``ConnectionError``
        if the chosen page cannot be reached.
    """
    url = 'https://konachan.net/post'
    FolderName = 'konachan.net'
    print("请问您希望以何种形式进行图片的下载呢?(请输入1~3的阿拉伯数字)")
    print(" 1.从k站的推荐中下载图片\n 2.输入动漫人物名称进行检索并下载\n 3.检索并下载动漫人物[雪之下雪乃]的图片")
    Status = int(input())
    if Status == 2:
        print("请问您想下载哪位动漫人物的图片呢?\n(支持中文、日文或英文输入)\n(例如:雪之下雪乃/雪之下/ゆきのした ゆきの/Yukinoshita Yukino)")
        Name = input()
        Tag = FindTag.FindPerson(Name)
        if Tag == Exception:
            return Exception
        # NOTE(review): `re.match('[a~z]*', Tag)` can never be None ('*' matches
        # the empty string) and '[a~z]' is likely a typo for '[a-z]'; this
        # fallback branch is dead code — confirm and fix.
        if re.match('[a~z]*', Tag) is None:
            print("[未检索到您要的人物,接下来将为您从k站的推荐中下载图片]")
        else:
            url = 'https://konachan.net/post?tags=' + Tag
            FolderName = Tag
    elif Status == 3:
        # hard-coded favourite character
        Name = '雪之下雪乃'
        Tag = 'yukinoshita_yukino'
        url = 'https://konachan.net/post?tags=' + Tag
        FolderName = Tag
    FolderName = '\\' + FolderName
    response = requests.get(url)
    if response.status_code == 200:
        # NOTE(review): for Status values outside 1-3 the else branch raises
        # NameError on the undefined `Name`
        if Status == 1 or (Status == 2 and re.match('[a~z]*', Tag) is None):
            print("将从网站[%s]下载图片" % url)
        else:
            print("将从网站[%s]下载[%s]的图片" % (url, Name))
    else:
        print("网络连接异常")
        return ConnectionError
    print("请问您想下载最多多少张图片呢?\n(请输入一个阿拉伯数字)")
    TotalNum = int(input())
    print("好的,现在准备从[%s]下载[%d]张图片" % (url, TotalNum))
    return {'url': url, 'TotalNum': TotalNum, 'FolderName': FolderName}
def InternetConnectionCheck():
    """Exit the program if there is no internet connectivity (probes baidu.com)."""
    print("[检查网络状况中......]")
    try:
        requests.get('https://www.baidu.com', timeout=2)
        print("[您的网络连接正常]")
    except Exception as e:
        # no connectivity (timeout, DNS failure, ...): abort, showing the error
        exit(e)
def FeedBack():
    """Run the connectivity check, show the intro, then collect and return the
    user's download request (see ``GetRequirement`` for the return shape)."""
    InternetConnectionCheck()
    Introduction()
    Back_Get = GetRequirement()
    return Back_Get
| true | true |
f739d96422796ebb23b63018a6c76aab635d7a20 | 30 | py | Python | modularrh/hello.py | henriklindgren/python-inspect-classes | 3247812bc36c9d6bb0b8273f3f779f1a84260b8d | [
"Apache-2.0"
] | null | null | null | modularrh/hello.py | henriklindgren/python-inspect-classes | 3247812bc36c9d6bb0b8273f3f779f1a84260b8d | [
"Apache-2.0"
] | null | null | null | modularrh/hello.py | henriklindgren/python-inspect-classes | 3247812bc36c9d6bb0b8273f3f779f1a84260b8d | [
"Apache-2.0"
] | null | null | null | class Hello(object):
pass
| 10 | 20 | 0.666667 | class Hello(object):
pass
| true | true |
f739dad43a507385a9797dbb8a81b214cdc097f6 | 11,815 | py | Python | app/api/views/processor.py | rrozek/image_playground | 3486785c1b22e600b7c91e7c6885b5b72a8da328 | [
"MIT"
] | null | null | null | app/api/views/processor.py | rrozek/image_playground | 3486785c1b22e600b7c91e7c6885b5b72a8da328 | [
"MIT"
] | null | null | null | app/api/views/processor.py | rrozek/image_playground | 3486785c1b22e600b7c91e7c6885b5b72a8da328 | [
"MIT"
] | null | null | null | import os.path
import typing
import subprocess
import base64
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.urls.base import resolve
from django.views.decorators.csrf import csrf_exempt
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import BaseFilterBackend
from rest_framework.response import Response
from rest_framework.schemas import coreapi
from rest_framework.views import APIView, Request
from rest_framework import status
from rest_framework.exceptions import ValidationError
from ..drf_auth_override import CsrfExemptSessionAuthentication
from ..utils import xresponse, get_pretty_logger, file_hash, ErrorCode, source_hash, encode_base64
from ..exceptions import ParamError
from ..serializers import ImgSerializer
from ..views import schema_utils
logger = get_pretty_logger('api:views')
class RequestImgFilterBackend(BaseFilterBackend):
    """Filter backend stub for image requests: contributes no schema fields."""

    def get_schema_fields(self, view):
        """Return an empty list — nothing to add to the generated schema."""
        return []
def validate_payload(serializer_class, payload: dict) -> dict:
    """Validate an upload payload and persist the uploaded file.

    Runs ``payload`` through ``serializer_class`` (raising a DRF validation
    error on bad input), saves the uploaded ``source`` file to default
    storage under a sanitized name, and returns the validated data extended
    with ``filename`` (the storage-relative name actually used) and
    ``storage`` (the storage root directory).
    """
    img_serializer = serializer_class(data=payload)
    img_serializer.is_valid(raise_exception=True)
    clean_data = img_serializer.validated_data
    # NOTE: joining the name parts drops the dots entirely ("a.b.png" -> "ab"),
    # so the subsequent '.' replacement is effectively a no-op; spaces -> '_'
    name = ''.join(clean_data['source'].name.split('.')[:-1]).replace('.', '_').replace(' ', '_')
    suffix = ''.join(clean_data['source'].name.split('.')[-1:])
    # default_storage may deduplicate, so keep the name it actually chose
    filename = default_storage.save(f'{name}.{suffix}', clean_data['source'])
    clean_data['filename'] = filename
    clean_data['storage'] = default_storage.location
    return clean_data
class ImgProcessAPIView(APIView):
    """Base view for image conversion endpoints.

    Subclasses implement ``process_request`` (the actual conversion) and
    ``return_format`` (the image subtype for the Content-Type header).
    ``POST`` accepts an upload and returns either the converted image bytes
    (``output=image``, the default) or a redirect to its media URL
    (``output=url``).
    """
    filter_backends = (RequestImgFilterBackend,)
    serializer_class = ImgSerializer
    authentication_classes = (CsrfExemptSessionAuthentication,)
    def process_request(self, clean_data, request):
        """Convert the uploaded file; return (absolute path, file name) of the result."""
        raise NotImplementedError('not implemented')
    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return ''
    @swagger_auto_schema(operation_description="",
                         manual_parameters=[Parameter('output', in_='query', required=True, type='string')],
                         request_body=serializer_class,
                         responses={200: schema_utils.xresponse_ok(),
                                    400: schema_utils.xresponse_nok()})
    def post(self, request):
        """Validate the upload, run the conversion and deliver the result."""
        # output defaults to inline image bytes
        if 'output' not in request.query_params:
            output = 'image'
        else:
            output = str(request.query_params['output']).lower()
        supported_output_formats = ['image', 'url']
        if output not in supported_output_formats:
            return xresponse(
                status=status.HTTP_400_BAD_REQUEST,
                error_code=ErrorCode.InvalidParams,
                msg=f'Unhandled output format. Selected: {output} available: [{", ".join(supported_output_formats)}]'
            )
        try:
            clean_data = validate_payload(self.serializer_class, request.data)
        except ParamError as e:
            return xresponse(status.HTTP_400_BAD_REQUEST, e.error_code, e.msg)
        try:
            output_filepath, output_filename = self.process_request(clean_data, request)
            if output == 'image':
                # stream the converted file back inline
                with open(output_filepath, 'rb') as file:
                    return HttpResponse(content=file.read(), content_type=f'image/{self.return_format}')
            else:
                # point the client at the stored media file instead
                return HttpResponse(
                    status=status.HTTP_303_SEE_OTHER,
                    headers={
                        'Location': request.build_absolute_uri(f'{settings.MEDIA_URL}{output_filename}')
                    },
                )
        except Exception as e:
            # any conversion failure is reported as NotFound
            return xresponse(status.HTTP_400_BAD_REQUEST, ErrorCode.NotFound, e)
class Png2Tiff(ImgProcessAPIView):
    """Convert an uploaded PNG into a TIFF whose alpha becomes a clip path.

    Pipeline (ImageMagick + GIMP):
      1. extract the PNG's alpha channel to a grayscale PNG,
      2. convert that alpha mask to SVG,
      3. convert the PNG itself to a temporary TIFF,
      4. have GIMP's ``svg-clip-path`` script apply the SVG as a clip path.
    """

    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return 'tiff'

    @staticmethod
    def _run(command, timeout=10, shell=False):
        """Run one external command, wait for it and log its exit code.

        ``shell=True`` is needed only for the GIMP script-fu invocation,
        whose quoting cannot survive a naive split.
        """
        logger.info(f'command: {command}')
        process = subprocess.Popen(
            command if shell else command.split(' '),
            shell=shell,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(timeout)
        logger.info(f'process resultcode: {process.returncode}')

    def process_request(self, clean_data, request):
        """Run the PNG->TIFF pipeline; return (absolute path, name) of the TIFF."""
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        stem = ''.join(clean_data['filename'].split('.')[:-1])
        output_filename = f"{stem}.tiff"
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        output_alpha_filepath = os.path.join(clean_data['storage'], f"{stem}_alpha.png")
        output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
        output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{stem}_tmp.tiff")
        # 1. extract alpha channel as a grayscale mask
        self._run(f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}')
        # 2. vectorize the alpha mask
        self._run(f'convert {output_alpha_filepath} {output_svg_filepath}')
        # 3. plain raster conversion of the source to TIFF
        self._run(f'convert {input_filepath} {output_tiff_tmp_filepath}')
        # 4. apply the SVG clip path inside GIMP
        self._run(
            f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath}\" )' -b '(gimp-quit 0)'",
            timeout=20,
            shell=True,
        )
        # drop the source and all intermediates, keep only the final TIFF
        os.remove(input_filepath)
        os.remove(output_alpha_filepath)
        os.remove(output_svg_filepath)
        os.remove(output_tiff_tmp_filepath)
        return output_filepath, output_filename
class Tiff2Png(ImgProcessAPIView):
    """Convert an uploaded TIFF to PNG, turning its clip path into transparency."""

    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return 'png'

    def process_request(self, clean_data, request):
        """Run ImageMagick on the upload; return (absolute path, name) of the PNG."""
        storage = clean_data['storage']
        source_name = clean_data['filename']
        input_filepath = os.path.join(storage, source_name)
        output_filename = ''.join(source_name.split('.')[:-1]) + '.png'
        output_filepath = os.path.join(storage, output_filename)
        # make everything transparent, then restore opacity inside the clip path
        command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
        process = subprocess.Popen(
            command.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'command: {command}')
        logger.info(f'process resultcode: {process.returncode}')
        # the uploaded source is no longer needed
        os.remove(input_filepath)
        return output_filepath, output_filename
class Eps2Png(ImgProcessAPIView):
    """Convert an uploaded EPS to PNG.

    NOTE(review): the body is byte-identical to ``Tiff2Png.process_request``;
    ImageMagick presumably infers the input format from the uploaded file's
    extension — confirm, and consider sharing the implementation.
    """
    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return 'png'
    def process_request(self, clean_data, request):
        """Run ImageMagick on the upload; return (absolute path, name) of the PNG."""
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        # make everything transparent, then restore opacity inside the clip path
        command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
        process = subprocess.Popen(
            command.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'command: {command}')
        logger.info(f'process resultcode: {process.returncode}')
        # the uploaded source is no longer needed
        os.remove(input_filepath)
        return output_filepath, output_filename
class Png2Eps(ImgProcessAPIView):
    """Convert an uploaded PNG into an EPS, preserving transparency via a clip path.

    Pipeline (ImageMagick + GIMP): extract alpha -> vectorize to SVG ->
    raster PNG->TIFF -> GIMP applies the SVG clip path -> final TIFF->EPS.
    """
    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return 'postscript'
    def process_request(self, clean_data, request):
        """Run the PNG->EPS pipeline; return (absolute path, name) of the EPS."""
        # TODO: convert png-alpha to svg
        # convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
        # convert easy_alpha.png easy_alpha.svg
        # convert png to tiff
        # gimp tiff with alpha.svg
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.eps"
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        output_alpha_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_alpha.png")
        command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
        output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
        command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
        output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_tmp.tiff")
        output_filepath_tiff = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_final.tiff")
        command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
        command_tiff_to_eps = f'convert {output_filepath_tiff} {output_filepath}'
        # 1. extract alpha channel as a grayscale mask
        logger.info(f'command: {command_extract_alpha}')
        process = subprocess.Popen(
            command_extract_alpha.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'process resultcode: {process.returncode}')
        # 2. vectorize the alpha mask
        logger.info(f'command: {command_alpha_svg}')
        process = subprocess.Popen(
            command_alpha_svg.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'process resultcode: {process.returncode}')
        # 3. plain raster conversion of the source to TIFF
        logger.info(f'command: {command_png_to_tiff}')
        process = subprocess.Popen(
            command_png_to_tiff.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'process resultcode: {process.returncode}')
        # 4. apply the SVG clip path inside GIMP (shell needed for script-fu quoting)
        gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath_tiff}\" )' -b '(gimp-quit 0)'"
        logger.info(f'command: {gimp_command}')
        process = subprocess.Popen(
            gimp_command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(20)
        logger.info(f'process resultcode: {process.returncode}')
        # 5. final conversion of the clipped TIFF to EPS
        logger.info(f'command: {command_tiff_to_eps}')
        process = subprocess.Popen(
            command_tiff_to_eps.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(10)
        logger.info(f'process resultcode: {process.returncode}')
        # drop the source and all intermediates, keep only the final EPS
        os.remove(input_filepath)
        os.remove(output_alpha_filepath)
        os.remove(output_svg_filepath)
        os.remove(output_tiff_tmp_filepath)
        os.remove(output_filepath_tiff)
        return output_filepath, output_filename
| 39.51505 | 158 | 0.652306 | import os.path
import typing
import subprocess
import base64
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.urls.base import resolve
from django.views.decorators.csrf import csrf_exempt
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import BaseFilterBackend
from rest_framework.response import Response
from rest_framework.schemas import coreapi
from rest_framework.views import APIView, Request
from rest_framework import status
from rest_framework.exceptions import ValidationError
from ..drf_auth_override import CsrfExemptSessionAuthentication
from ..utils import xresponse, get_pretty_logger, file_hash, ErrorCode, source_hash, encode_base64
from ..exceptions import ParamError
from ..serializers import ImgSerializer
from ..views import schema_utils
logger = get_pretty_logger('api:views')
class RequestImgFilterBackend(BaseFilterBackend):
    """Filter backend stub for image requests: contributes no schema fields."""
    def get_schema_fields(self, view):
        """Return an empty list — nothing to add to the generated schema."""
        return [
        ]
def validate_payload(serializer_class, payload: dict) -> dict:
    """Validate an upload payload, persist the uploaded file and return the
    validated data extended with ``filename`` and ``storage``."""
    img_serializer = serializer_class(data=payload)
    img_serializer.is_valid(raise_exception=True)
    clean_data = img_serializer.validated_data
    # joining the name parts drops the dots ("a.b.png" -> "ab"); spaces -> '_'
    name = ''.join(clean_data['source'].name.split('.')[:-1]).replace('.', '_').replace(' ', '_')
    suffix = ''.join(clean_data['source'].name.split('.')[-1:])
    # default_storage may deduplicate, so keep the name it actually chose
    filename = default_storage.save(f'{name}.{suffix}', clean_data['source'])
    clean_data['filename'] = filename
    clean_data['storage'] = default_storage.location
    return clean_data
class ImgProcessAPIView(APIView):
    """Base view for image conversion endpoints; subclasses implement
    ``process_request`` and ``return_format``. ``POST`` returns either the
    converted image bytes (``output=image``) or a redirect to its media URL
    (``output=url``)."""
    filter_backends = (RequestImgFilterBackend,)
    serializer_class = ImgSerializer
    authentication_classes = (CsrfExemptSessionAuthentication,)
    def process_request(self, clean_data, request):
        """Convert the uploaded file; return (absolute path, file name) of the result."""
        raise NotImplementedError('not implemented')
    @property
    def return_format(self):
        """Image subtype used for the HTTP ``Content-Type`` header."""
        return ''
    @swagger_auto_schema(operation_description="",
                         manual_parameters=[Parameter('output', in_='query', required=True, type='string')],
                         request_body=serializer_class,
                         responses={200: schema_utils.xresponse_ok(),
                                    400: schema_utils.xresponse_nok()})
    def post(self, request):
        """Validate the upload, run the conversion and deliver the result."""
        # output defaults to inline image bytes
        if 'output' not in request.query_params:
            output = 'image'
        else:
            output = str(request.query_params['output']).lower()
        supported_output_formats = ['image', 'url']
        if output not in supported_output_formats:
            return xresponse(
                status=status.HTTP_400_BAD_REQUEST,
                error_code=ErrorCode.InvalidParams,
                msg=f'Unhandled output format. Selected: {output} available: [{", ".join(supported_output_formats)}]'
            )
        try:
            clean_data = validate_payload(self.serializer_class, request.data)
        except ParamError as e:
            return xresponse(status.HTTP_400_BAD_REQUEST, e.error_code, e.msg)
        try:
            output_filepath, output_filename = self.process_request(clean_data, request)
            if output == 'image':
                # stream the converted file back inline
                with open(output_filepath, 'rb') as file:
                    return HttpResponse(content=file.read(), content_type=f'image/{self.return_format}')
            else:
                # point the client at the stored media file instead
                return HttpResponse(
                    status=status.HTTP_303_SEE_OTHER,
                    headers={
                        'Location': request.build_absolute_uri(f'{settings.MEDIA_URL}{output_filename}')
                    },
                )
        except Exception as e:
            # any conversion failure is reported as NotFound
            return xresponse(status.HTTP_400_BAD_REQUEST, ErrorCode.NotFound, e)
class Png2Tiff(ImgProcessAPIView):
    """Convert an uploaded PNG into a TIFF with an embedded clip path.

    Pipeline (ImageMagick ``convert`` + GIMP batch mode):
      1. extract the PNG alpha channel to a grayscale mask PNG,
      2. convert the mask into an SVG,
      3. convert the source PNG to a temporary TIFF,
      4. apply the SVG as a clip path on the TIFF via the GIMP
         ``svg-clip-path`` batch script.
    All intermediate files are deleted afterwards.
    """
    @property
    def return_format(self):
        # MIME subtype for the inline ``image/...`` response.
        return 'tiff'
    def _run(self, command, timeout, shell=False):
        """Run *command*, wait up to *timeout* seconds, log its exit code.

        The exit code is logged but deliberately not acted upon, preserving
        the endpoint's original best-effort behaviour.
        """
        logger.info(f'command: {command}')
        process = subprocess.Popen(
            command if shell else command.split(' '),
            shell=shell,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(timeout)
        logger.info(f'process resultcode: {process.returncode}')
        return process.returncode
    def process_request(self, clean_data, request):
        """Convert the stored upload to TIFF; return (filepath, filename)."""
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        # Filename without its extension, shared by every derived path below.
        basename = ''.join(clean_data['filename'].split('.')[:-1])
        output_filename = f'{basename}.tiff'
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        output_alpha_filepath = os.path.join(clean_data['storage'], f'{basename}_alpha.png')
        output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
        output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f'{basename}_tmp.tiff')
        # 1. alpha channel -> grayscale mask
        self._run(f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}', 10)
        # 2. mask -> SVG clip path
        self._run(f'convert {output_alpha_filepath} {output_svg_filepath}', 10)
        # 3. source PNG -> temporary TIFF
        self._run(f'convert {input_filepath} {output_tiff_tmp_filepath}', 10)
        # 4. clip the TIFF with the SVG path inside GIMP.
        # NOTE(review): this command runs through the shell and the file names
        # derive from the uploaded file name — keep serializer-side filename
        # sanitising strict to avoid shell injection.
        gimp_command = (
            f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" "
            f"\"{output_svg_filepath}\" \"{output_filepath}\" )' -b '(gimp-quit 0)'"
        )
        self._run(gimp_command, 20, shell=True)
        # Clean up the upload and every intermediate artefact.
        os.remove(input_filepath)
        os.remove(output_alpha_filepath)
        os.remove(output_svg_filepath)
        os.remove(output_tiff_tmp_filepath)
        return output_filepath, output_filename
class Tiff2Png(ImgProcessAPIView):
    """Convert an uploaded TIFF to PNG, turning its clip path into transparency."""
    @property
    def return_format(self):
        # MIME subtype for the inline ``image/...`` response.
        return 'png'
    def process_request(self, clean_data, request):
        """Convert the stored TIFF upload to PNG; return (filepath, filename)."""
        storage = clean_data['storage']
        stem = ''.join(clean_data['filename'].split('.')[:-1])
        src_path = os.path.join(storage, clean_data['filename'])
        dst_name = f'{stem}.png'
        dst_path = os.path.join(storage, dst_name)
        command = f'convert {src_path} -alpha transparent -clip -alpha opaque {dst_path}'
        proc = subprocess.Popen(
            command.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        # Wait up to 10 s; the exit code is logged but not checked.
        proc.wait(10)
        logger.info(f'command: {command}')
        logger.info(f'process resultcode: {proc.returncode}')
        # The original upload is no longer needed once converted.
        os.remove(src_path)
        return dst_path, dst_name
class Eps2Png(ImgProcessAPIView):
    """Convert an uploaded EPS file to PNG via ImageMagick ``convert``."""
    @property
    def return_format(self):
        # MIME subtype for the inline ``image/...`` response.
        return 'png'
    def process_request(self, clean_data, request):
        """Convert the stored upload; return (output_filepath, output_filename)."""
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        # NOTE(review): identical command to Tiff2Png — the -clip flags only
        # take effect when the input carries a clip path; confirm this is
        # intended for EPS input.
        command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
        process = subprocess.Popen(
            command.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        # Wait up to 10 s; the exit code is logged but not checked.
        process.wait(10)
        logger.info(f'command: {command}')
        logger.info(f'process resultcode: {process.returncode}')
        os.remove(input_filepath)
        return output_filepath, output_filename
class Png2Eps(ImgProcessAPIView):
    """Convert an uploaded PNG into an EPS with its alpha applied as a clip path.

    Pipeline (ImageMagick ``convert`` + GIMP batch mode):
      1. extract the PNG alpha channel to a grayscale mask PNG,
      2. convert the mask into an SVG,
      3. convert the source PNG to a temporary TIFF,
      4. clip the TIFF with the SVG path via GIMP into a final TIFF,
      5. convert the final TIFF to EPS.
    All intermediate files are deleted afterwards.
    """
    @property
    def return_format(self):
        # MIME subtype for the inline ``image/...`` response.
        return 'postscript'
    def _run(self, command, timeout, shell=False):
        """Run *command*, wait up to *timeout* seconds, log its exit code.

        The exit code is logged but deliberately not acted upon, preserving
        the endpoint's original best-effort behaviour.
        """
        logger.info(f'command: {command}')
        process = subprocess.Popen(
            command if shell else command.split(' '),
            shell=shell,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        process.wait(timeout)
        logger.info(f'process resultcode: {process.returncode}')
        return process.returncode
    def process_request(self, clean_data, request):
        """Convert the stored upload to EPS; return (filepath, filename)."""
        input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
        # Filename without its extension, shared by every derived path below.
        basename = ''.join(clean_data['filename'].split('.')[:-1])
        output_filename = f'{basename}.eps'
        output_filepath = os.path.join(clean_data['storage'], output_filename)
        output_alpha_filepath = os.path.join(clean_data['storage'], f'{basename}_alpha.png')
        output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
        output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f'{basename}_tmp.tiff')
        output_filepath_tiff = os.path.join(clean_data['storage'], f'{basename}_final.tiff')
        # 1. alpha channel -> grayscale mask
        self._run(f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}', 10)
        # 2. mask -> SVG clip path
        self._run(f'convert {output_alpha_filepath} {output_svg_filepath}', 10)
        # 3. source PNG -> temporary TIFF
        self._run(f'convert {input_filepath} {output_tiff_tmp_filepath}', 10)
        # 4. clip the TIFF with the SVG path inside GIMP.
        # NOTE(review): this command runs through the shell and the file names
        # derive from the uploaded file name — keep serializer-side filename
        # sanitising strict to avoid shell injection.
        gimp_command = (
            f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" "
            f"\"{output_svg_filepath}\" \"{output_filepath_tiff}\" )' -b '(gimp-quit 0)'"
        )
        self._run(gimp_command, 20, shell=True)
        # 5. clipped TIFF -> EPS
        self._run(f'convert {output_filepath_tiff} {output_filepath}', 10)
        # Clean up the upload and every intermediate artefact.
        os.remove(input_filepath)
        os.remove(output_alpha_filepath)
        os.remove(output_svg_filepath)
        os.remove(output_tiff_tmp_filepath)
        os.remove(output_filepath_tiff)
        return output_filepath, output_filename
| true | true |
f739daf96625eab4d26909f01a851f398faca1d4 | 92 | py | Python | webapi/apps.py | erisenlee/django_rest | e358a80f7a7d9214decb7756e2a7838727d025a6 | [
"Apache-2.0"
] | null | null | null | webapi/apps.py | erisenlee/django_rest | e358a80f7a7d9214decb7756e2a7838727d025a6 | [
"Apache-2.0"
] | null | null | null | webapi/apps.py | erisenlee/django_rest | e358a80f7a7d9214decb7756e2a7838727d025a6 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class WebapiConfig(AppConfig):
    """Django application configuration for the ``webapi`` app."""
    # App label Django uses to register this application in INSTALLED_APPS.
    name = 'webapi'
| 15.333333 | 34 | 0.706522 | from django.apps import AppConfig
class WebapiConfig(AppConfig):
name = 'webapi'
| true | true |
f739dc43a3b5dc86bc24302e5ffba6affbd418f0 | 603 | py | Python | user/migrations/0004_auto_20200720_1302.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | null | null | null | user/migrations/0004_auto_20200720_1302.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | 5 | 2021-04-08T21:57:34.000Z | 2022-02-10T12:43:03.000Z | user/migrations/0004_auto_20200720_1302.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-07-20 08:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``national_code`` and ``phone_number``
    on ``CustomUser`` nullable while keeping them unique."""
    dependencies = [
        ('user', '0003_auto_20200720_1241'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='national_code',
            # null=True with unique=True: uniqueness only constrains rows that
            # actually carry a value (NULLs never collide).
            field=models.CharField(max_length=10, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='phone_number',
            field=models.CharField(max_length=13, null=True, unique=True),
        ),
    ]
| 25.125 | 74 | 0.600332 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20200720_1241'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='national_code',
field=models.CharField(max_length=10, null=True, unique=True),
),
migrations.AlterField(
model_name='customuser',
name='phone_number',
field=models.CharField(max_length=13, null=True, unique=True),
),
]
| true | true |
f739dd52f32fd2f369e45dfce015fb59aa8b84fb | 3,558 | py | Python | genesis/DDChar.py | Zaltu/content-generator | 23921dad5a95e4d58bae1c2231c5db985ae7ec2e | [
"MIT"
] | null | null | null | genesis/DDChar.py | Zaltu/content-generator | 23921dad5a95e4d58bae1c2231c5db985ae7ec2e | [
"MIT"
] | null | null | null | genesis/DDChar.py | Zaltu/content-generator | 23921dad5a95e4d58bae1c2231c5db985ae7ec2e | [
"MIT"
] | null | null | null | """
Module to generate random D&D characters.
"""
import os
import math
import random
import aigis
import fantasyName
DEFAULT_PLAYER_NAME = "Dungeon Master"
DEFAULT_SAVE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))
CHAR_ATTR_NAMES = {"strength", "dexterity", "intelligence", "wisdom", "constitution", "charisma"}
# Ensure the tmp folder exists
if not os.path.exists(os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))):
os.makedirs(os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp")))
def _generate_initial_data(**kwargs):
    """
    Generate the data used to auto-generate a D&D character.

    Any field a caller does not supply (or supplies as a falsy value — note
    the ``or`` defaults below, so e.g. ``level=0`` falls back to 1) is chosen
    at random from the ``aigis.dnd`` tables.

    :param kwargs: any user input parameters
    :returns: stats required to build a character
    :rtype: dict
    """
    stats = {
        "name": kwargs.get("name") or fantasyName.generate(),
        "player_name": kwargs.get("player") or DEFAULT_PLAYER_NAME,
        "alignment": kwargs.get("alignment") or random.sample(aigis.dnd.ALIGNMENTS, 1)[0],
        "level": kwargs.get("level") or 1,
        "race": kwargs.get("race") or random.sample(aigis.dnd.RACES, 1)[0],
        "class": kwargs.get("class") or random.sample(aigis.dnd.CLASSES, 1)[0]
    }
    # Characters of level 3 or higher get a subclass (condition is level > 2).
    if stats["level"] > 2:
        stats["subclass"] = kwargs.get("subclass") or \
            random.sample(aigis.dnd.SUBCLASSES[stats["class"]], 1)[0]
    # Level 1 HP: the class's full hit die value.
    stats["hp_max"] = aigis.dnd.CLASS_HP[stats["class"]]
    # HP for the remaining levels — presumably one hit-die roll per level
    # beyond the first; confirm against aigis.dnd.xdy's argument order.
    stats["hp_max"] += sum(aigis.dnd.xdy(aigis.dnd.CLASS_HP[stats["class"]], stats["level"]-1))
    stats["background"] = kwargs.get("background") or random.sample(aigis.dnd.BACKGROUNDS, 1)[0]
    stats["languages"] = kwargs.get("languages") or ", ".join(random.sample(aigis.dnd.LANGUAGES, 2))
    stats["weapons"] = kwargs.get("weapons") or random.sample(aigis.dnd.WEAPONS, 2)
    stats["armor"] = kwargs.get("armor") or random.sample(aigis.dnd.ARMOR, 1)[0]
    return stats
def _generate_random_stats():
    """Roll seven drop-lowest pools and return the best six totals, sorted.

    Each pool is ``aigis.dnd.xdy(6, 4)`` with its lowest die dropped (4d6
    drop-lowest per the module's convention). The whole set is re-rolled
    whenever the six kept totals sum to less than 10.

    :returns: six pool totals in ascending order
    :rtype: list
    """
    totals = [sum(sorted(aigis.dnd.xdy(6, 4))[1:]) for _ in range(7)]
    totals.sort()
    kept = totals[1:]  # discard the single worst pool
    if sum(kept) < 10:
        return _generate_random_stats()
    return kept
def _generate_attr_values():
    """Assign one rolled value to each character attribute (Str, Dex, ...).

    Draws six totals from :func:`_generate_random_stats` and pops one at a
    random position for every attribute in ``CHAR_ATTR_NAMES``.

    :returns: attribute name -> rolled value
    :rtype: dict
    """
    pool = _generate_random_stats()
    return {
        attr: pool.pop(random.randint(0, len(pool) - 1))
        for attr in CHAR_ATTR_NAMES
    }
def generate_dnd_character(**kwargs):
    """
    Generate a D&D character sheet.

    :param kwargs: any user input parameters (name, player, level, class, ...)
    :returns: path on disk to the generated character sheet PDF
    :rtype: str
    :raises FileNotFoundError: if the sheet PDF was not generated
    """
    stats = _generate_initial_data(**kwargs)
    stats.update(_generate_attr_values())
    # Add the Constitution modifier, floor((CON - 10) / 2), to HP per level.
    stats["hp_max"] += (math.floor(int(stats["constitution"])/2 - 5)) * stats["level"]
    # TODO depending on class, generate chosen spells, etc...
    paths = aigis.dnd.create_character_sheet(DEFAULT_SAVE_PATH, **stats)
    try:
        # Delete the intermediate .FDF form-data file; only the PDF matters.
        os.remove(paths[1])
    except OSError:
        # FileNotFoundError is a subclass of OSError, so listing both (as the
        # old code did) was redundant. If the FDF is missing, the PDF must at
        # least exist — raise explicitly instead of using `assert`, which is
        # stripped when Python runs with -O.
        if not os.path.exists(paths[0]):
            raise FileNotFoundError(f'character sheet was not generated: {paths[0]}')
    return paths[0]
| 30.410256 | 100 | 0.643058 | import os
import math
import random
import aigis
import fantasyName
DEFAULT_PLAYER_NAME = "Dungeon Master"
DEFAULT_SAVE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))
CHAR_ATTR_NAMES = {"strength", "dexterity", "intelligence", "wisdom", "constitution", "charisma"}
if not os.path.exists(os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))):
os.makedirs(os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp")))
def _generate_initial_data(**kwargs):
stats = {
"name": kwargs.get("name") or fantasyName.generate(),
"player_name": kwargs.get("player") or DEFAULT_PLAYER_NAME,
"alignment": kwargs.get("alignment") or random.sample(aigis.dnd.ALIGNMENTS, 1)[0],
"level": kwargs.get("level") or 1,
"race": kwargs.get("race") or random.sample(aigis.dnd.RACES, 1)[0],
"class": kwargs.get("class") or random.sample(aigis.dnd.CLASSES, 1)[0]
}
if stats["level"] > 2:
stats["subclass"] = kwargs.get("subclass") or \
random.sample(aigis.dnd.SUBCLASSES[stats["class"]], 1)[0]
stats["hp_max"] = aigis.dnd.CLASS_HP[stats["class"]]
stats["hp_max"] += sum(aigis.dnd.xdy(aigis.dnd.CLASS_HP[stats["class"]], stats["level"]-1))
stats["background"] = kwargs.get("background") or random.sample(aigis.dnd.BACKGROUNDS, 1)[0]
stats["languages"] = kwargs.get("languages") or ", ".join(random.sample(aigis.dnd.LANGUAGES, 2))
stats["weapons"] = kwargs.get("weapons") or random.sample(aigis.dnd.WEAPONS, 2)
stats["armor"] = kwargs.get("armor") or random.sample(aigis.dnd.ARMOR, 1)[0]
return stats
def _generate_random_stats():
vals = []
for _ in range(0, 7):
rolls = aigis.dnd.xdy(6, 4)
rolls.sort()
rolls = rolls[1:]
vals.append(sum(rolls))
vals.sort()
if sum(vals[1:]) < 10:
return _generate_random_stats()
return vals[1:]
def _generate_attr_values():
vals = _generate_random_stats()
stats = {}
for attr in CHAR_ATTR_NAMES:
stats[attr] = vals.pop(random.randint(0, len(vals)-1))
return stats
def generate_dnd_character(**kwargs):
stats = _generate_initial_data(**kwargs)
stats.update(_generate_attr_values())
stats["hp_max"] += (math.floor(int(stats["constitution"])/2 - 5)) * stats["level"]
paths = aigis.dnd.create_character_sheet(DEFAULT_SAVE_PATH, **stats)
try:
os.remove(paths[1])
except (OSError, FileNotFoundError):
# The FDF doesnt seem to exist, does the PDF?
assert os.path.exists(paths[0])
return paths[0]
| true | true |
f739de75a3cf69f888c94aa5f38ccd86cabff331 | 454 | py | Python | src/package/__main__.py | behnazh/python-package-template | 32248a470e31a596357e68f04119cb8f9614d0a4 | [
"MIT"
] | 13 | 2021-09-29T08:10:35.000Z | 2022-03-26T08:14:12.000Z | src/package/__main__.py | behnazh/python-package-template | 32248a470e31a596357e68f04119cb8f9614d0a4 | [
"MIT"
] | 114 | 2021-09-29T11:26:15.000Z | 2022-03-31T22:37:28.000Z | src/package/__main__.py | behnazh/python-package-template | 32248a470e31a596357e68f04119cb8f9614d0a4 | [
"MIT"
] | 5 | 2021-09-29T21:53:19.000Z | 2022-03-28T12:00:59.000Z | """The main entry point into this package when run as a script."""
# For more details, see also
# https://docs.python.org/3/library/runpy.html
# https://docs.python.org/3/reference/import.html#special-considerations-for-main
import os
import sys
from .something import Something
def main():
    """Run the ``Something`` standalone command-line tool once."""
    # The return value is intentionally discarded; only side effects matter.
    Something.do_something()
if __name__ == "__main__":
    main()
    # Exit explicitly with the POSIX "success" status (os.EX_OK == 0).
    sys.exit(os.EX_OK)
| 21.619048 | 81 | 0.713656 |
mething import Something
def main():
_ = Something.do_something()
if __name__ == "__main__":
main()
sys.exit(os.EX_OK)
| true | true |
f739decb955aebb96c819ef667b45050ddd6f4c7 | 6,834 | py | Python | tests/perf-pulsar-tests/pulsar-gentest_10k_16p_100_rate_2min.py | jingerbread/p3_test_driver | aa1fbd52bcb502cac013d38d267be66f0dbd693e | [
"Apache-2.0"
] | null | null | null | tests/perf-pulsar-tests/pulsar-gentest_10k_16p_100_rate_2min.py | jingerbread/p3_test_driver | aa1fbd52bcb502cac013d38d267be66f0dbd693e | [
"Apache-2.0"
] | null | null | null | tests/perf-pulsar-tests/pulsar-gentest_10k_16p_100_rate_2min.py | jingerbread/p3_test_driver | aa1fbd52bcb502cac013d38d267be66f0dbd693e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import json
import sys
test_list = []
localWorker = False
tarball = '../package/target/openmessaging-benchmark-0.0.1-SNAPSHOT-bin.tar.gz'
build = False
# Run 10kb 100 rate 16 partitions 2min
# "workload": {
# "consumerBacklogSizeGB": 0,
# "consumerPerSubscription": 16,
# "keyDistributor": "NO_KEY",
# "messageSize": 10000,
# "partitionsPerTopic": 16,
# "producerRate": 100.0,
# "producersPerTopic": 4,
# "subscriptionsPerTopic": 1,
# "testDurationMinutes": 5,
# "topics": 1
# }
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [2]:
for messageSize in [10000]: # , 10000]:
messageSize = int(messageSize)
eps = []
MBps = []
ppw = []
if messageSize <= 100:
eps = [5e4] # producerRateEventsPerSec
ppw = [1] # producersPerWorker
elif messageSize <= 10000:
eps += [100] # [3e1, 1e2, 3e2, 1e3, 3e3, 1e4, 3e4, -1]
ppw = [1] # [2] # producersPerWorker
else:
eps += [1] # , 3, 10, 30, 50, 70, 90, -1]
ppw = [4]
eps += [x * 1e6 / messageSize for x in MBps]
for producerRateEventsPerSec in eps:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in ppw:
producersPerTopic = 4 # int(producersPerWorker * producerWorkers)
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for ackQuorum in [2]: # 2 causes OOM in Bookie at max rate
driver = {
'name': 'Pulsar',
'driverClass': 'io.openmessaging.benchmark.driver.pulsar.PulsarBenchmarkDriver',
'client': {
'ioThreads': 8,
'connectionsPerBroker': 8,
'clusterName': 'local',
'namespacePrefix': 'benchmark/ns',
'topicType': 'persistent',
'persistence': {'ensembleSize': 3,
'writeQuorum': 3,
'ackQuorum': ackQuorum,
'deduplicationEnabled': True},
'tlsAllowInsecureConnection': False,
'tlsEnableHostnameVerification': False,
'tlsTrustCertsFilePath': None,
'authentication': {'plugin': None, 'data': None}},
'producer': {'batchingEnabled': True,
'batchingMaxPublishDelayMs': 1,
'blockIfQueueFull': True,
'pendingQueueSize': 10000},
}
workload = {
'messageSize': messageSize,
'topics': topics,
'partitionsPerTopic': partitionsPerTopic,
'subscriptionsPerTopic': subscriptionsPerTopic,
'consumerPerSubscription': consumerPerSubscription,
'producersPerTopic': producersPerTopic,
'producerRate': producerRateEventsPerSec,
'consumerBacklogSizeGB': consumerBacklogSizeGB,
'testDurationMinutes': testDurationMinutes,
'keyDistributor': 'NO_KEY',
}
t = dict(
test='openmessaging-benchmark',
max_test_attempts=1,
# result_filename_long='data/results/json/%(test)s__%(messageSize)d__%(partitionsPerTopic)d__%(producerRateEventsPerSec)__%(testDurationMinutes)d__%(test_uuid)s.json',
# result_filename_long='data/results/json/%(test)s__%(workload)s__%(test_uuid)s.json',
# result_filename_long_s='data/results/json/%(test)s__%(workload["messageSize"])s__%(test_uuid)s.json',
# result_filename_long_d='data/results/json/%(test)s__%(workload["messageSize"])d__%(test_uuid)s.json',
result_filename='data/results/json/%(test)s_%(test_uuid)s.json',
driver=driver,
workload=workload,
numWorkers=numWorkers,
localWorker=localWorker,
tarball=tarball,
build=build,
undeploy=True,
)
test_list += [t]
build = False
# Emit the generated test list as JSON on stdout; the count goes to stderr so
# stdout stays machine-parseable.
print(json.dumps(test_list, sort_keys=True, indent=4, ensure_ascii=False))
print('Number of tests generated: %d' % len(test_list), file=sys.stderr)
| 62.127273 | 219 | 0.37372 |
from __future__ import print_function
import json
import sys
test_list = []
localWorker = False
tarball = '../package/target/openmessaging-benchmark-0.0.1-SNAPSHOT-bin.tar.gz'
build = False
for repeat in range(1):
for producerWorkers in [2]:
numWorkers = 0 if localWorker else producerWorkers*2
for testDurationMinutes in [2]:
for messageSize in [10000]:
messageSize = int(messageSize)
eps = []
MBps = []
ppw = []
if messageSize <= 100:
eps = [5e4]
ppw = [1]
elif messageSize <= 10000:
eps += [100]
ppw = [1] e:
eps += [1]
ppw = [4]
eps += [x * 1e6 / messageSize for x in MBps]
for producerRateEventsPerSec in eps:
for topics in [1]:
for partitionsPerTopic in [16]:
for producersPerWorker in ppw:
producersPerTopic = 4
for consumerBacklogSizeGB in [0]:
for subscriptionsPerTopic in [1]:
for consumerPerSubscription in [partitionsPerTopic]:
for ackQuorum in [2]:
driver = {
'name': 'Pulsar',
'driverClass': 'io.openmessaging.benchmark.driver.pulsar.PulsarBenchmarkDriver',
'client': {
'ioThreads': 8,
'connectionsPerBroker': 8,
'clusterName': 'local',
'namespacePrefix': 'benchmark/ns',
'topicType': 'persistent',
'persistence': {'ensembleSize': 3,
'writeQuorum': 3,
'ackQuorum': ackQuorum,
'deduplicationEnabled': True},
'tlsAllowInsecureConnection': False,
'tlsEnableHostnameVerification': False,
'tlsTrustCertsFilePath': None,
'authentication': {'plugin': None, 'data': None}},
'producer': {'batchingEnabled': True,
'batchingMaxPublishDelayMs': 1,
'blockIfQueueFull': True,
'pendingQueueSize': 10000},
}
workload = {
'messageSize': messageSize,
'topics': topics,
'partitionsPerTopic': partitionsPerTopic,
'subscriptionsPerTopic': subscriptionsPerTopic,
'consumerPerSubscription': consumerPerSubscription,
'producersPerTopic': producersPerTopic,
'producerRate': producerRateEventsPerSec,
'consumerBacklogSizeGB': consumerBacklogSizeGB,
'testDurationMinutes': testDurationMinutes,
'keyDistributor': 'NO_KEY',
}
t = dict(
test='openmessaging-benchmark',
max_test_attempts=1,
result_filename='data/results/json/%(test)s_%(test_uuid)s.json',
driver=driver,
workload=workload,
numWorkers=numWorkers,
localWorker=localWorker,
tarball=tarball,
build=build,
undeploy=True,
)
test_list += [t]
build = False
print(json.dumps(test_list, sort_keys=True, indent=4, ensure_ascii=False))
print('Number of tests generated: %d' % len(test_list), file=sys.stderr)
| true | true |
f739df0d02c431262465c9f9cddbb2964b329c63 | 1,077 | py | Python | twitoff/predict.py | EEdwardsA/Twitoff | e1c2613c233e81c5aa50fecb89e90c75b9bbdd01 | [
"MIT"
] | null | null | null | twitoff/predict.py | EEdwardsA/Twitoff | e1c2613c233e81c5aa50fecb89e90c75b9bbdd01 | [
"MIT"
] | null | null | null | twitoff/predict.py | EEdwardsA/Twitoff | e1c2613c233e81c5aa50fecb89e90c75b9bbdd01 | [
"MIT"
] | null | null | null | """Prediction of Users based on tweet embeddings"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, hypo_tweet_text):
    """
    Determine which user is more likely to say a hypothetical tweet.

    Fits a fresh logistic regression on the two users' stored tweet
    embeddings, then classifies the hypothetical tweet's embedding.

    Example run: predict_user('elonmusk', 'nasa', 'Tesla cars are rad')

    :returns: a length-1 ndarray holding 0.0 (user0) or 1.0 (user1) —
        note ``predict`` returns an array, not a bare int
    """
    # .one() raises if the user is missing or duplicated — presumably the
    # caller handles that; see the TODO below.
    user0 = User.query.filter(User.name == user0_name).one()
    # TODO: create try/except block
    user1 = User.query.filter(User.name == user1_name).one()
    # Stack both users' tweet embeddings into one training matrix, with
    # labels 0 for user0's rows and 1 for user1's rows (same row order).
    user0_vects = np.array([tweet.vect for tweet in user0.tweets])
    user1_vects = np.array([tweet.vect for tweet in user1.tweets])
    vects = np.vstack([user0_vects, user1_vects])
    labels = np.concatenate(
        [np.zeros(len(user0.tweets)), np.ones(len(user1.tweets))])
    hypo_tweet_vect = vectorize_tweet(hypo_tweet_text)
    # Train from scratch on every call; reshape(1, -1) because predict
    # expects a 2-D (n_samples, n_features) input.
    log_reg = LogisticRegression().fit(vects, labels)
    return log_reg.predict(hypo_tweet_vect.reshape(1, -1))
| 35.9 | 78 | 0.714949 |
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, hypo_tweet_text):
user0 = User.query.filter(User.name == user0_name).one()
user1 = User.query.filter(User.name == user1_name).one()
user0_vects = np.array([tweet.vect for tweet in user0.tweets])
user1_vects = np.array([tweet.vect for tweet in user1.tweets])
vects = np.vstack([user0_vects, user1_vects])
labels = np.concatenate(
[np.zeros(len(user0.tweets)), np.ones(len(user1.tweets))])
hypo_tweet_vect = vectorize_tweet(hypo_tweet_text)
log_reg = LogisticRegression().fit(vects, labels)
return log_reg.predict(hypo_tweet_vect.reshape(1, -1))
| true | true |
f739df2e27a2025e12785cdcbc5aea000c0d7616 | 9,220 | py | Python | docs/conf.py | adamtupper/pyneat | 12bf2bf936602c0da7c40cfcb99aced2eb981faa | [
"MIT"
] | null | null | null | docs/conf.py | adamtupper/pyneat | 12bf2bf936602c0da7c40cfcb99aced2eb981faa | [
"MIT"
] | null | null | null | docs/conf.py | adamtupper/pyneat | 12bf2bf936602c0da7c40cfcb99aced2eb981faa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
import sphinx_rtd_theme
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try:  # for Sphinx >= 1.7, apidoc lives in sphinx.ext
    from sphinx.ext import apidoc
except ImportError:
    from sphinx import apidoc

output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/pyneat")
try:
    # Remove any previously generated API stubs; absence is fine.
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass

try:
    import sphinx
    from pkg_resources import parse_version

    # Build the argument vector as a list instead of formatting a string and
    # splitting on spaces: the old split(" ") broke whenever __location__
    # contained a space.
    args = ["sphinx-apidoc", "-f", "-e", "-o", output_dir, module_dir]
    if parse_version(sphinx.__version__) >= parse_version('1.7'):
        # Sphinx >= 1.7 expects argv without the program name.
        args = args[1:]
    apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon', 'sphinx_rtd_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyNEAT'
copyright = u'2020, Adam Tupper'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'sidebar_width': '300px',
# 'page_width': '1200px'
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Pull the real release string from the installed package; silently keep the
# placeholder set above when pyneat is not importable (e.g. docs built
# without the package installed). The import itself rebinds `version`.
try:
    from pyneat import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyneat-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'pyneat Documentation',
u'Adam Tupper', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| 33.649635 | 85 | 0.703254 |
import os
import sys
import inspect
import shutil
import sphinx_rtd_theme
# Absolute path of the directory containing this conf.py.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# Make the (uninstalled) package sources importable for autodoc.
sys.path.insert(0, os.path.join(__location__, '../src'))
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try:  # for Sphinx >= 1.7
    from sphinx.ext import apidoc
except ImportError:
    from sphinx import apidoc
# Regenerate the API reference under docs/api from the package on each build.
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/pyneat")
try:
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass
try:
    import sphinx
    from pkg_resources import parse_version
    cmd_line_template = "sphinx-apidoc -f -e -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    args = cmd_line.split(" ")
    if parse_version(sphinx.__version__) >= parse_version('1.7'):
        # Sphinx >= 1.7 apidoc.main() no longer expects the program name
        # as the first argument, so drop it.
        args = args[1:]
    apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# Sphinx extension modules used by this documentation build.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
              'sphinx.ext.napoleon', 'sphinx_rtd_theme']
# Paths that contain templates, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyNEAT'
copyright = u'2020, Adam Tupper'
# Filled in from the installed package below; falls back to empty strings.
version = ''
release = ''
# Patterns to ignore when looking for source files.
exclude_patterns = ['_build']
# Syntax highlighting style and HTML theme.
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
try:
    from pyneat import __version__ as version
except ImportError:
    pass
else:
    release = version
# Paths that contain custom static files, relative to this directory.
html_static_path = ['_static']
# Output file base name for the HTML help builder.
htmlhelp_basename = 'pyneat-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
}
# Grouping of the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'user_guide.tex', u'pyneat Documentation',
     u'Adam Tupper', 'manual'),
]
# -- External mapping: intersphinx targets for cross-project references --------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org', None),
    'numpy': ('https://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| true | true |
f739e05d1ddd698ac486fa39441422e162da3200 | 308 | py | Python | pyobjc_core-3.3a0-py3.6-macosx-10.13-x86_64.egg/objc/_machsignals.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pyobjc_core-3.3a0-py3.6-macosx-10.13-x86_64.egg/objc/_machsignals.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pyobjc_core-3.3a0-py3.6-macosx-10.13-x86_64.egg/objc/_machsignals.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | def __bootstrap__():
    # setuptools-generated loader stub: replaces this pure-Python module with
    # the bundled compiled C extension when first imported.
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the platform-specific shared object shipped inside the package.
    __file__ = pkg_resources.resource_filename(__name__, '_machsignals.cpython-36dm-darwin.so')
    # Drop the bootstrap helpers so the loaded extension's namespace is clean.
    __loader__ = None; del __bootstrap__, __loader__
    # Load the extension in place of this module.
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
| 38.5 | 95 | 0.775974 | def __bootstrap__():
    # setuptools-generated loader stub for the _machsignals C extension.
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Locate the bundled shared object inside the installed package.
    __file__ = pkg_resources.resource_filename(__name__, '_machsignals.cpython-36dm-darwin.so')
    # Remove the bootstrap helpers before handing the namespace over.
    __loader__ = None; del __bootstrap__, __loader__
    # Replace this stub module with the compiled extension.
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
| true | true |
f739e0f6cbe53359e04eb92aacc4dd17a0af9a17 | 1,030 | py | Python | qiushaoyi/programs/qsy_program_codes/aqi_study.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 2 | 2018-03-29T08:26:17.000Z | 2019-06-17T10:56:19.000Z | qiushaoyi/programs/qsy_program_codes/aqi_study.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2022-03-22T20:26:08.000Z | 2022-03-22T20:26:08.000Z | qiushaoyi/programs/qsy_program_codes/aqi_study.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | [
"MIT"
] | 1 | 2019-02-18T10:44:20.000Z | 2019-02-18T10:44:20.000Z | import requests, csv
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib as plt
def get_html_text(url):
    """Fetch *url* over HTTP (30 second timeout) and return the body as text."""
    response = requests.get(url, timeout=30)
    return response.text
def process_cvs_w_file(filepath, file_list):
    """
    Write ``file_list`` (an iterable of row sequences) to ``filepath`` as a
    UTF-8 encoded CSV file, overwriting any existing file.

    ``newline=''`` is required so the csv module controls line endings itself.
    """
    with open(filepath, mode='w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        # writerows writes every row in a single call instead of a manual loop.
        writer.writerows(file_list)
def main():
    """Fetch the Beijing 2018-05 daily AQI page and print its data table."""
    html_url = 'https://www.aqistudy.cn/historydata/daydata.php?city=北京&month=2018-05'
    html_text = get_html_text(html_url)
    soup = BeautifulSoup(html_text, 'lxml')
    # First (and only expected) content column on the page.
    origin_div_list = soup.find_all('div', {'class': 'col-lg-9 col-md-8 col-sm-8 col-xs-12'})[0]
    # The AQI data table inside that column.
    origin_div_list_table = origin_div_list.find_all('table', {
        'class': 'table table-condensed table-bordered table-striped table-hover table-responsive'})
    print(origin_div_list_table)
if __name__ == '__main__':
    # Run the scraper only when executed as a script, not when imported.
    main()
| 26.410256 | 100 | 0.656311 | import requests, csv
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib as plt
def get_html_text(url):
    """Fetch *url* over HTTP (30 second timeout) and return the body as text."""
    r = requests.get(url, timeout=30)
    return r.text
def process_cvs_w_file(filepath, file_list):
    """Write ``file_list`` (an iterable of rows) to ``filepath`` as UTF-8 CSV."""
    with open(filepath, mode='w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        for line in file_list:
            writer.writerow(line)
def main():
    """Fetch the Beijing 2018-05 daily AQI page and print its data table."""
    html_url = 'https://www.aqistudy.cn/historydata/daydata.php?city=北京&month=2018-05'
    aqi_list = []  # NOTE(review): unused -- presumably meant to collect parsed rows.
    html_text = get_html_text(html_url)
    soup = BeautifulSoup(html_text, 'lxml')
    # First matching content column on the page.
    origin_div_list = soup.find_all('div', {'class': 'col-lg-9 col-md-8 col-sm-8 col-xs-12'})[0]
    origin_div_list_table = origin_div_list.find_all('table', {
        'class': 'table table-condensed table-bordered table-striped table-hover table-responsive'})
    print(origin_div_list_table)
if __name__ == '__main__':
    # Script entry point.
    main()
| true | true |
f739e241dc63d227d2bcd389328ed72c0a2fead1 | 2,870 | py | Python | python/hostconfig/machines/hardyvm.py | DGermano8/ChasteDom | 539a3a811698214c0938489b0cfdffd1abccf667 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/hardyvm.py | DGermano8/ChasteDom | 539a3a811698214c0938489b0cfdffd1abccf667 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/hardyvm.py | DGermano8/ChasteDom | 539a3a811698214c0938489b0cfdffd1abccf667 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Configuration for Joe's machines
"""Copyright (c) 2005-2018, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
# PETSc installations available on this host (None = that version is absent).
petsc_2_2_path = None
petsc_2_3_path = '../../petsc-2.3.2-p10/'
petsc_3_0_path = None
# PETSc build (arch) names for the default, optimised and production builds.
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
petsc_build_name_production = 'linux-intel-opt-mkl'
dealii_path = None
parmetis_path = '../../ParMetis-3.1'
# Intel compiler install prefix and C++ compiler executable name.
intel_path = ''
icpc = 'icpc'
# Extra header/library search paths for HDF5, XSD and ParMETIS.
other_includepaths = ['../../hdf5/include',
                      '../../xsd-2.3.1-i686-linux-gnu/libxsd', parmetis_path]
other_libpaths = [os.path.join(petsc_2_3_path, 'externalpackages/f2cblaslapack/linux-gnu-opt/'),
                  '../../hdf5/lib', parmetis_path]
# BLAS/LAPACK: f2c reference libraries normally; Intel MKL for production builds.
blas_lapack = ['f2clapack', 'f2cblas']
blas_lapack_production = ['mkl_lapack', 'mkl', 'svml']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c', 'hdf5', 'z', 'parmetis', 'metis']
# Locations of the MPI launcher/compiler wrapper and the XSD schema compiler.
tools = {'mpirun': '../../mpi/bin/mpirun',
         'mpicxx': '../../mpi/bin/mpicxx',
         'xsd': '../../xsd-2.3.1-i686-linux-gnu/bin/xsd'}
# Optionally link against VTK 5.0 for visualisation output.
use_vtk = True
if use_vtk:
    other_libraries.extend(['vtkGraphics', 'vtkFiltering', 'vtkIO', 'vtkCommon', 'z'])
    other_includepaths.extend(['/usr/include/vtk-5.0/'])
# Whether the infrastructure tests should run on this machine.
do_inf_tests = 1
| 40.422535 | 107 | 0.74669 |
import os
# Available PETSc installs (None = not present) and their build/arch names.
petsc_2_2_path = None
petsc_2_3_path = '../../petsc-2.3.2-p10/'
petsc_3_0_path = None
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
petsc_build_name_production = 'linux-intel-opt-mkl'
dealii_path = None
parmetis_path = '../../ParMetis-3.1'
intel_path = ''
icpc = 'icpc'
# Extra search paths for HDF5, XSD and ParMETIS headers/libraries.
other_includepaths = ['../../hdf5/include',
                      '../../xsd-2.3.1-i686-linux-gnu/libxsd', parmetis_path]
other_libpaths = [os.path.join(petsc_2_3_path, 'externalpackages/f2cblaslapack/linux-gnu-opt/'),
                  '../../hdf5/lib', parmetis_path]
# f2c reference BLAS/LAPACK by default; Intel MKL for production builds.
blas_lapack = ['f2clapack', 'f2cblas']
blas_lapack_production = ['mkl_lapack', 'mkl', 'svml']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c', 'hdf5', 'z', 'parmetis', 'metis']
# MPI tools and the XSD schema compiler used by the build.
tools = {'mpirun': '../../mpi/bin/mpirun',
         'mpicxx': '../../mpi/bin/mpicxx',
         'xsd': '../../xsd-2.3.1-i686-linux-gnu/bin/xsd'}
# Optional VTK 5.0 support for visualisation output.
use_vtk = True
if use_vtk:
    other_libraries.extend(['vtkGraphics', 'vtkFiltering', 'vtkIO', 'vtkCommon', 'z'])
    other_includepaths.extend(['/usr/include/vtk-5.0/'])
# Run the infrastructure tests on this machine.
do_inf_tests = 1
| true | true |
f739e2dd146c85bcd5ee3ffcd6caec4faa911ff9 | 337 | py | Python | dec q4.py | Manthanc007/APS-2o2o | a84337c4e658a93b6c67515fa3ef59b09f2e5e94 | [
"MIT"
] | null | null | null | dec q4.py | Manthanc007/APS-2o2o | a84337c4e658a93b6c67515fa3ef59b09f2e5e94 | [
"MIT"
] | null | null | null | dec q4.py | Manthanc007/APS-2o2o | a84337c4e658a93b6c67515fa3ef59b09f2e5e94 | [
"MIT"
] | null | null | null | T=int(input())
# Interactive-judge solution: repeat for T independent test cases.
while(T>0):
    T=T-1
    i=0
    j=0
    k=0
    l=[]
    N=int(input())
    # Query phase: make N type-1 queries and record each judge reply.
    while(N>0):
        N=N-1
        i=i+1
        j=j+1
        k=k+1
        # NOTE(review): i, j and k always hold the same value here; the
        # protocol presumably expects three numbers per query -- confirm.
        print(1,i,j,k,flush=True)
        r=int(input())
        l.append(r)
    # Answer phase: report all recorded replies in one type-2 line.
    print(2,*l,flush=True)
    # Judge verdict: -1 means wrong answer, so abort immediately.
    s=int(input())
    if(s==-1):
        break
| 15.318182 | 34 | 0.364985 | T=int(input())
# Interactive-judge solution: T test cases; per case, N type-1 queries,
# then one type-2 answer line; a -1 verdict aborts the whole run.
while(T>0):
    T=T-1
    i=0
    j=0
    k=0
    l=[]
    N=int(input())
    while(N>0):
        N=N-1
        i=i+1
        j=j+1
        k=k+1
        print(1,i,j,k,flush=True)
        r=int(input())
        l.append(r)
    print(2,*l,flush=True)
    s=int(input())
    if(s==-1):
        break
| true | true |
f739e3e1366fcb3aae284345d865741b9db2bbb6 | 1,230 | py | Python | lnbits/extensions/tpos/crud.py | sidahmedabdelillah/attigoBTC | f121184bab7f4704b54eff6d0ac27afa1ed524ed | [
"MIT"
] | null | null | null | lnbits/extensions/tpos/crud.py | sidahmedabdelillah/attigoBTC | f121184bab7f4704b54eff6d0ac27afa1ed524ed | [
"MIT"
] | null | null | null | lnbits/extensions/tpos/crud.py | sidahmedabdelillah/attigoBTC | f121184bab7f4704b54eff6d0ac27afa1ed524ed | [
"MIT"
] | null | null | null | from typing import List, Optional, Union
from lnbits.helpers import urlsafe_short_hash
from . import db
from .models import TPoS
async def create_tpos(*, wallet_id: str, name: str, currency: str) -> TPoS:
    """Insert a new TPoS row for *wallet_id* and return the stored record."""
    # Random URL-safe id doubles as the public identifier of the point of sale.
    tpos_id = urlsafe_short_hash()
    await db.execute(
        """
        INSERT INTO tpos.tposs (id, wallet, name, currency)
        VALUES (?, ?, ?, ?)
        """,
        (tpos_id, wallet_id, name, currency),
    )
    # Read the row back so callers get the canonical stored representation.
    tpos = await get_tpos(tpos_id)
    assert tpos, "Newly created tpos couldn't be retrieved"
    return tpos
async def get_tpos(tpos_id: str) -> Optional[TPoS]:
    """Fetch the TPoS with *tpos_id*, or None when no such row exists."""
    row = await db.fetchone("SELECT * FROM tpos.tposs WHERE id = ?", (tpos_id,))
    if not row:
        return None
    return TPoS.from_row(row)
async def get_tposs(wallet_ids: Union[str, List[str]]) -> List[TPoS]:
    """Return every TPoS record belonging to the given wallet id(s)."""
    ids = [wallet_ids] if isinstance(wallet_ids, str) else wallet_ids
    # One SQL placeholder per wallet id.
    placeholders = ",".join(["?"] * len(ids))
    rows = await db.fetchall(
        f"SELECT * FROM tpos.tposs WHERE wallet IN ({placeholders})", (*ids,)
    )
    return [TPoS.from_row(row) for row in rows]
async def delete_tpos(tpos_id: str) -> None:
    """Delete the TPoS row with *tpos_id* (no-op when it does not exist)."""
    await db.execute("DELETE FROM tpos.tposs WHERE id = ?", (tpos_id,))
| 28.604651 | 81 | 0.618699 | from typing import List, Optional, Union
from lnbits.helpers import urlsafe_short_hash
from . import db
from .models import TPoS
async def create_tpos(*, wallet_id: str, name: str, currency: str) -> TPoS:
    """Insert a new TPoS row for *wallet_id* and return the stored record."""
    tpos_id = urlsafe_short_hash()
    await db.execute(
        """
        INSERT INTO tpos.tposs (id, wallet, name, currency)
        VALUES (?, ?, ?, ?)
        """,
        (tpos_id, wallet_id, name, currency),
    )
    tpos = await get_tpos(tpos_id)
    assert tpos, "Newly created tpos couldn't be retrieved"
    return tpos
async def get_tpos(tpos_id: str) -> Optional[TPoS]:
    """Return the TPoS with *tpos_id*, or None when no such row exists."""
    row = await db.fetchone("SELECT * FROM tpos.tposs WHERE id = ?", (tpos_id,))
    return TPoS.from_row(row) if row else None
async def get_tposs(wallet_ids: Union[str, List[str]]) -> List[TPoS]:
    """Return all TPoS records belonging to the given wallet id(s)."""
    if isinstance(wallet_ids, str):
        wallet_ids = [wallet_ids]
    # One SQL placeholder per wallet id.
    q = ",".join(["?"] * len(wallet_ids))
    rows = await db.fetchall(
        f"SELECT * FROM tpos.tposs WHERE wallet IN ({q})", (*wallet_ids,)
    )
    return [TPoS.from_row(row) for row in rows]
async def delete_tpos(tpos_id: str) -> None:
    """Delete the TPoS row with *tpos_id* (no-op when it does not exist)."""
    await db.execute("DELETE FROM tpos.tposs WHERE id = ?", (tpos_id,))
| true | true |
f739e4231f10a51448e8312440aea0bc5a1990e6 | 16,345 | py | Python | paramiko_expect.py | dgonzalez-cs/paramiko-expect | a485a8ac851b106f97d9e4514a47cbe4eccabb0f | [
"MIT"
] | 158 | 2015-01-13T04:08:24.000Z | 2022-03-31T03:18:58.000Z | paramiko_expect.py | dgonzalez-cs/paramiko-expect | a485a8ac851b106f97d9e4514a47cbe4eccabb0f | [
"MIT"
] | 72 | 2015-01-30T01:00:28.000Z | 2022-02-13T09:47:23.000Z | paramiko_expect.py | dgonzalez-cs/paramiko-expect | a485a8ac851b106f97d9e4514a47cbe4eccabb0f | [
"MIT"
] | 86 | 2015-02-23T18:46:16.000Z | 2022-03-04T12:25:03.000Z | #
# Paramiko Expect
#
# Written by Fotis Gimian
# http://github.com/fgimian
#
# This library works with a Paramiko SSH channel to provide native SSH
# expect-like handling for servers. The library may be used to interact
# with commands like 'configure' or Cisco IOS devices or with interactive
# Unix scripts or commands.
#
# You must have Paramiko installed in order to use this library.
#
from __future__ import unicode_literals
import codecs
import sys
import re
import socket
import struct
import time
# Windows does not have termios
try:
import termios
import tty
has_termios = True
MAX_TIMEOUT = 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
except ImportError: # pragma: no cover
import threading
has_termios = False
MAX_TIMEOUT = threading.TIMEOUT_MAX
import select
def strip_ansi_codes(s):
    """Return *s* with ANSI escape sequences and the ?1049/?2004 mode
    toggles removed."""
    ansi_pattern = (
        r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?'
        r'|\?(1049|2004)[hl]'
    )
    return re.sub(ansi_pattern, '', s)
def default_output_func(msg):
    """Default output callback: echo *msg* to stdout and flush immediately."""
    stream = sys.stdout
    stream.write(msg)
    stream.flush()
class SSHClientInteraction(object):
"""
This class allows an expect-like interface to Paramiko which allows
coders to interact with applications and the shell of the connected
device.
:param client: A Paramiko SSHClient object
:param timeout: The connection timeout in seconds
:param newline: The newline character to send after each command
:param buffer_size: The amount of data (in bytes) that will be read at
a time after a command is run
:param display: Whether or not the output should be displayed in
real-time as it is being performed (especially useful
when debugging)
:param encoding: The character encoding to use.
:param lines_to_check: The number of last few lines of the output to
look at, while matching regular expression(s)
"""
    def __init__(
        self, client, timeout=60, newline='\r', buffer_size=1024,
        display=False, encoding='utf-8', output_callback=default_output_func,
        tty_width=80, tty_height=24, lines_to_check=1
    ):
        # Open an interactive shell on the (already connected) Paramiko client.
        self.channel = client.invoke_shell(width=tty_width, height=tty_height)
        self.timeout = timeout
        self.newline = newline
        self.buffer_size = buffer_size
        self.display = display
        self.encoding = encoding
        self.output_callback = output_callback
        self.lines_to_check = lines_to_check
        # Rolling state describing the most recent interaction;
        # populated/reset by expect() and send().
        self.current_output = ''
        self.current_output_clean = ''
        self.current_send_string = ''
        self.last_match = ''
        # If the output is long, multi-byte encoded characters may be split
        # across calls to recv, so decode incrementally.
        self.decoder = codecs.getincrementaldecoder(self.encoding)()
    def __del__(self):
        # Best-effort cleanup when the object is garbage collected.
        self.close()
    def __enter__(self):
        # Context-manager support: `with SSHClientInteraction(...) as interact:`
        return self
    def __exit__(self, type, value, traceback):
        # Always close the channel when leaving the `with` block.
        self.close()
    def close(self):
        """Attempts to close the channel for clean completion."""
        try:
            self.channel.close()
        except Exception:
            # The channel may already be closed or the transport gone;
            # nothing useful can be done, so swallow the error.
            pass
    def expect(
        self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
        strip_ansi=True, ignore_decode_error=True, lines_to_check=None
    ):
        """
        This function takes in a regular expression (or regular expressions)
        that represent the last line of output from the server. The function
        waits for one or more of the terms to be matched. The regexes are
        matched using expression \n<regex>$ so you'll need to provide an
        easygoing regex such as '.*server.*' if you wish to have a fuzzy
        match.
        :param re_strings: Either a regex string or list of regex strings
                           that we should expect; if this is not specified,
                           then EOF is expected (i.e. the shell is completely
                           closed after the exit command is issued)
        :param timeout: Timeout in seconds. If this timeout is exceeded,
                        then an exception is raised.
        :param output_callback: A function used to print ssh output. Printed to stdout
                                by default. A user-defined logger may be passed like
                                output_callback=lambda m: mylog.debug(m)
        :param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
                                     can set to '' on cases prompt is the first line,
                                     or the command has no output.
        :param strip_ansi: If True, will strip ansi control chars befores regex matching
                           default to True.
        :param ignore_decode_error: If True, will ignore decode errors if any.
                                    default to True.
        :param lines_to_check: The number of last few lines of the output to
                               look at, while matching regular expression(s)
        :return: An EOF returns -1, a regex metch returns 0 and a match in a
                 list of regexes returns the index of the matched string in
                 the list.
        :raises: A socket.timeout exception is raised on timeout.
        """
        output_callback = output_callback if output_callback else self.output_callback
        # Set the channel timeout
        timeout = timeout if timeout else self.timeout
        self.channel.settimeout(timeout)
        lines_to_check = lines_to_check if lines_to_check else self.lines_to_check
        if ignore_decode_error:
            # Swap in a decoder that silently drops undecodable bytes.
            self.decoder = codecs.getincrementaldecoder(self.encoding)('ignore')
        # Create an empty output buffer
        self.current_output = ''
        # saves the current buffer to check for re_strings pattern
        current_buffer_output_decoded = ''
        # This function needs all regular expressions to be in the form of a
        # list, so if the user provided a string, let's convert it to a 1
        # item list.
        if isinstance(re_strings, str) and len(re_strings) != 0:
            re_strings = [re_strings]
        # to avoid looping in recv_ready()
        base_time = time.time()
        # Loop until one of the expressions is matched or loop forever if
        # nothing is expected (usually used for exit)
        while (
            len(re_strings) == 0 or
            not [re_string
                 for re_string in re_strings
                 if re.match(default_match_prefix + re_string + '$',
                             current_buffer_output_decoded, re.DOTALL)]
        ):
            current_buffer_output_decoded = ''
            # avoids paramiko hang when recv is not ready yet
            while not self.channel.recv_ready():
                time.sleep(.009)
                if time.time() >= (base_time + timeout):
                    print('EXCESS TIME RECV_READY TIMEOUT, did you expect() before a send()')
                    return -1
            # Read some of the output
            current_buffer = self.channel.recv(self.buffer_size)
            # If we have an empty buffer, then the SSH session has been closed
            if len(current_buffer) == 0:
                break
            # Convert the buffer to our chosen encoding
            current_buffer_decoded = self.decoder.decode(current_buffer)
            # Strip all ugly \r (Ctrl-M making) characters from the current
            # read
            current_buffer_decoded = current_buffer_decoded.replace('\r', '')
            # Display the current buffer in realtime if requested to do so
            # (good for debugging purposes)
            if strip_ansi:
                current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)
            if not current_buffer_decoded:
                continue
            if self.display:
                output_callback(current_buffer_decoded)
            # Add the currently read buffer to the output
            self.current_output += current_buffer_decoded
            # Only the last `lines_to_check` lines are matched against.
            current_buffer_output_decoded = '\n' + '\n'.join(self.current_output.splitlines()[-lines_to_check:])
        # Grab the first pattern that was matched
        if len(re_strings) != 0:
            found_pattern = [(re_index, re_string)
                             for re_index, re_string in enumerate(re_strings)
                             if re.match(default_match_prefix + re_string + '$',
                                         self.current_output, re.DOTALL)]
        # Clean the output up by removing the sent command
        self.current_output_clean = self.current_output
        if len(self.current_send_string) != 0:
            self.current_output_clean = (
                self.current_output_clean.replace(
                    self.current_send_string + self.newline, ''
                )
            )
        # Reset the current send string to ensure that multiple expect calls
        # don't result in bad output cleaning
        self.current_send_string = ''
        # Clean the output up by removing the expect output from the end if
        # requested and save the details of the matched pattern
        if len(re_strings) != 0 and len(found_pattern) != 0:
            self.current_output_clean = (
                re.sub(
                    found_pattern[0][1] + '$', '', self.current_output_clean
                )
            )
            self.last_match = found_pattern[0][1]
            return found_pattern[0][0]
        else:
            # We would socket timeout before getting here, but for good
            # measure, let's send back a -1
            return -1
def send(self, send_string, newline=None):
"""Saves and sends the send string provided."""
self.current_send_string = send_string
# send_string, _ = codecs.getdecoder(self.encoding)(send_string)
newline = newline if newline is not None else self.newline
# don't send till send_ready
while not self.channel.send_ready():
time.sleep(.009)
self.channel.send(send_string)
self.channel.send(newline)
    def tail(
        self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
        timeout=None
    ):
        """
        This function takes control of an SSH channel and displays line
        by line of output as \n is recieved. This function is specifically
        made for tail-like commands.
        :param line_prefix: Text to append to the left of each line of output.
                            This is especially useful if you are using my
                            MultiSSH class to run tail commands over multiple
                            servers.
        :param callback: You may optionally supply a callback function which
                         takes two paramaters. The first is the line prefix
                         and the second is current line of output. The
                         callback should return the string that is to be
                         displayed (including the \n character). This allows
                         users to grep the output or manipulate it as
                         required.
        :param output_callback: A function used to print ssh output. Printed to stdout
                                by default. A user-defined logger may be passed like
                                output_callback=lambda m: mylog.debug(m)
        :param stop_callback: A function usesd to stop the tail, when function retruns
                              True tail will stop, by default stop_callback=lambda x: False
        :param timeout: how much time to wait for data, default to None which
                        mean almost forever.
        """
        output_callback = output_callback if output_callback else self.output_callback
        # Set the channel timeout to the maximum allowed value,
        # setting this to None breaks the KeyboardInterrupt exception and
        # won't allow us to Ctrl+C out of the script
        timeout = timeout if timeout else MAX_TIMEOUT
        self.channel.settimeout(timeout)
        # Create an empty line buffer and a line counter
        current_line = b''
        line_counter = 0
        line_feed_byte = '\n'.encode(self.encoding)
        # Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
        while True:
            # Read the output one byte at a time so we can detect \n correctly
            buffer = self.channel.recv(1)
            # If we have an empty buffer, then the SSH session has been closed
            if len(buffer) == 0:
                break
            # Add the currently read buffer to the current line output
            current_line += buffer
            # Display the last read line in realtime when we reach a \n
            # character
            if buffer == line_feed_byte:
                current_line_decoded = self.decoder.decode(current_line)
                # The first line is skipped: it is the echo of the command.
                if line_counter:
                    if callback:
                        output_callback(callback(line_prefix, current_line_decoded))
                    else:
                        if line_prefix:
                            output_callback(line_prefix)
                        output_callback(current_line_decoded)
                if stop_callback(current_line_decoded):
                    break
                line_counter += 1
                current_line = b''
    def take_control(self):
        """
        This function is a better documented and touched up version of the
        posix_shell function found in the interactive.py demo script that
        ships with Paramiko.
        """
        if has_termios:
            # Get attributes of the shell you were in before going to the
            # new one
            original_tty = termios.tcgetattr(sys.stdin)
            try:
                tty.setraw(sys.stdin.fileno())
                tty.setcbreak(sys.stdin.fileno())
                # We must set the timeout to 0 so that we can bypass times when
                # there is no available text to receive
                self.channel.settimeout(0)
                # Loop forever until the user exits (i.e. read buffer is empty)
                while True:
                    select_read, select_write, select_exception = (
                        select.select([self.channel, sys.stdin], [], [])
                    )
                    # Read any output from the terminal and print it to the
                    # screen. With timeout set to 0, we just can ignore times
                    # when there's nothing to receive.
                    if self.channel in select_read:
                        try:
                            buffer = self.channel.recv(self.buffer_size)
                            if len(buffer) == 0:
                                break
                            sys.stdout.write(self.decoder.decode(buffer))
                            sys.stdout.flush()
                        except socket.timeout:
                            pass
                    # Send any keyboard input to the terminal one byte at a
                    # time
                    if sys.stdin in select_read:
                        buffer = sys.stdin.read(1)
                        if len(buffer) == 0:
                            break
                        self.channel.send(buffer)
            finally:
                # Restore the attributes of the shell you were in
                termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
        else:
            # Windows fallback (no termios): mirror channel output from a
            # background thread while forwarding keystrokes from this one.
            def writeall(sock):
                while True:
                    buffer = sock.recv(self.buffer_size)
                    if len(buffer) == 0:
                        break
                    sys.stdout.write(self.decoder.decode(buffer))
                    sys.stdout.flush()
            writer = threading.Thread(target=writeall, args=(self.channel,))
            writer.start()
            try:
                while True:
                    buffer = sys.stdin.read(1)
                    if len(buffer) == 0:
                        break
                    self.channel.send(buffer)
            # User has hit Ctrl+Z or F6
            except EOFError:
                pass
| 41.803069 | 112 | 0.581707 |
from __future__ import unicode_literals
import codecs
import sys
import re
import socket
import struct
import time
try:
import termios
import tty
has_termios = True
MAX_TIMEOUT = 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
except ImportError:
import threading
has_termios = False
MAX_TIMEOUT = threading.TIMEOUT_MAX
import select
def strip_ansi_codes(s):
    """Return *s* with ANSI escape sequences and ?1049/?2004 mode toggles removed."""
    return re.sub(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?|\?(1049|2004)[hl]', '', s)
def default_output_func(msg):
    """Default output callback: echo *msg* to stdout and flush immediately."""
    sys.stdout.write(msg)
    sys.stdout.flush()
class SSHClientInteraction(object):
    def __init__(
        self, client, timeout=60, newline='\r', buffer_size=1024,
        display=False, encoding='utf-8', output_callback=default_output_func,
        tty_width=80, tty_height=24, lines_to_check=1
    ):
        """Open an interactive shell on *client* and initialise buffers."""
        self.channel = client.invoke_shell(width=tty_width, height=tty_height)
        self.timeout = timeout
        self.newline = newline
        self.buffer_size = buffer_size
        self.display = display
        self.encoding = encoding
        self.output_callback = output_callback
        self.lines_to_check = lines_to_check
        # State of the most recent interaction; maintained by expect()/send().
        self.current_output = ''
        self.current_output_clean = ''
        self.current_send_string = ''
        self.last_match = ''
        # Incremental decoder: multi-byte characters may be split across recvs.
        self.decoder = codecs.getincrementaldecoder(self.encoding)()
    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()
    def __enter__(self):
        # Context-manager support.
        return self
    def __exit__(self, type, value, traceback):
        # Close the channel when leaving the `with` block.
        self.close()
    def close(self):
        """Attempt to close the channel for clean completion."""
        try:
            self.channel.close()
        except Exception:
            # Channel may already be gone; nothing useful to do.
            pass
    def expect(
        self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
        strip_ansi=True, ignore_decode_error=True, lines_to_check=None
    ):
        """
        Read channel output until one of ``re_strings`` matches.

        Each regex is matched against the last ``lines_to_check`` lines of
        output as ``default_match_prefix + regex + '$'`` (DOTALL), so fuzzy
        patterns such as ``'.*prompt.*'`` are usually required.

        :param re_strings: A regex string or list of regex strings to expect;
                           empty means wait for EOF (channel close).
        :param timeout: Seconds to wait (defaults to the instance timeout).
        :param output_callback: Called with each chunk when ``display`` is on.
        :param default_match_prefix: Prefix prepended to every regex.
        :param strip_ansi: Strip ANSI escape codes before matching.
        :param ignore_decode_error: Ignore character decoding errors.
        :param lines_to_check: How many trailing lines to match against.
        :return: Index of the matched regex, or -1 on EOF/timeout.
        """
        output_callback = output_callback if output_callback else self.output_callback
        timeout = timeout if timeout else self.timeout
        self.channel.settimeout(timeout)
        lines_to_check = lines_to_check if lines_to_check else self.lines_to_check
        if ignore_decode_error:
            self.decoder = codecs.getincrementaldecoder(self.encoding)('ignore')
        self.current_output = ''
        current_buffer_output_decoded = ''
        # Normalise a single regex string into a one-item list.
        if isinstance(re_strings, str) and len(re_strings) != 0:
            re_strings = [re_strings]
        # to avoid looping in recv_ready()
        base_time = time.time()
        while (
            len(re_strings) == 0 or
            not [re_string
                 for re_string in re_strings
                 if re.match(default_match_prefix + re_string + '$',
                             current_buffer_output_decoded, re.DOTALL)]
        ):
            current_buffer_output_decoded = ''
            # avoids paramiko hang when recv is not ready yet
            while not self.channel.recv_ready():
                time.sleep(.009)
                if time.time() >= (base_time + timeout):
                    print('EXCESS TIME RECV_READY TIMEOUT, did you expect() before a send()')
                    return -1
            # Read some of the output
            current_buffer = self.channel.recv(self.buffer_size)
            # If we have an empty buffer, then the SSH session has been closed
            if len(current_buffer) == 0:
                break
            # Convert the buffer to our chosen encoding
            current_buffer_decoded = self.decoder.decode(current_buffer)
            # Strip all ugly \r (Ctrl-M making) characters from the current
            # read
            current_buffer_decoded = current_buffer_decoded.replace('\r', '')
            # Display the current buffer in realtime if requested to do so
            # (good for debugging purposes)
            if strip_ansi:
                current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)
            if not current_buffer_decoded:
                continue
            if self.display:
                output_callback(current_buffer_decoded)
            # Add the currently read buffer to the output
            self.current_output += current_buffer_decoded
            current_buffer_output_decoded = '\n' + '\n'.join(self.current_output.splitlines()[-lines_to_check:])
        # Grab the first pattern that was matched
        if len(re_strings) != 0:
            found_pattern = [(re_index, re_string)
                             for re_index, re_string in enumerate(re_strings)
                             if re.match(default_match_prefix + re_string + '$',
                                         self.current_output, re.DOTALL)]
        # Clean the output up by removing the sent command
        self.current_output_clean = self.current_output
        if len(self.current_send_string) != 0:
            self.current_output_clean = (
                self.current_output_clean.replace(
                    self.current_send_string + self.newline, ''
                )
            )
        # Reset the current send string to ensure that multiple expect calls
        # don't result in bad output cleaning
        self.current_send_string = ''
        if len(re_strings) != 0 and len(found_pattern) != 0:
            self.current_output_clean = (
                re.sub(
                    found_pattern[0][1] + '$', '', self.current_output_clean
                )
            )
            self.last_match = found_pattern[0][1]
            return found_pattern[0][0]
        else:
            return -1
def send(self, send_string, newline=None):
    """Send a command over the channel, followed by a line terminator.

    The sent text is remembered in ``self.current_send_string`` so that a
    subsequent expect() call can strip the echoed command from the
    captured output.

    :param send_string: text to transmit (without a trailing newline)
    :param newline: optional line terminator; defaults to ``self.newline``
    """
    self.current_send_string = send_string
    if newline is None:
        newline = self.newline
    # Poll until the transport will accept data; sending before
    # send_ready() can hang some paramiko versions.
    while not self.channel.send_ready():
        time.sleep(0.009)
    self.channel.send(send_string)
    self.channel.send(newline)
def tail(
    self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
    timeout=None
):
    """Continuously read the channel and emit each complete line as it
    arrives (like ``tail -f``), until the session closes or
    ``stop_callback`` returns True.

    :param line_prefix: optional text emitted before each line (or passed
        to ``callback`` when one is supplied)
    :param callback: optional ``f(line_prefix, line) -> str`` transform;
        its result is sent to ``output_callback`` instead of the raw line
    :param output_callback: sink for output text; defaults to
        ``self.output_callback``
    :param stop_callback: predicate applied to each decoded line;
        returning True ends the tail
    :param timeout: channel socket timeout in seconds; defaults to
        MAX_TIMEOUT
    """
    output_callback = output_callback if output_callback else self.output_callback
    timeout = timeout if timeout else MAX_TIMEOUT
    self.channel.settimeout(timeout)
    # Create an empty line buffer and a line counter
    current_line = b''
    line_counter = 0
    line_feed_byte = '\n'.encode(self.encoding)
    # Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
    while True:
        # Read the output one byte at a time so we can detect \n correctly
        buffer = self.channel.recv(1)
        # If we have an empty buffer, then the SSH session has been closed
        if len(buffer) == 0:
            break
        # Add the currently read buffer to the current line output
        current_line += buffer
        # Display the last read line in realtime when we reach a \n
        # character
        if buffer == line_feed_byte:
            current_line_decoded = self.decoder.decode(current_line)
            # The very first line is skipped (line_counter == 0): it is
            # normally the echo of the command that started the tail.
            if line_counter:
                if callback:
                    output_callback(callback(line_prefix, current_line_decoded))
                else:
                    if line_prefix:
                        output_callback(line_prefix)
                    output_callback(current_line_decoded)
                if stop_callback(current_line_decoded):
                    break
            line_counter += 1
            current_line = b''
def take_control(self):
    """Hand interactive control of the remote shell to the local user.

    On POSIX systems (termios available) the local terminal is switched to
    raw/cbreak mode and a select() loop shuttles bytes between stdin and
    the channel.  On platforms without termios a background thread copies
    channel output to stdout while the main thread forwards stdin one
    character at a time.  The session ends when either side reaches EOF.
    """
    if has_termios:
        # Get attributes of the shell you were in before going to the
        # new one
        original_tty = termios.tcgetattr(sys.stdin)
        try:
            tty.setraw(sys.stdin.fileno())
            tty.setcbreak(sys.stdin.fileno())
            # We must set the timeout to 0 so that we can bypass times when
            # there is no available text to receive
            self.channel.settimeout(0)
            # Loop forever until the user exits (i.e. read buffer is empty)
            while True:
                select_read, select_write, select_exception = (
                    select.select([self.channel, sys.stdin], [], [])
                )
                # Read any output from the terminal and print it to the
                # screen.  With timeout set to 0, we just can ignore times
                # when there's nothing to receive.
                if self.channel in select_read:
                    try:
                        buffer = self.channel.recv(self.buffer_size)
                        if len(buffer) == 0:
                            break
                        sys.stdout.write(self.decoder.decode(buffer))
                        sys.stdout.flush()
                    except socket.timeout:
                        pass
                # Forward one keystroke at a time to the remote side.
                if sys.stdin in select_read:
                    buffer = sys.stdin.read(1)
                    if len(buffer) == 0:
                        break
                    self.channel.send(buffer)
        finally:
            # Always restore the original terminal settings, even if the
            # loop exits via an exception.
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
    else:
        # No termios (e.g. Windows): pump channel output on a background
        # thread while the main thread forwards stdin.
        def writeall(sock):
            # Copy everything the remote side sends to local stdout.
            while True:
                buffer = sock.recv(self.buffer_size)
                if len(buffer) == 0:
                    break
                sys.stdout.write(self.decoder.decode(buffer))
                sys.stdout.flush()
        writer = threading.Thread(target=writeall, args=(self.channel,))
        writer.start()
        try:
            while True:
                buffer = sys.stdin.read(1)
                if len(buffer) == 0:
                    break
                self.channel.send(buffer)
        except EOFError:
            # Local stdin closed; end the interactive session quietly.
            pass
| true | true |
f739e43acfbc6a465c6846deca9594c01555a2d0 | 2,019 | py | Python | components/cloud/aws/cbmc/proofs/make_cbmc_batch_files.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | 1 | 2022-03-26T09:59:37.000Z | 2022-03-26T09:59:37.000Z | components/cloud/aws/cbmc/proofs/make_cbmc_batch_files.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | 1 | 2021-06-24T04:27:40.000Z | 2021-06-24T04:27:40.000Z | components/cloud/aws/cbmc/proofs/make_cbmc_batch_files.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | 2 | 2021-06-24T04:08:28.000Z | 2022-03-07T06:37:24.000Z | #!/usr/bin/env python3
#
# Generation of the cbmc-batch.yaml files for the CBMC proofs.
#
# Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import platform
import subprocess
import logging
MAKEFILE = "Makefile"
YAML_FILE = "cbmc-batch.yaml"
def create_cbmc_yaml_files():
    """Regenerate every cbmc-batch.yaml found under the current directory.

    The YAML files are consumed only by CI, so this is a no-op on Windows.
    For each directory containing both a Makefile and a cbmc-batch.yaml,
    the stale YAML file is removed and rebuilt by invoking make in that
    directory.
    """
    # The YAML files are only used by CI and are not needed on Windows.
    if platform.system() == "Windows":
        return
    for dirpath, _, filenames in os.walk("."):
        if YAML_FILE not in filenames or MAKEFILE not in filenames:
            continue
        abs_dir = os.path.abspath(dirpath)
        logging.info("Building %s in %s", YAML_FILE, dirpath)
        # Delete the stale file first so a failed build cannot leave an
        # out-of-date YAML behind.
        os.remove(os.path.join(abs_dir, YAML_FILE))
        subprocess.run(["make", YAML_FILE],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       cwd=abs_dir,
                       check=True)
if __name__ == '__main__':
    # Script entry point: rebuild all cbmc-batch.yaml files under the
    # current working directory (expected to be the proofs root).
    create_cbmc_yaml_files()
| 41.204082 | 80 | 0.698861 |
import os
import platform
import subprocess
import logging
MAKEFILE = "Makefile"
YAML_FILE = "cbmc-batch.yaml"
def create_cbmc_yaml_files():
if platform.system() == "Windows":
return
for dyr, _, files in os.walk("."):
if YAML_FILE in files and MAKEFILE in files:
logging.info("Building %s in %s", YAML_FILE, dyr)
os.remove(os.path.join(os.path.abspath(dyr), YAML_FILE))
subprocess.run(["make", YAML_FILE],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=os.path.abspath(dyr),
check=True)
if __name__ == '__main__':
create_cbmc_yaml_files()
| true | true |
f739e57e3e5ae3ed4ad51629aeaff4624374b766 | 105 | py | Python | gitz_doc/dirs.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 24 | 2019-07-26T03:57:16.000Z | 2021-11-22T22:39:13.000Z | gitz_doc/dirs.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 212 | 2019-06-13T13:44:26.000Z | 2020-06-02T17:59:51.000Z | gitz_doc/dirs.py | rec/gitz | cbb07f99dd002c85b5ca95896b33d03150bf9282 | [
"MIT"
] | 2 | 2019-08-09T13:55:38.000Z | 2019-09-07T11:17:59.000Z | from pathlib import Path
# Package root: two directory levels above this module.
HOME = Path(__file__).parent.parent
# Generated documentation output directory.
DOC = HOME / 'doc'
# Man pages, section 1 (user commands) output directory.
MAN = HOME / 'man/man1'
| 17.5 | 35 | 0.695238 | from pathlib import Path
HOME = Path(__file__).parent.parent
DOC = HOME / 'doc'
MAN = HOME / 'man/man1'
| true | true |
f739e693bbbb5b47420ba4555a24c7e6139e4ffb | 3,570 | py | Python | bindings/python/ensmallen/datasets/string/propionimicrobiumspbv2f7.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/propionimicrobiumspbv2f7.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/propionimicrobiumspbv2f7.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Propionimicrobium sp. BV2F7.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PropionimicrobiumSpBv2f7(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve and build the Propionimicrobium sp. BV2F7 graph from STRING.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed (default: undirected).
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the node vocabulary, or treat nodes as a numeric
        range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess files only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        Graph version to retrieve.  Available versions: homology.v11.0,
        homology.v11.5, physical.links.v11.0, physical.links.v11.5,
        links.v11.0, links.v11.5.
    additional_graph_kwargs: Dict
        Additional graph kwargs forwarded to the retriever.

    Returns
    -----------------------
    Instance of the Propionimicrobium sp. BV2F7 graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the retriever first, then invoke it to construct the graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="PropionimicrobiumSpBv2f7",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 33.055556 | 223 | 0.677871 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def PropionimicrobiumSpBv2f7(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="PropionimicrobiumSpBv2f7",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f739e69877b6e88120e1d59493b398b233abd828 | 1,058 | py | Python | build/lib/rdd/test.py | darrelrobinson/rdd | 54b9c328087ae22ac38073aab2ee930459b2364a | [
"MIT"
] | 46 | 2019-02-24T20:31:11.000Z | 2022-03-30T08:03:10.000Z | build/lib/rdd/test.py | darrelrobinson/rdd | 54b9c328087ae22ac38073aab2ee930459b2364a | [
"MIT"
] | 8 | 2019-05-22T16:36:20.000Z | 2019-11-02T15:55:01.000Z | build/lib/rdd/test.py | darrelrobinson/rdd | 54b9c328087ae22ac38073aab2ee930459b2364a | [
"MIT"
] | 17 | 2019-01-18T13:17:47.000Z | 2022-03-07T01:45:18.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import functions as rdd
'''
To Do:
- Put testing functions in another folder
- test different input types, combos of bad items, etc
'''
# Set seed for reproducible draws.
np.random.seed(42)
# Simulate data: a sharp-RD design with cutoff at x = -0.5 (treatment
# switches where x + 0.5 crosses 0.5).
N = 10000
# x = np.random.uniform(-.5, .5, N)
x = np.random.normal(0, 1, N)
epsilon = np.random.normal(0, 1, N)
# NOTE(review): with x ~ N(0,1), np.round(x + .5) can take integer values
# outside {0, 1}; the commented uniform(-.5, .5) draw above would make this
# a clean binary treatment indicator -- confirm which is intended.
forcing = np.round(x+.5)
# Outcome: treatment effect 0.5, slope 2 in the running variable, noise.
y = .5 * forcing + 2 * x + 1 + epsilon
# Two irrelevant covariates for testing covariate handling.
w1 = np.random.normal(0, 1, N)
w2 = np.random.normal(0, 4, N)
data = pd.DataFrame({'y':y, 'x': x, 'w1':w1, 'w2':w2})
print(data.head())
# Imbens-Kalyanaraman-style optimal bandwidth from the rdd package.
h = rdd.optimal_bandwidth(data['y'], data['x'])
print(h)
# data_rdd = rdd.truncated_data(data, 'x', h)
# results = rdd.rdd(data_rdd, 'x', 'y')
# print(results.summary())
# data_binned = rdd.bin_data(data, 'y', 'x', 100)
# plt.figure()
# plt.scatter(data_binned['x'], data_binned['y'],
# s = data_binned['n_obs'], facecolors='none', edgecolors='r')
# plt.show()
# plt.close()
# print(data_binned['n_obs'].describe())
# Show a spline
# show placebo with different cuts | 21.591837 | 66 | 0.642722 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import functions as rdd
np.random.seed(42)
N = 10000
x = np.random.normal(0, 1, N)
epsilon = np.random.normal(0, 1, N)
forcing = np.round(x+.5)
y = .5 * forcing + 2 * x + 1 + epsilon
w1 = np.random.normal(0, 1, N)
w2 = np.random.normal(0, 4, N)
data = pd.DataFrame({'y':y, 'x': x, 'w1':w1, 'w2':w2})
print(data.head())
h = rdd.optimal_bandwidth(data['y'], data['x'])
print(h)
| true | true |
f739e6d00f052c9c6193859d2752eee192484bc2 | 424 | py | Python | QT/pyside_test.py | ValRCS/RCS_Python_11 | 157c8e08aaf9849341cadb50077fe65dead536fa | [
"MIT"
] | null | null | null | QT/pyside_test.py | ValRCS/RCS_Python_11 | 157c8e08aaf9849341cadb50077fe65dead536fa | [
"MIT"
] | null | null | null | QT/pyside_test.py | ValRCS/RCS_Python_11 | 157c8e08aaf9849341cadb50077fe65dead536fa | [
"MIT"
] | 2 | 2019-12-11T14:39:36.000Z | 2019-12-13T14:29:09.000Z | import PySide2.QtCore
# Prints PySide2 version
# e.g. 5.11.1a1
print(PySide2.__version__)
# Gets a tuple with each version component
# e.g. (5, 11, 1, 'a', 1)
print(PySide2.__version_info__)
# Prints the Qt version used to compile PySide2
# e.g. "5.11.2"
print(PySide2.QtCore.__version__)
# Gets a tuple with each version components of Qt used to compile PySide2
# e.g. (5, 11, 2)
print(PySide2.QtCore.__version_info__)
| 23.555556 | 73 | 0.735849 | import PySide2.QtCore
print(PySide2.__version__)
print(PySide2.__version_info__)
print(PySide2.QtCore.__version__)
print(PySide2.QtCore.__version_info__)
| true | true |
f739e7a1101eb5c696e2190eceea8d2d9114135b | 202 | py | Python | test.py | Curt-H/bilidown | 9d2d0e4adea8172f0f5c69fc4e72d1eaa9ba2a76 | [
"MIT"
] | null | null | null | test.py | Curt-H/bilidown | 9d2d0e4adea8172f0f5c69fc4e72d1eaa9ba2a76 | [
"MIT"
] | null | null | null | test.py | Curt-H/bilidown | 9d2d0e4adea8172f0f5c69fc4e72d1eaa9ba2a76 | [
"MIT"
] | null | null | null | {"code":0,"message":"0","ttl":1,"data":[{"cid":260839008,"page":1,"from":"vupload","part":"PocketLCD_with_srt","duration":467,"vid":"","weblink":"","dimension":{"width":1920,"height":1080,"rotate":0}}]} | 202 | 202 | 0.628713 | {"code":0,"message":"0","ttl":1,"data":[{"cid":260839008,"page":1,"from":"vupload","part":"PocketLCD_with_srt","duration":467,"vid":"","weblink":"","dimension":{"width":1920,"height":1080,"rotate":0}}]} | true | true |
f739e804d6bc7fe67793b8b0f8753bba78ac200d | 4,072 | py | Python | examples/Old Code/run_follower_stopper_ring.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | examples/Old Code/run_follower_stopper_ring.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | examples/Old Code/run_follower_stopper_ring.py | georgegunter/flow | 15848ec9bafd250364a51fa162786037645b19bf | [
"MIT"
] | null | null | null | from flow.controllers import FollowerStopper, IDMController, ContinuousRouter, OVMController
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams
from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS
# For running a simulation:
from flow.core.experiment import Experiment
# For data processing:
import pandas as pd
import numpy as np
import os
import sys
import time
def get_flow_dict(v_des,model_params,emission_path):
    """Assemble the Flow experiment configuration for a single-lane ring
    road with 20 OVM-controlled human vehicles and one FollowerStopper AV.

    :param v_des: desired velocity handed to the FollowerStopper controller
    :param model_params: OVM car-following parameters, in the order
        [alpha, beta, v_max, s_star, s0]
    :param emission_path: directory where SUMO writes emission output
    :return: flow_params dict consumable by flow.core.experiment.Experiment
    """
    # Unpack the calibrated OVM car-following parameters.
    alpha = model_params[0]
    beta = model_params[1]
    v_max = model_params[2]
    s_star = model_params[3]
    s0 = model_params[4]
    # Human driver model: OVM with small acceleration noise.
    human_accel = (OVMController,{'alpha':alpha,'beta':beta,'v_max':v_max,'s_star':s_star,'s0':s0,'noise':.1})
    vehicles = VehicleParams()
    vehicles.add(
        veh_id="human",
        acceleration_controller=human_accel,
        routing_controller=(ContinuousRouter, {}),
        num_vehicles=20)
    # Single autonomous vehicle (drawn red) running FollowerStopper.
    vehicles.add(
        color='red',
        veh_id="AV",
        acceleration_controller=(FollowerStopper, {'v_des':v_des}),
        routing_controller=(ContinuousRouter, {}),
        num_vehicles=1)
    flow_params = dict(
        # name of the experiment
        exp_tag='ring',
        # name of the flow environment the experiment is running on
        env_name=AccelEnv,
        # name of the network class the experiment is running on
        network=RingNetwork,
        # simulator that is used by the experiment
        simulator='traci',
        # sumo-related parameters (see flow.core.params.SumoParams)
        sim=SumoParams(
            render=False,
            sim_step=0.1,
            emission_path=emission_path,
        ),
        # environment related parameters (see flow.core.params.EnvParams)
        env=EnvParams(
            horizon=3000,
            warmup_steps=750,
            additional_params=ADDITIONAL_ENV_PARAMS,
        ),
        # network-related parameters (see flow.core.params.NetParams and the
        # network's documentation or ADDITIONAL_NET_PARAMS component)
        net=NetParams(
            additional_params={
                "length": 260,
                "lanes": 1,
                "speed_limit": 30,
                "resolution": 40,
            }, ),
        # vehicles to be placed in the network at the start of a rollout (see
        # flow.core.params.VehicleParams)
        veh=vehicles,
        # parameters specifying the positioning of vehicles upon initialization/
        # reset (see flow.core.params.InitialConfig)
        # initial=InitialConfig(
        #     bunching=20,
        # ),
    )
    return flow_params
def run_sim(v_des, model_params, emission_path):
    """Run one ring-road simulation and return the path of its emission CSV.

    :param v_des: desired velocity for the FollowerStopper AV
    :param model_params: OVM parameters [alpha, beta, v_max, s_star, s0]
    :param emission_path: directory where SUMO writes emission output
    :return: path to the emission CSV produced by the single run
    """
    params = get_flow_dict(v_des, model_params, emission_path)
    emission_files, _info_dict = Experiment(params).run(num_runs=1, convert_to_csv=True)
    return emission_files[0]
def get_sim_results(csv_path):
    """Summarize vehicle speeds over the second half of a simulation.

    The simulation length is taken from the last timestamp recorded for the
    first vehicle id appearing in the file; every speed sample (across all
    vehicles) with time strictly greater than half that length is
    aggregated.

    :param csv_path: path to an emission CSV with 'time', 'id' and 'speed'
        columns
    :return: [average_speed, speed_std] over the second half of the run
    """
    frame = pd.read_csv(csv_path, delimiter=',')
    # Use the first vehicle id (order of appearance) to measure run length.
    first_id = list(frame.id.unique())[0]
    times_first = np.array(frame[frame['id'] == first_id]['time'])
    half_time = times_first[-1] / 2
    # Collect speeds from every vehicle in the steady-state second half.
    speeds = np.array(frame[frame['time'] > half_time]['speed'])
    return [np.mean(speeds), np.std(speeds)]
if __name__ == "__main__":
    # Output directory for SUMO emission files (machine-specific path).
    emission_path = '/Users/vanderbilt/Desktop/Research_2020/CIRCLES/Official_Flow/flow/examples/follower_stopper_sims/'
    # Calibrated OVM parameters: [alpha, beta, v_max, s_star, s0].
    model_params = [0.6660,21.5975,8.9368,2.2146,2.8150]
    sim_results = []
    # Sweep the FollowerStopper desired speed from 1 to 9 m/s in 25 steps.
    v_des_vals = np.linspace(1.0,9.0,25)
    v_des_vals = list(v_des_vals)
    start_time = time.time()
    for v_des in v_des_vals:
        # Overwrite the same console line with the current sweep value.
        sys.stdout.write('\r'+'Simulating v_des: '+str(v_des))
        csv_path = run_sim(v_des,model_params,emission_path)
        sim_data = get_sim_results(csv_path)
        # Record [v_des, mean speed, speed std] for this run.
        sim_results.append([v_des,sim_data[0],sim_data[1]])
        # Delete the bulky emission CSV once it has been summarized.
        os.remove(csv_path)
    sim_time = time.time() - start_time
    sim_results = np.array(sim_results)
    np.savetxt('follower_stopper_sweep.csv',sim_results)
    print('Simulation sweep finished, time to complete: '+str(sim_time))
| 24.53012 | 117 | 0.711444 | from flow.controllers import FollowerStopper, IDMController, ContinuousRouter, OVMController
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams
from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS
from flow.core.experiment import Experiment
import pandas as pd
import numpy as np
import os
import sys
import time
def get_flow_dict(v_des,model_params,emission_path):
alpha = model_params[0]
beta = model_params[1]
v_max = model_params[2]
s_star = model_params[3]
s0 = model_params[4]
human_accel = (OVMController,{'alpha':alpha,'beta':beta,'v_max':v_max,'s_star':s_star,'s0':s0,'noise':.1})
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=human_accel,
routing_controller=(ContinuousRouter, {}),
num_vehicles=20)
vehicles.add(
color='red',
veh_id="AV",
acceleration_controller=(FollowerStopper, {'v_des':v_des}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=1)
flow_params = dict(
exp_tag='ring',
env_name=AccelEnv,
network=RingNetwork,
simulator='traci',
sim=SumoParams(
render=False,
sim_step=0.1,
emission_path=emission_path,
),
env=EnvParams(
horizon=3000,
warmup_steps=750,
additional_params=ADDITIONAL_ENV_PARAMS,
),
net=NetParams(
additional_params={
"length": 260,
"lanes": 1,
"speed_limit": 30,
"resolution": 40,
}, ),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
# initial=InitialConfig(
# bunching=20,
# ),
)
return flow_params
def run_sim(v_des,model_params,emission_path):
flow_params = get_flow_dict(v_des,model_params,emission_path)
exp = Experiment(flow_params)
[emission_files,info_dict] = exp.run(num_runs=1,convert_to_csv=True)
csv_path = emission_files[0]
return csv_path
def get_sim_results(csv_path):
data = pd.read_csv(csv_path,delimiter=',')
ids = data.id.unique() #numpy array
ids = list(ids)
sim_time = np.array(data[data['id']==ids[0]]['time'])
sim_length = sim_time[-1]
time_threshold = sim_length/2
speed_measurements = data[data['time'] > time_threshold]['speed']
speed_measurements = np.array(speed_measurements)
ave_speed = np.mean(speed_measurements)
std_speed = np.std(speed_measurements)
return [ave_speed,std_speed]
if __name__ == "__main__":
emission_path = '/Users/vanderbilt/Desktop/Research_2020/CIRCLES/Official_Flow/flow/examples/follower_stopper_sims/'
model_params = [0.6660,21.5975,8.9368,2.2146,2.8150]
sim_results = []
v_des_vals = np.linspace(1.0,9.0,25)
v_des_vals = list(v_des_vals)
start_time = time.time()
for v_des in v_des_vals:
sys.stdout.write('\r'+'Simulating v_des: '+str(v_des))
csv_path = run_sim(v_des,model_params,emission_path)
sim_data = get_sim_results(csv_path)
sim_results.append([v_des,sim_data[0],sim_data[1]])
os.remove(csv_path)
sim_time = time.time() - start_time
sim_results = np.array(sim_results)
np.savetxt('follower_stopper_sweep.csv',sim_results)
print('Simulation sweep finished, time to complete: '+str(sim_time))
| true | true |
f739e810378e3ef9bf5e8fdd4b5a50f8df17f877 | 2,569 | py | Python | cvxpy/atoms/affine/unary_operators.py | hashstat/cvxpy | 20d667ebe8614821fa38e41b1e333257512d9594 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-05-28T16:41:11.000Z | 2021-05-28T16:41:11.000Z | cvxpy/atoms/affine/unary_operators.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/atoms/affine/unary_operators.py | h-vetinari/cvxpy | 86307f271819bb78fcdf64a9c3a424773e8269fa | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.lin_ops.lin_utils as lu
import operator as op
class UnaryOperator(AffAtom):
"""
Base class for expressions involving unary operators.
"""
def __init__(self, expr) -> None:
super(UnaryOperator, self).__init__(expr)
def name(self):
return self.OP_NAME + self.args[0].name()
# Applies the unary operator to the value.
def numeric(self, values):
return self.OP_FUNC(values[0])
class NegExpression(UnaryOperator):
"""Negation of an expression.
"""
OP_NAME = "-"
OP_FUNC = op.neg
def shape_from_args(self):
"""Returns the (row, col) shape of the expression.
"""
return self.args[0].shape
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
return (self.args[0].is_nonpos(), self.args[0].is_nonneg())
def is_incr(self, idx) -> bool:
"""Is the composition non-decreasing in argument idx?
"""
return False
def is_decr(self, idx) -> bool:
"""Is the composition non-increasing in argument idx?
"""
return True
def is_symmetric(self) -> bool:
"""Is the expression symmetric?
"""
return self.args[0].is_symmetric()
def is_hermitian(self) -> bool:
"""Is the expression Hermitian?
"""
return self.args[0].is_hermitian()
def graph_implementation(self, arg_objs, shape: Tuple[int, ...], data=None):
"""Negate the affine objective.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.neg_expr(arg_objs[0]), [])
| 27.623656 | 80 | 0.63371 | from typing import Tuple
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.lin_ops.lin_utils as lu
import operator as op
class UnaryOperator(AffAtom):
def __init__(self, expr) -> None:
super(UnaryOperator, self).__init__(expr)
def name(self):
return self.OP_NAME + self.args[0].name()
def numeric(self, values):
return self.OP_FUNC(values[0])
class NegExpression(UnaryOperator):
OP_NAME = "-"
OP_FUNC = op.neg
def shape_from_args(self):
return self.args[0].shape
def sign_from_args(self):
return (self.args[0].is_nonpos(), self.args[0].is_nonneg())
def is_incr(self, idx) -> bool:
return False
def is_decr(self, idx) -> bool:
return True
def is_symmetric(self) -> bool:
return self.args[0].is_symmetric()
def is_hermitian(self) -> bool:
return self.args[0].is_hermitian()
def graph_implementation(self, arg_objs, shape: Tuple[int, ...], data=None):
return (lu.neg_expr(arg_objs[0]), [])
| true | true |
f739e8c0cdbab17b8c0d325ceb1c088852a1d813 | 223 | py | Python | mozmill-env/python/Lib/site-packages/mozmill_automation/__init__.py | lucashmorais/x-Bench | 2080b8753dd6e45c2212666bcdb05327752a94e9 | [
"MIT"
] | null | null | null | mozmill-env/python/Lib/site-packages/mozmill_automation/__init__.py | lucashmorais/x-Bench | 2080b8753dd6e45c2212666bcdb05327752a94e9 | [
"MIT"
] | null | null | null | mozmill-env/python/Lib/site-packages/mozmill_automation/__init__.py | lucashmorais/x-Bench | 2080b8753dd6e45c2212666bcdb05327752a94e9 | [
"MIT"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from testrun import *
| 31.857143 | 75 | 0.726457 |
from testrun import *
| true | true |
f739e943b254657ba870ee727b94a4a77dd4fbe2 | 8,450 | py | Python | test/functional/wallet_import_rescan.py | EnzoNodes/ENZO | 3f607ab55261ec22a02f3eba4b32699eed187620 | [
"MIT"
] | null | null | null | test/functional/wallet_import_rescan.py | EnzoNodes/ENZO | 3f607ab55261ec22a02f3eba4b32699eed187620 | [
"MIT"
] | null | null | null | test/functional/wallet_import_rescan.py | EnzoNodes/ENZO | 3f607ab55261ec22a02f3eba4b32699eed187620 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import enzoTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, assert_equal, set_node_times)
import collections
import enum
import itertools
# RPC styles exercised by the test: "single" covers importaddress /
# importpubkey / importprivkey, "multi" covers importmulti.
# Variant.do_import dispatches on Call.multi, so the member must exist;
# without it, evaluating `self.call == Call.multi` raises AttributeError
# and the importmulti path can never run.
Call = enum.Enum("Call", "single multi")
# Kind of key material being imported.
Data = enum.Enum("Data", "address pub priv")
# Whether (and with what timestamp) a rescan is requested for the import.
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
    """Helper for importing one key and verifying scanned transactions.

    The namedtuple fields select the import RPC style (call), the key
    material imported (data), the rescan behaviour and whether the
    importing node runs with pruning enabled.  The test attaches further
    attributes (node, address, key, label, expect_disabled,
    expected_balance, expected_txs) before these methods are used.
    """
    def try_rpc(self, func, *args, **kwargs):
        # A pruned node refuses rescans: the RPC is expected to fail with
        # the documented error instead of succeeding.
        if self.expect_disabled:
            assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
        else:
            return func(*args, **kwargs)
    def do_import(self, timestamp):
        """Call one key import RPC."""
        rescan = self.rescan == Rescan.yes
        if self.call == Call.single:
            if self.data == Data.address:
                response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, rescan)
            elif self.data == Data.pub:
                response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, rescan)
            elif self.data == Data.priv:
                response = self.try_rpc(self.node.importprivkey, self.key, self.label, rescan)
            assert_equal(response, None)
        elif self.call == Call.multi:
            # importmulti takes a key timestamp rather than a rescan flag;
            # pushing it past the rescan window (late_timestamp) keeps the
            # rescan from picking up the earlier send.
            response = self.node.importmulti([{
                "scriptPubKey": {
                    "address": self.address["address"]
                },
                "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
                "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
                "keys": [self.key] if self.data == Data.priv else [],
                "label": self.label,
                "watchonly": self.data != Data.priv
            }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
            assert_equal(response, [{"success": True}])
    def check(self, txid=None, amount=None, confirmations=None):
        """Verify that listreceivedbyaddress returns the expected values.

        NOTE(review): the ``amount`` parameter is unused; the received
        amount is checked against ``self.expected_balance`` instead --
        confirm whether callers rely on passing it.
        """
        addresses = self.node.listreceivedbyaddress(0, True, self.address['address'])
        if self.expected_txs:
            assert_equal(len(addresses[0]["txids"]), self.expected_txs)
        if txid is not None:
            address, = [ad for ad in addresses if txid in ad["txids"]]
            assert_equal(address["address"], self.address["address"])
            assert_equal(address["amount"], self.expected_balance)
            assert_equal(address["confirmations"], confirmations)
            # Verify the transaction is correctly marked watchonly depending on
            # whether the transaction pays to an imported public key or
            # imported private key. The test setup ensures that transaction
            # inputs will not be from watchonly keys (important because
            # involvesWatchonly will be true if either the transaction output
            # or inputs are watchonly).
            if self.data != Data.priv:
                assert_equal(address["involvesWatchonly"], True)
            else:
                assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(enzoTestFramework):
    """End-to-end test of the wallet import RPCs and their rescan behavior."""
    def set_test_params(self):
        """One funding node, one key-source node, plus one node per import-node combination."""
        self.num_nodes = 2 + len(IMPORT_NODES)
    def setup_network(self):
        """Start all nodes (pruned where required) and connect each one to node 0."""
        extra_args = [["-addresstype=legacy",] for _ in range(self.num_nodes)]
        for i, import_node in enumerate(IMPORT_NODES, 2):
            if import_node.prune:
                extra_args[i] += ["-prune=1"]
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)
    def run_test(self):
        """Fund one address per import variant, import it, and verify balances/transactions."""
        # Create one transaction on node 0 with a unique amount for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = 10 - (i + 1) / 4.0
            variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
        # Generate a block containing the initial transactions, then another
        # block further in the future (past the rescan window).
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
        set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
        self.nodes[0].generate(1)
        self.sync_blocks()
        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()
        # Create new transactions sending to each address.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 10 - (2 * i + 1) / 8.0
            variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
        # Generate a block containing the new transactions.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        self.sync_blocks()
        # Check the latest results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 47.206704 | 116 | 0.665444 |
from test_framework.test_framework import enzoTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
rescan = self.rescan == Rescan.yes
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, rescan)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, rescan)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, rescan)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
addresses = self.node.listreceivedbyaddress(0, True, self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], confirmations)
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(enzoTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [["-addresstype=legacy",] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
for i, variant in enumerate(IMPORT_VARIANTS):
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
self.sync_blocks()
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_blocks()
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| true | true |
f739e9c686022d42f710bf3f3f3b03cc2857019e | 1,051 | py | Python | src/minterpy/transformations/lagrange.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 13 | 2021-11-30T17:52:45.000Z | 2021-12-09T10:05:20.000Z | src/minterpy/transformations/lagrange.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 2 | 2021-11-30T18:31:22.000Z | 2022-02-10T10:13:37.000Z | src/minterpy/transformations/lagrange.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 6 | 2021-11-30T18:17:26.000Z | 2022-02-18T17:38:27.000Z | """
Concrete implementations of the Transformation classes for the LagrangePolynomial.
Transformations from Lagrange basis to Newton and Canonical basis are provided.
"""
from minterpy.core.ABC import TransformationABC
from minterpy.polynomials import CanonicalPolynomial, NewtonPolynomial
from minterpy.polynomials.lagrange_polynomial import LagrangePolynomial
from .utils import (
_build_lagrange_to_canonical_operator,
_build_lagrange_to_newton_operator,
)
__all__ = ["LagrangeToNewton", "LagrangeToCanonical"]
class LagrangeToNewton(TransformationABC):
    """Transformation from LagrangePolynomial to NewtonPolynomial"""
    # Source/target basis types consumed by the TransformationABC machinery.
    origin_type = LagrangePolynomial
    target_type = NewtonPolynomial
    # Operator construction is delegated to the shared utility builder.
    _get_transformation_operator = _build_lagrange_to_newton_operator
class LagrangeToCanonical(TransformationABC):
    """Transformation from LagrangePolynomial to CanonicalPolynomial"""
    # Source/target basis types consumed by the TransformationABC machinery.
    origin_type = LagrangePolynomial
    target_type = CanonicalPolynomial
    # Operator construction is delegated to the shared utility builder.
    _get_transformation_operator = _build_lagrange_to_canonical_operator
| 32.84375 | 82 | 0.830637 | from minterpy.core.ABC import TransformationABC
from minterpy.polynomials import CanonicalPolynomial, NewtonPolynomial
from minterpy.polynomials.lagrange_polynomial import LagrangePolynomial
from .utils import (
_build_lagrange_to_canonical_operator,
_build_lagrange_to_newton_operator,
)
__all__ = ["LagrangeToNewton", "LagrangeToCanonical"]
class LagrangeToNewton(TransformationABC):
origin_type = LagrangePolynomial
target_type = NewtonPolynomial
_get_transformation_operator = _build_lagrange_to_newton_operator
class LagrangeToCanonical(TransformationABC):
origin_type = LagrangePolynomial
target_type = CanonicalPolynomial
_get_transformation_operator = _build_lagrange_to_canonical_operator
| true | true |
f739eb5a8ca0992a17a6dbd1a4ee0628e0cbae25 | 716 | py | Python | src/file_parser.py | gldgrnt/twitter-search-scraper | b0f2834f4fcb6406d63099a83352d60f4c39a99c | [
"MIT"
] | null | null | null | src/file_parser.py | gldgrnt/twitter-search-scraper | b0f2834f4fcb6406d63099a83352d60f4c39a99c | [
"MIT"
] | null | null | null | src/file_parser.py | gldgrnt/twitter-search-scraper | b0f2834f4fcb6406d63099a83352d60f4c39a99c | [
"MIT"
] | null | null | null | import os
import sys
from csv import reader
class FileParser:
    """Load search queries from a plain-text file, one query per line."""

    def __init__(self, file_path):
        """Validate *file_path* and load its queries into ``self.queries``.

        Prints an error and exits the program when the file is not a
        ``.txt`` file (behavior preserved from the original CLI contract).
        """
        # Only the extension matters for validation; the stem is unused.
        _, extension = os.path.splitext(file_path)
        if extension != '.txt':
            print('Invalid file type, must be .txt with one query per line')
            sys.exit()
        self.queries = self.parse_file(file_path)

    def parse_file(self, file_path):
        """Return a list of queries, one per line, with newlines removed."""
        # "with" guarantees the handle is closed even if reading raises,
        # fixing the leak in the previous open()/close() sequence.
        with open(file_path, 'r') as handle:
            return [line.replace('\n', '') for line in handle]
import sys
from csv import reader
class FileParser:
def __init__(self, file_path):
filename, extension = os.path.splitext(file_path)
if extension != '.txt':
print('Invalid file type, must be .txt with one query per line')
sys.exit()
self.queries = self.parse_file(file_path)
def parse_file(self, file_path):
file = open(file_path, 'r')
queries = []
for line in file:
queries.append(line.replace('\n', ''))
file.close()
return queries | true | true |
f739ed18da1797cb33e0f0cc2c265bff5cefa2d9 | 551 | py | Python | Python/Tests/TestData/DebuggerProject/PrevFrameEnumChildTestV3.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 404 | 2019-05-07T02:21:57.000Z | 2022-03-31T17:03:04.000Z | Python/Tests/TestData/DebuggerProject/PrevFrameEnumChildTestV3.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 1,672 | 2019-05-06T21:09:38.000Z | 2022-03-31T23:16:04.000Z | Python/Tests/TestData/DebuggerProject/PrevFrameEnumChildTestV3.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 186 | 2019-05-13T03:17:37.000Z | 2022-03-31T16:24:05.000Z | # coding: utf-8
def g():
    # Intentionally empty: f() below calls it, presumably so a debugger has a
    # call to step into — TODO confirm against the debugger test harness.
    pass
def f():
    """Build a variety of local values (dicts, sets, classes, instances).

    NOTE(review): the repo path suggests this is a debugger test fixture —
    the locals appear to exist solely to be enumerated by a debugger, which
    is why none of them are otherwise used. Confirm before refactoring.
    """
    d1 = {42: 100}
    d2 = {'abc': 'fob'}
    d3 = {1e1000: d1}  # 1e1000 overflows to float('inf') as a dict key
    s = set([frozenset([2,3,4])])
    class C(object):
        abc = 42
        def f(self): pass
    cinst = C()
    class C2(object):
        abc = 42
        def __init__(self):
            self.oar = 100
            self.self = self  # self-referential attribute (cycle)
        def __repr__(self):
            return 'myrepr'
        def __hex__(self):
            return 'myhex'
        def f(self): pass
    c2inst = C2()
    l = [1, 2, ]
    i = 3
    g()
f() | 14.891892 | 33 | 0.421053 |
def g():
pass
def f():
d1 = {42: 100}
d2 = {'abc': 'fob'}
d3 = {1e1000: d1}
s = set([frozenset([2,3,4])])
class C(object):
abc = 42
def f(self): pass
cinst = C()
class C2(object):
abc = 42
def __init__(self):
self.oar = 100
self.self = self
def __repr__(self):
return 'myrepr'
def __hex__(self):
return 'myhex'
def f(self): pass
c2inst = C2()
l = [1, 2, ]
i = 3
g()
f() | true | true |
f739f035b0052a37929022735ab9a7e92d518233 | 815 | py | Python | coursedashboards/cache.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | 1 | 2018-04-05T19:00:27.000Z | 2018-04-05T19:00:27.000Z | coursedashboards/cache.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | 188 | 2017-08-31T23:38:23.000Z | 2022-03-29T18:06:00.000Z | coursedashboards/cache.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | null | null | null | from memcached_clients import RestclientPymemcacheClient
import re
ONE_MINUTE = 60
ONE_HOUR = 60 * 60
class RestClientsCache(RestclientPymemcacheClient):
    """A custom cache implementation for Course Dashboards."""

    def get_cache_expiration_time(self, service, url, status=200):
        """Return the cache TTL (seconds) for a response, or None to skip caching."""
        if service == "sws":
            # Term resources change rarely; enrollment/registration more often.
            if re.match(r"^/student/v\d/term/\d{4}", url):
                return ONE_HOUR * 10
            if re.match(r"^/student/v\d/(?:enrollment|registration)", url):
                return ONE_HOUR * 2
            return ONE_HOUR
        if service == "pws":
            return ONE_HOUR * 10
        if service == "gws":
            return ONE_MINUTE * 2
        if service == "canvas":
            # Only cache successful responses for the long window.
            return ONE_HOUR * 10 if status == 200 else ONE_MINUTE * 5
| 28.103448 | 75 | 0.580368 | from memcached_clients import RestclientPymemcacheClient
import re
ONE_MINUTE = 60
ONE_HOUR = 60 * 60
class RestClientsCache(RestclientPymemcacheClient):
def get_cache_expiration_time(self, service, url, status=200):
if "sws" == service:
if re.match(r"^/student/v\d/term/\d{4}", url):
return ONE_HOUR * 10
if re.match(r"^/student/v\d/(?:enrollment|registration)", url):
return ONE_HOUR * 2
return ONE_HOUR
if "pws" == service:
return ONE_HOUR * 10
if "gws" == service:
return ONE_MINUTE * 2
if "canvas" == service:
if status == 200:
return ONE_HOUR * 10
return ONE_MINUTE * 5
| true | true |
f739f11f89e0b302d28be62f97a04949d2e82d11 | 4,771 | py | Python | rpy2_utils/robjects.py | pdatlab/rpy2_utils | 8d563592550272604cf6453c6d4dd121f3da49b6 | [
"MIT"
] | null | null | null | rpy2_utils/robjects.py | pdatlab/rpy2_utils | 8d563592550272604cf6453c6d4dd121f3da49b6 | [
"MIT"
] | null | null | null | rpy2_utils/robjects.py | pdatlab/rpy2_utils | 8d563592550272604cf6453c6d4dd121f3da49b6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
import rpy2
import pandas as pd
class DataFrame():
    """Thin wrapper around an rpy2 R data.frame with name-based column access."""
    def __init__(self,r_df):
        """
        Wrap an rpy2 data.frame.

        Usage:
        import rpy2_utils as ru
        dfh = ru.robjects.DataFrame(r_df)
        """
        #TODO: Verify data type
        self.r = r_df
    def __contains__(self, name):
        # True if *name* is a column of the wrapped data.frame.
        return name in self.r.names
    def renameColumn(self,old_name,new_name):
        # Rename a column in place on the underlying R object.
        names = list(self.r.names)
        I = names.index(old_name)
        self.r.names[I] = new_name
    @property
    def names(self):
        # Column names of the wrapped data.frame (rpy2 vector).
        return self.r.names
    def __getitem__(self,name):
        """Return the named column wrapped in the matching vector class."""
        names = list(self.r.names)
        I = names.index(name)
        r_column = self.r[I]
        #Had isinstance, but factor is subclass of int
        #to generally avoid subclass comparisons, switched to type()
        if type(r_column) == rpy2.robjects.vectors.IntVector:
            return IntVector(r_column)
        elif type(r_column) == rpy2.robjects.vectors.StrVector:
            return StrVector(r_column)
        elif type(r_column) == rpy2.robjects.vectors.FloatVector:
            return FloatVector(r_column)
        elif type(r_column) == rpy2.robjects.vectors.FactorVector:
            return FactorVector(r_column)
        else:
            raise Exception('Unhandled case')
    def __setitem__(self, name, new_value):
        # Replace the named column; *new_value* must be one of the wrapper
        # classes in this module (its .r attribute is stored).
        names = list(self.r.names)
        I = names.index(name)
        self.r[I] = new_value.r
class IntVector():
    """Wrapper around an rpy2 IntVector exposing factor conversion."""
    def __init__(self,r):
        # Underlying rpy2 integer vector.
        self.r = r
    def as_factor(self,levels=None,ordered=False,na=None):
        """Convert to a FactorVector; *na* handling is not implemented here."""
        if na is not None:
            raise Exception('NA option not yet handled for int vector')
        if levels is None:
            r = rpy2.robjects.vectors.FactorVector(self.r,ordered=ordered)
        else:
            levels_r = rpy2.robjects.vectors.IntVector(levels)
            r = rpy2.robjects.vectors.FactorVector(self.r,levels=levels_r,ordered=ordered)
        return FactorVector(r)
class StrVector():
    """Wrapper around an rpy2 StrVector exposing factor conversion."""

    def __init__(self, r):
        # Underlying rpy2 string vector.
        self.r = r

    def as_factor(self, levels=None, ordered=False, na=None):
        """Convert to a FactorVector.

        :param levels: optional explicit level values (list of str)
        :param ordered: build an ordered factor when True
        :param na: level value to treat as missing; only supported together
            with ``levels`` (it is dropped from the level list so matching
            entries become NA in the resulting factor)
        """
        if levels is None:
            if na is not None:
                raise Exception('NA for no levels specified not yet handled')
            r = rpy2.robjects.vectors.FactorVector(self.r, ordered=ordered)
        else:
            if na is not None:
                # Work on a filtered copy instead of mutating the caller's
                # list (the original called levels.remove(na) in place).
                levels = [lv for lv in levels if lv != na]
            levels_r = rpy2.robjects.vectors.StrVector(levels)
            r = rpy2.robjects.vectors.FactorVector(self.r, levels=levels_r, ordered=ordered)
        return FactorVector(r)
class FloatVector():
    """Wrapper around an rpy2 FloatVector (no conversions implemented yet)."""
    def __init__(self,r):
        # Underlying rpy2 float vector.
        self.r = r
class FactorVector():
    """Wrapper around an rpy2 FactorVector."""
    def __init__(self,r):
        # Underlying rpy2 factor vector.
        self.r = r
    @property
    def levels(self):
        # The factor's level values (rpy2 vector).
        return self.r.levels
    def as_factor(self,levels=None,ordered=False,na=None):
        #TODO: it is possible this changes the levels
        #Right now this occurs when we rerun code that has
        #already been converted
        # Already a factor: the arguments are ignored and self is returned.
        return self
class FloatMatrix():
    """Wrapper around an rpy2 float matrix with pandas conversion."""
    def __init__(self,r):
        # Underlying rpy2 matrix (column-major storage).
        self.r = r
    def as_dataframe(self):
        """Convert to a pandas DataFrame; R row names become a 'rownames' column."""
        #TODO: Clean this up, can we just extract column values
        #   rather than build by row? Yes, slice by column
        #    n_rows = 5
        #    col1 = self.r[0:4]
        #    col2 = self.r[5:9]
        #    etc
        #
        #- make it so rownames is either a column (default) or index
        data = self.r
        col_names = ['rownames'] + list(data.colnames)
        row_names = data.rownames
        num_cols = data.ncol
        num_rows = data.nrow
        col_range = range(num_cols)
        row_range = range(num_rows)
        rows = []
        for x in row_range:
            # R matrices are column-major, so element (row x, col p) lives at
            # flat index x + p*num_rows.
            index = [x+p*num_rows for p in col_range]
            row_values = [data[p] for p in index]
            row_values = [row_names[x]] + row_values
            row = dict(zip(col_names,row_values))
            row = pd.DataFrame(row, index=[x])
            rows.append(row)
        output = pd.concat(rows)
        return output
| 28.915152 | 90 | 0.543702 |
import rpy2
import pandas as pd
class DataFrame():
def __init__(self,r_df):
self.r = r_df
def __contains__(self, name):
return name in self.r.names
def renameColumn(self,old_name,new_name):
names = list(self.r.names)
I = names.index(old_name)
self.r.names[I] = new_name
@property
def names(self):
return self.r.names
def __getitem__(self,name):
names = list(self.r.names)
I = names.index(name)
r_column = self.r[I]
if type(r_column) == rpy2.robjects.vectors.IntVector:
return IntVector(r_column)
elif type(r_column) == rpy2.robjects.vectors.StrVector:
return StrVector(r_column)
elif type(r_column) == rpy2.robjects.vectors.FloatVector:
return FloatVector(r_column)
elif type(r_column) == rpy2.robjects.vectors.FactorVector:
return FactorVector(r_column)
else:
raise Exception('Unhandled case')
def __setitem__(self, name, new_value):
names = list(self.r.names)
I = names.index(name)
self.r[I] = new_value.r
class IntVector():
def __init__(self,r):
self.r = r
def as_factor(self,levels=None,ordered=False,na=None):
if na is not None:
raise Exception('NA option not yet handled for int vector')
if levels is None:
r = rpy2.robjects.vectors.FactorVector(self.r,ordered=ordered)
else:
levels_r = rpy2.robjects.vectors.IntVector(levels)
r = rpy2.robjects.vectors.FactorVector(self.r,levels=levels_r,ordered=ordered)
return FactorVector(r)
class StrVector():
def __init__(self,r):
self.r = r
def as_factor(self,levels=None,ordered=False,na=None):
if levels is None:
if na is not None:
raise Exception('NA for no levels specified not yet handled')
r = rpy2.robjects.vectors.FactorVector(self.r,ordered=ordered)
else:
if na is not None:
levels.remove(na)
levels_r = rpy2.robjects.vectors.StrVector(levels)
r = rpy2.robjects.vectors.FactorVector(self.r,levels=levels_r,ordered=ordered)
tor():
def __init__(self,r):
self.r = r
class FactorVector():
def __init__(self,r):
self.r = r
@property
def levels(self):
return self.r.levels
def as_factor(self,levels=None,ordered=False,na=None):
return self
class FloatMatrix():
def __init__(self,r):
self.r = r
def as_dataframe(self):
data = self.r
col_names = ['rownames'] + list(data.colnames)
row_names = data.rownames
num_cols = data.ncol
num_rows = data.nrow
col_range = range(num_cols)
row_range = range(num_rows)
rows = []
for x in row_range:
index = [x+p*num_rows for p in col_range]
row_values = [data[p] for p in index]
row_values = [row_names[x]] + row_values
row = dict(zip(col_names,row_values))
row = pd.DataFrame(row, index=[x])
rows.append(row)
output = pd.concat(rows)
return output
| true | true |
f739f28f09e442fb9948d6b739f00afa3a7fe6ca | 315 | py | Python | math_day1/toy.py | ease44/training_camp | 9a7b60ceb36e8634d2861c7b662934bbcdbb2093 | [
"MIT"
] | null | null | null | math_day1/toy.py | ease44/training_camp | 9a7b60ceb36e8634d2861c7b662934bbcdbb2093 | [
"MIT"
] | null | null | null | math_day1/toy.py | ease44/training_camp | 9a7b60ceb36e8634d2861c7b662934bbcdbb2093 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
搭积木
小球相同,格子不同
"""
def combination(n, m):
    """Return the binomial coefficient C(n, m) modulo 10**9 + 7.

    Fixes the original implementation, which computed a throwaway value
    ``t`` on every iteration but never updated ``r``, so it always
    returned 1. The running product ``r`` equals C(n, i) after the i-th
    step and stays an exact integer (the division by i is always exact at
    that point); the modulus is applied once at the end to keep the
    result in range.
    """
    r = 1
    for i in range(1, m + 1):
        r = r * (n - i + 1) // i
    return r % (10 ** 9 + 7)
if __name__ == '__main__':
n, m = map(int, input().split(' '))
print(combination(m * m + n - 1, n) % (10 ** 9 + 7))
| 16.578947 | 56 | 0.44127 |
def combination(n, m):
r = 1
for i in range(1, m + 1):
t = (r * (n - i + 1) // i) % (10 ** 9 + 7)
t *= r
return r
if __name__ == '__main__':
n, m = map(int, input().split(' '))
print(combination(m * m + n - 1, n) % (10 ** 9 + 7))
| true | true |
f739f2f5b6b458f3a2cb5f2dbb8060e080fca2d8 | 243 | py | Python | mac/pyobjc-framework-Quartz/PyObjCTest/test_cifiltershape.py | albertz/music-player | d23586f5bf657cbaea8147223be7814d117ae73d | [
"BSD-2-Clause"
] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Quartz/PyObjCTest/test_cifiltershape.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Quartz/PyObjCTest/test_cifiltershape.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z |
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
class TestCIFilterShape (TestCase):
    """Check the PyObjC bridge metadata for Quartz's CIFilterShape."""
    def testMethods(self):
        # Argument 1 of -transformBy:interior: must be bridged as a BOOL.
        self.assertArgIsBOOL(CIFilterShape.transformBy_interior_, 1)
main()
| 22.090909 | 68 | 0.748971 |
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
class TestCIFilterShape (TestCase):
def testMethods(self):
self.assertArgIsBOOL(CIFilterShape.transformBy_interior_, 1)
if __name__ == "__main__":
main()
| true | true |
f739f45f996aaacfcdd9136f44db8f1afcb98858 | 797 | py | Python | migrations/versions/577055ed7da4_.py | Evantually/cash-and-associates | ffccaf8d1018b312296adc6d432f802fd825b483 | [
"MIT"
] | null | null | null | migrations/versions/577055ed7da4_.py | Evantually/cash-and-associates | ffccaf8d1018b312296adc6d432f802fd825b483 | [
"MIT"
] | null | null | null | migrations/versions/577055ed7da4_.py | Evantually/cash-and-associates | ffccaf8d1018b312296adc6d432f802fd825b483 | [
"MIT"
] | 1 | 2021-08-21T12:42:57.000Z | 2021-08-21T12:42:57.000Z | """empty message
Revision ID: 577055ed7da4
Revises: 732b6fffe866
Create Date: 2021-08-19 02:09:56.472249
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '577055ed7da4'
down_revision = '732b6fffe866'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable, indexed ``timestamp`` column to the ``job`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('job', sa.Column('timestamp', sa.DateTime(), nullable=True))
    op.create_index(op.f('ix_job_timestamp'), 'job', ['timestamp'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse :func:`upgrade`: drop the ``job.timestamp`` index and column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_job_timestamp'), table_name='job')
    op.drop_column('job', 'timestamp')
    # ### end Alembic commands ###
| 25.709677 | 81 | 0.691343 | from alembic import op
import sqlalchemy as sa
revision = '577055ed7da4'
down_revision = '732b6fffe866'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f739f5e047215138ecb2c702d77b4628b3c2bf85 | 13,067 | py | Python | indicators/admin.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | null | null | null | indicators/admin.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | indicators/admin.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | from adminsortable2.admin import SortableInlineAdminMixin
from django.contrib import admin
from django.db import models
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.utils.html import format_html
from indicators.models import (
Indicator, IndicatorType, Result, StrategicObjective, Objective, Level,
ExternalService, ExternalServiceRecord, DataCollectionFrequency,
DisaggregationType, PeriodicTarget, DisaggregationLabel, ReportingFrequency,
ExternalServiceAdmin,
ExternalServiceRecordAdmin,
PeriodicTargetAdmin,
)
from workflow.models import Sector, Program, Country
from import_export import resources, fields
from import_export.widgets import ForeignKeyWidget, ManyToManyWidget
from import_export.admin import ImportExportModelAdmin
from simple_history.admin import SimpleHistoryAdmin
class BooleanListFilterWithDefault(admin.SimpleListFilter):
    """Admin list filter with a default choice instead of "all".

    Unlike a plain ``SimpleListFilter``, an empty query string does not mean
    "no filtering": :meth:`value` falls back to :meth:`default_value`, and an
    explicit "Show all" choice (``all_value``) is appended so users can still
    disable the filter.
    """
    all_value = 'all'
    def default_value(self):
        """Return the lookup value to use when none is present in the URL."""
        raise NotImplementedError()
    def value(self):
        # Normalize to text so comparisons against lookup keys are stable.
        return force_text(super().value() if super().value() is not None else self.default_value())
    def choices(self, changelist):
        # Yield the regular lookup choices, then a trailing "Show all"
        # option mapping to the sentinel value that queryset() ignores.
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}, []),
                'display': title
            }
        yield {
            'selected': self.value() == force_text(self.all_value),
            'query_string': changelist.get_query_string({self.parameter_name: self.all_value}, []),
            'display': _('Show all')
        }
    def queryset(self, request, queryset):
        # NOTE(review): despite the generic class name, this filters on the
        # ``is_archived`` field directly, so the base class only works for
        # archived-style filters (see ArchivedFilter) — confirm before reuse.
        if self.value() == '0':
            queryset = queryset.filter(is_archived=False)
        elif self.value() == '1':
            queryset = queryset.filter(is_archived=True)
        return queryset
class ArchivedFilter(BooleanListFilterWithDefault):
    """Filter on archived status, defaulting to "active (not archived)"."""
    title = _('status')
    parameter_name = 'is_archived'
    def lookups(self, request, model_admin):
        # 0/1 map to is_archived=False/True in the base class queryset().
        return (
            # Translators: This is a filter option that allows users to limit results based on status of archived or not-archived
            (0, _('Active (not archived)')),
            # Translators: This is a filter option that allows users to limit results based on status of archived or not-archived
            (1, _('Inactive (archived)'))
        )
    def default_value(self):
        # Default to showing only active (non-archived) records.
        return 0
# TODO: is this obsolete?
class IndicatorResource(resources.ModelResource):
    """django-import-export resource describing Indicator import/export columns.

    NOTE(review): the many-to-many attributes below assign widget instances
    directly to the class; django-import-export normally expects
    ``fields.Field(widget=...)`` (as done for ``sector``) for a column to be
    wired up — confirm these M2M columns actually round-trip as intended.
    """
    indicator_type = ManyToManyWidget(IndicatorType, separator=" | ", field="indicator_type")
    objective = ManyToManyWidget(Objective, separator=" | ", field="objective")
    strategic_objective = ManyToManyWidget(StrategicObjective, separator=" | ", field="strategic_objective")
    level = ManyToManyWidget(Level, separator=" | ", field="level")
    reporting_frequencies = ManyToManyWidget(ReportingFrequency, separator=" | ", field="frequency")
    data_collection_frequencies = ManyToManyWidget(DataCollectionFrequency, separator=" | ", field="frequency")
    sector = fields.Field(column_name='sector', attribute='sector', widget=ForeignKeyWidget(Sector, 'sector'))
    program = ManyToManyWidget(Program, separator=" | ", field="name")

    class Meta:
        model = Indicator
        fields = ('id', 'indicator_type', 'level', 'objective', 'strategic_objective', 'name', 'number',
                  'source', 'definition', 'justification', 'unit_of_measure', 'baseline', 'lop_target',
                  'rationale_for_target', 'means_of_verification', 'data_collection_method',
                  'data_collection_frequencies', 'data_points', 'responsible_person',
                  'method_of_analysis', 'information_use', 'reporting_frequencies', 'quality_assurance',
                  'data_issues', 'comments', 'disaggregation', 'sector',
                  'program')
class IndicatorListFilter(admin.SimpleListFilter):
    """Admin list filter limiting indicators to programs in the user's country."""
    title = "Program"
    parameter_name = 'program'

    def lookups(self, request, model_admin):
        """Return (id, name) program choices scoped to the user's country."""
        user_country = request.user.tola_user.country
        programs = Program.objects.filter(country__in=[user_country]).values('id', 'name')
        # The original initialized an empty tuple and then rebuilt the whole
        # list on every loop iteration; a single comprehension is equivalent.
        return [(p['id'], p['name']) for p in programs]

    def queryset(self, request, queryset):
        """Restrict the queryset to the selected program, when one is chosen."""
        if self.value():
            queryset = queryset.filter(program__in=[self.value()])
        return queryset
class IndicatorAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
    """Indicator admin with CSV import/export and change-history support."""
    resource_class = IndicatorResource
    list_display = ('indicator_types', 'name', 'sector')
    search_fields = ('name', 'number', 'program__name')
    list_filter = (IndicatorListFilter, 'sector')
    display = 'Indicators'
    filter_horizontal = ('objectives', 'strategic_objectives', 'disaggregation')

    def get_queryset(self, request):
        """Limit non-superusers to indicators in their own country's programs."""
        # Modern zero-argument super() and "not x" instead of "x is False".
        queryset = super().get_queryset(request)
        if not request.user.is_superuser:
            user_country = request.user.tola_user.country
            programs = Program.objects.filter(country__in=[user_country])
            queryset = queryset.filter(program__in=programs)
        return queryset
class CountryFilter(admin.SimpleListFilter):
    """Admin list filter on country; non-superusers see only their own country."""
    title = 'country'
    parameter_name = 'country'

    def lookups(self, request, model_admin):
        """Return (id, name) country choices, restricted for non-superusers."""
        countries = Country.objects.all().values('id', 'country')
        # "not x" reads better than the original "x is False" comparison.
        if not request.user.is_superuser:
            user_country = request.user.tola_user.country
            countries = countries.filter(pk=user_country.pk)
        return [(c['id'], c['country']) for c in countries]

    def queryset(self, request, queryset):
        """Filter by country; Objective rows join through their program."""
        if self.value():
            if queryset.model == Objective:
                queryset = queryset.filter(program__country=self.value())
            else:
                queryset = queryset.filter(country=self.value())
        return queryset
class DisaggregationCategoryAdmin(SortableInlineAdminMixin, admin.StackedInline):
    """Sortable inline of DisaggregationLabel rows ("categories") on a disaggregation."""
    model = DisaggregationLabel
    min_num = 2
    extra = 0
    # Translators: This is label text for an individual category in a listing of disaggregation categories
    verbose_name = _('Category')
    # Translators: This is label text for a listing of disaggregation categories
    verbose_name_plural = _('Categories')
    fieldsets = (
        (None, {'fields': ('customsort', ('label', 'indicator_count')),
                'classes': ('inline-fieldset',)}),
    )
    readonly_fields = ('indicator_count',)
    def indicator_count(self, instance):
        """Placeholder for the number of indicators using this category."""
        # TODO: make this accurate (easier to do after indicator form / results form in place for testing)
        # return Indicator.rf_aware_objects.filter(result__disaggregation_value__disaggregation_label=instance).count()
        # NOTE(review): hardcoded placeholder value until the query above is enabled.
        return 4
class DisaggregationAdmin(admin.ModelAdmin):
    """Abstract base class for the two kinds of disaggregation admins (country and global).

    Subclasses must define ``STANDARD`` (bool: global vs country rows) and
    ``COLUMN_WIDTH`` (truncation width for the categories column).
    """
    display = _('Disaggregation')
    inlines = [
        DisaggregationCategoryAdmin,
    ]

    class Media:
        js = (
            'js/admin/disaggregation_admin.js',
        )
        css = {
            'all': ('css/admin/inline_forms.css',)
        }

    def program_count(self, instance):
        """returns a count of how many programs have indicators to which this disaggregation is assigned"""
        # Value comes from the annotation added in get_queryset().
        return instance.program_count_annotation
    program_count.admin_order_field = 'program_count_annotation'

    def pretty_archived(self, instance):
        """replaces the boolean check/X display, which seemed inappropriate for archived (red X for active)"""
        # Rendered under the "Active" column header: green Yes when not archived.
        return format_html(
            '<span style="color: {};">{}</span>',
            'red' if instance.is_archived else 'green',
            'No' if instance.is_archived else 'Yes'
        )
    pretty_archived.short_description = 'Active'

    def categories(self, instance):
        """returns a truncated, comma-separated list of the categories (labels) for a given disaggregation"""
        labels = ', '.join([category.label for category in instance.disaggregationlabel_set.all()])
        return (labels[:self.COLUMN_WIDTH-3] + '...') if len(labels) > self.COLUMN_WIDTH else labels

    def get_queryset(self, request):
        """annotation (programs using disaggregation) and filter (is or is not global)"""
        # Subquery counts distinct programs whose indicators use this
        # disaggregation; order_by() clears default ordering so the GROUP BY
        # on 'disaggregation' is correct.
        return super().get_queryset(request).filter(standard=self.STANDARD).annotate(
            program_count_annotation=models.Subquery(
                Indicator.rf_aware_objects.filter(
                    disaggregation=models.OuterRef('pk')
                ).values('disaggregation').order_by().annotate(
                    program_count=models.Count('program', distinct=True),
                ).values('program_count')[:1],
                output_field=models.IntegerField()
            ))
class GlobalDisaggregation(DisaggregationType):
    """Proxy model to allow for two admins for one model (disaggregation).

    Represents the standard (global) subset; see GlobalDisaggregationAdmin.
    """
    class Meta:
        proxy = True
@admin.register(GlobalDisaggregation)
class GlobalDisaggregationAdmin(DisaggregationAdmin):
    """Admin for standard (global) disaggregations — rows with standard=True and no country."""
    list_display = ('disaggregation_type', 'pretty_archived', 'program_count', 'categories')
    list_filter = (ArchivedFilter,)
    sortable_by = ('disaggregation_type', 'program_count')
    exclude = ('create_date', 'edit_date', 'country', 'standard')
    STANDARD = True  # shows only standard (global) disaggregations
    COLUMN_WIDTH = 70  # width of the "categories list" column before truncation
    def save_model(self, request, obj, form, change):
        """ensure on save that standard is true and country is blank - this is the global admin"""
        obj.standard = True
        obj.country = None
        super().save_model(request, obj, form, change)
class CountryDisaggregation(DisaggregationType):
    """Proxy model to allow for two admins for one model (disaggregation).

    Represents the country-specific subset; see CountryDisaggregationAdmin.
    """
    class Meta:
        proxy = True
@admin.register(CountryDisaggregation)
class CountryDisaggregationAdmin(DisaggregationAdmin):
    """Admin for country-specific disaggregations — rows with standard=False."""
    list_display = ('disaggregation_type', 'country', 'pretty_archived', 'program_count', 'categories')
    list_filter = (ArchivedFilter, 'country')
    sortable_by = ('disaggregation_type', 'program_count', 'country')
    exclude = ('create_date', 'edit_date', 'standard',)
    STANDARD = False  # shows only country (non-global) disaggregations
    COLUMN_WIDTH = 50  # narrower than the global admin: country column takes space
    def save_model(self, request, obj, form, change):
        """ensure on save that standard is false - this is the country-level admin"""
        obj.standard = False
        super().save_model(request, obj, form, change)
class ObjectiveAdmin(admin.ModelAdmin):
    """Admin for program objectives, scoped to the requesting user's country."""
    list_display = ('program', 'name')
    search_fields = ('name', 'program__name')
    list_filter = (CountryFilter,)  # ('program__country__country',)
    display = 'Program Objectives'

    def get_queryset(self, request):
        """Limit non-superusers to objectives of programs in their own country."""
        queryset = super().get_queryset(request)
        if not request.user.is_superuser:
            user_country = request.user.tola_user.country
            # values_list(..., flat=True) lets the ORM keep this as a nested
            # subquery instead of materializing an intermediate list of dicts
            # in Python (the original ran an extra query and a comprehension).
            program_ids = Program.objects.filter(
                country__in=[user_country]).values_list('id', flat=True)
            queryset = queryset.filter(program__in=program_ids)
        return queryset
class StrategicObjectiveAdmin(admin.ModelAdmin):
    """Admin for country strategic objectives; non-superusers see only their country's."""
    list_display = ('country', 'name')
    search_fields = ('country__country', 'name')
    list_filter = (CountryFilter,)  # ('country__country',)
    display = 'Country Strategic Objectives'

    def get_queryset(self, request):
        base_qs = super(StrategicObjectiveAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return base_qs
        # Everyone else is restricted to their own country's objectives.
        return base_qs.filter(country=request.user.tola_user.country)
class ResultResource(resources.ModelResource):
    """django-import-export resource describing the Result model for import/export."""
    class Meta:
        model = Result
        # Kept for reference: enable to match rows by id on import.
        # import_id_fields = ['id']
class ResultAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
    """Admin for indicator results with import/export support and change history."""
    resource_class = ResultResource
    list_display = ('indicator', 'program')
    # NOTE(review): 'indicator' and 'program' are relation names without a
    # field lookup (e.g. 'indicator__name'); Django's admin search normally
    # needs a concrete field and newer versions error on bare FK names —
    # confirm against the Django version in use.
    search_fields = ('indicator', 'program', 'owner__username')
    list_filter = ('indicator__program__country__country', 'program', 'approved_by')
    display = 'Indicators Results'
class ReportingFrequencyAdmin(admin.ModelAdmin):
    """Admin listing reporting frequencies alongside their audit timestamps."""
    list_display = ('frequency', 'description', 'create_date', 'edit_date')
    display = 'Reporting Frequency'
# Register the indicator-domain models with the default admin site; models
# without an admin class here get Django's default ModelAdmin.
# NOTE(review): ReportingFrequency is registered WITHOUT the
# ReportingFrequencyAdmin defined above — confirm whether that is intentional.
admin.site.register(IndicatorType)
admin.site.register(Indicator, IndicatorAdmin)
admin.site.register(ReportingFrequency)
admin.site.register(Result, ResultAdmin)
admin.site.register(Objective, ObjectiveAdmin)
admin.site.register(StrategicObjective, StrategicObjectiveAdmin)
admin.site.register(Level)
admin.site.register(ExternalService, ExternalServiceAdmin)
admin.site.register(ExternalServiceRecord, ExternalServiceRecordAdmin)
admin.site.register(DataCollectionFrequency)
admin.site.register(PeriodicTarget, PeriodicTargetAdmin)
| 41.48254 | 130 | 0.688605 | from adminsortable2.admin import SortableInlineAdminMixin
from django.contrib import admin
from django.db import models
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.utils.html import format_html
from indicators.models import (
Indicator, IndicatorType, Result, StrategicObjective, Objective, Level,
ExternalService, ExternalServiceRecord, DataCollectionFrequency,
DisaggregationType, PeriodicTarget, DisaggregationLabel, ReportingFrequency,
ExternalServiceAdmin,
ExternalServiceRecordAdmin,
PeriodicTargetAdmin,
)
from workflow.models import Sector, Program, Country
from import_export import resources, fields
from import_export.widgets import ForeignKeyWidget, ManyToManyWidget
from import_export.admin import ImportExportModelAdmin
from simple_history.admin import SimpleHistoryAdmin
class BooleanListFilterWithDefault(admin.SimpleListFilter):
    """SimpleListFilter variant whose default selection is a concrete choice.

    Subclasses implement default_value(); an explicit "Show all" choice
    (``all_value``) is appended so the unfiltered view stays reachable.
    """
    all_value = 'all'
    def default_value(self):
        """Value used when the URL carries no filter parameter; subclasses must override."""
        raise NotImplementedError()
    def value(self):
        # Fall back to the subclass default when no value is in the query
        # string; normalize to text so comparisons against lookups work.
        return force_text(super().value() if super().value() is not None else self.default_value())
    def choices(self, changelist):
        """Yield the standard lookup choices plus a trailing "Show all" option."""
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}, []),
                'display': title
            }
        yield {
            'selected': self.value() == force_text(self.all_value),
            'query_string': changelist.get_query_string({self.parameter_name: self.all_value}, []),
            'display': _('Show all')
        }
    def queryset(self, request, queryset):
        # NOTE(review): this base class hard-codes filtering on is_archived,
        # so it presumably only supports archived-state filters — confirm
        # before adding a subclass with different semantics.
        if self.value() == '0':
            queryset = queryset.filter(is_archived=False)
        elif self.value() == '1':
            queryset = queryset.filter(is_archived=True)
        return queryset
class ArchivedFilter(BooleanListFilterWithDefault):
    """Active/archived status filter that defaults to showing active rows."""
    title = _('status')
    parameter_name = 'is_archived'
    def lookups(self, request, model_admin):
        return (
            (0, _('Active (not archived)')),
            (1, _('Inactive (archived)'))
        )
    def default_value(self):
        # Default changelist view hides archived rows.
        return 0
class IndicatorResource(resources.ModelResource):
    """django-import-export resource for indicators.

    Many-to-many relations are serialized as " | "-separated values of the
    related model's display field; sector is exported by its name rather
    than its primary key.
    """
    indicator_type = ManyToManyWidget(IndicatorType, separator=" | ", field="indicator_type")
    objective = ManyToManyWidget(Objective, separator=" | ", field="objective")
    strategic_objective = ManyToManyWidget(StrategicObjective, separator=" | ", field="strategic_objective")
    level = ManyToManyWidget(Level, separator=" | ", field="level")
    reporting_frequencies = ManyToManyWidget(ReportingFrequency, separator=" | ", field="frequency")
    data_collection_frequencies = ManyToManyWidget(DataCollectionFrequency, separator=" | ", field="frequency")
    sector = fields.Field(column_name='sector', attribute='sector', widget=ForeignKeyWidget(Sector, 'sector'))
    program = ManyToManyWidget(Program, separator=" | ", field="name")
    class Meta:
        model = Indicator
        fields = ('id', 'indicator_type', 'level', 'objective', 'strategic_objective', 'name', 'number',
                  'source', 'definition', 'justification', 'unit_of_measure', 'baseline', 'lop_target',
                  'rationale_for_target', 'means_of_verification', 'data_collection_method',
                  'data_collection_frequencies', 'data_points', 'responsible_person',
                  'method_of_analysis', 'information_use', 'reporting_frequencies', 'quality_assurance',
                  'data_issues', 'comments', 'disaggregation', 'sector',
                  'program')
class IndicatorListFilter(admin.SimpleListFilter):
    """Changelist filter offering the programs in the requesting user's country."""
    title = "Program"
    parameter_name = 'program'

    def lookups(self, request, model_admin):
        """Return (id, name) choices for programs in the user's country."""
        user_country = request.user.tola_user.country
        programs = Program.objects.filter(country__in=[user_country]).values('id', 'name')
        # Build the choices once. The original rebuilt the entire list on
        # every iteration of an outer loop (quadratic work, shadowed loop
        # variable) while producing the same final value.
        return [(p['id'], p['name']) for p in programs]

    def queryset(self, request, queryset):
        """Filter indicators to the selected program, if any."""
        if self.value():
            queryset = queryset.filter(program__in=[self.value()])
        return queryset
class IndicatorAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
    """Admin for indicators with import/export support and change history."""
    resource_class = IndicatorResource
    list_display = ('indicator_types', 'name', 'sector')
    search_fields = ('name', 'number', 'program__name')
    list_filter = (IndicatorListFilter, 'sector')
    display = 'Indicators'
    filter_horizontal = ('objectives', 'strategic_objectives', 'disaggregation')
    def get_queryset(self, request):
        """Limit non-superusers to indicators of programs in their own country."""
        queryset = super(IndicatorAdmin, self).get_queryset(request)
        if request.user.is_superuser is False:
            user_country = request.user.tola_user.country
            programs = Program.objects.filter(country__in=[user_country])
            queryset = queryset.filter(program__in=programs)
        return queryset
class CountryFilter(admin.SimpleListFilter):
title = 'country'
parameter_name = 'country'
def lookups(self, request, model_admin):
countries = Country.objects.all().values('id', 'country')
if request.user.is_superuser is False:
user_country = request.user.tola_user.country
countries = countries.filter(pk=user_country.pk)
countries_tuple = [(c['id'], c['country']) for c in countries]
return countries_tuple
def queryset(self, request, queryset):
if self.value():
if queryset.model == Objective:
queryset = queryset.filter(program__country=self.value())
else:
queryset = queryset.filter(country=self.value())
return queryset
class DisaggregationCategoryAdmin(SortableInlineAdminMixin, admin.StackedInline):
model = DisaggregationLabel
min_num = 2
extra = 0
verbose_name = _('Category')
verbose_name_plural = _('Categories')
fieldsets = (
(None, {'fields': ('customsort', ('label', 'indicator_count')),
'classes': ('inline-fieldset',)}),
)
readonly_fields = ('indicator_count',)
def indicator_count(self, instance):
return 4
class DisaggregationAdmin(admin.ModelAdmin):
display = _('Disaggregation')
inlines = [
DisaggregationCategoryAdmin,
]
class Media:
js = (
'js/admin/disaggregation_admin.js',
)
css = {
'all': ('css/admin/inline_forms.css',)
}
def program_count(self, instance):
return instance.program_count_annotation
program_count.admin_order_field = 'program_count_annotation'
def pretty_archived(self, instance):
return format_html(
'<span style="color: {};">{}</span>',
'red' if instance.is_archived else 'green',
'No' if instance.is_archived else 'Yes'
)
pretty_archived.short_description = 'Active'
def categories(self, instance):
labels = ', '.join([category.label for category in instance.disaggregationlabel_set.all()])
return (labels[:self.COLUMN_WIDTH-3] + '...') if len(labels) > self.COLUMN_WIDTH else labels
def get_queryset(self, request):
return super().get_queryset(request).filter(standard=self.STANDARD).annotate(
program_count_annotation=models.Subquery(
Indicator.rf_aware_objects.filter(
disaggregation=models.OuterRef('pk')
).values('disaggregation').order_by().annotate(
program_count=models.Count('program', distinct=True),
).values('program_count')[:1],
output_field=models.IntegerField()
))
class GlobalDisaggregation(DisaggregationType):
class Meta:
proxy = True
@admin.register(GlobalDisaggregation)
class GlobalDisaggregationAdmin(DisaggregationAdmin):
list_display = ('disaggregation_type', 'pretty_archived', 'program_count', 'categories')
list_filter = (ArchivedFilter,)
sortable_by = ('disaggregation_type', 'program_count')
exclude = ('create_date', 'edit_date', 'country', 'standard')
STANDARD = True
COLUMN_WIDTH = 70
def save_model(self, request, obj, form, change):
obj.standard = True
obj.country = None
super().save_model(request, obj, form, change)
class CountryDisaggregation(DisaggregationType):
class Meta:
proxy = True
@admin.register(CountryDisaggregation)
class CountryDisaggregationAdmin(DisaggregationAdmin):
list_display = ('disaggregation_type', 'country', 'pretty_archived', 'program_count', 'categories')
list_filter = (ArchivedFilter, 'country')
sortable_by = ('disaggregation_type', 'program_count', 'country')
exclude = ('create_date', 'edit_date', 'standard',)
STANDARD = False
COLUMN_WIDTH = 50
def save_model(self, request, obj, form, change):
obj.standard = False
super().save_model(request, obj, form, change)
class ObjectiveAdmin(admin.ModelAdmin):
list_display = ('program', 'name')
search_fields = ('name', 'program__name')
list_filter = (CountryFilter,)
display = 'Program Objectives'
def get_queryset(self, request):
queryset = super(ObjectiveAdmin, self).get_queryset(request)
if request.user.is_superuser is False:
user_country = request.user.tola_user.country
programs = Program.objects.filter(country__in=[user_country]).values('id')
program_ids = [p['id'] for p in programs]
queryset = queryset.filter(program__in=program_ids)
return queryset
class StrategicObjectiveAdmin(admin.ModelAdmin):
list_display = ('country', 'name')
search_fields = ('country__country', 'name')
list_filter = (CountryFilter,)
display = 'Country Strategic Objectives'
def get_queryset(self, request):
queryset = super(StrategicObjectiveAdmin, self).get_queryset(request)
if request.user.is_superuser is False:
user_country = request.user.tola_user.country
queryset = queryset.filter(country=user_country)
return queryset
class ResultResource(resources.ModelResource):
class Meta:
model = Result
class ResultAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
resource_class = ResultResource
list_display = ('indicator', 'program')
search_fields = ('indicator', 'program', 'owner__username')
list_filter = ('indicator__program__country__country', 'program', 'approved_by')
display = 'Indicators Results'
class ReportingFrequencyAdmin(admin.ModelAdmin):
list_display = ('frequency', 'description', 'create_date', 'edit_date')
display = 'Reporting Frequency'
admin.site.register(IndicatorType)
admin.site.register(Indicator, IndicatorAdmin)
admin.site.register(ReportingFrequency)
admin.site.register(Result, ResultAdmin)
admin.site.register(Objective, ObjectiveAdmin)
admin.site.register(StrategicObjective, StrategicObjectiveAdmin)
admin.site.register(Level)
admin.site.register(ExternalService, ExternalServiceAdmin)
admin.site.register(ExternalServiceRecord, ExternalServiceRecordAdmin)
admin.site.register(DataCollectionFrequency)
admin.site.register(PeriodicTarget, PeriodicTargetAdmin)
| true | true |
f739f6fc414801c38208b08e4891b11de4a43d1e | 12,699 | py | Python | tests/contrib/redis/test_redis.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/redis/test_redis.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:56:55.000Z | 2020-12-22T16:56:55.000Z | tests/contrib/redis/test_redis.py | kenferrara/dd-trace-py | 12e52e0ab804061e72b0f76214f5e4bb475ae20f | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:54:02.000Z | 2020-12-22T16:54:02.000Z | # -*- coding: utf-8 -*-
import redis
import ddtrace
from ddtrace import Pin, compat
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.redis import get_traced_redis
from ddtrace.contrib.redis.patch import patch, unpatch
from tests.opentracer.utils import init_tracer
from ..config import REDIS_CONFIG
from tests.tracer.test_tracer import get_dummy_tracer
from tests import TracerTestCase, snapshot
def test_redis_legacy():
    # The deprecated get_traced_redis() interface must keep working,
    # but must not emit any spans.
    dummy_tracer = get_dummy_tracer()
    client_cls = get_traced_redis(dummy_tracer, "foo")
    client = client_cls(port=REDIS_CONFIG["port"])
    client.set("a", "b")
    assert compat.to_unicode(client.get("a")) == "b"
    assert not dummy_tracer.writer.pop()
class TestRedisPatch(TracerTestCase):
    """Integration tests for the redis patch: span tags/metrics, pipelines,
    patch/unpatch, OpenTracing interop, and service-name precedence.

    Requires a reachable redis server on REDIS_CONFIG["port"].
    """
    TEST_PORT = REDIS_CONFIG["port"]
    def setUp(self):
        """Patch redis, flush the test DB, and pin the dummy tracer to the client."""
        super(TestRedisPatch, self).setUp()
        patch()
        r = redis.Redis(port=self.TEST_PORT)
        r.flushall()
        Pin.override(r, tracer=self.tracer)
        self.r = r
    def tearDown(self):
        unpatch()
        super(TestRedisPatch, self).tearDown()
    def test_long_command(self):
        """A very long command is truncated in the redis.raw_command tag."""
        self.r.mget(*range(1000))
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        self.assert_is_measured(span)
        assert span.service == "redis"
        assert span.name == "redis.command"
        assert span.span_type == "redis"
        assert span.error == 0
        meta = {
            "out.host": u"localhost",
        }
        metrics = {
            "out.port": self.TEST_PORT,
            "out.redis_db": 0,
        }
        for k, v in meta.items():
            assert span.get_tag(k) == v
        for k, v in metrics.items():
            assert span.get_metric(k) == v
        # Truncation: tag keeps the command prefix and ends with an ellipsis.
        assert span.get_tag("redis.raw_command").startswith(u"MGET 0 1 2 3")
        assert span.get_tag("redis.raw_command").endswith(u"...")
    def test_basics(self):
        """A simple GET yields one span with the expected tags and metrics."""
        us = self.r.get("cheese")
        assert us is None
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        self.assert_is_measured(span)
        assert span.service == "redis"
        assert span.name == "redis.command"
        assert span.span_type == "redis"
        assert span.error == 0
        assert span.get_metric("out.redis_db") == 0
        assert span.get_tag("out.host") == "localhost"
        assert span.get_tag("redis.raw_command") == u"GET cheese"
        assert span.get_metric("redis.args_length") == 2
        assert span.resource == "GET cheese"
        assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
    def test_analytics_without_rate(self):
        """Enabling analytics with no explicit rate defaults the sample rate to 1.0."""
        with self.override_config("redis", dict(analytics_enabled=True)):
            us = self.r.get("cheese")
            assert us is None
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
    def test_analytics_with_rate(self):
        """An explicit analytics sample rate is carried on the span metric."""
        with self.override_config("redis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
            us = self.r.get("cheese")
            assert us is None
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
    def test_pipeline_traced(self):
        """A pipeline produces a single span joining the queued commands."""
        with self.r.pipeline(transaction=False) as p:
            p.set("blah", 32)
            p.rpush("foo", u"éé")
            p.hgetall("xxx")
            p.execute()
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        self.assert_is_measured(span)
        assert span.service == "redis"
        assert span.name == "redis.command"
        assert span.resource == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
        assert span.span_type == "redis"
        assert span.error == 0
        assert span.get_metric("out.redis_db") == 0
        assert span.get_tag("out.host") == "localhost"
        assert span.get_tag("redis.raw_command") == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
        assert span.get_metric("redis.pipeline_length") == 3
        # NOTE(review): duplicate of the assertion above — harmless, possibly
        # a copy/paste leftover.
        assert span.get_metric("redis.pipeline_length") == 3
        assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
    def test_pipeline_immediate(self):
        """immediate_execute_command traces its own span alongside the pipeline's."""
        with self.r.pipeline() as p:
            p.set("a", 1)
            p.immediate_execute_command("SET", "a", 1)
            p.execute()
        spans = self.get_spans()
        assert len(spans) == 2
        span = spans[0]
        self.assert_is_measured(span)
        assert span.service == "redis"
        assert span.name == "redis.command"
        assert span.resource == u"SET a 1"
        assert span.span_type == "redis"
        assert span.error == 0
        assert span.get_metric("out.redis_db") == 0
        assert span.get_tag("out.host") == "localhost"
    def test_meta_override(self):
        """Tags set on the Pin are propagated onto emitted spans."""
        r = self.r
        pin = Pin.get_from(r)
        if pin:
            pin.clone(tags={"cheese": "camembert"}).onto(r)
        r.get("cheese")
        spans = self.get_spans()
        assert len(spans) == 1
        span = spans[0]
        assert span.service == "redis"
        assert "cheese" in span.meta and span.meta["cheese"] == "camembert"
    def test_patch_unpatch(self):
        """patch() is idempotent and unpatch() fully removes tracing."""
        tracer = get_dummy_tracer()
        writer = tracer.writer
        # Test patch idempotence
        patch()
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
        # Test unpatch
        unpatch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        r.get("key")
        spans = writer.pop()
        assert not spans, spans
        # Test patch again
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
    def test_opentracing(self):
        """Ensure OpenTracing works with redis."""
        ot_tracer = init_tracer("redis_svc", self.tracer)
        with ot_tracer.start_active_span("redis_get"):
            us = self.r.get("cheese")
            assert us is None
        spans = self.get_spans()
        assert len(spans) == 2
        ot_span, dd_span = spans
        # confirm the parenting
        assert ot_span.parent_id is None
        assert dd_span.parent_id == ot_span.span_id
        assert ot_span.name == "redis_get"
        assert ot_span.service == "redis_svc"
        self.assert_is_measured(dd_span)
        assert dd_span.service == "redis"
        assert dd_span.name == "redis.command"
        assert dd_span.span_type == "redis"
        assert dd_span.error == 0
        assert dd_span.get_metric("out.redis_db") == 0
        assert dd_span.get_tag("out.host") == "localhost"
        assert dd_span.get_tag("redis.raw_command") == u"GET cheese"
        assert dd_span.get_metric("redis.args_length") == 2
        assert dd_span.resource == "GET cheese"
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
    def test_user_specified_service(self):
        """DD_SERVICE alone does not rename redis spans (integration keeps its name)."""
        from ddtrace import config
        assert config.service == "mysvc"
        self.r.get("cheese")
        span = self.get_spans()[0]
        assert span.service == "redis"
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_REDIS_SERVICE="myredis"))
    def test_env_user_specified_redis_service(self):
        """Service name precedence: env var, then config, then Pin override."""
        self.r.get("cheese")
        span = self.get_spans()[0]
        assert span.service == "myredis", span.service
        self.reset()
        # Global config
        with self.override_config("redis", dict(service="cfg-redis")):
            from ddtrace import config
            print(config.redis.service)
            self.r.get("cheese")
            span = self.get_spans()[0]
            assert span.service == "cfg-redis", span.service
        self.reset()
        # Manual override
        Pin.override(self.r, service="mysvc", tracer=self.tracer)
        self.r.get("cheese")
        span = self.get_spans()[0]
        assert span.service == "mysvc", span.service
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="env-redis"))
    def test_service_precedence(self):
        """Integration-specific env var beats DD_SERVICE; Pin override beats both."""
        self.r.get("cheese")
        span = self.get_spans()[0]
        assert span.service == "env-redis", span.service
        self.reset()
        # Do a manual override
        Pin.override(self.r, service="override-redis", tracer=self.tracer)
        self.r.get("cheese")
        span = self.get_spans()[0]
        assert span.service == "override-redis", span.service
class TestRedisPatchSnapshot(TracerTestCase):
    """Snapshot-based variant of TestRedisPatch: spans are checked against
    recorded snapshots (via @snapshot) instead of inline assertions.

    Note: unlike TestRedisPatch, flushall runs in tearDown so the snapshot
    captures the commands issued by the test itself.
    """
    TEST_PORT = REDIS_CONFIG["port"]
    def setUp(self):
        super(TestRedisPatchSnapshot, self).setUp()
        patch()
        r = redis.Redis(port=self.TEST_PORT)
        self.r = r
    def tearDown(self):
        unpatch()
        super(TestRedisPatchSnapshot, self).tearDown()
        self.r.flushall()
    @snapshot()
    def test_long_command(self):
        self.r.mget(*range(1000))
    @snapshot()
    def test_basics(self):
        us = self.r.get("cheese")
        assert us is None
    @snapshot()
    def test_analytics_without_rate(self):
        with self.override_config("redis", dict(analytics_enabled=True)):
            us = self.r.get("cheese")
            assert us is None
    @snapshot()
    def test_analytics_with_rate(self):
        with self.override_config("redis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
            us = self.r.get("cheese")
            assert us is None
    @snapshot()
    def test_pipeline_traced(self):
        with self.r.pipeline(transaction=False) as p:
            p.set("blah", 32)
            p.rpush("foo", u"éé")
            p.hgetall("xxx")
            p.execute()
    @snapshot()
    def test_pipeline_immediate(self):
        with self.r.pipeline() as p:
            p.set("a", 1)
            p.immediate_execute_command("SET", "a", 1)
            p.execute()
    @snapshot()
    def test_meta_override(self):
        r = self.r
        pin = Pin.get_from(r)
        if pin:
            pin.clone(tags={"cheese": "camembert"}).onto(r)
        r.get("cheese")
    def test_patch_unpatch(self):
        """Not snapshot-based: verifies patch idempotence and clean unpatch."""
        tracer = get_dummy_tracer()
        writer = tracer.writer
        # Test patch idempotence
        patch()
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
        # Test unpatch
        unpatch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        r.get("key")
        spans = writer.pop()
        assert not spans, spans
        # Test patch again
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
    @snapshot()
    def test_opentracing(self):
        """Ensure OpenTracing works with redis."""
        ot_tracer = init_tracer("redis_svc", ddtrace.tracer)
        with ot_tracer.start_active_span("redis_get"):
            us = self.r.get("cheese")
            assert us is None
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
    @snapshot()
    def test_user_specified_service(self):
        from ddtrace import config
        assert config.service == "mysvc"
        self.r.get("cheese")
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_REDIS_SERVICE="myredis"))
    @snapshot()
    def test_env_user_specified_redis_service(self):
        self.r.get("cheese")
        self.reset()
        # Global config
        with self.override_config("redis", dict(service="cfg-redis")):
            self.r.get("cheese")
        self.reset()
        # Manual override
        Pin.override(self.r, service="mysvc", tracer=self.tracer)
        self.r.get("cheese")
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="env-redis"))
    @snapshot()
    def test_service_precedence(self):
        self.r.get("cheese")
        self.reset()
        # Do a manual override
        Pin.override(self.r, service="override-redis", tracer=self.tracer)
        self.r.get("cheese")
| 30.453237 | 109 | 0.597449 |
import redis
import ddtrace
from ddtrace import Pin, compat
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.redis import get_traced_redis
from ddtrace.contrib.redis.patch import patch, unpatch
from tests.opentracer.utils import init_tracer
from ..config import REDIS_CONFIG
from tests.tracer.test_tracer import get_dummy_tracer
from tests import TracerTestCase, snapshot
def test_redis_legacy():
tracer = get_dummy_tracer()
TracedRedisCache = get_traced_redis(tracer, "foo")
r = TracedRedisCache(port=REDIS_CONFIG["port"])
r.set("a", "b")
got = r.get("a")
assert compat.to_unicode(got) == "b"
assert not tracer.writer.pop()
class TestRedisPatch(TracerTestCase):
TEST_PORT = REDIS_CONFIG["port"]
def setUp(self):
super(TestRedisPatch, self).setUp()
patch()
r = redis.Redis(port=self.TEST_PORT)
r.flushall()
Pin.override(r, tracer=self.tracer)
self.r = r
def tearDown(self):
unpatch()
super(TestRedisPatch, self).tearDown()
def test_long_command(self):
self.r.mget(*range(1000))
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
self.assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.span_type == "redis"
assert span.error == 0
meta = {
"out.host": u"localhost",
}
metrics = {
"out.port": self.TEST_PORT,
"out.redis_db": 0,
}
for k, v in meta.items():
assert span.get_tag(k) == v
for k, v in metrics.items():
assert span.get_metric(k) == v
assert span.get_tag("redis.raw_command").startswith(u"MGET 0 1 2 3")
assert span.get_tag("redis.raw_command").endswith(u"...")
def test_basics(self):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
self.assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_metric("out.redis_db") == 0
assert span.get_tag("out.host") == "localhost"
assert span.get_tag("redis.raw_command") == u"GET cheese"
assert span.get_metric("redis.args_length") == 2
assert span.resource == "GET cheese"
assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
def test_analytics_without_rate(self):
with self.override_config("redis", dict(analytics_enabled=True)):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
def test_analytics_with_rate(self):
with self.override_config("redis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
def test_pipeline_traced(self):
with self.r.pipeline(transaction=False) as p:
p.set("blah", 32)
p.rpush("foo", u"éé")
p.hgetall("xxx")
p.execute()
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
self.assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.resource == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_metric("out.redis_db") == 0
assert span.get_tag("out.host") == "localhost"
assert span.get_tag("redis.raw_command") == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
assert span.get_metric("redis.pipeline_length") == 3
assert span.get_metric("redis.pipeline_length") == 3
assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
def test_pipeline_immediate(self):
with self.r.pipeline() as p:
p.set("a", 1)
p.immediate_execute_command("SET", "a", 1)
p.execute()
spans = self.get_spans()
assert len(spans) == 2
span = spans[0]
self.assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.resource == u"SET a 1"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_metric("out.redis_db") == 0
assert span.get_tag("out.host") == "localhost"
def test_meta_override(self):
r = self.r
pin = Pin.get_from(r)
if pin:
pin.clone(tags={"cheese": "camembert"}).onto(r)
r.get("cheese")
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert span.service == "redis"
assert "cheese" in span.meta and span.meta["cheese"] == "camembert"
def test_patch_unpatch(self):
tracer = get_dummy_tracer()
writer = tracer.writer
patch()
patch()
r = redis.Redis(port=REDIS_CONFIG["port"])
Pin.get_from(r).clone(tracer=tracer).onto(r)
r.get("key")
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
unpatch()
r = redis.Redis(port=REDIS_CONFIG["port"])
r.get("key")
spans = writer.pop()
assert not spans, spans
patch()
r = redis.Redis(port=REDIS_CONFIG["port"])
Pin.get_from(r).clone(tracer=tracer).onto(r)
r.get("key")
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
def test_opentracing(self):
ot_tracer = init_tracer("redis_svc", self.tracer)
with ot_tracer.start_active_span("redis_get"):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
assert len(spans) == 2
ot_span, dd_span = spans
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.name == "redis_get"
assert ot_span.service == "redis_svc"
self.assert_is_measured(dd_span)
assert dd_span.service == "redis"
assert dd_span.name == "redis.command"
assert dd_span.span_type == "redis"
assert dd_span.error == 0
assert dd_span.get_metric("out.redis_db") == 0
assert dd_span.get_tag("out.host") == "localhost"
assert dd_span.get_tag("redis.raw_command") == u"GET cheese"
assert dd_span.get_metric("redis.args_length") == 2
assert dd_span.resource == "GET cheese"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_service(self):
from ddtrace import config
assert config.service == "mysvc"
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "redis"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_REDIS_SERVICE="myredis"))
def test_env_user_specified_redis_service(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "myredis", span.service
self.reset()
with self.override_config("redis", dict(service="cfg-redis")):
from ddtrace import config
print(config.redis.service)
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "cfg-redis", span.service
self.reset()
Pin.override(self.r, service="mysvc", tracer=self.tracer)
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "mysvc", span.service
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="env-redis"))
def test_service_precedence(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "env-redis", span.service
self.reset()
Pin.override(self.r, service="override-redis", tracer=self.tracer)
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "override-redis", span.service
class TestRedisPatchSnapshot(TracerTestCase):
    """Snapshot-based tests for the redis integration.

    Most cases carry ``@snapshot()``: instead of asserting on span fields
    inline, the spans emitted by the traced client are compared against
    recorded snapshot files.
    """
    # Port of the redis server spun up for the test suite.
    TEST_PORT = REDIS_CONFIG["port"]
    def setUp(self):
        """Patch the redis integration and build a fresh client per test."""
        super(TestRedisPatchSnapshot, self).setUp()
        patch()
        r = redis.Redis(port=self.TEST_PORT)
        self.r = r
    def tearDown(self):
        """Undo the patch and drop every key the test wrote."""
        unpatch()
        super(TestRedisPatchSnapshot, self).tearDown()
        self.r.flushall()
    @snapshot()
    def test_long_command(self):
        """Command with a very large argument list."""
        self.r.mget(*range(1000))
    @snapshot()
    def test_basics(self):
        """Simple GET on a missing key."""
        us = self.r.get("cheese")
        assert us is None
    @snapshot()
    def test_analytics_without_rate(self):
        """Analytics enabled with the default sample rate."""
        with self.override_config("redis", dict(analytics_enabled=True)):
            us = self.r.get("cheese")
            assert us is None
    @snapshot()
    def test_analytics_with_rate(self):
        """Analytics enabled with an explicit sample rate."""
        with self.override_config("redis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
            us = self.r.get("cheese")
            assert us is None
    @snapshot()
    def test_pipeline_traced(self):
        """A non-transactional pipeline is traced as a single unit."""
        with self.r.pipeline(transaction=False) as p:
            p.set("blah", 32)
            p.rpush("foo", u"éé")
            p.hgetall("xxx")
            p.execute()
    @snapshot()
    def test_pipeline_immediate(self):
        """``immediate_execute_command`` issued inside an open pipeline."""
        with self.r.pipeline() as p:
            p.set("a", 1)
            p.immediate_execute_command("SET", "a", 1)
            p.execute()
    @snapshot()
    def test_meta_override(self):
        """Tags set on the client Pin are attached to the emitted span."""
        r = self.r
        pin = Pin.get_from(r)
        if pin:
            pin.clone(tags={"cheese": "camembert"}).onto(r)
        r.get("cheese")
    def test_patch_unpatch(self):
        """patch()/unpatch() toggles tracing; a double patch() must stay idempotent."""
        tracer = get_dummy_tracer()
        writer = tracer.writer
        # Patch twice: must still produce exactly one span per command.
        patch()
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
        # After unpatch no spans may be produced.
        unpatch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        r.get("key")
        spans = writer.pop()
        assert not spans, spans
        # Re-patching restores tracing.
        patch()
        r = redis.Redis(port=REDIS_CONFIG["port"])
        Pin.get_from(r).clone(tracer=tracer).onto(r)
        r.get("key")
        spans = writer.pop()
        assert spans, spans
        assert len(spans) == 1
    @snapshot()
    def test_opentracing(self):
        """The Datadog redis span nests under an OpenTracing span."""
        ot_tracer = init_tracer("redis_svc", ddtrace.tracer)
        with ot_tracer.start_active_span("redis_get"):
            us = self.r.get("cheese")
            assert us is None
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
    @snapshot()
    def test_user_specified_service(self):
        """DD_SERVICE alone must not rename the redis service."""
        from ddtrace import config
        assert config.service == "mysvc"
        self.r.get("cheese")
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_REDIS_SERVICE="myredis"))
    @snapshot()
    def test_env_user_specified_redis_service(self):
        """DD_REDIS_SERVICE, config override and Pin override, in rising precedence."""
        self.r.get("cheese")
        self.reset()
        # Config override.
        with self.override_config("redis", dict(service="cfg-redis")):
            self.r.get("cheese")
        self.reset()
        # Manual Pin override.
        Pin.override(self.r, service="mysvc", tracer=self.tracer)
        self.r.get("cheese")
    @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="env-redis"))
    @snapshot()
    def test_service_precedence(self):
        """DD_REDIS_SERVICE beats DD_SERVICE; a Pin beats both."""
        self.r.get("cheese")
        self.reset()
        # Manual Pin override.
        Pin.override(self.r, service="override-redis", tracer=self.tracer)
        self.r.get("cheese")
| true | true |
f739f7405479ab6c149c0f968f26736c35c97367 | 3,525 | py | Python | MobileRevelator/python/android_xing.py | ohunecker/MR | b0c93436c7964d87a0b8154f8b7662b1731124b9 | [
"MIT"
] | 98 | 2019-02-03T22:50:24.000Z | 2022-03-17T12:50:56.000Z | MobileRevelator/python/android_xing.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 10 | 2019-03-14T20:12:10.000Z | 2020-05-23T10:37:54.000Z | MobileRevelator/python/android_xing.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 30 | 2019-02-03T22:50:27.000Z | 2022-03-30T12:37:30.000Z | #Pluginname="Xing (Android)"
#Filename="conversations.db"
#Type=App
import struct
def convertdata(db):
    """Read the Xing ``conversations.db`` tables and fill the result grid.

    Builds two lookup maps (user id -> display alias, message id -> joined
    attachment file names), then writes one grid row per message through the
    host-provided ``ctx`` API.

    Fixes over the previous version:
    - The None checks were dead: values were wrapped in ``str()`` first, so a
      SQL NULL display name was stored as the literal string "None".  The raw
      values are now checked before conversion.
    - Removed the redundant ``if waconn != -1`` re-check after the early
      return, the unused column fetches and the unused ``name`` local.

    NOTE(review): the SQL here is fixed text; if identifiers ever become
    dynamic, switch to parameterized queries.
    """
    waconn = ctx.sqlite_run_cmd(db, "SELECT _id, im_skype, company_name, bussiness_province, birthdate, display_name, page_name, bussiness_city, occupation_title from users.users_table;")
    if waconn == -1:
        print("Error: " + ctx.sqlite_last_error(db))
        return
    # user id -> best available alias: display name, else page name, else the raw id.
    contacts = {}
    rows = ctx.sqlite_get_data_size(waconn)[0]
    for i in range(0, rows):
        # Columns 1-4 and 7-8 are selected but unused by this plugin, so we
        # do not read them (ctx getters are plain data accessors).
        id = str(ctx.sqlite_get_data(waconn, i, 0))
        display_name = ctx.sqlite_get_data(waconn, i, 5)
        page_name = ctx.sqlite_get_data(waconn, i, 6)
        # Only fill in (or upgrade) entries still mapped to the bare id.
        if (id not in contacts) or (contacts[id] == id):
            if display_name is not None:
                contacts[id] = str(display_name)
            elif page_name is not None:
                contacts[id] = str(page_name)
            else:
                contacts[id] = id
    # message id -> ";"-joined attachment file names.
    attconn = ctx.sqlite_run_cmd(db, "select msg_id, file_name from attachments_table;")
    attachments = {}
    if attconn != -1:
        attrows = ctx.sqlite_get_data_size(attconn)[0]
        for i in range(0, attrows):
            id = str(ctx.sqlite_get_data(attconn, i, 0))
            filename = str(ctx.sqlite_get_data(attconn, i, 1))
            if id not in attachments:
                attachments[id] = filename
            else:
                attachments[id] += ";" + filename
    # Main message query: one grid row per message.
    conn = ctx.sqlite_run_cmd(db, "select messages_table.rowid, date, _id, body, sender, has_attachments from messages_table;")
    rows = ctx.sqlite_get_data_size(conn)[0]
    oldpos = 0
    r = 0
    for i in range(0, rows):
        # Only touch the progress bar when the whole-percent value changes.
        newpos = int(i / rows * 100)
        if oldpos < newpos:
            oldpos = newpos
            ctx.gui_setMainProgressBar(oldpos)
        rowid = ctx.sqlite_get_data(conn, i, 0)
        timestamp = ctx.sqlite_get_data(conn, i, 1)
        id = ctx.sqlite_get_data(conn, i, 2)
        body = ctx.sqlite_get_data(conn, i, 3)
        sender_id = ctx.sqlite_get_data(conn, i, 4)
        sender_name = ""
        attaches = ""
        if id in attachments:
            attaches = attachments[id]
        if sender_id in contacts:
            sender_name = contacts[sender_id]
        ctx.gui_set_data(r, 0, rowid)
        ctx.gui_set_data(r, 1, timestamp)
        ctx.gui_set_data(r, 2, sender_id)
        ctx.gui_set_data(r, 3, sender_name)
        ctx.gui_set_data(r, 4, body)
        ctx.gui_set_data(r, 5, attaches)
        r += 1
    ctx.sqlite_cmd_close(attconn)
    ctx.sqlite_cmd_close(waconn)
    ctx.sqlite_cmd_close(conn)
def main():
    """Plugin entry point: set up the grid, parse the database, report status.

    Fixes: the trailing return line was garbled with extraction junk, and a
    stray semicolon was removed.  Returns the completion message shown by the
    host application.
    """
    headers = ["rowid (int)", "timestamp (int)", "_sender (QString)", "_sender_alias (QString)", "body (QString)", "Attachments (QString)"]
    ctx.gui_set_headers(headers)
    ctx.gui_setMainLabel("Xing: Parsing Strings")
    ctx.gui_setMainProgressBar(0)
    db = ctx.sqlite_open("gui", True)
    convertdata(db)
    ctx.gui_update()
    # Reset the status UI once parsing is done.
    ctx.gui_setMainLabel("Status: Idle.")
    ctx.gui_setMainProgressBar(0)
    ctx.sqlite_close(db)
    return "Finished running plugin."
import struct
def convertdata(db):
    """Read the Xing ``conversations.db`` tables and fill the result grid.

    Looks up contact aliases from ``users_table``, collects attachment file
    names per message id, then writes one grid row per message through the
    host-provided ``ctx`` API.
    """
    waconn=ctx.sqlite_run_cmd(db,"SELECT _id, im_skype, company_name, bussiness_province, birthdate, display_name, page_name, bussiness_city, occupation_title from users.users_table;")
    if (waconn==-1):
        print ("Error: "+ctx.sqlite_last_error(db))
        return
    # user id -> best available alias (display name, else page name, else id).
    contacts={}
    # NOTE(review): redundant re-check — the early return above already
    # guarantees waconn != -1.
    if waconn!=-1:
        rows=ctx.sqlite_get_data_size(waconn)[0]
        for i in range(0,rows):
            id=str(ctx.sqlite_get_data(waconn,i,0))
            # Only _id, display_name and page_name are used below; the
            # remaining columns are fetched and discarded.
            skype=str(ctx.sqlite_get_data(waconn,i,1))
            company_name=str(ctx.sqlite_get_data(waconn,i,2))
            bussiness_province=str(ctx.sqlite_get_data(waconn,i,3))
            birthdate=str(ctx.sqlite_get_data(waconn,i,4))
            display_name=str(ctx.sqlite_get_data(waconn,i,5))
            page_name=str(ctx.sqlite_get_data(waconn,i,6))
            bussiness_city=str(ctx.sqlite_get_data(waconn,i,7))
            occupation_title=str(ctx.sqlite_get_data(waconn,i,8))
            # NOTE(review): str() above turns SQL NULL into the string "None",
            # so these != None checks can never be False — a NULL display name
            # is stored as the literal alias "None".
            if (id not in contacts) or (contacts[id]==id):
                if display_name != None:
                    contacts[id]=display_name
                elif page_name != None:
                    contacts[id]=page_name
                else:
                    contacts[id]=id
    # message id -> ";"-joined attachment file names.
    attconn=ctx.sqlite_run_cmd(db,"select msg_id, file_name from attachments_table;")
    attachments={}
    if attconn!=-1:
        attrows=ctx.sqlite_get_data_size(attconn)[0]
        for i in range(0,attrows):
            id=str(ctx.sqlite_get_data(attconn,i,0))
            filename=str(ctx.sqlite_get_data(attconn,i,1))
            if (id not in attachments):
                attachments[id]=filename
            else:
                attachments[id]+=";"+filename
    # Main message query: one grid row per message.
    conn=ctx.sqlite_run_cmd(db,"select messages_table.rowid, date, _id, body, sender, has_attachments from messages_table;")
    rows=ctx.sqlite_get_data_size(conn)[0]
    oldpos=0
    r=0
    for i in range(0,rows):
        # Only touch the progress bar when the whole-percent value changes.
        newpos=int(i/rows*100)
        if (oldpos<newpos):
            oldpos=newpos
            ctx.gui_setMainProgressBar(oldpos)
        rowid=ctx.sqlite_get_data(conn,i,0)
        timestamp=ctx.sqlite_get_data(conn,i,1)
        id=ctx.sqlite_get_data(conn,i,2)
        body=ctx.sqlite_get_data(conn,i,3)
        sender_id=ctx.sqlite_get_data(conn,i,4)
        has_attachments=ctx.sqlite_get_data(conn,i,5)
        # name and has_attachments are assigned but never used afterwards.
        name=""
        sender_name=""
        attaches=""
        # Resolve attachment list and sender alias when available.
        if id in attachments:
            attaches=attachments[id]
        if sender_id in contacts:
            sender_name=contacts[sender_id]
        ctx.gui_set_data(r,0,rowid)
        ctx.gui_set_data(r,1,timestamp)
        ctx.gui_set_data(r,2,sender_id)
        ctx.gui_set_data(r,3,sender_name)
        ctx.gui_set_data(r,4,body)
        ctx.gui_set_data(r,5,attaches)
        r+=1
    ctx.sqlite_cmd_close(attconn)
    ctx.sqlite_cmd_close(waconn)
    ctx.sqlite_cmd_close(conn)
def main():
    """Plugin entry point: set up the grid, parse the database, report status.

    Fixes: the trailing return line was garbled with extraction junk, and a
    stray semicolon was removed.  Returns the completion message shown by the
    host application.
    """
    headers = ["rowid (int)", "timestamp (int)", "_sender (QString)", "_sender_alias (QString)", "body (QString)", "Attachments (QString)"]
    ctx.gui_set_headers(headers)
    ctx.gui_setMainLabel("Xing: Parsing Strings")
    ctx.gui_setMainProgressBar(0)
    db = ctx.sqlite_open("gui", True)
    convertdata(db)
    ctx.gui_update()
    # Reset the status UI once parsing is done.
    ctx.gui_setMainLabel("Status: Idle.")
    ctx.gui_setMainProgressBar(0)
    ctx.sqlite_close(db)
    return "Finished running plugin."
f739f7b974d2d1dd16a7b9aded204c562a1b9f9c | 191 | py | Python | HelmState/__init__.py | hansehe/HelmState | d4b83f1ed2259bd9728c61b8c95270b264fcfda1 | [
"MIT"
] | 1 | 2020-03-21T12:22:23.000Z | 2020-03-21T12:22:23.000Z | HelmState/__main__.py | hansehe/HelmState | d4b83f1ed2259bd9728c61b8c95270b264fcfda1 | [
"MIT"
] | null | null | null | HelmState/__main__.py | hansehe/HelmState | d4b83f1ed2259bd9728c61b8c95270b264fcfda1 | [
"MIT"
] | null | null | null | from HelmState import Main
import sys
def main():
    """Entry point for the application script"""
    # Emit the tool's result verbatim, then exit with a success status.
    output = Main.Main()
    sys.stdout.write(output)
    sys.exit(0)


if __name__ == "__main__":
    main()
| 14.692308 | 48 | 0.649215 | from HelmState import Main
import sys
def main():
    """Entry point for the application script."""
    # Write the tool's output verbatim (no trailing newline), then exit 0.
    sys.stdout.write(Main.Main())
    sys.exit(0)
if __name__ == "__main__":
    main()
| true | true |
f739f980016373c8eb44419fbbcca1766d4dbbc6 | 4,906 | py | Python | examples/pipeline/hetero_sbt/pipeline-hetero-sbt-multi.py | QuantumA/FATE | 89a3dd593252128c1bf86fb1014b25a629bdb31a | [
"Apache-2.0"
] | 1 | 2021-12-03T06:33:33.000Z | 2021-12-03T06:33:33.000Z | examples/pipeline/hetero_sbt/pipeline-hetero-sbt-multi.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 11 | 2020-10-09T09:53:50.000Z | 2021-12-06T16:14:51.000Z | examples/pipeline/hetero_sbt/pipeline-hetero-sbt-multi.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Assemble and fit a hetero SecureBoost multi-class pipeline on vehicle_scale data.

    ``config`` is either a path to the job config yaml or an already parsed
    config object; ``namespace`` suffixes the experiment data namespaces.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    # data sets
    guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
    guest_validate_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_validate_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
    # init pipeline
    pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
    # set data reader and data-io
    reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
    reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
    reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
    reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
    # guest side holds the labels; host side contributes features only
    data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
    data_transform_0.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
    data_transform_1.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense")
    data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
    # data intersect component
    intersect_0 = Intersection(name="intersection_0")
    intersect_1 = Intersection(name="intersection_1")
    # secure boost component
    hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
                                              num_trees=3,
                                              task_type="classification",
                                              objective_param={"objective": "cross_entropy"},
                                              encrypt_param={"method": "iterativeAffine"},
                                              tree_param={"max_depth": 3},
                                              validation_freqs=1)
    # evaluation component
    evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
    pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
    pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
                                                            validate_data=intersect_1.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print("fitting hetero secureboost done, result:")
    print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    # Fall back to the default config path when no -config flag is given.
    if args.config is None:
        main()
    else:
        main(args.config)
| 45.850467 | 126 | 0.711374 |
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Assemble and fit a hetero SecureBoost multi-class pipeline on vehicle_scale data.

    ``config`` is either a path to the job config yaml or an already parsed
    config object; ``namespace`` suffixes the experiment data namespaces.
    """
    # Load the job config when given a path instead of a parsed object.
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    # Train/validate tables for both parties.
    guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
    guest_validate_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_validate_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
    # Pipeline initiated by the guest party.
    pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
    # Readers for the train (reader_0) and validate (reader_1) tables.
    reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
    reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
    reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
    reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
    # Data transforms: the guest side holds labels, the host features only.
    data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
    data_transform_0.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
    data_transform_1.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense")
    data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
    # Private-set intersection of the two parties' ids.
    intersect_0 = Intersection(name="intersection_0")
    intersect_1 = Intersection(name="intersection_1")
    # SecureBoost trainer with per-epoch validation.
    hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
                                              num_trees=3,
                                              task_type="classification",
                                              objective_param={"objective": "cross_entropy"},
                                              encrypt_param={"method": "iterativeAffine"},
                                              tree_param={"max_depth": 3},
                                              validation_freqs=1)
    # Multi-class evaluation of the boost output.
    evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
    pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
    pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
                                                            validate_data=intersect_1.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print("fitting hetero secureboost done, result:")
    print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    # Fall back to the default config path when no -config flag is given.
    if args.config is None:
        main()
    else:
        main(args.config)
| true | true |
f739f9b082b1daeb0978051a0233b904bc22f951 | 3,051 | py | Python | sail_bpm/sail_bpm/custom_scripts/jinja_file/jinja_file.py | hrishikesh20/SAIL-BPM | 5288168be29ec1f46ba4231b62f65f1758fdb227 | [
"MIT"
] | 1 | 2021-04-04T13:12:04.000Z | 2021-04-04T13:12:04.000Z | sail_bpm/sail_bpm/custom_scripts/jinja_file/jinja_file.py | hrishikesh20/SAIL-BPM | 5288168be29ec1f46ba4231b62f65f1758fdb227 | [
"MIT"
] | null | null | null | sail_bpm/sail_bpm/custom_scripts/jinja_file/jinja_file.py | hrishikesh20/SAIL-BPM | 5288168be29ec1f46ba4231b62f65f1758fdb227 | [
"MIT"
] | 2 | 2022-03-23T06:30:46.000Z | 2022-03-23T07:21:43.000Z | from __future__ import unicode_literals
from frappe.model.document import Document
import frappe
from frappe.utils import flt,today
from frappe import _
import decimal
import json
from datetime import datetime, timedelta
@frappe.whitelist()
def get_delivery_note_data(doc):
    """Build the per-item HTML tables for a Delivery Note print format.

    Returns a dict with ``divtable`` (the column/row HTML for every item
    group) and ``headertable`` (a totals row with bundle count and weight).
    NOTE(review): ``doc.name`` is interpolated into SQL via %-formatting;
    frappe parameterized queries would be safer if this value can ever be
    attacker-controlled.
    """
    # All bundle rows, plus per-item aggregates (gross weight and piece count).
    data = frappe.db.sql("""select item_code,qty,number_of_pieces,wt_range from `tabDelivery Note Item` where parent ="%s" order by item_code"""%(doc.name))
    data_aggregate = frappe.db.sql("""select item_code,sum(qty) as sumgrosswt,sum(number_of_pieces) as sum_qty from `tabDelivery Note Item` where parent ="%s" group by item_code"""%(doc.name))
    # table_height counts emitted rows so add_data can insert column/row breaks.
    table_height=0
    sendata={}
    total_bundle=0
    total_weight=0
    data_to_send = ""
    last_data_aggregate_count =0
    for i in data_aggregate:
        last_data_aggregate_count += 1
        # Item banner plus the column captions.
        header = add_data("""<table style ='width :200px'><tbody><tr class='cls_003' style='border: 1px solid black;'><th colspan ='4' style='text-align:center' ><strong >%s</strong></th></tr>"""%(i[0]),table_height)
        table_height += 1
        header += add_data("""<tr><td><strong>NO</strong></td><td><strong>Wt Range</strong></td><td><strong>Qty</strong></td><td><strong>Gross Wt</strong></td></tr>""",table_height)
        table_height += 1
        count=1
        # One table row per bundle belonging to this item code.
        for j in data:
            if j[0] == i[0]:
                header += add_data("""<tr><td>%s</td><td align="right">%s</td><td align="right">%s</td><td align="right">%s</td></tr>"""%(count,'{:.3f}'.format(round(j[3], 3)),j[2],'{:.3f}'.format(round(j[1], 3))),table_height)
                table_height += 1
                count+=1
        # Totals row for the item group (count-1 bundles).
        header += add_data("""<tr><td><strong>%s</strong></td><td align="left"><strong>%s</strong></td><td align="right"><strong>%s</strong></td><td align="right"><strong>%s</strong></td></tr></tbody></table>"""%(count-1,"Bun",'{:.0f}'.format(round(i[2], 0)),'{:.3f}'.format(round(i[1], 3))),table_height)
        table_height += 1
        # Close the layout after the final group; otherwise just add spacing.
        if last_data_aggregate_count == len(data_aggregate):
            header += add_data("""</div><p align='justify'> </p></div>""",table_height)
        else:
            header += add_data("""<p align='justify'> </p>""",table_height)
        table_height += 1
        data_to_send += header
        total_bundle += count-1
        total_weight += i[1]
    # Document-level totals shown above the detail tables.
    headertable= """<table class = 'headertable'><tr><th>%s</th><th align="left"><strong>%s</strong></th><th>%s</th><th align="left"><strong>%s</strong></th></tr></table>"""%('Total Bundles',total_bundle,'Total Weight','{:.3f}'.format(round(total_weight, 3)))
    divtable = data_to_send
    sendata['divtable']=divtable
    sendata['headertable']=headertable
    return sendata
def add_data(data , num):
    """Prefix ``data`` with layout markup according to the running row count ``num``.

    Rows are stacked 40 to a column and four columns (160 rows) to a page row:
    - ``num`` not a multiple of 40: return ``data`` unchanged;
    - ``num == 0``: open the very first row and column;
    - ``num`` a multiple of 160: close the finished row, open a new one;
    - other multiples of 40: start the next column in the current row.
    """
    if num % 40 != 0:
        return data
    if num == 0:
        return """<div class='row'> <div class='column' style='margin-left:50px' >""" + data
    if (num // 40) % 4 == 0:
        return """</tbody></table></div></div> <p > </p><div class='row'> <div class='column' style='margin-left:50px' ><table style ='width :200px'><tbody>""" + data
    return """</table></tbody></div><div class='column' style='margin-left:60px'><table style ='width :200px'><tbody>""" + data
| 47.671875 | 299 | 0.658145 | from __future__ import unicode_literals
from frappe.model.document import Document
import frappe
from frappe.utils import flt,today
from frappe import _
import decimal
import json
from datetime import datetime, timedelta
@frappe.whitelist()
def get_delivery_note_data(doc):
    """Build the per-item HTML tables for a Delivery Note print format.

    Returns a dict with ``divtable`` (the column/row HTML for every item
    group) and ``headertable`` (a totals row with bundle count and weight).
    NOTE(review): ``doc.name`` is interpolated into SQL via %-formatting;
    prefer frappe parameterized queries if this value can be untrusted.
    """
    # All bundle rows, plus per-item aggregates (gross weight and piece count).
    data = frappe.db.sql("""select item_code,qty,number_of_pieces,wt_range from `tabDelivery Note Item` where parent ="%s" order by item_code"""%(doc.name))
    data_aggregate = frappe.db.sql("""select item_code,sum(qty) as sumgrosswt,sum(number_of_pieces) as sum_qty from `tabDelivery Note Item` where parent ="%s" group by item_code"""%(doc.name))
    # table_height counts emitted rows so add_data can insert column/row breaks.
    table_height=0
    sendata={}
    total_bundle=0
    total_weight=0
    data_to_send = ""
    last_data_aggregate_count =0
    for i in data_aggregate:
        last_data_aggregate_count += 1
        # Item banner plus the column captions.
        header = add_data("""<table style ='width :200px'><tbody><tr class='cls_003' style='border: 1px solid black;'><th colspan ='4' style='text-align:center' ><strong >%s</strong></th></tr>"""%(i[0]),table_height)
        table_height += 1
        header += add_data("""<tr><td><strong>NO</strong></td><td><strong>Wt Range</strong></td><td><strong>Qty</strong></td><td><strong>Gross Wt</strong></td></tr>""",table_height)
        table_height += 1
        count=1
        # One table row per bundle belonging to this item code.
        for j in data:
            if j[0] == i[0]:
                header += add_data("""<tr><td>%s</td><td align="right">%s</td><td align="right">%s</td><td align="right">%s</td></tr>"""%(count,'{:.3f}'.format(round(j[3], 3)),j[2],'{:.3f}'.format(round(j[1], 3))),table_height)
                table_height += 1
                count+=1
        # Totals row for the item group (count-1 bundles).
        header += add_data("""<tr><td><strong>%s</strong></td><td align="left"><strong>%s</strong></td><td align="right"><strong>%s</strong></td><td align="right"><strong>%s</strong></td></tr></tbody></table>"""%(count-1,"Bun",'{:.0f}'.format(round(i[2], 0)),'{:.3f}'.format(round(i[1], 3))),table_height)
        table_height += 1
        # Close the layout after the final group; otherwise just add spacing.
        if last_data_aggregate_count == len(data_aggregate):
            header += add_data("""</div><p align='justify'> </p></div>""",table_height)
        else:
            header += add_data("""<p align='justify'> </p>""",table_height)
        table_height += 1
        data_to_send += header
        total_bundle += count-1
        total_weight += i[1]
    # Document-level totals shown above the detail tables.
    headertable= """<table class = 'headertable'><tr><th>%s</th><th align="left"><strong>%s</strong></th><th>%s</th><th align="left"><strong>%s</strong></th></tr></table>"""%('Total Bundles',total_bundle,'Total Weight','{:.3f}'.format(round(total_weight, 3)))
    divtable = data_to_send
    sendata['divtable']=divtable
    sendata['headertable']=headertable
    return sendata
def add_data(data , num):
    """Prefix ``data`` with layout markup according to the running row count ``num``.

    Rows are stacked 40 to a column and four columns (160 rows) to a page row.
    """
    if num%40 == 0:
        if ((num // 40)) % 4 == 0 or num == 0:
            if num ==0:
                # Very first cell: open the initial row and column.
                return """<div class='row'> <div class='column' style='margin-left:50px' >""" + data
            else:
                # Multiple of 160: close the finished row, open a new row/column/table.
                return """</tbody></table></div></div> <p > </p><div class='row'> <div class='column' style='margin-left:50px' ><table style ='width :200px'><tbody>""" + data
        else:
            # Other multiple of 40: start the next column in the current row.
            return """</table></tbody></div><div class='column' style='margin-left:60px'><table style ='width :200px'><tbody>""" + data
    else:
        return data
| true | true |
f739fa5aa2c2be84427ccffc4362b76e98bcaed9 | 1,330 | py | Python | django_auth2/views/reset_password.py | Nick1994209/django-auth2 | b8678f06ade985d2b5b0606e6e49bd9d2a49931a | [
"MIT"
] | null | null | null | django_auth2/views/reset_password.py | Nick1994209/django-auth2 | b8678f06ade985d2b5b0606e6e49bd9d2a49931a | [
"MIT"
] | null | null | null | django_auth2/views/reset_password.py | Nick1994209/django-auth2 | b8678f06ade985d2b5b0606e6e49bd9d2a49931a | [
"MIT"
] | null | null | null | from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views.generic import FormView
from .. import forms, mails
from ..tokens import password_reset_token_generator
class PasswordReset(FormView):
    """Ask for an e-mail address and send a password-reset message to it."""
    form_class = forms.PasswordResetForm
    template_name = 'django_auth2/reset_password/form.html'
    success_url = reverse_lazy('password_reset_done')

    def form_valid(self, form):
        """On a valid form, queue the reset mail before redirecting to success_url."""
        result = super().form_valid(form)
        recipient = form.get_user_from_email()
        mails.send_reset_password_mail(self.request, recipient)
        return result
# URLconf entry point for the password-reset form view.
password_reset = PasswordReset.as_view()
def password_reset_done(request, **kwargs):
    """Render the "reset mail sent" confirmation page."""
    template = 'django_auth2/reset_password/done.html'
    return auth_views.password_reset_done(request, template_name=template, **kwargs)
def password_reset_confirm(request, **kwargs):
    """Let the user pick a new password after following the e-mailed token link."""
    overrides = dict(
        set_password_form=forms.SetPasswordForm,
        token_generator=password_reset_token_generator,
        template_name='django_auth2/reset_password/confirm.html',
    )
    return auth_views.password_reset_confirm(request, **overrides, **kwargs)
def password_reset_complete(request, **kwargs):
    """Render the final "password changed" page.

    Bug fix: this wrapper previously delegated to
    ``auth_views.password_reset_done`` — the wrong built-in view, which also
    omits the ``login_url`` context the complete template expects.  It now
    delegates to ``auth_views.password_reset_complete`` like every other
    wrapper in this module delegates to its matching auth view.
    """
    return auth_views.password_reset_complete(
        request,
        template_name='django_auth2/reset_password/complete.html',
        **kwargs)
| 28.913043 | 71 | 0.737594 | from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views.generic import FormView
from .. import forms, mails
from ..tokens import password_reset_token_generator
class PasswordReset(FormView):
    """Ask for an e-mail address and send a password-reset message to it."""
    form_class = forms.PasswordResetForm
    template_name = 'django_auth2/reset_password/form.html'
    success_url = reverse_lazy('password_reset_done')
    def form_valid(self, form):
        """On a valid form, send the reset mail, then redirect to success_url."""
        response = super().form_valid(form)
        user = form.get_user_from_email()
        mails.send_reset_password_mail(self.request, user)
        return response
# URLconf entry point for the password-reset form view.
password_reset = PasswordReset.as_view()
def password_reset_done(request, **kwargs):
    """Render the "reset mail sent" confirmation page."""
    return auth_views.password_reset_done(
        request, template_name='django_auth2/reset_password/done.html',
        **kwargs
    )
def password_reset_confirm(request, **kwargs):
    """Let the user pick a new password after following the e-mailed token link.

    Uses the project's SetPasswordForm and custom token generator.
    """
    return auth_views.password_reset_confirm(
        request,
        set_password_form=forms.SetPasswordForm,
        token_generator=password_reset_token_generator,
        template_name='django_auth2/reset_password/confirm.html',
        **kwargs
    )
def password_reset_complete(request, **kwargs):
    """Render the final "password changed" page.

    NOTE(review): this delegates to ``auth_views.password_reset_done`` rather
    than ``auth_views.password_reset_complete`` — likely a copy-paste slip;
    confirm and switch to the matching auth view.
    """
    return auth_views.password_reset_done(
        request,
        template_name='django_auth2/reset_password/complete.html',
        **kwargs)
| true | true |
f739fd1040b679d5040af2976da189d84f3401ba | 5,588 | py | Python | data-science-onramp/ai-platform/modules/trainer/tfkeras_model/task.py | TKone7/python-docs-samples | ef3dd032d6fde6a47b944604788bb674e8e51b66 | [
"Apache-2.0"
] | 1 | 2021-03-25T10:15:56.000Z | 2021-03-25T10:15:56.000Z | data-science-onramp/ai-platform/modules/trainer/tfkeras_model/task.py | TKone7/python-docs-samples | ef3dd032d6fde6a47b944604788bb674e8e51b66 | [
"Apache-2.0"
] | 2 | 2021-04-15T19:02:01.000Z | 2021-05-11T04:53:15.000Z | data-science-onramp/ai-platform/modules/trainer/tfkeras_model/task.py | TKone7/python-docs-samples | ef3dd032d6fde6a47b944604788bb674e8e51b66 | [
"Apache-2.0"
] | 1 | 2021-07-15T21:04:39.000Z | 2021-07-15T21:04:39.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START ai_platform_tfkeras_task]
"""Trains a Keras model to predict number of trips
started and ended at Citibike stations. """
# [START ai_platform_tfkeras_task_imports]
import argparse
import os
import tensorflow as tf
from trainer import utils
from trainer.tfkeras_model import model
# [END ai_platform_tfkeras_task_imports]
# [START ai_platform_tfkeras_task_args]
def get_args() -> argparse.Namespace:
    """Parse the command-line flags for the training job and return them."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-path", type=str, required=True, help="path to input data")
    parser.add_argument("--num-epochs", type=int, help="number of times to go through the data, default=20")
    parser.add_argument("--batch-size", type=int, help="number of records to read during each training step, default=128")
    parser.add_argument("--learning-rate", type=float, help="learning rate for gradient descent, default=.01")
    parser.add_argument("--verbosity", choices=["DEBUG", "ERROR", "FATAL", "INFO", "WARN"], default="INFO")
    # The default is read eagerly, so AIP_MODEL_DIR must be set when this runs
    # (it is provided by the Vertex AI training environment).
    parser.add_argument("--model_dir", type=str, help="Output directory for the model.", default=os.environ["AIP_MODEL_DIR"])
    return parser.parse_args()
# [END ai_platform_tfkeras_task_args]
# [START ai_platform_tfkeras_task_train_and_evaluate]
# [START ai_platform_tfkeras_task_train_and_evaluate_load]
def train_and_evaluate(
    input_path: str,
    model_dir: str,
    num_epochs: int = 5,
    batch_size: int = 128,
    learning_rate: float = 0.01
) -> None:
    """Trains and evaluates the Keras model.

    Uses the Keras model defined in model.py. Saves the trained model in
    TensorFlow SavedModel format to the directory given by ``model_dir``.

    Args:
        input_path: Path to the input data (consumed by utils.load_data).
        model_dir: Output directory for the exported SavedModel.
        num_epochs: Number of passes over the training data.
        batch_size: Number of records per training step.
        learning_rate: Initial learning rate for gradient descent.
    """
    # Split datasets into training and testing
    train_feature, eval_feature, train_target, eval_target = utils.load_data(input_path)
    # [END ai_platform_tfkeras_task_train_and_evaluate_load]

    # [START ai_platform_tfkeras_task_train_and_evaluate_dimensions]
    # Extract dimensions of the data
    num_train_examples, input_dim = train_feature.shape
    # BUG FIX: shape[0] is the number of rows (examples); shape[1] was the
    # number of feature columns, which made the validation batch size wrong.
    num_eval_examples = eval_feature.shape[0]
    output_dim = train_target.shape[1]
    # [END ai_platform_tfkeras_task_train_and_evaluate_dimensions]

    # [START ai_platform_tfkeras_task_train_and_evaluate_model]
    # Create the Keras Model
    keras_model = model.create_keras_model(
        input_dim=input_dim,
        output_dim=output_dim,
        learning_rate=learning_rate,
    )
    # [END ai_platform_tfkeras_task_train_and_evaluate_model]

    # [START ai_platform_tfkeras_task_train_and_evaluate_training_data]
    # Pass a numpy array by passing DataFrame.values
    training_dataset = model.input_fn(
        features=train_feature.values,
        labels=train_target.values,
        shuffle=True,
        num_epochs=num_epochs,
        batch_size=batch_size,
    )
    # [END ai_platform_tfkeras_task_train_and_evaluate_training_data]

    # [START ai_platform_tfkeras_task_train_and_evaluate_validation_data]
    # Pass a numpy array by passing DataFrame.values
    validation_dataset = model.input_fn(
        features=eval_feature.values,
        labels=eval_target.values,
        shuffle=False,
        num_epochs=num_epochs,
        batch_size=num_eval_examples,
    )
    # [END ai_platform_tfkeras_task_train_and_evaluate_validation_data]

    # [START ai_platform_tfkeras_task_train_and_evaluate_tensorboard]
    # Setup Learning Rate decay: halves the added term every epoch.
    lr_decay_cb = tf.keras.callbacks.LearningRateScheduler(
        lambda epoch: learning_rate + 0.02 * (0.5 ** (1 + epoch)), verbose=True
    )

    # Setup TensorBoard callback.
    tensorboard_cb = tf.keras.callbacks.TensorBoard(
        os.path.join(model_dir, "keras_tensorboard"), histogram_freq=1
    )
    # [END ai_platform_tfkeras_task_train_and_evaluate_tensorboard]

    # [START ai_platform_tfkeras_task_train_and_evaluate_fit_export]
    # Train model
    keras_model.fit(
        training_dataset,
        steps_per_epoch=int(num_train_examples / batch_size),
        epochs=num_epochs,
        validation_data=validation_dataset,
        validation_steps=1,
        verbose=1,
        callbacks=[lr_decay_cb, tensorboard_cb],
    )

    # Export model in TensorFlow SavedModel format.
    keras_model.save(model_dir)
    print(f"Model exported to: {model_dir}")
    # [END ai_platform_tfkeras_task_train_and_evaluate_fit_export]
# [END ai_platform_tfkeras_task_train_and_evaluate_fit_export]
# [END ai_platform_tfkeras_task_train_and_evaluate]
if __name__ == "__main__":
args = get_args()
kwargs = {}
if args.num_epochs:
kwargs["num-epochs"] = args.num_epochs
if args.batch_size:
kwargs["batch-size"] = args.batch_size
if args.learning_rate:
kwargs["learning-rate"] = args.learning_rate
tf.compat.v1.logging.set_verbosity(args.verbosity)
train_and_evaluate(args.input_path, args.model_dir, **kwargs)
# [END ai_platform_tfkeras_task]
| 32.678363 | 94 | 0.713314 |
import argparse
import os
import tensorflow as tf
from trainer import utils
from trainer.tfkeras_model import model
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-path",
type=str,
required=True,
help="path to input data"
)
parser.add_argument(
"--num-epochs",
type=int,
help="number of times to go through the data, default=20",
)
parser.add_argument(
"--batch-size",
type=int,
help="number of records to read during each training step, default=128",
)
parser.add_argument(
"--learning-rate",
type=float,
help="learning rate for gradient descent, default=.01",
)
parser.add_argument(
"--verbosity",
choices=["DEBUG", "ERROR", "FATAL", "INFO", "WARN"],
default="INFO",
)
parser.add_argument(
"--model_dir",
type=str,
help="Output directory for the model.",
default=os.environ["AIP_MODEL_DIR"],
)
return parser.parse_args()
def train_and_evaluate(
input_path: str,
model_dir: str,
num_epochs: int = 5,
batch_size: int = 128,
learning_rate: float = 0.01
) -> None:
train_feature, eval_feature, train_target, eval_target = utils.load_data(input_path)
num_train_examples, input_dim = train_feature.shape
num_eval_examples = eval_feature.shape[1]
output_dim = train_target.shape[1]
keras_model = model.create_keras_model(
input_dim=input_dim,
output_dim=output_dim,
learning_rate=learning_rate,
)
training_dataset = model.input_fn(
features=train_feature.values,
labels=train_target.values,
shuffle=True,
num_epochs=num_epochs,
batch_size=batch_size,
)
validation_dataset = model.input_fn(
features=eval_feature.values,
labels=eval_target.values,
shuffle=False,
num_epochs=num_epochs,
batch_size=num_eval_examples,
)
lr_decay_cb = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: learning_rate + 0.02 * (0.5 ** (1 + epoch)), verbose=True
)
tensorboard_cb = tf.keras.callbacks.TensorBoard(
os.path.join(model_dir, "keras_tensorboard"), histogram_freq=1
)
keras_model.fit(
training_dataset,
steps_per_epoch=int(num_train_examples / batch_size),
epochs=num_epochs,
validation_data=validation_dataset,
validation_steps=1,
verbose=1,
callbacks=[lr_decay_cb, tensorboard_cb],
)
keras_model.save(model_dir)
print(f"Model exported to: {model_dir}")
if __name__ == "__main__":
args = get_args()
kwargs = {}
if args.num_epochs:
kwargs["num-epochs"] = args.num_epochs
if args.batch_size:
kwargs["batch-size"] = args.batch_size
if args.learning_rate:
kwargs["learning-rate"] = args.learning_rate
tf.compat.v1.logging.set_verbosity(args.verbosity)
train_and_evaluate(args.input_path, args.model_dir, **kwargs)
| true | true |
f739fdb2899321a502b29b4687eb0f071e6271c2 | 166 | py | Python | rlflows/__init__.py | levmckinney/NormalizingFlowPolicies | 4474e4b96fea1b6238680bc9b3ebf277be39676d | [
"MIT"
] | 1 | 2021-07-31T00:12:33.000Z | 2021-07-31T00:12:33.000Z | rlflows/__init__.py | levmckinney/NormalizingFlowPolicies | 4474e4b96fea1b6238680bc9b3ebf277be39676d | [
"MIT"
] | null | null | null | rlflows/__init__.py | levmckinney/NormalizingFlowPolicies | 4474e4b96fea1b6238680bc9b3ebf277be39676d | [
"MIT"
] | null | null | null | from .action_dists import TorchGaussianMixtureDistribution
from .envs import cheetah_env_creator, ant_env_creator
from .custom_trainer import CustomKLUpdatePPOTrainer | 55.333333 | 58 | 0.903614 | from .action_dists import TorchGaussianMixtureDistribution
from .envs import cheetah_env_creator, ant_env_creator
from .custom_trainer import CustomKLUpdatePPOTrainer | true | true |
f739fea85a258a9d214794f00a720d41a723aadf | 17,928 | py | Python | pymc/bart/pgbart.py | austereantelope/pymc | 657eb2a7e46fa30e61d3c1b12a8ce15020794a2c | [
"Apache-2.0"
] | 1 | 2021-12-02T07:40:25.000Z | 2021-12-02T07:40:25.000Z | pymc/bart/pgbart.py | austereantelope/pymc | 657eb2a7e46fa30e61d3c1b12a8ce15020794a2c | [
"Apache-2.0"
] | null | null | null | pymc/bart/pgbart.py | austereantelope/pymc | 657eb2a7e46fa30e61d3c1b12a8ce15020794a2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from copy import copy
import aesara
import numpy as np
from aesara import function as aesara_function
from pymc.aesaraf import inputvars, join_nonshared_inputs, make_shared_replacements
from pymc.bart.bart import BARTRV
from pymc.bart.tree import LeafNode, SplitNode, Tree
from pymc.model import modelcontext
from pymc.step_methods.arraystep import ArrayStepShared, Competence
_log = logging.getLogger("pymc")
class PGBART(ArrayStepShared):
"""
Particle Gibss BART sampling step
Parameters
----------
vars: list
List of value variables for sampler
num_particles : int
Number of particles for the conditional SMC sampler. Defaults to 40
max_stages : int
Maximum number of iterations of the conditional SMC sampler. Defaults to 100.
batch : int or tuple
Number of trees fitted per step. Defaults to "auto", which is the 10% of the `m` trees
during tuning and after tuning. If a tuple is passed the first element is the batch size
during tuning and the second the batch size after tuning.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
"""
name = "bartsampler"
default_blocked = False
generates_stats = True
stats_dtypes = [{"variable_inclusion": np.ndarray, "bart_trees": np.ndarray}]
def __init__(self, vars=None, num_particles=40, max_stages=100, batch="auto", model=None):
_log.warning("BART is experimental. Use with caution.")
model = modelcontext(model)
initial_values = model.compute_initial_point()
value_bart = inputvars(vars)[0]
self.bart = model.values_to_rvs[value_bart].owner.op
self.X = self.bart.X
self.Y = self.bart.Y
self.missing_data = np.any(np.isnan(self.X))
self.m = self.bart.m
self.alpha = self.bart.alpha
self.k = self.bart.k
self.alpha_vec = self.bart.split_prior
if self.alpha_vec is None:
self.alpha_vec = np.ones(self.X.shape[1])
self.init_mean = self.Y.mean()
# if data is binary
Y_unique = np.unique(self.Y)
if Y_unique.size == 2 and np.all(Y_unique == [0, 1]):
self.mu_std = 6 / (self.k * self.m ** 0.5)
# maybe we need to check for count data
else:
self.mu_std = (2 * self.Y.std()) / (self.k * self.m ** 0.5)
self.num_observations = self.X.shape[0]
self.num_variates = self.X.shape[1]
self.available_predictors = list(range(self.num_variates))
self.sum_trees = np.full_like(self.Y, self.init_mean).astype(aesara.config.floatX)
self.a_tree = Tree.init_tree(
leaf_node_value=self.init_mean / self.m,
idx_data_points=np.arange(self.num_observations, dtype="int32"),
)
self.mean = fast_mean()
self.normal = NormalSampler()
self.prior_prob_leaf_node = compute_prior_probability(self.alpha)
self.ssv = SampleSplittingVariable(self.alpha_vec)
self.tune = True
if batch == "auto":
batch = max(1, int(self.m * 0.1))
self.batch = (batch, batch)
else:
if isinstance(batch, (tuple, list)):
self.batch = batch
else:
self.batch = (batch, batch)
self.log_num_particles = np.log(num_particles)
self.indices = list(range(2, num_particles))
self.len_indices = len(self.indices)
self.max_stages = max_stages
shared = make_shared_replacements(initial_values, vars, model)
self.likelihood_logp = logp(initial_values, [model.datalogpt], vars, shared)
self.all_particles = []
for i in range(self.m):
self.a_tree.leaf_node_value = self.init_mean / self.m
p = ParticleTree(self.a_tree)
self.all_particles.append(p)
self.all_trees = np.array([p.tree for p in self.all_particles])
super().__init__(vars, shared)
def astep(self, _):
variable_inclusion = np.zeros(self.num_variates, dtype="int")
tree_ids = np.random.choice(range(self.m), replace=False, size=self.batch[~self.tune])
for tree_id in tree_ids:
# Generate an initial set of SMC particles
# at the end of the algorithm we return one of these particles as the new tree
particles = self.init_particles(tree_id)
# Compute the sum of trees without the old tree, that we are attempting to replace
self.sum_trees_noi = self.sum_trees - particles[0].tree.predict_output()
# Resample leaf values for particle 1 which is a copy of the old tree
particles[1].sample_leafs(
self.sum_trees,
self.X,
self.mean,
self.m,
self.normal,
self.mu_std,
)
# The old tree and the one with new leafs do not grow so we update the weights only once
self.update_weight(particles[0], old=True)
self.update_weight(particles[1], old=True)
for _ in range(self.max_stages):
# Sample each particle (try to grow each tree), except for the first two
stop_growing = True
for p in particles[2:]:
tree_grew = p.sample_tree(
self.ssv,
self.available_predictors,
self.prior_prob_leaf_node,
self.X,
self.missing_data,
self.sum_trees,
self.mean,
self.m,
self.normal,
self.mu_std,
)
if tree_grew:
self.update_weight(p)
if p.expansion_nodes:
stop_growing = False
if stop_growing:
break
# Normalize weights
W_t, normalized_weights = self.normalize(particles[2:])
# Resample all but first two particles
new_indices = np.random.choice(
self.indices, size=self.len_indices, p=normalized_weights
)
particles[2:] = particles[new_indices]
# Set the new weights
for p in particles[2:]:
p.log_weight = W_t
for p in particles[2:]:
p.log_weight = p.old_likelihood_logp
_, normalized_weights = self.normalize(particles)
# Get the new tree and update
new_particle = np.random.choice(particles, p=normalized_weights)
new_tree = new_particle.tree
self.all_trees[tree_id] = new_tree
new_particle.log_weight = new_particle.old_likelihood_logp - self.log_num_particles
self.all_particles[tree_id] = new_particle
self.sum_trees = self.sum_trees_noi + new_tree.predict_output()
if self.tune:
self.ssv = SampleSplittingVariable(self.alpha_vec)
for index in new_particle.used_variates:
self.alpha_vec[index] += 1
else:
for index in new_particle.used_variates:
variable_inclusion[index] += 1
stats = {"variable_inclusion": variable_inclusion, "bart_trees": self.all_trees}
return self.sum_trees, [stats]
def normalize(self, particles):
"""
Use logsumexp trick to get W_t and softmax to get normalized_weights
"""
log_w = np.array([p.log_weight for p in particles])
log_w_max = log_w.max()
log_w_ = log_w - log_w_max
w_ = np.exp(log_w_)
w_sum = w_.sum()
W_t = log_w_max + np.log(w_sum) - self.log_num_particles
normalized_weights = w_ / w_sum
# stabilize weights to avoid assigning exactly zero probability to a particle
normalized_weights += 1e-12
return W_t, normalized_weights
def init_particles(self, tree_id: int) -> np.ndarray:
"""
Initialize particles
"""
p = self.all_particles[tree_id]
particles = [p]
particles.append(copy(p))
for _ in self.indices:
particles.append(ParticleTree(self.a_tree))
return np.array(particles)
def update_weight(self, particle, old=False):
"""
Update the weight of a particle
Since the prior is used as the proposal,the weights are updated additively as the ratio of
the new and old log-likelihoods.
"""
new_likelihood = self.likelihood_logp(self.sum_trees_noi + particle.tree.predict_output())
if old:
particle.log_weight = new_likelihood
particle.old_likelihood_logp = new_likelihood
else:
particle.log_weight += new_likelihood - particle.old_likelihood_logp
particle.old_likelihood_logp = new_likelihood
@staticmethod
def competence(var, has_grad):
"""
PGBART is only suitable for BART distributions
"""
dist = getattr(var.owner, "op", None)
if isinstance(dist, BARTRV):
return Competence.IDEAL
return Competence.INCOMPATIBLE
class ParticleTree:
    """
    A single SMC particle: a candidate tree plus its sampler bookkeeping.

    Attributes
    ----------
    tree : Tree
        Private copy of the tree this particle is growing.
    expansion_nodes : list of int
        Indexes of leaf nodes still eligible to be split.
    log_weight : float
        Current (unnormalized) log importance weight of the particle.
    old_likelihood_logp : float
        Likelihood log-probability from the previous weight update.
    used_variates : list of int
        Predictor indexes used in splits, for variable-inclusion stats.
    """

    def __init__(self, tree):
        self.tree = tree.copy()  # keeps the tree that we care at the moment
        self.expansion_nodes = [0]
        self.log_weight = 0
        self.old_likelihood_logp = 0
        self.used_variates = []

    def sample_tree(
        self,
        ssv,
        available_predictors,
        prior_prob_leaf_node,
        X,
        missing_data,
        sum_trees,
        mean,
        m,
        normal,
        mu_std,
    ):
        """Attempt to split the next pending leaf; return True if the tree grew.

        Pops one node from ``expansion_nodes``; with probability
        ``1 - prior_prob_leaf_node[depth]`` it tries to grow the tree there via
        ``grow_tree``. On success the two new leaves are queued for future
        expansion and the chosen predictor is recorded.
        """
        tree_grew = False
        if self.expansion_nodes:
            index_leaf_node = self.expansion_nodes.pop(0)
            # Probability that this node will remain a leaf node
            prob_leaf = prior_prob_leaf_node[self.tree[index_leaf_node].depth]
            if prob_leaf < np.random.random():
                index_selected_predictor = grow_tree(
                    self.tree,
                    index_leaf_node,
                    ssv,
                    available_predictors,
                    X,
                    missing_data,
                    sum_trees,
                    mean,
                    m,
                    normal,
                    mu_std,
                )
                # grow_tree returns None when no usable split value exists.
                if index_selected_predictor is not None:
                    new_indexes = self.tree.idx_leaf_nodes[-2:]
                    self.expansion_nodes.extend(new_indexes)
                    self.used_variates.append(index_selected_predictor)
                    tree_grew = True
        return tree_grew

    def sample_leafs(self, sum_trees, X, mean, m, normal, mu_std):
        """Resample the values of all (non-root) leaf nodes in place."""
        sample_leaf_values(self.tree, sum_trees, X, mean, m, normal, mu_std)
class SampleSplittingVariable:
    def __init__(self, alpha_vec):
        """
        Sample splitting variables proportional to `alpha_vec`.

        This is equivalent to compute the posterior mean of a Dirichlet-Multinomial model.
        This enforce sparsity.

        Parameters
        ----------
        alpha_vec : array-like
            Non-negative weights, one per predictor; normalized internally.
        """
        # Pairs (index, cumulative probability) used for inverse-CDF sampling.
        self.enu = list(enumerate(np.cumsum(alpha_vec / alpha_vec.sum())))

    def rvs(self):
        """Draw one predictor index with probability proportional to its weight."""
        r = np.random.random()
        for i, v in self.enu:
            if r <= v:
                return i
        # BUG FIX: floating-point round-off can leave the last cumulative
        # weight marginally below 1.0, in which case the loop above fell
        # through and returned None. Fall back to the final index.
        return self.enu[-1][0]
def compute_prior_probability(alpha):
    """
    Calculate the probability of the node being a LeafNode (1 - p(being SplitNode)).

    Taken from equation 19 in [Rockova2018]. Entry ``d`` of the returned list
    is the prior probability that a node at depth ``d`` stays a leaf; the list
    is extended until that probability reaches 1.

    Parameters
    ----------
    alpha : float

    Returns
    -------
    list with probabilities for leaf nodes

    References
    ----------
    .. [Rockova2018] Veronika Rockova, Enakshi Saha (2018). On the theory of BART.
    arXiv, `link <https://arxiv.org/abs/1810.00787>`__
    """
    probs = [0]
    depth = 1
    while True:
        if probs[-1] >= 1:
            return probs
        probs.append(1 - alpha ** depth)
        depth += 1
def grow_tree(
    tree,
    index_leaf_node,
    ssv,
    available_predictors,
    X,
    missing_data,
    sum_trees,
    mean,
    m,
    normal,
    mu_std,
):
    """Try to split leaf ``index_leaf_node`` of ``tree`` into two children.

    A splitting variable is drawn from ``ssv`` and a splitting value is drawn
    uniformly from the observed values of that variable at the leaf. On
    success the leaf is replaced in place by a SplitNode with two new
    LeafNodes and the index of the selected predictor is returned; if the
    leaf has no usable (non-NaN) splitting values nothing is modified and
    the function implicitly returns None.
    """
    current_node = tree.get_node(index_leaf_node)
    idx_data_points = current_node.idx_data_points
    index_selected_predictor = ssv.rvs()
    selected_predictor = available_predictors[index_selected_predictor]
    available_splitting_values = X[idx_data_points, selected_predictor]
    if missing_data:
        # Drop the rows whose value for the selected predictor is NaN.
        idx_data_points = idx_data_points[~np.isnan(available_splitting_values)]
        available_splitting_values = available_splitting_values[
            ~np.isnan(available_splitting_values)
        ]
    if available_splitting_values.size > 0:
        idx_selected_splitting_values = discrete_uniform_sampler(len(available_splitting_values))
        split_value = available_splitting_values[idx_selected_splitting_values]
        # Partition the leaf's data points around the chosen split value.
        new_idx_data_points = get_new_idx_data_points(
            split_value, idx_data_points, selected_predictor, X
        )
        current_node_children = (
            current_node.get_idx_left_child(),
            current_node.get_idx_right_child(),
        )
        new_nodes = []
        for idx in range(2):
            idx_data_point = new_idx_data_points[idx]
            # Leaf value: scaled mean of the partition plus Gaussian noise.
            node_value = draw_leaf_value(
                sum_trees[idx_data_point],
                X[idx_data_point, selected_predictor],
                mean,
                m,
                normal,
                mu_std,
            )
            new_node = LeafNode(
                index=current_node_children[idx],
                value=node_value,
                idx_data_points=idx_data_point,
            )
            new_nodes.append(new_node)
        new_split_node = SplitNode(
            index=index_leaf_node,
            idx_split_variable=selected_predictor,
            split_value=split_value,
        )
        # update tree nodes and indexes
        tree.delete_node(index_leaf_node)
        tree.set_node(index_leaf_node, new_split_node)
        tree.set_node(new_nodes[0].index, new_nodes[0])
        tree.set_node(new_nodes[1].index, new_nodes[1])
        return index_selected_predictor
def sample_leaf_values(tree, sum_trees, X, mean, m, normal, mu_std):
    """Redraw the value of every non-root leaf of ``tree`` in place.

    Each leaf's new value is drawn via ``draw_leaf_value`` from the residual
    ``sum_trees`` restricted to the leaf's data points, using the splitting
    variable of the leaf's parent node. The root (idx == 0) is skipped since
    it has no parent.
    """
    for idx in tree.idx_leaf_nodes:
        if idx > 0:
            leaf = tree[idx]
            idx_data_points = leaf.idx_data_points
            parent_node = tree[leaf.get_idx_parent_node()]
            selected_predictor = parent_node.idx_split_variable
            node_value = draw_leaf_value(
                sum_trees[idx_data_points],
                X[idx_data_points, selected_predictor],
                mean,
                m,
                normal,
                mu_std,
            )
            leaf.value = node_value
def get_new_idx_data_points(split_value, idx_data_points, selected_predictor, X):
    """Partition ``idx_data_points`` around ``split_value``.

    Rows whose value of the selected predictor is <= split_value go left,
    the rest go right. Returns (left_indexes, right_indexes).
    """
    goes_left = X[idx_data_points, selected_predictor] <= split_value
    return idx_data_points[goes_left], idx_data_points[~goes_left]
def draw_leaf_value(Y_mu_pred, X_mu, mean, m, normal, mu_std):
    """Draw a Gaussian-distributed leaf value centered on the scaled mean.

    Returns 0 for an empty residual; otherwise mean(Y_mu_pred) / m plus
    standard-normal noise scaled by mu_std. ``X_mu`` is accepted for
    interface compatibility but not used.
    """
    n_points = Y_mu_pred.size
    if n_points == 0:
        return 0
    # .item() avoids calling the (possibly jitted) mean on a length-1 array.
    center = (Y_mu_pred.item() if n_points == 1 else mean(Y_mu_pred)) / m
    return center + normal.random() * mu_std
def fast_mean():
    """Return a mean function, Numba-jitted when numba is importable.

    Falls back to ``np.mean`` when numba is not installed.
    """
    try:
        from numba import jit
    except ImportError:
        return np.mean

    @jit
    def mean(a):
        n = a.shape[0]
        total = 0
        for idx in range(n):
            total += a[idx]
        return total / n

    return mean
def discrete_uniform_sampler(upper_value):
    """Draw from the uniform distribution with bounds [0, upper_value).

    Equivalent to np.random.randint(upper_value) but faster, since it
    avoids the integer-sampling machinery.
    """
    return int(upper_value * np.random.random())
class NormalSampler:
    """
    Cache samples from a standard normal distribution.

    Draws are generated in batches of ``size`` and popped one at a time,
    amortizing the cost of calling the RNG.
    """

    def __init__(self):
        self.size = 1000
        self.cache = []

    def random(self):
        """Return one N(0, 1) draw, refilling the cache when it runs out."""
        if len(self.cache) == 0:
            self.update()
        return self.cache.pop()

    def update(self):
        """Replace the cache with ``size`` fresh standard-normal draws."""
        draws = np.random.normal(loc=0.0, scale=1, size=self.size)
        self.cache = draws.tolist()
def logp(point, out_vars, vars, shared):
    """Compile Aesara function of the model and the input and output variables.

    Parameters
    ----------
    point: dict
        initial point used when joining the non-shared inputs
    out_vars: List
        containing :class:`pymc.Distribution` for the output variables
    vars: List
        containing :class:`pymc.Distribution` for the input variables
    shared: List
        containing :class:`aesara.tensor.Tensor` for depended shared data

    Returns
    -------
    Compiled Aesara function that maps a flat input array to the model
    log-probability. ``trust_input`` is set, so the caller must pass
    correctly-typed arrays (no input validation is performed).
    """
    out_list, inarray0 = join_nonshared_inputs(point, out_vars, vars, shared)
    f = aesara_function([inarray0], out_list[0])
    f.trust_input = True
    return f
| 33.447761 | 100 | 0.603693 |
import logging
from copy import copy
import aesara
import numpy as np
from aesara import function as aesara_function
from pymc.aesaraf import inputvars, join_nonshared_inputs, make_shared_replacements
from pymc.bart.bart import BARTRV
from pymc.bart.tree import LeafNode, SplitNode, Tree
from pymc.model import modelcontext
from pymc.step_methods.arraystep import ArrayStepShared, Competence
_log = logging.getLogger("pymc")
class PGBART(ArrayStepShared):
name = "bartsampler"
default_blocked = False
generates_stats = True
stats_dtypes = [{"variable_inclusion": np.ndarray, "bart_trees": np.ndarray}]
def __init__(self, vars=None, num_particles=40, max_stages=100, batch="auto", model=None):
_log.warning("BART is experimental. Use with caution.")
model = modelcontext(model)
initial_values = model.compute_initial_point()
value_bart = inputvars(vars)[0]
self.bart = model.values_to_rvs[value_bart].owner.op
self.X = self.bart.X
self.Y = self.bart.Y
self.missing_data = np.any(np.isnan(self.X))
self.m = self.bart.m
self.alpha = self.bart.alpha
self.k = self.bart.k
self.alpha_vec = self.bart.split_prior
if self.alpha_vec is None:
self.alpha_vec = np.ones(self.X.shape[1])
self.init_mean = self.Y.mean()
Y_unique = np.unique(self.Y)
if Y_unique.size == 2 and np.all(Y_unique == [0, 1]):
self.mu_std = 6 / (self.k * self.m ** 0.5)
else:
self.mu_std = (2 * self.Y.std()) / (self.k * self.m ** 0.5)
self.num_observations = self.X.shape[0]
self.num_variates = self.X.shape[1]
self.available_predictors = list(range(self.num_variates))
self.sum_trees = np.full_like(self.Y, self.init_mean).astype(aesara.config.floatX)
self.a_tree = Tree.init_tree(
leaf_node_value=self.init_mean / self.m,
idx_data_points=np.arange(self.num_observations, dtype="int32"),
)
self.mean = fast_mean()
self.normal = NormalSampler()
self.prior_prob_leaf_node = compute_prior_probability(self.alpha)
self.ssv = SampleSplittingVariable(self.alpha_vec)
self.tune = True
if batch == "auto":
batch = max(1, int(self.m * 0.1))
self.batch = (batch, batch)
else:
if isinstance(batch, (tuple, list)):
self.batch = batch
else:
self.batch = (batch, batch)
self.log_num_particles = np.log(num_particles)
self.indices = list(range(2, num_particles))
self.len_indices = len(self.indices)
self.max_stages = max_stages
shared = make_shared_replacements(initial_values, vars, model)
self.likelihood_logp = logp(initial_values, [model.datalogpt], vars, shared)
self.all_particles = []
for i in range(self.m):
self.a_tree.leaf_node_value = self.init_mean / self.m
p = ParticleTree(self.a_tree)
self.all_particles.append(p)
self.all_trees = np.array([p.tree for p in self.all_particles])
super().__init__(vars, shared)
def astep(self, _):
variable_inclusion = np.zeros(self.num_variates, dtype="int")
tree_ids = np.random.choice(range(self.m), replace=False, size=self.batch[~self.tune])
for tree_id in tree_ids:
particles = self.init_particles(tree_id)
self.sum_trees_noi = self.sum_trees - particles[0].tree.predict_output()
particles[1].sample_leafs(
self.sum_trees,
self.X,
self.mean,
self.m,
self.normal,
self.mu_std,
)
self.update_weight(particles[0], old=True)
self.update_weight(particles[1], old=True)
for _ in range(self.max_stages):
stop_growing = True
for p in particles[2:]:
tree_grew = p.sample_tree(
self.ssv,
self.available_predictors,
self.prior_prob_leaf_node,
self.X,
self.missing_data,
self.sum_trees,
self.mean,
self.m,
self.normal,
self.mu_std,
)
if tree_grew:
self.update_weight(p)
if p.expansion_nodes:
stop_growing = False
if stop_growing:
break
W_t, normalized_weights = self.normalize(particles[2:])
new_indices = np.random.choice(
self.indices, size=self.len_indices, p=normalized_weights
)
particles[2:] = particles[new_indices]
for p in particles[2:]:
p.log_weight = W_t
for p in particles[2:]:
p.log_weight = p.old_likelihood_logp
_, normalized_weights = self.normalize(particles)
new_particle = np.random.choice(particles, p=normalized_weights)
new_tree = new_particle.tree
self.all_trees[tree_id] = new_tree
new_particle.log_weight = new_particle.old_likelihood_logp - self.log_num_particles
self.all_particles[tree_id] = new_particle
self.sum_trees = self.sum_trees_noi + new_tree.predict_output()
if self.tune:
self.ssv = SampleSplittingVariable(self.alpha_vec)
for index in new_particle.used_variates:
self.alpha_vec[index] += 1
else:
for index in new_particle.used_variates:
variable_inclusion[index] += 1
stats = {"variable_inclusion": variable_inclusion, "bart_trees": self.all_trees}
return self.sum_trees, [stats]
def normalize(self, particles):
log_w = np.array([p.log_weight for p in particles])
log_w_max = log_w.max()
log_w_ = log_w - log_w_max
w_ = np.exp(log_w_)
w_sum = w_.sum()
W_t = log_w_max + np.log(w_sum) - self.log_num_particles
normalized_weights = w_ / w_sum
normalized_weights += 1e-12
return W_t, normalized_weights
def init_particles(self, tree_id: int) -> np.ndarray:
p = self.all_particles[tree_id]
particles = [p]
particles.append(copy(p))
for _ in self.indices:
particles.append(ParticleTree(self.a_tree))
return np.array(particles)
def update_weight(self, particle, old=False):
new_likelihood = self.likelihood_logp(self.sum_trees_noi + particle.tree.predict_output())
if old:
particle.log_weight = new_likelihood
particle.old_likelihood_logp = new_likelihood
else:
particle.log_weight += new_likelihood - particle.old_likelihood_logp
particle.old_likelihood_logp = new_likelihood
@staticmethod
def competence(var, has_grad):
dist = getattr(var.owner, "op", None)
if isinstance(dist, BARTRV):
return Competence.IDEAL
return Competence.INCOMPATIBLE
class ParticleTree:
def __init__(self, tree):
self.tree = tree.copy()
self.expansion_nodes = [0]
self.log_weight = 0
self.old_likelihood_logp = 0
self.used_variates = []
def sample_tree(
self,
ssv,
available_predictors,
prior_prob_leaf_node,
X,
missing_data,
sum_trees,
mean,
m,
normal,
mu_std,
):
tree_grew = False
if self.expansion_nodes:
index_leaf_node = self.expansion_nodes.pop(0)
prob_leaf = prior_prob_leaf_node[self.tree[index_leaf_node].depth]
if prob_leaf < np.random.random():
index_selected_predictor = grow_tree(
self.tree,
index_leaf_node,
ssv,
available_predictors,
X,
missing_data,
sum_trees,
mean,
m,
normal,
mu_std,
)
if index_selected_predictor is not None:
new_indexes = self.tree.idx_leaf_nodes[-2:]
self.expansion_nodes.extend(new_indexes)
self.used_variates.append(index_selected_predictor)
tree_grew = True
return tree_grew
def sample_leafs(self, sum_trees, X, mean, m, normal, mu_std):
sample_leaf_values(self.tree, sum_trees, X, mean, m, normal, mu_std)
class SampleSplittingVariable:
def __init__(self, alpha_vec):
self.enu = list(enumerate(np.cumsum(alpha_vec / alpha_vec.sum())))
def rvs(self):
r = np.random.random()
for i, v in self.enu:
if r <= v:
return i
def compute_prior_probability(alpha):
prior_leaf_prob = [0]
depth = 1
while prior_leaf_prob[-1] < 1:
prior_leaf_prob.append(1 - alpha ** depth)
depth += 1
return prior_leaf_prob
def grow_tree(
tree,
index_leaf_node,
ssv,
available_predictors,
X,
missing_data,
sum_trees,
mean,
m,
normal,
mu_std,
):
current_node = tree.get_node(index_leaf_node)
idx_data_points = current_node.idx_data_points
index_selected_predictor = ssv.rvs()
selected_predictor = available_predictors[index_selected_predictor]
available_splitting_values = X[idx_data_points, selected_predictor]
if missing_data:
idx_data_points = idx_data_points[~np.isnan(available_splitting_values)]
available_splitting_values = available_splitting_values[
~np.isnan(available_splitting_values)
]
if available_splitting_values.size > 0:
idx_selected_splitting_values = discrete_uniform_sampler(len(available_splitting_values))
split_value = available_splitting_values[idx_selected_splitting_values]
new_idx_data_points = get_new_idx_data_points(
split_value, idx_data_points, selected_predictor, X
)
current_node_children = (
current_node.get_idx_left_child(),
current_node.get_idx_right_child(),
)
new_nodes = []
for idx in range(2):
idx_data_point = new_idx_data_points[idx]
node_value = draw_leaf_value(
sum_trees[idx_data_point],
X[idx_data_point, selected_predictor],
mean,
m,
normal,
mu_std,
)
new_node = LeafNode(
index=current_node_children[idx],
value=node_value,
idx_data_points=idx_data_point,
)
new_nodes.append(new_node)
new_split_node = SplitNode(
index=index_leaf_node,
idx_split_variable=selected_predictor,
split_value=split_value,
)
tree.delete_node(index_leaf_node)
tree.set_node(index_leaf_node, new_split_node)
tree.set_node(new_nodes[0].index, new_nodes[0])
tree.set_node(new_nodes[1].index, new_nodes[1])
return index_selected_predictor
def sample_leaf_values(tree, sum_trees, X, mean, m, normal, mu_std):
for idx in tree.idx_leaf_nodes:
if idx > 0:
leaf = tree[idx]
idx_data_points = leaf.idx_data_points
parent_node = tree[leaf.get_idx_parent_node()]
selected_predictor = parent_node.idx_split_variable
node_value = draw_leaf_value(
sum_trees[idx_data_points],
X[idx_data_points, selected_predictor],
mean,
m,
normal,
mu_std,
)
leaf.value = node_value
def get_new_idx_data_points(split_value, idx_data_points, selected_predictor, X):
left_idx = X[idx_data_points, selected_predictor] <= split_value
left_node_idx_data_points = idx_data_points[left_idx]
right_node_idx_data_points = idx_data_points[~left_idx]
return left_node_idx_data_points, right_node_idx_data_points
def draw_leaf_value(Y_mu_pred, X_mu, mean, m, normal, mu_std):
if Y_mu_pred.size == 0:
return 0
else:
norm = normal.random() * mu_std
if Y_mu_pred.size == 1:
mu_mean = Y_mu_pred.item() / m
else:
mu_mean = mean(Y_mu_pred) / m
draw = norm + mu_mean
return draw
def fast_mean():
try:
from numba import jit
except ImportError:
return np.mean
@jit
def mean(a):
count = a.shape[0]
suma = 0
for i in range(count):
suma += a[i]
return suma / count
return mean
def discrete_uniform_sampler(upper_value):
return int(np.random.random() * upper_value)
class NormalSampler:
def __init__(self):
self.size = 1000
self.cache = []
def random(self):
if not self.cache:
self.update()
return self.cache.pop()
def update(self):
self.cache = np.random.normal(loc=0.0, scale=1, size=self.size).tolist()
def logp(point, out_vars, vars, shared):
out_list, inarray0 = join_nonshared_inputs(point, out_vars, vars, shared)
f = aesara_function([inarray0], out_list[0])
f.trust_input = True
return f
| true | true |
f739ff70d24635826bcf0d76af520ce019c18470 | 5,626 | py | Python | .modules/.metagoofil/hachoir_parser/audio/flac.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.metagoofil/hachoir_parser/audio/flac.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 236 | 2016-11-20T07:56:15.000Z | 2017-04-12T12:10:00.000Z | .modules/.metagoofil/hachoir_parser/audio/flac.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 262 | 2017-09-16T22:15:50.000Z | 2022-03-31T00:38:42.000Z | """
FLAC (audio) parser
Documentation:
* http://flac.sourceforge.net/format.html
Author: Esteban Loiseau <baal AT tuxfamily.org>
Creation date: 2008-04-09
"""
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, String, Bit, Bits, UInt16, UInt24, RawBytes, Enum, NullBytes
from hachoir_core.stream import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.tools import createDict
from hachoir_parser.container.ogg import parseVorbisComment
class VorbisComment(FieldSet):
    # Vorbis comments are stored little-endian, unlike the surrounding
    # big-endian FLAC structures, so override the endianness for this block.
    endian = LITTLE_ENDIAN
    # Reuse the Ogg container's Vorbis comment field generator as-is.
    createFields = parseVorbisComment
class StreamInfo(FieldSet):
    """STREAMINFO metadata block payload: fixed 34-byte stream description."""
    static_size = 34*8  # always exactly 34 bytes, expressed in bits

    def createFields(self):
        yield UInt16(self, "min_block_size", "The minimum block size (in samples) used in the stream")
        yield UInt16(self, "max_block_size", "The maximum block size (in samples) used in the stream")
        yield UInt24(self, "min_frame_size", "The minimum frame size (in bytes) used in the stream")
        yield UInt24(self, "max_frame_size", "The maximum frame size (in bytes) used in the stream")
        yield Bits(self, "sample_hertz", 20, "Sample rate in Hertz")
        yield Bits(self, "nb_channel", 3, "Number of channels minus one")
        yield Bits(self, "bits_per_sample", 5, "Bits per sample minus one")
        yield Bits(self, "total_samples", 36, "Total samples in stream")
        yield RawBytes(self, "md5sum", 16, "MD5 signature of the unencoded audio data")
class SeekPoint(FieldSet):
    """One 18-byte entry of a SEEKTABLE metadata block."""
    def createFields(self):
        yield Bits(self, "sample_number", 64, "Sample number")
        yield Bits(self, "offset", 64, "Offset in bytes")
        # Number of samples in the frame pointed to by this seek point.
        yield Bits(self, "nb_sample", 16)
class SeekTable(FieldSet):
    """SEEKTABLE metadata block payload: a packed array of seek points."""
    def createFields(self):
        # The enclosing MetadataBlock instantiates us with an explicit size,
        # so eof marks the end of this block's payload, not of the file.
        while not self.eof:
            yield SeekPoint(self, "point[]")
class MetadataBlock(FieldSet):
    "Metadata block field: http://flac.sourceforge.net/format.html#metadata_block"

    # block type id -> (field name, human description, payload parser or None)
    BLOCK_TYPES = {
        0: ("stream_info", u"Stream info", StreamInfo),
        1: ("padding[]", u"Padding", None),
        2: ("application[]", u"Application", None),
        3: ("seek_table", u"Seek table", SeekTable),
        4: ("comment", u"Vorbis comment", VorbisComment),
        5: ("cue_sheet[]", u"Cue sheet", None),
        6: ("picture[]", u"Picture", None),
    }
    # Enum display map: block type id -> description (column 1 above).
    BLOCK_TYPE_DESC = createDict(BLOCK_TYPES, 1)

    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Total block size in bits: 32-bit header plus the payload length
        # read from the header (indexing self[...] parses header fields on
        # demand, before createFields() is iterated in full).
        self._size = 32 + self["metadata_length"].value * 8
        try:
            key = self["block_type"].value
            self._name, self._description, self.handler = self.BLOCK_TYPES[key]
        except KeyError:
            # Unknown block type: keep the generic name, dump payload raw.
            self.handler = None

    def createFields(self):
        yield Bit(self, "last_metadata_block", "True if this is the last metadata block")
        yield Enum(Bits(self, "block_type", 7, "Metadata block header type"), self.BLOCK_TYPE_DESC)
        yield UInt24(self, "metadata_length", "Length of following metadata in bytes (doesn't include this header)")
        block_type = self["block_type"].value
        size = self["metadata_length"].value
        if not size:
            # Zero-length payload: header only.
            return
        try:
            handler = self.BLOCK_TYPES[block_type][2]
        except KeyError:
            handler = None
        if handler:
            # Dedicated payload parser (stream info, seek table, comment...).
            yield handler(self, "content", size=size*8)
        elif self["block_type"].value == 1:
            # Padding payload: must be all-zero bytes.
            yield NullBytes(self, "padding", size)
        else:
            # No dedicated parser for this type: expose the payload raw.
            yield RawBytes(self, "rawdata", size)
class Metadata(FieldSet):
    """Sequence of metadata blocks, ending at the block whose
    last_metadata_block flag is set (or at end of stream)."""
    def createFields(self):
        while not self.eof:
            field = MetadataBlock(self,"metadata_block[]")
            yield field
            if field["last_metadata_block"].value:
                break
class Frame(FieldSet):
    """(Partial) FLAC audio frame header parser; see FIXME below."""

    # Meaning of the 4-bit sample_rate field in the frame header.
    SAMPLE_RATES = {
        0: "get from STREAMINFO metadata block",
        1: "88.2kHz",
        2: "176.4kHz",
        3: "192kHz",
        4: "8kHz",
        5: "16kHz",
        6: "22.05kHz",
        7: "24kHz",
        8: "32kHz",
        9: "44.1kHz",
        10: "48kHz",
        11: "96kHz",
        12: "get 8 bit sample rate (in kHz) from end of header",
        13: "get 16 bit sample rate (in Hz) from end of header",
        14: "get 16 bit sample rate (in tens of Hz) from end of header",
    }

    def createFields(self):
        yield Bits(self, "sync", 14, "Sync code: 11111111111110")
        yield Bit(self, "reserved[]")
        yield Bit(self, "blocking_strategy")
        yield Bits(self, "block_size", 4)
        yield Enum(Bits(self, "sample_rate", 4), self.SAMPLE_RATES)
        yield Bits(self, "channel_assign", 4)
        yield Bits(self, "sample_size", 3)
        yield Bit(self, "reserved[]")
        # FIXME: Finish frame header parser
class Frames(FieldSet):
    """Audio frame area of the file (currently only the first frame header)."""
    def createFields(self):
        while not self.eof:
            yield Frame(self, "frame[]")
            # FIXME: Parse all frames
            # Frame parsing is incomplete (see Frame), so deliberately stop
            # after the first one.
            return
class FlacParser(Parser):
    "Parse FLAC audio files: FLAC is a lossless audio codec"

    # "fLaC" signature plus the first metadata-block header byte, which this
    # magic requires to be 0x00 (last_metadata_block=0, block_type=0, i.e. a
    # non-final STREAMINFO block -- see MetadataBlock.BLOCK_TYPES).
    MAGIC = "fLaC\x00"
    PARSER_TAGS = {
        "id": "flac",
        "category": "audio",
        "file_ext": ("flac",),
        "mime": (u"audio/x-flac",),
        "magic": ((MAGIC, 0),),
        "min_size": 4*8,
        "description": "FLAC audio",
    }
    endian = BIG_ENDIAN

    def validate(self):
        # Accept only if the stream starts with the 5-byte MAGIC above.
        if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
            return u"Invalid magic string"
        return True

    def createFields(self):
        yield String(self, "signature", 4,charset="ASCII", description="FLAC signature: fLaC string")
        yield Metadata(self,"metadata")
        yield Frames(self,"frames")
| 35.607595 | 116 | 0.614824 |
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, String, Bit, Bits, UInt16, UInt24, RawBytes, Enum, NullBytes
from hachoir_core.stream import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.tools import createDict
from hachoir_parser.container.ogg import parseVorbisComment
class VorbisComment(FieldSet):
endian = LITTLE_ENDIAN
createFields = parseVorbisComment
class StreamInfo(FieldSet):
static_size = 34*8
def createFields(self):
yield UInt16(self, "min_block_size", "The minimum block size (in samples) used in the stream")
yield UInt16(self, "max_block_size", "The maximum block size (in samples) used in the stream")
yield UInt24(self, "min_frame_size", "The minimum frame size (in bytes) used in the stream")
yield UInt24(self, "max_frame_size", "The maximum frame size (in bytes) used in the stream")
yield Bits(self, "sample_hertz", 20, "Sample rate in Hertz")
yield Bits(self, "nb_channel", 3, "Number of channels minus one")
yield Bits(self, "bits_per_sample", 5, "Bits per sample minus one")
yield Bits(self, "total_samples", 36, "Total samples in stream")
yield RawBytes(self, "md5sum", 16, "MD5 signature of the unencoded audio data")
class SeekPoint(FieldSet):
def createFields(self):
yield Bits(self, "sample_number", 64, "Sample number")
yield Bits(self, "offset", 64, "Offset in bytes")
yield Bits(self, "nb_sample", 16)
class SeekTable(FieldSet):
def createFields(self):
while not self.eof:
yield SeekPoint(self, "point[]")
class MetadataBlock(FieldSet):
BLOCK_TYPES = {
0: ("stream_info", u"Stream info", StreamInfo),
1: ("padding[]", u"Padding", None),
2: ("application[]", u"Application", None),
3: ("seek_table", u"Seek table", SeekTable),
4: ("comment", u"Vorbis comment", VorbisComment),
5: ("cue_sheet[]", u"Cue sheet", None),
6: ("picture[]", u"Picture", None),
}
BLOCK_TYPE_DESC = createDict(BLOCK_TYPES, 1)
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = 32 + self["metadata_length"].value * 8
try:
key = self["block_type"].value
self._name, self._description, self.handler = self.BLOCK_TYPES[key]
except KeyError:
self.handler = None
def createFields(self):
yield Bit(self, "last_metadata_block", "True if this is the last metadata block")
yield Enum(Bits(self, "block_type", 7, "Metadata block header type"), self.BLOCK_TYPE_DESC)
yield UInt24(self, "metadata_length", "Length of following metadata in bytes (doesn't include this header)")
block_type = self["block_type"].value
size = self["metadata_length"].value
if not size:
return
try:
handler = self.BLOCK_TYPES[block_type][2]
except KeyError:
handler = None
if handler:
yield handler(self, "content", size=size*8)
elif self["block_type"].value == 1:
yield NullBytes(self, "padding", size)
else:
yield RawBytes(self, "rawdata", size)
class Metadata(FieldSet):
def createFields(self):
while not self.eof:
field = MetadataBlock(self,"metadata_block[]")
yield field
if field["last_metadata_block"].value:
break
class Frame(FieldSet):
SAMPLE_RATES = {
0: "get from STREAMINFO metadata block",
1: "88.2kHz",
2: "176.4kHz",
3: "192kHz",
4: "8kHz",
5: "16kHz",
6: "22.05kHz",
7: "24kHz",
8: "32kHz",
9: "44.1kHz",
10: "48kHz",
11: "96kHz",
12: "get 8 bit sample rate (in kHz) from end of header",
13: "get 16 bit sample rate (in Hz) from end of header",
14: "get 16 bit sample rate (in tens of Hz) from end of header",
}
def createFields(self):
yield Bits(self, "sync", 14, "Sync code: 11111111111110")
yield Bit(self, "reserved[]")
yield Bit(self, "blocking_strategy")
yield Bits(self, "block_size", 4)
yield Enum(Bits(self, "sample_rate", 4), self.SAMPLE_RATES)
yield Bits(self, "channel_assign", 4)
yield Bits(self, "sample_size", 3)
yield Bit(self, "reserved[]")
# FIXME: Finish frame header parser
class Frames(FieldSet):
def createFields(self):
while not self.eof:
yield Frame(self, "frame[]")
# FIXME: Parse all frames
return
class FlacParser(Parser):
MAGIC = "fLaC\x00"
PARSER_TAGS = {
"id": "flac",
"category": "audio",
"file_ext": ("flac",),
"mime": (u"audio/x-flac",),
"magic": ((MAGIC, 0),),
"min_size": 4*8,
"description": "FLAC audio",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return u"Invalid magic string"
return True
def createFields(self):
yield String(self, "signature", 4,charset="ASCII", description="FLAC signature: fLaC string")
yield Metadata(self,"metadata")
yield Frames(self,"frames")
| true | true |
f739ffae5620d3e95bc4447bf10ca03f3d74fe05 | 2,023 | py | Python | drf_app_generators/templates/factories.py | drf-tools/drf-app-generators | b73db872810f6d6d931b564082942ac5227464cb | [
"MIT"
] | 3 | 2020-03-17T10:00:43.000Z | 2021-09-28T04:08:46.000Z | drf_app_generators/templates/factories.py | drf-tools/drf-app-generators | b73db872810f6d6d931b564082942ac5227464cb | [
"MIT"
] | 18 | 2020-03-17T09:49:26.000Z | 2021-09-22T18:45:30.000Z | drf_app_generators/templates/factories.py | drf-tools/drf-app-generators | b73db872810f6d6d931b564082942ac5227464cb | [
"MIT"
] | 1 | 2020-03-19T04:44:06.000Z | 2020-03-19T04:44:06.000Z | __all__ = ['FACTORY_VIEW', 'FACTORIES_VIEW', 'FACTORY_INIT']
FACTORIES_VIEW = """import datetime
from drf_core import factories
from {{ app_name }}.models import ({% for model in models %}
{{ model.object_name }},{% endfor %}
)
{% for model in models %}
# =============================================================================
# {{ model.object_name }}
# =============================================================================
class {{ model.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model.object_name }} model.
{% for field in model.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model.object_name }}
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
FACTORY_VIEW = """{% for required_lib in model_meta.factory_required_libs %}{{ required_lib }}
{% endfor %}
{% for required_module in model_meta.factory_required_modules %}{{ required_module }}
{% endfor %}
from drf_core import factories
from {{ app_name }}.models.{{ model_meta.verbose_name_plural }} import {{ model_meta.object_name }}
# =============================================================================
# {{ model_meta.object_name }}
# =============================================================================
class {{ model_meta.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model_meta.object_name }} model.
{% for field in model_meta.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model_meta.object_name }}
"""
FACTORY_INIT = """{% for model in models %}from {{ app_name }}.factories.{{ model.verbose_name_plural }} import {{ model.object_name }}Factory
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
| 38.169811 | 142 | 0.560059 | __all__ = ['FACTORY_VIEW', 'FACTORIES_VIEW', 'FACTORY_INIT']
FACTORIES_VIEW = """import datetime
from drf_core import factories
from {{ app_name }}.models import ({% for model in models %}
{{ model.object_name }},{% endfor %}
)
{% for model in models %}
# =============================================================================
# {{ model.object_name }}
# =============================================================================
class {{ model.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model.object_name }} model.
{% for field in model.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model.object_name }}
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
FACTORY_VIEW = """{% for required_lib in model_meta.factory_required_libs %}{{ required_lib }}
{% endfor %}
{% for required_module in model_meta.factory_required_modules %}{{ required_module }}
{% endfor %}
from drf_core import factories
from {{ app_name }}.models.{{ model_meta.verbose_name_plural }} import {{ model_meta.object_name }}
# =============================================================================
# {{ model_meta.object_name }}
# =============================================================================
class {{ model_meta.object_name }}Factory(factories.ModelFactory):
# Factory data for {{ model_meta.object_name }} model.
{% for field in model_meta.fields %}{% if field.factory.code_line %}{% autoescape off %}{{ field.factory.code_line }}
{% endautoescape %}{% endif %}{% endfor %}
class Meta:
model = {{ model_meta.object_name }}
"""
FACTORY_INIT = """{% for model in models %}from {{ app_name }}.factories.{{ model.verbose_name_plural }} import {{ model.object_name }}Factory
{% endfor %}
apps = [{% for model in models %}
{{ model.object_name }}Factory,{% endfor %}
]
"""
| true | true |
f73a01ee3e92eb4c9a4ebb9332a63d1d080785ab | 355 | py | Python | program/migrations/0011_programslot_remove_is_active.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | program/migrations/0011_programslot_remove_is_active.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | program/migrations/0011_programslot_remove_is_active.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Follows 0010, which dropped the matching flag from the Show model
    # (see the dependency's name below).
    dependencies = [
        ('program', '0010_show_remove_is_active'),
    ]

    # Schema change: drop ProgramSlot.is_active.
    operations = [
        migrations.RemoveField(
            model_name='programslot',
            name='is_active',
        ),
    ]
| 18.684211 | 50 | 0.611268 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('program', '0010_show_remove_is_active'),
]
operations = [
migrations.RemoveField(
model_name='programslot',
name='is_active',
),
]
| true | true |
f73a02ef0f16ad8396acafba0c3ba04a9b062bdd | 2,319 | py | Python | du/android/hdump/renderer/qt/DiffViewer.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | [
"MIT"
] | 1 | 2021-12-21T13:18:08.000Z | 2021-12-21T13:18:08.000Z | du/android/hdump/renderer/qt/DiffViewer.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | [
"MIT"
] | null | null | null | du/android/hdump/renderer/qt/DiffViewer.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | [
"MIT"
] | null | null | null | import os
from PyQt4 import QtGui, QtCore
from du.android.hdump.HeapDump import HeapDump
from du.android.hdump.HeapDumpDiff import HeapDumpDiff, DiffNode
from du.android.hdump.SymbolResolver import SymbolResolver
from du.android.hdump.renderer.qt.ui.diffWindow import Ui_MainWindow
class DiffViewer(QtGui.QMainWindow, Ui_MainWindow):
    """Window rendering a heap-dump diff as a tree, color-coded by change type."""

    def __init__(self, heapDiff):
        super(DiffViewer, self).__init__(None)
        self.setupUi(self)
        # HeapDumpDiff exposing zygoteRootNode / appRootNode DiffNode trees.
        self._heapDiff = heapDiff
        # Size every column to its content instead of stretching the last one.
        self.treeWidget.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
        self.treeWidget.header().setStretchLastSection(False)
        self._display()
        # NOTE(review): toggling these checkboxes re-renders the tree, but
        # _display() never reads their checked state, so they currently have
        # no visible effect -- filtering looks unimplemented; confirm intent.
        self.cbShowChanged.stateChanged.connect(lambda: self._display())
        self.cbShowEqual.stateChanged.connect(lambda: self._display())
        self.cbShowNewcheckBox.stateChanged.connect(lambda: self._display())
        self.cbShowRemoved.stateChanged.connect(lambda: self._display())
        self.show()

    def _display(self):
        """Rebuild the tree widget from the zygote and app diff trees."""
        self.treeWidget.clear()
        nodes = (
            ("Zygote", self._heapDiff.zygoteRootNode),
            ("App", self._heapDiff.appRootNode),
        )
        for nodeName, node in nodes:
            self._renderDiff(nodeName, self.treeWidget, node)

    def _renderDiff(self, rootName, parentItem, node):
        """Recursively add `node` (a DiffNode) and its children under `parentItem`.

        `rootName` labels a top-level item; child items are labelled with the
        basename of their frame's library instead.
        """
        item = QtGui.QTreeWidgetItem(parentItem)
        # Row background encodes the kind of change at this node.
        colorMap = {
            DiffNode.TYPE_CHANGED: QtGui.QColor(255, 240, 187),
            DiffNode.TYPE_REMOVED: QtGui.QColor(251, 208, 210),
            DiffNode.TYPE_ADDED: QtGui.QColor(197, 243, 210),
            DiffNode.TYPE_EQUAL: QtGui.QBrush(QtCore.Qt.white),
        }
        for i in range(4):
            item.setBackground(i, colorMap[node.type])
        if rootName:
            item.setText(0, rootName)
        else:
            item.setText(0, os.path.basename(node.frame.library))
        # Column 3 shows "[file] function:line" when the symbol resolved.
        if node.frame.symbol != SymbolResolver.UNKOWN_SYMBOL:
            symbol = "[%s] %s:%d" % (
                os.path.basename(node.frame.symbol.file),
                node.frame.symbol.function,
                node.frame.symbol.line,
            )
            item.setText(3, symbol)
        # Column 1: allocation size at this node.
        item.setText(1, str(node.size))
        for child in node.children:
            self._renderDiff(None, item, child)
| 32.661972 | 82 | 0.633463 | import os
from PyQt4 import QtGui, QtCore
from du.android.hdump.HeapDump import HeapDump
from du.android.hdump.HeapDumpDiff import HeapDumpDiff, DiffNode
from du.android.hdump.SymbolResolver import SymbolResolver
from du.android.hdump.renderer.qt.ui.diffWindow import Ui_MainWindow
class DiffViewer(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self, heapDiff):
super(DiffViewer, self).__init__(None)
self.setupUi(self)
self._heapDiff = heapDiff
self.treeWidget.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.treeWidget.header().setStretchLastSection(False)
self._display()
self.cbShowChanged.stateChanged.connect(lambda: self._display())
self.cbShowEqual.stateChanged.connect(lambda: self._display())
self.cbShowNewcheckBox.stateChanged.connect(lambda: self._display())
self.cbShowRemoved.stateChanged.connect(lambda: self._display())
self.show()
def _display(self):
self.treeWidget.clear()
nodes = (
("Zygote", self._heapDiff.zygoteRootNode),
("App", self._heapDiff.appRootNode),
)
for nodeName, node in nodes:
self._renderDiff(nodeName, self.treeWidget, node)
def _renderDiff(self, rootName, parentItem, node):
item = QtGui.QTreeWidgetItem(parentItem)
colorMap = {
DiffNode.TYPE_CHANGED: QtGui.QColor(255, 240, 187),
DiffNode.TYPE_REMOVED: QtGui.QColor(251, 208, 210),
DiffNode.TYPE_ADDED: QtGui.QColor(197, 243, 210),
DiffNode.TYPE_EQUAL: QtGui.QBrush(QtCore.Qt.white),
}
for i in range(4):
item.setBackground(i, colorMap[node.type])
if rootName:
item.setText(0, rootName)
else:
item.setText(0, os.path.basename(node.frame.library))
if node.frame.symbol != SymbolResolver.UNKOWN_SYMBOL:
symbol = "[%s] %s:%d" % (
os.path.basename(node.frame.symbol.file),
node.frame.symbol.function,
node.frame.symbol.line,
)
item.setText(3, symbol)
item.setText(1, str(node.size))
for child in node.children:
self._renderDiff(None, item, child)
| true | true |
f73a0451d2c8f7256c5c2475175e60d5a83aa6ee | 586 | py | Python | overtime/calculations.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 36 | 2019-11-26T11:46:32.000Z | 2022-02-17T13:18:18.000Z | overtime/calculations.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 13 | 2020-02-14T09:30:16.000Z | 2022-03-12T00:58:09.000Z | overtime/calculations.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | [
"MIT",
"Unlicense"
] | 16 | 2019-06-14T12:11:29.000Z | 2022-02-14T15:16:07.000Z | from holidays.selectors import is_on_holiday
def get_overtime_pay(overtime_application) -> float:
    """Compute the pay owed for an overtime application.

    Hours worked on a public holiday or a Sunday are paid at double the
    applicant's hourly overtime rate; all other days at time-and-a-half.
    """
    premium_day = (overtime_application.is_on_holiday
                   or overtime_application.is_on_sunday)
    multiplier = 2 if premium_day else 1.5
    return (overtime_application.number_of_hours * multiplier *
            overtime_application.applicant.overtime_hourly_rate)
| 41.857143 | 103 | 0.764505 | from holidays.selectors import is_on_holiday
def get_overtime_pay(overtime_application) -> float:
if overtime_application.is_on_holiday or overtime_application.is_on_sunday:
overtime_amount = overtime_application.number_of_hours * 2 * overtime_application.applicant.\
overtime_hourly_rate
return overtime_amount
else:
overtime_amount = overtime_application.number_of_hours * 1.5 * overtime_application.applicant.\
overtime_hourly_rate
return overtime_amount
| true | true |
f73a0465ae7a1e497e613408d7bc3d6b4286c115 | 2,363 | py | Python | lambdas/record-set-cleaner-1.0.0-1/record-set-cleaner.py | dev-cloudbd/cfn-templates | 19aedb87273fcffd1fdd1a082e4b7333d44d6139 | [
"MIT"
] | null | null | null | lambdas/record-set-cleaner-1.0.0-1/record-set-cleaner.py | dev-cloudbd/cfn-templates | 19aedb87273fcffd1fdd1a082e4b7333d44d6139 | [
"MIT"
] | null | null | null | lambdas/record-set-cleaner-1.0.0-1/record-set-cleaner.py | dev-cloudbd/cfn-templates | 19aedb87273fcffd1fdd1a082e4b7333d44d6139 | [
"MIT"
] | null | null | null | # IAM Permissions
# Requires
# - 'route53:ListResourceRecordSets'
# - 'route53:ChangeResourceRecordSets'
# - 'route53:GetChange'
# Optional
# - 'logs:CreateLogGroup'
# - 'logs:CreateLogStream'
# - 'logs:PutLogEvents'
import logging
import os
import time
import boto3
from crhelper import CfnResource
# Module-level logger; in Lambda these records land in CloudWatch Logs.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
# crhelper resource wrapper: handlers are registered via its decorators and
# it takes care of sending the CloudFormation response.
helper = CfnResource()
try:
    route53_client = boto3.client('route53')
except Exception as e:
    # Report client-construction failures through crhelper so the stack
    # operation gets a FAILED response instead of timing out.
    helper.init_failure(e)
# Create and Update have nothing to clean up; register an explicit no-op so
# crhelper still responds SUCCESS for those CloudFormation events.
@helper.create
@helper.update
def no_op(_, __):
    pass
def check_response(response):
    """Return True iff the AWS response dict reports HTTP status 200.

    Any response missing the ResponseMetadata/HTTPStatusCode keys is
    treated as a failure.
    """
    try:
        status = response['ResponseMetadata']['HTTPStatusCode']
    except KeyError:
        return False
    return status == 200
# Longest single sleep between Route53 GetChange polls, in seconds. Capping
# the back-off keeps one sleep from consuming a large slice of the Lambda's
# remaining execution time.
_MAX_POLL_INTERVAL_SECONDS = 30


def wait_for_dns_change_completion(response):
    """Poll Route53 until the change leaves PENDING, then log the outcome.

    Args:
        response: A Route53 ``change_resource_record_sets`` (or
            ``get_change``) response dict containing ``ChangeInfo``.
    """
    timewait = 1
    while check_response(response) and response['ChangeInfo']['Status'] == 'PENDING':
        time.sleep(timewait)
        # Exponential back-off, now capped: the original doubled without
        # limit (1, 2, 4, ... seconds), risking a single very long sleep.
        timewait = min(timewait * 2, _MAX_POLL_INTERVAL_SECONDS)
        changeId = response['ChangeInfo']['Id']
        response = route53_client.get_change(Id=changeId)
        LOGGER.info('Get change: %s', response)
    if check_response(response) and response['ChangeInfo']['Status'] == 'INSYNC':
        LOGGER.info('Delete DNS records completed successfully.')
    else:
        LOGGER.info('Delete DNS records failed.')
@helper.delete
def delete_dns_records(event, __):
    """Custom-resource Delete handler: remove named record sets from a zone.

    Reads ``ZoneId`` and a comma-separated ``RecordNames`` string from the
    resource properties, deletes every matching record in one change batch,
    and waits for the change to propagate.
    """
    zoneId = event['ResourceProperties']['ZoneId']
    recordNames = event['ResourceProperties']['RecordNames'].split(sep=',')
    recordSetPaginator = route53_client.get_paginator('list_resource_record_sets')
    recordSetIterable = recordSetPaginator.paginate(HostedZoneId=zoneId)
    # One DELETE change per record whose name matches exactly.
    # NOTE(review): Route53 returns fully-qualified names with a trailing
    # dot; RecordNames entries must match that form exactly or nothing is
    # deleted -- confirm the template supplies dotted names.
    changes = [
        {
            'Action': 'DELETE',
            'ResourceRecordSet': record
        }
        for recordSet in recordSetIterable
        for record in recordSet['ResourceRecordSets']
        if record['Name'] in recordNames
    ]
    if changes:
        response = route53_client.change_resource_record_sets(
            HostedZoneId=zoneId,
            ChangeBatch={ 'Changes': changes }
        )
        LOGGER.info('Change resource record set: %s', response)
        wait_for_dns_change_completion(response)
    else:
        LOGGER.info('No matching DNS records found.')
def handler(event, context):
    """Lambda entry point: delegate the CloudFormation event to crhelper."""
    helper(event, context)
| 28.817073 | 85 | 0.664833 |
import logging
import os
import time
import boto3
from crhelper import CfnResource
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
helper = CfnResource()
try:
route53_client = boto3.client('route53')
except Exception as e:
helper.init_failure(e)
@helper.create
@helper.update
def no_op(_, __):
pass
def check_response(response):
try:
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
else:
return False
except KeyError:
return False
def wait_for_dns_change_completion(response):
timewait = 1
while check_response(response) and response['ChangeInfo']['Status'] == 'PENDING':
time.sleep(timewait)
timewait += timewait
changeId = response['ChangeInfo']['Id']
response = route53_client.get_change(Id=changeId)
LOGGER.info('Get change: %s', response)
if check_response(response) and response['ChangeInfo']['Status'] == 'INSYNC':
LOGGER.info('Delete DNS records completed successfully.')
else:
LOGGER.info('Delete DNS records failed.')
@helper.delete
def delete_dns_records(event, __):
zoneId = event['ResourceProperties']['ZoneId']
recordNames = event['ResourceProperties']['RecordNames'].split(sep=',')
recordSetPaginator = route53_client.get_paginator('list_resource_record_sets')
recordSetIterable = recordSetPaginator.paginate(HostedZoneId=zoneId)
changes = [
{
'Action': 'DELETE',
'ResourceRecordSet': record
}
for recordSet in recordSetIterable
for record in recordSet['ResourceRecordSets']
if record['Name'] in recordNames
]
if changes:
response = route53_client.change_resource_record_sets(
HostedZoneId=zoneId,
ChangeBatch={ 'Changes': changes }
)
LOGGER.info('Change resource record set: %s', response)
wait_for_dns_change_completion(response)
else:
LOGGER.info('No matching DNS records found.')
def handler(event, context):
helper(event, context)
| true | true |
f73a053fdbf81204ff6a8f61c79d54d982663492 | 25,787 | py | Python | paddlenlp/transformers/roberta/tokenizer.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 1 | 2022-01-28T06:32:26.000Z | 2022-01-28T06:32:26.000Z | paddlenlp/transformers/roberta/tokenizer.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/roberta/tokenizer.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 1 | 2022-03-30T03:05:52.000Z | 2022-03-30T03:05:52.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import json
from paddle.utils import try_import
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer, GPTTokenizer, AddedToken
from ..gpt.tokenizer import bytes_to_unicode
from ...utils.downloader import get_path_from_url, COMMUNITY_MODEL_PREFIX
from ...utils.env import MODEL_HOME
from ...utils.log import logger
__all__ = ['RobertaTokenizer', 'RobertaChineseTokenizer', 'RobertaBPETokenizer']
class RobertaChineseTokenizer(PretrainedTokenizer):
"""
Constructs a RoBerta tokenizer. It uses a basic tokenizer to do punctuation
splitting, lower casing and so on, and follows a WordPiece tokenizer to
tokenize as subwords.
This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
which contains most of the main methods. For more information regarding those methods,
please refer to this superclass.
Args:
vocab_file (str):
The vocabulary file path (ends with '.txt') required to instantiate
a `WordpieceTokenizer`.
do_lower_case (bool):
Whether or not to lowercase the input when tokenizing.
Defaults to`True`.
unk_token (str):
A special token representing the *unknown (out-of-vocabulary)* token.
An unknown token is set to be `unk_token` inorder to be converted to an ID.
Defaults to "[UNK]".
sep_token (str):
A special token separating two different sentences in the same input.
Defaults to "[SEP]".
pad_token (str):
A special token used to make arrays of tokens the same size for batching purposes.
Defaults to "[PAD]".
cls_token (str):
A special token used for sequence classification. It is the last token
of the sequence when built with special tokens. Defaults to "[CLS]".
mask_token (str):
A special token representing a masked token. This is the token used
in the masked language modeling task which the model tries to predict the original unmasked ones.
Defaults to "[MASK]".
Examples:
.. code-block::
from paddlenlp.transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-wwm-ext')
tokens = tokenizer('He was a puppeteer')
#{'input_ids': [101, 9245, 9947, 143, 11227, 9586, 8418, 8854, 8180, 102],
#'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}、
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"roberta-wwm-ext":
"https://bj.bcebos.com/paddlenlp/models/transformers/roberta_base/vocab.txt",
"roberta-wwm-ext-large":
"https://bj.bcebos.com/paddlenlp/models/transformers/roberta_large/vocab.txt",
"rbt3":
"https://bj.bcebos.com/paddlenlp/models/transformers/rbt3/vocab.txt",
"rbtl3":
"https://bj.bcebos.com/paddlenlp/models/transformers/rbtl3/vocab.txt",
}
}
pretrained_init_configuration = {
"roberta-wwm-ext": {
"do_lower_case": True
},
"roberta-wwm-ext-large": {
"do_lower_case": True
},
"rbt3": {
"do_lower_case": True
},
"rbtl3": {
"do_lower_case": True
},
}
    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]",
                 **kwargs):
        """Build the tokenizer from a vocabulary file; see the class docstring
        for argument details."""
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = RobertaTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.do_lower_case = do_lower_case
        # Token <-> id vocabulary; out-of-vocabulary lookups map to unk_token.
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        # Two-stage tokenization: basic (punctuation split / lower-casing)
        # followed by WordPiece on each basic token (see _tokenize).
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)
    @property
    def vocab_size(self) -> int:
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)
def _tokenize(self, text):
"""
End-to-end tokenization for RoBERTa models.
Args:
text (str): The text to be tokenized.
Returns:
list: A list of string representing converted tokens.
"""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_string(self, tokens):
"""
Converts a sequence of tokens (list of string) to a single string. Since
the usage of WordPiece introducing `##` to concat subwords, also removes
`##` when converting.
Args:
tokens (list): A list of string representing tokens to be converted.
Returns:
str: Converted string from tokens.
Examples:
.. code-block::
from paddlenlp.transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-wwm-ext')
tokens = tokenizer.tokenize('He was a puppeteer')
strings = tokenizer.convert_tokens_to_string(tokens)
'''
he was a puppeteer
'''
"""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Args:
pair(bool):
Whether the input is a sequence pair or a single sequence.
Defaults to `False` and the input is a single sequence.
Returns:
int: Number of tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
A RoBERTa sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (List[int]):
List of IDs to which the special tokens will be added.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Defaults to `None`.
Returns:
List[int]: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
"""
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
A RoBERTa offset_mapping has the following format:
- single sequence: ``(0,0) X (0,0)``
- pair of sequences: ``(0,0) A (0,0) B (0,0)``
Args:
offset_mapping_0 (List[tuple]):
List of wordpiece offsets to which the special tokens will be added.
offset_mapping_1 (List[tuple], optional):
Optional second list of wordpiece offsets for offset mapping pairs. Defaults to None.
Returns:
List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)
] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
A RoBERTa sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (List[int]):
A list of `inputs_ids` for the first sequence.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs. Defaults to None.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
"""
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
_sep) * [1]
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]):
A list of `inputs_ids` for the first sequence.
token_ids_1 (List[int], optinal):
Optional second list of IDs for sequence pairs. Defaults to None.
already_has_special_tokens (bool, optional): Whether or not the token list is already
formatted with special tokens for the model. Defaults to None.
Returns:
List[int]: The list of integers either be 0 or 1: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
class RobertaBPETokenizer(GPTTokenizer):
    """
    Constructs a Roberta tokenizer based on byte-level Byte-Pair-Encoding.
    This tokenizer inherits from :class:`~paddlenlp.transformers.GPTTokenizer`
    which contains most of the main methods. For more information regarding those methods,
    please refer to this superclass.
    Args:
        vocab_file (str):
            Path to the vocab file.
            The vocab file contains a mapping from vocabulary strings to indices.
        merges_file (str):
            Path to the merge file.
            The merge file is used to split the input sentence into "subword" units.
            The vocab file is then used to encode those units as intices.
        errors (str):
            Paradigm to follow when decoding bytes to UTF-8.
            Defaults to `'replace'`.
        max_len (int, optional):
            The maximum value of the input sequence length.
            Defaults to `None`.
        special_tokens (list, optional):
            A list of special tokens not in the vocabulary.
            Defaults to `None`.
    Examples:
        .. code-block::
            from paddlenlp.transformers import RobertaBPETokenizer
            tokenizer = RobertaBPETokenizer.from_pretrained('roberta-en-base')
            tokens = tokenizer('This is a simple Paddle')
            #{'input_ids': [0, 713, 16, 10, 2007, 221, 33151, 2],
            #'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0]}
    """
    resource_files_names = {
        "vocab_file": "vocab.json",
        "merges_file": "merges.txt"
    } # for save_pretrained
    pretrained_resource_files_map = {}
    pretrained_init_configuration = {}
    def __init__(self,
                 vocab_file,
                 merges_file,
                 errors='replace',
                 max_len=None,
                 special_tokens=None,
                 bos_token="<s>",
                 eos_token="</s>",
                 cls_token="<s>",
                 sep_token="</s>",
                 unk_token="<unk>",
                 pad_token="<pad>",
                 mask_token="<mask>",
                 **kwargs):
        """Load the BPE vocabulary/merge files and register special tokens."""
        # Wrap plain-string special tokens in AddedToken so the stripping
        # behaviour around each token is explicit.
        bos_token = AddedToken(
            bos_token, lstrip=False,
            rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(
            eos_token, lstrip=False,
            rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(
            sep_token, lstrip=False,
            rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(
            cls_token, lstrip=False,
            rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(
            unk_token, lstrip=False,
            rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(
            pad_token, lstrip=False,
            rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(
            mask_token, lstrip=True,
            rstrip=False) if isinstance(mask_token, str) else mask_token
        self._build_special_tokens_map_extended(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token)
        self._vocab_file = vocab_file
        self._merges_file = merges_file
        self.max_len = max_len if max_len is not None else int(1e12)
        self.num_command_tokens = 2
        self.num_type_tokens = 2
        # token -> id map (encoder) and its inverse (decoder).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.num_tokens = len(self.encoder)
        self.num_text_tokens = self.num_tokens - 1
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # BPE merge ranks: rules earlier in the merges file get a lower
        # (higher-priority) rank; the header line and trailing blank are skipped.
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_data = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        re = try_import("regex")
        # Pre-tokenization pattern: contractions, letter runs, digit runs,
        # punctuation runs and whitespace (optionally with a leading space).
        self.pat = re.compile(
            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification
        tasks by concatenating and adding special tokens.
        """
        # Single: <s> A </s>; pair: <s> A </s> </s> B </s> (double separator).
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        if token_ids_1 is None:
            return _cls + token_ids_0 + _sep
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def get_offset_mapping(self, text):
        """Return the (start, end) character span in *text* of each token.

        Tokens starting with 'Ġ' absorb the preceding space; the span is
        shifted by one so it excludes that space character.
        """
        tokens = self._tokenize(text)
        offset_mapping = []
        offset = 0
        for token in tokens:
            if token[0] == 'Ġ':
                offset_mapping.append((offset + 1, offset + len(token)))
            else:
                offset_mapping.append((offset, offset + len(token)))
            offset += len(token)
        return offset_mapping
    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """
        Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
        A Roberta offset_mapping has the following format:
        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:        ``(0,0) A (0,0) (0,0) B (0,0)``
        Args:
            offset_mapping_0 (List[tuple]):
                List of wordpiece offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs. Defaults to None.
        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``encode`` methods.
        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optinal):
                Optional second list of IDs for sequence pairs. Defaults to None.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to None.
        Returns:
            List[int]: The list of integers either be 0 or 1: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        # Pair layout mirrors build_inputs_with_special_tokens: [1, 1] covers
        # the double separator between the two sequences.
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)
                                                          ) + [1]
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Return all-zero token type ids sized to the full encoded input.

        Both the single-sequence and the pair layout yield zeros only.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def convert_tokens_to_string(self, tokens):
        """Decode byte-level BPE tokens back to text via the byte decoder."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode(
            'utf-8', errors=self.errors)
        return text
    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.
        Args:
            pair(bool):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False` and the input is a single sequence.
        Returns:
            int: Number of tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))
class RobertaTokenizer:
    """
    RobertaTokenizer is a generic tokenizer class that will be instantiated as either
    RobertaChineseTokenizer or RobertaBPETokenizer when created with the RobertaTokenizer.from_pretrained() class method.
    """
    # Model names handled by each concrete tokenizer implementation.
    chinese_model_names = RobertaChineseTokenizer.pretrained_init_configuration.keys(
    )
    english_model_names = RobertaBPETokenizer.pretrained_init_configuration.keys(
    )
    tokenizer_config_file = "tokenizer_config.json"
    def __init__(self, *args, **kwargs):
        # Pure factory class: direct instantiation is forbidden.
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path).`"
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """Dispatch to RobertaChineseTokenizer or RobertaBPETokenizer.

        Resolution order: built-in model name, local directory (reading its
        tokenizer_config.json if present), then a community-contributed model
        whose config is downloaded first. Unknown/absent ``init_class``
        entries fall back to RobertaBPETokenizer.
        """
        # From built-in pretrained models
        if pretrained_model_name_or_path in cls.chinese_model_names:
            return RobertaChineseTokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        elif pretrained_model_name_or_path in cls.english_model_names:
            return RobertaBPETokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        # From local dir path
        elif os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path,
                                       cls.tokenizer_config_file)
            if os.path.exists(config_file):
                with io.open(config_file, encoding="utf-8") as f:
                    init_kwargs = json.load(f)
                # class name corresponds to this configuration
                init_class = init_kwargs.pop("init_class", None)
                if init_class == "RobertaBPETokenizer":
                    return RobertaBPETokenizer.from_pretrained(
                        pretrained_model_name_or_path, *model_args, **kwargs)
                if init_class == "RobertaChineseTokenizer" or init_class == "BertTokenizer":
                    return RobertaChineseTokenizer.from_pretrained(
                        pretrained_model_name_or_path, *model_args, **kwargs)
            # Missing config file or unrecognized init_class: default to BPE.
            return RobertaBPETokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        else:
            # Assuming from community-contributed pretrained models
            config_file = os.path.join(COMMUNITY_MODEL_PREFIX,
                                       pretrained_model_name_or_path,
                                       cls.tokenizer_config_file)
            default_root = os.path.join(MODEL_HOME,
                                        pretrained_model_name_or_path)
            try:
                resolved_config_file = get_path_from_url(config_file,
                                                         default_root)
            except RuntimeError as err:
                logger.error(err)
                raise RuntimeError(
                    f"Can't find load tokenizer_config_file for '{pretrained_model_name_or_path}'.\n"
                    f"Please make sure that '{pretrained_model_name_or_path}' is:\n"
                    "a correct model-identifier of community-contributed pretrained models.\n"
                )
            with io.open(resolved_config_file, encoding="utf-8") as f:
                init_kwargs = json.load(f)
            init_class = init_kwargs.pop("init_class", None)
            if init_class == "RobertaBPETokenizer":
                return RobertaBPETokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
            elif init_class == "RobertaChineseTokenizer" or init_class == "BertTokenizer":
                return RobertaChineseTokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
            else:
                return RobertaBPETokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
| 41.2592 | 121 | 0.586769 |
import io
import os
import json
from paddle.utils import try_import
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer, GPTTokenizer, AddedToken
from ..gpt.tokenizer import bytes_to_unicode
from ...utils.downloader import get_path_from_url, COMMUNITY_MODEL_PREFIX
from ...utils.env import MODEL_HOME
from ...utils.log import logger
__all__ = ['RobertaTokenizer', 'RobertaChineseTokenizer', 'RobertaBPETokenizer']
class RobertaChineseTokenizer(PretrainedTokenizer):
    """WordPiece tokenizer for the Chinese RoBERTa checkpoints (wwm-ext, rbt*).

    Runs BasicTokenizer first, then WordpieceTokenizer over each basic token.
    """
    resource_files_names = {"vocab_file": "vocab.txt"}
    # Built-in checkpoint name -> hosted vocabulary URL.
    pretrained_resource_files_map = {
        "vocab_file": {
            "roberta-wwm-ext":
            "https://bj.bcebos.com/paddlenlp/models/transformers/roberta_base/vocab.txt",
            "roberta-wwm-ext-large":
            "https://bj.bcebos.com/paddlenlp/models/transformers/roberta_large/vocab.txt",
            "rbt3":
            "https://bj.bcebos.com/paddlenlp/models/transformers/rbt3/vocab.txt",
            "rbtl3":
            "https://bj.bcebos.com/paddlenlp/models/transformers/rbtl3/vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "roberta-wwm-ext": {
            "do_lower_case": True
        },
        "roberta-wwm-ext-large": {
            "do_lower_case": True
        },
        "rbt3": {
            "do_lower_case": True
        },
        "rbtl3": {
            "do_lower_case": True
        },
    }
    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]",
                 **kwargs):
        """Load the vocabulary file and build the basic/wordpiece tokenizers."""
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = RobertaTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.do_lower_case = do_lower_case
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)
    @property
    def vocab_size(self):
        """Size of the loaded vocabulary."""
        return len(self.vocab)
    def _tokenize(self, text):
        """Split *text* into wordpieces: basic tokenization, then WordPiece."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def convert_tokens_to_string(self, tokens):
        """Join wordpiece tokens into text, removing the '##' subword markers."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
    def num_special_tokens_to_add(self, pair=False):
        """Number of special tokens added when encoding a sequence (pair)."""
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """Insert (0, 0) placeholder offsets at each special-token position."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Segment ids: zeros for the first sequence (+[SEP]), ones for the second."""
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]
    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """Return 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
class RobertaBPETokenizer(GPTTokenizer):
    """Byte-level BPE tokenizer for the English RoBERTa checkpoints.

    Inherits the main tokenization machinery from GPTTokenizer; this class
    adds RoBERTa's special-token layout (<s> ... </s> [</s> ... </s>]).
    """
    resource_files_names = {
        "vocab_file": "vocab.json",
        "merges_file": "merges.txt"
    } # for save_pretrained
    pretrained_resource_files_map = {}
    pretrained_init_configuration = {}
    def __init__(self,
                 vocab_file,
                 merges_file,
                 errors='replace',
                 max_len=None,
                 special_tokens=None,
                 bos_token="<s>",
                 eos_token="</s>",
                 cls_token="<s>",
                 sep_token="</s>",
                 unk_token="<unk>",
                 pad_token="<pad>",
                 mask_token="<mask>",
                 **kwargs):
        """Load the BPE vocabulary/merge files and register special tokens."""
        # Wrap plain-string special tokens in AddedToken so stripping
        # behaviour around each token is explicit.
        bos_token = AddedToken(
            bos_token, lstrip=False,
            rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(
            eos_token, lstrip=False,
            rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(
            sep_token, lstrip=False,
            rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(
            cls_token, lstrip=False,
            rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(
            unk_token, lstrip=False,
            rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(
            pad_token, lstrip=False,
            rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(
            mask_token, lstrip=True,
            rstrip=False) if isinstance(mask_token, str) else mask_token
        self._build_special_tokens_map_extended(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token)
        self._vocab_file = vocab_file
        self._merges_file = merges_file
        self.max_len = max_len if max_len is not None else int(1e12)
        self.num_command_tokens = 2
        self.num_type_tokens = 2
        # token -> id map (encoder) and its inverse (decoder).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.num_tokens = len(self.encoder)
        self.num_text_tokens = self.num_tokens - 1
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # BPE merge ranks: earlier rules have higher priority; the header
        # line and trailing blank of merges.txt are skipped.
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_data = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        re = try_import("regex")
        # Pre-tokenization pattern: contractions, letter runs, digit runs,
        # punctuation runs and whitespace (optionally with a leading space).
        self.pat = re.compile(
            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for a single sequence; <s> A </s> </s> B </s> for a pair."""
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        if token_ids_1 is None:
            return _cls + token_ids_0 + _sep
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def get_offset_mapping(self, text):
        """Return (start, end) character spans in *text* for each BPE token.

        Tokens starting with 'Ġ' absorb the preceding space; the span is
        shifted by one so it excludes that space character.
        """
        tokens = self._tokenize(text)
        offset_mapping = []
        offset = 0
        for token in tokens:
            if token[0] == 'Ġ':
                offset_mapping.append((offset + 1, offset + len(token)))
            else:
                offset_mapping.append((offset, offset + len(token)))
            offset += len(token)
        return offset_mapping
    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """Insert (0, 0) placeholder offsets at each special-token position."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """Return 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        # [1, 1] covers the double separator between the two sequences.
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)
                                                          ) + [1]
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """All-zero token type ids sized to the full encoded input."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def convert_tokens_to_string(self, tokens):
        """Decode byte-level BPE tokens back to text via the byte decoder."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode(
            'utf-8', errors=self.errors)
        return text
    def num_special_tokens_to_add(self, pair=False):
        """Number of special tokens added when encoding a sequence (pair)."""
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))
class RobertaTokenizer:
    """Factory that resolves to RobertaChineseTokenizer or RobertaBPETokenizer.

    Use :meth:`from_pretrained`; direct instantiation raises.
    """
    chinese_model_names = RobertaChineseTokenizer.pretrained_init_configuration.keys(
    )
    english_model_names = RobertaBPETokenizer.pretrained_init_configuration.keys(
    )
    tokenizer_config_file = "tokenizer_config.json"
    def __init__(self, *args, **kwargs):
        # Pure factory class: direct instantiation is forbidden.
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path).`"
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """Dispatch on built-in name, local directory, or community model.

        Unknown/absent ``init_class`` entries fall back to RobertaBPETokenizer.
        """
        # Built-in pretrained model names.
        if pretrained_model_name_or_path in cls.chinese_model_names:
            return RobertaChineseTokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        elif pretrained_model_name_or_path in cls.english_model_names:
            return RobertaBPETokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        # Local directory: consult its tokenizer_config.json when present.
        elif os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path,
                                       cls.tokenizer_config_file)
            if os.path.exists(config_file):
                with io.open(config_file, encoding="utf-8") as f:
                    init_kwargs = json.load(f)
                init_class = init_kwargs.pop("init_class", None)
                if init_class == "RobertaBPETokenizer":
                    return RobertaBPETokenizer.from_pretrained(
                        pretrained_model_name_or_path, *model_args, **kwargs)
                if init_class == "RobertaChineseTokenizer" or init_class == "BertTokenizer":
                    return RobertaChineseTokenizer.from_pretrained(
                        pretrained_model_name_or_path, *model_args, **kwargs)
            # Missing config or unrecognized init_class: default to BPE.
            return RobertaBPETokenizer.from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs)
        else:
            # Community-contributed model: download its config first.
            config_file = os.path.join(COMMUNITY_MODEL_PREFIX,
                                       pretrained_model_name_or_path,
                                       cls.tokenizer_config_file)
            default_root = os.path.join(MODEL_HOME,
                                        pretrained_model_name_or_path)
            try:
                resolved_config_file = get_path_from_url(config_file,
                                                         default_root)
            except RuntimeError as err:
                logger.error(err)
                raise RuntimeError(
                    f"Can't find load tokenizer_config_file for '{pretrained_model_name_or_path}'.\n"
                    f"Please make sure that '{pretrained_model_name_or_path}' is:\n"
                    "a correct model-identifier of community-contributed pretrained models.\n"
                )
            with io.open(resolved_config_file, encoding="utf-8") as f:
                init_kwargs = json.load(f)
            init_class = init_kwargs.pop("init_class", None)
            if init_class == "RobertaBPETokenizer":
                return RobertaBPETokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
            elif init_class == "RobertaChineseTokenizer" or init_class == "BertTokenizer":
                return RobertaChineseTokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
            else:
                return RobertaBPETokenizer.from_pretrained(
                    pretrained_model_name_or_path, *model_args, **kwargs)
| true | true |
f73a0566a5648baba82ef35edd6a06447f333e67 | 1,433 | py | Python | PyToolkit/logger.py | LiangsLi/PyToolkit | 2406835a1b653ad5b3bdbb1e9ae9cf2aaf6447d8 | [
"MIT"
] | null | null | null | PyToolkit/logger.py | LiangsLi/PyToolkit | 2406835a1b653ad5b3bdbb1e9ae9cf2aaf6447d8 | [
"MIT"
] | null | null | null | PyToolkit/logger.py | LiangsLi/PyToolkit | 2406835a1b653ad5b3bdbb1e9ae9cf2aaf6447d8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: logger
Description :
Author : Liangs
date: 2019/7/28
-------------------------------------------------
Change Activity:
2019/7/28:
-------------------------------------------------
"""
import logging
def init_logger(logger_name, log_file=None, is_debug=False, only_console=False):
    """Create and configure a named logger.

    Args:
        logger_name (str): Name to register with the logging module.
        log_file (str): Path of the log file; required unless
            ``only_console`` is True.
        is_debug (bool): Emit DEBUG-level records when True, otherwise INFO.
        only_console (bool): Log to the console only; otherwise also append
            to ``log_file``.

    Returns:
        logging.Logger: The configured logger instance.

    Raises:
        ValueError: If ``log_file`` is missing while ``only_console`` is False.
    """
    # Raise explicitly instead of `assert` (asserts are stripped under -O).
    if not only_console and not log_file:
        raise ValueError("log_file is required when only_console is False")
    logger = logging.getLogger(logger_name)
    level = logging.DEBUG if is_debug else logging.INFO
    logger.setLevel(level)
    # Fix 1: re-configuring the same name must not stack duplicate handlers
    # (the previous version added a new handler pair on every call).
    logger.handlers.clear()
    formatter = logging.Formatter("%(asctime)s-%(levelname)s-%(message)s")
    # Fix 2: handler levels follow the logger level; they were hard-coded to
    # INFO, which silently discarded DEBUG records even when is_debug=True.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if not only_console:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    logger.info(f'===================== NEW LOGGER:{logger_name} =========================')
    return logger
def get_logger(logger_name):
    """Look up a previously configured logger by name.

    Thin wrapper around :func:`logging.getLogger`, so repeated calls with
    the same name return the same instance.
    """
    return logging.getLogger(logger_name)
if __name__ == '__main__':
    # Manual smoke test: configure a logger that also appends to ./test.log.
    demo_logger = init_logger('test', 'test.log')
    demo_logger.info('this is a info')
| 30.489362 | 96 | 0.568039 |
import logging
def init_logger(logger_name, log_file=None, is_debug=False, only_console=False):
    """Create and configure a named logger with console (and optional file) output.

    NOTE(review): both handlers are fixed at INFO level, so ``is_debug=True``
    raises the logger level to DEBUG but DEBUG records are still filtered out
    by the handlers — confirm whether that is intended.
    NOTE(review): calling this twice with the same name stacks duplicate
    handlers, so every record is then emitted multiple times.
    """
    if not only_console:
        assert log_file
    logger = logging.getLogger(logger_name)
    if is_debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # Console handler, always attached.
    c_handler = logging.StreamHandler()
    c_handler.setLevel(logging.INFO)
    c_format = logging.Formatter("%(asctime)s-%(levelname)s-%(message)s")
    c_handler.setFormatter(c_format)
    logger.addHandler(c_handler)
    # File handler, only when a log file is requested.
    if not only_console:
        f_handler = logging.FileHandler(log_file)
        f_handler.setLevel(logging.INFO)
        f_format = logging.Formatter("%(asctime)s-%(levelname)s-%(message)s")
        f_handler.setFormatter(f_format)
        logger.addHandler(f_handler)
    logger.info(f'===================== NEW LOGGER:{logger_name} =========================')
    return logger
def get_logger(logger_name):
    """Return the logger previously registered under *logger_name*."""
    return logging.getLogger(logger_name)
# Manual smoke test: configure a logger that also appends to ./test.log.
if __name__ == '__main__':
    my_logger = init_logger('test', 'test.log')
    my_logger.info('this is a info')
| true | true |
f73a05a35e2268d1368fe692a994702fa2899886 | 1,368 | py | Python | tests/y2018/test_2018_d10.py | ErikThorsell/advent-of-code-python | 8afb3d2dd731b77a421eff9dbd33d1f6a9dfbee3 | [
"MIT"
] | 2 | 2021-12-03T16:17:13.000Z | 2022-01-27T12:29:45.000Z | tests/y2018/test_2018_d10.py | ErikThorsell/advent-of-code-python | 8afb3d2dd731b77a421eff9dbd33d1f6a9dfbee3 | [
"MIT"
] | null | null | null | tests/y2018/test_2018_d10.py | ErikThorsell/advent-of-code-python | 8afb3d2dd731b77a421eff9dbd33d1f6a9dfbee3 | [
"MIT"
] | 1 | 2021-12-29T20:38:38.000Z | 2021-12-29T20:38:38.000Z | """TEST MODULE TEMPLATE"""
from advent_of_code.utils.parse import parse_star_vectors
from advent_of_code.y2018.d10 import solution_1
def test_solution_1():
    """Smoke-test solution_1 on the AoC 2018 day 10 worked example.

    The puzzle answer is the word the stars spell out, which solution_1
    renders for visual inspection; there is no machine-checkable expected
    value here, so this test only verifies the example parses and runs.
    """
    example_input = """position=< 9,  1> velocity=< 0,  2>
position=< 7,  0> velocity=<-1,  0>
position=< 3, -2> velocity=<-1,  1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2,  2>
position=<-6, 10> velocity=< 2, -2>
position=< 1,  8> velocity=< 1, -1>
position=< 1,  7> velocity=< 1,  0>
position=<-3, 11> velocity=< 1, -2>
position=< 7,  6> velocity=<-1, -1>
position=<-2,  3> velocity=< 1,  0>
position=<-4,  3> velocity=< 2,  0>
position=<10, -3> velocity=<-1,  1>
position=< 5, 11> velocity=< 1, -2>
position=< 4,  7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0,  1>
position=<15,  0> velocity=<-2,  0>
position=< 1,  6> velocity=< 1,  0>
position=< 8,  9> velocity=< 0, -1>
position=< 3,  3> velocity=<-1,  1>
position=< 0,  5> velocity=< 0, -1>
position=<-2,  2> velocity=< 2,  0>
position=< 5, -2> velocity=< 1,  2>
position=< 1,  4> velocity=< 2,  1>
position=<-2,  7> velocity=< 2, -2>
position=< 3,  6> velocity=<-1, -1>
position=< 5,  0> velocity=< 1,  0>
position=<-6,  0> velocity=< 2,  0>
position=< 5,  9> velocity=< 1, -2>
position=<14,  7> velocity=<-2,  0>
position=<-3,  6> velocity=< 2, -1>"""
    # The previous `assert 1 == 0` forced a failure on every run (apparently
    # so pytest would print the captured star rendering); it made the suite
    # red regardless of correctness, so it is removed. Run pytest with -s to
    # inspect the rendered output instead.
    solution_1(parse_star_vectors(example_input))
| 34.2 | 58 | 0.598684 | from advent_of_code.utils.parse import parse_star_vectors
from advent_of_code.y2018.d10 import solution_1
def test_solution_1():
    """Run solution_1 on the AoC 2018 day 10 worked example input."""
    example_input = """position=< 9,  1> velocity=< 0,  2>
position=< 7,  0> velocity=<-1,  0>
position=< 3, -2> velocity=<-1,  1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2,  2>
position=<-6, 10> velocity=< 2, -2>
position=< 1,  8> velocity=< 1, -1>
position=< 1,  7> velocity=< 1,  0>
position=<-3, 11> velocity=< 1, -2>
position=< 7,  6> velocity=<-1, -1>
position=<-2,  3> velocity=< 1,  0>
position=<-4,  3> velocity=< 2,  0>
position=<10, -3> velocity=<-1,  1>
position=< 5, 11> velocity=< 1, -2>
position=< 4,  7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0,  1>
position=<15,  0> velocity=<-2,  0>
position=< 1,  6> velocity=< 1,  0>
position=< 8,  9> velocity=< 0, -1>
position=< 3,  3> velocity=<-1,  1>
position=< 0,  5> velocity=< 0, -1>
position=<-2,  2> velocity=< 2,  0>
position=< 5, -2> velocity=< 1,  2>
position=< 1,  4> velocity=< 2,  1>
position=<-2,  7> velocity=< 2, -2>
position=< 3,  6> velocity=<-1, -1>
position=< 5,  0> velocity=< 1,  0>
position=<-6,  0> velocity=< 2,  0>
position=< 5,  9> velocity=< 1, -2>
position=<14,  7> velocity=<-2,  0>
position=<-3,  6> velocity=< 2, -1>"""
    solution_1(parse_star_vectors(example_input))
    # NOTE(review): this assert always fails; it looks intended to force a
    # pytest failure so the captured stdout (the rendered star message) gets
    # printed — confirm and replace with a real assertion if possible.
    assert 1 == 0
| true | true |
f73a071480fc63afad913916ce833f0566fa595e | 8,304 | py | Python | test.py | Hooooot/doubanSpider | 0f011705ae9218ebdce3ef83ad3cd9e723c98e42 | [
"MIT"
] | 1 | 2019-12-24T08:05:26.000Z | 2019-12-24T08:05:26.000Z | test.py | Hooooot/doubanSpider | 0f011705ae9218ebdce3ef83ad3cd9e723c98e42 | [
"MIT"
] | null | null | null | test.py | Hooooot/doubanSpider | 0f011705ae9218ebdce3ef83ad3cd9e723c98e42 | [
"MIT"
] | null | null | null | import pickle
import sys
import time
from PyQt5.QtCore import Qt, QThread, QTimer, pyqtSignal
from PyQt5.QtGui import QIntValidator, QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import (QAction, QApplication, QCheckBox, QComboBox,
QDialog, QGridLayout, QLabel, QLineEdit,
QMainWindow, QMenu, QMenuBar, QMessageBox,
QProgressBar, QPushButton, QTableView, QWidget)
from douban import Movie
def readData():
    """Load the pickled movie list previously dumped by the spider.

    Note: pickle.load executes arbitrary code from the file; only load
    dumps produced by this application itself.
    """
    with open("./jinwu/dump.dat", "rb") as dump_file:
        return pickle.load(dump_file)
# Module-level load: the scraped movie list is read once at import time.
movies = readData()
class TimerThread(QThread):
    """Background thread stub; ``run`` is intentionally a no-op for now."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)

    def run(self):
        pass
class MainWindow(QMainWindow):
    def __init__(self):
        """Create all widgets and state attributes, then delegate layout to initUI."""
        super().__init__()
        # Filter combo boxes: genre / region / decade.
        self.typeCombo = QComboBox(self)
        self.areaCombo = QComboBox(self)
        self.yearCombo = QComboBox(self)
        # Input row: label, start button, max-count line edit, show-all checkbox.
        self.tip = QLabel("最大电影获取数量:", self)
        self.btn = QPushButton("开始", self)
        self.amount = QLineEdit(self)
        self.showAllDataCheck = QCheckBox("显示所有电影信息", self)
        # Created lazily later; None until first used.
        self.spiderThread = None
        self.tableWindow = None
        self.status = self.statusBar()
        self.menu = self.menuBar()
        self.aboutWindow = None
        self.progress = None
        # Elapsed-time bookkeeping (seconds / minutes) shown in the status bar.
        self.usedTimeS = 0
        self.usedTimeM = 0
        self.timeLabel = None
        self.timer = None
        self.initUI()
def initUI(self):
# self.setGeometry(1000, 100, 500, 200)
gridLayout = QGridLayout()
widget = QWidget()
self.setWindowTitle("Test Window")
self.setCentralWidget(widget)
self.resize(400, 300)
types = ["全部类型", "剧情", "喜剧", "动作", "爱情", "科幻", "动画", "悬疑",
"惊悚", "恐怖", "犯罪", "同性", "音乐", "歌舞", "传记",
"历史", "战争", "西部", "奇幻", "冒险", "灾难", "武侠"]
areas = ["全部地区", "中国大陆", "美国", "香港", "台湾", "日本", "韩国", "英国",
"法国", "德国", "意大利", "西班牙", "印度", "泰国", "俄罗斯",
"伊朗", "加拿大", "澳大利亚", "爱尔兰", "瑞典", "巴西", "丹麦"]
years = ["全部年代", "2019", "2018", "2010年代", "2000年代", "90年代",
"80年代", "70年代", "60年代", "更早"]
self.typeCombo.addItems(types)
self.areaCombo.addItems(areas)
self.yearCombo.addItems(years)
self.btn.setToolTip("点击开始爬取")
self.tip.setAlignment(Qt.AlignRight)
advanced = self.menu.addMenu("高级")
concurrent = advanced.addAction("并发")
concurrent.setCheckable(True)
concurrent.triggered.connect(self.concurrentCheck)
help = self.menu.addMenu("帮助")
help.addAction("关于")
help.triggered[QAction].connect(self.about)
intOnly = QIntValidator()
intOnly.setRange(0, 999)
self.amount.setValidator(intOnly)
self.amount.setPlaceholderText("0~999(0:不限制)")
self.amount.setMaxLength(3)
self.amount.setMaximumWidth(120)
self.showAllDataCheck.setCheckState(Qt.Checked)
self.btn.clicked.connect(self.btnClicked)
self.amount.returnPressed.connect(lambda: self.btn.clicked.emit())
gridLayout.addWidget(self.typeCombo, 1, 1)
gridLayout.addWidget(self.areaCombo, 1, 2)
gridLayout.addWidget(self.yearCombo, 1, 3)
gridLayout.addWidget(self.tip, 2, 1)
gridLayout.addWidget(self.amount, 2, 2)
gridLayout.addWidget(self.showAllDataCheck, 2, 3)
gridLayout.addWidget(self.btn, 3, 2)
self.status.messageChanged.connect(self.status.showMessage)
self.status.messageChanged.emit("Ready!")
self.centralWidget().setLayout(gridLayout)
# self.progress = QProgressBar(self)
# self.status.addPermanentWidget(self.progress, stretch=0)
self.timeLabel = QLabel("已用时:" + str(self.usedTimeS) + "秒")
self.status.addPermanentWidget(self.timeLabel)
self.timer = QTimer(self)
self.timer.timeout.connect(self.updateUsedTime)
def updateUsedTime(self):
self.usedTimeS += 1
if self.usedTimeS >= 60:
self.usedTimeM += 1
self.usedTimeS = 0
if self.usedTimeM == 0:
self.timeLabel.setText("已用时:" + str(self.usedTimeS) + " 秒")
else:
self.timeLabel.setText("已用时:" + str(self.usedTimeM) + " 分 " +
str(self.usedTimeS) + " 秒")
def btnClicked(self):
# moviesList = readData()
# self.moviesTable(moviesList)
# self.tableThread = TableWindow()
# self.tableThread.start()
if self.showAllDataCheck.isChecked():
self.tableWindow = TableWidget(movies)
self.tableWindow.show()
self.status.showMessage("正在获取电影详细信息:(200/200)")
self.usedTimeM = 0
self.usedTimeS = 0
self.timer.start(1000)
def about(self, qAction):
self.aboutWindow = About()
self.aboutWindow.setModal(True)
self.aboutWindow.show()
def concurrentCheck(self, check):
QMessageBox.information(self, "title", str(check))
class TableWidget(QWidget):
    """Window showing all movie details in a QTableView."""

    def __init__(self, movieList, parent=None):
        """
        TableWidget(movieList, parent=None)

        Parameters:
            movieList: list of movies (douban.Movie)
            parent: parent window, default None
        """
        super(TableWidget, self).__init__(parent)
        self.table = QTableView(self)
        self.movieList = movieList
        self.initUI()

    def initUI(self):
        """Populate the table and lay it out in a single-cell grid."""
        self.handleData()
        self.setWindowTitle("所有电影详细信息")
        self.resize(1200, 600)
        gridLayout = QGridLayout()
        gridLayout.addWidget(self.table, 1, 1)
        self.setLayout(gridLayout)

    def handleData(self):
        """(Re)build the table model from self.movieList.

        Columns 0-8 come from indexing each movie; column 9 holds a
        "查看评论" link widget opening the movie's URL externally.
        """
        model = QStandardItemModel()
        self.table.setModel(model)
        model.setHorizontalHeaderLabels(["电影名", "评分", "投票人数", "评分百分比(依次5星~1星)",
                                         "地区", "上映年份", "类型", "长度(分钟)", "链接",
                                         "操作"])
        self.table.setColumnWidth(0, 150)
        self.table.setColumnWidth(1, 50)
        self.table.setColumnWidth(2, 75)
        self.table.setColumnWidth(3, 175)
        self.table.setColumnWidth(4, 175)
        self.table.setColumnWidth(5, 75)
        self.table.setColumnWidth(6, 125)
        self.table.setColumnWidth(7, 75)
        self.table.setColumnWidth(8, 150)
        self.table.setColumnWidth(9, 75)
        for row, movie in enumerate(self.movieList):
            # Assumes douban.Movie supports integer indexing for its first
            # nine display fields and exposes a `url` attribute — TODO confirm.
            for column in range(9):
                item = QStandardItem(movie[column])
                item.setTextAlignment(Qt.AlignCenter)
                model.setItem(row, column, item)
            delete = QLabel(self)
            delete.setText("<a style='color:DimGray;' \
href='" + movie.url + "'>查看评论</a>")
            delete.setAlignment(Qt.AlignCenter)
            delete.setOpenExternalLinks(True)
            self.table.setIndexWidget(model.index(row, 9), delete)

    def deleteClicked(self):
        # NOTE(review): reads a "row" dynamic property that no code in this
        # file ever sets on the sender, and nothing connects to this slot —
        # likely dead/unfinished code; verify before use.
        sender = self.sender()
        row = sender.property("row")
        del self.movieList[row]
        self.handleData()
class About(QDialog):
    """Modal "About" dialog: version, project link, license link, copyright."""

    def __init__(self, parent=None, flags=Qt.WindowFlags()):
        super().__init__(parent=parent, flags=flags)
        self.initUI()

    def initUI(self):
        """Build the four static labels in a vertical grid."""
        self.setWindowTitle("关于")
        self.resize(100, 150)
        layout = QGridLayout()
        version_label = QLabel("版本:0.1.0", self)
        project_link = QLabel(self)
        project_link.setOpenExternalLinks(True)
        project_link.setText("<a style='color:DeepSkyBlue;' \
href='https://github.com/Hooooot/doubanSpider'>项目地址</a>")
        license_link = QLabel(self)
        license_link.setOpenExternalLinks(True)
        license_link.setText("<a style='color:DeepSkyBlue;' \
href='https://github.com/Hooooot/doubanSpider/blob/master/LICENSE'>MIT许可证</a>")
        copyright_label = QLabel("Copyright (c) 2019 Hooooot", self)
        layout.addWidget(version_label, 1, 1)
        layout.addWidget(project_link, 2, 1)
        layout.addWidget(license_link, 3, 1)
        layout.addWidget(copyright_label, 4, 1)
        self.setLayout(layout)
if __name__ == "__main__":
    # Standard Qt bootstrap: one QApplication, show the main window, run the
    # event loop and propagate its exit code to the shell.
    app = QApplication(sys.argv)
    ex = MainWindow()
    ex.show()
    sys.exit(app.exec_())
| 34.6 | 79 | 0.589355 | import pickle
import sys
import time
from PyQt5.QtCore import Qt, QThread, QTimer, pyqtSignal
from PyQt5.QtGui import QIntValidator, QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import (QAction, QApplication, QCheckBox, QComboBox,
QDialog, QGridLayout, QLabel, QLineEdit,
QMainWindow, QMenu, QMenuBar, QMessageBox,
QProgressBar, QPushButton, QTableView, QWidget)
from douban import Movie
def readData(path="./jinwu/dump.dat"):
    """Load the pickled movie list dumped by the spider.

    Parameters
    ----------
    path : str, optional
        Location of the pickle dump. Defaults to the original hard-coded
        ``./jinwu/dump.dat`` so existing callers are unaffected.

    Returns
    -------
    The unpickled object (a list of ``douban.Movie`` records, per the caller).
    """
    # SECURITY NOTE: pickle.load executes arbitrary code from the file.
    with open(path, "rb") as f:
        return pickle.load(f)
movies = readData()
class TimerThread(QThread):
    """Placeholder worker thread; ``run`` is intentionally a no-op."""

    def __init__(self, parent=None):
        # Fixed: __init__ must not return a value.
        super().__init__(parent=parent)

    def run(self):
        pass
class MainWindow(QMainWindow):
    """Main window: filter combos, movie-count field, start button, status bar
    and an elapsed-time timer."""

    def __init__(self):
        super().__init__()
        # Filter selectors and input controls.
        self.typeCombo = QComboBox(self)
        self.areaCombo = QComboBox(self)
        self.yearCombo = QComboBox(self)
        self.tip = QLabel("最大电影获取数量:", self)
        self.btn = QPushButton("开始", self)
        self.amount = QLineEdit(self)
        self.showAllDataCheck = QCheckBox("显示所有电影信息", self)
        # Populated later (spider thread, results table, dialogs, timer).
        self.spiderThread = None
        self.tableWindow = None
        self.status = self.statusBar()
        self.menu = self.menuBar()
        self.aboutWindow = None
        self.progress = None
        self.usedTimeS = 0
        self.usedTimeM = 0
        self.timeLabel = None
        self.timer = None
        self.initUI()

    def initUI(self):
        """Build all widgets, menus and signal connections."""
        gridLayout = QGridLayout()
        widget = QWidget()
        self.setWindowTitle("Test Window")
        self.setCentralWidget(widget)
        self.resize(400, 300)
        # Static filter choices.
        types = ["全部类型", "剧情", "喜剧", "动作", "爱情", "科幻", "动画", "悬疑",
                 "惊悚", "恐怖", "犯罪", "同性", "音乐", "歌舞", "传记",
                 "历史", "战争", "西部", "奇幻", "冒险", "灾难", "武侠"]
        areas = ["全部地区", "中国大陆", "美国", "香港", "台湾", "日本", "韩国", "英国",
                 "法国", "德国", "意大利", "西班牙", "印度", "泰国", "俄罗斯",
                 "伊朗", "加拿大", "澳大利亚", "爱尔兰", "瑞典", "巴西", "丹麦"]
        years = ["全部年代", "2019", "2018", "2010年代", "2000年代", "90年代",
                 "80年代", "70年代", "60年代", "更早"]
        self.typeCombo.addItems(types)
        self.areaCombo.addItems(areas)
        self.yearCombo.addItems(years)
        self.btn.setToolTip("点击开始爬取")
        self.tip.setAlignment(Qt.AlignRight)
        advanced = self.menu.addMenu("高级")
        concurrent = advanced.addAction("并发")
        concurrent.setCheckable(True)
        concurrent.triggered.connect(self.concurrentCheck)
        # NOTE(review): `help` shadows the builtin; local-only, harmless here.
        help = self.menu.addMenu("帮助")
        help.addAction("关于")
        help.triggered[QAction].connect(self.about)
        # Restrict the amount field to 0-999 (0 means "no limit").
        intOnly = QIntValidator()
        intOnly.setRange(0, 999)
        self.amount.setValidator(intOnly)
        self.amount.setPlaceholderText("0~999(0:不限制)")
        self.amount.setMaxLength(3)
        self.amount.setMaximumWidth(120)
        self.showAllDataCheck.setCheckState(Qt.Checked)
        self.btn.clicked.connect(self.btnClicked)
        # Pressing Enter in the amount field behaves like clicking Start.
        self.amount.returnPressed.connect(lambda: self.btn.clicked.emit())
        gridLayout.addWidget(self.typeCombo, 1, 1)
        gridLayout.addWidget(self.areaCombo, 1, 2)
        gridLayout.addWidget(self.yearCombo, 1, 3)
        gridLayout.addWidget(self.tip, 2, 1)
        gridLayout.addWidget(self.amount, 2, 2)
        gridLayout.addWidget(self.showAllDataCheck, 2, 3)
        gridLayout.addWidget(self.btn, 3, 2)
        self.status.messageChanged.connect(self.status.showMessage)
        self.status.messageChanged.emit("Ready!")
        self.centralWidget().setLayout(gridLayout)
        # Permanent elapsed-time label on the right of the status bar.
        self.timeLabel = QLabel("已用时:" + str(self.usedTimeS) + "秒")
        self.status.addPermanentWidget(self.timeLabel)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.updateUsedTime)

    def updateUsedTime(self):
        """Timer slot (1 s tick): roll seconds into minutes and refresh label."""
        self.usedTimeS += 1
        if self.usedTimeS >= 60:
            self.usedTimeM += 1
            self.usedTimeS = 0
        if self.usedTimeM == 0:
            self.timeLabel.setText("已用时:" + str(self.usedTimeS) + " 秒")
        else:
            self.timeLabel.setText("已用时:" + str(self.usedTimeM) + " 分 " +
                                   str(self.usedTimeS) + " 秒")
    def btnClicked(self):
        """Start-button slot: show the results table and restart the timer.
        NOTE(review): the "(200/200)" status text is a hard-coded stub."""
        if self.showAllDataCheck.isChecked():
            self.tableWindow = TableWidget(movies)
            self.tableWindow.show()
        self.status.showMessage("正在获取电影详细信息:(200/200)")
        self.usedTimeM = 0
        self.usedTimeS = 0
        self.timer.start(1000)

    def about(self, qAction):
        """Open the modal About dialog."""
        self.aboutWindow = About()
        self.aboutWindow.setModal(True)
        self.aboutWindow.show()

    def concurrentCheck(self, check):
        """Debug slot for the "并发" toggle: echo the checked state."""
        QMessageBox.information(self, "title", str(check))
class TableWidget(QWidget):
    """Window showing all movie details in a QTableView.

    Parameters:
        movieList: list of movies (douban.Movie)
        parent: parent window, default None
    """

    def __init__(self, movieList, parent=None):
        super(TableWidget, self).__init__(parent)
        self.table = QTableView(self)
        self.movieList = movieList
        self.initUI()

    def initUI(self):
        """Populate the table and lay it out in a single-cell grid."""
        self.handleData()
        self.setWindowTitle("所有电影详细信息")
        self.resize(1200, 600)
        gridLayout = QGridLayout()
        gridLayout.addWidget(self.table, 1, 1)
        self.setLayout(gridLayout)

    def handleData(self):
        """(Re)build the table model; column 9 holds a "查看评论" link widget."""
        model = QStandardItemModel()
        self.table.setModel(model)
        model.setHorizontalHeaderLabels(["电影名", "评分", "投票人数", "评分百分比(依次5星~1星)",
                                         "地区", "上映年份", "类型", "长度(分钟)", "链接",
                                         "操作"])
        self.table.setColumnWidth(0, 150)
        self.table.setColumnWidth(1, 50)
        self.table.setColumnWidth(2, 75)
        self.table.setColumnWidth(3, 175)
        self.table.setColumnWidth(4, 175)
        self.table.setColumnWidth(5, 75)
        self.table.setColumnWidth(6, 125)
        self.table.setColumnWidth(7, 75)
        self.table.setColumnWidth(8, 150)
        self.table.setColumnWidth(9, 75)
        for row, movie in enumerate(self.movieList):
            # Assumes douban.Movie supports integer indexing for its first
            # nine display fields and exposes a `url` attribute — TODO confirm.
            for column in range(9):
                item = QStandardItem(movie[column])
                item.setTextAlignment(Qt.AlignCenter)
                model.setItem(row, column, item)
            delete = QLabel(self)
            delete.setText("<a style='color:DimGray;' \
href='" + movie.url + "'>查看评论</a>")
            delete.setAlignment(Qt.AlignCenter)
            delete.setOpenExternalLinks(True)
            self.table.setIndexWidget(model.index(row, 9), delete)

    def deleteClicked(self):
        # NOTE(review): nothing connects to this slot and no widget sets a
        # "row" property — likely dead/unfinished code; verify before use.
        sender = self.sender()
        row = sender.property("row")
        del self.movieList[row]
        self.handleData()
class About(QDialog):
    """Modal "About" dialog: version, project link, license link, copyright."""

    def __init__(self, parent=None, flags=Qt.WindowFlags()):
        super().__init__(parent=parent, flags=flags)
        self.initUI()

    def initUI(self):
        """Build the four static labels in a vertical grid."""
        self.setWindowTitle("关于")
        self.resize(100, 150)
        layout = QGridLayout()
        version_label = QLabel("版本:0.1.0", self)
        project_link = QLabel(self)
        project_link.setOpenExternalLinks(True)
        project_link.setText("<a style='color:DeepSkyBlue;' \
href='https://github.com/Hooooot/doubanSpider'>项目地址</a>")
        license_link = QLabel(self)
        license_link.setOpenExternalLinks(True)
        license_link.setText("<a style='color:DeepSkyBlue;' \
href='https://github.com/Hooooot/doubanSpider/blob/master/LICENSE'>MIT许可证</a>")
        copyright_label = QLabel("Copyright (c) 2019 Hooooot", self)
        layout.addWidget(version_label, 1, 1)
        layout.addWidget(project_link, 2, 1)
        layout.addWidget(license_link, 3, 1)
        layout.addWidget(copyright_label, 4, 1)
        self.setLayout(layout)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the app, show the window, run the loop.
    app = QApplication(sys.argv)
    ex = MainWindow()
    ex.show()
    sys.exit(app.exec_())
| true | true |
f73a07dee8e21b1cca6a2cbac3105a06c33fa974 | 32,654 | py | Python | src/poretitioner/hdf5/hdf5.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 2 | 2021-03-11T21:27:16.000Z | 2021-03-18T00:58:22.000Z | src/poretitioner/hdf5/hdf5.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 12 | 2021-02-19T19:36:05.000Z | 2021-03-24T15:38:02.000Z | src/poretitioner/hdf5/hdf5.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | null | null | null | """
===================
hdf5.py
===================
The Hierarchical Data Format version 5 (HDF5) defines
a file format for storing and organizing massive amounts of
hierarchical data.
This module attempts to encapsulate the rich features of HDF5
alongside your favorite python3.7+ constructs
(e.g dataclasses)
[1] - https://en.wikipedia.org/wiki/Hierarchical_Data_Format
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from logging import Logger, getLogger
from typing import (
Any,
Dict,
Generic,
Iterable,
Mapping,
Optional,
Protocol,
Type,
TypedDict,
TypeVar,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from h5py._hl.base import Empty
from .exceptions import HDF5_GroupSerializationException, HDF5_SerializationException
# Generics
T = TypeVar("T")
S = TypeVar("S")
class NumpyArrayLike(np.ndarray):
    """This class represents a numpy array with extra attributes and functionality.

    Subclasses of NumpyArrayLike can be treated exactly like numpy arrays
    computationally.

    By default, we serialize the instance attributes (``self.__dict__``) alone.
    For more fine-grained control over what information is stored during
    serialization/pickling, implementers should override `serialize_info`
    and `deserialize_from_info`.
    """

    def __new__(cls, data: Union[np.ndarray, NumpyArrayLike]):
        # ndarray subclasses are created via __new__ + view-casting, not
        # __init__ (standard numpy subclassing pattern).
        obj = np.copy(data).view(
            cls
        )  # Optimization: Consider not making a copy, this is more error prone though: np.asarray(data).view(cls)
        return obj

    def serialize_info(self, **kwargs) -> Dict:
        """Creates a dictionary describing the signal and its attributes.

        Keys come from **kwargs; for each key, an attribute already set on
        self wins over the kwargs value.

        Returns
        -------
        Dict
            A serialized set of attributes.
        """
        # When serializing, copy over any existing attributes already in self, and
        # any that don't exist in self get taken from kwargs.
        existing_info = self.__dict__
        info = {key: getattr(self, key, kwargs.get(key)) for key in kwargs.keys()}
        return {**info, **existing_info}

    def deserialize_from_info(self, info: Dict):
        """Sets attributes on an object from a serialized dict.

        Parameters
        ----------
        info : Dict
            Dictionary of attributes to set after deserialization.
        """
        for name, value in info.items():
            setattr(self, name, value)

    # Multiprocessing and Dask require pickling (i.e. serializing) their inputs.
    # By default, this will drop all our custom class data.
    # https://stackoverflow.com/questions/26598109/preserve-custom-attributes-when-pickling-subclass-of-numpy-array
    def __reduce__(self):
        reconstruct, arguments, object_state = super().__reduce__()
        # Create a custom state to pass to __setstate__ when this object is deserialized.
        info = self.serialize_info()
        new_state = object_state + (info,)
        # Return a tuple that replaces the parent's __setstate__ tuple with our own
        return (reconstruct, arguments, new_state)

    def __setstate__(self, state):
        # Our custom info dict was appended last by __reduce__; restore it,
        # then hand the untouched ndarray state back to numpy.
        info = state[-1]
        self.deserialize_from_info(info)
        # Call the parent's __setstate__ with the other tuple elements.
        super().__setstate__(state[0:-1])
####################################
### Fast5 Helpers ###
####################################
# NOTE: Sadly, many of these can't be used until "RuntimeError: Unable to create attribute (object header message is too large)" https://github.com/h5py/h5py/issues/1855
# The goal is to provide high-level data-class representations of HDF5 objects, so users can just describe their structures as python dataclasses instead of finagling with h5py.
# Unfortunately, there's currently a bug "RuntimeError: Unable to create attribute (object header message is too large)" that can only be fixed by delving into the low-level API ()
def hdf5_dtype(object: Any) -> Optional[np.dtype]:
    """Return the dtype h5py should use when storing `object`, if any.

    Strings get a fixed-length h5py string dtype (h5py strings carry an
    assumed encoding, e.g. "utf-8" [1, 2]); numpy-like values reuse their own
    `dtype`; anything else returns None so h5py can infer the type itself.

    [1] - https://docs.h5py.org/en/stable/strings.html#strings
    [2] - https://docs.h5py.org/en/stable/special.html?highlight=string_dtype#variable-length-strings

    Parameters
    ----------
    object : Any
        Some object you want the dtype for if it's necessary, but are fine
        not having one if it's not.

    Returns
    -------
    Optional[np.dtype]
        The numpy datatype for a string or numpy-like object, None otherwise.
    """
    if isinstance(object, str):
        return h5py.string_dtype(length=len(object))
    # getattr covers both the hasattr check and the None fallback:
    # numpy-like objects carry their own dtype, everything else yields None.
    return getattr(object, "dtype", None)
def get_class_for_name(name: str, module_name: str = __name__) -> Type:
    """Resolve a class object by name from a module.

    Tread carefully: this is really only safe for dataclasses with known,
    stable interfaces.

    Parameters
    ----------
    name : str
        Name of the class we're trying to resolve.
    module_name : str, optional
        Which module to resolve it from, by default __name__.

    Returns
    -------
    Type
        The resolved class object (raises AttributeError if absent).
    """
    import importlib

    module = importlib.import_module(module_name)
    return getattr(module, name)
class HasFast5(Protocol):
    """Structural type for objects that expose an open Fast5 handle via `f5`."""

    f5: Union[Fast5File, HDF5_Group]
###########################################################
#
# HDF5 Base Types
#
###########################################################
# Note: We never create or instantiate AttributeManagers directly, instead we borrow its interface.
# 3 Laws to keep in mind with Attributes:
#
#
# 1) They may be created from any scalar or NumPy array
#
# 2) Each attribute should be small (generally < 64k)
#
# 3) There is no partial I/O (i.e. slicing); the entire attribute must be read.
#
# https://docs.h5py.org/en/stable/high/attr.html
# Attrs are really just mappings from names to data/objects.
HDF5_Attribute_Objects = Mapping[str, Optional[Any]]
class IsAttr(Protocol):
    """A special protocol for objects that are just meant to be set as data
    attributes, and don't need any special HDF5 consideration (e.g. a class
    that just needs to store a few numbers).
    """

    def as_attr(self) -> np.dtype:
        # Convert self to a value h5py can store as an attribute.
        ...

    def from_attr(self, attr) -> IsAttr:
        # Rebuild an instance from a stored attribute value.
        ...
class HDF5IsAttr(IsAttr):
    """Marker base class: objects convertible to/from a single HDF5 attribute.

    `HDF5_AttributeHaving.create_attr` special-cases instances of this class.
    """

    def as_attr(self) -> np.dtype:
        ...

    def from_attr(self, attr) -> IsAttr:
        ...
class HasAttrs(Protocol):
    """Protocol for objects exposing HDF5-style attribute access:
    reading the attribute manager, creating attributes, and deserializing
    attributes back into Python objects."""

    def get_attrs(self) -> HDF5_Attributes:
        # Return the underlying attribute manager.
        ...

    def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):
        """Adds an attribute to the current object.
        Any existing attribute with this name will be overwritten.

        Parameters
        ----------
        name : str
            Name of the attribute.
        value : Optional[Any]
            Value of the attribute.
        """
        ...

    def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):
        """Adds multiple attributes to the current object.
        Any existing attribute with the names in attrs will be overwritten.

        NOTE: the docstring is this stub's entire body (no trailing `...`),
        which is syntactically valid for a Protocol member.

        Parameters
        ----------
        attrs :
            Name of the attribute.
        value : Optional[Any]
            Value of the attribute.
        """

    def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:
        """Creates an object from an attribute (if one could be made).

        # TODO: Plugin Register via Plugins

        Parameters
        ----------
        name : str
            Name of the attribute.

        Returns
        ----------
        An instantiated object represented by this attr, or None if one couldn't be found.
        """
        ...

    def objects_from_attrs(
        self, attrs: HDF5_Attributes, log: Optional[Logger] = None
    ) -> HDF5_Attribute_Objects:
        """Creates mapping of attribute names to their serialized objects (if one could be made).

        Parameters
        ----------
        name : str
            Name of the attribute.

        Returns
        ----------
        An instantiated object represented by this attr, or None if one couldn't be found.
        """
        ...
class HDF5_AttributeHaving(HasAttrs):
    """Concrete implementation of the `HasAttrs` protocol backed by an h5py
    `AttributeManager`: creation, reading and copying of HDF5 attributes,
    with None/zero-length values mapped to h5py's special Empty type."""

    def __init__(self, has_attrs: Optional[HasAttrs]):
        super().__init__()
        # Borrow the attribute manager from `has_attrs` if given, otherwise
        # fall back to whatever this object itself exposes via get_attrs().
        self.attrs = self.get_attrs() if has_attrs is None else has_attrs.get_attrs()

    def get_attrs(self) -> HDF5_Attributes:
        return self.attrs

    def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):
        """Adds an attribute to the current object.

        WARNING: Any existing attribute with this name will be overwritten!

        Coerces `value` to the special 'Empty' type used by HDF5 if the value
        provided is zero-length or None. For more on Attributes and Empty
        types, see [1, 2].

        [1] - https://docs.h5py.org/en/stable/high/attr.html#attributes
        [2] - https://docs.h5py.org/en/stable/high/dataset.html?highlight=Empty#creating-and-reading-empty-or-null-datasets-and-attributes

        Parameters
        ----------
        name : str
            Name of the attribute.
        value : Optional[Any]
            Value of the attribute; coerced to Empty when None or zero-length.
        """
        # Fixed: check __len__ instead of the old `value == ""` equality —
        # comparing a numpy array to "" yields an element-wise array, which is
        # ambiguous in boolean context. A zero-length check also subsumes the
        # "" case, since len("") == 0.
        if value is None or (hasattr(value, "__len__") and len(value) < 1):
            empty = h5py.Empty(dtype=np.uint8)
            self.get_attrs().create(name, empty)
        elif isinstance(value, HDF5IsAttr):
            attr_value = value.as_attr()
            # Fixed: store the converted attr_value. The original stored the
            # raw `value` while using attr_value's dtype, which could mismatch.
            self.get_attrs().create(name, attr_value, dtype=hdf5_dtype(attr_value))
        else:
            self.get_attrs().create(name, value, dtype=hdf5_dtype(value))

    def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):
        """Adds every (name, value) pair in `attrs`, overwriting same names."""
        for attr_name, attr_value in attrs.items():
            self.create_attr(attr_name, attr_value, log=log)

    def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:
        """Deserializes the named attribute, or returns None if it's missing.

        Empty attributes (shape is None) deserialize to "".
        """
        log = log if log is not None else getLogger()
        try:
            attr_value = self.get_attrs()[name]
        except (KeyError, AttributeError):
            # Fixed: a missing key raises KeyError from h5py mappings; the
            # original only caught AttributeError and would propagate it.
            log.warning(
                f"Could not find an attribute with the name '{name}' on object {self!r}. Returning None"
            )
            return None

        if attr_value.shape is None:
            """
            From the Docs:

            An empty dataset has shape defined as None,
            which is the best way of determining whether a dataset is empty or not.
            An empty dataset can be “read” in a similar way to scalar datasets.

            [1] - https://docs.h5py.org/en/stable/high/dataset.html?highlight=Empty#creating-and-reading-empty-or-null-datasets-and-attributes
            """
            return ""
        return bytes.decode(bytes(attr_value), encoding="utf-8")

    def objects_from_attrs(self, log: Optional[Logger] = None) -> HDF5_Attribute_Objects:
        """Maps every attribute name on this object to its deserialized value."""
        objects: HDF5_Attribute_Objects = {
            attr_name: self.object_from_attr(attr_name, log=log)
            for attr_name in self.get_attrs().keys()
        }
        return objects

    def copy_attr(self, name: str, source: HDF5_AttributeHaving):
        """Copy a single attribute from a source.

        This will overwrite any attribute of this name, if one exists.

        Parameters
        ----------
        name : str
            Which attribute to copy.
        source : HDF5_AttributeHaving
            Which attribute-haver to copy from.
        """
        self.create_attr(name, source.get_attrs()[name])

    def copy_all_attrs(self, source: HDF5_AttributeHaving):
        """Copy all attributes from a source.

        This will overwrite any attributes sharing the same names, if any exist.

        Parameters
        ----------
        source : HDF5_AttributeHaving
            Which attribute-haver to copy all attributes from.
        """
        for name in source.get_attrs().keys():
            self.copy_attr(name, source)
class HDF5_ParentHaving:
    """Mixin exposing the parent HDF5 group of a wrapped h5py object.

    NOTE(review): `self.parent` inside this property refers back to the
    property itself unless a class earlier in the MRO (e.g. h5py.Dataset,
    which defines its own `parent`) shadows it; on a bare subclass this
    recurses infinitely. Presumably the wrapped h5py object's parent was
    intended — confirm before relying on this mixin alone.
    """

    @property
    def parent(self) -> HDF5_Group:
        return HDF5_Group(self.parent)
class HDF5_Dataset(h5py.Dataset, NumpyArrayLike, HDF5_AttributeHaving, HDF5_ParentHaving):
    """Proxy around an h5py Dataset that can be treated like a numpy array
    and delegates unknown attributes to the wrapped dataset.

    NOTE(review): when `dataset` is not already an HDF5_Dataset, __new__
    assigns `self = dataset` and then falls off the end, returning None —
    callers on that path get None instead of a wrapper. `return dataset`
    (or a real view-cast) was presumably intended; confirm.
    """

    def __new__(cls, dataset: NumpyArrayLike) -> HDF5_Dataset:
        # Idempotent wrap: an HDF5_Dataset passes straight through.
        if isinstance(dataset, HDF5_Dataset):
            return dataset
        self = dataset

    def __init__(self, dataset: h5py.Dataset):
        # Keep a handle to the wrapped dataset for attribute delegation.
        self._dataset = dataset

    def __getattr__(self, attrib: str):
        # Fall back to the wrapped h5py dataset for anything not defined here.
        return getattr(self._dataset, attrib)
class HDF5_Group(h5py.Group, HDF5_AttributeHaving, HDF5_ParentHaving):
    """Thin wrapper over h5py.Group that keeps navigation (`parent`,
    `require_group`, `require_dataset`) inside the wrapped HDF5_* types and
    delegates everything else to the underlying group."""

    def __new__(cls, group: Optional[h5py.Group]) -> HDF5_Group:
        # Idempotent wrap: an HDF5_Group passes straight through.
        if isinstance(group, HDF5_Group):
            return group
        hdf5_group = super().__new__(cls, group)
        hdf5_group._group = group
        return hdf5_group

    def __init__(self, group: Optional[h5py.Group]):
        if isinstance(group, HDF5_Group):
            # Already fully constructed via __new__'s pass-through path.
            return
        super().__init__(group.id)
        self._group = group

    @property
    def parent(self) -> HDF5_Group:
        # Wrap the underlying parent so chained navigation stays wrapped.
        return HDF5_Group(self._group.parent)

    def require_group(self, name: str):
        # Create-or-get the child group, returned wrapped.
        return HDF5_Group(self._group.require_group(name))

    def require_dataset(self, name, data, dtype, shape, **kwds):
        # NOTE(review): positional order here is (name, data, dtype, shape),
        # which differs from h5py's require_dataset(name, shape, dtype, ...);
        # callers must use this wrapper's order or keywords.
        return HDF5_Dataset(self._group.require_dataset(name, shape, data=data, dtype=dtype,**kwds))

    def __getattr__(self, attrib: str):
        # Delegate anything not defined here to the wrapped h5py group.
        return getattr(self._group, attrib)
class HDF5_Attributes(h5py.AttributeManager, HDF5_ParentHaving):
    """Wrapper over h5py.AttributeManager that delegates unknown attribute
    lookups to the wrapped manager."""

    def __init__(self, attrs: h5py.AttributeManager):
        # Hold the real manager; all behavior is delegated to it.
        self.attrs = attrs

    def __getattr__(self, attrib: str):
        return getattr(self.attrs, attrib)
HDF5_Type = Union[HDF5_Dataset, HDF5_Group, HDF5_Attributes]
class HDF5_Serializing(ABC):
    """Any object that can be HDF5-serialized.

    Don't instantiate this directly; subclass and implement `from_a`,
    `as_a` and `update`.
    """

    @classmethod
    @abstractmethod
    def from_a(cls, a: HDF5_Type, log: Optional[Logger] = None) -> HDF5_Serializing:
        """Creates an instance of this class (from) (a) HDF5_Type.

        Parameters
        ----------
        a : HDF5_Types
            Instance of an HDF5Type (e.g. a h5py.Group).
        log : Logger, optional
            Logger to use for information/warnings/debug

        Returns
        -------
        HDF5_Serializing
            An instance of this class with data derived from (a) HDF5_Type.

        Raises
        ------
        NotImplementedError
            This method wasn't implemented, but needs to be.
        """
        raise NotImplementedError(
            f"{cls!s} is missing an implementation for {HDF5_Serializing.from_a.__name__}"
        )

    @abstractmethod
    def as_a(
        self, a: HDF5_Type, parent: Optional[HDF5_Group] = None, log: Optional[Logger] = None
    ) -> HDF5_Type:
        """Returns this object, formatted (as) (a) given HDF5 type (thus the name).

        Parameters
        ----------
        a : HDF5_Types
            One of the HDF5 types we understand.
        parent : Optional[HDF5_Group]
            The parent group to which this object should be added/updated.
            If parent is None, we return the HDFS_Type proxy in isolation (e.g)
            not attached to the parent if it doesn't already have one. None by default.
        log : Logger, optional
            Logger to use for information/warnings/debug

        Returns
        -------
        HDF5_Type
            This object serialized to a given HDF5 type.

        Raises
        ------
        NotImplementedError
            This method wasn't implemented, but needs to be.
        """
        raise NotImplementedError(
            f"{self!s} is missing an implementation for {HDF5_Serializing.as_a.__name__}!"
        )

    @abstractmethod
    def update(self, log: Optional[Logger] = None):
        """Makes sure any changes have been reflected in the underlying object.

        Parameters
        ----------
        log : Optional[Logger], optional
            Logger to use, by default None

        Raises
        ------
        NotImplementedError
            This method wasn't implemented.
        """
        raise NotImplementedError(
            f"{self!s} is missing an implementation for {HDF5_Serializing.update.__name__}!"
        )
###########################################################
#
# HDF5 Groups
#
###########################################################
class HDF5_GroupSerializing(HDF5_Serializing, HDF5_AttributeHaving):
    """Objects adhering to the `HDF5_GroupSerializable` can be written to and
    read directly from hd5 Groups.
    """

    def name(self) -> str:
        """Group name that this object will be stored under.
        i.e. If this method returns "patrice_lmb", then a subsequent call to

        `self.as_group(Group("/Foo/bar/"))`

        Will return a group at /Foo/bar/patrice_lmb

        Be double-sure to override this if you want it to be anything other than the class name.

        Returns
        -------
        str
            Name to use in the Fast5 file.
        """
        return self.__class__.__name__

    def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
        """Stores and Returns this object as an HDF5 Group, rooted at the group passed in.
        This should be used to directly set the contents of an Hdf5 group.
        This method should also create the group named 'name' in the parent_group, if it doesn't already exist.

        class Baz(HDF5_GroupSerializable):
            def name(self):
                return "boop"
            # ...Implementation

        my_hdf5_file = h5py.File("/path/to/file")
        foo_group = my_hdf5_file.require_group("/foo")

        my_serial = Baz()
        baz_group = foo_group.require_group(my_serial.name()) # Make space in the file for Baz at f'/foo/{my_serial.name()}'
        my_serialized_group = my_serial.as_group(foo_group) # Sets "/foo/boop" group to the serialized group

        my_serialized_group # /boop group, rooted at /foo/

        Parameters
        ----------
        parent_group : h5py.Group
            Which group to store this group under. This doesn't necessarily have to be the root group of the file.

        Returns
        -------
        h5py.Group
            Group that stores a serialization of this instance.
        """
        ...

    @classmethod
    def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:
        """Serializes this object FROM an HDF5 Group.

        class Baz(HDF5_GroupSerializable):
            # ...Implementation

        my_hdf5_file = h5py.File("/path/to/file")
        baz_serialized_group = my_hdf5_file.require_group("/baz")

        baz = Baz.from_group(baz_serialized_group) # I now have an instance of Baz.

        Parameters
        ----------
        group : h5py.Group
            HDF5 Group that can be serialized into this instance.

        Returns
        -------
        HDF5_GroupSerializable
            Instance of an adherent to this protocol.
        """
        ...
class HDF5_GroupSerializable(HDF5_GroupSerializing):
    """Base class for objects that can be written to and
    read directly from hd5 Groups.

    Not meant to be instantiated directly. Instead, subclass and make sure your
    `as_group` implementation uses the group created by `super().as_group(...)`.

    NOTE: Make sure to call super().as_group(...)
    """

    def name(self) -> str:
        """Group name that this object will be stored under.
        i.e. If this method returns "patrice_lmb", then a subsequent call to

        `self.as_group(Group("/Foo/bar/"))`

        Will return a group at /Foo/bar/patrice_lmb

        Override this if you want it to be anything other than the class name.

        Returns
        -------
        str
            Name to use in the Fast5 file.
        """
        return self.__class__.__name__

    def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
        # NOTE(review): `new_group` is unused; the method assigns the Python
        # object itself into the parent h5py group and returns `self` rather
        # than the HDF5_Group the annotation promises — confirm intent.
        new_group = parent_group.require_group(self.name())
        parent_group[self.name()] = self
        # Note: This does nothing but register a group with the name 'name' in the parent group.
        # Implementers must now write their serialized instance to this group.
        return self

    @classmethod
    @abstractmethod
    def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:
        raise NotImplementedError(
            f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
        )

    def require_group_from_group(
        self, parent_group: HDF5_Group, log: Optional[Logger] = None
    ) -> HDF5_GroupSerializable:
        # NOTE(review): falls through and returns None despite the annotated
        # return type; `child_group` is computed but never returned.
        # child_group = parent_group.require_group(self.name())
        child_group = self.as_group(parent_group, log=log)

    @classmethod
    def from_a(cls, a: HDF5_Group, log: Logger) -> HDF5_Serializing:
        # Bridge the generic HDF5_Serializing API onto the group-specific one.
        return cls.from_group(parent_group=a, log=log)

    def as_a(self, a: HDF5_Type, log: Logger) -> HDF5_Type:
        return self.as_group(parent_group=a, log=log)

    def update(self, log: Optional[Logger] = None):
        # NOTE(review): reads self._group, which this class never sets —
        # only works on subclasses/instances that define it; confirm.
        self.as_a(self._group.parent, log=log)
class HDF5_GroupSerialiableDict(Dict[T, S], HDF5_GroupSerializable):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
my_group = parent_group.require_group(self.name())
"""Returns this object as an HDF5 Group."""
for field_name, field_value in self.items():
if isinstance(field_value, HDF5_GroupSerializable):
# This value is actually its own group.
# So we create a new group rooted at our dataclass's group
# And assign it the value of whatever the group of the value is.
field_value.as_a(my_group, log=log)
# my_group.require_group(field_name)
# my_group[field_name] = field_value
elif isinstance(field_value, HDF5_DatasetSerializable):
field_value.as_a(parent_group, log)
else:
my_group.create_attr(field_name, field_value)
return my_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(cls)
# First, copy over attrs:
for name, value in group.attrs.items():
object.__setattr__(my_instance, name, value)
# Then, copy over any datasets or groups.
for name, value in group.items():
if isinstance(value, h5py.Dataset):
# Assuming we're storing a numpy array as this dataset
buffer = np.empty(value.shape, dtype=value.dtype)
# Copies the values into our buffer
value.read_direct(buffer)
object.__setattr__(my_instance, name, NumpyArrayLike(buffer))
elif isinstance(value, h5py.Group):
# If it's a group, we have to do a little more work
# 1) Find the class described by the group
# 1.1) Verify that we actually know a class by that name. Raise an exception if we don't.
# 1.2) Verify that that class has a method to create an instance group a group.
# 2) Create a new class instance from that group
# 3) Set this object's 'name' field to the object we just created.
try:
ThisClass = get_class_for_name(name)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name} (group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
# assert get_class_for_name(name) and isinstance(), f"No class found that corresponds to group {name}! Make sure there's a corresponding dataclass named {name} in this module scope!"
try:
this_instance = ThisClass.from_group(value, log=log)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
object.__setattr__(my_instance, name, this_instance)
return my_instance
class HDF5_GroupSerialableDataclass(HDF5_GroupSerializable):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
"""Returns this object as an HDF5 Group."""
my_group: HDF5_Group = parent_group.require_group(self.name())
for field_name, field_value in vars(self).items():
if isinstance(field_value, HDF5_GroupSerializable):
# This value is actually its own group.
# So we create a new group rooted at our dataclass's group
# And assign it the value of whatever the group of the value is.
# new_group = my_group.require_group(field_name)
field_value.as_group(my_group)
elif isinstance(field_value, HDF5_DatasetSerializable):
field_value.as_a(my_group, log)
else:
my_group.create_attr(field_name, field_value)
return my_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(cls)
# First, copy over attrs:
my_instance.create_attrs(group.get_attrs())
# Then, copy over any datasets or groups.
for name, value in group.items():
if isinstance(value, h5py.Dataset):
# Assuming we're storing a numpy array as this dataset
buffer = np.empty(value.shape, dtype=value.dtype)
# Copies the values into our buffer
value.read_direct(buffer)
object.__setattr__(my_instance, name, buffer)
elif isinstance(value, h5py.Group):
# If it's a group, we have to do a little more work
# 1) Find the class described by the group
# 1.1) Verify that we actually know a class by that name. Raise an exception if we don't.
# 1.2) Verify that that class has a method to create an instance group a group.
# 2) Create a new class instance from that group
# 3) Set this object's 'name' field to the object we just created.
try:
ThisClass = get_class_for_name(name)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name} (group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
# assert get_class_for_name(name) and isinstance(), f"No class found that corresponds to group {name}! Make sure there's a corresponding dataclass named {name} in this module scope!"
try:
this_instance = ThisClass.from_group(value, log=log)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
object.__setattr__(my_instance, name, this_instance)
return my_instance
###########################################################
#
# HDF5 Datasets
#
###########################################################
class HDF5_DatasetSerializing(HDF5_Dataset, HDF5_Serializing):
"""Objects adhering to the `HDF5_GroupSerializable` can be written to and
read directly from hd5 Groups.
"""
def name(self) -> str:
"""Group name that this object will be stored under.
i.e. If this method returns "patrice_lmb", then a subsequent call to
`self.as_group(Group("/Foo/bar/"))`
Will return a group at /Foo/bar/patrice_lmb
Be double-sure to override this if you want it to be anything other than the class name.
Returns
-------
str
Name to use in the Fast5 file.
"""
return self.__class__.__name__
class HDF5_DatasetSerializable(HDF5_DatasetSerializing):
@classmethod
def from_a(
cls, a: Union[HDF5_Dataset, HDF5_Group], log: Optional[Logger] = None
) -> HDF5_DatasetSerializable:
# Assume A is the parent group
# Assuming we're storing a numpy array as this dataset
# Copies the values into our buffer
try:
buffer = np.empty(a.shape, dtype=a.dtype)
a.read_direct(buffer)
data = NumpyArrayLike(buffer)
return HDF5_DatasetSerializable(cls.__new__(cls, buffer))
except AttributeError as e:
log.error("Could not convert to HDF5_DatasetSerializable from: {a!r}")
raise e
# serialized = cls.__new__(cls)
return
def as_a(self, a: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:
dataset = HDF5_Dataset(a.require_dataset(self.name(), shape=self.shape, dtype=self.dtype))
return dataset
def update(self, log: Optional[Logger] = None):
self.as_a(self._group.parent, log=log)
class HDF5_DatasetSerialableDataclass(HDF5_DatasetSerializable):
def as_dataset(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:
log = log if log is not None else getLogger()
"""Returns this object as an HDF5 Group."""
dataset: HDF5_Dataset = super().as_a(parent_group)
dataset.create_attrs(vars(self))
# for field_name, field_value in vars(self).items():
# dataset.create_attr(field_name, field_value)
return dataset
@classmethod
def from_dataset(
cls, dataset: HDF5_Dataset, log: Optional[Logger] = None
) -> HDF5_DatasetSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(dataset)
# First, copy over attrs:
for name, value in dataset.objects_from_attrs():
object.__setattr__(my_instance, name, value)
return my_instance
| 35.962555 | 222 | 0.617597 | from __future__ import annotations
from abc import ABC, abstractmethod
from logging import Logger, getLogger
from typing import (
Any,
Dict,
Generic,
Iterable,
Mapping,
Optional,
Protocol,
Type,
TypedDict,
TypeVar,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from h5py._hl.base import Empty
from .exceptions import HDF5_GroupSerializationException, HDF5_SerializationException
T = TypeVar("T")
S = TypeVar("S")
class NumpyArrayLike(np.ndarray):
def __new__(cls, data: Union[np.ndarray, NumpyArrayLike]):
obj = np.copy(data).view(
cls
)
return obj
def serialize_info(self, **kwargs) -> Dict:
existing_info = self.__dict__
info = {key: getattr(self, key, kwargs.get(key)) for key in kwargs.keys()}
return {**info, **existing_info}
def deserialize_from_info(self, info: Dict):
for name, value in info.items():
setattr(self, name, value)
# Multiprocessing and Dask require pickling (i.e. serializing) their inputs.
# By default, this will drop all our custom class data.
# https://stackoverflow.com/questions/26598109/preserve-custom-attributes-when-pickling-subclass-of-numpy-array
def __reduce__(self):
reconstruct, arguments, object_state = super().__reduce__()
# Create a custom state to pass to __setstate__ when this object is deserialized.
info = self.serialize_info()
new_state = object_state + (info,)
# Return a tuple that replaces the parent's __setstate__ tuple with our own
return (reconstruct, arguments, new_state)
def __setstate__(self, state):
info = state[-1]
self.deserialize_from_info(info)
super().__setstate__(state[0:-1])
####################################
### Fast5 Helpers ###
####################################
# NOTE: Sadly, many of these can't be used until "RuntimeError: Unable to create attribute (object header message is too large)" https://github.com/h5py/h5py/issues/1855
def hdf5_dtype(object: Any) -> Optional[np.dtype]:
if isinstance(object, str):
return h5py.string_dtype(length=len(object))
elif hasattr(object, "dtype"):
# Is this already a numpy-like object with a dtype? If so, just use that.
return object.dtype
return None # For most cases, h5py can determine the dtype from the data itself.
def get_class_for_name(name: str, module_name: str = __name__) -> Type:
import importlib
this_module = importlib.import_module(module_name)
this_class = getattr(this_module, name)
return this_class
class HasFast5(Protocol):
f5: Union[Fast5File, HDF5_Group]
###########################################################
#
# HDF5 Base Types
#
###########################################################
# Note: We never create or instantiate AttributeManagers directly, instead we borrow its interface.
# 3 Laws to keep in mind with Attributes:
#
#
# 1) They may be created from any scalar or NumPy array
#
# 2) Each attribute should be small (generally < 64k)
#
# 3) There is no partial I/O (i.e. slicing); the entire attribute must be read.
#
# https://docs.h5py.org/en/stable/high/attr.html
# Attrs are really just mappings from names to data/objects.
HDF5_Attribute_Objects = Mapping[str, Optional[Any]]
class IsAttr(Protocol):
def as_attr(self) -> np.dtype:
...
def from_attr(self, attr) -> IsAttr:
...
class HDF5IsAttr(IsAttr):
def as_attr(self) -> np.dtype:
...
def from_attr(self, attr) -> IsAttr:
...
class HasAttrs(Protocol):
def get_attrs(self) -> HDF5_Attributes:
...
def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):
...
def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):
def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:
...
def objects_from_attrs(
self, attrs: HDF5_Attributes, log: Optional[Logger] = None
) -> HDF5_Attribute_Objects:
...
class HDF5_AttributeHaving(HasAttrs):
def __init__(self, has_attrs: Optional[HasAttrs]):
super().__init__()
self.attrs = self.get_attrs() if has_attrs is None else has_attrs.get_attrs()
def get_attrs(self) -> HDF5_Attributes:
return self.attrs
def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):
if value is None or value == "" or (hasattr(value, "__len__") and len(value) < 1):
empty = h5py.Empty(dtype=np.uint8)
self.get_attrs().create(name, empty)
elif isinstance(value, HDF5IsAttr):
attr_value = value.as_attr()
self.get_attrs().create(name, value, dtype=hdf5_dtype(attr_value))
else:
self.get_attrs().create(name, value, dtype=hdf5_dtype(value))
def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):
for attr_name, attr_value in attrs.items():
self.create_attr(attr_name, attr_value, log=log)
def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:
log = log if log is not None else getLogger()
try:
attr_value = self.get_attrs()[name]
except AttributeError:
log.warning(
f"Could not find an attribute with the name '{name}' on object {self!r}. Returning None"
)
return None
if attr_value.shape is None:
return ""
return bytes.decode(bytes(attr_value), encoding="utf-8")
def objects_from_attrs(self, log: Optional[Logger] = None) -> HDF5_Attribute_Objects:
objects: HDF5_Attribute_Objects = {
attr_name: self.object_from_attr(attr_name, log=log)
for attr_name in self.get_attrs().keys()
}
return objects
def copy_attr(self, name: str, source: HDF5_AttributeHaving):
self.create_attr(name, source.get_attrs()[name])
def copy_all_attrs(self, source: HDF5_AttributeHaving):
for name in source.get_attrs().keys():
self.copy_attr(name, source)
class HDF5_ParentHaving:
@property
def parent(self) -> HDF5_Group:
return HDF5_Group(self.parent)
class HDF5_Dataset(h5py.Dataset, NumpyArrayLike, HDF5_AttributeHaving, HDF5_ParentHaving):
def __new__(cls, dataset: NumpyArrayLike) -> HDF5_Dataset:
if isinstance(dataset, HDF5_Dataset):
return dataset
self = dataset
def __init__(self, dataset: h5py.Dataset):
self._dataset = dataset
def __getattr__(self, attrib: str):
return getattr(self._dataset, attrib)
class HDF5_Group(h5py.Group, HDF5_AttributeHaving, HDF5_ParentHaving):
def __new__(cls, group: Optional[h5py.Group]) -> HDF5_Group:
if isinstance(group, HDF5_Group):
return group
hdf5_group = super().__new__(cls, group)
hdf5_group._group = group
return hdf5_group
def __init__(self, group: Optional[h5py.Group]):
if isinstance(group, HDF5_Group):
return
super().__init__(group.id)
self._group = group
@property
def parent(self) -> HDF5_Group:
return HDF5_Group(self._group.parent)
def require_group(self, name: str):
return HDF5_Group(self._group.require_group(name))
def require_dataset(self, name, data, dtype, shape, **kwds):
return HDF5_Dataset(self._group.require_dataset(name, shape, data=data, dtype=dtype,**kwds))
def __getattr__(self, attrib: str):
return getattr(self._group, attrib)
class HDF5_Attributes(h5py.AttributeManager, HDF5_ParentHaving):
def __init__(self, attrs: h5py.AttributeManager):
self.attrs = attrs
def __getattr__(self, attrib: str):
return getattr(self.attrs, attrib)
HDF5_Type = Union[HDF5_Dataset, HDF5_Group, HDF5_Attributes]
class HDF5_Serializing(ABC):
@classmethod
@abstractmethod
def from_a(cls, a: HDF5_Type, log: Optional[Logger] = None) -> HDF5_Serializing:
raise NotImplementedError(
f"{cls!s} is missing an implementation for {HDF5_Serializing.from_a.__name__}"
)
@abstractmethod
def as_a(
self, a: HDF5_Type, parent: Optional[HDF5_Group] = None, log: Optional[Logger] = None
) -> HDF5_Type:
raise NotImplementedError(
f"{self!s} is missing an implementation for {HDF5_Serializing.as_a.__name__}!"
)
@abstractmethod
def update(self, log: Optional[Logger] = None):
raise NotImplementedError(
f"{self!s} is missing an implementation for {HDF5_Serializing.update.__name__}!"
)
###########################################################
#
# HDF5 Groups
#
###########################################################
class HDF5_GroupSerializing(HDF5_Serializing, HDF5_AttributeHaving):
def name(self) -> str:
return self.__class__.__name__
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
...
@classmethod
def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:
...
class HDF5_GroupSerializable(HDF5_GroupSerializing):
def name(self) -> str:
return self.__class__.__name__
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
new_group = parent_group.require_group(self.name())
parent_group[self.name()] = self
# Note: This does nothing but register a group with the name 'name' in the parent group.
# Implementers must now write their serialized instance to this group.
return self
@classmethod
@abstractmethod
def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:
raise NotImplementedError(
f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
)
def require_group_from_group(
self, parent_group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerializable:
# child_group = parent_group.require_group(self.name())
child_group = self.as_group(parent_group, log=log)
@classmethod
def from_a(cls, a: HDF5_Group, log: Logger) -> HDF5_Serializing:
return cls.from_group(parent_group=a, log=log)
def as_a(self, a: HDF5_Type, log: Logger) -> HDF5_Type:
return self.as_group(parent_group=a, log=log)
def update(self, log: Optional[Logger] = None):
self.as_a(self._group.parent, log=log)
class HDF5_GroupSerialiableDict(Dict[T, S], HDF5_GroupSerializable):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
my_group = parent_group.require_group(self.name())
for field_name, field_value in self.items():
if isinstance(field_value, HDF5_GroupSerializable):
# This value is actually its own group.
# So we create a new group rooted at our dataclass's group
field_value.as_a(my_group, log=log)
elif isinstance(field_value, HDF5_DatasetSerializable):
field_value.as_a(parent_group, log)
else:
my_group.create_attr(field_name, field_value)
return my_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(cls)
for name, value in group.attrs.items():
object.__setattr__(my_instance, name, value)
for name, value in group.items():
if isinstance(value, h5py.Dataset):
buffer = np.empty(value.shape, dtype=value.dtype)
# Copies the values into our buffer
value.read_direct(buffer)
object.__setattr__(my_instance, name, NumpyArrayLike(buffer))
elif isinstance(value, h5py.Group):
# If it's a group, we have to do a little more work
# 1.2) Verify that that class has a method to create an instance group a group.
# 2) Create a new class instance from that group
# 3) Set this object's 'name' field to the object we just created.
try:
ThisClass = get_class_for_name(name)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name} (group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
# assert get_class_for_name(name) and isinstance(), f"No class found that corresponds to group {name}! Make sure there's a corresponding dataclass named {name} in this module scope!"
try:
this_instance = ThisClass.from_group(value, log=log)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
object.__setattr__(my_instance, name, this_instance)
return my_instance
class HDF5_GroupSerialableDataclass(HDF5_GroupSerializable):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
my_group: HDF5_Group = parent_group.require_group(self.name())
for field_name, field_value in vars(self).items():
if isinstance(field_value, HDF5_GroupSerializable):
# And assign it the value of whatever the group of the value is.
# new_group = my_group.require_group(field_name)
field_value.as_group(my_group)
elif isinstance(field_value, HDF5_DatasetSerializable):
field_value.as_a(my_group, log)
else:
my_group.create_attr(field_name, field_value)
return my_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(cls)
# First, copy over attrs:
my_instance.create_attrs(group.get_attrs())
# Then, copy over any datasets or groups.
for name, value in group.items():
if isinstance(value, h5py.Dataset):
# Assuming we're storing a numpy array as this dataset
buffer = np.empty(value.shape, dtype=value.dtype)
value.read_direct(buffer)
object.__setattr__(my_instance, name, buffer)
elif isinstance(value, h5py.Group):
# 1) Find the class described by the group
# 1.1) Verify that we actually know a class by that name. Raise an exception if we don't.
try:
ThisClass = get_class_for_name(name)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name} (group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
try:
this_instance = ThisClass.from_group(value, log=log)
except AttributeError as e:
serial_exception = HDF5_GroupSerializationException(
f"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.",
e,
group=value,
)
log.exception(serial_exception.msg, serial_exception)
raise serial_exception
object.__setattr__(my_instance, name, this_instance)
return my_instance
###########################################################
#
# HDF5 Datasets
#
###########################################################
class HDF5_DatasetSerializing(HDF5_Dataset, HDF5_Serializing):
def name(self) -> str:
return self.__class__.__name__
class HDF5_DatasetSerializable(HDF5_DatasetSerializing):
@classmethod
def from_a(
cls, a: Union[HDF5_Dataset, HDF5_Group], log: Optional[Logger] = None
) -> HDF5_DatasetSerializable:
# Assume A is the parent group
# Assuming we're storing a numpy array as this dataset
try:
buffer = np.empty(a.shape, dtype=a.dtype)
a.read_direct(buffer)
data = NumpyArrayLike(buffer)
return HDF5_DatasetSerializable(cls.__new__(cls, buffer))
except AttributeError as e:
log.error("Could not convert to HDF5_DatasetSerializable from: {a!r}")
raise e
return
def as_a(self, a: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:
dataset = HDF5_Dataset(a.require_dataset(self.name(), shape=self.shape, dtype=self.dtype))
return dataset
def update(self, log: Optional[Logger] = None):
self.as_a(self._group.parent, log=log)
class HDF5_DatasetSerialableDataclass(HDF5_DatasetSerializable):
def as_dataset(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:
log = log if log is not None else getLogger()
dataset: HDF5_Dataset = super().as_a(parent_group)
dataset.create_attrs(vars(self))
return dataset
@classmethod
def from_dataset(
cls, dataset: HDF5_Dataset, log: Optional[Logger] = None
) -> HDF5_DatasetSerialableDataclass:
log = log if log is not None else getLogger()
if not log:
log = getLogger()
my_instance = cls.__new__(dataset)
for name, value in dataset.objects_from_attrs():
object.__setattr__(my_instance, name, value)
return my_instance
| true | true |
f73a0832b9fd89b058c81ae2b61ab5e1e7aac952 | 4,435 | py | Python | screen.py | devendra-beep/screen | 2cd8b86cc2a97cbc2b0bea33998e076054a2eca1 | [
"MIT"
] | null | null | null | screen.py | devendra-beep/screen | 2cd8b86cc2a97cbc2b0bea33998e076054a2eca1 | [
"MIT"
] | null | null | null | screen.py | devendra-beep/screen | 2cd8b86cc2a97cbc2b0bea33998e076054a2eca1 | [
"MIT"
] | null | null | null |
#author::devendra
import os
import shutil
l1="*" * 100
l2="-" * 100
print(l1)
print(l1)
print(l2)
print(l2)
print(" ###### ###### ##### ###### ##### ## # ")
print(" # # # # # # # # # ")
print(" ###### # ### ###### ##### # # # ")
print(" # # # # # # # # # ")
print(" ###### ###### # # ###### ##### # ## ")
print(" ")
print(l2)
print(l2)
print(l1)
print(l1)
print("\n")
print("Folder having Log Files")
log_f = input()
log_f=log_f.strip()
print("Approx Molecules")
ml=input()
mol=ml.strip()
print("Prefix")
prefx=input()
prefix=prefx.strip()
print("Suffix")
suffx=input()
suffix=suffx.strip()
pro_f="Output"
pro_f=pro_f.strip()
print("Name of output folder")
text_f=input()
text_f=text_f.strip()
os.mkdir(text_f)
while(True):
print("\n")
print("Press 0/1/2/3 -> Exit/Binding_Energy/Copy_pdbqt_for_given_binding_energy/Binding_Energy_of_All")
#print("\n")
nu=input("Choose::")
nu=nu.strip()
n=int(nu)
if(n==0):
break
elif(n==1):
name="Binding.txt"
out = open(name, "w")
print("Binding Energy Value")
e_value=input()
e_value=str(e_value.strip())
value=float(e_value)
l = len(mol)
d1=log_f+prefix
no_of_mol = int(mol)
for i in range(1,no_of_mol):
ll = l - len(str(i))
s = ""
s = ll * "0"
st = s + str(i)
d2=""
d2 = d1+st+suffix
if(os.path.isfile(d2)):
new_f=open(d2)
liness=new_f.readlines()
xx=0
for line in liness:
xx=xx+1
if(xx==28):
h = list(line.split())
vv = h[1]
vvv = float(vv)
if(vvv < value):
out.write("B-Energy in Ligand "+str(i)+" -> "+vv)
out.write("\n")
new_f.close()
out.close()
print("Done")
#shutil.copy(name, text_f)
elif(n==2):
list_values=[]
print("Binding Energy Value")
e_val=input()
e_value=str(e_val)
val=float(e_value)
lm = len(mol)
combine=log_f+prefix
no_ = int(mol)
for tt in range(1,no_):
lll = lm - len(str(tt))
tart = lll * "0"
s = tart + str(tt)
d_new = combine + s + suffix
if(os.path.isfile(d_new)==True):
file_ = open(d_new,"r")
lines=file_.readlines()
jj=0
for line in lines:
jj=jj+1
if(jj==28):
water=list(line.split())
v=water[1]
g=float(v)
if(g<val):
list_values.append(tt)
file_.close()
pdb=".pdbqt.pdbqt"
for kk in (list_values):
var = lm - len(str(kk))
shortv = var * "0"
ss = shortv + str(kk)
rock = log_f+prefix+ss+pdb
if(os.path.isfile(rock)):
shutil.copy(rock, text_f)
print("Done")
elif(n==3):
names="All.txt"
o = open(names, "w")
l = len(mol)
no_of_mol = int(mol)
for i in range(1,no_of_mol):
ll = l - len(str(i))
s = ll * "0"
s = s + str(i)
d6 = log_f+prefix+s+suffix
if(os.path.isfile(d6)==True):
fi = open(d6,"r")
lines = fi.readlines()
jj=0
for line in lines:
jj=jj+1
if(jj==28):
z=list(line.split())
v=z[1]
state="Energy of ligand "+str(i)+" -> "+v
o.write(state)
o.write("\n")
fi.close()
o.close()
print("Done")
#shutil.copy(names, text_f)
print("Over")
| 27.71875 | 107 | 0.381511 |
import os
import shutil
l1="*" * 100
l2="-" * 100
print(l1)
print(l1)
print(l2)
print(l2)
print(" ###### ###### ##### ###### ##### ## # ")
print(" # # # # # # # # # ")
print(" ###### # ### ###### ##### # # # ")
print(" # # # # # # # # # ")
print(" ###### ###### # # ###### ##### # ## ")
print(" ")
print(l2)
print(l2)
print(l1)
print(l1)
print("\n")
print("Folder having Log Files")
log_f = input()
log_f=log_f.strip()
print("Approx Molecules")
ml=input()
mol=ml.strip()
print("Prefix")
prefx=input()
prefix=prefx.strip()
print("Suffix")
suffx=input()
suffix=suffx.strip()
pro_f="Output"
pro_f=pro_f.strip()
print("Name of output folder")
text_f=input()
text_f=text_f.strip()
os.mkdir(text_f)
while(True):
print("\n")
print("Press 0/1/2/3 -> Exit/Binding_Energy/Copy_pdbqt_for_given_binding_energy/Binding_Energy_of_All")
nu=input("Choose::")
nu=nu.strip()
n=int(nu)
if(n==0):
break
elif(n==1):
name="Binding.txt"
out = open(name, "w")
print("Binding Energy Value")
e_value=input()
e_value=str(e_value.strip())
value=float(e_value)
l = len(mol)
d1=log_f+prefix
no_of_mol = int(mol)
for i in range(1,no_of_mol):
ll = l - len(str(i))
s = ""
s = ll * "0"
st = s + str(i)
d2=""
d2 = d1+st+suffix
if(os.path.isfile(d2)):
new_f=open(d2)
liness=new_f.readlines()
xx=0
for line in liness:
xx=xx+1
if(xx==28):
h = list(line.split())
vv = h[1]
vvv = float(vv)
if(vvv < value):
out.write("B-Energy in Ligand "+str(i)+" -> "+vv)
out.write("\n")
new_f.close()
out.close()
print("Done")
elif(n==2):
list_values=[]
print("Binding Energy Value")
e_val=input()
e_value=str(e_val)
val=float(e_value)
lm = len(mol)
combine=log_f+prefix
no_ = int(mol)
for tt in range(1,no_):
lll = lm - len(str(tt))
tart = lll * "0"
s = tart + str(tt)
d_new = combine + s + suffix
if(os.path.isfile(d_new)==True):
file_ = open(d_new,"r")
lines=file_.readlines()
jj=0
for line in lines:
jj=jj+1
if(jj==28):
water=list(line.split())
v=water[1]
g=float(v)
if(g<val):
list_values.append(tt)
file_.close()
pdb=".pdbqt.pdbqt"
for kk in (list_values):
var = lm - len(str(kk))
shortv = var * "0"
ss = shortv + str(kk)
rock = log_f+prefix+ss+pdb
if(os.path.isfile(rock)):
shutil.copy(rock, text_f)
print("Done")
elif(n==3):
names="All.txt"
o = open(names, "w")
l = len(mol)
no_of_mol = int(mol)
for i in range(1,no_of_mol):
ll = l - len(str(i))
s = ll * "0"
s = s + str(i)
d6 = log_f+prefix+s+suffix
if(os.path.isfile(d6)==True):
fi = open(d6,"r")
lines = fi.readlines()
jj=0
for line in lines:
jj=jj+1
if(jj==28):
z=list(line.split())
v=z[1]
state="Energy of ligand "+str(i)+" -> "+v
o.write(state)
o.write("\n")
fi.close()
o.close()
print("Done")
print("Over")
| true | true |
f73a0880b4e933988ec94e019043affed13c632c | 4,597 | py | Python | examples/python/using_qiskit_terra_level_2.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 22 | 2019-08-15T04:39:15.000Z | 2022-03-06T05:17:04.000Z | examples/python/using_qiskit_terra_level_2.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 2 | 2020-10-26T07:12:12.000Z | 2021-12-09T16:22:51.000Z | examples/python/using_qiskit_terra_level_2.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 9 | 2019-09-05T05:33:00.000Z | 2021-10-09T16:04:53.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Example showing how to use Qiskit at level 2 (advanced).
This example shows how an advanced user interacts with Terra.
It builds some circuits and transpiles them with the pass_manager.
"""
import pprint, time
# Import the Qiskit modules
from qiskit import IBMQ, BasicAer
from qiskit import QiskitError
from qiskit.circuit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.extensions import SwapGate
from qiskit.compiler import assemble
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from qiskit.transpiler import PassManager
from qiskit.transpiler import CouplingMap
from qiskit.transpiler.passes import Unroller
from qiskit.transpiler.passes import FullAncillaAllocation
from qiskit.transpiler.passes import EnlargeWithAncilla
from qiskit.transpiler.passes import TrivialLayout
from qiskit.transpiler.passes import Decompose
from qiskit.transpiler.passes import CXDirection
from qiskit.transpiler.passes import LookaheadSwap
try:
IBMQ.load_accounts()
except:
print("""WARNING: There's no connection with the API for remote backends.
Have you initialized a file with your personal token?
For now, there's only access to local simulator backends...""")
try:
qubit_reg = QuantumRegister(4, name='q')
clbit_reg = ClassicalRegister(4, name='c')
# Making first circuit: superpositions
qc1 = QuantumCircuit(qubit_reg, clbit_reg, name="bell")
qc1.h(qubit_reg[0])
qc1.cx(qubit_reg[0], qubit_reg[1])
qc1.measure(qubit_reg, clbit_reg)
# Making another circuit: GHZ State
qc2 = QuantumCircuit(qubit_reg, clbit_reg, name="superposition")
qc2.h(qubit_reg)
qc2.cx(qubit_reg[0], qubit_reg[1])
qc2.cx(qubit_reg[0], qubit_reg[2])
qc2.cx(qubit_reg[0], qubit_reg[3])
qc2.measure(qubit_reg, clbit_reg)
# Setting up the backend
print("(Aer Backends)")
for backend in BasicAer.backends():
print(backend.status())
qasm_simulator = BasicAer.get_backend('qasm_simulator')
# Compile and run the circuit on a real device backend
# See a list of available remote backends
print("\n(IBMQ Backends)")
for backend in IBMQ.backends():
print(backend.status())
try:
# select least busy available device and execute.
least_busy_device = least_busy(IBMQ.backends(simulator=False))
except:
print("All devices are currently unavailable.")
print("Running on current least busy device: ", least_busy_device)
# making a pass manager to compile the circuits
coupling_map = CouplingMap(least_busy_device.configuration().coupling_map)
print("coupling map: ", coupling_map)
pm = PassManager()
pm.append(TrivialLayout(coupling_map))
pm.append(FullAncillaAllocation(coupling_map))
pm.append(EnlargeWithAncilla())
pm.append(LookaheadSwap(coupling_map))
pm.append(Decompose(SwapGate))
pm.append(CXDirection(coupling_map))
pm.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))
qc1_new = pm.run(qc1)
qc2_new = pm.run(qc2)
print("Bell circuit before passes:")
print(qc1.draw())
print("Bell circuit after passes:")
print(qc1_new.draw())
print("Superposition circuit before passes:")
print(qc2.draw())
print("Superposition circuit after passes:")
print(qc2_new.draw())
# Assemble the two circuits into a runnable qobj
qobj = assemble([qc1_new, qc2_new], shots=1000)
# Running qobj on the simulator
print("Running on simulator:")
sim_job = qasm_simulator.run(qobj)
# Getting the result
sim_result=sim_job.result()
# Show the results
print(sim_result.get_counts(qc1))
print(sim_result.get_counts(qc2))
# Running the job.
print("Running on device:")
exp_job = least_busy_device.run(qobj)
job_monitor(exp_job)
exp_result = exp_job.result()
# Show the results
print(exp_result.get_counts(qc1))
print(exp_result.get_counts(qc2))
except QiskitError as ex:
print('There was an error in the circuit!. Error = {}'.format(ex))
| 32.373239 | 78 | 0.724385 |
import pprint, time
from qiskit import IBMQ, BasicAer
from qiskit import QiskitError
from qiskit.circuit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.extensions import SwapGate
from qiskit.compiler import assemble
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from qiskit.transpiler import PassManager
from qiskit.transpiler import CouplingMap
from qiskit.transpiler.passes import Unroller
from qiskit.transpiler.passes import FullAncillaAllocation
from qiskit.transpiler.passes import EnlargeWithAncilla
from qiskit.transpiler.passes import TrivialLayout
from qiskit.transpiler.passes import Decompose
from qiskit.transpiler.passes import CXDirection
from qiskit.transpiler.passes import LookaheadSwap
try:
IBMQ.load_accounts()
except:
print("""WARNING: There's no connection with the API for remote backends.
Have you initialized a file with your personal token?
For now, there's only access to local simulator backends...""")
try:
qubit_reg = QuantumRegister(4, name='q')
clbit_reg = ClassicalRegister(4, name='c')
qc1 = QuantumCircuit(qubit_reg, clbit_reg, name="bell")
qc1.h(qubit_reg[0])
qc1.cx(qubit_reg[0], qubit_reg[1])
qc1.measure(qubit_reg, clbit_reg)
qc2 = QuantumCircuit(qubit_reg, clbit_reg, name="superposition")
qc2.h(qubit_reg)
qc2.cx(qubit_reg[0], qubit_reg[1])
qc2.cx(qubit_reg[0], qubit_reg[2])
qc2.cx(qubit_reg[0], qubit_reg[3])
qc2.measure(qubit_reg, clbit_reg)
print("(Aer Backends)")
for backend in BasicAer.backends():
print(backend.status())
qasm_simulator = BasicAer.get_backend('qasm_simulator')
print("\n(IBMQ Backends)")
for backend in IBMQ.backends():
print(backend.status())
try:
least_busy_device = least_busy(IBMQ.backends(simulator=False))
except:
print("All devices are currently unavailable.")
print("Running on current least busy device: ", least_busy_device)
coupling_map = CouplingMap(least_busy_device.configuration().coupling_map)
print("coupling map: ", coupling_map)
pm = PassManager()
pm.append(TrivialLayout(coupling_map))
pm.append(FullAncillaAllocation(coupling_map))
pm.append(EnlargeWithAncilla())
pm.append(LookaheadSwap(coupling_map))
pm.append(Decompose(SwapGate))
pm.append(CXDirection(coupling_map))
pm.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))
qc1_new = pm.run(qc1)
qc2_new = pm.run(qc2)
print("Bell circuit before passes:")
print(qc1.draw())
print("Bell circuit after passes:")
print(qc1_new.draw())
print("Superposition circuit before passes:")
print(qc2.draw())
print("Superposition circuit after passes:")
print(qc2_new.draw())
qobj = assemble([qc1_new, qc2_new], shots=1000)
print("Running on simulator:")
sim_job = qasm_simulator.run(qobj)
sim_result=sim_job.result()
print(sim_result.get_counts(qc1))
print(sim_result.get_counts(qc2))
print("Running on device:")
exp_job = least_busy_device.run(qobj)
job_monitor(exp_job)
exp_result = exp_job.result()
print(exp_result.get_counts(qc1))
print(exp_result.get_counts(qc2))
except QiskitError as ex:
print('There was an error in the circuit!. Error = {}'.format(ex))
| true | true |
f73a096f68d9022cd6fb93c0b13f02d01d49e2d8 | 28,448 | py | Python | python/lbann/modules/subgraph/transformer.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | python/lbann/modules/subgraph/transformer.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | python/lbann/modules/subgraph/transformer.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | """Neural network modules for transformer models."""
import math
import lbann
from lbann.modules.base import Module, FullyConnectedModule
from lbann.util import make_iterable
class MultiheadAttention(Module):
"""Parallel instances of scaled dot-product attention.
See:
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin.
"Attention is all you need." In Advances in Neural Information
Processing Systems, pp. 5998-6008. 2017.
Args:
embed_dim (int): Size of representation space.
num_heads (int): Number of parallel attention instances. Must
evenly divide `embed_dim`.
name (str): Default name is in the form
'multiheadattention<index>'.
"""
global_count = 0 # Static counter, used for default names
def __init__(self,
embed_dim,
num_heads,
branches,
d_kv = None,
name=None):
super().__init__()
MultiheadAttention.global_count += 1
self.instance = 0
assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if(d_kv == None):
self.inner_dim = embed_dim
self.head_dim = embed_dim // num_heads
else:
self.inner_dim = d_kv * num_heads
self.head_dim = d_kv
if(branches==0):
self.ENABLE_SUBGRAPH=False
self.BRANCHES=0
else:
self.ENABLE_SUBGRAPH=True
self.BRANCHES = branches
# Module name
self.name = name
if not self.name:
self.name = f'multiheadattention{MultiheadAttention.global_count}'
# Weights for fully-connected layers
self.query_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_query_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_query_bias'),
]
self.key_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_key_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_key_bias'),
]
self.value_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_value_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_value_bias'),
]
self.output_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_output_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_output_bias'),
]
def forward(self, queries, keys, values, mask=None):
"""Apply multi-head attention.
The input and output tensors are interpreted as sequences of
vectors, where the first tensor dimension is the sequence
dimension.
Args:
queries (lbann.Layer): Sequence of query vectors.
keys (lbann.Layer): Sequence of key vectors.
values (lbann.Layer): Sequence of value vectors.
mask (lbann.Layer, optional): Additive attention mask. If
the (i,j) entry is very negative (e.g. -1e9), then the
ith query does not attend to the jth key/value pair.
Returns:
lbann.Layer: Sequence of output vectors. The sequence
length is the same as `queries`.
"""
ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
BRANCHES = self.BRANCHES
if(ENABLE_SUBGRAPH):
if(self.num_heads%BRANCHES!=0):
raise ValueError('Num heads should be divisible by BRANCHES')
self.instance += 1
name = f'{self.name}_instance{self.instance}'
# Apply fully-connected layers to input sequences
queries_fc = lbann.ChannelwiseFullyConnected(
queries,
weights=self.query_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_queries_fc',
)
keys_fc = lbann.ChannelwiseFullyConnected(
keys,
weights=self.key_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_keys_fc',
)
values_fc = lbann.ChannelwiseFullyConnected(
values,
weights=self.value_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_values_fc',
)
# Slice embedding vectors for each head
slice_points = [self.head_dim * i for i in range(self.num_heads+1)]
queries_slice = lbann.Slice(
queries_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_queries_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
keys_slice = lbann.Slice(
keys_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_keys_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
values_slice = lbann.Slice(
values_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_values_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
# Compute scaled dot-product attention for each head
attentions = []
tag=0
for head in range(self.num_heads):
head_name = f'{name}_myattention_head{head}'
# Attention inputs
if(ENABLE_SUBGRAPH):
if(head%int(self.num_heads/BRANCHES)==0):
tag+=1
q = lbann.Identity(queries_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
k = lbann.Identity(keys_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
v = lbann.Identity(values_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
else:
q = lbann.Identity(queries_slice)
k = lbann.Identity(keys_slice)
v = lbann.Identity(values_slice)
# Multiply queries and keys
# Note: num_queries x num_keys
y = lbann.MatMul(
q, k,
transpose_b=True,
name=f'{head_name}_matmul',
)
y = lbann.WeightedSum(
y,
scaling_factors=1 / math.sqrt(self.head_dim),
name=f'{head_name}_scale',
)
if(ENABLE_SUBGRAPH):
if mask!=None:
y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
else:
if mask:
y = lbann.Sum([y, mask], name=f'{head_name}_mask')
y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
# Attention output
# Note: num_queries x head_dim
attentions.append(lbann.MatMul(y, v, name=head_name))
#Strong scaling
# Concatenate heads and apply fully-connected layer
if(ENABLE_SUBGRAPH):
attentions = lbann.Concatenation(
attentions,
axis=1,
name=f'{name}_heads_concat',parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
else:
attentions = lbann.Concatenation(
attentions,
axis=1,
name=f'{name}_heads_concat',
)
outputs_fc = lbann.ChannelwiseFullyConnected(
attentions,
weights=self.output_weights,
output_channel_dims=[self.embed_dim],
name=f'{name}',
)
return outputs_fc
class MultiheadAttentionAllSubGraph(Module):
"""Parallel instances of scaled dot-product attention.
See:
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin.
"Attention is all you need." In Advances in Neural Information
Processing Systems, pp. 5998-6008. 2017.
Args:
embed_dim (int): Size of representation space.
num_heads (int): Number of parallel attention instances. Must
evenly divide `embed_dim`.
name (str): Default name is in the form
'multiheadattention<index>'.
"""
global_count = 0 # Static counter, used for default names
def __init__(self,
embed_dim,
num_heads,
branches,
d_kv = None,
name=None):
super().__init__()
MultiheadAttention.global_count += 1
self.instance = 0
assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if(d_kv == None):
self.inner_dim = embed_dim
self.head_dim = embed_dim // num_heads
else:
self.inner_dim = d_kv * num_heads
self.head_dim = d_kv
if(branches==0):
self.ENABLE_SUBGRAPH=False
self.BRANCHES=0
else:
self.ENABLE_SUBGRAPH=True
self.BRANCHES = branches
# Module name
self.name = name
if not self.name:
self.name = f'multiheadattention{MultiheadAttention.global_count}'
# Weights for fully-connected layers
self.query_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_query_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_query_bias'),
]
self.key_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_key_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_key_bias'),
]
self.value_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_value_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_value_bias'),
]
#Channelwise FC in SubGraph
self.output_weights= []
for head in range(branches):
self.output_weights.append( [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_head{head}_output_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_head{head}_output_bias'),
] )
def forward(self, queries, keys, values, mask=None):
"""Apply multi-head attention.
The input and output tensors are interpreted as sequences of
vectors, where the first tensor dimension is the sequence
dimension.
Args:
queries (lbann.Layer): Sequence of query vectors.
keys (lbann.Layer): Sequence of key vectors.
values (lbann.Layer): Sequence of value vectors.
mask (lbann.Layer, optional): Additive attention mask. If
the (i,j) entry is very negative (e.g. -1e9), then the
ith query does not attend to the jth key/value pair.
Returns:
lbann.Layer: Sequence of output vectors. The sequence
length is the same as `queries`.
"""
ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
BRANCHES = self.BRANCHES
if(ENABLE_SUBGRAPH):
if(self.num_heads%BRANCHES!=0):
raise ValueError('Num heads should be divisible by BRANCHES')
self.instance += 1
name = f'{self.name}_instance{self.instance}'
# Apply fully-connected layers to input sequences
queries_fc = lbann.ChannelwiseFullyConnected(
queries,
weights=self.query_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_queries_fc',
)
keys_fc = lbann.ChannelwiseFullyConnected(
keys,
weights=self.key_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_keys_fc',
)
values_fc = lbann.ChannelwiseFullyConnected(
values,
weights=self.value_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_values_fc',
)
# Slice embedding vectors for each head
slice_points = [self.head_dim * i for i in range(self.num_heads+1)]
queries_slice = lbann.Slice(
queries_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_queries_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
keys_slice = lbann.Slice(
keys_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_keys_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
values_slice = lbann.Slice(
values_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_values_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
# Compute scaled dot-product attention for each head
attentions = []
#variable to combine heads locally in sub-grids
temp_attentions = []
tag=0
for head in range(self.num_heads):
head_name = f'{name}_myattention_head{head}'
# Attention inputs
if(ENABLE_SUBGRAPH):
if(head%int(self.num_heads/BRANCHES)==0):
temp_attentions.append([])
tag+=1
q = lbann.Identity(queries_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
k = lbann.Identity(keys_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
v = lbann.Identity(values_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
else:
q = lbann.Identity(queries_slice)
k = lbann.Identity(keys_slice)
v = lbann.Identity(values_slice)
# Multiply queries and keys
# Note: num_queries x num_keys
y = lbann.MatMul(
q, k,
transpose_b=True,
name=f'{head_name}_matmul',
)
y = lbann.WeightedSum(
y,
scaling_factors=1 / math.sqrt(self.head_dim),
name=f'{head_name}_scale',
)
if(ENABLE_SUBGRAPH):
if mask!=None:
y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
else:
if mask:
y = lbann.Sum([y, mask], name=f'{head_name}_mask')
y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
# Attention output
# Note: num_queries x head_dim
y = lbann.MatMul(y, v, name=head_name)
# attentions.append(lbann.MatMul(y, v, name=head_name))
temp_attentions[-1].append(y)
for count, temp_attention in enumerate(temp_attentions):
if(self.BRANCHES == self.num_heads):
# No need to concat the heads at subgrid level
# if number of subgrids is equal to number of heads
attention_single_subgrid = temp_attentions[count][0]
else:
attention_single_subgrid = lbann.Concatenation(
temp_attention,
axis=1,
name=f'{name}_subgrid_heads_concat{count}',parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':False}
)
attention_single_subgrid = lbann.ChannelwiseFullyConnected(
attention_single_subgrid,
weights=self.output_weights[count],
output_channel_dims=[self.embed_dim],
name=f'{name}_cfc_{count}',
)
attentions.append(attention_single_subgrid)
#Strong scaling
grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
attentions = []
for head in range(self.BRANCHES):
attentions.append( lbann.Identity(grid_sum_slice) )
return attentions
class MultiheadAttentionAllSubGraphInputSubGrids(Module):
"""Parallel instances of scaled dot-product attention.
See:
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion
Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin.
"Attention is all you need." In Advances in Neural Information
Processing Systems, pp. 5998-6008. 2017.
This module expects inputs in subgrids format
if number of heads is 16 and subgrids is 4
then input should be a list of legnth 4
Args:
embed_dim (int): Size of representation space.
num_heads (int): Number of parallel attention instances. Must
evenly divide `embed_dim`.
name (str): Default name is in the form
'multiheadattention<index>'.
"""
global_count = 0 # Static counter, used for default names
def __init__(self,
embed_dim,
num_heads,
branches,
d_kv = None,
name=None):
super().__init__()
MultiheadAttention.global_count += 1
self.instance = 0
assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if(d_kv == None):
self.inner_dim = embed_dim
self.head_dim = embed_dim // num_heads
else:
self.inner_dim = d_kv * num_heads
self.head_dim = d_kv
if(branches==0):
self.ENABLE_SUBGRAPH=False
self.BRANCHES=0
else:
self.ENABLE_SUBGRAPH=True
self.BRANCHES = branches
# Module name
self.name = name
if not self.name:
self.name = f'multiheadattention{MultiheadAttention.global_count}'
# Weights for fully-connected layers
self.query_weights = []
self.key_weights = []
self.value_weights = []
for head in range(branches):
self.query_weights.append( [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_head{head}_query_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_head{head}_query_bias'),
])
self.key_weights.append( [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_head{head}_key_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_head{head}_key_bias'),
])
self.value_weights.append( [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_head{head}_value_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_head{head}_value_bias'),
])
#Channelwise FC in SubGraph
self.output_weights= []
for head in range(branches):
self.output_weights.append( [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_head{head}_output_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_head{head}_output_bias'),
] )
def forward(self, queries, keys, values, mask=None):
"""Apply multi-head attention.
The input and output tensors are interpreted as sequences of
vectors, where the first tensor dimension is the sequence
dimension.
Args:
queries (lbann.Layer): Sequence of query vectors.
keys (lbann.Layer): Sequence of key vectors.
values (lbann.Layer): Sequence of value vectors.
mask (lbann.Layer, optional): Additive attention mask. If
the (i,j) entry is very negative (e.g. -1e9), then the
ith query does not attend to the jth key/value pair.
Returns:
lbann.Layer: Sequence of output vectors. The sequence
length is the same as `queries`.
"""
ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
BRANCHES = self.BRANCHES
if(ENABLE_SUBGRAPH):
if(self.num_heads%BRANCHES!=0):
raise ValueError('Num heads should be divisible by BRANCHES')
self.instance += 1
name = f'{self.name}_instance{self.instance}'
# Apply fully-connected layers to input sequences
queries_fc = []
keys_fc = []
values_fc = []
# Slice embedding vectors for each head
slice_points = [self.head_dim * i for i in range(int(self.num_heads/self.BRANCHES)+1)]
#Queries strong scaling in CFC
attentions = []
for count, query in enumerate(queries):
temp = lbann.ChannelwiseFullyConnected(
query,
weights=self.query_weights[count],
output_channel_dims=[self.inner_dim],
name=f'{name}_subgrid{count}_queries_fc',
)
attentions.append(temp)
grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
attentions = []
for head in range(self.BRANCHES):
attentions.append( lbann.Identity(grid_sum_slice) )
for head in range(self.BRANCHES):
temp = lbann.Slice(
attentions[head],
axis=1,
slice_points=slice_points,
name=f'{name}_subgrid{head}_queries_slice',
)
queries_fc.append(temp)
#keys strong scaling in CFC
attentions = []
for count, key in enumerate(keys):
temp = lbann.ChannelwiseFullyConnected(
key,
weights=self.key_weights[count],
output_channel_dims=[self.inner_dim],
name=f'{name}_subgrid{count}_keys_fc',
)
attentions.append(temp)
grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
attentions = []
for head in range(self.BRANCHES):
attentions.append( lbann.Identity(grid_sum_slice) )
for head in range(self.BRANCHES):
temp = lbann.Slice(
attentions[head],
axis=1,
slice_points=slice_points,
name=f'{name}_subgrid{head}_keys_slice',
)
keys_fc.append(temp)
#Values strong scaling in CFC
attentions = []
for count, value in enumerate(values):
temp = lbann.ChannelwiseFullyConnected(
value,
weights=self.value_weights[count],
output_channel_dims=[self.inner_dim],
name=f'{name}_subgrid{count}_values_fc',
)
attentions.append(temp)
grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
attentions = []
for head in range(self.BRANCHES):
attentions.append( lbann.Identity(grid_sum_slice) )
for head in range(self.BRANCHES):
temp = lbann.Slice(
attentions[head],
axis=1,
slice_points=slice_points,
name=f'{name}_subgrid{head}_values_slice',
)
values_fc.append(temp)
queries_slice = []
keys_slice = []
values_slice = []
for branch in range(self.BRANCHES):
querie_slice = queries_fc[branch]
key_slice = keys_fc[branch]
value_slice = values_fc[branch]
for head in range(int(self.num_heads/self.BRANCHES)):
queries_slice.append(lbann.Identity(querie_slice))
keys_slice.append(lbann.Identity(key_slice))
values_slice.append(lbann.Identity(value_slice))
# Compute scaled dot-product attention for each head
attentions = []
#variable to combine heads locally in sub-grids
temp_attentions = []
tag=0
for head in range(self.num_heads):
head_name = f'{name}_myattention_head{head}'
# Attention inputs
if(head%int(self.num_heads/BRANCHES)==0):
temp_attentions.append([])
tag+=1
q = lbann.Identity(queries_slice[head])
k = lbann.Identity(keys_slice[head])
v = lbann.Identity(values_slice[head])
# Multiply queries and keys
# Note: num_queries x num_keys
y = lbann.MatMul(
q, k,
transpose_b=True,
name=f'{head_name}_matmul',
)
y = lbann.WeightedSum(
y,
scaling_factors=1 / math.sqrt(self.head_dim),
name=f'{head_name}_scale',
)
if(ENABLE_SUBGRAPH):
if mask!=None:
y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
else:
if mask:
y = lbann.Sum([y, mask], name=f'{head_name}_mask')
y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
# Attention output
# Note: num_queries x head_dim
y = lbann.MatMul(y, v, name=head_name)
# attentions.append(lbann.MatMul(y, v, name=head_name))
temp_attentions[-1].append(y)
for count, temp_attention in enumerate(temp_attentions):
if(self.BRANCHES == self.num_heads):
# No need to concat the heads at subgrid level
# if number of subgrids is equal to number of heads
attention_single_subgrid = temp_attentions[count][0]
else:
attention_single_subgrid = lbann.Concatenation(
temp_attention,
axis=1,
name=f'{name}_subgrid_heads_concat{count}',parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':False}
)
attention_single_subgrid = lbann.ChannelwiseFullyConnected(
attention_single_subgrid,
weights=self.output_weights[count],
output_channel_dims=[self.embed_dim],
name=f'{name}_cfc_{count}',
)
attentions.append(attention_single_subgrid)
#Strong scaling
grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
attentions = []
for head in range(self.BRANCHES):
attentions.append( lbann.Identity(grid_sum_slice) )
return attentions
| 34.482424 | 127 | 0.565664 | import math
import lbann
from lbann.modules.base import Module, FullyConnectedModule
from lbann.util import make_iterable
class MultiheadAttention(Module):
global_count = 0
def __init__(self,
embed_dim,
num_heads,
branches,
d_kv = None,
name=None):
super().__init__()
MultiheadAttention.global_count += 1
self.instance = 0
assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if(d_kv == None):
self.inner_dim = embed_dim
self.head_dim = embed_dim // num_heads
else:
self.inner_dim = d_kv * num_heads
self.head_dim = d_kv
if(branches==0):
self.ENABLE_SUBGRAPH=False
self.BRANCHES=0
else:
self.ENABLE_SUBGRAPH=True
self.BRANCHES = branches
self.name = name
if not self.name:
self.name = f'multiheadattention{MultiheadAttention.global_count}'
self.query_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_query_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_query_bias'),
]
self.key_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_key_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_key_bias'),
]
self.value_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_value_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_value_bias'),
]
self.output_weights = [
lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
name=f'{self.name}_output_matrix'),
lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
name=f'{self.name}_output_bias'),
]
def forward(self, queries, keys, values, mask=None):
ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
BRANCHES = self.BRANCHES
if(ENABLE_SUBGRAPH):
if(self.num_heads%BRANCHES!=0):
raise ValueError('Num heads should be divisible by BRANCHES')
self.instance += 1
name = f'{self.name}_instance{self.instance}'
queries_fc = lbann.ChannelwiseFullyConnected(
queries,
weights=self.query_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_queries_fc',
)
keys_fc = lbann.ChannelwiseFullyConnected(
keys,
weights=self.key_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_keys_fc',
)
values_fc = lbann.ChannelwiseFullyConnected(
values,
weights=self.value_weights,
output_channel_dims=[self.inner_dim],
name=f'{name}_values_fc',
)
slice_points = [self.head_dim * i for i in range(self.num_heads+1)]
queries_slice = lbann.Slice(
queries_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_queries_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
keys_slice = lbann.Slice(
keys_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_keys_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
values_slice = lbann.Slice(
values_fc,
axis=1,
slice_points=slice_points,
name=f'{name}_values_slice',
parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
attentions = []
tag=0
for head in range(self.num_heads):
head_name = f'{name}_myattention_head{head}'
if(ENABLE_SUBGRAPH):
if(head%int(self.num_heads/BRANCHES)==0):
tag+=1
q = lbann.Identity(queries_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
k = lbann.Identity(keys_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
v = lbann.Identity(values_slice, parallel_strategy = {'sub_branch_tag':tag,'enable_subgraph':ENABLE_SUBGRAPH})
else:
q = lbann.Identity(queries_slice)
k = lbann.Identity(keys_slice)
v = lbann.Identity(values_slice)
y = lbann.MatMul(
q, k,
transpose_b=True,
name=f'{head_name}_matmul',
)
y = lbann.WeightedSum(
y,
scaling_factors=1 / math.sqrt(self.head_dim),
name=f'{head_name}_scale',
)
if(ENABLE_SUBGRAPH):
if mask!=None:
y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
else:
if mask:
y = lbann.Sum([y, mask], name=f'{head_name}_mask')
y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
attentions.append(lbann.MatMul(y, v, name=head_name))
if(ENABLE_SUBGRAPH):
attentions = lbann.Concatenation(
attentions,
axis=1,
name=f'{name}_heads_concat',parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':ENABLE_SUBGRAPH}
)
else:
attentions = lbann.Concatenation(
attentions,
axis=1,
name=f'{name}_heads_concat',
)
outputs_fc = lbann.ChannelwiseFullyConnected(
attentions,
weights=self.output_weights,
output_channel_dims=[self.embed_dim],
name=f'{name}',
)
return outputs_fc
class MultiheadAttentionAllSubGraph(Module):
    """Multi-head attention with sub-graph (branch) parallelism.

    The query/key/value projections are computed once with shared weights,
    sliced into per-head chunks, and the heads are distributed over
    ``branches`` sub-grids.  Each sub-grid concatenates its own heads and
    applies its own output projection; the per-branch partial outputs are
    then summed across grids with ``Cross_Grid_Sum_Slice`` and one copy of
    the result is returned for every branch.

    See "Attention Is All You Need" (Vaswani et al., 2017) for the
    underlying scaled dot-product attention.
    """

    global_count = 0  # Static counter, used for default names

    def __init__(self,
                 embed_dim,
                 num_heads,
                 branches,
                 d_kv = None,
                 name=None):
        """Initialize shared projection weights and per-branch output weights.

        Args:
            embed_dim (int): Size of the representation space; must be
                divisible by ``num_heads``.
            num_heads (int): Number of attention heads.
            branches (int): Number of sub-grids the heads are spread over;
                must evenly divide ``num_heads`` and be positive for
                :meth:`forward` to work (see there).
            d_kv (int, optional): Per-head key/value dimension.  Defaults
                to ``embed_dim // num_heads``.
            name (str, optional): Default name is in the form
                'multiheadattention<index>'.
        """
        super().__init__()
        # NOTE(review): this bumps MultiheadAttention's counter rather than
        # this class's own (unused) global_count, which keeps default names
        # unique across all attention variants -- confirm this is intentional.
        MultiheadAttention.global_count += 1
        self.instance = 0
        assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        # Inner projection width: num_heads * d_kv when d_kv is given,
        # otherwise embed_dim split evenly across the heads.
        if d_kv is None:
            self.inner_dim = embed_dim
            self.head_dim = embed_dim // num_heads
        else:
            self.inner_dim = d_kv * num_heads
            self.head_dim = d_kv

        if branches == 0:
            self.ENABLE_SUBGRAPH = False
            self.BRANCHES = 0
        else:
            self.ENABLE_SUBGRAPH = True
            self.BRANCHES = branches

        self.name = name
        if not self.name:
            self.name = f'multiheadattention{MultiheadAttention.global_count}'

        # Shared weights for the query/key/value input projections.
        self.query_weights = [
            lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                          name=f'{self.name}_query_matrix'),
            lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                          name=f'{self.name}_query_bias'),
        ]
        self.key_weights = [
            lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                          name=f'{self.name}_key_matrix'),
            lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                          name=f'{self.name}_key_bias'),
        ]
        self.value_weights = [
            lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                          name=f'{self.name}_value_matrix'),
            lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                          name=f'{self.name}_value_bias'),
        ]

        # One output projection per branch, applied on that sub-grid.
        self.output_weights = []
        for head in range(branches):
            self.output_weights.append([
                lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                              name=f'{self.name}_head{head}_output_matrix'),
                lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                              name=f'{self.name}_head{head}_output_bias'),
            ])

    def forward(self, queries, keys, values, mask=None):
        """Apply multi-head attention with heads spread over sub-grids.

        Args:
            queries (lbann.Layer): Sequence of query vectors.
            keys (lbann.Layer): Sequence of key vectors.
            values (lbann.Layer): Sequence of value vectors.
            mask (optional): Additive attention masks, one per branch,
                indexed by the (1-based) branch tag.

        Returns:
            list of lbann.Layer: One copy of the attention output per
            branch.

        Raises:
            ValueError: If the module was constructed with ``branches == 0``
                or ``num_heads`` is not divisible by ``branches``.
        """
        ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
        BRANCHES = self.BRANCHES
        if ENABLE_SUBGRAPH:
            if self.num_heads % BRANCHES != 0:
                raise ValueError('Num heads should be divisible by BRANCHES')
        else:
            # Everything below assumes sub-graph parallelism: with
            # branches == 0 the per-branch buffers and output weights are
            # never created, and the head loop would die with an obscure
            # IndexError.  Fail fast with a clear message instead.
            raise ValueError(
                'MultiheadAttentionAllSubGraph requires branches > 0')
        self.instance += 1
        name = f'{self.name}_instance{self.instance}'

        # Shared input projections.
        queries_fc = lbann.ChannelwiseFullyConnected(
            queries,
            weights=self.query_weights,
            output_channel_dims=[self.inner_dim],
            name=f'{name}_queries_fc',
        )
        keys_fc = lbann.ChannelwiseFullyConnected(
            keys,
            weights=self.key_weights,
            output_channel_dims=[self.inner_dim],
            name=f'{name}_keys_fc',
        )
        values_fc = lbann.ChannelwiseFullyConnected(
            values,
            weights=self.value_weights,
            output_channel_dims=[self.inner_dim],
            name=f'{name}_values_fc',
        )

        # Slice the projections into one chunk per head; tag 0 marks the
        # root of the sub-grid split.
        slice_points = [self.head_dim * i for i in range(self.num_heads + 1)]
        queries_slice = lbann.Slice(
            queries_fc,
            axis=1,
            slice_points=slice_points,
            name=f'{name}_queries_slice',
            parallel_strategy={'sub_branch_tag': 0,
                               'enable_subgraph': ENABLE_SUBGRAPH},
        )
        keys_slice = lbann.Slice(
            keys_fc,
            axis=1,
            slice_points=slice_points,
            name=f'{name}_keys_slice',
            parallel_strategy={'sub_branch_tag': 0,
                               'enable_subgraph': ENABLE_SUBGRAPH},
        )
        values_slice = lbann.Slice(
            values_fc,
            axis=1,
            slice_points=slice_points,
            name=f'{name}_values_slice',
            parallel_strategy={'sub_branch_tag': 0,
                               'enable_subgraph': ENABLE_SUBGRAPH},
        )

        # Scaled dot-product attention for each head, grouped per branch.
        attentions = []
        temp_attentions = []
        tag = 0
        heads_per_branch = self.num_heads // BRANCHES
        for head in range(self.num_heads):
            head_name = f'{name}_myattention_head{head}'

            # Start a new sub-grid every heads_per_branch heads.
            if head % heads_per_branch == 0:
                temp_attentions.append([])
                tag += 1

            q = lbann.Identity(queries_slice,
                               parallel_strategy={'sub_branch_tag': tag,
                                                  'enable_subgraph': ENABLE_SUBGRAPH})
            k = lbann.Identity(keys_slice,
                               parallel_strategy={'sub_branch_tag': tag,
                                                  'enable_subgraph': ENABLE_SUBGRAPH})
            v = lbann.Identity(values_slice,
                               parallel_strategy={'sub_branch_tag': tag,
                                                  'enable_subgraph': ENABLE_SUBGRAPH})

            y = lbann.MatMul(
                q, k,
                transpose_b=True,
                name=f'{head_name}_matmul',
            )
            y = lbann.WeightedSum(
                y,
                scaling_factors=1 / math.sqrt(self.head_dim),
                name=f'{head_name}_scale',
            )
            if mask is not None:
                # One additive mask per branch, keyed by the 1-based branch
                # tag.  NOTE(review): presumably a dict keyed 1..BRANCHES --
                # a plain list would need tag-1 here; confirm against callers.
                y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
            y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
            y = lbann.MatMul(y, v, name=head_name)
            temp_attentions[-1].append(y)

        # Concatenate each branch's heads and apply that branch's output
        # projection.
        for count, temp_attention in enumerate(temp_attentions):
            if self.BRANCHES == self.num_heads:
                # One head per branch: nothing to concatenate.
                attention_single_subgrid = temp_attentions[count][0]
            else:
                attention_single_subgrid = lbann.Concatenation(
                    temp_attention,
                    axis=1,
                    name=f'{name}_subgrid_heads_concat{count}',
                    parallel_strategy={'sub_branch_tag': 0,
                                       'enable_subgraph': False},
                )
            attention_single_subgrid = lbann.ChannelwiseFullyConnected(
                attention_single_subgrid,
                weights=self.output_weights[count],
                output_channel_dims=[self.embed_dim],
                name=f'{name}_cfc_{count}',
            )
            attentions.append(attention_single_subgrid)

        # Sum the per-branch partial outputs across grids and hand one
        # copy back to every branch.
        grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
        return [lbann.Identity(grid_sum_slice) for _ in range(self.BRANCHES)]
class MultiheadAttentionAllSubGraphInputSubGrids(Module):
    """Multi-head attention whose inputs already live on sub-grids.

    Unlike :class:`MultiheadAttentionAllSubGraph`, ``queries``/``keys``/
    ``values`` are *lists* with one tensor per branch, and every branch has
    its own query/key/value projection weights.  Each set of per-branch
    projections is summed across grids with ``Cross_Grid_Sum_Slice``,
    re-sliced into per-head chunks, attended head by head, and the
    per-branch outputs are again summed across grids before being returned
    (one copy per branch).
    """

    global_count = 0  # Static counter, used for default names

    def __init__(self,
                 embed_dim,
                 num_heads,
                 branches,
                 d_kv = None,
                 name=None):
        """Initialize per-branch projection and output weights.

        Args:
            embed_dim (int): Size of the representation space; must be
                divisible by ``num_heads``.
            num_heads (int): Number of attention heads.
            branches (int): Number of sub-grids; must evenly divide
                ``num_heads`` and be positive for :meth:`forward` to work.
            d_kv (int, optional): Per-head key/value dimension.  Defaults
                to ``embed_dim // num_heads``.
            name (str, optional): Default name is in the form
                'multiheadattention<index>'.
        """
        super().__init__()
        # NOTE(review): bumps MultiheadAttention's counter rather than this
        # class's own (unused) global_count, keeping default names unique
        # across all attention variants -- confirm this is intentional.
        MultiheadAttention.global_count += 1
        self.instance = 0
        assert embed_dim % num_heads == 0, 'embed_dim must be divisible by num_heads'
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        # Inner projection width: num_heads * d_kv when d_kv is given,
        # otherwise embed_dim split evenly across the heads.
        if d_kv is None:
            self.inner_dim = embed_dim
            self.head_dim = embed_dim // num_heads
        else:
            self.inner_dim = d_kv * num_heads
            self.head_dim = d_kv

        if branches == 0:
            self.ENABLE_SUBGRAPH = False
            self.BRANCHES = 0
        else:
            self.ENABLE_SUBGRAPH = True
            self.BRANCHES = branches

        self.name = name
        if not self.name:
            self.name = f'multiheadattention{MultiheadAttention.global_count}'

        # One set of query/key/value projection weights per branch.
        self.query_weights = []
        self.key_weights = []
        self.value_weights = []
        for head in range(branches):
            self.query_weights.append([
                lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                              name=f'{self.name}_head{head}_query_matrix'),
                lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                              name=f'{self.name}_head{head}_query_bias'),
            ])
            self.key_weights.append([
                lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                              name=f'{self.name}_head{head}_key_matrix'),
                lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                              name=f'{self.name}_head{head}_key_bias'),
            ])
            self.value_weights.append([
                lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                              name=f'{self.name}_head{head}_value_matrix'),
                lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                              name=f'{self.name}_head{head}_value_bias'),
            ])

        # One output projection per branch, applied on that sub-grid.
        self.output_weights = []
        for head in range(branches):
            self.output_weights.append([
                lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                              name=f'{self.name}_head{head}_output_matrix'),
                lbann.Weights(initializer=lbann.ConstantInitializer(value=0),
                              name=f'{self.name}_head{head}_output_bias'),
            ])

    def _project_sum_slice(self, inputs, weights, slice_points, name, kind):
        """Project each branch's input, sum the projections across grids,
        and slice every branch's copy into per-head chunks.

        Args:
            inputs (list of lbann.Layer): One input tensor per branch.
            weights (list): Per-branch projection weights.
            slice_points (list of int): Per-head slice boundaries.
            name (str): Instance-unique layer name prefix.
            kind (str): 'queries', 'keys', or 'values' (for layer names).

        Returns:
            list of lbann.Layer: One ``Slice`` layer per branch.
        """
        projections = []
        for count, x in enumerate(inputs):
            projections.append(lbann.ChannelwiseFullyConnected(
                x,
                weights=weights[count],
                output_channel_dims=[self.inner_dim],
                name=f'{name}_subgrid{count}_{kind}_fc',
            ))
        # Sum the per-branch projections across grids; each branch then
        # receives a copy via Identity.
        grid_sum_slice = lbann.Cross_Grid_Sum_Slice(projections)
        branch_copies = [lbann.Identity(grid_sum_slice)
                         for _ in range(self.BRANCHES)]
        return [
            lbann.Slice(
                branch_copies[branch],
                axis=1,
                slice_points=slice_points,
                name=f'{name}_subgrid{branch}_{kind}_slice',
            )
            for branch in range(self.BRANCHES)
        ]

    def forward(self, queries, keys, values, mask=None):
        """Apply multi-head attention to per-branch inputs.

        Args:
            queries (list of lbann.Layer): One query tensor per branch.
            keys (list of lbann.Layer): One key tensor per branch.
            values (list of lbann.Layer): One value tensor per branch.
            mask (optional): Additive attention masks, one per branch,
                indexed by the (1-based) branch tag.

        Returns:
            list of lbann.Layer: One copy of the attention output per
            branch.

        Raises:
            ValueError: If the module was constructed with ``branches == 0``
                or ``num_heads`` is not divisible by ``branches``.
        """
        ENABLE_SUBGRAPH = self.ENABLE_SUBGRAPH
        BRANCHES = self.BRANCHES
        if ENABLE_SUBGRAPH:
            if self.num_heads % BRANCHES != 0:
                raise ValueError('Num heads should be divisible by BRANCHES')
        else:
            # The slice-point computation below divides by BRANCHES, so
            # branches == 0 would die with ZeroDivisionError.  Fail fast
            # with a clear message instead.
            raise ValueError(
                'MultiheadAttentionAllSubGraphInputSubGrids requires branches > 0')
        self.instance += 1
        name = f'{self.name}_instance{self.instance}'

        heads_per_branch = self.num_heads // self.BRANCHES
        slice_points = [self.head_dim * i for i in range(heads_per_branch + 1)]

        # Per-branch input projections, summed across grids and re-sliced
        # into per-head chunks (one Slice layer per branch).
        queries_fc = self._project_sum_slice(
            queries, self.query_weights, slice_points, name, 'queries')
        keys_fc = self._project_sum_slice(
            keys, self.key_weights, slice_points, name, 'keys')
        values_fc = self._project_sum_slice(
            values, self.value_weights, slice_points, name, 'values')

        # Fan each branch's slice out to the heads that live on it.
        queries_slice = []
        keys_slice = []
        values_slice = []
        for branch in range(self.BRANCHES):
            for _ in range(heads_per_branch):
                queries_slice.append(lbann.Identity(queries_fc[branch]))
                keys_slice.append(lbann.Identity(keys_fc[branch]))
                values_slice.append(lbann.Identity(values_fc[branch]))

        # Scaled dot-product attention for each head, grouped per branch.
        attentions = []
        temp_attentions = []
        tag = 0
        for head in range(self.num_heads):
            head_name = f'{name}_myattention_head{head}'

            # Start a new sub-grid every heads_per_branch heads.
            if head % heads_per_branch == 0:
                temp_attentions.append([])
                tag += 1

            q = lbann.Identity(queries_slice[head])
            k = lbann.Identity(keys_slice[head])
            v = lbann.Identity(values_slice[head])

            y = lbann.MatMul(
                q, k,
                transpose_b=True,
                name=f'{head_name}_matmul',
            )
            y = lbann.WeightedSum(
                y,
                scaling_factors=1 / math.sqrt(self.head_dim),
                name=f'{head_name}_scale',
            )
            if mask is not None:
                # One additive mask per branch, keyed by the 1-based branch
                # tag.  NOTE(review): presumably a dict keyed 1..BRANCHES --
                # a plain list would need tag-1 here; confirm against callers.
                y = lbann.Sum([y, mask[tag]], name=f'{head_name}_mask')
            y = lbann.ChannelwiseSoftmax(y, name=f'{head_name}_softmax')
            y = lbann.MatMul(y, v, name=head_name)
            temp_attentions[-1].append(y)

        # Concatenate each branch's heads and apply that branch's output
        # projection.
        for count, temp_attention in enumerate(temp_attentions):
            if self.BRANCHES == self.num_heads:
                # One head per branch: nothing to concatenate.
                attention_single_subgrid = temp_attentions[count][0]
            else:
                attention_single_subgrid = lbann.Concatenation(
                    temp_attention,
                    axis=1,
                    name=f'{name}_subgrid_heads_concat{count}',
                    parallel_strategy={'sub_branch_tag': 0,
                                       'enable_subgraph': False},
                )
            attention_single_subgrid = lbann.ChannelwiseFullyConnected(
                attention_single_subgrid,
                weights=self.output_weights[count],
                output_channel_dims=[self.embed_dim],
                name=f'{name}_cfc_{count}',
            )
            attentions.append(attention_single_subgrid)

        # Sum the per-branch partial outputs across grids and hand one
        # copy back to every branch.
        grid_sum_slice = lbann.Cross_Grid_Sum_Slice(attentions)
        return [lbann.Identity(grid_sum_slice) for _ in range(self.BRANCHES)]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.