hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ee9851dbffc2cca8a2ee883242d10f80ff8fe87 | 3,833 | py | Python | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_partner_pay_orders_response.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_partner_pay_orders_response.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_partner_pay_orders_response.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListPartnerPayOrdersResponse(SdkResponse):
    """Response model for the BSS ``ListPartnerPayOrders`` API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'count': 'int',
        'order_infos': 'list[CustomerOrderEntity]'
    }
    attribute_map = {
        'count': 'count',
        'order_infos': 'order_infos'
    }

    def __init__(self, count=None, order_infos=None):
        """ListPartnerPayOrdersResponse - a model defined in huaweicloud sdk"""
        super().__init__()
        # private storage backing the public properties below
        self._count = None
        self._order_infos = None
        self.discriminator = None
        # assign through the property setters only when a value was provided
        if count is not None:
            self.count = count
        if order_infos is not None:
            self.order_infos = order_infos

    @property
    def count(self):
        """Gets the count of this ListPartnerPayOrdersResponse.

        Total number of records matching the query (required field).

        :return: The count of this ListPartnerPayOrdersResponse.
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this ListPartnerPayOrdersResponse.

        Total number of records matching the query (required field).

        :param count: The count of this ListPartnerPayOrdersResponse.
        :type: int
        """
        self._count = count

    @property
    def order_infos(self):
        """Gets the order_infos of this ListPartnerPayOrdersResponse.

        Collection of matched customer orders (optional field).

        :return: The order_infos of this ListPartnerPayOrdersResponse.
        :rtype: list[CustomerOrderEntity]
        """
        return self._order_infos

    @order_infos.setter
    def order_infos(self, order_infos):
        """Sets the order_infos of this ListPartnerPayOrdersResponse.

        Collection of matched customer orders (optional field).

        :param order_infos: The order_infos of this ListPartnerPayOrdersResponse.
        :type: list[CustomerOrderEntity]
        """
        self._order_infos = order_infos

    def to_dict(self):
        """Returns the model properties as a dict"""

        def _serialize(value):
            # recursively convert nested models (anything exposing ``to_dict``)
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val for key, val in value.items()}
            return value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, (list, dict)) or hasattr(value, "to_dict"):
                result[attr] = _serialize(value)
            elif attr in self.sensitive_list:
                # mask sensitive plain attributes in the exported dict
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, ListPartnerPayOrdersResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.775362 | 81 | 0.576833 |
706bfaa362976ba670caa575e6317d88f291ef81 | 46,400 | py | Python | imsegm/utilities/drawing.py | Borda/pyImSegm | c52e709f9fcd90bcc6ab06e515685fd4c4e6c301 | [
"BSD-3-Clause"
] | 314 | 2017-10-21T18:56:48.000Z | 2022-03-28T00:03:58.000Z | imsegm/utilities/drawing.py | Borda/pyImSegm | c52e709f9fcd90bcc6ab06e515685fd4c4e6c301 | [
"BSD-3-Clause"
] | 42 | 2017-12-10T17:02:47.000Z | 2022-01-03T21:38:35.000Z | imsegm/utilities/drawing.py | Borda/pyImSegm | c52e709f9fcd90bcc6ab06e515685fd4c4e6c301 | [
"BSD-3-Clause"
] | 71 | 2017-10-21T14:06:01.000Z | 2022-03-25T12:15:30.000Z | """
Framework for visualisations
Copyright (C) 2016-2018 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import logging
import os
import matplotlib
from imsegm.utilities import ImageDimensionError
# fall back to the non-interactive Agg backend on headless machines (no X display);
# this must happen before ``matplotlib.pyplot`` is imported below
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
    print('No display found. Using non-interactive Agg backend.')
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from planar import line as pl_line
from scipy import ndimage
from skimage import color, draw, segmentation
#: for blending two images define chess field size in pixels
SIZE_CHESS_FIELD = 50
#: columns from description files which marks the egg annotation by expert
COLUMNS_POSITION_EGG_ANNOT = ('ant_x', 'ant_y', 'post_x', 'post_y', 'lat_x', 'lat_y')
# colour references:
# http://matplotlib.org/examples/color/colormaps_reference.html
# http://htmlcolorcodes.com/
COLOR_ORANGE = '#FF5733'
COLOR_GRAY = '#7E7E7E'
COLOR_GREEN = '#1FFF00'
COLOR_YELLOW = '#FFFB00'
COLOR_PINK = '#FF00FF'
COLOR_BLUE = '#00AAFF'
#: colour cycle used when drawing several objects in one figure
COLORS = 'bgrmyck'
#: define markers for labels of positive (+1) neutral (0) and negative (-1) class
DICT_LABEL_MARKER = {
    -1: ('.', COLOR_GRAY),
    0: ('x', COLOR_GRAY),
    1: ('.', COLOR_YELLOW),
}
#: marker/colour per label incl. extra negative classes
#: NOTE(review): presumably -2/-1 distinguish FN/FP detections - confirm with callers
DICT_LABEL_MARKER_FN_FP = {
    -2: ('.', COLOR_PINK),
    -1: ('.', COLOR_BLUE),
    0: ('x', 'w'),
    1: ('.', COLOR_YELLOW),
}
def _ellipse(r, c, r_radius, c_radius, orientation=0., shape=None):
    """ temporary wrapper until release New version scikit-image v0.13

    :param int r: center position in rows
    :param int c: center position in columns
    :param int r_radius: ellipse diam in rows
    :param int c_radius: ellipse diam in columns
    :param float orientation: ellipse orientation
    :param tuple(int,int) shape: size of output mask
    :return tuple(list(int),list(int)): indexes of filled positions

    >>> img = np.zeros((10, 12), dtype=int)
    >>> rr, cc = _ellipse(5, 6, 3, 5, orientation=np.deg2rad(30))
    >>> img[rr, cc] = 1
    >>> img
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    """
    centre = np.array([r, c])
    # restrict rotation to the +/- 180 degree range
    orientation %= np.pi
    sin_o, cos_o = np.sin(orientation), np.cos(orientation)
    # half sizes of the axis-aligned bounding box of the rotated ellipse
    bbox_r = abs(r_radius * cos_o) + c_radius * sin_o
    bbox_c = r_radius * sin_o + abs(c_radius * cos_o)
    half_box = np.array([bbox_r, bbox_c])
    top_left = np.ceil(centre - half_box).astype(int)
    bottom_right = np.floor(centre + half_box).astype(int)
    if shape is not None:
        # clip the bounding box by the requested output shape
        top_left = np.maximum(top_left, np.array([0, 0]))
        bottom_right = np.minimum(bottom_right, np.array(shape[:2]) - 1)
    centre_local = centre - top_left
    local_shape = bottom_right - top_left + 1
    rows, cols = np.ogrid[0:int(local_shape[0]), 0:int(local_shape[1])]
    dr = rows - centre_local[0]
    dc = cols - centre_local[1]
    # normalised distances along the rotated major / minor axes
    dist_major = ((dr * cos_o + dc * sin_o) / r_radius) ** 2
    dist_minor = ((dr * sin_o - dc * cos_o) / c_radius) ** 2
    rr, cc = np.nonzero((dist_major + dist_minor) <= 1)
    # shift local coordinates back into the full-image frame
    return rr + top_left[0], cc + top_left[1]
def ellipse(r, c, r_radius, c_radius, orientation=0., shape=None):
    """ indexes of a filled ellipse - thin wrapper around ``skimage.draw.ellipse``

    .. note:: Should be solved in skimage v0.13

    :param int r: center position in rows
    :param int c: center position in columns
    :param int r_radius: ellipse diam in rows
    :param int c_radius: ellipse diam in columns
    :param float orientation: ellipse orientation
    :param tuple(int,int) shape: size of output mask
    :return tuple(list(int),list(int)): indexes of filled positions

    >>> img = np.zeros((14, 20), dtype=int)
    >>> rr, cc = ellipse(7, 10, 3, 9, np.deg2rad(30), img.shape)
    >>> img[rr, cc] = 1
    >>> img
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    """
    # skimage names the angle parameter ``rotation`` instead of ``orientation``
    return draw.ellipse(r, c, r_radius, c_radius, rotation=orientation, shape=shape)
def ellipse_perimeter(r, c, r_radius, c_radius, orientation=0., shape=None):
    """ indexes of an ellipse outline - wrapper around ``skimage.draw.ellipse_perimeter``

    .. note:: Should be solved in skimage v0.14

    :param int r: center position in rows
    :param int c: center position in columns
    :param int r_radius: ellipse diam in rows
    :param int c_radius: ellipse diam in columns
    :param float orientation: ellipse orientation
    :param tuple(int,int) shape: size of output mask
    :return tuple(list(int),list(int)): indexes of filled positions

    >>> img = np.zeros((14, 20), dtype=int)
    >>> rr, cc = ellipse_perimeter(7, 10, 3, 9, np.deg2rad(30), img.shape)
    >>> img[rr, cc] = 1
    >>> img
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    """
    # skimage's angle convention is opposite, hence the negated orientation
    return draw.ellipse_perimeter(r, c, r_radius, c_radius, orientation=-orientation, shape=shape)
def norm_aplha(alpha):
    """ normalise alpha into the range (0, 1)

    Values above 1 are interpreted as a 0-255 channel and rescaled,
    the result is then clipped into [0, 1].

    :param float alpha:
    :return float:

    >>> norm_aplha(0.5)
    0.5
    >>> norm_aplha(255)
    1.0
    >>> norm_aplha(-1)
    0
    """
    if alpha > 1.:
        # treat the value as an 8-bit channel
        alpha = alpha / 255.
    return min(max(alpha, 0), 1.)
def figure_image_adjustment(fig, img_size):
    """ strip axes, ticks and margins so the figure renders as a plain image

    :param fig: Figure
    :param tuple(int,int) img_size: image size as (height, width)
    :return Figure: the same figure, adjusted in place

    >>> fig = figure_image_adjustment(plt.figure(), (150, 200))
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    axis = fig.gca()
    # match the axes range to the image extent (origin in the upper-left corner)
    axis.set(xlim=[0, img_size[1]], ylim=[img_size[0], 0])
    axis.axis('off')
    axis.axes.get_xaxis().set_ticklabels([])
    axis.axes.get_yaxis().set_ticklabels([])
    # drop all outer margins around the drawing area
    fig.tight_layout(pad=0)
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
def figure_image_segm_results(img, seg, subfig_size=9, mid_labels_alpha=0.2, mid_image_gray=True):
    """ creating subfigure with original image, overlapped segmentation contours
    and clean result segmentation...
    it turns the sequence in vertical / horizontal according major image dim

    :param ndarray img: image as background
    :param ndarray seg: segmentation
    :param int subfig_size: max image size
    :param bool mid_image_gray: use gray-scale image as background in the middle panel
    :param float mid_labels_alpha: alpha for middle segmentation overlap
    :return Figure:

    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> fig = figure_image_segm_results(img, seg)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # image and segmentation have to cover the same pixel grid
    if img.shape[:2] != seg.shape[:2]:
        raise ImageDimensionError('different image %r & seg_pipe %r sizes' % (img.shape, seg.shape))
    if img.ndim == 2:  # for gray images of ovary
        # img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
        img = color.gray2rgb(img)
    fig, axarr = create_figure_by_image(img.shape[:2], subfig_size, nb_subfigs=3)
    # panel 1: plain input image
    axarr[0].set_title('original image')
    axarr[0].imshow(img)
    # panel 2: semi-transparent labels and their contours drawn over the image
    axarr[1].set_title('original image w. segment overlap')
    img_bg = color.rgb2gray(img) if mid_image_gray else img
    axarr[1].imshow(img_bg, cmap=plt.cm.Greys_r)
    axarr[1].imshow(seg, alpha=mid_labels_alpha, cmap=plt.cm.jet)
    axarr[1].contour(seg, levels=np.unique(seg), linewidths=2, cmap=plt.cm.jet)
    # panel 3: segmentation only
    axarr[2].set_title('segmentation - all labels')
    axarr[2].imshow(seg, cmap=plt.cm.jet)
    # hide axes decorations on all panels
    for ax in axarr:
        ax.axis('off')
        ax.axes.get_xaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticklabels([])
    fig.subplots_adjust(wspace=0.01, hspace=0.01)
    fig.tight_layout()
    return fig
def figure_overlap_annot_segm_image(annot, segm, img=None, subfig_size=9, drop_labels=None, segm_alpha=0.2):
    """ figure showing overlap annotation - segmentation - image

    :param ndarray annot: user annotation
    :param ndarray segm: segmentation
    :param ndarray img: original image
    :param int subfig_size: maximal sub-figure size
    :param float segm_alpha: transparency of the label overlays
    :param list(int) drop_labels: labels to be ignored in the difference panel
    :return Figure:

    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> fig = figure_overlap_annot_segm_image(seg, seg, img, drop_labels=[5])
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # scale the figure proportionally to the annotation, three panels side by side
    norm_size = np.array(annot.shape) / float(np.max(annot.shape))
    fig_size = norm_size[::-1] * subfig_size * np.array([3, 1])
    fig, axarr = plt.subplots(ncols=3, figsize=fig_size)
    # fall back to a white background when no image is given
    if img is None:
        img = np.ones(annot.shape)
    if img.ndim == 2:  # for gray images of ovary
        img = color.gray2rgb(img)
    axarr[0].set_title('Annotation')
    axarr[0].imshow(img)
    axarr[0].imshow(annot, alpha=segm_alpha)
    axarr[0].contour(annot, levels=np.unique(annot), linewidths=2)
    axarr[1].set_title('Segmentation')
    axarr[1].imshow(img)
    axarr[1].imshow(segm, alpha=segm_alpha)
    axarr[1].contour(segm, levels=np.unique(segm), linewidths=2)
    # visualise the 3th label
    axarr[2].set_title('difference: annot. & segment')
    # axarr[2].imshow(~(annot == segm), cmap=plt.cm.Reds)
    max_val = np.max(annot.astype(int))
    # signed label difference; zero means the two labellings agree
    diff = annot - segm
    if drop_labels is not None:
        # zero-out differences on pixels carrying an ignored annotation label
        for lb in drop_labels:
            diff[annot == lb] = 0
    cax = axarr[2].imshow(diff, vmin=-max_val, vmax=max_val, alpha=0.5, cmap=plt.cm.bwr)
    # vals = np.linspace(-max_val, max_val, max_val * 2 + 1)
    # colorbar with one tick per integer label difference
    plt.colorbar(
        cax,
        ticks=np.linspace(-max_val, max_val, max_val * 2 + 1),
        boundaries=np.linspace(-max_val - 0.5, max_val + 0.5, max_val * 2 + 2)
    )
    # plt.clim(-max_val - 0.5, max_val - 0.5)
    # axarr[2].contour(annot, levels=np.unique(annot), linewidths=1, colors='g')
    # axarr[2].contour(segm, levels=np.unique(segm), linewidths=1, colors='b')
    for ax in axarr:
        ax.axis('off')
        ax.axes.get_xaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticklabels([])
    fig.subplots_adjust(wspace=0.01, hspace=0.01)
    fig.tight_layout()
    return fig
def figure_segm_graphcut_debug(images, subfig_size=9):
    """ creating subfigure with slic, graph edges and results in the first row
    and individual class unary terms in the second row

    :param dict images: dictionary composed from name and image array
    :param int subfig_size: maximal sub-figure size
    :return Figure:

    >>> images = {
    ...     'image': np.random.random((100, 150, 3)),
    ...     'slic': np.random.randint(0, 2, (100, 150)),
    ...     'slic_mean': np.random.random((100, 150, 3)),
    ...     'img_graph_edges': np.random.random((100, 150, 3)),
    ...     'img_graph_segm': np.random.random((100, 150, 3)),
    ...     'imgs_unary_cost': [np.random.random((100, 150, 3))],
    ... }
    >>> fig = figure_segm_graphcut_debug(images)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # the debug dictionary has to provide all expected intermediate images
    keys = ('image', 'slic', 'slic_mean', 'img_graph_edges', 'img_graph_segm', 'imgs_unary_cost')
    if not all(n in images for n in keys):
        raise ValueError('missing keys in debug structure %r' % tuple(images.keys()))
    # at least 3 columns for the first row, or one per unary-cost image
    nb_cols = max(3, len(images['imgs_unary_cost']))
    img = images['image']
    if img.ndim == 2:  # for gray images of ovary
        img = color.gray2rgb(img)
    norm_size = np.array(img.shape[:2]) / float(np.max(img.shape))
    fig_size = norm_size[::-1] * subfig_size * np.array([nb_cols, 2])
    fig, axarr = plt.subplots(2, nb_cols, figsize=fig_size)
    # first row: superpixels, graph edges and resulting segmentation
    img_slic = segmentation.mark_boundaries(img, images['slic'], mode='subpixel')
    axarr[0, 0].set_title('SLIC')
    axarr[0, 0].imshow(img_slic)
    for i, k in enumerate(['img_graph_edges', 'img_graph_segm']):
        axarr[0, i + 1].set_title(k)
        axarr[0, i + 1].imshow(images[k])
    # second row: one unary-cost visualisation per class
    for i, im_uc in enumerate(images['imgs_unary_cost']):
        axarr[1, i].set_title('unary cost #%i' % i)
        axarr[1, i].imshow(im_uc)
    # hide axes decorations everywhere
    for j in range(2):
        for i in range(nb_cols):
            axarr[j, i].axis('off')
            axarr[j, i].axes.get_xaxis().set_ticklabels([])
            axarr[j, i].axes.get_yaxis().set_ticklabels([])
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0.05, hspace=0.05)
    return fig
def create_figure_by_image(img_size, subfig_size, nb_subfigs=1, extend=0.):
    """ create a figure whose proportions follow the given image size

    :param tuple(int,int) img_size: image size
    :param float subfig_size: maximal sub-figure size
    :param int nb_subfigs: number of sub-figure
    :param float extend: relative horizontal extension of the figure
    :return tuple(Figure,list):
    """
    norm_size = np.array(img_size) / float(np.max(img_size))
    # landscape images are tiled horizontally, portrait ones vertically;
    # note ``norm_size`` is (height, width) so it is reversed for figsize
    if norm_size[0] >= norm_size[1]:
        sub_grid = {'ncols': nb_subfigs}
        fig_size = norm_size[::-1] * subfig_size * np.array([nb_subfigs, 1])
    else:
        sub_grid = {'nrows': nb_subfigs}
        fig_size = norm_size[::-1] * subfig_size * np.array([1, nb_subfigs])
    fig_size[0] += extend * fig_size[0]
    fig, axarr = plt.subplots(figsize=fig_size, **sub_grid)
    return fig, axarr
def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9):
    """ show figure with result of the ellipse fitting

    :param ndarray img: image (2D / gray-scale only)
    :param ndarray seg: segmentation
    :param list(tuple(int,int,int,int,float)) ellipses: collection of ellipse parameters
        ell. parameters: (x, y, height, width, orientation)
    :param list(tuple(int,int)) centers: points
        NOTE(review): indexed as ``centers[i, 1]`` below, so an (N, 2) ndarray
        is expected rather than a plain list of tuples - confirm with callers
    :param list(float) crits: criterion value per ellipse (shown in the legend)
    :param float fig_size: maximal figure size
    :return Figure:

    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> ells = np.random.random((3, 5)) * 25
    >>> centers = np.random.random((3, 2)) * 25
    >>> crits = np.random.random(3)
    >>> fig = figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # the three collections describe the same ellipses and must align
    if not len(ellipses) == len(centers) == len(crits):
        raise ValueError(
            'number of ellipses (%i) and centers (%i) and criteria (%i) should match' %
            (len(ellipses), len(centers), len(crits))
        )
    fig, ax = create_figure_by_image(img.shape[:2], fig_size)
    if img.ndim != 2:
        raise ImageDimensionError('required image dimension is 2 to instead %r' % img.shape)
    ax.imshow(img, cmap=plt.cm.Greys_r)
    # draw each fitted ellipse outline with a cycling colour and its criterion
    for i, params in enumerate(ellipses):
        c1, c2, h, w, phi = params
        rr, cc = ellipse_perimeter(int(c1), int(c2), int(h), int(w), phi)
        ax.plot(cc, rr, '.', color=COLORS[i % len(COLORS)], label='#%i with crit=%d' % ((i + 1), int(crits[i])))
    ax.legend(loc='lower right')
    # plt.plot(centers[:, 1], centers[:, 0], 'ow')
    # mark each ellipse centre in the matching colour
    for i in range(len(centers)):
        ax.plot(centers[i, 1], centers[i, 0], 'o', color=COLORS[i % len(COLORS)])
    ax.set(xlim=[0, seg.shape[1]], ylim=[seg.shape[0], 0])
    ax.axis('off')
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
def figure_annot_slic_histogram_labels(dict_label_hist, slic_size=-1, slic_regul=-1):
    """ plot the distribution of label densities inside superpixels

    :param dict_label_hist: dictionary of label name and histogram
    :param int slic_size: used for figure title
    :param float slic_regul: used for figure title
    :return Figure:

    >>> np.random.seed(0)
    >>> dict_label_hist = {'a': np.tile([1, 0, 0, 0, 1], (25, 1)),
    ...                    'b': np.tile([0, 1, 0, 0, 1], (30, 1))}
    >>> fig = figure_annot_slic_histogram_labels(dict_label_hist)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    hist_all = np.concatenate(tuple(dict_label_hist.values()), axis=0)
    fig = plt.figure(figsize=(10, 5))
    ax = fig.gca()
    # one curve per label column; labels that never occur are skipped
    for lb, total in enumerate(np.sum(hist_all, axis=0)):
        if total == 0:
            continue
        density, bin_edges = np.histogram(hist_all[:, lb], bins=50, density=True)
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
        # log-scale y axis emphasises the rarely populated density bins
        ax.semilogy(bin_centers, density, label='label: %i' % lb)
    ax.set_title(
        'Histogram of labels density in each segments over all annotated images\n'
        ' (superpixels: size=%i, regul=%f)' % (slic_size, slic_regul)
    )
    ax.set(xlabel='region densities', ylabel='[%]')
    ax.legend()
    ax.grid()
    return fig
def figure_ray_feature(segm, points, ray_dist_raw=None, ray_dist=None, points_reconst=None, title=''):
    """ visualise the segmentation with specific point and estimated ray dist.

    :param ndarray segm: segmentation
    :param list(tuple(float,float)) points: collection of points
        NOTE(review): the body indexes ``points[1]`` / ``points[0]``, i.e. it is
        used as a single (row, col) point - confirm against callers
    :param list(float) ray_dist_raw: raw Ray distances before post-processing
    :param list(float) ray_dist: Ray feature distances
    :param ndarray points_reconst: collection of reconstructed points
    :param str title: figure title
    :return Figure:

    .. note:: for more examples, see unittests
    """
    ray_dist_raw = ray_dist_raw if ray_dist_raw is not None else []
    ray_dist = ray_dist if ray_dist is not None else []
    fig, axarr = plt.subplots(nrows=2, ncols=1)
    if title:
        axarr[0].set_title(title)
    # top panel: inverted segmentation with the source point (and reconstruction)
    axarr[0].imshow(1 - segm, cmap='gray', interpolation='nearest')
    axarr[0].plot(points[1], points[0], 'bo')
    axarr[0].set(xlim=[0, segm.shape[1]], ylim=[segm.shape[0], 0])
    if points_reconst is not None:
        axarr[0].plot(points_reconst[:, 1], points_reconst[:, 0], 'g.')
    # bottom panel: ray distances over the full 360 degree sweep
    axarr[1].plot(np.linspace(0, 360, len(ray_dist_raw)).tolist(), ray_dist_raw, 'b', label='original')
    axarr[1].plot(np.linspace(0, 360, len(ray_dist)).tolist(), ray_dist, 'r', label='final')
    axarr[1].set(xlabel='angles [deg]', xlim=[0, 360])
    axarr[1].legend(loc=0)
    axarr[1].grid()
    return fig
def figure_used_samples(img, labels, slic, used_samples, fig_size=12):
    """ draw used examples (superpixels)

    :param ndarray img: input image for background
    :param list(int) labels: labels associated for superpixels
    :param ndarray slic: superpixel segmentation
    :param list(bool) used_samples: used samples for training
    :param int fig_size: figure size
    :return Figure:

    >>> img = np.random.random((50, 75, 3))
    >>> labels = [-1, 0, 2]
    >>> used = [1, 0, 0]
    >>> seg = np.random.randint(0, 3, img.shape[:2])
    >>> fig = figure_used_samples(img, labels, seg, used)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # broadcast the per-superpixel flags onto the pixel grid via LUT indexing
    w_samples = np.asarray(used_samples)[slic]
    img = color.gray2rgb(img) if img.ndim == 2 else img
    fig, axarr = create_figure_by_image(img.shape[:2], fig_size, nb_subfigs=2, extend=0.15)
    # left panel: superpixel labels with white superpixel contours
    axarr[0].imshow(np.asarray(labels)[slic], cmap=plt.cm.jet)
    axarr[0].contour(slic, levels=np.unique(slic), colors='w', linewidths=0.5)
    axarr[0].axis('off')
    # right panel: used (green) vs dropped (red) samples over the image
    axarr[1].imshow(img)
    axarr[1].contour(slic, levels=np.unique(slic), colors='w', linewidths=0.5)
    cax = axarr[1].imshow(w_samples, cmap=plt.cm.RdYlGn, vmin=0, vmax=1, alpha=0.5)
    cbar = plt.colorbar(cax, ticks=[0, 1], boundaries=[-0.5, 0.5, 1.5])
    cbar.ax.set_yticklabels(['drop', 'used'])
    axarr[1].axis('off')
    fig.tight_layout()
    return fig
def draw_color_labeling(segments, lut_labels):
    """ visualise a labelling as an RGB image using the jet colour map

    :param ndarray segments: np.array<height, width>
    :param list(int) lut_labels: look-up-table
    :return ndarray: np.array<height, width, 3>
    """
    # translate superpixel indexes to labels via LUT indexing
    relabel = np.asarray(lut_labels)[segments]
    cmap = plt.get_cmap('jet')
    levels = np.arange(np.max(relabel) + 1)
    # RGB (alpha dropped) colour for every label, scaled over the label range
    lut = cmap(levels / float(levels.max()))[:, :3]
    return lut[relabel]
def draw_graphcut_unary_cost_segments(segments, unary_cost):
    """ visualise the unary cost for each class

    :param ndarray segments: np.array<height, width>
    :param ndarray unary_cost: np.array<nb_spx, nb_classes>
    :return []: [np.array<height, width, 3>] * nb_cls

    >>> seg = np.random.randint(0, 100, (100, 150))
    >>> u_cost = np.random.random((100, 3))
    >>> imgs = draw_graphcut_unary_cost_segments(seg, u_cost)
    >>> len(imgs)
    3
    >>> [img.shape for img in imgs]
    [(100, 150, 3), (100, 150, 3), (100, 150, 3)]
    """
    cmap = plt.get_cmap('Greens')
    max_cost = unary_cost.max()
    images = []
    for cls in range(unary_cost.shape[-1]):
        # invert so low cost maps to the dark end of the colour map
        weights = 1 - (unary_cost[:, cls] / max_cost)
        # per-superpixel RGB colours (alpha dropped), spread over the image
        lut = np.asarray([cmap(w) for w in weights])[:, :3]
        images.append(lut[segments])
    return images
def closest_point_on_line(start, end, point):
    """ orthogonal projection of a point onto the line given by two points

    Implemented with a plain numpy dot-product projection instead of the
    third-party ``planar`` package; this also generalizes the helper from
    2-D to points of any dimension and handles a degenerate (zero-length)
    line by returning its start point.

    :param list(int) start: line starting point
    :param list(int) end: line ending point
    :param list(int) point: point to be projected
    :return ndarray: projected point lying on the line

    >>> closest_point_on_line([0, 0], [1, 2], [0, 2])
    array([ 0.8,  1.6])
    """
    start, end, point = (np.asarray(a, dtype=float) for a in (start, end, point))
    direction = end - start
    norm_sq = np.dot(direction, direction)
    # degenerate line (start == end): the closest point is the start itself
    if norm_sq == 0:
        return start
    # parametric position of the foot of the perpendicular on the line
    t = np.dot(point - start, direction) / norm_sq
    return start + t * direction
def draw_eggs_ellipse(mask_shape, pos_ant, pos_lat, pos_post, threshold_overlap=0.6):
    """ draw a multi-label mask with one ellipse per egg estimated from 3 points

    :param tuple(int,int) mask_shape: size of the output mask
    :param [tuple(int,int)] pos_ant: anterior (x, y) point, one per egg
    :param [tuple(int,int)] pos_lat: lateral (x, y) point, one per egg
    :param [tuple(int,int)] pos_post: posterior (x, y) point, one per egg
    :param float threshold_overlap: skip an egg whose ellipse overlaps the
        already drawn eggs by more than this ratio
    :return ndarray: mask with 0 as background and 1..N as egg indexes

    >>> pos_ant, pos_lat, pos_post = [10, 10], [20, 20], [35, 20]
    >>> points = np.array([pos_ant, pos_lat, pos_post])
    >>> _= plt.plot(points[:, 0], points[:, 1], 'og')
    >>> mask = draw_eggs_ellipse([30, 50], [pos_ant], [pos_lat], [pos_post])
    >>> mask.shape
    (30, 50)
    >>> _= plt.imshow(mask, alpha=0.5, interpolation='nearest')
    >>> _= plt.xlim([0, mask.shape[1]]), plt.ylim([0, mask.shape[0]]), plt.grid()
    >>> # plt.show()
    """
    mask_eggs = np.zeros(mask_shape)
    for i, (ant, lat, post) in enumerate(zip(pos_ant, pos_lat, pos_post)):
        ant, lat, post = map(np.array, [ant, lat, post])
        # ellipse centre lies halfway between the anterior and posterior points
        center = ant + (post - ant) / 2.
        # lateral point projected onto the ant-post axis gives the minor radius
        lat_proj = closest_point_on_line(ant, post, lat)
        # http://stackoverflow.com/questions/433371/ellipse-bounding-a-rectangle
        radius_a = (np.linalg.norm(post - ant) / 2. / np.sqrt(2)) * 1.
        radius_b = (np.linalg.norm(lat - lat_proj) / np.sqrt(2)) * 1.
        angle = np.arctan2(*(post - ant))
        # NOTE(review): annotation points are (x, y) while ``ellipse`` expects
        # (row, col), hence the swapped centre indexes - confirm with callers
        rr, cc = ellipse(
            int(center[1]), int(center[0]), int(radius_a), int(radius_b), orientation=angle, shape=mask_eggs.shape
        )
        mask = np.zeros(mask_shape)
        mask[rr, cc] = True
        # mask = ndimage.morphology.binary_fill_holes(mask)
        # distance = ndimage.distance_transform_edt(mask)
        # probab = distance / np.max(distance)
        # mask = probab >= threshold_dist
        # ratio of this egg already covered by previously drawn eggs
        m_overlap = np.sum(np.logical_and(mask > 0, mask_eggs > 0)) / float(np.sum(mask))
        if m_overlap > threshold_overlap:
            logging.debug('skip egg drawing while it overlap by %f', m_overlap)
            continue
        mask_eggs[mask.astype(bool)] = i + 1
    return mask_eggs
def parse_annot_rectangles(rows_slice):
    """ parse egg annotation points from a DataFrame row/slice into lists

    :param rows_slice: a row from a table
    :return tuple: the three point collections (anterior, lateral, posterior)

    >>> import pandas as pd
    >>> dict_row = dict(ant_x=1, ant_y=2, lat_x=3, lat_y=4, post_x=5, post_y=6)
    >>> row = pd.DataFrame([dict_row])
    >>> parse_annot_rectangles(row)
    ([(1, 2)], [(3, 4)], [(5, 6)])
    >>> rows = pd.DataFrame([dict_row, {n: dict_row[n] + 10 for n in dict_row}])
    >>> rows
       ant_x  ant_y  lat_x  lat_y  post_x  post_y
    0      1      2      3      4       5       6
    1     11     12     13     14      15      16
    >>> parse_annot_rectangles(rows)
    ([(1, 2), (11, 12)], [(3, 4), (13, 14)], [(5, 6), (15, 16)])
    """
    eggs = {col: rows_slice[col] for col in COLUMNS_POSITION_EGG_ANNOT}
    # string-serialized arrays like "[1 2 3]" are decoded back into integers
    if all(isinstance(val, str) for val in eggs.values()):
        eggs = {col: map(int, val[1:-1].lstrip().split()) for col, val in eggs.items()}
    return (
        list(zip(eggs['ant_x'], eggs['ant_y'])),
        list(zip(eggs['lat_x'], eggs['lat_y'])),
        list(zip(eggs['post_x'], eggs['post_y'])),
    )
def draw_eggs_rectangle(mask_shape, pos_ant, pos_lat, pos_post):
    """ draw one rectangular mask per egg estimated from its 3 annotation points

    :param tuple(int,int) mask_shape: segmentation size
    :param [tuple(int,int)] pos_ant: points
    :param [tuple(int,int)] pos_lat: points
    :param [tuple(int,int)] pos_post: points
    :return list(ndarray): one binary mask per egg

    >>> pos_ant, pos_lat, pos_post = [10, 10], [20, 20], [35, 20]
    >>> points = np.array([pos_ant, pos_lat, pos_post])
    >>> _= plt.plot(points[:, 0], points[:, 1], 'og')
    >>> masks = draw_eggs_rectangle([30, 50], [pos_ant], [pos_lat], [pos_post])
    >>> [m.shape for m in masks]
    [(30, 50)]
    >>> for mask in masks:
    ...     _= plt.imshow(mask, alpha=0.5, interpolation='nearest')
    >>> _= plt.xlim([0, mask.shape[1]]), plt.ylim([0, mask.shape[0]]), plt.grid()
    >>> # plt.show()
    """
    masks = []
    for ant, lat, post in zip(list(pos_ant), list(pos_lat), list(pos_post)):
        ant, lat, post = (np.array(pt) for pt in (ant, lat, post))
        # perpendicular offset of the lateral point from the ant-post axis
        shift = lat - closest_point_on_line(ant, post, lat)
        # rectangle corners (closed polygon) around the ant-post axis
        corners = np.array([ant + shift, ant - shift, post - shift, post + shift, ant + shift])
        rr, cc = draw.polygon(corners[:, 1], corners[:, 0], shape=mask_shape)
        mask = np.zeros(mask_shape)
        mask[rr, cc] = True
        masks.append(mask)
    return masks
def merge_object_masks(masks, overlap_thr=0.7):
    """ merge several mask into one multi-class segmentation

    :param list(ndarray) masks: collection of masks
    :param float overlap_thr: threshold for overlap
    :return ndarray:

    >>> m1 = np.zeros((5, 6), dtype=int)
    >>> m1[:4, :4] = 1
    >>> m2 = np.zeros((5, 6), dtype=int)
    >>> m2[2:, 2:] = 1
    >>> merge_object_masks([m1, m1])
    array([[1, 1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0]])
    >>> merge_object_masks([m1, m2])
    array([[1, 1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0, 0],
           [1, 1, 2, 2, 2, 2],
           [1, 1, 2, 2, 2, 2],
           [0, 0, 2, 2, 2, 2]])
    """
    if not len(masks):
        raise ValueError('no masks are given')
    # work on a copy so the caller's first mask stays untouched
    merged = np.array(masks[0])
    for mask_bin in masks[1:]:
        # IoU of the candidate mask against every label already merged
        ratios = []
        for lb in range(1, int(np.max(merged) + 1)):
            inter = np.sum(np.logical_and(merged == lb, mask_bin == 1))
            union = np.sum(np.logical_or(merged == lb, mask_bin == 1))
            ratios.append(float(inter) / float(union))
        if any(ratio > overlap_thr for ratio in ratios):
            logging.debug('skip egg drawing while it overlap by %r', ratios)
            continue
        merged[mask_bin == 1] = np.max(merged) + 1
    return merged
def draw_image_segm_points(
    ax,
    img,
    points,
    labels=None,
    slic=None,
    color_slic='w',
    lut_label_marker=DICT_LABEL_MARKER,
    seg_contour=None,
):
    """ on plane draw background image or segmentation, overlap with SLIC
     contours, add contour of adaptive segmentation like annot. for centers
     plot point with specific property (shape and colour) according label
    :param ax: figure axis
    :param ndarray img: image (grayscale or RGB)
    :param list(tuple(int,int)) points: collection of (row, col) points;
        indexed as ``points[:, 1]`` below, so an ndarray of shape (N, 2)
        is presumably expected here -- TODO confirm against callers
    :param list(int) labels: LUT labels for superpixels
    :param ndarray slic: superpixel segmentation
    :param str color_slic: color for superpixels
    :param dict lut_label_marker: dictionary {int: (str, str)} of label and markers
    :param ndarray seg_contour: segmentation contour
    :raises ImageDimensionError: if `seg_contour` size differs from `img`
    :raises ValueError: if `points` and `labels` lengths differ
    >>> img = np.random.randint(0, 256, (100, 100))
    >>> points = np.random.randint(0, 100, (25, 2))
    >>> labels = np.random.randint(0, 5, len(points))
    >>> slic = np.random.randint(0, 256, (100, 100))
    >>> draw_image_segm_points(plt.Figure().gca(), img, points, labels, slic)
    """
    # background image or segmentation
    if img.ndim == 2:
        # grayscale is drawn semi-transparent so overlays remain visible
        ax.imshow(img, alpha=0.3, cmap=plt.cm.gist_earth)
    else:
        ax.imshow(img)
    if slic is not None:
        # superpixel boundaries as thin contour lines
        ax.contour(slic, levels=np.unique(slic), alpha=0.5, colors=color_slic, linewidths=0.5)
    # fig.gca().imshow(mark_boundaries(img, slic))
    if seg_contour is not None and isinstance(seg_contour, np.ndarray):
        if img.shape[:2] != seg_contour.shape[:2]:
            raise ImageDimensionError('image size %r and segm. %r should match' % (img.shape, seg_contour.shape))
        ax.contour(seg_contour, linewidths=3, levels=np.unique(seg_contour))
    if labels is not None:
        if len(points) != len(labels):
            raise ValueError('number of points (%i) and labels (%i) should match' % (len(points), len(labels)))
        # one plot call per label so each label gets its own marker/colour
        for lb in lut_label_marker:
            marker, clr = lut_label_marker[lb]
            ax.plot(points[(labels == lb), 1], points[(labels == lb), 0], marker, color=clr)
    else:
        # no labels: draw all points with the same default marker
        ax.plot(points[:, 1], points[:, 0], 'o', color=COLOR_ORANGE)
    # image coordinates: y axis inverted via explicit limits
    ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
def figure_image_segm_centres(img, segm, centers=None, cmap_contour=plt.cm.Blues):
    """ visualise the input image and segmentation in common frame
    :param ndarray img: image
    :param ndarray segm: segmentation (2D label map or 3D one-hot/probability map)
    :param [tuple(int,int)]|ndarray centers: list of (row, col) points, or a
        label map of the same spatial size as `img`
    :param obj cmap_contour: matplotlib colormap for the segmentation contour
    :return Figure:
    :raises ImageDimensionError: if `centers` is an ndarray whose size differs from `img`
    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> centre = [[55, 60]]
    >>> fig = figure_image_segm_centres(img, seg, centre)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    fig, ax = plt.subplots()
    ax.imshow(img)
    # draw segmentation contour only if there is any foreground
    if np.sum(segm) > 0:
        segm_show = segm
        if segm.ndim > 2:
            # collapse a one-hot / probability volume to a 2D label map
            segm_show = np.argmax(segm, axis=2)
        ax.contour(segm_show, cmap=cmap_contour, linewidths=0.5)
    if isinstance(centers, list):
        # list of points -> scatter them
        ax.plot(np.array(centers)[:, 1], np.array(centers)[:, 0], 'o', color=COLOR_ORANGE)
    elif isinstance(centers, np.ndarray):
        # ndarray -> treated as a label map; draw its contours
        if img.shape[:2] != centers.shape[:2]:
            raise ImageDimensionError('image size %r and centers %r should match' % (img.shape, centers.shape))
        ax.contour(centers, levels=np.unique(centers), cmap=plt.cm.YlOrRd)
    # image coordinates: y axis inverted via explicit limits
    ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
    fig.tight_layout()
    return fig
def draw_graphcut_weighted_edges(segments, centers, edges, edge_weights, img_bg=None, img_alpha=0.5):
    """ visualise the edges on the overlapping a background image

    :param [tuple(int,int)] centers: list of centers
    :param ndarray segments: np.array<height, width>
    :param ndarray edges: list of edges of shape <nb_edges, 2>
    :param ndarray edge_weights: weight per edge <nb_edges, 1>
    :param ndarray img_bg: image background
    :param float img_alpha: transparency
    :return ndarray: np.array<height, width, 3>
    >>> slic = np.array([[0] * 3 + [1] * 3 + [2] * 3+ [3] * 3] * 4 +
    ...                 [[4] * 3 + [5] * 3 + [6] * 3 + [7] * 3] * 4)
    >>> centres = [[1, 1], [1, 4], [1, 7], [1, 10],
    ...            [5, 1], [5, 4], [5, 7], [5, 10]]
    >>> edges = [[0, 1], [1, 2], [2, 3], [0, 4], [1, 5],
    ...          [4, 5], [2, 6], [5, 6], [3, 7], [6, 7]]
    >>> img = np.random.randint(0, 256, slic.shape + (3,))
    >>> edge_weights = np.ones(len(edges))
    >>> edge_weights[0] = 0
    >>> img = draw_graphcut_weighted_edges(slic, centres, edges, edge_weights, img_bg=img)
    >>> img.shape
    (8, 12, 3)
    """
    if img_bg is not None:
        if img_bg.ndim == 2:
            # duplicate channels to be like RGB
            img_bg = np.rollaxis(np.tile(img_bg, (3, 1, 1)), 0, 3)
        # convert to range 0,1 so the drawing is correct
        max_val = 1.
        # `np.float` (alias of builtin float == float64) was removed in NumPy 1.20,
        # so compare/cast with the explicit dtype instead
        if img_bg.dtype != np.float64:
            max_val = max(255., img_bg.max())
        img = img_bg.astype(float) / max_val
        # make it partialy transparent
        img = (1. - img_alpha) + img * img_alpha
    else:
        img = np.zeros(segments.shape + (3, ))
    clrs = plt.get_cmap('Greens')
    diff = (edge_weights.max() - edge_weights.min())
    if diff > 0:
        edge_ratio = (edge_weights - edge_weights.min()) / diff
    else:
        # all weights equal -> avoid division by zero, use minimal colour
        edge_ratio = np.zeros(edge_weights.shape)
    for i, edge in enumerate(edges):
        n1, n2 = edge
        y1, x1 = map(int, centers[n1])
        y2, x2 = map(int, centers[n2])
        # line = draw.line(y1, x1, y2, x2)  # , shape=img.shape[:2]
        # img[line] = clrs(edge_ratio[i])[:3]
        # using anti-aliasing
        rr, cc, val = draw.line_aa(y1, x1, y2, x2)  # , shape=img.shape[:2]
        color_w = np.tile(val, (3, 1)).T
        img[rr, cc, :] = color_w * clrs(edge_ratio[i])[:3] + (1 - color_w) * img[rr, cc, :]
        # `draw.circle` was renamed to `draw.disk` in scikit-image 0.19;
        # support both so the function works across skimage versions
        if hasattr(draw, 'disk'):
            circle = draw.disk((y1, x1), radius=2, shape=img.shape[:2])
        else:
            circle = draw.circle(y1, x1, radius=2, shape=img.shape[:2])
        img[circle] = 1., 1., 0.
    return img
def draw_rg2sp_results(ax, seg, slic, debug_rg2sp, iter_index=-1):
    """ drawing Region Growing with shape prior

    :param ax: figure axis
    :param ndarray seg: segmentation
    :param ndarray slic: superpixels
    :param dict debug_rg2sp: dictionary with debug results
    :param int iter_index: iteration index
    :return: ax
    """
    # map the per-superpixel labels of the chosen iteration back to pixels
    lut_labels = debug_rg2sp['labels'][iter_index]
    ax.imshow(lut_labels[slic], cmap=plt.cm.jet)
    ax.contour(seg, levels=np.unique(seg), colors='#bfbfbf')
    centres = debug_rg2sp['centres'][iter_index]
    shifts = debug_rg2sp['shifts'][iter_index]
    for centre, shift in zip(centres, shifts):
        cy, cx = centre[0], centre[1]
        angle = np.deg2rad(shift)
        # centre marker plus an arrow showing the object rotation
        ax.plot(cx, cy, 'ow')
        dx = np.cos(angle) * 50.
        dy = np.sin(angle) * 50.
        ax.arrow(cx, cy, dx, dy, fc='w', ec='w', head_width=20., head_length=30.)
    energy = round(debug_rg2sp['criteria'][iter_index])
    ax.set(
        xlim=[0, seg.shape[1]],
        ylim=[seg.shape[0], 0],
        title='Iteration #%i with E=%.0f' % (iter_index, energy)
    )
    return ax
def figure_rg2sp_debug_complete(seg, slic, debug_rg2sp, iter_index=-1, max_size=5):
    """ draw figure with all debug (intermediate) segmentation steps
    :param ndarray seg: segmentation
    :param ndarray slic: superpixels
    :param debug_rg2sp: dictionary with some debug parameters
    :param int iter_index: iteration index
    :param int max_size: max figure size
    :return Figure:
    >>> seg = np.random.randint(0, 4, (100, 150))
    >>> slic = np.random.randint(0, 80, (100, 150))
    >>> dict_debug = {
    ...     'lut_data_cost': np.random.random((80, 3)),
    ...     'lut_shape_cost': np.random.random((15, 80, 3)),
    ...     'labels': np.random.randint(0, 4, (15, 80)),
    ...     'centres': [np.array([np.random.randint(0, 100, 80),
    ...                           np.random.randint(0, 150, 80)]).T] * 15,
    ...     'shifts': np.random.random((15, 3)),
    ...     'criteria': np.random.random(15),
    ... }
    >>> fig = figure_rg2sp_debug_complete(seg, slic, dict_debug)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    # column 0 of lut_data_cost is the background class
    nb_objects = debug_rg2sp['lut_data_cost'].shape[1] - 1
    # top row always needs 3 panels (result, energy, data cost)
    nb_subfigs = max(3, nb_objects)
    # scale the figure while keeping the image aspect ratio
    norm_zise = np.array(seg.shape[:2]) / float(np.max(seg.shape))
    fig_size = np.array(norm_zise)[::-1] * np.array([nb_subfigs, 2]) * max_size
    fig, axarr = plt.subplots(2, nb_subfigs, figsize=fig_size)
    # panel (0, 0): current segmentation with centres and rotations
    draw_rg2sp_results(axarr[0, 0], seg, slic, debug_rg2sp, iter_index)
    # panel (0, 1): energy curve with the selected iteration marked
    axarr[0, 1].plot(debug_rg2sp['criteria'])
    axarr[0, 1].plot(iter_index, debug_rg2sp['criteria'][iter_index], 'og')
    axarr[0, 1].set(ylabel='Energy', xlabel='iteration')
    axarr[0, 1].grid()
    # panel (0, 2): background-class data cost mapped back to pixels
    axarr[0, 2].set_title('Data cost')
    img_shape_cost = debug_rg2sp['lut_shape_cost'][iter_index][:, 0][slic]
    im = axarr[0, 2].imshow(img_shape_cost, cmap=plt.cm.jet)
    fig.colorbar(im, ax=axarr[0, 2])
    for j in range(3):
        axarr[0, j].axis('off')
    # bottom row: one shape-cost panel per object
    for i in range(nb_objects):
        axarr[1, i].set_title('Shape cost for object #%i' % i)
        lut = debug_rg2sp['lut_shape_cost'][iter_index][:, i + 1]
        im = axarr[1, i].imshow(lut[slic], cmap=plt.cm.bone)
        fig.colorbar(im, ax=axarr[1, i])
        axarr[1, i].contour(seg, levels=np.unique(seg), cmap=plt.cm.jet)
        axarr[1, i].plot(debug_rg2sp['centres'][iter_index][i, 1], debug_rg2sp['centres'][iter_index][i, 0], 'or')
        # NOTE(review): this switches off a *top-row* axis inside the object
        # loop (already done for j < 3 above); it looks like axarr[1, i]
        # may have been intended -- confirm before changing
        axarr[0, i].axis('off')
    fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.96)
    # fig.tight_layout()
    return fig
def make_overlap_images_optical(images):
    """ overlap images and show them

    Each image is zero-padded (top-left anchored) to the maximal size and the
    padded images are averaged, i.e. each image contributes with equal weight.

    :param list(ndarray) images: collection of images (same ndim)
    :return ndarray: combined image (float)
    >>> im1 = np.zeros((5, 8), dtype=float)
    >>> im2 = np.ones((5, 8), dtype=float)
    >>> make_overlap_images_optical([im1, im2])
    array([[ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5],
           [ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5],
           [ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5],
           [ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5],
           [ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5]])
    """
    logging.info(' make_overlap_images_optical: overlap images')
    # get max dimension of the images
    max_size = np.max(np.vstack([im.shape for im in images]), 0)
    logging.debug('compute maximal image size: %r', max_size)
    # copy each image into a zero-filled canvas of the maximal size
    imgs_w = []
    for im in images:
        canvas = np.zeros(max_size, dtype=im.dtype)
        canvas[:im.shape[0], :im.shape[1]] = im
        imgs_w.append(canvas)
    # average all padded images in a single vectorised call
    # (equivalent to summing imgs_w[i] / len(images) one by one)
    return np.mean(imgs_w, axis=0)
def make_overlap_images_chess(images, chess_field=SIZE_CHESS_FIELD):
    """ overlap images and show them

    The output alternates source images in a chessboard pattern; regions past
    the last full field row/column stay zero (see the doctest's last row).

    :param list(ndarray) images: collection of images
    :param int chess_field: size of chess field size
    :return ndarray: combined image
    >>> im1 = np.zeros((5, 10), dtype=int)
    >>> im2 = np.ones((5, 10), dtype=int)
    >>> make_overlap_images_chess([im1, im2], chess_field=2)
    array([[0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
           [0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
           [1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
           [1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    """
    logging.info(' make_overlap_images_chess: overlap images')
    # get max dimension of the images
    max_size = np.max(np.vstack([im.shape for im in images]), 0)
    logging.debug('compute maximal image size: %r', max_size)
    # copy each image into a zero-filled canvas of the maximal size
    imgs_w = []
    for im in images:
        canvas = np.zeros(max_size, dtype=im.dtype)
        canvas[:im.shape[0], :im.shape[1]] = im
        imgs_w.append(canvas)
    img = np.zeros(max_size, dtype=images[0].dtype)
    idx_row = 0
    for i in range(int(max_size[0] / chess_field)):
        idx = idx_row
        # the row bounds depend only on `i` -> compute once per row
        # (they were previously recomputed in every inner iteration)
        w_b = i * chess_field
        w_e = min(w_b + chess_field, max_size[0])
        for j in range(int(max_size[1] / chess_field)):
            h_b = j * chess_field
            h_e = min(h_b + chess_field, max_size[1])
            img[w_b:w_e, h_b:h_e] = imgs_w[idx][w_b:w_e, h_b:h_e]
            # alternate source image along the row
            idx = (idx + 1) % len(images)
        # shift the starting image for the next row -> chessboard pattern
        idx_row = (idx_row + 1) % len(images)
    return img
def draw_image_clusters_centers(ax, img, centres, points=None, labels_centre=None, segm=None):
    """ draw image as background and cluster centers
    :param ax: figure axis
    :param ndarray img: image (2D, grayscale)
    :param ndarray centres: cluster centre points as (row, col)
    :param ndarray points: optional list of all points
    :param list(int) labels_centre: optional list of labels for points
    :param ndarray segm: optional segmentation
    :raises ImageDimensionError: if `img` is not 2-dimensional
    >>> img = np.random.randint(0, 256, (100, 100, 3))
    >>> seg = np.random.randint(0, 3, (100, 100))
    >>> centres = np.random.randint(0, 100, (3, 2))
    >>> points = np.random.randint(0, 100, (25, 2))
    >>> labels = np.random.randint(0, 4, 25)
    >>> draw_image_clusters_centers(plt.Figure().gca(), img[:, :, 0], centres, points, labels, seg)
    """
    if img is not None:
        # normalise intensities to [0, 1] for display
        img = (img / float(np.max(img)))
        if img.ndim != 2:
            raise ImageDimensionError('required image dimension is 2 to instead %r' % img.shape)
        ax.imshow(img, cmap=plt.cm.Greys_r)
        # image coordinates: y axis inverted via explicit limits
        ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
    if segm is not None:
        ax.imshow(segm, alpha=0.1)
        ax.contour(segm)
    if points is not None and len(points) > 0 and labels_centre is not None:
        points = np.array(points)
        # scatter the points of each cluster with its own (cycled) colour
        for i in range(max(labels_centre) + 1):
            select = points[np.asarray(labels_centre) == i]
            ax.plot(select[:, 1], select[:, 0], '.')
    # ax.plot(np.asarray(centres)[:, 1], np.asarray(centres)[:, 0], 'oy')
    # ax.plot(np.asarray(centres)[:, 1], np.asarray(centres)[:, 0], 'xr')
    if len(centres) == 0:
        return
    centres = np.asarray(centres)
    # draw each centre as three concentric discs (decreasing size)
    for s, clr in [
        (3e3, '#ccff33'),
        (1e3, '#ff3333'),
        (1e2, '#00ffff'),
    ]:
        ax.scatter(centres[:, 1], centres[:, 0], s=s, c=clr)
    # hide the tick labels, keeping the axes frame
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticklabels([])
def figure_segm_boundary_dist(segm_ref, segm, subfig_size=9):
    """ visualise the boundary distances between two segmentation
    :param ndarray segm_ref: reference segmentation
    :param ndarray segm: estimated segmentation
    :param int subfig_size: maximal sub-figure size
    :return Figure:
    :raises ImageDimensionError: if the two segmentations differ in shape
    >>> seg = np.zeros((100, 100))
    >>> seg[35:80, 10:65] = 1
    >>> fig = figure_segm_boundary_dist(seg, seg.T)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    if segm_ref.shape != segm.shape:
        raise ImageDimensionError('ref segm %r and segm %r should match' % (segm_ref.shape, segm.shape))
    # boundary pixels of both segmentations
    segr_boundary = segmentation.find_boundaries(segm_ref, mode='thick')
    segm_boundary = segmentation.find_boundaries(segm, mode='thick')
    # distance of every pixel to the estimated boundary
    segm_distance = ndimage.distance_transform_edt(~segm_boundary)
    # figure size proportional to the image aspect ratio, two panels wide
    norm_size = np.array(segm_ref.shape[:2]) / float(np.max(segm_ref.shape))
    fig_size = norm_size[::-1] * subfig_size * np.array([2, 1])
    fig, axarr = plt.subplots(ncols=2, figsize=fig_size)
    # left panel: full distance field with the reference contour on top
    axarr[0].set_title('boundary distances with reference contour')
    im = axarr[0].imshow(segm_distance, cmap=plt.cm.Greys)
    plt.colorbar(im, ax=axarr[0])
    axarr[0].contour(segm_ref, cmap=plt.cm.jet)
    # right panel: distances restricted to the reference boundary pixels
    segm_distance[~segr_boundary] = 0
    axarr[1].set_title('distance projected to ref. boundary')
    im = axarr[1].imshow(segm_distance, cmap=plt.cm.Reds)
    plt.colorbar(im, ax=axarr[1])
    return fig
| 39.288738 | 119 | 0.601099 |
9db44266da35c2004cef93f5015d3c3cef2f50e8 | 35,978 | py | Python | pattern/text/en/inflect.py | zvelo/pattern | 331a278f63590c1c415f102f935c9d10b62b4a03 | [
"BSD-3-Clause"
] | null | null | null | pattern/text/en/inflect.py | zvelo/pattern | 331a278f63590c1c415f102f935c9d10b62b4a03 | [
"BSD-3-Clause"
] | null | null | null | pattern/text/en/inflect.py | zvelo/pattern | 331a278f63590c1c415f102f935c9d10b62b4a03 | [
"BSD-3-Clause"
] | null | null | null | #### PATTERN | EN | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for English word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX English morphology word forms):
# 95% for pluralize()
# 96% for singularize()
# 95% for Verbs.find_lemma() (for regular verbs)
# 96% for Verbs.find_lexeme() (for regular verbs)
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
sys.path.pop(0)
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)
is_vowel = lambda ch: ch in VOWELS
#### ARTICLE #######################################################################################
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
# Rules are tried in order; the final empty pattern is a catch-all ("a").
# Materialized as a list (not map()): under Python 3, map() returns a one-shot
# iterator, so every indefinite_article() call after the first saw no rules.
RE_ARTICLE = [(re.compile(p), a) for p, a in (
    ("euler|hour(?!i)|heir|honest|hono", "an"), # exceptions: an hour, an honor
    # Abbreviations:
    # strings of capitals starting with a vowel-sound consonant followed by another consonant,
    # which are not likely to be real words.
    (r"(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]", "an"),
    (r"^[aefhilmnorsx][.-]"   , "an"),
    (r"^[a-z][.-]"            , "a" ),
    (r"^[^aeiouy]"            , "a" ), # consonants: a bear
    (r"^e[uw]"                , "a" ), # -eu like "you": a european
    (r"^onc?e"                , "a" ), # -o like "wa" : a one-liner
    (r"uni([^nmd]|mo)"        , "a" ), # -u like "you": a university
    (r"^u[bcfhjkqrst][aeiou]" , "a" ), # -u like "you": a uterus
    (r"^[aeiou]"              , "an"), # vowels: an owl
    (r"y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)", "an"), # y like "i": an yclept, a year
    (r""                      , "a" )  # guess "a"
)]
def definite_article(word):
    """ Returns the definite article ("the") for a given word. """
    return "the"
def indefinite_article(word):
    """ Returns the indefinite article for a given word.
        For example: indefinite_article("university") => "a" university.
    """
    head = word.split(" ")[0]
    # the last pattern in RE_ARTICLE is a catch-all, so a match is normally found
    hits = (art for rx, art in RE_ARTICLE if rx.search(head) is not None)
    return next(hits, None)
DEFINITE, INDEFINITE = \
    "definite", "indefinite"

def article(word, function=INDEFINITE):
    """ Returns the indefinite (a or an) or definite (the) article for the given word.
    """
    # conditional expression instead of the fragile `cond and a or b` idiom
    # (which silently falls through if the `and` branch evaluates falsy)
    return definite_article(word) if function == DEFINITE else indefinite_article(word)

# keep the function reachable inside referenced(), where the parameter
# name `article` shadows this module-level function
_article = article
def referenced(word, article=INDEFINITE):
    """ Returns a string with the article + the word.
    """
    # _article is the module-level article() (shadowed by the parameter name)
    return "{0} {1}".format(_article(word, article), word)
#print referenced("hour")
#print referenced("FBI")
#print referenced("bear")
#print referenced("one-liner")
#print referenced("european")
#print referenced("university")
#print referenced("uterus")
#print referenced("owl")
#print referenced("yclept")
#print referenced("year")
#### PLURALIZE #####################################################################################
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used in forms like "mother-in-law" and "man at arms".
plural_prepositions = set((
"about" , "before" , "during", "of" , "till" ,
"above" , "behind" , "except", "off" , "to" ,
"across" , "below" , "for" , "on" , "under",
"after" , "beneath", "from" , "onto" , "until",
"among" , "beside" , "in" , "out" , "unto" ,
"around" , "besides", "into" , "over" , "upon" ,
"at" , "between", "near" , "since", "with" ,
"athwart", "betwixt",
"beyond",
"but",
"by"))
# Inflection rules that are either:
# - general,
# - apply to a certain category of words,
# - apply to a certain category of words only in classical mode,
# - apply only in classical mode.
# Each rule is a (suffix, inflection, category, classic)-tuple.
plural_rules = [
# 0) Indefinite articles and demonstratives.
(( r"^a$|^an$", "some" , None, False),
( r"^this$", "these" , None, False),
( r"^that$", "those" , None, False),
( r"^any$", "all" , None, False)
), # 1) Possessive adjectives.
(( r"^my$", "our" , None, False),
( r"^your$", "your" , None, False),
( r"^thy$", "your" , None, False),
(r"^her$|^his$", "their" , None, False),
( r"^its$", "their" , None, False),
( r"^their$", "their" , None, False)
), # 2) Possessive pronouns.
(( r"^mine$", "ours" , None, False),
( r"^yours$", "yours" , None, False),
( r"^thine$", "yours" , None, False),
(r"^her$|^his$", "theirs" , None, False),
( r"^its$", "theirs" , None, False),
( r"^their$", "theirs" , None, False)
), # 3) Personal pronouns.
(( r"^I$", "we" , None, False),
( r"^me$", "us" , None, False),
( r"^myself$", "ourselves" , None, False),
( r"^you$", "you" , None, False),
(r"^thou$|^thee$", "ye" , None, False),
( r"^yourself$", "yourself" , None, False),
( r"^thyself$", "yourself" , None, False),
( r"^she$|^he$", "they" , None, False),
(r"^it$|^they$", "they" , None, False),
(r"^her$|^him$", "them" , None, False),
(r"^it$|^them$", "them" , None, False),
( r"^herself$", "themselves" , None, False),
( r"^himself$", "themselves" , None, False),
( r"^itself$", "themselves" , None, False),
( r"^themself$", "themselves" , None, False),
( r"^oneself$", "oneselves" , None, False)
), # 4) Words that do not inflect.
(( r"$", "" , "uninflected", False),
( r"$", "" , "uncountable", False),
( r"s$", "s" , "s-singular" , False),
( r"fish$", "fish" , None, False),
(r"([- ])bass$", "\\1bass" , None, False),
( r"ois$", "ois" , None, False),
( r"sheep$", "sheep" , None, False),
( r"deer$", "deer" , None, False),
( r"pox$", "pox" , None, False),
(r"([A-Z].*)ese$", "\\1ese" , None, False),
( r"itis$", "itis" , None, False),
(r"(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False)
), # 5) Irregular plural forms (e.g., mongoose, oxen).
(( r"atlas$", "atlantes" , None, True ),
( r"atlas$", "atlases" , None, False),
( r"beef$", "beeves" , None, True ),
( r"brother$", "brethren" , None, True ),
( r"child$", "children" , None, False),
( r"corpus$", "corpora" , None, True ),
( r"corpus$", "corpuses" , None, False),
( r"^cow$", "kine" , None, True ),
( r"ephemeris$", "ephemerides", None, False),
( r"ganglion$", "ganglia" , None, True ),
( r"genie$", "genii" , None, True ),
( r"genus$", "genera" , None, False),
( r"graffito$", "graffiti" , None, False),
( r"loaf$", "loaves" , None, False),
( r"money$", "monies" , None, True ),
( r"mongoose$", "mongooses" , None, False),
( r"mythos$", "mythoi" , None, False),
( r"octopus$", "octopodes" , None, True ),
( r"opus$", "opera" , None, True ),
( r"opus$", "opuses" , None, False),
( r"^ox$", "oxen" , None, False),
( r"penis$", "penes" , None, True ),
( r"penis$", "penises" , None, False),
( r"soliloquy$", "soliloquies", None, False),
( r"testis$", "testes" , None, False),
( r"trilby$", "trilbys" , None, False),
( r"turf$", "turves" , None, True ),
( r"numen$", "numena" , None, False),
( r"occiput$", "occipita" , None, True )
), # 6) Irregular inflections for common suffixes (e.g., synopses, mice, men).
(( r"man$", "men" , None, False),
( r"person$", "people" , None, False),
(r"([lm])ouse$", "\\1ice" , None, False),
( r"tooth$", "teeth" , None, False),
( r"goose$", "geese" , None, False),
( r"foot$", "feet" , None, False),
( r"zoon$", "zoa" , None, False),
( r"([csx])is$", "\\1es" , None, False)
), # 7) Fully assimilated classical inflections
# (e.g., vertebrae, codices).
(( r"ex$", "ices" , "ex-ices" , False),
( r"ex$", "ices" , "ex-ices*", True ), # * = classical mode
( r"um$", "a" , "um-a" , False),
( r"um$", "a" , "um-a*", True ),
( r"on$", "a" , "on-a" , False),
( r"a$", "ae" , "a-ae" , False),
( r"a$", "ae" , "a-ae*", True )
), # 8) Classical variants of modern inflections
# (e.g., stigmata, soprani).
(( r"trix$", "trices" , None, True),
( r"eau$", "eaux" , None, True),
( r"ieu$", "ieu" , None, True),
( r"([iay])nx$", "\\1nges" , None, True),
( r"en$", "ina" , "en-ina*", True),
( r"a$", "ata" , "a-ata*", True),
( r"is$", "ides" , "is-ides*", True),
( r"us$", "i" , "us-i*", True),
( r"us$", "us " , "us-us*", True),
( r"o$", "i" , "o-i*", True),
( r"$", "i" , "-i*", True),
( r"$", "im" , "-im*", True)
), # 9) -ch, -sh and -ss take -es in the plural
# (e.g., churches, classes).
(( r"([cs])h$", "\\1hes" , None, False),
( r"ss$", "sses" , None, False),
( r"x$", "xes" , None, False)
), # 10) -f or -fe sometimes take -ves in the plural
# (e.g, lives, wolves).
(( r"([aeo]l)f$", "\\1ves" , None, False),
( r"([^d]ea)f$", "\\1ves" , None, False),
( r"arf$", "arves" , None, False),
(r"([nlw]i)fe$", "\\1ves" , None, False),
), # 11) -y takes -ys if preceded by a vowel, -ies otherwise
# (e.g., storeys, Marys, stories).
((r"([aeiou])y$", "\\1ys" , None, False),
(r"([A-Z].*)y$", "\\1ys" , None, False),
( r"y$", "ies" , None, False)
), # 12) -o sometimes takes -os, -oes otherwise.
# -o is preceded by a vowel takes -os
# (e.g., lassos, potatoes, bamboos).
(( r"o$", "os", "o-os", False),
(r"([aeiou])o$", "\\1os" , None, False),
( r"o$", "oes" , None, False)
), # 13) Miltary stuff
# (e.g., Major Generals).
(( r"l$", "ls", "general-generals", False),
), # 14) Assume that the plural takes -s
# (cats, programmes, ...).
(( r"$", "s" , None, False),)
]
# For performance, compile the regular expressions once:
plural_rules = [[(re.compile(r[0]), r[1], r[2], r[3]) for r in grp] for grp in plural_rules]
# Suffix categories.
plural_categories = {
"uninflected": [
"bison" , "debris" , "headquarters" , "news" , "swine" ,
"bream" , "diabetes" , "herpes" , "pincers" , "trout" ,
"breeches" , "djinn" , "high-jinks" , "pliers" , "tuna" ,
"britches" , "eland" , "homework" , "proceedings", "whiting" ,
"carp" , "elk" , "innings" , "rabies" , "wildebeest"
"chassis" , "flounder" , "jackanapes" , "salmon" ,
"clippers" , "gallows" , "mackerel" , "scissors" ,
"cod" , "graffiti" , "measles" , "series" ,
"contretemps", "mews" , "shears" ,
"corps" , "mumps" , "species"
],
"uncountable": [
"advice" , "fruit" , "ketchup" , "meat" , "sand" ,
"bread" , "furniture" , "knowledge" , "mustard" , "software" ,
"butter" , "garbage" , "love" , "news" , "understanding",
"cheese" , "gravel" , "luggage" , "progress" , "water"
"electricity", "happiness" , "mathematics" , "research" ,
"equipment" , "information", "mayonnaise" , "rice"
],
"s-singular": [
"acropolis" , "caddis" , "dais" , "glottis" , "pathos" ,
"aegis" , "cannabis" , "digitalis" , "ibis" , "pelvis" ,
"alias" , "canvas" , "epidermis" , "lens" , "polis" ,
"asbestos" , "chaos" , "ethos" , "mantis" , "rhinoceros" ,
"bathos" , "cosmos" , "gas" , "marquis" , "sassafras" ,
"bias" , "glottis" , "metropolis" , "trellis"
],
"ex-ices": [
"codex" , "murex" , "silex"
],
"ex-ices*": [
"apex" , "index" , "pontifex" , "vertex" ,
"cortex" , "latex" , "simplex" , "vortex"
],
"um-a": [
"agendum" , "candelabrum", "desideratum" , "extremum" , "stratum" ,
"bacterium" , "datum" , "erratum" , "ovum"
],
"um-a*": [
"aquarium" , "emporium" , "maximum" , "optimum" , "stadium" ,
"compendium" , "enconium" , "medium" , "phylum" , "trapezium" ,
"consortium" , "gymnasium" , "memorandum" , "quantum" , "ultimatum" ,
"cranium" , "honorarium" , "millenium" , "rostrum" , "vacuum" ,
"curriculum" , "interregnum", "minimum" , "spectrum" , "velum" ,
"dictum" , "lustrum" , "momentum" , "speculum"
],
"on-a": [
"aphelion" , "hyperbaton" , "perihelion" ,
"asyndeton" , "noumenon" , "phenomenon" ,
"criterion" , "organon" , "prolegomenon"
],
"a-ae": [
"alga" , "alumna" , "vertebra"
],
"a-ae*": [
"abscissa" , "aurora" , "hyperbola" , "nebula" ,
"amoeba" , "formula" , "lacuna" , "nova" ,
"antenna" , "hydra" , "medusa" , "parabola"
],
"en-ina*": [
"foramen" , "lumen" , "stamen"
],
"a-ata*": [
"anathema" , "dogma" , "gumma" , "miasma" , "stigma" ,
"bema" , "drama" , "lemma" , "schema" , "stoma" ,
"carcinoma" , "edema" , "lymphoma" , "oedema" , "trauma" ,
"charisma" , "enema" , "magma" , "sarcoma" ,
"diploma" , "enigma" , "melisma" , "soma" ,
],
"is-ides*": [
"clitoris" , "iris"
],
"us-i*": [
"focus" , "nimbus" , "succubus" ,
"fungus" , "nucleolus" , "torus" ,
"genius" , "radius" , "umbilicus" ,
"incubus" , "stylus" , "uterus"
],
"us-us*": [
"apparatus" , "hiatus" , "plexus" , "status"
"cantus" , "impetus" , "prospectus" ,
"coitus" , "nexus" , "sinus" ,
],
"o-i*": [
"alto" , "canto" , "crescendo" , "soprano" ,
"basso" , "contralto" , "solo" , "tempo"
],
"-i*": [
"afreet" , "afrit" , "efreet"
],
"-im*": [
"cherub" , "goy" , "seraph"
],
"o-os": [
"albino" , "dynamo" , "guano" , "lumbago" , "photo" ,
"archipelago", "embryo" , "inferno" , "magneto" , "pro" ,
"armadillo" , "fiasco" , "jumbo" , "manifesto" , "quarto" ,
"commando" , "generalissimo", "medico" , "rhino" ,
"ditto" , "ghetto" , "lingo" , "octavo" , "stylo"
],
"general-generals": [
"Adjutant" , "Brigadier" , "Lieutenant" , "Major" , "Quartermaster",
"adjutant" , "brigadier" , "lieutenant" , "major" , "quartermaster"
]
}
def pluralize(word, pos=NOUN, custom={}, classical=True):
    """ Returns the plural of a given word, e.g., child => children.
        Handles nouns and adjectives, using classical inflection by default
        (i.e., where "matrix" pluralizes to "matrices" and not "matrixes").
        The custom dictionary is for user-defined replacements.
        Note: `custom` is only read, never mutated.
    """
    if word in custom:
        return custom[word]
    # Recurse genitives.
    # Remove the apostrophe and any trailing -s,
    # form the plural of the resultant noun, and then append an apostrophe (dog's => dogs').
    if word.endswith(("'", "'s")):
        # str.rstrip("'s") would strip *all* trailing apostrophes and
        # s-characters (e.g., "boss's" => "bo"), so cut the genitive
        # marker explicitly instead.
        if word.endswith("'s"):
            w = word[:-2]                # dog's => dog
        else:
            w = word[:-1]                # dogs'  => dogs
            if w.endswith("s"):
                w = w[:-1]               # plural genitive: dogs' => dog
        w = pluralize(w, pos, custom, classical)
        if w.endswith("s"):
            return w + "'"
        else:
            return w + "'s"
    # Recurse compound words
    # (e.g., Postmasters General, mothers-in-law, Roman deities).
    w = word.replace("-", " ").split(" ")
    if len(w) > 1:
        # Original test mixed `or`/`and` without parentheses, so lowercase
        # "general" always took this branch, skipping the military-rank
        # exclusion ("adjutant general" => "adjutant generals", not
        # "adjutants general").
        if w[1] in ("general", "General") and \
           w[0] not in plural_categories["general-generals"]:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        elif w[1] in plural_prepositions:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        else:
            return word.replace(w[-1], pluralize(w[-1], pos, custom, classical))
    # Only a very few number of adjectives inflect.
    n = range(len(plural_rules))
    if pos.startswith(ADJECTIVE):
        n = [0, 1]
    # Apply pluralization rules.
    for i in n:
        for suffix, inflection, category, classic in plural_rules[i]:
            # A general rule, or a classic rule in classical mode.
            if category is None:
                if not classic or (classic and classical):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
            # A rule pertaining to a specific category of words.
            if category is not None:
                if word in plural_categories[category] and (not classic or (classic and classical)):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
    return word
#print pluralize("part-of-speech")
#print pluralize("child")
#print pluralize("dog's")
#print pluralize("wolf")
#print pluralize("bear")
#print pluralize("kitchen knife")
#print pluralize("octopus", classical=True)
#print pluralize("matrix", classical=True)
#print pluralize("matrix", classical=False)
#print pluralize("my", pos=ADJECTIVE)
#### SINGULARIZE ###################################################################################
# Adapted from Bermi Ferrer's Inflector for Python:
# http://www.bermi.org/inflector/
# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
# Suffix-replacement rules for singularize(): scanned in order, first match
# wins.  Replacement strings use \1..\n backreferences into the pattern;
# singularize() strips backreferences for groups that did not participate
# in the match before substituting.
singular_rules = [
    (r'(?i)(.)ae$' , '\\1a' ),
    (r'(?i)(.)itis$' , '\\1itis' ),
    (r'(?i)(.)eaux$' , '\\1eau' ),
    (r'(?i)(quiz)zes$' , '\\1' ),
    (r'(?i)(matr)ices$' , '\\1ix' ),
    (r'(?i)(ap|vert|ind)ices$', '\\1ex' ),
    (r'(?i)^(ox)en' , '\\1' ),
    (r'(?i)(alias|status)es$' , '\\1' ),
    # NOTE(review): [octop|vir] is a character *class* (one char from
    # "octpvir|"), not alternation; it happens to map "octopi" => "octopus"
    # and "viri" => "virus" but matches more than intended — confirm.
    (r'(?i)([octop|vir])i$' , '\\1us' ),
    (r'(?i)(cris|ax|test)es$' , '\\1is' ),
    (r'(?i)(shoe)s$' , '\\1' ),
    (r'(?i)(o)es$' , '\\1' ),
    (r'(?i)(bus)es$' , '\\1' ),
    # NOTE(review): [m|l] is likewise a character class; works for
    # "mice"/"lice" but also matches "|ice".
    (r'(?i)([m|l])ice$' , '\\1ouse' ),
    (r'(?i)(x|ch|ss|sh)es$' , '\\1' ),
    (r'(?i)(m)ovies$' , '\\1ovie' ),
    (r'(?i)(.)ombies$' , '\\1ombie'),
    (r'(?i)(s)eries$' , '\\1eries'),
    (r'(?i)([^aeiouy]|qu)ies$', '\\1y' ),
    # -f, -fe sometimes take -ves in the plural
    # (e.g., lives, wolves).
    (r"([aeo]l)ves$" , "\\1f" ),
    (r"([^d]ea)ves$" , "\\1f" ),
    (r"arves$" , "arf" ),
    (r"erves$" , "erve" ),
    (r"([nlw]i)ves$" , "\\1fe" ),
    (r'(?i)([lr])ves$' , '\\1f' ),
    (r"([aeo])ves$" , "\\1ve" ),
    (r'(?i)(sive)s$' , '\\1' ),
    (r'(?i)(tive)s$' , '\\1' ),
    (r'(?i)(hive)s$' , '\\1' ),
    (r'(?i)([^f])ves$' , '\\1fe' ),
    # -ses suffixes.
    (r'(?i)(^analy)ses$' , '\\1sis' ),
    # Replacement below only uses groups 1-2; the unmatched inner groups
    # (3+) are removed by singularize()'s backreference-stripping loop.
    (r'(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'),
    (r'(?i)(.)opses$' , '\\1opsis'),
    (r'(?i)(.)yses$' , '\\1ysis' ),
    (r'(?i)(h|d|r|o|n|b|cl|p)oses$', '\\1ose'),
    (r'(?i)(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$', '\\1ose'),
    (r'(?i)(.)oses$' , '\\1osis' ),
    # -a
    (r'(?i)([ti])a$' , '\\1um' ),
    (r'(?i)(n)ews$' , '\\1ews' ),
    (r'(?i)s$' , '' ),
]
# For performance, compile the regular expressions only once:
singular_rules = [(re.compile(r[0]), r[1]) for r in singular_rules]
# Words whose plural form equals the singular; singularize() returns these
# unchanged.  The original literal was missing a comma after "wildebeest",
# so Python's implicit string concatenation fused it with "carp" into the
# bogus entry "wildebeestcarp", silently dropping both real words.
singular_uninflected = set((
    "bison", "bream", "breeches", "britches", "carp", "chassis",
    "christmas", "clippers", "cod", "contretemps", "corps", "debris",
    "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "georgia",
    "graffiti", "headquarters", "herpes", "high-jinks", "homework",
    "innings", "jackanapes", "mackerel", "measles", "mews", "mumps",
    "news", "pincers", "pliers", "proceedings", "rabies", "salmon",
    "scissors", "series", "shears", "species", "swine", "swiss", "trout",
    "tuna", "whiting", "wildebeest",
))
# Mass nouns that have no distinct singular/plural; singularize() returns
# them unchanged.  The original literal was missing a comma after "water",
# producing the fused entry "watercheese" and losing both words.
singular_uncountable = set((
    "advice", "bread", "butter", "cheese", "electricity", "equipment",
    "fruit", "furniture", "garbage", "gravel", "happiness", "information",
    "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise",
    "meat", "mustard", "news", "progress", "research", "rice", "sand",
    "software", "understanding", "water",
))
# Words ending in -ie whose plural simply appends -s (cutie => cuties);
# singularize() strips the trailing "s" via the `w.endswith(x + "s")` test.
# The original literal was missing a comma after "zombie", producing the
# fused entry "zombiebogie" and losing both words.
# NOTE(review): the "^"-prefixed entries ("^pie", "^tie") look like regex
# anchors, but singularize() compares them with str.endswith, so they can
# never match — confirm the intended semantics.
singular_ie = set((
    "alergie", "auntie", "beanie", "birdie", "bogie", "bombie", "collie",
    "cookie", "cutie", "doggie", "eyrie", "freebie", "goonie", "groupie",
    "hankie", "hippie", "hoagie", "hottie", "indie", "junkie", "laddie",
    "laramie", "lingerie", "meanie", "newbie", "nightie", "oldie", "^pie",
    "pixie", "quickie", "reverie", "rookie", "softie", "sortie", "stoolie",
    "sweetie", "techie", "^tie", "toughie", "valkyrie", "veggie", "weenie",
    "yuppie", "zombie",
))
# Irregular plural => singular mappings; singularize() matches these by
# suffix and substitutes case-insensitively.  The original dict listed the
# "children" key twice (a harmless but misleading duplicate) — removed.
singular_irregular = {
    "atlantes": "atlas",
    "atlases": "atlas",
    "axes": "axe",
    "beeves": "beef",
    "brethren": "brother",
    "children": "child",
    "corpora": "corpus",
    "corpuses": "corpus",
    "ephemerides": "ephemeris",
    "feet": "foot",
    "ganglia": "ganglion",
    "geese": "goose",
    "genera": "genus",
    "genii": "genie",
    "graffiti": "graffito",
    "helves": "helve",
    "kine": "cow",
    "leaves": "leaf",
    "loaves": "loaf",
    "men": "man",
    "mongooses": "mongoose",
    "monies": "money",
    "moves": "move",
    "mythoi": "mythos",
    "numena": "numen",
    "occipita": "occiput",
    "octopodes": "octopus",
    "opera": "opus",
    "opuses": "opus",
    "our": "my",
    "oxen": "ox",
    "penes": "penis",
    "penises": "penis",
    "people": "person",
    "sexes": "sex",
    "soliloquies": "soliloquy",
    "teeth": "tooth",
    "testes": "testis",
    "trilbys": "trilby",
    "turves": "turf",
    "zoa": "zoon",
}
def singularize(word, pos=NOUN, custom={}):
    """ Returns the singular of a given word.
        The custom dictionary is for user-defined replacements
        (looked up verbatim before any rule is applied).
    """
    # `custom` is only read, never mutated, so the shared mutable default
    # is safe here; membership is tested on the dict directly.
    if word in custom:
        return custom[word]
    # Recurse compound words (e.g. mothers-in-law).
    if "-" in word:
        w = word.split("-")
        if len(w) > 1 and w[1] in plural_prepositions:
            return singularize(w[0], pos, custom) + "-" + "-".join(w[1:])
    # dogs' => dog's
    if word.endswith("'"):
        return singularize(word[:-1]) + "'s"
    w = word.lower()
    # NOTE(review): the two loops below test x.endswith(w) rather than
    # w.endswith(x); preserved as-is — confirm the intended direction.
    for x in singular_uninflected:
        if x.endswith(w):
            return word
    for x in singular_uncountable:
        if x.endswith(w):
            return word
    # -ie words pluralized with plain -s: just drop the trailing "s".
    for x in singular_ie:
        if w.endswith(x + "s"):
            return w
    # Irregular plurals, replaced by suffix (case-insensitively).
    for x in singular_irregular:
        if w.endswith(x):
            return re.sub('(?i)' + x + '$', singular_irregular[x], word)
    # General suffix rules: first matching pattern wins.
    for suffix, inflection in singular_rules:
        m = suffix.search(word)
        if m:
            g = m.groups()
            # Remove backreferences for groups that did not participate in
            # the match, so re.sub() does not substitute unmatched groups.
            for k in range(len(g)):
                if g[k] is None:
                    inflection = inflection.replace('\\' + str(k + 1), '')
            return suffix.sub(inflection, word)
    return word
#### VERB CONJUGATION ##############################################################################
class Verbs(_Verbs):
    """English verb conjugation backed by the en-verbs.txt lexicon, with
    rule-based fallbacks (find_lemma / find_lexeme) for unknown verbs.
    """

    def __init__(self):
        _Verbs.__init__(self, os.path.join(MODULE, "en-verbs.txt"),
            language = "en",
            # Column order of the tense slots in each en-verbs.txt row.
            format = [0, 1, 2, 3, 7, 8, 17, 18, 19, 23, 25, 24, 16, 9, 10, 11, 15, 33, 26, 27, 28, 32],
            # Fallback slots used when a cell is empty in the lexicon.
            default = {
                1: 0, 2: 0, 3: 0, 7: 0,          # present singular => infinitive ("I walk")
                4: 7, 5: 7, 6: 7,                # present plural
               17: 25, 18: 25, 19: 25, 23: 25,   # past singular
               20: 23, 21: 23, 22: 23,           # past plural
                9: 16, 10: 16, 11: 16, 15: 16,   # present singular negated
               12: 15, 13: 15, 14: 15,           # present plural negated
               26: 33, 27: 33, 28: 33,           # past singular negated
               29: 32, 30: 32, 31: 32, 32: 33    # past plural negated
            })

    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
            This is problematic if a verb ending in -e is given in the past tense or gerund.
        """
        v = verb.lower()
        b = False  # True once an -ing/-ed suffix has been stripped below.
        # Contracted auxiliaries.
        if v in ("'m", "'re", "'s", "n't"):
            return "be"
        if v in ("'d", "'ll"):
            return "will"
        # Fixed: the original `v in ("'ve")` compared against a plain
        # string, making it a *substring* test (so e.g. "e" and "'v" also
        # mapped to "have"); a one-element tuple restores membership.
        if v in ("'ve",):
            return "have"
        # Third-person singular / plural -s forms.
        if v.endswith("s"):
            if v.endswith("ies") and len(v) > 3 and v[-4] not in VOWELS:
                return v[:-3]+"y" # complies => comply
            if v.endswith(("sses", "shes", "ches", "xes")):
                return v[:-2] # kisses => kiss
            return v[:-1]
        if v.endswith("ied") and re_vowel.search(v[:-3]) is not None:
            return v[:-3]+"y" # envied => envy
        # Strip -ing / -ed and remember that we did (b=True).
        if v.endswith("ing") and re_vowel.search(v[:-3]) is not None:
            v = v[:-3]; b=True; # chopping => chopp
        if v.endswith("ed") and re_vowel.search(v[:-2]) is not None:
            v = v[:-2]; b=True; # danced => danc
        if b:
            # Doubled consonant after short vowel: chopp => chop.
            if len(v) > 3 and v[-1] == v[-2] and v[-3] in VOWELS and v[-4] not in VOWELS and not v.endswith("ss"):
                return v[:-1]
            if v.endswith(("ick", "ack")):
                return v[:-1] # panick => panic
            # Guess common cases where the base form ends in -e:
            if v.endswith(("v", "z", "c", "i")):
                return v+"e" # danc => dance
            if v.endswith("g") and v.endswith(("dg", "lg", "ng", "rg")):
                return v+"e" # indulg => indulge
            if v.endswith(("b", "d", "g", "k", "l", "m", "r", "s", "t")) \
              and len(v) > 2 and v[-2] in VOWELS and not v[-3] in VOWELS \
              and not v.endswith("er"):
                return v+"e" # generat => generate
            if v.endswith("n") and v.endswith(("an", "in")) and not v.endswith(("ain", "oin", "oan")):
                return v+"e" # imagin => imagine
            if v.endswith("l") and len(v) > 1 and v[-2] not in VOWELS:
                return v+"e" # squabbl => squabble
            if v.endswith("f") and len(v) > 2 and v[-2] in VOWELS and v[-3] not in VOWELS:
                return v+"e" # chaf => chafe
            if v.endswith("e"):
                return v+"e" # decre => decree
            if v.endswith(("th", "ang", "un", "cr", "vr", "rs", "ps", "tr")):
                return v+"e"
        return v

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
            Layout appears to mirror the `format` mapping in __init__:
            [infinitive, 1sg, 2sg, 3sg, plural, present participle] followed
            by six identical past-tense slots — confirm against _Verbs.
        """
        v = verb.lower()
        if len(v) > 1 and v.endswith("e") and v[-2] not in VOWELS:
            # Verbs ending in a consonant followed by "e": dance, save, devote, evolve.
            return [v, v, v, v+"s", v, v[:-1]+"ing"] + [v+"d"]*6
        if len(v) > 1 and v.endswith("y") and v[-2] not in VOWELS:
            # Verbs ending in a consonant followed by "y": comply, copy, magnify.
            return [v, v, v, v[:-1]+"ies", v, v+"ing"] + [v[:-1]+"ied"]*6
        if v.endswith(("ss", "sh", "ch", "x")):
            # Verbs ending in sibilants: kiss, bless, box, polish, preach.
            return [v, v, v, v+"es", v, v+"ing"] + [v+"ed"]*6
        if v.endswith("ic"):
            # Verbs ending in -ic: panic, mimic.
            return [v, v, v, v+"es", v, v+"king"] + [v+"ked"]*6
        if len(v) > 1 and v[-1] not in VOWELS and v[-2] not in VOWELS:
            # Verbs ending in a consonant cluster: delight, clamp.
            return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
        if (len(v) > 1 and v.endswith(("y", "w")) and v[-2] in VOWELS) \
          or (len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] in VOWELS) \
          or (len(v) > 3 and v[-1] not in VOWELS and v[-3] in VOWELS and v[-4] in VOWELS):
            # Verbs ending in a long vowel or diphthong followed by a consonant: paint, devour, play.
            return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
        if len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] not in VOWELS:
            # Verbs ending in a short vowel followed by a consonant: chat, chop, or compel.
            # The final consonant is doubled before -ing / -ed.
            return [v, v, v, v+"s", v, v+v[-1]+"ing"] + [v+v[-1]+"ed"]*6
        return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
# Shared Verbs singleton backing the module-level API.
verbs = Verbs()
# Convenience aliases so callers can use conjugate()/lemma()/lexeme()/tenses()
# without touching the `verbs` instance directly.
conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#print conjugate("imaginarify", "part", parse=True)
#print conjugate("imaginarify", "part", parse=False)
#### COMPARATIVE & SUPERLATIVE #####################################################################
# Vowel characters used by the syllable-counting heuristic below.
VOWELS = "aeiouy"
# (comparative, superlative) pairs for irregular adjectives; grade() indexes
# this tuple with 0 for comparative and 1 for superlative.
# NOTE(review): "far" mixes paradigms ("further"/"farthest" instead of
# further/furthest or farther/farthest) — confirm this is intentional.
grade_irregular = {
    "bad": ( "worse", "worst"),
    "far": ("further", "farthest"),
    "good": ( "better", "best"),
    "hind": ( "hinder", "hindmost"),
    "ill": ( "worse", "worst"),
    "less": ( "lesser", "least"),
    "little": ( "less", "least"),
    "many": ( "more", "most"),
    "much": ( "more", "most"),
    "well": ( "better", "best")
}
# Adjectives that never take -er/-est; grade() prefixes "more"/"most".
grade_uninflected = ["giant", "glib", "hurt", "known", "madly"]
# Inflectional suffixes appended to regular adjectives by grade().
COMPARATIVE = "er"
SUPERLATIVE = "est"
def _count_syllables(word):
    """ Returns the estimated number of syllables in the word by counting vowel-groups.
    """
    # Drop a trailing silent -e before scanning — unless doing so would
    # leave an empty string (mirrors the original `endswith and ... or`
    # truthiness idiom, under which "e" itself still counts one syllable).
    if word.endswith("e") and word[:-1]:
        scanned = word[:-1]
    else:
        scanned = word
    groups = 0
    previous_was_vowel = False
    for character in scanned:
        is_vowel = character in VOWELS
        if is_vowel and not previous_was_vowel:
            groups += 1
        previous_was_vowel = is_vowel
    return groups
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given adjective.
        Pass suffix=COMPARATIVE (default) or suffix=SUPERLATIVE.
    """
    syllables = _count_syllables(adjective)
    if adjective in grade_irregular:
        # A number of adjectives inflect irregularly.
        return grade_irregular[adjective][0 if suffix == COMPARATIVE else 1]
    elif adjective in grade_uninflected:
        # A number of adjectives don't inflect at all.
        degree = "more" if suffix == COMPARATIVE else "most"
        return "%s %s" % (degree, adjective)
    elif syllables <= 2 and adjective.endswith("e"):
        # With one syllable and ending with an e: larger, wiser.
        suffix = suffix.lstrip("e")
    elif syllables == 1 and len(adjective) >= 3 \
     and adjective[-1] not in VOWELS and adjective[-2] in VOWELS and adjective[-3] not in VOWELS:
        # With one syllable ending with consonant-vowel-consonant: bigger, thinner.
        if not adjective.endswith("w"):  # Exceptions: lower, newer.
            suffix = adjective[-1] + suffix
    elif syllables == 1:
        # With one syllable ending with more consonants or vowels: briefer.
        pass
    elif syllables == 2 and adjective.endswith("y"):
        # With two syllables ending with a y: funnier, hairier.
        adjective = adjective[:-1] + "i"
    elif syllables == 2 and adjective[-2:] in ("er", "le", "ow"):
        # With two syllables and specific suffixes: gentler, narrower.
        pass
    else:
        # With three or more syllables: more generous, more important.
        degree = "more" if suffix == COMPARATIVE else "most"
        return "%s %s" % (degree, adjective)
    return adjective + suffix
def comparative(adjective):
    """ Returns the comparative form of the adjective (e.g., big => bigger)."""
    return grade(adjective, COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form of the adjective (e.g., big => biggest)."""
    return grade(adjective, SUPERLATIVE)
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
def attributive(adjective):
    """ Returns the attributive form; English adjectives are returned unchanged."""
    return adjective
def predicative(adjective):
    """ Returns the predicative form; English adjectives are returned unchanged."""
    return adjective
| 44.362515 | 114 | 0.470899 |
f8c67437c2b51435eef9d8efdb8673fb1f4cadbf | 665 | py | Python | Gathered CTF writeups/2015-09-16-csaw/forensics_400_sharpturn/flipuj.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/2015-09-16-csaw/forensics_400_sharpturn/flipuj.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/2015-09-16-csaw/forensics_400_sharpturn/flipuj.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | expected = 'efda2f556de36b9e9e1d62417c5f282d8961e2f8' # level 1
# Target git blob SHA-1 for each challenge level; every assignment
# overwrites the previous one, so only the last (level 4) hash is actually
# used — earlier lines are dead stores kept for reference.
expected = '354ebf392533dce06174f9c8c093036c138935f3' # level 2
expected = 'd961f81a588fcfd5e57bbea7e17ddae8a5e61333' # level 3
expected = 'f8d0839dd728cb9a723e32058dcc386070d5e3b5' # level 4
# Raw contents of the corrupted git object whose damaged byte we search for.
d = open('sharp_v4_f8d0_8096').read()
def githash(d):
    """Return the SHA-1 a git repository would assign to blob content `d`.

    Mirrors `git hash-object`: sha1(b"blob <size>\\0" + data).  Accepts
    `bytes` or `str` (encoded as UTF-8), which also makes the function work
    on Python 3 where the original str-formatting version raised TypeError
    in hashlib.sha1().
    """
    import hashlib
    if isinstance(d, str):
        d = d.encode("utf-8")
    header = "blob {}\0".format(len(d)).encode("ascii")
    return hashlib.sha1(header + d).hexdigest()
def bitflips(dat):
    """Yield every string obtained by replacing one character of `dat` with
    one of the 256 possible byte values.

    Despite the historical name, this enumerates single-*byte* substitutions
    (len(dat) * 256 candidates, in position-major order), not single-bit
    flips; each position also re-yields `dat` itself once (when the
    substituted value equals the original character).
    """
    for i in range(len(dat)):
        # Hoist the loop-invariant slices; the original also computed an
        # unused `c = ord(dat[i])` here, which has been removed.
        prefix, suffix = dat[:i], dat[i + 1:]
        for value in range(256):
            yield prefix + chr(value) + suffix
# Brute force: try every single-byte substitution of the corrupted object
# and print each candidate whose content hashes to the expected git blob
# SHA-1.  (Python 2 syntax: `print` statement below.)
for f in bitflips(d):
    if githash(f) == expected:
        print f
| 27.708333 | 68 | 0.64812 |
dde95f47b474eeeb685cfba0f072bb3ec79c826b | 140 | py | Python | netmiko/supermicro/__init__.py | sandyw777/netmiko | 09aa80ca88869f5c4753e2364babd72871f8934b | [
"MIT"
] | null | null | null | netmiko/supermicro/__init__.py | sandyw777/netmiko | 09aa80ca88869f5c4753e2364babd72871f8934b | [
"MIT"
] | null | null | null | netmiko/supermicro/__init__.py | sandyw777/netmiko | 09aa80ca88869f5c4753e2364babd72871f8934b | [
"MIT"
] | null | null | null | from netmiko.supermicro.smci_switch_usp import SmciSwitchUspTelnet, SmciSwitchUspSSH
# Public names re-exported by `from netmiko.supermicro import *`.
__all__ = ["SmciSwitchUspSSH", "SmciSwitchUspTelnet"]
| 35 | 84 | 0.85 |
e676c43ef1a5e91563eeb86af3e6beae53752e8c | 2,943 | py | Python | setup.py | victordomingos/optimize-images-x | c21d23fc77392b2947c91ccb470defd591f3d7b4 | [
"MIT"
] | 14 | 2021-04-15T19:26:07.000Z | 2022-01-08T15:23:05.000Z | setup.py | victordomingos/optimize-images-x | c21d23fc77392b2947c91ccb470defd591f3d7b4 | [
"MIT"
] | 2 | 2021-05-29T14:30:48.000Z | 2021-06-14T20:55:07.000Z | setup.py | victordomingos/optimize-images-x | c21d23fc77392b2947c91ccb470defd591f3d7b4 | [
"MIT"
] | null | null | null | # encoding: utf-8
import os
import sys
from setuptools import setup, find_packages
# Interpreter version gate: abort with a clear message on Python < 3.7
# (this file itself uses f-strings, and setup() declares python_requires
# '>=3.7' below).  Only (major, minor) are compared.
used = sys.version_info
required = (3, 7)
if used[:2] < required:
    msg = f'Unsupported Python version: ' \
          f'{sys.version_info.major}.{sys.version_info.minor}. ' \
          f'Python 3.7 or later is required.'
    sys.stderr.write(msg)
    sys.exit(1)
# One-line summary passed as the `description` field of setup() below
# (shown on the PyPI project listing).
short_desc = "A desktop app written in Python, that exposes and unlocks the " \
             "full power of Optimize Images in a nice graphical user interface, " \
             "to help you reduce the file size of images."
def read_readme(file_name):
    """Return the text of `file_name`, resolved relative to this script.

    The file is decoded as UTF-8 explicitly (the original relied on the
    platform's locale encoding, which breaks on e.g. Windows/cp1252 for a
    README containing non-ASCII characters), and __file__ is absolutized so
    the lookup does not depend on the current working directory.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
    with open(path, encoding="utf-8") as f:
        return f.read()
# Package metadata; executed by pip/setuptools at build/install time.
# Fixed: the classifiers list declared 'Programming Language :: Python :: 3.8'
# twice (one of the duplicates was presumably meant to be another version).
setup(name='optimize-images-x',
      version=__import__('optimize_images_x').__version__,
      description=short_desc,
      author="Victor Domingos",
      packages=find_packages(),
      include_package_data=True,
      long_description=read_readme('README.md'),  # for PyPI
      long_description_content_type="text/markdown",
      license='MIT',
      url='https://no-title.victordomingos.com/projects/optimize-images-x/',
      project_urls={
          'Documentation': 'https://github.com/victordomingos/optimize-images-x/',
          'Source': 'https://github.com/victordomingos/optimize-images-x',
          'Bug Reports': 'https://github.com/victordomingos/optimize-images-x/issues',
      },
      python_requires='>=3.7',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: MacOS X',
          'Intended Audience :: End Users/Desktop',
          'Intended Audience :: Developers',
          'Intended Audience :: Information Technology',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: Unix',
          'Operating System :: POSIX :: Linux ',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Topic :: Utilities',
          'Topic :: Multimedia :: Graphics',
          'Topic :: Multimedia :: Graphics :: Graphics Conversion',
      ],
      keywords='python3 pythonista-ios pil pillow image-processing ' \
               'image-compression image-optimization image-optimisation seo '
               'seo-optimization website-performance gui recursive non-recursive',
      install_requires=[
          'optimize-images==1.5.0',
          'pillow>=8.2.0',
          'piexif==1.1.3',
          'watchdog==2.1.2'
      ],
      entry_points={
          'console_scripts': ['optimize-images-x = optimize_images_x.__main__:main']
      },
      )
| 35.890244 | 86 | 0.608223 |
448cb814d9ccec51899120f2a7098935550a92e8 | 9,914 | py | Python | openpype/hosts/maya/api/plugin.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | 1 | 2020-09-21T14:55:33.000Z | 2020-09-21T14:55:33.000Z | openpype/hosts/maya/api/plugin.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | null | null | null | openpype/hosts/maya/api/plugin.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | null | null | null | from avalon import api
from avalon.vendor import qargparse
import avalon.maya
from openpype.api import PypeCreatorMixin
def get_reference_node(members, log=None):
    """Get the reference node from the container members

    Args:
        members: list of node names
        log: optional logger; created lazily when a warning must be emitted.

    Returns:
        str: Reference node name.

    """
    from maya import cmds

    # Collect unique reference objects (objects only, which drops the
    # `.placeHolderList[]` attribute entries), skipping any
    # `:sharedReferenceNode` and `_UNKNOWN_REF_NODE_` (PLN-160) entries.
    ignored_prefixes = ("sharedReferenceNode", "_UNKNOWN_REF_NODE_")
    references = {
        ref
        for ref in cmds.ls(members, exactType="reference", objectsOnly=True)
        if not ref.rsplit(":", 1)[-1].startswith(ignored_prefixes)
    }

    assert references, "No reference node found in container"

    # The highest (outermost) reference node is the one with the fewest
    # parent reference nodes.
    highest = min(references,
                  key=lambda ref: len(get_reference_node_parents(ref)))

    # Warn the user when we're taking the highest reference node.
    if len(references) > 1:
        if not log:
            from openpype.lib import PypeLogger
            log = PypeLogger().get_logger(__name__)

        log.warning("More than one reference node found in "
                    "container, using highest reference node: "
                    "%s (in: %s)", highest, list(references))

    return highest
def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

    Args:
        ref (str): reference node.

    Returns:
        list: The upstream parent reference nodes, innermost first.

    """
    from maya import cmds

    parents = []
    current = cmds.referenceQuery(ref,
                                  referenceNode=True,
                                  parent=True)
    # Walk upwards until a reference node has no parent.
    while current:
        parents.append(current)
        current = cmds.referenceQuery(current,
                                      referenceNode=True,
                                      parent=True)
    return parents
class Creator(PypeCreatorMixin, avalon.maya.Creator):
    """Maya creator combining Avalon's Creator with OpenPype's
    PypeCreatorMixin; adds no behavior of its own."""
    pass
class ReferenceLoader(api.Loader):
    """A basic ReferenceLoader for Maya

    This will implement the basic behavior for a loader to inherit from that
    will containerize the reference and will implement the `remove` and
    `update` logic.

    """

    options = [
        qargparse.Integer(
            "count",
            label="Count",
            default=1,
            min=1,
            help="How many times to load?"
        ),
        qargparse.Double3(
            "offset",
            label="Position Offset",
            help="Offset loaded models for easier selection."
        )
    ]

    def load(
        self,
        context,
        name=None,
        namespace=None,
        options=None
    ):
        """Reference the representation `count` times and containerize it.

        Args:
            context (dict): asset/subset/version/representation context.
            name (str, optional): subset name used for the container.
            namespace (str, optional): namespace; a unique one is generated
                per copy when not provided.
            options (dict, optional): loader options ("count", "offset").

        Returns:
            list: created containers (one per copy), or None when the
                concrete loader produced no nodes.
        """
        import os
        from avalon.maya import lib
        from avalon.maya.pipeline import containerise

        assert os.path.exists(self.fname), "%s does not exist." % self.fname

        # `options` defaults to None; normalize it so the `.get()` and
        # `in` lookups below cannot fail with an AttributeError when the
        # caller omits it.
        options = options or {}

        asset = context['asset']

        loaded_containers = []
        count = options.get("count") or 1

        for c in range(0, count):
            namespace = namespace or lib.unique_namespace(
                asset["name"] + "_",
                prefix="_" if asset["name"][0].isdigit() else "",
                suffix="_",
            )

            # Offset loaded subset for easier selection of stacked copies.
            if "offset" in options:
                offset = [i * c for i in options["offset"]]
                options["translate"] = offset

            self.log.info(options)

            self.process_reference(
                context=context,
                name=name,
                namespace=namespace,
                options=options
            )

            # Only containerize if any nodes were loaded by the Loader
            nodes = self[:]
            if not nodes:
                return

            # FIXME: there is probably better way to do this for looks.
            if "look" in self.families:
                loaded_containers.append(containerise(
                    name=name,
                    namespace=namespace,
                    nodes=nodes,
                    context=context,
                    loader=self.__class__.__name__
                ))
            else:
                ref_node = get_reference_node(nodes, self.log)
                loaded_containers.append(containerise(
                    name=name,
                    namespace=namespace,
                    nodes=[ref_node],
                    context=context,
                    loader=self.__class__.__name__
                ))

            # Reset so the next iteration generates a fresh namespace.
            # (A dead `c += 1` used to live here; the loop variable is
            # reassigned by `for` on every iteration, so it had no effect.)
            namespace = None

        return loaded_containers

    def process_reference(self, context, name, namespace, options):
        """To be implemented by subclass.

        Note: parameter renamed from `data` to `options` to match the
        `options=` keyword with which load() invokes this method.
        """
        raise NotImplementedError("Must be implemented by subclass")

    def update(self, container, representation):
        """Switch the container's reference to another representation file."""
        import os
        from maya import cmds

        node = container["objectName"]

        path = api.get_representation_path(representation)

        # Get reference node from container members
        members = cmds.sets(node, query=True, nodesOnly=True)
        reference_node = get_reference_node(members, self.log)

        file_type = {
            "ma": "mayaAscii",
            "mb": "mayaBinary",
            "abc": "Alembic"
        }.get(representation["name"])

        assert file_type, "Unsupported representation: %s" % representation

        assert os.path.exists(path), "%s does not exist." % path

        # Need to save alembic settings and reapply, cause referencing resets
        # them to incoming data.
        alembic_attrs = ["speed", "offset", "cycleType"]
        alembic_data = {}
        if representation["name"] == "abc":
            alembic_nodes = cmds.ls(
                "{}:*".format(members[0].split(":")[0]), type="AlembicNode"
            )
            if alembic_nodes:
                for attr in alembic_attrs:
                    node_attr = "{}.{}".format(alembic_nodes[0], attr)
                    alembic_data[attr] = cmds.getAttr(node_attr)
            else:
                cmds.warning(
                    "No alembic nodes found in {}".format(
                        cmds.ls("{}:*".format(members[0].split(":")[0]))
                    )
                )

        try:
            content = cmds.file(path,
                                loadReference=reference_node,
                                type=file_type,
                                returnNewNodes=True)
        except RuntimeError as exc:
            # When changing a reference to a file that has load errors the
            # command will raise an error even if the file is still loaded
            # correctly (e.g. when raising errors on Arnold attributes)
            # When the file is loaded and has content, we consider it's fine.
            if not cmds.referenceQuery(reference_node, isLoaded=True):
                raise

            content = cmds.referenceQuery(reference_node,
                                          nodes=True,
                                          dagPath=True)
            if not content:
                raise

            self.log.warning("Ignoring file read error:\n%s", exc)

        # Reapply alembic settings.
        if representation["name"] == "abc":
            alembic_nodes = cmds.ls(
                "{}:*".format(members[0].split(":")[0]), type="AlembicNode"
            )
            if alembic_nodes:
                for attr, value in alembic_data.items():
                    cmds.setAttr("{}.{}".format(alembic_nodes[0], attr), value)

        # Fix PLN-40 for older containers created with Avalon that had the
        # `.verticesOnlySet` set to True.
        if cmds.getAttr("{}.verticesOnlySet".format(node)):
            self.log.info("Setting %s.verticesOnlySet to False", node)
            cmds.setAttr("{}.verticesOnlySet".format(node), False)

        # Remove any placeHolderList attribute entries from the set that
        # are remaining from nodes being removed from the referenced file.
        members = cmds.sets(node, query=True)
        invalid = [x for x in members if ".placeHolderList" in x]
        if invalid:
            cmds.sets(invalid, remove=node)

        # Update metadata
        cmds.setAttr("{}.representation".format(node),
                     str(representation["_id"]),
                     type="string")

    def remove(self, container):
        """Remove an existing `container` from Maya scene

        Deprecated; this functionality is replaced by `api.remove()`

        Arguments:
            container (openpype:container-1.0): Which container
                to remove from scene.

        """
        from maya import cmds

        node = container["objectName"]

        # Assume asset has been referenced
        members = cmds.sets(node, query=True)
        reference_node = get_reference_node(members, self.log)

        assert reference_node, ("Imported container not supported; "
                                "container must be referenced.")

        self.log.info("Removing '%s' from Maya.." % container["name"])

        namespace = cmds.referenceQuery(reference_node, namespace=True)
        fname = cmds.referenceQuery(reference_node, filename=True)
        cmds.file(fname, removeReference=True)

        try:
            cmds.delete(node)
        except ValueError:
            # Already implicitly deleted by Maya upon removing reference
            pass

        try:
            # If container is not automatically cleaned up by May (issue #118)
            cmds.namespace(removeNamespace=namespace,
                           deleteNamespaceContent=True)
        except RuntimeError:
            pass
1be49e397229a69fd83b030f6962e7eeaf6776cf | 5,983 | py | Python | pr3/linear.py | mikss/pr3 | 0cab2a6edf0ff6ed56e1d91132bac72be95d8ff6 | [
"MIT"
] | null | null | null | pr3/linear.py | mikss/pr3 | 0cab2a6edf0ff6ed56e1d91132bac72be95d8ff6 | [
"MIT"
] | null | null | null | pr3/linear.py | mikss/pr3 | 0cab2a6edf0ff6ed56e1d91132bac72be95d8ff6 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, Set, Tuple, Union
import numpy as np
from scipy.stats import gennorm
from sklearn.linear_model import lars_path_gram
from sklearn.utils.validation import check_random_state
class ProjectionVector:
beta: np.ndarray
q: int
def __init__(self, q: int):
self.q = q
def _normalize(self) -> None:
try:
self.beta = self.beta / np.linalg.norm(self.beta, ord=self.q, axis=0)
except AttributeError:
raise AttributeError("Must set attribute `beta` before normalizing.")
class ProjectionSampler(ProjectionVector):
def __init__(
self,
p: int,
q: int = 2,
sparsity: int = -1,
random_state: Optional[Union[int, np.random.RandomState]] = None,
):
"""Generates a normalized random projection vector (for initialization purposes).
Args:
p: The dimension of the vector.
q: The order of ell^q unit ball from which to sample.
sparsity: The number of non-zero coordinates; pass -1 for a dense vector.
random_state: NumPy random state.
"""
super().__init__(q=q)
_rs = check_random_state(random_state)
if sparsity > 0:
q_generalized_normal = np.zeros((p, 1))
idxs = _rs.choice(a=p, size=sparsity, replace=False)
q_generalized_normal[idxs, 0] = gennorm.rvs(beta=q, size=sparsity)
else:
q_generalized_normal = gennorm.rvs(beta=q, size=(p, 1))
self.beta = q_generalized_normal
self._normalize()
class SufficientStatisticsRegressionProjection(ABC, ProjectionVector):
def fit(self, x: np.ndarray, y: np.ndarray, w: Optional[np.ndarray] = None) -> None:
"""Fits weighted least squares (WLS) linear regression model via sufficient statistics.
Args:
x: design matrix of shape (n_samples, n_features)
y: response matrix of shape (n_samples, n_responses)
w: weight vector of shape (n_samples,)
"""
w = self._reshape_weights(x.shape[0], w)
xtx, xty = self.compute_sufficient_statistics(x, y, w)
wess = self.compute_effective_sample_size(w)
self._fit_sufficient(xtx, xty, wess)
def fit_normalize(self, x: np.ndarray, y: np.ndarray, w: Optional[np.ndarray] = None) -> None:
self.fit(x, y, w)
self._normalize()
def predict(self, x: np.ndarray) -> np.ndarray:
"""Predicts from linear model."""
return x @ self.beta
@staticmethod
def _reshape_weights(n: int, w: Optional[np.ndarray] = None) -> np.ndarray:
if w is None:
w = np.ones(n)
return w.reshape((n, 1))
@staticmethod
def compute_sufficient_statistics(
x: np.ndarray, y: np.ndarray, w: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes (weighted) sufficient statistics for WLS regression (Gram xtx, cross xty)."""
xtx = np.multiply(x, w).T @ x / w.sum()
xty = np.multiply(x, w).T @ y / w.sum()
return xtx, xty
@staticmethod
def compute_effective_sample_size(w: np.ndarray) -> float:
"""Computes effective sample size given sample weights."""
return w.sum() ** 2.0 / (w ** 2.0).sum()
@abstractmethod
def _fit_sufficient(self, xtx, xty, wess) -> None:
"""Fits linear model using only second-order sufficient statistics."""
class LowerUpperRegressionProjection(SufficientStatisticsRegressionProjection):
ridge: float
def __init__(self, q: int = 2, ridge: float = 0.0):
"""Instantiates a WLS linear model with ridge regularization and q-normalized beta.
This implementation computes regression coefficients by solving a system of linear equations
via the LU decomposition, which is the technique implemented by `gesv`, the LAPACK routine
called by `np.linalg.solve`.
Args:
q: The order of ell^q norm with which to normalize resultant beta.
ridge: Regularization level.
"""
super().__init__(q=q)
self.ridge = ridge
def _fit_sufficient(self, xtx, xty, wess) -> None:
self.beta = np.linalg.solve(xtx + self.ridge * np.eye(xtx.shape[0]), xty)
class LeastAngleRegressionProjection(SufficientStatisticsRegressionProjection):
max_iter: int
min_corr: float
alpha: np.ndarray
def __init__(self, q: int = 2, max_iter: int = 100, min_corr: float = 1e-4):
"""Instantiates a WLS linear regression model with sparse and q-normalized beta.
This implementation computes regression coefficients by iteratively traversing the LASSO
regularization path, which serves as an efficient way to solve the l1-regularized least
squares optimization problem (on par with, say, FISTA, but typically more efficient than
black-box quadratic programming methods).
Args:
q: The order of ell^q norm with which to normalize resultant beta.
max_iter: Maximum number of iterations.
"""
super().__init__(q=q)
self.max_iter = max_iter
self.min_corr = min_corr
def _fit_sufficient(self, xtx, xty, wess) -> None:
self.alpha, _, coefs = lars_path_gram(
Xy=xty[:, 0],
Gram=xtx,
n_samples=wess,
max_iter=self.max_iter,
alpha_min=self.min_corr,
method="lasso",
)
self.beta = coefs[:, [-1]]
class ProjectionOptimizerRegistry(Enum):
lower_upper = LowerUpperRegressionProjection
least_angle = LeastAngleRegressionProjection
@classmethod
def valid_mnemonics(cls) -> Set[str]:
return set(name for name, _ in cls.__members__.items())
@classmethod
def valid_regressors(cls) -> Set[SufficientStatisticsRegressionProjection]:
return set(value for _, value in cls.__members__.items())
| 36.260606 | 100 | 0.643824 |
9adb14619bad075a83dd2a54cfc6a56d6336f1e4 | 304 | py | Python | Part_1/ch04_list/4_5_magic8Ball.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | [
"MIT"
] | null | null | null | Part_1/ch04_list/4_5_magic8Ball.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | [
"MIT"
] | null | null | null | Part_1/ch04_list/4_5_magic8Ball.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | [
"MIT"
import random

# Canned Magic 8-Ball answers.
messages = [
    'It is certain',
    'It is decidedly so',
    'Yes definitely',
    'Reply hazy try again',
    'Ask again later',
    'Concentrate and ask again',
    'My reply is no',
    'Outlook not so good',
    'Very doubtful',
]

# random.choice is the idiomatic equivalent of
# messages[random.randint(0, len(messages) - 1)].
print(random.choice(messages))
8e842b8f72f4f446075495438693df5e5f6da927 | 11,564 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchhostranges_5d2115168a5da6fa79eb866056d9b27b.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchhostranges_5d2115168a5da6fa79eb866056d9b27b.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchhostranges_5d2115168a5da6fa79eb866056d9b27b.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SwitchHostRanges(Base):
    """The Switch Host Ranges window describes the switch Host Ranges and its configuration parameters. This window is available for Switch configuration only. When the user selects a column in the grid view, the entire composite column break-up appears in a split pane. When a user selects a non-composite cell or column in the grid, like, Enable, the Preview pane displays empty cells for all the rows.
    The SwitchHostRanges class encapsulates a list of switchHostRanges resources that are managed by the user.
    A list of resources can be retrieved from the server using the SwitchHostRanges.find() method.
    The list can be managed by using the SwitchHostRanges.add() and SwitchHostRanges.remove() methods.
    """
    # NOTE(review): this class follows the generated ixnetwork_restpy binding
    # pattern — every property delegates to _get_attribute/_set_attribute
    # through _SDM_ATT_MAP; edit with care.
    __slots__ = ()
    _SDM_NAME = 'switchHostRanges'
    # Maps the Python-facing attribute names to their REST (SDM) field names.
    _SDM_ATT_MAP = {
        'EnableStaticIp': 'enableStaticIp',
        'EnableVlan': 'enableVlan',
        'Enabled': 'enabled',
        'HostMacAddress': 'hostMacAddress',
        'HostStaticIpv4Address': 'hostStaticIpv4Address',
        'HostVlanid': 'hostVlanid',
        'NumberOfHostsPerPort': 'numberOfHostsPerPort',
    }
    def __init__(self, parent):
        super(SwitchHostRanges, self).__init__(parent)
    @property
    def EnableStaticIp(self):
        """
        Returns
        -------
        - bool: If selected, Host Static IPv4 Address is available for change. It indicates if static IP will be configured in simulated Host.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableStaticIp'])
    @EnableStaticIp.setter
    def EnableStaticIp(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableStaticIp'], value)
    @property
    def EnableVlan(self):
        """
        Returns
        -------
        - bool: If selected, Host VLAN ID is available for change. It indicates if VLAN will be configured in Host.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableVlan'])
    @EnableVlan.setter
    def EnableVlan(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableVlan'], value)
    @property
    def Enabled(self):
        """
        Returns
        -------
        - bool: If selected, the Host Range gets configured in the switch.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
    @property
    def HostMacAddress(self):
        """
        Returns
        -------
        - str: The MAC Address of the simulated Host. The composite column break-up appears in a split pane on the right.
        """
        return self._get_attribute(self._SDM_ATT_MAP['HostMacAddress'])
    @HostMacAddress.setter
    def HostMacAddress(self, value):
        self._set_attribute(self._SDM_ATT_MAP['HostMacAddress'], value)
    @property
    def HostStaticIpv4Address(self):
        """
        Returns
        -------
        - str: The static IPv4 Address of the Host. This is available only if Enable Static IP is selected. The composite column break-up appears in a split pane on the right.
        """
        return self._get_attribute(self._SDM_ATT_MAP['HostStaticIpv4Address'])
    @HostStaticIpv4Address.setter
    def HostStaticIpv4Address(self, value):
        self._set_attribute(self._SDM_ATT_MAP['HostStaticIpv4Address'], value)
    @property
    def HostVlanid(self):
        """
        Returns
        -------
        - str: The VLAN ID of the Host. This is available only if Enable VLAN is selected. The composite column break-up appears in a split pane on the right.
        """
        return self._get_attribute(self._SDM_ATT_MAP['HostVlanid'])
    @HostVlanid.setter
    def HostVlanid(self, value):
        self._set_attribute(self._SDM_ATT_MAP['HostVlanid'], value)
    @property
    def NumberOfHostsPerPort(self):
        """
        Returns
        -------
        - number: Specify the number of hosts to be configured for every switch port.
        """
        return self._get_attribute(self._SDM_ATT_MAP['NumberOfHostsPerPort'])
    @NumberOfHostsPerPort.setter
    def NumberOfHostsPerPort(self, value):
        self._set_attribute(self._SDM_ATT_MAP['NumberOfHostsPerPort'], value)
    def update(self, EnableStaticIp=None, EnableVlan=None, Enabled=None, HostMacAddress=None, HostStaticIpv4Address=None, HostVlanid=None, NumberOfHostsPerPort=None):
        """Updates switchHostRanges resource on the server.
        Args
        ----
        - EnableStaticIp (bool): If selected, Host Static IPv4 Address is available for change. It indicates if static IP will be configured in simulated Host.
        - EnableVlan (bool): If selected, Host VLAN ID is available for change. It indicates if VLAN will be configured in Host.
        - Enabled (bool): If selected, the Host Range gets configured in the switch.
        - HostMacAddress (str): The MAC Address of the simulated Host. The composite column break-up appears in a split pane on the right.
        - HostStaticIpv4Address (str): The static IPv4 Address of the Host. This is available only if Enable Static IP is selected. The composite column break-up appears in a split pane on the right.
        - HostVlanid (str): The VLAN ID of the Host. This is available only if Enable VLAN is selected. The composite column break-up appears in a split pane on the right.
        - NumberOfHostsPerPort (number): Specify the number of hosts to be configured for every switch port.
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, EnableStaticIp=None, EnableVlan=None, Enabled=None, HostMacAddress=None, HostStaticIpv4Address=None, HostVlanid=None, NumberOfHostsPerPort=None):
        """Adds a new switchHostRanges resource on the server and adds it to the container.
        Args
        ----
        - EnableStaticIp (bool): If selected, Host Static IPv4 Address is available for change. It indicates if static IP will be configured in simulated Host.
        - EnableVlan (bool): If selected, Host VLAN ID is available for change. It indicates if VLAN will be configured in Host.
        - Enabled (bool): If selected, the Host Range gets configured in the switch.
        - HostMacAddress (str): The MAC Address of the simulated Host. The composite column break-up appears in a split pane on the right.
        - HostStaticIpv4Address (str): The static IPv4 Address of the Host. This is available only if Enable Static IP is selected. The composite column break-up appears in a split pane on the right.
        - HostVlanid (str): The VLAN ID of the Host. This is available only if Enable VLAN is selected. The composite column break-up appears in a split pane on the right.
        - NumberOfHostsPerPort (number): Specify the number of hosts to be configured for every switch port.
        Returns
        -------
        - self: This instance with all currently retrieved switchHostRanges resources using find and the newly added switchHostRanges resources available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained switchHostRanges resources in this instance from the server.
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, EnableStaticIp=None, EnableVlan=None, Enabled=None, HostMacAddress=None, HostStaticIpv4Address=None, HostVlanid=None, NumberOfHostsPerPort=None):
        """Finds and retrieves switchHostRanges resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchHostRanges resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all switchHostRanges resources from the server.
        Args
        ----
        - EnableStaticIp (bool): If selected, Host Static IPv4 Address is available for change. It indicates if static IP will be configured in simulated Host.
        - EnableVlan (bool): If selected, Host VLAN ID is available for change. It indicates if VLAN will be configured in Host.
        - Enabled (bool): If selected, the Host Range gets configured in the switch.
        - HostMacAddress (str): The MAC Address of the simulated Host. The composite column break-up appears in a split pane on the right.
        - HostStaticIpv4Address (str): The static IPv4 Address of the Host. This is available only if Enable Static IP is selected. The composite column break-up appears in a split pane on the right.
        - HostVlanid (str): The VLAN ID of the Host. This is available only if Enable VLAN is selected. The composite column break-up appears in a split pane on the right.
        - NumberOfHostsPerPort (number): Specify the number of hosts to be configured for every switch port.
        Returns
        -------
        - self: This instance with matching switchHostRanges resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of switchHostRanges data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the switchHostRanges resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 50.719298 | 404 | 0.700017 |
6c633740162a793795bd68de8881b12febeac442 | 7,661 | py | Python | base/lib/pythonbin/pymongo/ssl_support.py | threefoldtech/sandbox_osx | e2a5ea812c3789dea40113719dbad6d6ee7cd720 | [
"Apache-2.0"
] | 34 | 2018-07-13T11:30:46.000Z | 2022-01-05T13:48:10.000Z | venv/lib/python3.6/site-packages/pymongo/ssl_support.py | HeyWeiPan/vnpy_crypto | 844381797a475a01c05a4e162592a5a6e3a48032 | [
"MIT"
] | 9 | 2020-03-24T16:56:41.000Z | 2022-03-11T23:45:08.000Z | env/lib/python3.6/site-packages/pymongo/ssl_support.py | bcss-pm/incidents | 927a102104b5718fe118bceb307d3cd633d6699b | [
"MIT"
] | 22 | 2018-07-13T11:30:48.000Z | 2021-09-25T13:30:08.000Z | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Support for SSL in PyMongo."""
import atexit
import sys
import threading
HAVE_SSL = True
try:
import ssl
except ImportError:
HAVE_SSL = False
HAVE_CERTIFI = False
try:
import certifi
HAVE_CERTIFI = True
except ImportError:
pass
HAVE_WINCERTSTORE = False
try:
from wincertstore import CertFile
HAVE_WINCERTSTORE = True
except ImportError:
pass
from bson.py3compat import string_type
from pymongo.errors import ConfigurationError
_WINCERTSLOCK = threading.Lock()
_WINCERTS = None
_PY37PLUS = sys.version_info[:2] >= (3, 7)
if HAVE_SSL:
try:
# Python 2.7.9+, 3.2+, PyPy 2.5.1+, etc.
from ssl import SSLContext
except ImportError:
from pymongo.ssl_context import SSLContext
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the
three values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or
``ssl.CERT_REQUIRED``.
"""
if value is None:
return value
elif isinstance(value, string_type) and hasattr(ssl, value):
value = getattr(ssl, value)
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ValueError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED`" % (option,))
    def _load_wincerts():
        """Set _WINCERTS to an instance of wincertstore.Certfile."""
        global _WINCERTS
        certfile = CertFile()
        # Pull certificates from the Windows "CA" and "ROOT" system stores.
        certfile.addstore("CA")
        certfile.addstore("ROOT")
        # The CertFile is backed by a temp file; close it at interpreter exit.
        atexit.register(certfile.close)
        _WINCERTS = certfile
# XXX: Possible future work.
# - OCSP? Not supported by python at all.
# http://bugs.python.org/issue17123
# - Adding an ssl_context keyword argument to MongoClient? This might
# be useful for sites that have unusual requirements rather than
# trying to expose every SSLContext option through a keyword/uri
# parameter.
    def get_ssl_context(*args):
        """Create and return an SSLContext object.

        ``args`` is the 7-tuple ``(certfile, keyfile, passphrase, ca_certs,
        cert_reqs, crlfile, match_hostname)`` unpacked below; each element
        may be None when the corresponding ssl_* option was not supplied.
        """
        (certfile,
         keyfile,
         passphrase,
         ca_certs,
         cert_reqs,
         crlfile,
         match_hostname) = args
        # Certificate verification defaults to CERT_REQUIRED when unspecified.
        verify_mode = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
        # Note PROTOCOL_SSLv23 is about the most misleading name imaginable.
        # This configures the server and client to negotiate the
        # highest protocol version they both support. A very good thing.
        # PROTOCOL_TLS_CLIENT was added in CPython 3.6, deprecating
        # PROTOCOL_SSLv23.
        ctx = SSLContext(
            getattr(ssl, "PROTOCOL_TLS_CLIENT", ssl.PROTOCOL_SSLv23))
        # SSLContext.check_hostname was added in CPython 2.7.9 and 3.4.
        # PROTOCOL_TLS_CLIENT (added in Python 3.6) enables it by default.
        if hasattr(ctx, "check_hostname"):
            if _PY37PLUS and verify_mode != ssl.CERT_NONE:
                # Python 3.7 uses OpenSSL's hostname matching implementation
                # making it the obvious version to start using this with.
                # Python 3.6 might have been a good version, but it suffers
                # from https://bugs.python.org/issue32185.
                # We'll use our bundled match_hostname for older Python
                # versions, which also supports IP address matching
                # with Python < 3.5.
                ctx.check_hostname = match_hostname
            else:
                ctx.check_hostname = False
        if hasattr(ctx, "options"):
            # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that
            # up to date versions of MongoDB 2.4 and above already disable
            # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7
            # and >= 3.3.4 and SSLv3 in >= 3.4.3. There is no way for us to do
            # any of this explicitly for python 2.6 or 2.7 before 2.7.9.
            ctx.options |= getattr(ssl, "OP_NO_SSLv2", 0)
            ctx.options |= getattr(ssl, "OP_NO_SSLv3", 0)
            # OpenSSL >= 1.0.0
            ctx.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
        if certfile is not None:
            try:
                if passphrase is not None:
                    vi = sys.version_info
                    # Since python just added a new parameter to an existing method
                    # this seems to be about the best we can do.
                    if (vi[0] == 2 and vi < (2, 7, 9) or
                            vi[0] == 3 and vi < (3, 3)):
                        raise ConfigurationError(
                            "Support for ssl_pem_passphrase requires "
                            "python 2.7.9+ (pypy 2.5.1+) or 3.3+")
                    ctx.load_cert_chain(certfile, keyfile, passphrase)
                else:
                    ctx.load_cert_chain(certfile, keyfile)
            except ssl.SSLError as exc:
                raise ConfigurationError(
                    "Private key doesn't match certificate: %s" % (exc,))
        if crlfile is not None:
            if not hasattr(ctx, "verify_flags"):
                raise ConfigurationError(
                    "Support for ssl_crlfile requires "
                    "python 2.7.9+ (pypy 2.5.1+) or 3.4+")
            # Match the server's behavior.
            ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
            ctx.load_verify_locations(crlfile)
        if ca_certs is not None:
            ctx.load_verify_locations(ca_certs)
        elif cert_reqs != ssl.CERT_NONE:
            # No explicit CA file: fall back to the best available trust
            # store for this platform/Python version, in preference order.
            # CPython >= 2.7.9 or >= 3.4.0, pypy >= 2.5.1
            if hasattr(ctx, "load_default_certs"):
                ctx.load_default_certs()
            # Python >= 3.2.0, useless on Windows.
            elif (sys.platform != "win32" and
                  hasattr(ctx, "set_default_verify_paths")):
                ctx.set_default_verify_paths()
            elif sys.platform == "win32" and HAVE_WINCERTSTORE:
                with _WINCERTSLOCK:
                    if _WINCERTS is None:
                        _load_wincerts()
                    ctx.load_verify_locations(_WINCERTS.name)
            elif HAVE_CERTIFI:
                ctx.load_verify_locations(certifi.where())
            else:
                raise ConfigurationError(
                    "`ssl_cert_reqs` is not ssl.CERT_NONE and no system "
                    "CA certificates could be loaded. `ssl_ca_certs` is "
                    "required.")
        ctx.verify_mode = verify_mode
        return ctx
else:
def validate_cert_reqs(option, dummy):
"""No ssl module, raise ConfigurationError."""
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def get_ssl_context(*dummy):
"""No ssl module, raise ConfigurationError."""
raise ConfigurationError("The ssl module is not available.")
| 40.321053 | 83 | 0.596006 |
59cb2db6bc152e7afbe32aafafb24b5a879f1f8d | 21,070 | py | Python | venv/lib/python2.7/site-packages/sklearn/ensemble/tests/test_bagging.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/ensemble/tests/test_bagging.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/ensemble/tests/test_bagging.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | 1 | 2020-07-23T19:26:19.000Z | 2020-07-23T19:26:19.000Z | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
# Shared RNG with a fixed seed so the dataset shuffles below are reproducible.
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it (in place, shared by every test in this module)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it with the same shared RNG
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
    """Smoke-test BaggingClassifier over the parameter grid and base estimators."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=rng)
    param_grid = ParameterGrid({
        "max_samples": [0.5, 1.0],
        "max_features": [1, 2, 4],
        "bootstrap": [True, False],
        "bootstrap_features": [True, False],
    })
    base_estimators = [
        None,
        DummyClassifier(),
        Perceptron(),
        DecisionTreeClassifier(),
        KNeighborsClassifier(),
        SVC(),
    ]
    # Fit/predict must run without error for every combination.
    for estimator in base_estimators:
        for params in param_grid:
            model = BaggingClassifier(base_estimator=estimator,
                                      random_state=rng, **params)
            model.fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
    """Check classification for various parameter settings on sparse input."""
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set"""
        def fit(self, X, y):
            # Remember the container type seen at fit time so the test can
            # verify that sparse input reaches the base estimators unchanged.
            super(CustomSVC, self).fit(X, y)
            self.data_type_ = type(X)
            return self
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingClassifier(
                base_estimator=CustomSVC(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_results = BaggingClassifier(
                base_estimator=CustomSVC(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]
            # Sparse and dense training must agree, and every base estimator
            # must have been fit on the sparse container type.
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_regression():
    """Smoke-test BaggingRegressor over the parameter grid and base estimators."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data[:50], boston.target[:50], random_state=rng)
    param_grid = ParameterGrid({
        "max_samples": [0.5, 1.0],
        "max_features": [0.5, 1.0],
        "bootstrap": [True, False],
        "bootstrap_features": [True, False],
    })
    base_estimators = [
        None,
        DummyRegressor(),
        DecisionTreeRegressor(),
        KNeighborsRegressor(),
        SVR(),
    ]
    # Fit/predict must run without error for every combination.
    for estimator in base_estimators:
        for params in param_grid:
            model = BaggingRegressor(base_estimator=estimator,
                                     random_state=rng, **params)
            model.fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
    """Check regression for various parameter settings on sparse input.

    For each parameter set, a BaggingRegressor fit on sparse data must give
    the same predictions as one fit on the equivalent dense data, and every
    base estimator must actually have received sparse input.
    """
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set"""
        def fit(self, X, y):
            # Remember the container type seen at fit time so the test can
            # verify that sparse input reaches the base estimators unchanged.
            super(CustomSVR, self).fit(X, y)
            self.data_type_ = type(X)
            return self
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
            # (A redundant duplicate of the first assertion used to follow the
            # loop; it only re-checked the last iteration and was removed.)
def test_bootstrap_samples():
    """Test that bootstraping samples generate non-perfect base estimators."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    # Reference: a single tree fit on the full training set.
    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)
    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))
    # with bootstrap, trees are no longer perfect on the training set
    # (each tree only sees a resampled subset of the rows).
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)
    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))
def test_bootstrap_features():
    """Bootstrapping features may sample some features more than once."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=rng)
    n_features = boston.data.shape[1]
    # Without feature bootstrapping, every estimator sees each feature once.
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_equal(n_features, np.unique(features).shape[0])
    # With feature bootstrapping, duplicates shrink the unique-feature count.
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_greater(n_features, np.unique(features).shape[0])
def test_probability():
    """Predict probabilities."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    # Suppress floating-point divide/invalid errors; predict_log_proba takes
    # the log of probabilities that can be exactly zero.
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                     random_state=rng).fit(X_train, y_train)
        # Probabilities sum to one and agree with exp(log-probabilities).
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
        # Degenerate case, where some classes are missing
        # (max_samples=5 means some estimators never see every class).
        ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
                                     random_state=rng,
                                     max_samples=5).fit(X_train, y_train)
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
    """Check that oob prediction is a good estimation of the generalization
    error."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    for base_estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)
        # OOB estimate should be close to the held-out test score.
        test_score = clf.score(X_test, y_test)
        assert_less(abs(test_score - clf.oob_score_), 0.1)
        # Test with few estimators
        # (a single estimator leaves many samples with no OOB prediction,
        # which is expected to trigger a UserWarning).
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train,
                     y_train)
def test_oob_score_regression():
    """Check that oob prediction is a good estimation of the generalization
    error."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)
    # OOB estimate should be close to the held-out test score.
    test_score = clf.score(X_test, y_test)
    assert_less(abs(test_score - clf.oob_score_), 0.1)
    # Test with few estimators
    # (a single estimator leaves many samples with no OOB prediction,
    # which is expected to trigger a UserWarning).
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)
def test_single_estimator():
    """A singleton bagging ensemble without resampling equals its base model."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=rng)
    # One estimator, no sample or feature resampling: the ensemble is just
    # the base estimator fit on the full data.
    bagged = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                              n_estimators=1,
                              bootstrap=False,
                              bootstrap_features=False,
                              random_state=rng).fit(X_train, y_train)
    direct = KNeighborsRegressor().fit(X_train, y_train)
    assert_array_equal(bagged.predict(X_test), direct.predict(X_test))
def test_error():
    """Test that it gives proper exception on deficient input."""
    X, y = iris.data, iris.target
    base = DecisionTreeClassifier()
    # Test max_samples
    # (out-of-range fractions, counts larger than the dataset, and wrongly
    # typed values must all raise ValueError at fit time).
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=1000).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples="foobar").fit, X, y)
    # Test max_features
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=5).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features="foobar").fit, X, y)
    # Test support of decision_function
    # (DecisionTreeClassifier has no decision_function, so the ensemble
    # cannot offer one either).
    assert_raises(NotImplementedError,
                  BaggingClassifier(base).fit(X, y).decision_function, X)
def test_parallel_classification():
    """Check parallel classification."""
    rng = check_random_state(0)
    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    # predict_proba
    # (results must not depend on n_jobs, whether set after fitting or at
    # construction time).
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict_proba(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y2)
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    y3 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y3)
    # decision_function
    # (same invariance check for a base estimator that exposes one).
    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    ensemble.set_params(n_jobs=1)
    decisions1 = ensemble.decision_function(X_test)
    ensemble.set_params(n_jobs=2)
    decisions2 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions2)
    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    decisions3 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
"""Check that bagging ensembles can be grid-searched."""
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
"""Check base_estimator and its default values."""
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
if __name__ == "__main__":
import nose
nose.runmodule()
| 38.378871 | 78 | 0.560228 |
9ec949f3d9e00fe9a64fab4af291299ea05d45ba | 1,296 | py | Python | experiments/horse/generate_gif.py | songweijia/hdfsrs | 94cf078c3fbd1b84cf1acd3816e206ceb7327b66 | [
"Apache-2.0"
] | 1 | 2017-04-23T01:08:58.000Z | 2017-04-23T01:08:58.000Z | experiments/horse/generate_gif.py | songweijia/hdfsrs | 94cf078c3fbd1b84cf1acd3816e206ceb7327b66 | [
"Apache-2.0"
] | null | null | null | experiments/horse/generate_gif.py | songweijia/hdfsrs | 94cf078c3fbd1b84cf1acd3816e206ceb7327b66 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import Image
import sys, os
from glob import glob
from PIL import Image, ImageSequence
from images2gif import writeGif
def main(data_path,width,height,duration):
files_list = glob(os.path.join(data_path, '*.dat'))
last = -1 # last timestamp
images = []
for fn in sorted(files_list):
now = int(fn.split("/")[-1].split(".")[0])
#insert it into the gif.
f = open(fn,'r')
img = Image.new('L',(width,height),"white")
pix = img.load()
for y in range(0,height):
for x in range(0,width):
pix[x,y] = ord(f.read(1))
f.close()
images.append(img)
writeGif("out.gif", images, duration=duration);
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <data_path> <width> <height> <duration in sec>" % str(sys.argv[0])
else:
main(sys.argv[1],int(sys.argv[2]),int(sys.argv[3]),float(sys.argv[4]))
'''
millsec=0;
for i in range(0,11):
r = Image.open("data/horse%s.jpg" % str(i))
width = r.size[0]
height = r.size[1]
barr = bytearray(width*height);
pix = r.load()
of=open("data/perfect/frame_%s.dat" % '{:04d}'.format(millsec),"w");
for y in range(0,height):
for x in range(0,width):
barr[y*width+x] = pix[x,y]
of.write(barr);
of.flush();
of.close();
millsec = millsec + 40
'''
| 25.92 | 87 | 0.614198 |
cfdf22f7e612dde812ff124ce56b374255d28af3 | 6,145 | py | Python | tests/integration/testcases.py | kvalev/compose | d90e96f9bb9b2835e1b60def00ae77e9693d31cb | [
"Apache-2.0"
] | 7 | 2022-01-27T16:34:50.000Z | 2022-02-09T18:45:15.000Z | tests/integration/testcases.py | kvalev/compose | d90e96f9bb9b2835e1b60def00ae77e9693d31cb | [
"Apache-2.0"
] | 2 | 2021-03-25T22:19:31.000Z | 2021-06-01T23:11:11.000Z | tests/integration/testcases.py | waqasalam/docker-compose | a0f21e985c4a5b8dfba9881f836d05bd082e7e2f | [
"Apache-2.0"
] | 1 | 2019-06-06T19:07:53.000Z | 2019-06-06T19:07:53.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import os
import pytest
from docker.errors import APIError
from docker.utils import version_lt
from .. import unittest
from compose.cli.docker_client import docker_client
from compose.config.config import resolve_environment
from compose.config.environment import Environment
from compose.const import API_VERSIONS
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_0 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import COMPOSEFILE_V3_5 as V3_5
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
def pull_busybox(client):
client.pull('busybox:latest', stream=False)
def get_links(container):
links = container.get('HostConfig.Links') or []
def format_link(link):
_, alias = link.split(':')
return alias.split('/')[-1]
return [format_link(link) for link in links]
def engine_max_version():
if 'DOCKER_VERSION' not in os.environ:
return V3_5
version = os.environ['DOCKER_VERSION'].partition('-')[0]
if version_lt(version, '1.10'):
return V1
if version_lt(version, '1.12'):
return V2_0
if version_lt(version, '1.13'):
return V2_1
if version_lt(version, '17.06'):
return V3_2
return V3_5
def min_version_skip(version):
return pytest.mark.skipif(
engine_max_version() < version,
reason="Engine version %s is too low" % version
)
def v2_only():
return min_version_skip(V2_0)
def v2_1_only():
return min_version_skip(V2_1)
def v2_2_only():
return min_version_skip(V2_2)
def v2_3_only():
return min_version_skip(V2_3)
def v3_only():
return min_version_skip(V3_0)
class DockerClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
version = API_VERSIONS[engine_max_version()]
cls.client = docker_client(Environment(), version)
@classmethod
def tearDownClass(cls):
del cls.client
def tearDown(self):
for c in self.client.containers(
all=True,
filters={'label': '%s=composetest' % LABEL_PROJECT}):
self.client.remove_container(c['Id'], force=True)
for i in self.client.images(
filters={'label': 'com.docker.compose.test_image'}):
try:
self.client.remove_image(i, force=True)
except APIError as e:
if e.is_server_error():
pass
volumes = self.client.volumes().get('Volumes') or []
for v in volumes:
if 'composetest_' in v['Name']:
self.client.remove_volume(v['Name'])
networks = self.client.networks()
for n in networks:
if 'composetest_' in n['Name']:
self.client.remove_network(n['Name'])
def create_service(self, name, **kwargs):
if 'image' not in kwargs and 'build' not in kwargs:
kwargs['image'] = 'busybox:latest'
if 'command' not in kwargs:
kwargs['command'] = ["top"]
kwargs['environment'] = resolve_environment(
kwargs, Environment.from_env_file(None)
)
labels = dict(kwargs.setdefault('labels', {}))
labels['com.docker.compose.test-name'] = self.id()
return Service(name, client=self.client, project='composetest', **kwargs)
def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True)
build_output = self.client.build(*args, **kwargs)
with open(os.devnull, 'w') as devnull:
for event in stream_output(build_output, devnull):
pass
def require_api_version(self, minimum):
api_version = self.client.version()['ApiVersion']
if version_lt(api_version, minimum):
pytest.skip("API version is too low ({} < {})".format(api_version, minimum))
def get_volume_data(self, volume_name):
if not is_cluster(self.client):
return self.client.inspect_volume(volume_name)
volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
assert len(volumes) > 0
return self.client.inspect_volume(volumes[0]['Name'])
def if_runtime_available(runtime):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if runtime not in self.client.info().get('Runtimes', {}):
return pytest.skip("This daemon does not support the '{}'' runtime".format(runtime))
return f(self, *args, **kwargs)
return wrapper
return decorator
def is_cluster(client):
if SWARM_ASSUME_MULTINODE:
return True
def get_nodes_number():
try:
return len(client.nodes())
except APIError:
# If the Engine is not part of a Swarm, the SDK will raise
# an APIError
return 0
if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
# Only make the API call if the value hasn't been cached yet
is_cluster.nodes = get_nodes_number()
return is_cluster.nodes > 1
def no_cluster(reason):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if is_cluster(self.client):
pytest.skip("Test will not be run in cluster mode: %s" % reason)
return
return f(self, *args, **kwargs)
return wrapper
return decorator
| 30.420792 | 100 | 0.651261 |
21cf66a955e4531a6a106eb918d5f116ee6e7b6a | 1,338 | py | Python | ard/non_dp_runs/male/utils.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | null | null | null | ard/non_dp_runs/male/utils.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | 2 | 2020-11-13T18:31:07.000Z | 2021-05-03T12:48:43.000Z | ard/ablation_study/no_death_strat/male/utils.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | 2 | 2020-11-21T06:35:20.000Z | 2020-11-25T16:58:09.000Z | import torch
import torch.nn.functional as F
import math
def logsumexp(inputs, dim=None, keepdim=False):
return (inputs - F.log_softmax(inputs, dim=dim)).mean(dim, keepdim=keepdim)
def clip(model, C, dim=1):
example_norms = 0
for p in model.parameters():
example_norms += p.grad.data.norm(dim=dim)**2
example_norms = torch.sqrt(example_norms)
clip = torch.clamp(example_norms/C, 1.0)
for p in model.parameters():
p.grad.data = p.grad.data.div_(clip.unsqueeze(1))
def pickle_stuff(stuff, DPVI_params, pickle_name, path='./results/'):
import pickle, datetime
today = datetime.date.today()
file_name_extend = '_'+str(today.day)+'_'+str(today.month)
fne_original = file_name_extend
if np.all(DPVI_params['sigma']==0):
pickle_name = pickle_name+'_nondp'
else:
pickle_name = pickle_name+'_dp'
fne_extend = 0
while True:
try:
f = open(path+pickle_name+file_name_extend+'.p', 'rb')
print('You are trying to override an existing pickle file: %s'%pickle_name)
f.close()
file_name_extend = fne_original + '('+str(fne_extend)+')'
fne_extend+=1
except:
pickle.dump(stuff, open(path+pickle_name+file_name_extend+'.p', 'wb'))
break
return file_name_extend
| 34.307692 | 87 | 0.642003 |
ec82487907279e7b9bef24850b13d9b00b5d0889 | 4,445 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations_async/_operations_async.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | 1 | 2020-12-10T03:17:51.000Z | 2020-12-10T03:17:51.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations_async/_operations_async.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations_async/_operations_async.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_07_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Storage/operations'} # type: ignore
| 43.578431 | 115 | 0.659618 |
bebceed2e24bc61240c264f3c681fff76d313df6 | 1,126 | py | Python | PS02/weblog_bounds.py | tttonyl/student | 72f76208c1409142a513861feb0493376a88fd67 | [
"CC0-1.0"
] | 2 | 2016-04-07T17:59:09.000Z | 2016-04-08T05:22:15.000Z | PS02/weblog_bounds.py | jbrambleDC/PS04 | f8f8923c1879bb6cba5caf5845ae6c8b239b84e8 | [
"CC0-1.0"
] | null | null | null | PS02/weblog_bounds.py | jbrambleDC/PS04 | f8f8923c1879bb6cba5caf5845ae6c8b239b84e8 | [
"CC0-1.0"
] | 2 | 2019-09-23T05:32:32.000Z | 2021-04-30T15:28:58.000Z | #!/usr/bin/env python2
# Print the min and max datetime for each file in the weblog directory
import mrjob
from mrjob.job import MRJob
from weblog import Weblog # imports class defined in weblog.py
import os
class WeblogBounds(MRJob):
def mapper(self, _, line):
# Get the name of the input file, per mrjob v0.4.6 documentation
# https://pythonhosted.org/mrjob/utils-compat.html
filename = mrjob.compat.jobconf_from_env("map.input.file")
# parse the weblog input line
log = Weblog(line)
# output <filename,datetime>
yield filename,log.datetime
def reducer(self, key, values):
# find the minimum and the maximum date for each key
# notice that we can't simply say min(values) and max(values), because we need to compute
# both at the same time (we don't want to consume the values)
vmin = None
vmax = None
for v in values:
if v<vmin or not vmin: vmin=v
if v>vmax or not vmax: vmax=v
yield (key,(vmin,vmax))
if __name__=="__main__":
WeblogBounds.run()
| 30.432432 | 97 | 0.638544 |
1866f0b091f0326e53fa6566789f476cf335268d | 275 | py | Python | stormed/method/queue.py | bufferx/stormed-amqp | 6b29ae069a3cf62277ed1dffe7dd895a743f28e9 | [
"MIT"
] | 15 | 2015-03-06T12:35:52.000Z | 2020-06-28T01:46:09.000Z | stormed/method/queue.py | brimcfadden/stormed-amqp | 59e81bfa4632366dc3f20b3dff25df3331480798 | [
"MIT"
] | 14 | 2015-01-08T21:13:27.000Z | 2016-03-09T13:22:01.000Z | stormed/method/queue.py | brimcfadden/stormed-amqp | 59e81bfa4632366dc3f20b3dff25df3331480798 | [
"MIT"
] | 14 | 2015-01-11T22:25:37.000Z | 2020-06-28T01:46:13.000Z | from stormed.util import add_method
from stormed.method.codegen.queue import *
@add_method(DeclareOk)
def handle(self, ch):
if ch.callback:
ch.invoke_callback(self)
@add_method(PurgeOk)
def handle(self, channel):
channel.invoke_callback(self.message_count)
| 22.916667 | 47 | 0.76 |
5a732fa8d07987386dae047037bf505dfbc5d2eb | 35,135 | py | Python | netbox/dcim/choices.py | tmarcucci/netbox | b29a5511df77b0a72d93cc7517450fd681aade8b | [
"Apache-2.0"
] | null | null | null | netbox/dcim/choices.py | tmarcucci/netbox | b29a5511df77b0a72d93cc7517450fd681aade8b | [
"Apache-2.0"
] | null | null | null | netbox/dcim/choices.py | tmarcucci/netbox | b29a5511df77b0a72d93cc7517450fd681aade8b | [
"Apache-2.0"
] | null | null | null | from utilities.choices import ChoiceSet
#
# Sites
#
class SiteStatusChoices(ChoiceSet):
STATUS_PLANNED = 'planned'
STATUS_STAGING = 'staging'
STATUS_ACTIVE = 'active'
STATUS_DECOMMISSIONING = 'decommissioning'
STATUS_RETIRED = 'retired'
CHOICES = (
(STATUS_PLANNED, 'Planned'),
(STATUS_STAGING, 'Staging'),
(STATUS_ACTIVE, 'Active'),
(STATUS_DECOMMISSIONING, 'Decommissioning'),
(STATUS_RETIRED, 'Retired'),
)
CSS_CLASSES = {
STATUS_PLANNED: 'info',
STATUS_STAGING: 'primary',
STATUS_ACTIVE: 'success',
STATUS_DECOMMISSIONING: 'warning',
STATUS_RETIRED: 'danger',
}
#
# Racks
#
class RackTypeChoices(ChoiceSet):
TYPE_2POST = '2-post-frame'
TYPE_4POST = '4-post-frame'
TYPE_CABINET = '4-post-cabinet'
TYPE_WALLFRAME = 'wall-frame'
TYPE_WALLCABINET = 'wall-cabinet'
CHOICES = (
(TYPE_2POST, '2-post frame'),
(TYPE_4POST, '4-post frame'),
(TYPE_CABINET, '4-post cabinet'),
(TYPE_WALLFRAME, 'Wall-mounted frame'),
(TYPE_WALLCABINET, 'Wall-mounted cabinet'),
)
class RackWidthChoices(ChoiceSet):
WIDTH_10IN = 10
WIDTH_19IN = 19
WIDTH_21IN = 21
WIDTH_23IN = 23
CHOICES = (
(WIDTH_10IN, '10 inches'),
(WIDTH_19IN, '19 inches'),
(WIDTH_21IN, '21 inches'),
(WIDTH_23IN, '23 inches'),
)
class RackStatusChoices(ChoiceSet):
STATUS_RESERVED = 'reserved'
STATUS_AVAILABLE = 'available'
STATUS_PLANNED = 'planned'
STATUS_ACTIVE = 'active'
STATUS_DEPRECATED = 'deprecated'
CHOICES = (
(STATUS_RESERVED, 'Reserved'),
(STATUS_AVAILABLE, 'Available'),
(STATUS_PLANNED, 'Planned'),
(STATUS_ACTIVE, 'Active'),
(STATUS_DEPRECATED, 'Deprecated'),
)
CSS_CLASSES = {
STATUS_RESERVED: 'warning',
STATUS_AVAILABLE: 'success',
STATUS_PLANNED: 'info',
STATUS_ACTIVE: 'primary',
STATUS_DEPRECATED: 'danger',
}
class RackDimensionUnitChoices(ChoiceSet):
UNIT_MILLIMETER = 'mm'
UNIT_INCH = 'in'
CHOICES = (
(UNIT_MILLIMETER, 'Millimeters'),
(UNIT_INCH, 'Inches'),
)
class RackElevationDetailRenderChoices(ChoiceSet):
RENDER_JSON = 'json'
RENDER_SVG = 'svg'
CHOICES = (
(RENDER_JSON, 'json'),
(RENDER_SVG, 'svg')
)
#
# DeviceTypes
#
class SubdeviceRoleChoices(ChoiceSet):
ROLE_PARENT = 'parent'
ROLE_CHILD = 'child'
CHOICES = (
(ROLE_PARENT, 'Parent'),
(ROLE_CHILD, 'Child'),
)
#
# Devices
#
class DeviceFaceChoices(ChoiceSet):
FACE_FRONT = 'front'
FACE_REAR = 'rear'
CHOICES = (
(FACE_FRONT, 'Front'),
(FACE_REAR, 'Rear'),
)
class DeviceStatusChoices(ChoiceSet):
STATUS_OFFLINE = 'offline'
STATUS_ACTIVE = 'active'
STATUS_PLANNED = 'planned'
STATUS_STAGED = 'staged'
STATUS_FAILED = 'failed'
STATUS_INVENTORY = 'inventory'
STATUS_DECOMMISSIONING = 'decommissioning'
CHOICES = (
(STATUS_OFFLINE, 'Offline'),
(STATUS_ACTIVE, 'Active'),
(STATUS_PLANNED, 'Planned'),
(STATUS_STAGED, 'Staged'),
(STATUS_FAILED, 'Failed'),
(STATUS_INVENTORY, 'Inventory'),
(STATUS_DECOMMISSIONING, 'Decommissioning'),
)
CSS_CLASSES = {
STATUS_OFFLINE: 'warning',
STATUS_ACTIVE: 'success',
STATUS_PLANNED: 'info',
STATUS_STAGED: 'primary',
STATUS_FAILED: 'danger',
STATUS_INVENTORY: 'secondary',
STATUS_DECOMMISSIONING: 'warning',
}
#
# ConsolePorts
#
class ConsolePortTypeChoices(ChoiceSet):
TYPE_DE9 = 'de-9'
TYPE_DB25 = 'db-25'
TYPE_RJ11 = 'rj-11'
TYPE_RJ12 = 'rj-12'
TYPE_RJ45 = 'rj-45'
TYPE_MINI_DIN_8 = 'mini-din-8'
TYPE_USB_A = 'usb-a'
TYPE_USB_B = 'usb-b'
TYPE_USB_C = 'usb-c'
TYPE_USB_MINI_A = 'usb-mini-a'
TYPE_USB_MINI_B = 'usb-mini-b'
TYPE_USB_MICRO_A = 'usb-micro-a'
TYPE_USB_MICRO_B = 'usb-micro-b'
TYPE_USB_MICRO_AB = 'usb-micro-ab'
TYPE_OTHER = 'other'
CHOICES = (
('Serial', (
(TYPE_DE9, 'DE-9'),
(TYPE_DB25, 'DB-25'),
(TYPE_RJ11, 'RJ-11'),
(TYPE_RJ12, 'RJ-12'),
(TYPE_RJ45, 'RJ-45'),
(TYPE_MINI_DIN_8, 'Mini-DIN 8'),
)),
('USB', (
(TYPE_USB_A, 'USB Type A'),
(TYPE_USB_B, 'USB Type B'),
(TYPE_USB_C, 'USB Type C'),
(TYPE_USB_MINI_A, 'USB Mini A'),
(TYPE_USB_MINI_B, 'USB Mini B'),
(TYPE_USB_MICRO_A, 'USB Micro A'),
(TYPE_USB_MICRO_B, 'USB Micro B'),
(TYPE_USB_MICRO_AB, 'USB Micro AB'),
)),
('Other', (
(TYPE_OTHER, 'Other'),
)),
)
class ConsolePortSpeedChoices(ChoiceSet):
SPEED_1200 = 1200
SPEED_2400 = 2400
SPEED_4800 = 4800
SPEED_9600 = 9600
SPEED_19200 = 19200
SPEED_38400 = 38400
SPEED_57600 = 57600
SPEED_115200 = 115200
CHOICES = (
(SPEED_1200, '1200 bps'),
(SPEED_2400, '2400 bps'),
(SPEED_4800, '4800 bps'),
(SPEED_9600, '9600 bps'),
(SPEED_19200, '19.2 kbps'),
(SPEED_38400, '38.4 kbps'),
(SPEED_57600, '57.6 kbps'),
(SPEED_115200, '115.2 kbps'),
)
#
# PowerPorts
#
class PowerPortTypeChoices(ChoiceSet):
# IEC 60320
TYPE_IEC_C6 = 'iec-60320-c6'
TYPE_IEC_C8 = 'iec-60320-c8'
TYPE_IEC_C14 = 'iec-60320-c14'
TYPE_IEC_C16 = 'iec-60320-c16'
TYPE_IEC_C20 = 'iec-60320-c20'
TYPE_IEC_C22 = 'iec-60320-c22'
# IEC 60309
TYPE_IEC_PNE4H = 'iec-60309-p-n-e-4h'
TYPE_IEC_PNE6H = 'iec-60309-p-n-e-6h'
TYPE_IEC_PNE9H = 'iec-60309-p-n-e-9h'
TYPE_IEC_2PE4H = 'iec-60309-2p-e-4h'
TYPE_IEC_2PE6H = 'iec-60309-2p-e-6h'
TYPE_IEC_2PE9H = 'iec-60309-2p-e-9h'
TYPE_IEC_3PE4H = 'iec-60309-3p-e-4h'
TYPE_IEC_3PE6H = 'iec-60309-3p-e-6h'
TYPE_IEC_3PE9H = 'iec-60309-3p-e-9h'
TYPE_IEC_3PNE4H = 'iec-60309-3p-n-e-4h'
TYPE_IEC_3PNE6H = 'iec-60309-3p-n-e-6h'
TYPE_IEC_3PNE9H = 'iec-60309-3p-n-e-9h'
# NEMA non-locking
TYPE_NEMA_115P = 'nema-1-15p'
TYPE_NEMA_515P = 'nema-5-15p'
TYPE_NEMA_520P = 'nema-5-20p'
TYPE_NEMA_530P = 'nema-5-30p'
TYPE_NEMA_550P = 'nema-5-50p'
TYPE_NEMA_615P = 'nema-6-15p'
TYPE_NEMA_620P = 'nema-6-20p'
TYPE_NEMA_630P = 'nema-6-30p'
TYPE_NEMA_650P = 'nema-6-50p'
TYPE_NEMA_1030P = 'nema-10-30p'
TYPE_NEMA_1050P = 'nema-10-50p'
TYPE_NEMA_1420P = 'nema-14-20p'
TYPE_NEMA_1430P = 'nema-14-30p'
TYPE_NEMA_1450P = 'nema-14-50p'
TYPE_NEMA_1460P = 'nema-14-60p'
TYPE_NEMA_1515P = 'nema-15-15p'
TYPE_NEMA_1520P = 'nema-15-20p'
TYPE_NEMA_1530P = 'nema-15-30p'
TYPE_NEMA_1550P = 'nema-15-50p'
TYPE_NEMA_1560P = 'nema-15-60p'
# NEMA locking
TYPE_NEMA_L115P = 'nema-l1-15p'
TYPE_NEMA_L515P = 'nema-l5-15p'
TYPE_NEMA_L520P = 'nema-l5-20p'
TYPE_NEMA_L530P = 'nema-l5-30p'
TYPE_NEMA_L550P = 'nema-l5-50p'
TYPE_NEMA_L615P = 'nema-l6-15p'
TYPE_NEMA_L620P = 'nema-l6-20p'
TYPE_NEMA_L630P = 'nema-l6-30p'
TYPE_NEMA_L650P = 'nema-l6-50p'
TYPE_NEMA_L1030P = 'nema-l10-30p'
TYPE_NEMA_L1420P = 'nema-l14-20p'
TYPE_NEMA_L1430P = 'nema-l14-30p'
TYPE_NEMA_L1450P = 'nema-l14-50p'
TYPE_NEMA_L1460P = 'nema-l14-60p'
TYPE_NEMA_L1520P = 'nema-l15-20p'
TYPE_NEMA_L1530P = 'nema-l15-30p'
TYPE_NEMA_L1550P = 'nema-l15-50p'
TYPE_NEMA_L1560P = 'nema-l15-60p'
TYPE_NEMA_L2120P = 'nema-l21-20p'
TYPE_NEMA_L2130P = 'nema-l21-30p'
# California style
TYPE_CS6361C = 'cs6361c'
TYPE_CS6365C = 'cs6365c'
TYPE_CS8165C = 'cs8165c'
TYPE_CS8265C = 'cs8265c'
TYPE_CS8365C = 'cs8365c'
TYPE_CS8465C = 'cs8465c'
# ITA/international
TYPE_ITA_C = 'ita-c'
TYPE_ITA_E = 'ita-e'
TYPE_ITA_F = 'ita-f'
TYPE_ITA_EF = 'ita-ef'
TYPE_ITA_G = 'ita-g'
TYPE_ITA_H = 'ita-h'
TYPE_ITA_I = 'ita-i'
TYPE_ITA_J = 'ita-j'
TYPE_ITA_K = 'ita-k'
TYPE_ITA_L = 'ita-l'
TYPE_ITA_M = 'ita-m'
TYPE_ITA_N = 'ita-n'
TYPE_ITA_O = 'ita-o'
# USB
TYPE_USB_A = 'usb-a'
TYPE_USB_B = 'usb-b'
TYPE_USB_C = 'usb-c'
TYPE_USB_MINI_A = 'usb-mini-a'
TYPE_USB_MINI_B = 'usb-mini-b'
TYPE_USB_MICRO_A = 'usb-micro-a'
TYPE_USB_MICRO_B = 'usb-micro-b'
TYPE_USB_MICRO_AB = 'usb-micro-ab'
TYPE_USB_3_B = 'usb-3-b'
TYPE_USB_3_MICROB = 'usb-3-micro-b'
# Direct current (DC)
TYPE_DC = 'dc-terminal'
# Proprietary
TYPE_SAF_D_GRID = 'saf-d-grid'
# Other
TYPE_HARDWIRED = 'hardwired'
CHOICES = (
('IEC 60320', (
(TYPE_IEC_C6, 'C6'),
(TYPE_IEC_C8, 'C8'),
(TYPE_IEC_C14, 'C14'),
(TYPE_IEC_C16, 'C16'),
(TYPE_IEC_C20, 'C20'),
(TYPE_IEC_C22, 'C22'),
)),
('IEC 60309', (
(TYPE_IEC_PNE4H, 'P+N+E 4H'),
(TYPE_IEC_PNE6H, 'P+N+E 6H'),
(TYPE_IEC_PNE9H, 'P+N+E 9H'),
(TYPE_IEC_2PE4H, '2P+E 4H'),
(TYPE_IEC_2PE6H, '2P+E 6H'),
(TYPE_IEC_2PE9H, '2P+E 9H'),
(TYPE_IEC_3PE4H, '3P+E 4H'),
(TYPE_IEC_3PE6H, '3P+E 6H'),
(TYPE_IEC_3PE9H, '3P+E 9H'),
(TYPE_IEC_3PNE4H, '3P+N+E 4H'),
(TYPE_IEC_3PNE6H, '3P+N+E 6H'),
(TYPE_IEC_3PNE9H, '3P+N+E 9H'),
)),
('NEMA (Non-locking)', (
(TYPE_NEMA_115P, 'NEMA 1-15P'),
(TYPE_NEMA_515P, 'NEMA 5-15P'),
(TYPE_NEMA_520P, 'NEMA 5-20P'),
(TYPE_NEMA_530P, 'NEMA 5-30P'),
(TYPE_NEMA_550P, 'NEMA 5-50P'),
(TYPE_NEMA_615P, 'NEMA 6-15P'),
(TYPE_NEMA_620P, 'NEMA 6-20P'),
(TYPE_NEMA_630P, 'NEMA 6-30P'),
(TYPE_NEMA_650P, 'NEMA 6-50P'),
(TYPE_NEMA_1030P, 'NEMA 10-30P'),
(TYPE_NEMA_1050P, 'NEMA 10-50P'),
(TYPE_NEMA_1420P, 'NEMA 14-20P'),
(TYPE_NEMA_1430P, 'NEMA 14-30P'),
(TYPE_NEMA_1450P, 'NEMA 14-50P'),
(TYPE_NEMA_1460P, 'NEMA 14-60P'),
(TYPE_NEMA_1515P, 'NEMA 15-15P'),
(TYPE_NEMA_1520P, 'NEMA 15-20P'),
(TYPE_NEMA_1530P, 'NEMA 15-30P'),
(TYPE_NEMA_1550P, 'NEMA 15-50P'),
(TYPE_NEMA_1560P, 'NEMA 15-60P'),
)),
('NEMA (Locking)', (
(TYPE_NEMA_L115P, 'NEMA L1-15P'),
(TYPE_NEMA_L515P, 'NEMA L5-15P'),
(TYPE_NEMA_L520P, 'NEMA L5-20P'),
(TYPE_NEMA_L530P, 'NEMA L5-30P'),
(TYPE_NEMA_L550P, 'NEMA L5-50P'),
(TYPE_NEMA_L615P, 'NEMA L6-15P'),
(TYPE_NEMA_L620P, 'NEMA L6-20P'),
(TYPE_NEMA_L630P, 'NEMA L6-30P'),
(TYPE_NEMA_L650P, 'NEMA L6-50P'),
(TYPE_NEMA_L1030P, 'NEMA L10-30P'),
(TYPE_NEMA_L1420P, 'NEMA L14-20P'),
(TYPE_NEMA_L1430P, 'NEMA L14-30P'),
(TYPE_NEMA_L1450P, 'NEMA L14-50P'),
(TYPE_NEMA_L1460P, 'NEMA L14-60P'),
(TYPE_NEMA_L1520P, 'NEMA L15-20P'),
(TYPE_NEMA_L1530P, 'NEMA L15-30P'),
(TYPE_NEMA_L1550P, 'NEMA L15-50P'),
(TYPE_NEMA_L1560P, 'NEMA L15-60P'),
(TYPE_NEMA_L2120P, 'NEMA L21-20P'),
(TYPE_NEMA_L2130P, 'NEMA L21-30P'),
)),
('California Style', (
(TYPE_CS6361C, 'CS6361C'),
(TYPE_CS6365C, 'CS6365C'),
(TYPE_CS8165C, 'CS8165C'),
(TYPE_CS8265C, 'CS8265C'),
(TYPE_CS8365C, 'CS8365C'),
(TYPE_CS8465C, 'CS8465C'),
)),
('International/ITA', (
(TYPE_ITA_C, 'ITA Type C (CEE 7/16)'),
(TYPE_ITA_E, 'ITA Type E (CEE 7/5)'),
(TYPE_ITA_F, 'ITA Type F (CEE 7/4)'),
(TYPE_ITA_EF, 'ITA Type E/F (CEE 7/7)'),
(TYPE_ITA_G, 'ITA Type G (BS 1363)'),
(TYPE_ITA_H, 'ITA Type H'),
(TYPE_ITA_I, 'ITA Type I'),
(TYPE_ITA_J, 'ITA Type J'),
(TYPE_ITA_K, 'ITA Type K'),
(TYPE_ITA_L, 'ITA Type L (CEI 23-50)'),
(TYPE_ITA_M, 'ITA Type M (BS 546)'),
(TYPE_ITA_N, 'ITA Type N'),
(TYPE_ITA_O, 'ITA Type O'),
)),
('USB', (
(TYPE_USB_A, 'USB Type A'),
(TYPE_USB_B, 'USB Type B'),
(TYPE_USB_C, 'USB Type C'),
(TYPE_USB_MINI_A, 'USB Mini A'),
(TYPE_USB_MINI_B, 'USB Mini B'),
(TYPE_USB_MICRO_A, 'USB Micro A'),
(TYPE_USB_MICRO_B, 'USB Micro B'),
(TYPE_USB_MICRO_AB, 'USB Micro AB'),
(TYPE_USB_3_B, 'USB 3.0 Type B'),
(TYPE_USB_3_MICROB, 'USB 3.0 Micro B'),
)),
('DC', (
(TYPE_DC, 'DC Terminal'),
)),
('Proprietary', (
(TYPE_SAF_D_GRID, 'Saf-D-Grid'),
)),
('Other', (
(TYPE_HARDWIRED, 'Hardwired'),
)),
)
#
# PowerOutlets
#
class PowerOutletTypeChoices(ChoiceSet):
    """Physical connector types selectable for a power outlet.

    Each ``TYPE_*`` constant is the slug stored in the database; ``CHOICES``
    groups the (slug, label) pairs by connector family for form rendering.
    Changing any slug requires a data migration.
    """
    # IEC 60320
    TYPE_IEC_C5 = 'iec-60320-c5'
    TYPE_IEC_C7 = 'iec-60320-c7'
    TYPE_IEC_C13 = 'iec-60320-c13'
    TYPE_IEC_C15 = 'iec-60320-c15'
    TYPE_IEC_C19 = 'iec-60320-c19'
    TYPE_IEC_C21 = 'iec-60320-c21'
    # IEC 60309
    TYPE_IEC_PNE4H = 'iec-60309-p-n-e-4h'
    TYPE_IEC_PNE6H = 'iec-60309-p-n-e-6h'
    TYPE_IEC_PNE9H = 'iec-60309-p-n-e-9h'
    TYPE_IEC_2PE4H = 'iec-60309-2p-e-4h'
    TYPE_IEC_2PE6H = 'iec-60309-2p-e-6h'
    TYPE_IEC_2PE9H = 'iec-60309-2p-e-9h'
    TYPE_IEC_3PE4H = 'iec-60309-3p-e-4h'
    TYPE_IEC_3PE6H = 'iec-60309-3p-e-6h'
    TYPE_IEC_3PE9H = 'iec-60309-3p-e-9h'
    TYPE_IEC_3PNE4H = 'iec-60309-3p-n-e-4h'
    TYPE_IEC_3PNE6H = 'iec-60309-3p-n-e-6h'
    TYPE_IEC_3PNE9H = 'iec-60309-3p-n-e-9h'
    # NEMA non-locking
    TYPE_NEMA_115R = 'nema-1-15r'
    TYPE_NEMA_515R = 'nema-5-15r'
    TYPE_NEMA_520R = 'nema-5-20r'
    TYPE_NEMA_530R = 'nema-5-30r'
    TYPE_NEMA_550R = 'nema-5-50r'
    TYPE_NEMA_615R = 'nema-6-15r'
    TYPE_NEMA_620R = 'nema-6-20r'
    TYPE_NEMA_630R = 'nema-6-30r'
    TYPE_NEMA_650R = 'nema-6-50r'
    TYPE_NEMA_1030R = 'nema-10-30r'
    TYPE_NEMA_1050R = 'nema-10-50r'
    TYPE_NEMA_1420R = 'nema-14-20r'
    TYPE_NEMA_1430R = 'nema-14-30r'
    TYPE_NEMA_1450R = 'nema-14-50r'
    TYPE_NEMA_1460R = 'nema-14-60r'
    TYPE_NEMA_1515R = 'nema-15-15r'
    TYPE_NEMA_1520R = 'nema-15-20r'
    TYPE_NEMA_1530R = 'nema-15-30r'
    TYPE_NEMA_1550R = 'nema-15-50r'
    TYPE_NEMA_1560R = 'nema-15-60r'
    # NEMA locking
    TYPE_NEMA_L115R = 'nema-l1-15r'
    TYPE_NEMA_L515R = 'nema-l5-15r'
    TYPE_NEMA_L520R = 'nema-l5-20r'
    TYPE_NEMA_L530R = 'nema-l5-30r'
    TYPE_NEMA_L550R = 'nema-l5-50r'
    TYPE_NEMA_L615R = 'nema-l6-15r'
    TYPE_NEMA_L620R = 'nema-l6-20r'
    TYPE_NEMA_L630R = 'nema-l6-30r'
    TYPE_NEMA_L650R = 'nema-l6-50r'
    TYPE_NEMA_L1030R = 'nema-l10-30r'
    TYPE_NEMA_L1420R = 'nema-l14-20r'
    TYPE_NEMA_L1430R = 'nema-l14-30r'
    TYPE_NEMA_L1450R = 'nema-l14-50r'
    TYPE_NEMA_L1460R = 'nema-l14-60r'
    TYPE_NEMA_L1520R = 'nema-l15-20r'
    TYPE_NEMA_L1530R = 'nema-l15-30r'
    TYPE_NEMA_L1550R = 'nema-l15-50r'
    TYPE_NEMA_L1560R = 'nema-l15-60r'
    TYPE_NEMA_L2120R = 'nema-l21-20r'
    TYPE_NEMA_L2130R = 'nema-l21-30r'
    # California style
    # NOTE(review): these slugs are upper-case, unlike every other slug in
    # this class; they are stored DB values, so do not normalize them to
    # lower-case without a migration.
    TYPE_CS6360C = 'CS6360C'
    TYPE_CS6364C = 'CS6364C'
    TYPE_CS8164C = 'CS8164C'
    TYPE_CS8264C = 'CS8264C'
    TYPE_CS8364C = 'CS8364C'
    TYPE_CS8464C = 'CS8464C'
    # ITA/international
    TYPE_ITA_E = 'ita-e'
    TYPE_ITA_F = 'ita-f'
    TYPE_ITA_G = 'ita-g'
    TYPE_ITA_H = 'ita-h'
    TYPE_ITA_I = 'ita-i'
    TYPE_ITA_J = 'ita-j'
    TYPE_ITA_K = 'ita-k'
    TYPE_ITA_L = 'ita-l'
    TYPE_ITA_M = 'ita-m'
    TYPE_ITA_N = 'ita-n'
    TYPE_ITA_O = 'ita-o'
    # USB
    TYPE_USB_A = 'usb-a'
    TYPE_USB_MICROB = 'usb-micro-b'
    TYPE_USB_C = 'usb-c'
    # Direct current (DC)
    TYPE_DC = 'dc-terminal'
    # Proprietary
    TYPE_HDOT_CX = 'hdot-cx'
    TYPE_SAF_D_GRID = 'saf-d-grid'
    # Other
    TYPE_HARDWIRED = 'hardwired'
    CHOICES = (
        ('IEC 60320', (
            (TYPE_IEC_C5, 'C5'),
            (TYPE_IEC_C7, 'C7'),
            (TYPE_IEC_C13, 'C13'),
            (TYPE_IEC_C15, 'C15'),
            (TYPE_IEC_C19, 'C19'),
            (TYPE_IEC_C21, 'C21'),
        )),
        ('IEC 60309', (
            (TYPE_IEC_PNE4H, 'P+N+E 4H'),
            (TYPE_IEC_PNE6H, 'P+N+E 6H'),
            (TYPE_IEC_PNE9H, 'P+N+E 9H'),
            (TYPE_IEC_2PE4H, '2P+E 4H'),
            (TYPE_IEC_2PE6H, '2P+E 6H'),
            (TYPE_IEC_2PE9H, '2P+E 9H'),
            (TYPE_IEC_3PE4H, '3P+E 4H'),
            (TYPE_IEC_3PE6H, '3P+E 6H'),
            (TYPE_IEC_3PE9H, '3P+E 9H'),
            (TYPE_IEC_3PNE4H, '3P+N+E 4H'),
            (TYPE_IEC_3PNE6H, '3P+N+E 6H'),
            (TYPE_IEC_3PNE9H, '3P+N+E 9H'),
        )),
        ('NEMA (Non-locking)', (
            (TYPE_NEMA_115R, 'NEMA 1-15R'),
            (TYPE_NEMA_515R, 'NEMA 5-15R'),
            (TYPE_NEMA_520R, 'NEMA 5-20R'),
            (TYPE_NEMA_530R, 'NEMA 5-30R'),
            (TYPE_NEMA_550R, 'NEMA 5-50R'),
            (TYPE_NEMA_615R, 'NEMA 6-15R'),
            (TYPE_NEMA_620R, 'NEMA 6-20R'),
            (TYPE_NEMA_630R, 'NEMA 6-30R'),
            (TYPE_NEMA_650R, 'NEMA 6-50R'),
            (TYPE_NEMA_1030R, 'NEMA 10-30R'),
            (TYPE_NEMA_1050R, 'NEMA 10-50R'),
            (TYPE_NEMA_1420R, 'NEMA 14-20R'),
            (TYPE_NEMA_1430R, 'NEMA 14-30R'),
            (TYPE_NEMA_1450R, 'NEMA 14-50R'),
            (TYPE_NEMA_1460R, 'NEMA 14-60R'),
            (TYPE_NEMA_1515R, 'NEMA 15-15R'),
            (TYPE_NEMA_1520R, 'NEMA 15-20R'),
            (TYPE_NEMA_1530R, 'NEMA 15-30R'),
            (TYPE_NEMA_1550R, 'NEMA 15-50R'),
            (TYPE_NEMA_1560R, 'NEMA 15-60R'),
        )),
        ('NEMA (Locking)', (
            (TYPE_NEMA_L115R, 'NEMA L1-15R'),
            (TYPE_NEMA_L515R, 'NEMA L5-15R'),
            (TYPE_NEMA_L520R, 'NEMA L5-20R'),
            (TYPE_NEMA_L530R, 'NEMA L5-30R'),
            (TYPE_NEMA_L550R, 'NEMA L5-50R'),
            (TYPE_NEMA_L615R, 'NEMA L6-15R'),
            (TYPE_NEMA_L620R, 'NEMA L6-20R'),
            (TYPE_NEMA_L630R, 'NEMA L6-30R'),
            (TYPE_NEMA_L650R, 'NEMA L6-50R'),
            (TYPE_NEMA_L1030R, 'NEMA L10-30R'),
            (TYPE_NEMA_L1420R, 'NEMA L14-20R'),
            (TYPE_NEMA_L1430R, 'NEMA L14-30R'),
            (TYPE_NEMA_L1450R, 'NEMA L14-50R'),
            (TYPE_NEMA_L1460R, 'NEMA L14-60R'),
            (TYPE_NEMA_L1520R, 'NEMA L15-20R'),
            (TYPE_NEMA_L1530R, 'NEMA L15-30R'),
            (TYPE_NEMA_L1550R, 'NEMA L15-50R'),
            (TYPE_NEMA_L1560R, 'NEMA L15-60R'),
            (TYPE_NEMA_L2120R, 'NEMA L21-20R'),
            (TYPE_NEMA_L2130R, 'NEMA L21-30R'),
        )),
        ('California Style', (
            (TYPE_CS6360C, 'CS6360C'),
            (TYPE_CS6364C, 'CS6364C'),
            (TYPE_CS8164C, 'CS8164C'),
            (TYPE_CS8264C, 'CS8264C'),
            (TYPE_CS8364C, 'CS8364C'),
            (TYPE_CS8464C, 'CS8464C'),
        )),
        ('ITA/International', (
            (TYPE_ITA_E, 'ITA Type E (CEE7/5)'),
            (TYPE_ITA_F, 'ITA Type F (CEE7/3)'),
            (TYPE_ITA_G, 'ITA Type G (BS 1363)'),
            (TYPE_ITA_H, 'ITA Type H'),
            (TYPE_ITA_I, 'ITA Type I'),
            (TYPE_ITA_J, 'ITA Type J'),
            (TYPE_ITA_K, 'ITA Type K'),
            (TYPE_ITA_L, 'ITA Type L (CEI 23-50)'),
            (TYPE_ITA_M, 'ITA Type M (BS 546)'),
            (TYPE_ITA_N, 'ITA Type N'),
            (TYPE_ITA_O, 'ITA Type O'),
        )),
        ('USB', (
            (TYPE_USB_A, 'USB Type A'),
            (TYPE_USB_MICROB, 'USB Micro B'),
            (TYPE_USB_C, 'USB Type C'),
        )),
        ('DC', (
            (TYPE_DC, 'DC Terminal'),
        )),
        ('Proprietary', (
            (TYPE_HDOT_CX, 'HDOT Cx'),
            (TYPE_SAF_D_GRID, 'Saf-D-Grid'),
        )),
        ('Other', (
            (TYPE_HARDWIRED, 'Hardwired'),
        )),
    )
class PowerOutletFeedLegChoices(ChoiceSet):
    """Which leg of a three-phase power feed an outlet draws from."""

    FEED_LEG_A = 'A'
    FEED_LEG_B = 'B'
    FEED_LEG_C = 'C'

    # Stored value and display label are the same single letter.
    CHOICES = tuple(
        (leg, leg) for leg in (FEED_LEG_A, FEED_LEG_B, FEED_LEG_C)
    )
#
# Interfaces
#
class InterfaceKindChoices(ChoiceSet):
    """Coarse classification of an interface: physical, virtual, or wireless."""

    KIND_PHYSICAL = 'physical'
    KIND_VIRTUAL = 'virtual'
    KIND_WIRELESS = 'wireless'

    # Every label is simply the title-cased slug.
    CHOICES = tuple(
        (kind, kind.title())
        for kind in (KIND_PHYSICAL, KIND_VIRTUAL, KIND_WIRELESS)
    )
class InterfaceTypeChoices(ChoiceSet):
    """Interface form-factor/medium types.

    ``TYPE_*`` constants are the slugs stored in the database; ``CHOICES``
    groups the (slug, label) pairs by technology family. Slugs must not be
    changed without a data migration.
    """
    # Virtual
    TYPE_VIRTUAL = 'virtual'
    TYPE_LAG = 'lag'
    # Ethernet
    TYPE_100ME_FIXED = '100base-tx'
    TYPE_1GE_FIXED = '1000base-t'
    TYPE_1GE_GBIC = '1000base-x-gbic'
    TYPE_1GE_SFP = '1000base-x-sfp'
    TYPE_2GE_FIXED = '2.5gbase-t'
    TYPE_5GE_FIXED = '5gbase-t'
    TYPE_10GE_FIXED = '10gbase-t'
    TYPE_10GE_CX4 = '10gbase-cx4'
    TYPE_10GE_SFP_PLUS = '10gbase-x-sfpp'
    TYPE_10GE_XFP = '10gbase-x-xfp'
    TYPE_10GE_XENPAK = '10gbase-x-xenpak'
    TYPE_10GE_X2 = '10gbase-x-x2'
    TYPE_25GE_SFP28 = '25gbase-x-sfp28'
    TYPE_50GE_SFP56 = '50gbase-x-sfp56'
    TYPE_40GE_QSFP_PLUS = '40gbase-x-qsfpp'
    # NOTE(review): slug reads 'sfp28' rather than 'qsfp28'; it is a stored
    # DB value, so confirm upstream intent before "fixing" it.
    TYPE_50GE_QSFP28 = '50gbase-x-sfp28'
    TYPE_100GE_CFP = '100gbase-x-cfp'
    TYPE_100GE_CFP2 = '100gbase-x-cfp2'
    TYPE_100GE_CFP4 = '100gbase-x-cfp4'
    TYPE_100GE_CPAK = '100gbase-x-cpak'
    TYPE_100GE_QSFP28 = '100gbase-x-qsfp28'
    TYPE_200GE_CFP2 = '200gbase-x-cfp2'
    TYPE_200GE_QSFP56 = '200gbase-x-qsfp56'
    TYPE_400GE_QSFP_DD = '400gbase-x-qsfpdd'
    TYPE_400GE_OSFP = '400gbase-x-osfp'
    # Wireless
    TYPE_80211A = 'ieee802.11a'
    TYPE_80211G = 'ieee802.11g'
    TYPE_80211N = 'ieee802.11n'
    TYPE_80211AC = 'ieee802.11ac'
    TYPE_80211AD = 'ieee802.11ad'
    TYPE_80211AX = 'ieee802.11ax'
    # Cellular
    TYPE_GSM = 'gsm'
    TYPE_CDMA = 'cdma'
    TYPE_LTE = 'lte'
    # SONET
    TYPE_SONET_OC3 = 'sonet-oc3'
    TYPE_SONET_OC12 = 'sonet-oc12'
    TYPE_SONET_OC48 = 'sonet-oc48'
    TYPE_SONET_OC192 = 'sonet-oc192'
    TYPE_SONET_OC768 = 'sonet-oc768'
    TYPE_SONET_OC1920 = 'sonet-oc1920'
    TYPE_SONET_OC3840 = 'sonet-oc3840'
    # Fibrechannel
    TYPE_1GFC_SFP = '1gfc-sfp'
    TYPE_2GFC_SFP = '2gfc-sfp'
    TYPE_4GFC_SFP = '4gfc-sfp'
    TYPE_8GFC_SFP_PLUS = '8gfc-sfpp'
    TYPE_16GFC_SFP_PLUS = '16gfc-sfpp'
    TYPE_32GFC_SFP28 = '32gfc-sfp28'
    TYPE_64GFC_QSFP_PLUS = '64gfc-qsfpp'
    # NOTE(review): slug says 'sfp28' while the constant says QSFP28 —
    # stored value; confirm before changing.
    TYPE_128GFC_QSFP28 = '128gfc-sfp28'
    # InfiniBand
    TYPE_INFINIBAND_SDR = 'infiniband-sdr'
    TYPE_INFINIBAND_DDR = 'infiniband-ddr'
    TYPE_INFINIBAND_QDR = 'infiniband-qdr'
    TYPE_INFINIBAND_FDR10 = 'infiniband-fdr10'
    TYPE_INFINIBAND_FDR = 'infiniband-fdr'
    TYPE_INFINIBAND_EDR = 'infiniband-edr'
    TYPE_INFINIBAND_HDR = 'infiniband-hdr'
    TYPE_INFINIBAND_NDR = 'infiniband-ndr'
    TYPE_INFINIBAND_XDR = 'infiniband-xdr'
    # Serial
    TYPE_T1 = 't1'
    TYPE_E1 = 'e1'
    TYPE_T3 = 't3'
    TYPE_E3 = 'e3'
    # ATM/DSL
    TYPE_XDSL = 'xdsl'
    # Stacking
    TYPE_STACKWISE = 'cisco-stackwise'
    TYPE_STACKWISE_PLUS = 'cisco-stackwise-plus'
    TYPE_FLEXSTACK = 'cisco-flexstack'
    TYPE_FLEXSTACK_PLUS = 'cisco-flexstack-plus'
    TYPE_JUNIPER_VCP = 'juniper-vcp'
    TYPE_SUMMITSTACK = 'extreme-summitstack'
    TYPE_SUMMITSTACK128 = 'extreme-summitstack-128'
    TYPE_SUMMITSTACK256 = 'extreme-summitstack-256'
    TYPE_SUMMITSTACK512 = 'extreme-summitstack-512'
    # Other
    TYPE_OTHER = 'other'
    CHOICES = (
        (
            'Virtual interfaces',
            (
                (TYPE_VIRTUAL, 'Virtual'),
                (TYPE_LAG, 'Link Aggregation Group (LAG)'),
            ),
        ),
        (
            'Ethernet (fixed)',
            (
                (TYPE_100ME_FIXED, '100BASE-TX (10/100ME)'),
                (TYPE_1GE_FIXED, '1000BASE-T (1GE)'),
                (TYPE_2GE_FIXED, '2.5GBASE-T (2.5GE)'),
                (TYPE_5GE_FIXED, '5GBASE-T (5GE)'),
                (TYPE_10GE_FIXED, '10GBASE-T (10GE)'),
                (TYPE_10GE_CX4, '10GBASE-CX4 (10GE)'),
            )
        ),
        (
            'Ethernet (modular)',
            (
                (TYPE_1GE_GBIC, 'GBIC (1GE)'),
                (TYPE_1GE_SFP, 'SFP (1GE)'),
                (TYPE_10GE_SFP_PLUS, 'SFP+ (10GE)'),
                (TYPE_10GE_XFP, 'XFP (10GE)'),
                (TYPE_10GE_XENPAK, 'XENPAK (10GE)'),
                (TYPE_10GE_X2, 'X2 (10GE)'),
                (TYPE_25GE_SFP28, 'SFP28 (25GE)'),
                (TYPE_50GE_SFP56, 'SFP56 (50GE)'),
                (TYPE_40GE_QSFP_PLUS, 'QSFP+ (40GE)'),
                (TYPE_50GE_QSFP28, 'QSFP28 (50GE)'),
                (TYPE_100GE_CFP, 'CFP (100GE)'),
                (TYPE_100GE_CFP2, 'CFP2 (100GE)'),
                (TYPE_200GE_CFP2, 'CFP2 (200GE)'),
                (TYPE_100GE_CFP4, 'CFP4 (100GE)'),
                (TYPE_100GE_CPAK, 'Cisco CPAK (100GE)'),
                (TYPE_100GE_QSFP28, 'QSFP28 (100GE)'),
                (TYPE_200GE_QSFP56, 'QSFP56 (200GE)'),
                (TYPE_400GE_QSFP_DD, 'QSFP-DD (400GE)'),
                (TYPE_400GE_OSFP, 'OSFP (400GE)'),
            )
        ),
        (
            'Wireless',
            (
                (TYPE_80211A, 'IEEE 802.11a'),
                (TYPE_80211G, 'IEEE 802.11b/g'),
                (TYPE_80211N, 'IEEE 802.11n'),
                (TYPE_80211AC, 'IEEE 802.11ac'),
                (TYPE_80211AD, 'IEEE 802.11ad'),
                (TYPE_80211AX, 'IEEE 802.11ax'),
            )
        ),
        (
            'Cellular',
            (
                (TYPE_GSM, 'GSM'),
                (TYPE_CDMA, 'CDMA'),
                (TYPE_LTE, 'LTE'),
            )
        ),
        (
            'SONET',
            (
                (TYPE_SONET_OC3, 'OC-3/STM-1'),
                (TYPE_SONET_OC12, 'OC-12/STM-4'),
                (TYPE_SONET_OC48, 'OC-48/STM-16'),
                (TYPE_SONET_OC192, 'OC-192/STM-64'),
                (TYPE_SONET_OC768, 'OC-768/STM-256'),
                (TYPE_SONET_OC1920, 'OC-1920/STM-640'),
                # NOTE(review): OC-3840 usually maps to STM-1280; the
                # 'STM-1234' label looks like a typo — confirm upstream.
                (TYPE_SONET_OC3840, 'OC-3840/STM-1234'),
            )
        ),
        (
            'FibreChannel',
            (
                (TYPE_1GFC_SFP, 'SFP (1GFC)'),
                (TYPE_2GFC_SFP, 'SFP (2GFC)'),
                (TYPE_4GFC_SFP, 'SFP (4GFC)'),
                (TYPE_8GFC_SFP_PLUS, 'SFP+ (8GFC)'),
                (TYPE_16GFC_SFP_PLUS, 'SFP+ (16GFC)'),
                (TYPE_32GFC_SFP28, 'SFP28 (32GFC)'),
                (TYPE_64GFC_QSFP_PLUS, 'QSFP+ (64GFC)'),
                (TYPE_128GFC_QSFP28, 'QSFP28 (128GFC)'),
            )
        ),
        (
            'InfiniBand',
            (
                (TYPE_INFINIBAND_SDR, 'SDR (2 Gbps)'),
                (TYPE_INFINIBAND_DDR, 'DDR (4 Gbps)'),
                (TYPE_INFINIBAND_QDR, 'QDR (8 Gbps)'),
                (TYPE_INFINIBAND_FDR10, 'FDR10 (10 Gbps)'),
                (TYPE_INFINIBAND_FDR, 'FDR (13.5 Gbps)'),
                (TYPE_INFINIBAND_EDR, 'EDR (25 Gbps)'),
                (TYPE_INFINIBAND_HDR, 'HDR (50 Gbps)'),
                (TYPE_INFINIBAND_NDR, 'NDR (100 Gbps)'),
                (TYPE_INFINIBAND_XDR, 'XDR (250 Gbps)'),
            )
        ),
        (
            'Serial',
            (
                (TYPE_T1, 'T1 (1.544 Mbps)'),
                (TYPE_E1, 'E1 (2.048 Mbps)'),
                (TYPE_T3, 'T3 (45 Mbps)'),
                (TYPE_E3, 'E3 (34 Mbps)'),
            )
        ),
        (
            'ATM',
            (
                (TYPE_XDSL, 'xDSL'),
            )
        ),
        (
            'Stacking',
            (
                (TYPE_STACKWISE, 'Cisco StackWise'),
                (TYPE_STACKWISE_PLUS, 'Cisco StackWise Plus'),
                (TYPE_FLEXSTACK, 'Cisco FlexStack'),
                (TYPE_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'),
                (TYPE_JUNIPER_VCP, 'Juniper VCP'),
                (TYPE_SUMMITSTACK, 'Extreme SummitStack'),
                (TYPE_SUMMITSTACK128, 'Extreme SummitStack-128'),
                (TYPE_SUMMITSTACK256, 'Extreme SummitStack-256'),
                (TYPE_SUMMITSTACK512, 'Extreme SummitStack-512'),
            )
        ),
        (
            'Other',
            (
                (TYPE_OTHER, 'Other'),
            )
        ),
    )
class InterfaceModeChoices(ChoiceSet):
    """802.1Q tagging mode of an interface."""

    MODE_ACCESS = 'access'
    MODE_TAGGED = 'tagged'
    MODE_TAGGED_ALL = 'tagged-all'

    # Pair each stored slug with its human-readable label.
    CHOICES = tuple(zip(
        (MODE_ACCESS, MODE_TAGGED, MODE_TAGGED_ALL),
        ('Access', 'Tagged', 'Tagged (All)'),
    ))
#
# FrontPorts/RearPorts
#
class PortTypeChoices(ChoiceSet):
    """Connector types for front/rear pass-through ports.

    ``TYPE_*`` constants are DB-stored slugs; ``CHOICES`` splits the
    (slug, label) pairs into copper and fiber-optic groups.
    """
    TYPE_8P8C = '8p8c'
    TYPE_8P6C = '8p6c'
    TYPE_8P4C = '8p4c'
    TYPE_8P2C = '8p2c'
    TYPE_6P6C = '6p6c'
    TYPE_6P4C = '6p4c'
    TYPE_6P2C = '6p2c'
    TYPE_4P4C = '4p4c'
    TYPE_4P2C = '4p2c'
    TYPE_GG45 = 'gg45'
    TYPE_TERA4P = 'tera-4p'
    TYPE_TERA2P = 'tera-2p'
    TYPE_TERA1P = 'tera-1p'
    TYPE_110_PUNCH = '110-punch'
    TYPE_BNC = 'bnc'
    TYPE_F = 'f'
    TYPE_N = 'n'
    TYPE_MRJ21 = 'mrj21'
    TYPE_ST = 'st'
    TYPE_SC = 'sc'
    TYPE_SC_APC = 'sc-apc'
    TYPE_FC = 'fc'
    TYPE_LC = 'lc'
    TYPE_LC_APC = 'lc-apc'
    TYPE_MTRJ = 'mtrj'
    TYPE_MPO = 'mpo'
    TYPE_LSH = 'lsh'
    TYPE_LSH_APC = 'lsh-apc'
    TYPE_SPLICE = 'splice'
    TYPE_CS = 'cs'
    TYPE_SN = 'sn'
    TYPE_SMA_905 = 'sma-905'
    TYPE_SMA_906 = 'sma-906'
    TYPE_URM_P2 = 'urm-p2'
    TYPE_URM_P4 = 'urm-p4'
    TYPE_URM_P8 = 'urm-p8'
    CHOICES = (
        (
            'Copper',
            (
                (TYPE_8P8C, '8P8C'),
                (TYPE_8P6C, '8P6C'),
                (TYPE_8P4C, '8P4C'),
                (TYPE_8P2C, '8P2C'),
                (TYPE_6P6C, '6P6C'),
                (TYPE_6P4C, '6P4C'),
                (TYPE_6P2C, '6P2C'),
                (TYPE_4P4C, '4P4C'),
                (TYPE_4P2C, '4P2C'),
                (TYPE_GG45, 'GG45'),
                (TYPE_TERA4P, 'TERA 4P'),
                (TYPE_TERA2P, 'TERA 2P'),
                (TYPE_TERA1P, 'TERA 1P'),
                (TYPE_110_PUNCH, '110 Punch'),
                (TYPE_BNC, 'BNC'),
                (TYPE_F, 'F Connector'),
                (TYPE_N, 'N Connector'),
                (TYPE_MRJ21, 'MRJ21'),
            ),
        ),
        (
            'Fiber Optic',
            (
                (TYPE_FC, 'FC'),
                (TYPE_LC, 'LC'),
                (TYPE_LC_APC, 'LC/APC'),
                (TYPE_LSH, 'LSH'),
                (TYPE_LSH_APC, 'LSH/APC'),
                (TYPE_MPO, 'MPO'),
                (TYPE_MTRJ, 'MTRJ'),
                (TYPE_SC, 'SC'),
                (TYPE_SC_APC, 'SC/APC'),
                (TYPE_ST, 'ST'),
                (TYPE_CS, 'CS'),
                (TYPE_SN, 'SN'),
                (TYPE_SMA_905, 'SMA 905'),
                (TYPE_SMA_906, 'SMA 906'),
                (TYPE_URM_P2, 'URM-P2'),
                (TYPE_URM_P4, 'URM-P4'),
                (TYPE_URM_P8, 'URM-P8'),
                (TYPE_SPLICE, 'Splice'),
            )
        )
    )
#
# Cables
#
class CableTypeChoices(ChoiceSet):
    """Physical cable media types.

    ``TYPE_*`` constants are DB-stored slugs; ``CHOICES`` groups copper and
    fiber media, with power cabling as a flat (ungrouped) trailing entry.
    """
    TYPE_CAT3 = 'cat3'
    TYPE_CAT5 = 'cat5'
    TYPE_CAT5E = 'cat5e'
    TYPE_CAT6 = 'cat6'
    TYPE_CAT6A = 'cat6a'
    TYPE_CAT7 = 'cat7'
    TYPE_CAT7A = 'cat7a'
    TYPE_CAT8 = 'cat8'
    TYPE_DAC_ACTIVE = 'dac-active'
    TYPE_DAC_PASSIVE = 'dac-passive'
    TYPE_MRJ21_TRUNK = 'mrj21-trunk'
    TYPE_COAXIAL = 'coaxial'
    TYPE_MMF = 'mmf'
    TYPE_MMF_OM1 = 'mmf-om1'
    TYPE_MMF_OM2 = 'mmf-om2'
    TYPE_MMF_OM3 = 'mmf-om3'
    TYPE_MMF_OM4 = 'mmf-om4'
    TYPE_MMF_OM5 = 'mmf-om5'
    TYPE_SMF = 'smf'
    TYPE_SMF_OS1 = 'smf-os1'
    TYPE_SMF_OS2 = 'smf-os2'
    TYPE_AOC = 'aoc'
    TYPE_POWER = 'power'
    CHOICES = (
        (
            'Copper', (
                (TYPE_CAT3, 'CAT3'),
                (TYPE_CAT5, 'CAT5'),
                (TYPE_CAT5E, 'CAT5e'),
                (TYPE_CAT6, 'CAT6'),
                (TYPE_CAT6A, 'CAT6a'),
                (TYPE_CAT7, 'CAT7'),
                (TYPE_CAT7A, 'CAT7a'),
                (TYPE_CAT8, 'CAT8'),
                (TYPE_DAC_ACTIVE, 'Direct Attach Copper (Active)'),
                (TYPE_DAC_PASSIVE, 'Direct Attach Copper (Passive)'),
                (TYPE_MRJ21_TRUNK, 'MRJ21 Trunk'),
                (TYPE_COAXIAL, 'Coaxial'),
            ),
        ),
        (
            'Fiber', (
                (TYPE_MMF, 'Multimode Fiber'),
                (TYPE_MMF_OM1, 'Multimode Fiber (OM1)'),
                (TYPE_MMF_OM2, 'Multimode Fiber (OM2)'),
                (TYPE_MMF_OM3, 'Multimode Fiber (OM3)'),
                (TYPE_MMF_OM4, 'Multimode Fiber (OM4)'),
                (TYPE_MMF_OM5, 'Multimode Fiber (OM5)'),
                (TYPE_SMF, 'Singlemode Fiber'),
                (TYPE_SMF_OS1, 'Singlemode Fiber (OS1)'),
                (TYPE_SMF_OS2, 'Singlemode Fiber (OS2)'),
                (TYPE_AOC, 'Active Optical Cabling (AOC)'),
            ),
        ),
        # Deliberately flat (not nested in a group): renders at the top level.
        (TYPE_POWER, 'Power'),
    )
class CableStatusChoices(ChoiceSet):
    """Lifecycle status of a cable, with a CSS class per status for the UI."""

    STATUS_CONNECTED = 'connected'
    STATUS_PLANNED = 'planned'
    STATUS_DECOMMISSIONING = 'decommissioning'

    # Every label is the title-cased slug.
    CHOICES = tuple(
        (status, status.title())
        for status in (STATUS_CONNECTED, STATUS_PLANNED, STATUS_DECOMMISSIONING)
    )

    # Bootstrap contextual classes used when rendering each status.
    CSS_CLASSES = {
        STATUS_CONNECTED: 'success',
        STATUS_PLANNED: 'info',
        STATUS_DECOMMISSIONING: 'warning',
    }
class CableLengthUnitChoices(ChoiceSet):
    """Units in which a cable length may be recorded."""

    # Metric
    UNIT_KILOMETER = 'km'
    UNIT_METER = 'm'
    UNIT_CENTIMETER = 'cm'
    # Imperial
    UNIT_MILE = 'mi'
    UNIT_FOOT = 'ft'
    UNIT_INCH = 'in'

    # Slug/label pairs: metric first, largest to smallest within each system.
    CHOICES = tuple(zip(
        (UNIT_KILOMETER, UNIT_METER, UNIT_CENTIMETER,
         UNIT_MILE, UNIT_FOOT, UNIT_INCH),
        ('Kilometers', 'Meters', 'Centimeters',
         'Miles', 'Feet', 'Inches'),
    ))
#
# PowerFeeds
#
class PowerFeedStatusChoices(ChoiceSet):
    """Operational status of a power feed, with a CSS class per status."""

    STATUS_OFFLINE = 'offline'
    STATUS_ACTIVE = 'active'
    STATUS_PLANNED = 'planned'
    STATUS_FAILED = 'failed'

    # Labels are simply the title-cased slugs.
    CHOICES = tuple(
        (status, status.title())
        for status in (STATUS_OFFLINE, STATUS_ACTIVE, STATUS_PLANNED, STATUS_FAILED)
    )

    # Bootstrap contextual classes used when rendering each status.
    CSS_CLASSES = {
        STATUS_OFFLINE: 'warning',
        STATUS_ACTIVE: 'success',
        STATUS_PLANNED: 'info',
        STATUS_FAILED: 'danger',
    }
class PowerFeedTypeChoices(ChoiceSet):
    """Role of a power feed within a redundant pair."""

    TYPE_PRIMARY = 'primary'
    TYPE_REDUNDANT = 'redundant'

    # Labels are the title-cased slugs.
    CHOICES = tuple(
        (feed_type, feed_type.title())
        for feed_type in (TYPE_PRIMARY, TYPE_REDUNDANT)
    )

    # Bootstrap contextual classes used when rendering each type.
    CSS_CLASSES = {
        TYPE_PRIMARY: 'success',
        TYPE_REDUNDANT: 'info',
    }
class PowerFeedSupplyChoices(ChoiceSet):
    """Current type (AC or DC) supplied by a power feed."""

    SUPPLY_AC = 'ac'
    SUPPLY_DC = 'dc'

    # Labels are the upper-cased slugs.
    CHOICES = tuple(
        (supply, supply.upper()) for supply in (SUPPLY_AC, SUPPLY_DC)
    )
class PowerFeedPhaseChoices(ChoiceSet):
    """Phase configuration of a power feed."""

    PHASE_SINGLE = 'single-phase'
    PHASE_3PHASE = 'three-phase'

    # Pair each stored slug with its display label.
    CHOICES = tuple(zip(
        (PHASE_SINGLE, PHASE_3PHASE),
        ('Single phase', 'Three-phase'),
    ))
| 28.941516 | 69 | 0.53821 |
8a1a992a06a197b1b40919e1cac4fccb09ff6078 | 2,730 | py | Python | Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py | quanganh2627/bytm-x64-L-w05-2015_external_chromium_org_third_party_WebKit | 20e637e67a0c272870ae4d78466a68bcb77af041 | [
"BSD-3-Clause"
] | null | null | null | Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py | quanganh2627/bytm-x64-L-w05-2015_external_chromium_org_third_party_WebKit | 20e637e67a0c272870ae4d78466a68bcb77af041 | [
"BSD-3-Clause"
] | null | null | null | Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py | quanganh2627/bytm-x64-L-w05-2015_external_chromium_org_third_party_WebKit | 20e637e67a0c272870ae4d78466a68bcb77af041 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.port import test
from webkitpy.layout_tests.servers.server_base import ServerBase
class TestServerBase(unittest.TestCase):

    def test_corrupt_pid_file(self):
        """start() and stop() must tolerate a corrupt pid file and remove it."""
        host = MockHost()
        port = test.TestPort(host)
        server = ServerBase(port, port.default_results_directory())
        server._pid_file = '/tmp/pidfile'
        server._spawn_process = lambda: 4
        server._is_server_running_on_all_ports = lambda: True

        # stop() should delete the unparseable pid file rather than crash.
        host.filesystem.write_text_file(server._pid_file, 'foo')
        server.stop()
        self.assertEqual(host.filesystem.files[server._pid_file], None)

        # start() should likewise discard the bogus file and record the new pid.
        host.filesystem.write_text_file(server._pid_file, 'foo')
        server.start()
        self.assertEqual(server._pid, 4)
        # The stubbed _spawn_process never writes a real pid file, so the
        # filesystem entry stays None; a real implementation would repopulate it.
        self.assertEqual(host.filesystem.files[server._pid_file], None)
| 46.271186 | 77 | 0.752381 |
641a72c8e3d745b0d54dc2e7f4f2d45e84edcc5d | 2,588 | py | Python | Alignment/OfflineValidation/test/testCompare_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Alignment/OfflineValidation/test/testCompare_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Alignment/OfflineValidation/test/testCompare_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: compare two tracker geometries and write the
# surface-deformation histograms for the mp1260m2 alignment.
process = cms.Process("TEST")

# Conditions: the global tag supplying alignment records.
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = "FT_R_53_V6C::All"
process.GlobalTag.globaltag = "GR_R_61_V6::All" # FT_R_42_V13A used in rereco of 2010 cosmics

# Tracker geometry description.
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Configuration.Geometry.GeometryIdeal_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi")

# Geometry-comparison analyzer and conditions-DB helpers.
process.load("Alignment.OfflineValidation.TrackerGeometryCompare_cfi")
process.load("CondCore.DBCommon.CondDBSetup_cfi")

# Log to stdout (cerr disabled) plus a detailedInfo file.
process.MessageLogger = cms.Service(
    "MessageLogger",
    cerr=cms.untracked.PSet(enable=cms.untracked.bool(False)),
    cout=cms.untracked.PSet(enable=cms.untracked.bool(True)),
    files=cms.untracked.PSet(detailedInfo=cms.untracked.PSet()),
)

# No event data is required; the comparison runs on a single empty event.
process.source = cms.Source("EmptySource")
process.load("DQM.SiStripCommon.TkHistoMap_cff")
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))

process.p = cms.Path(process.TrackerGeometryCompare)

# Input geometries: millepede result vs. the ideal geometry.
process.TrackerGeometryCompare.inputROOTFile1 = 'myInputGeometry_mp1260m2.root'
process.TrackerGeometryCompare.inputROOTFile2 = 'IDEAL'
process.TrackerGeometryCompare.outputFile = 'outputComparison_mp1260m2_2012AB.root'

process.load("CommonTools.UtilAlgos.TFileService_cfi")
#process.TFileService = cms.Service("TFileService",
#        fileName = cms.string('TkSurfDeform_mp1260m2_2012AB.root')
#        )
process.TFileService.fileName = cms.string('TkSurfDeform_mp1260m2_2012AB.root')

# Compare at module (DetUnit) granularity only.
process.TrackerGeometryCompare.levels = ['DetUnit']
#process.TrackerGeometryCompare.levels = ['Tracker','DetUnit']
#process.TrackerGeometryCompare.levels = ['Tracker','TOBBarrel','TIBBarrel','TECEndcap','TECDisk']
#process.TrackerGeometryCompare.levels = ["TPBBarrel","TPEEndcap","TIBBarrel","TIDEndcap","TOBBarrel","TECEndcap"]

# Disabled example: write the compared geometry back to a sqlite payload.
"""
process.TrackerGeometryCompare.writeToDB = True
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBSetup,
    timetype = cms.untracked.string('runnumber'),
    connect = cms.string('sqlite_file:yourNewTrackerCenteredObject.db'),
    toPut = cms.VPSet(cms.PSet(
        record = cms.string('TrackerAlignmentRcd'),
        tag = cms.string('Alignments')
    ),
    cms.PSet(
        record = cms.string('TrackerAlignmentErrorExtendedRcd'),
        tag = cms.string('AlignmentErrorsExtended')
    ))
)
"""
| 34.506667 | 115 | 0.760046 |
2ec1b08f1138eeb26969bfcd8167472467e678a6 | 225 | py | Python | BootCRUDApp/forms.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-gkg901 | c9543d77cbd2d28248fbf1f0577c0075330db8d7 | [
"Apache-2.0"
] | null | null | null | BootCRUDApp/forms.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-gkg901 | c9543d77cbd2d28248fbf1f0577c0075330db8d7 | [
"Apache-2.0"
] | null | null | null | BootCRUDApp/forms.py | cs-fullstack-2019-spring/django-bootstrapcrud-cw-gkg901 | c9543d77cbd2d28248fbf1f0577c0075330db8d7 | [
"Apache-2.0"
] | null | null | null | from .models import sellModel
from django import forms
class sellForm(forms.ModelForm):
    """ModelForm exposing every field of ``sellModel``.

    NOTE(review): the class name is lower-case; PEP 8 would prefer
    ``SellForm``, but renaming would break existing imports/views.
    """
    class Meta:
        model = sellModel
        fields = '__all__'
        # Override the auto-generated label for the picture field only.
        labels = {
            'picture': "Image Url",
        }
| 17.307692 | 35 | 0.577778 |
3ae75117f8f3fdbb7d06eeabf7aebcec8b802b22 | 506 | py | Python | data/scripts/templates/object/building/player/construction/shared_construction_player_house_naboo_small_style_02.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/building/player/construction/shared_construction_player_house_naboo_small_style_02.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/building/player/construction/shared_construction_player_house_naboo_small_style_02.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/player/construction/shared_construction_player_house_naboo_small_style_02.iff"
result.attribute_template_id = -1
result.stfName("player_structure","temporary_structure")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 29.764706 | 114 | 0.756917 |
fdda707bb42ca802022c070bf08c12930c5b9c16 | 2,507 | py | Python | STRUCT2xtl.py | rpt26/STRUCT2xtl | dbc4a17e9d7b707e2889f8e74cadb2f6354e9f5b | [
"MIT"
] | null | null | null | STRUCT2xtl.py | rpt26/STRUCT2xtl | dbc4a17e9d7b707e2889f8e74cadb2f6354e9f5b | [
"MIT"
] | null | null | null | STRUCT2xtl.py | rpt26/STRUCT2xtl | dbc4a17e9d7b707e2889f8e74cadb2f6354e9f5b | [
"MIT"
] | null | null | null | import numpy as np
from mendeleev import element
import glob
# Leftover example value; unused — the batch loop at the bottom of the file
# rebinds ``filename`` from the glob results.
filename = 'Ti3SiC2.STRUCT_OUT'
def struct2xtl(filename):
    """Convert a SIESTA ``.STRUCT_OUT`` file into a VESTA-style ``.xtl`` file.

    The input is expected to contain three lattice-vector rows (three floats
    each), a line with the atom count, then one row per atom whose second
    field is the atomic number and whose next three fields are the
    coordinates (assumed fractional — TODO confirm against the SIESTA run).

    Side effects: writes ``<basename>.xtl`` next to the input file and prints
    the generated text.
    """
    # Strip only the final extension so dotted system names survive
    # (the old ``split('.')[0]`` truncated e.g. 'a.b.STRUCT_OUT' to 'a').
    system = filename.rsplit('.', 1)[0]

    latt_vectors = np.empty((3, 3))
    with open(filename) as text_file:
        for i in range(3):
            row = text_file.readline().split()
            for j, value in enumerate(row[:3]):
                latt_vectors[i, j] = float(value)
        num_of_atoms = int(text_file.readline())

        atom_coords = np.empty((num_of_atoms, 3))
        atom_labels = []
        symbol_cache = {}  # atomic number -> symbol; mendeleev lookups hit a DB
        for i in range(num_of_atoms):
            fields = text_file.readline().split()
            atom_num = int(fields[1])
            if atom_num not in symbol_cache:
                symbol_cache[atom_num] = element(atom_num).symbol
            atom_labels.append(symbol_cache[atom_num])
            # Only the three coordinate fields; ignore any trailing columns.
            for j, value in enumerate(fields[2:5]):
                atom_coords[i, j] = float(value)

    ## Crystallographic Note
    # A unit cell is defined by three lengths a, b, c and three angles
    # alpha, beta, gamma, where alpha is the angle between the b and c
    # vectors and so on (see Kelly & Knowles, Crystallography and Crystal
    # Defects).
    a = np.linalg.norm(latt_vectors[0])
    b = np.linalg.norm(latt_vectors[1])
    c = np.linalg.norm(latt_vectors[2])
    alpha = np.arccos(np.dot(latt_vectors[1], latt_vectors[2]) / (c * b))
    beta = np.arccos(np.dot(latt_vectors[0], latt_vectors[2]) / (a * c))
    gamma = np.arccos(np.dot(latt_vectors[0], latt_vectors[1]) / (a * b))
    latt_params = (a, b, c,
                   np.degrees(alpha), np.degrees(beta), np.degrees(gamma))

    title = 'TITLE ' + system + '\n'
    # Bug fix: the second header keyword was misspelled 'SYMETTRY'; the xtl
    # format expects 'SYMMETRY LABEL'.
    unit_cell = ('CELL\n'
                 + '{:g} {:g} {:g} {:g} {:g} {:g}\n'.format(*latt_params)
                 + 'SYMMETRY NUMBER 1 \n'
                 + 'SYMMETRY LABEL P1 \n')
    atoms = 'ATOMS\nNAME X Y Z\n'
    for label, coords in zip(atom_labels, atom_coords):
        atoms += label + ' {:g} {:g} {:g}\n'.format(*coords)
    text = title + unit_cell + atoms

    with open(system + '.xtl', 'wt') as out_file:
        out_file.write(text)
    print(text)
    print('\n #########################\n '
          + 'Saved xtl to file.')
# Batch-convert every STRUCT_OUT file found in the current directory.
for filename in glob.iglob('*.STRUCT_OUT'):
    struct2xtl(filename)
| 33.878378 | 72 | 0.585959 |
438d4367cc74314d55bda03de165a4a53c80e2f2 | 9,422 | py | Python | tensorflow_probability/python/vi/mutual_information_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | 2 | 2020-02-21T06:30:00.000Z | 2021-08-08T19:29:15.000Z | tensorflow_probability/python/vi/mutual_information_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | 2 | 2021-08-25T16:14:51.000Z | 2022-02-10T04:47:11.000Z | tensorflow_probability/python/vi/mutual_information_test.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | 1 | 2020-06-04T23:26:31.000Z | 2020-06-04T23:26:31.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for mutual information estimators and helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
# Short aliases for the modules under test.
mi = tfp.vi.mutual_information
tfd = tfp.distributions
# Slack allowed when checking a lower-bound estimate against the exact MI:
# the estimate may undershoot by MIN_GAP and, from sampling noise, overshoot
# by at most MAX_GAP.
LOWER_BOUND_MIN_GAP = 0.3
LOWER_BOUND_MAX_GAP = 0.1
class MutualInformationTest(tfp_test_util.TestCase):
  def setUp(self):
    """Build fixtures: random scores and correlated-Gaussian (x, y) samples."""
    super(MutualInformationTest, self).setUp()
    self.seed = tfp_test_util.test_seed()
    np.random.seed(self.seed)
    # Generic random score matrix used by the helper-function tests.
    self.scores = np.random.normal(
        loc=1.0,
        scale=2.0,
        size=[13, 17])
    batch_size = 1000
    rho = 0.8
    dim = 2
    x, eps = tf.split(value=tf.random.normal(shape=(2*batch_size, dim),
                                             seed=self.seed),
                      num_or_size_splits=2, axis=0)
    # y | x ~ N(rho * x, 1 - rho^2), so each dimension of (x, y) is a
    # bivariate Gaussian with correlation rho.
    mean = rho * x
    stddev = tf.sqrt(1. - tf.square(rho))
    y = mean + stddev * eps
    conditional_dist = tfd.MultivariateNormalDiag(
        mean, scale_identity_multiplier=stddev)
    marginal_dist = tfd.MultivariateNormalDiag(tf.zeros(dim), tf.ones(dim))
    # The conditional_scores has its shape [y_batch_dim, distribution_batch_dim]
    # as the `lower_bound_info_nce` requires `scores[i, j] = f(x[i], y[j])
    # = log p(x[i] | y[j])`.
    self.conditional_scores = conditional_dist.log_prob(y[:, tf.newaxis, :])
    self.marginal_scores = marginal_dist.log_prob(y)[:, tf.newaxis]
    self.optimal_critic = 1 + self.conditional_scores - self.marginal_scores
    # Exact MI of a correlated Gaussian: -0.5 * log(1 - rho^2) per dimension.
    self.theoretical_mi = np.float32(-0.5 * np.log(1. - rho**2) * dim)
    # Y is N-D standard normal distributed.
    self.differential_entropy_y = 0.5 * np.log(2 * np.pi * np.e) * dim
  def test_check_and_get_mask(self):
    """Default positive mask should be the (possibly rectangular) identity."""
    test_scores = tf.ones([2, 3])
    positive_mask = np.eye(N=2, M=3, dtype=bool)
    # create default masks
    r_scores, r_pos_mask = mi._check_and_get_mask(test_scores)
    self.assertEqual(r_scores.shape, [2, 3])
    self.assertAllEqual(self.evaluate(r_pos_mask), positive_mask)
  def test_get_masked_scores(self):
    """Entries excluded by the mask should be replaced with -inf."""
    scores = np.array([[2., 5., -1e-3],
                       [-1073., 4.2, -4.]]).astype(np.float32)
    mask = scores < 3.
    target_res = np.array([[2., -np.inf, -1e-3],
                           [-1073., -np.inf, -4.]]).astype(np.float32)
    func_res = mi._get_masked_scores(scores, mask)
    self.assertAllEqual(self.evaluate(func_res), target_res)
  def test_masked_logmeanexp(self):
    """_masked_logmeanexp should match a numpy/scipy reimplementation."""
    # test1: compare against numpy/scipy implementation.
    # NOTE(review): ``masked_scores`` aliases ``self.scores``, so the -inf
    # assignment below mutates the fixture. Harmless here because the mask
    # keeps only strictly positive entries, but a copy would be safer.
    masked_scores = self.scores
    num_masked_ele = np.sum(masked_scores > 0.)
    masked_scores[masked_scores <= 0.] = -np.inf
    numpy_impl = np.float32(
        scipy.special.logsumexp(masked_scores) - np.log(num_masked_ele))
    result_0d = mi._masked_logmeanexp(self.scores, self.scores > 0, axis=None)
    self.assertAllClose(self.evaluate(result_0d), numpy_impl)
    # test2: test against results from composition of numpy functions.
    scores_2 = np.array([[2., 5., -1e-3],
                         [-1073., 4.2, -4.]], dtype=np.float32)
    result_empty_sum = mi._masked_logmeanexp(
        scores_2, scores_2 < 0., axis=None)
    numpy_result = np.log(np.mean(np.exp(scores_2[scores_2 < 0.])))
    self.assertAllClose(self.evaluate(result_empty_sum),
                        numpy_result.astype(np.float32))
    # test3: whether `axis` arg works as expected.
    result_1d = mi._masked_logmeanexp(self.scores, self.scores > 0, axis=[1,])
    self.assertEqual(result_1d.shape, [13,])
  def test_lower_bound_barber_agakov(self):
    """Barber-Agakov bound should equal mean(logu) + entropy.

    Test1/Test2 verify the formula against a numpy re-computation for 1-D and
    batched inputs; Test3 checks that, on the correlated-Gaussian example, the
    bound lands within the expected gap of the theoretical MI.
    """
    # Test1: against numpy reimplementation
    test_scores = tf.random.normal(shape=[100,], stddev=5.)
    test_entropy = tf.random.normal(shape=[], stddev=10.)
    impl_estimation, test_scores, test_entropy = self.evaluate(
        [mi.lower_bound_barber_agakov(logu=test_scores, entropy=test_entropy),
         test_scores, test_entropy])
    numpy_estimation = np.mean(test_scores) + test_entropy
    self.assertAllClose(impl_estimation, numpy_estimation)
    # Test2: batched input (mean is taken over the last axis)
    test_scores_2 = tf.random.normal(shape=[13, 5], stddev=5.)
    test_entropy_2 = tf.random.normal(shape=[13,], stddev=10.)
    impl_estimation_2, test_scores_2, test_entropy_2 = self.evaluate(
        [mi.lower_bound_barber_agakov(
            logu=test_scores_2, entropy=test_entropy_2),
         test_scores_2, test_entropy_2])
    numpy_estimation_2 = np.mean(test_scores_2, axis=-1) + test_entropy_2
    self.assertAllClose(impl_estimation_2, numpy_estimation_2)
    # Test3: test example, since the estimation is a lower bound, we test
    # by range.
    impl_estimation_3 = self.evaluate(
        mi.lower_bound_barber_agakov(
            logu=tf.linalg.diag_part(self.conditional_scores),
            entropy=self.differential_entropy_y))
    self.assertAllInRange(
        impl_estimation_3,
        self.theoretical_mi-LOWER_BOUND_MIN_GAP,
        self.theoretical_mi+LOWER_BOUND_MAX_GAP)
  def test_lower_bound_info_nce(self):
    """InfoNCE bound: range check, mask-default check, and batching check."""
    # Numerical test with correlated gaussian as random variables.
    info_nce_bound = self.evaluate(
        mi.lower_bound_info_nce(self.conditional_scores))
    self.assertAllInRange(
        info_nce_bound,
        lower_bound=self.theoretical_mi-LOWER_BOUND_MIN_GAP,
        upper_bound=self.theoretical_mi+LOWER_BOUND_MAX_GAP)
    # Check the masked against none masked version: an explicit diagonal
    # positive mask must reproduce the default behavior.
    info_nce_bound_1 = self.evaluate(
        mi.lower_bound_info_nce(self.scores))
    positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
    info_nce_bound_2 = self.evaluate(
        mi.lower_bound_info_nce(self.scores, positive_mask, validate_args=True))
    self.assertAllClose(info_nce_bound_1, info_nce_bound_2)
    # Check batched against none batched version: tiling the scores along a
    # new batch axis should tile the estimate.
    info_nce_bound_3 = self.evaluate(
        mi.lower_bound_info_nce(tf.tile(self.scores[None, :, :], [3, 1, 1])))
    self.assertAllClose(
        info_nce_bound_3,
        self.evaluate(tf.tile(info_nce_bound_1[tf.newaxis,], [3])))
  def test_lower_bound_jensen_shannon(self):
    """Jensen-Shannon bound: numpy formula, mask-default, and batching checks.

    The numpy reference computes mean(-softplus(-f(x,y))) on the diagonal
    (positive pairs) minus the mean softplus over the off-diagonal
    (negative pairs).
    """
    # Check against numpy implementation.
    log_f = self.optimal_critic
    js_bound, log_f = self.evaluate([mi.lower_bound_jensen_shannon(log_f),
                                     log_f])
    # The following numpy softplus is numerically stable when x is large
    # log(1+exp(x)) = log(1+exp(x)) - log(exp(x)) + x = log(1+exp(-x)) + x
    numpy_softplus = lambda x: np.log(1+np.exp(-np.abs(x))) + np.maximum(x, 0)
    log_f_diag = np.diag(log_f)
    n = np.float32(log_f.shape[0])
    first_term = np.mean(-numpy_softplus(-log_f_diag))
    # Off-diagonal mean: total minus diagonal, divided by n*(n-1) entries.
    second_term = (np.sum(numpy_softplus(log_f)) -
                   np.sum(numpy_softplus(log_f_diag))) / (n * (n - 1.))
    numpy_implementation = first_term - second_term
    self.assertAllClose(js_bound, numpy_implementation, rtol=1e-5)
    # Check the masked against none masked version
    js_bound_1 = mi.lower_bound_jensen_shannon(self.scores)
    positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
    js_bound_2 = self.evaluate(
        mi.lower_bound_jensen_shannon(self.scores, positive_mask,
                                      validate_args=True))
    self.assertAllClose(js_bound_1, js_bound_2)
    # Check batched against none batched version
    js_bound_3 = self.evaluate(
        mi.lower_bound_jensen_shannon(
            tf.tile(self.scores[tf.newaxis, :, :], [3, 1, 1])))
    self.assertAllClose(
        js_bound_3, self.evaluate(tf.tile(js_bound_1[tf.newaxis,], [3])))
  def test_lower_bound_nguyen_wainwright_jordan(self):
    """NWJ bound: range check with the optimal critic, mask and batch checks."""
    # Numerical test against theoretical values
    nwj_bound = self.evaluate(
        mi.lower_bound_nguyen_wainwright_jordan(self.optimal_critic))
    self.assertAllInRange(
        nwj_bound,
        lower_bound=self.theoretical_mi-LOWER_BOUND_MIN_GAP,
        upper_bound=self.theoretical_mi+LOWER_BOUND_MAX_GAP)
    # Check the masked against none masked version: an explicit diagonal
    # positive mask must reproduce the default behavior.
    nwj_bound_1 = mi.lower_bound_nguyen_wainwright_jordan(self.scores)
    positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
    nwj_bound_2 = self.evaluate(
        mi.lower_bound_nguyen_wainwright_jordan(
            self.scores, positive_mask, validate_args=True))
    self.assertAllClose(nwj_bound_1, nwj_bound_2)
    # Check batched against none batched version
    nwj_bound_3 = self.evaluate(
        mi.lower_bound_nguyen_wainwright_jordan(
            tf.tile(self.scores[tf.newaxis, :, :], [3, 1, 1])))
    self.assertAllClose(
        nwj_bound_3, self.evaluate(tf.tile(nwj_bound_1[None,], [3])))
if __name__ == '__main__':
  # Run every test case in this module through the TensorFlow test runner.
  tf.test.main()
| 41.875556 | 80 | 0.687858 |
fb4b387aa7da4f499cb02552fc93f881a38c6b15 | 7,577 | py | Python | rcc_dp/frequency_estimation/experiment_coding_cost.py | lamylio/federated | 3f79e71344016ae5e5ec550557af25e5c169a934 | [
"Apache-2.0"
] | 1 | 2022-03-16T02:13:39.000Z | 2022-03-16T02:13:39.000Z | rcc_dp/frequency_estimation/experiment_coding_cost.py | notminusone/federated | 6a709f5598450232b918c046cfeba849f479d5cb | [
"Apache-2.0"
] | null | null | null | rcc_dp/frequency_estimation/experiment_coding_cost.py | notminusone/federated | 6a709f5598450232b918c046cfeba849f479d5cb | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation of miracle, rhr, and subset selection when coding cost is varied."""
import json
import math
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from rcc_dp import modify_pi
from rcc_dp.frequency_estimation import miracle
from rcc_dp.frequency_estimation import rhr
from rcc_dp.frequency_estimation import ss
from rcc_dp.frequency_estimation import unbias
matplotlib.rcParams["ps.useafm"] = True
matplotlib.rcParams["pdf.use14corefonts"] = True
matplotlib.rcParams["text.usetex"] = True
def generate_geometric_distribution(k, lbd):
  """Return a length-k truncated geometric pmf with ratio `lbd`.

  Entry x (for x in 0..k-1) is (1 - lbd) * lbd**x divided by the truncation
  normalizer (1 - lbd**k), so the k entries sum to one.
  """
  normalizer = 1 - math.pow(lbd, k)
  pmf = []
  for x in range(k):
    pmf.append((1 - lbd) * math.pow(lbd, x) / normalizer)
  return pmf
def generate_uniform_distribution(k):
  """Return a length-k uniform pmf where every entry equals 1/k."""
  total = float(k)
  return [1.0 / total for _ in range(k)]
def generate_zipf_distribution(k, degree):
  """Return a length-k Zipf pmf: entry at rank i is proportional to 1/i**degree."""
  weights = []
  for rank in range(1, k + 1):
    weights.append(1 / float(rank)**degree)
  total = float(sum(weights))
  return [w / total for w in weights]
def evaluate(work_path, config, file_open=open):
  """Evaluates miracle, rhr, ss methods while the coding cost is varied.

  Runs `config.num_itr` Monte-Carlo rounds of private frequency estimation:
  MMRC (modified miracle) is re-run for each coding cost in
  `config.cc_space`, while subset selection (SS) and RHR do not depend on the
  varied coding cost, so they are computed once per round and plotted as flat
  baselines. Writes the config, per-method error CSVs, a timing file, and an
  error-vs-bits plot into `work_path`.

  Args:
    work_path: Output directory path (string prefix for all files written).
    config: Experiment configuration (k, n, epsilon_target, cc_space,
      num_itr, distribution choice, which methods to run, etc.).
    file_open: Callable used to open output files; injectable so tests or
      non-local filesystems can substitute their own opener.
  """
  with file_open(work_path + "/config.json", "w") as f:
    json.dump(config.to_dict(), f)
  start_time = time.time()
  alpha = config.alpha
  # Get default values.
  k = config.k
  n = config.n
  epsilon_target = config.epsilon_target
  vary_space = config.cc_space
  print("coding space = " + str(vary_space))
  # One row per iteration; MMRC has one column per coding cost, the two
  # baselines are single-column because they ignore the varied coding cost.
  modified_miracle_error = np.zeros((config.num_itr, len(vary_space)))
  rhr_error = np.zeros((config.num_itr, 1))
  ss_error = np.zeros((config.num_itr, 1))
  rhr_coding_cost = epsilon_target
  for itr in range(config.num_itr):
    print("itr = %d" % itr)
    print("epsilon target = " + str(epsilon_target))
    print("n = " + str(n))
    print("k = %d" % k)
    if config.run_modified_miracle:
      # Privacy budget split used by the modified-pi construction below.
      eta = epsilon_target / 2.0
      print("eta = " + str(eta))
      print("alpha = " + str(alpha))
    # Build the true distribution over k symbols and draw n user samples.
    if config.distribution == "geometric":
      lbd = config.lbd_geometric
      prob = generate_geometric_distribution(k, lbd)
    elif config.distribution == "zipf":
      degree = config.degree_zipf
      prob = generate_zipf_distribution(k, degree)
    elif config.distribution == "uniform":
      prob = generate_uniform_distribution(k)
    else:
      raise ValueError(
          "distribution should be either be geometric, zipf, uniform.")
    x = np.random.choice(k, n, p=prob)
    if config.run_ss:
      x_ss = ss.encode_string_fast(k, epsilon_target, x)
      prob_ss = ss.decode_string(k, epsilon_target, x_ss, n, normalization=1)
      # NOTE(review): errors here and below are computed with ord=1 (an
      # l1 distance) although the plot labels say "\ell_2" and the output
      # file name says "mse" -- confirm which metric is intended.
      ss_error[itr, 0] = np.linalg.norm(
          [p_i - phat_i for p_i, phat_i in zip(prob, prob_ss)], ord=1)
    if config.run_rhr:
      x_rhr = rhr.encode_string(k, epsilon_target, rhr_coding_cost, x)
      prob_rhr = rhr.decode_string_fast(
          k, epsilon_target, rhr_coding_cost, x_rhr,
          normalization=1)  # estimate the original underlying distribution
      rhr_error[itr, 0] = np.linalg.norm(
          [p_i - phat_i for p_i, phat_i in zip(prob, prob_rhr)], ord=1)
    # Only MMRC depends on the varied coding cost, so only it lives in the
    # inner sweep.
    for step, vary_parameter in enumerate(vary_space):
      coding_cost = vary_parameter
      print("coding cost = %d" % coding_cost)
      if config.run_modified_miracle:
        x_modified_miracle = np.zeros((k, n))
        for i in range(n):
          # The seed `i + itr * n` is shared between encoder and decoder so
          # both sides draw the same candidate set of 2**coding_cost samples.
          if config.encoding_type == "fast":
            x_modified_miracle[:,
                               i] = miracle.encode_decode_modified_miracle_fast(
                                   i + itr * n, x[i], k, alpha * epsilon_target,
                                   2**coding_cost)
          else:
            _, pi, _ = miracle.encoder(i + itr * n, x[i], k,
                                       alpha * epsilon_target, 2**coding_cost)
            expected_beta = np.ceil(k / (np.exp(epsilon_target) + 1)) / k
            pi_all = modify_pi.modify_pi(
                pi, eta, epsilon_target, (np.exp(epsilon_target / 2)) /
                (1 + expected_beta * (np.exp(epsilon_target) - 1)))
            index = np.random.choice(2**coding_cost, 1, p=pi_all[-1])[0]
            x_modified_miracle[:, i] = miracle.decoder(i + itr * n, index, k,
                                                       alpha * epsilon_target,
                                                       2**coding_cost)
        prob_modified_miracle = unbias.unbias_modified_miracle(
            k,
            alpha * epsilon_target,
            2**coding_cost,
            x_modified_miracle.T,
            n,
            normalization=1)
        modified_miracle_error[itr, step] = np.linalg.norm(
            [p_i - phat_i for p_i, phat_i in zip(prob, prob_modified_miracle)],
            ord=1)
    print(time.time() - start_time)
    print("--------------")
  # Report mean errors across iterations.
  if config.run_modified_miracle:
    print("modified miracle error:")
    print(np.mean(modified_miracle_error, axis=0))
  if config.run_ss:
    print("ss error:")
    print(np.mean(ss_error, axis=0))
  if config.run_rhr:
    print("rhr error:")
    print(np.mean(rhr_error, axis=0))
  # Error-vs-bits plot: MMRC varies with the coding cost, SS/RHR are drawn
  # as flat dashed baselines.
  # NOTE(review): MMRC error bars use the raw std while the baselines divide
  # by sqrt(num_itr) (standard error) -- confirm the asymmetry is intended.
  plt.figure(figsize=(10, 8), dpi=80)
  if config.run_modified_miracle:
    plt.errorbar(
        vary_space,
        np.mean(modified_miracle_error, axis=0),
        yerr=np.std(modified_miracle_error, axis=0),
        linewidth=3.0,
        label="MMRC")
  if config.run_ss:
    plt.errorbar(
        vary_space, [np.mean(ss_error, axis=0)[0]] * len(vary_space),
        yerr=[np.std(ss_error, axis=0)[0] / np.sqrt(config.num_itr)] *
        len(vary_space),
        ls="--",
        linewidth=3.0,
        label="Subset Selection")
  if config.run_rhr:
    plt.errorbar(
        vary_space, [np.mean(rhr_error, axis=0)[0]] * len(vary_space),
        yerr=[np.std(rhr_error, axis=0)[0] / np.sqrt(config.num_itr)] *
        len(vary_space),
        ls="--",
        linewidth=3.0,
        label="RHR")
  plt.xticks(fontsize=28)
  plt.yticks(fontsize=28)
  plt.ylabel(r"$\ell_{2}$ error", fontsize=28)
  plt.xlabel(r"$\#$ bits", fontsize=28)
  plt.yticks([0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
  plt.legend(fontsize=24, loc="upper right")
  # Persist the plot, timing information and the raw per-iteration errors.
  with file_open(work_path + "/rcc_dp_ss_mse_vs_coding_cost.png", "wb") as f:
    plt.savefig(f, format="png")
  with file_open(work_path + "/time.txt", "w") as f:
    np.savetxt(f, np.array(time.time() - start_time).reshape(-1, 1))
  if config.run_modified_miracle:
    with file_open(work_path + "/modified_miracle_error.csv", "w") as f:
      np.savetxt(f, modified_miracle_error, delimiter=",")
  if config.run_ss:
    with file_open(work_path + "/ss_error.csv", "w") as f:
      np.savetxt(f, ss_error, delimiter=",")
  if config.run_rhr:
    with file_open(work_path + "/rhr_error.csv", "w") as f:
      np.savetxt(f, rhr_error, delimiter=",")
1eb129640304214d9c010a65049d169dce4e9dd0 | 197 | py | Python | python/src/algorithm/coding/itertools/probability.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | python/src/algorithm/coding/itertools/probability.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | python/src/algorithm/coding/itertools/probability.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
] | null | null | null | import itertools
N = int(input())
S = input().split(' ')
K = int(input())
S = list(S)
C = list(itertools.combinations(S, K))
T = len(C)
a = list(filter(lambda x: 'a' in x, C))
print(len(a)/T)
| 13.133333 | 39 | 0.588832 |
2ed452eb2c64733a4beeb7de4cf8471084453c52 | 5,371 | py | Python | pydub/silence.py | sean-bailey/pydub | c2f3199be393ad9e65a172d4737dbc68b56c3ec9 | [
"MIT"
] | null | null | null | pydub/silence.py | sean-bailey/pydub | c2f3199be393ad9e65a172d4737dbc68b56c3ec9 | [
"MIT"
] | null | null | null | pydub/silence.py | sean-bailey/pydub | c2f3199be393ad9e65a172d4737dbc68b56c3ec9 | [
"MIT"
] | null | null | null | import itertools
from .utils import db_to_float
def detect_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
    """
    Returns a list of [start_ms, end_ms] ranges where audio_segment is silent.

    audio_segment - the pydub.AudioSegment() to scan
    min_silence_len - (in ms) minimum length of a quiet stretch to be
        reported as silence. default: 1000ms
    silence_thresh - (in dBFS) windows whose RMS is at or below this level
        are considered silent. default: -16dBFS
    seek_step - (in ms) step between successive analysis windows; 1 checks
        a window starting at every millisecond. default: 1ms
    """
    seg_len = len(audio_segment)
    # you can't have a silent portion of a sound that is longer than the sound
    if seg_len < min_silence_len:
        return []
    # convert silence threshold to a float value (so we can compare it to rms)
    silence_thresh = db_to_float(silence_thresh) * audio_segment.max_possible_amplitude
    # find silence and add start and end indicies to the to_cut list
    silence_starts = []
    # check successive (1 sec by default) chunk of sound for silence
    # try a chunk at every "seek step" (or every chunk for a seek step == 1)
    last_slice_start = seg_len - min_silence_len
    slice_starts = range(0, last_slice_start + 1, seek_step)
    # guarantee last_slice_start is included in the range
    # to make sure the last portion of the audio is searched
    if last_slice_start % seek_step:
        slice_starts = itertools.chain(slice_starts, [last_slice_start])
    for i in slice_starts:
        audio_slice = audio_segment[i:i + min_silence_len]
        if audio_slice.rms <= silence_thresh:
            silence_starts.append(i)
    # short circuit when there is no silence
    if not silence_starts:
        return []
    # combine the silence we detected into ranges (start ms - end ms)
    silent_ranges = []
    prev_i = silence_starts.pop(0)
    current_range_start = prev_i
    for silence_start_i in silence_starts:
        continuous = (silence_start_i == prev_i + seek_step)
        # sometimes two small blips are enough for one particular slice to be
        # non-silent, despite the silence all running together. Just combine
        # the two overlapping silent ranges.
        silence_has_gap = silence_start_i > (prev_i + min_silence_len)
        if not continuous and silence_has_gap:
            silent_ranges.append([current_range_start,
                                  prev_i + min_silence_len])
            current_range_start = silence_start_i
        prev_i = silence_start_i
    # close the final (still open) range
    silent_ranges.append([current_range_start,
                          prev_i + min_silence_len])
    return silent_ranges
def detect_nonsilent(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
    """
    Returns a list of [start_ms, end_ms] ranges where audio_segment is NOT
    silent; the complement of detect_silence() over the same parameters.
    """
    total_len = len(audio_segment)
    silence = detect_silence(audio_segment, min_silence_len, silence_thresh, seek_step)

    # No silence at all: the entire segment is one nonsilent range.
    if not silence:
        return [[0, total_len]]

    # The whole segment is silent: nothing to report.
    if silence[0] == [0, total_len]:
        return []

    # Every gap between consecutive silent ranges is a nonsilent range.
    nonsilent = []
    cursor = 0
    for sil_start, sil_end in silence:
        nonsilent.append([cursor, sil_start])
        cursor = sil_end

    # Audio after the final silent range, if any.
    if sil_end != total_len:
        nonsilent.append([cursor, total_len])

    # Drop the empty leading range produced when the audio starts in silence.
    if nonsilent[0] == [0, 0]:
        nonsilent.pop(0)

    return nonsilent
def split_on_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, keep_silence=100,
                     seek_step=1):
    """
    Split audio_segment on its silent sections and return the resulting chunks.

    audio_segment - original pydub.AudioSegment() object
    min_silence_len - (in ms) minimum length of a silence to be used for
        a split. default: 1000ms
    silence_thresh - (in dBFS) anything quieter than this will be
        considered silence. default: -16dBFS
    keep_silence - (in ms or True/False) leave some silence at the beginning
        and end of the chunks. Keeps the sound from sounding like it
        is abruptly cut off.
        When the length of the silence is less than the keep_silence duration
        it is split evenly between the preceding and following non-silent
        segments.
        If True is specified, all the silence is kept, if False none is kept.
        default: 100ms
    seek_step - (in ms) step size passed through to detect_nonsilent.
    """
    if isinstance(keep_silence, bool):
        # True -> keep everything (clamped to segment bounds); False -> keep none.
        keep_silence = len(audio_segment) if keep_silence else 0

    not_silence_ranges = detect_nonsilent(audio_segment, min_silence_len, silence_thresh, seek_step)

    # BUG FIX: the previous version referenced the pairwise loop variables
    # after the loop, raising NameError whenever there were fewer than two
    # nonsilent ranges (fully-silent audio, or a single nonsilent stretch).
    # Handle the empty case explicitly and take the final range from the
    # list itself instead of from loop leftovers.
    if not not_silence_ranges:
        return []

    # from the itertools documentation
    def pairwise(iterable):
        "s -> (s0,s1), (s1,s2), (s2, s3), ..."
        a, b = itertools.tee(iterable)
        next(b, None)
        return zip(a, b)

    start_min = 0
    chunks = []
    for (start_i, end_i), (start_ii, end_ii) in pairwise(not_silence_ranges):
        # Midpoint of the silence between this range and the next (rounded up)
        # caps the trailing padding so neighboring chunks never overlap.
        end_max = end_i + (start_ii - end_i + 1)//2  # +1 for rounding with integer division
        start_i = max(start_min, start_i - keep_silence)
        end_i = min(end_max, end_i + keep_silence)

        chunks.append(audio_segment[start_i:end_i])
        start_min = end_max

    # Final (or only) nonsilent range, padded but clamped to segment bounds.
    last_start, last_end = not_silence_ranges[-1]
    chunks.append(audio_segment[max(start_min, last_start - keep_silence):
                                min(len(audio_segment), last_end + keep_silence)])

    return chunks
def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10):
    '''
    Return the millisecond offset of the first chunk of ``sound`` that is
    louder than ``silence_threshold``.

    sound is a pydub.AudioSegment
    silence_threshold in dB
    chunk_size in ms

    Walks the audio in chunk_size steps until a non-silent chunk is found
    (or the end of the audio is reached).
    '''
    assert chunk_size > 0  # to avoid infinite loop
    total_len = len(sound)
    offset = 0  # ms
    while sound[offset:offset + chunk_size].dBFS < silence_threshold and offset < total_len:
        offset += chunk_size
    return offset
| 34.429487 | 100 | 0.679389 |
8dbb9a08951f8b5de7db7521a2909567b3199d18 | 1,667 | py | Python | main/rest/media_next.py | ktsakalozos/tator | f9ce28d646b4afd926c4350e5d8b6ff66d44a03b | [
"MIT"
] | null | null | null | main/rest/media_next.py | ktsakalozos/tator | f9ce28d646b4afd926c4350e5d8b6ff66d44a03b | [
"MIT"
] | null | null | null | main/rest/media_next.py | ktsakalozos/tator | f9ce28d646b4afd926c4350e5d8b6ff66d44a03b | [
"MIT"
] | null | null | null | from collections import defaultdict
from ..models import Media
from ..search import TatorSearch
from ..schema import MediaNextSchema
from ._base_views import BaseDetailView
from ._media_query import get_media_es_query
from ._permissions import ProjectViewOnlyPermission
class MediaNextAPI(BaseDetailView):
    """ Retrieve ID of next media in a media list.

        This endpoint accepts the same query parameters as a GET request to the `Medias` endpoint,
        but only returns the next media ID from the media passed as a path parameter. This allows
        iteration through a media list without serializing the entire list, which may be large.
    """
    schema = MediaNextSchema()
    permission_classes = [ProjectViewOnlyPermission]
    http_method_names = ['get']

    def _get(self, params):
        """ Handle GET: return ``{'next': <id>}`` for the media that follows
            ``params['id']`` in name order, or ``{'next': -1}`` when the
            current media is the last match.
        """
        # Find this object.
        media_id = params['id']
        media = Media.objects.get(pk=media_id)

        # Get query associated with media filters.
        query = get_media_es_query(media.project.pk, params)

        # Modify the query to only retrieve next media: "next" means the first
        # hit whose exact name sorts strictly after the current media's name.
        range_filter = [{'range': {'_exact_name': {'gt': media.name}}}]
        if query['query']['bool']['filter']:
            query['query']['bool']['filter'] += range_filter
        else:
            query['query']['bool']['filter'] = range_filter
        # Only one hit is needed.
        query['size'] = 1

        media_ids, count = TatorSearch().search(media.project.pk, query)
        if count > 0:
            response_data = {'next': media_ids[0]}
        else:
            # No media follows the current one for this filter set.
            response_data = {'next': -1}
        return response_data

    def get_queryset(self):
        """ Base queryset is all media; filtering happens in Elasticsearch
            (see _get), not in the ORM.
        """
        return Media.objects.all()
| 34.020408 | 98 | 0.656269 |
afef5b43019b0d15c5ddf63c213ae1c3ab05ad4f | 47,377 | py | Python | chirptext/texttaglib.py | letuananh/chirptext | 4b97b4bc1817b5ad7ee113381321c94ff9a15799 | [
"MIT"
] | 5 | 2017-05-01T09:54:47.000Z | 2020-03-29T20:42:53.000Z | chirptext/texttaglib.py | letuananh/chirptext | 4b97b4bc1817b5ad7ee113381321c94ff9a15799 | [
"MIT"
] | 21 | 2018-04-03T13:39:55.000Z | 2021-05-20T13:53:52.000Z | chirptext/texttaglib.py | letuananh/chirptext | 4b97b4bc1817b5ad7ee113381321c94ff9a15799 | [
"MIT"
] | 1 | 2021-05-17T07:05:00.000Z | 2021-05-17T07:05:00.000Z | # -*- coding: utf-8 -*-
""" Text Annotation (texttaglib - TTL) module
"""
# This code is a part of chirptext library: https://github.com/letuananh/chirptext
# :copyright: (c) 2012 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
import csv
import json
import logging
import os
from collections import Mapping
from collections import defaultdict as dd
from typing import TypeVar, Generic
from . import chio
from .anhxa import DataObject
from .anhxa import IDGenerator
from .chio import iter_tsv_stream
# Supported serialization modes for TTL documents.
MODE_TSV = 'tsv'
MODE_JSON = 'json'
class Tag(DataObject):
    """ A general tag which can be used for annotating linguistic objects such as Sentence, Chunk, or Token

    Note: object types cannot be None. If forced with ``Tag('val', type=None)`` type will be set to an empty string ''
    If an object is passed in as type str(type) will be used to convert it into a string.
    """

    # Well-known tag types / annotation sources used across the TTL toolkit
    GOLD = 'gold'
    NONE = ''
    DEFAULT = 'n/a'
    MFS = 'mfs'  # most frequent sense
    WORDNET = 'wn'
    OTHER = 'other'
    NLTK = 'nltk'
    ISF = 'isf'  # integrated semantic framework: https://github.com/letuananh/intsem.fx
    MECAB = "mecab"

    def __init__(self, value: str = '', type: str = NONE, cfrom=-1, cto=-1, source=NONE, **kwargs):
        """ Create a new tag.

        :param value: tag value; non-empty values are coerced with str()
        :param type: tag type; falsy types are normalized to ''
        :param cfrom: starting character offset (-1 means not set)
        :param cto: ending character offset (-1 means not set)
        :param source: annotation source (e.g. Tag.GOLD, Tag.NLTK)
        """
        super().__init__(**kwargs)
        self.__value = value if value == '' or value is None else str(value)
        self.__type = str(type) if type else ''  # tag type
        # NOTE(review): offsets are stored as-is here (no int() coercion),
        # unlike the cfrom/cto setters below -- confirm the asymmetry is
        # intentional.
        self.__cfrom = cfrom
        self.__cto = cto
        self.source = source

    @property
    def cfrom(self):
        """ starting character index of a Tag """
        return self.__cfrom

    @cfrom.setter
    def cfrom(self, value):
        # coerce to int; None is preserved as "unset"
        self.__cfrom = int(value) if value is not None else None

    @property
    def value(self):
        """ The raw value of this tag (may be None or '') """
        return self.__value

    @value.setter
    def value(self, value):
        self.__value = value

    @property
    def type(self):
        """ The tag type ('' when untyped); read-only after construction """
        return self.__type

    @property
    def cto(self):
        """ ending character index of a Tag """
        return self.__cto

    @cto.setter
    def cto(self, value):
        # coerce to int; None is preserved as "unset"
        self.__cto = int(value) if value is not None else None

    @property
    def text(self):
        """ The text value of this tag

        Tag.text returns Tag.value except when value does not exist (i.e. is None)
        in which an empty string '' will be returned
        """
        return self.value if self.value else ''

    @text.setter
    def text(self, value):
        self.value = value

    def __repr__(self):
        if not self.type:
            return f'Tag(value={repr(self.value)})'
        else:
            return f'Tag(value={repr(self.value)}, type={repr(self.type)})'

    def __str__(self):
        # include offsets only when both are meaningfully set
        if self.cfrom not in (-1, None) and self.cto not in (-1, None):
            return f"{self.type}/{self.value}[{self.cfrom}:{self.cto}]"
        else:
            return f"{self.type}/{self.value}"

    def to_dict(self, default_cfrom=-1, default_cto=-1, *args, **kwargs):
        """ Serialize this Tag object data into a dict

        type/source are omitted when falsy; cfrom/cto are only included when
        they are set, non-negative, differ from the provided defaults and
        differ from the parent's offsets.
        """
        a_dict = {'value': self.value}
        if self.type:
            a_dict['type'] = self.type
        if self.source:
            a_dict['source'] = self.source
        # NOTE(review): ``self.parent`` is assumed to be supplied by
        # DataObject (or default to a falsy value) -- confirm.
        if self.cfrom not in (None, -1, default_cfrom, self.parent.cfrom if self.parent else None) and self.cfrom >= 0:
            a_dict['cfrom'] = self.cfrom
        if self.cto not in (None, -1, default_cto, self.parent.cto if self.parent else None) and self.cto >= 0:
            a_dict['cto'] = self.cto
        return a_dict

    def clone(self, **kwargs):
        """ Create a new tag object (a copy of this tag, with any field
        overridden via keyword arguments) """
        _source_dict = self.to_dict()
        _source_dict.update(kwargs)
        return Tag.from_dict(_source_dict)

    @staticmethod
    def from_dict(json_dict):
        """ Create a Tag object from a dict's data """
        tag = Tag(**json_dict)
        return tag
# Generic placeholder for the tag type held by ProtoList / TagSet containers.
# NOTE(review): the variable is named ``T`` but the TypeVar's declared name
# is 'TagType'; harmless at runtime, though static checkers may warn.
T = TypeVar('TagType')
class ProtoList:
    """ A list of data objects that can construct new children

    Optionally maintains a key -> object index (when ``index_key`` is set)
    and notifies its parent through the claim/release/create hooks whenever
    objects are added, removed or constructed.
    """

    def __init__(self, parent=None, proto=Tag, proto_kwargs=None, proto_key="ID", index_key=False,
                 claim_hook=None, release_hook=None, taglist_create_hook=None, *args, **kwargs):
        """ Create a new ProtoList.

        :param parent: owning object (informational only in this class)
        :param proto: constructor used by new() to build children
        :param proto_kwargs: default keyword arguments passed to ``proto``
        :param proto_key: attribute name used as the lookup key
        :param index_key: when truthy (together with proto_key), maintain a
            key -> object map for string lookups
        :param claim_hook: called with each object added to this list
        :param release_hook: called with each object removed from this list
        :param taglist_create_hook: called with each object created by new()
        """
        self.__parent = parent
        self.__proto = proto
        self.__proto_kwargs = proto_kwargs
        self.__proto_key = proto_key
        self.__obj_map = {}
        self.__has_index = index_key and proto_key
        self.__claim_hook = claim_hook  # notify parent to claim an object
        self.__release_hook = release_hook  # notify parent that an object has been removed
        self.__taglist_create_hook = taglist_create_hook  # notify parent that TagList created a new object using new()
        self.__children = []

    def __len__(self):
        """ Number of objects in this list """
        return len(self.__children)

    def __iter__(self):
        """ Iterate over the children in insertion order """
        return iter(self.__children)

    def __getitem__(self, idx):
        """ Look up a child by sequence position (int) or, when indexed,
        by its key value (str) """
        if isinstance(idx, int):
            return self.__children[idx]
        elif self.__has_index:
            return self.__obj_map[idx]
        else:
            raise IndexError("object search value has to be either sequence position (int) or key (str)")

    def __setitem__(self, key, value):
        """ Replace the child at position ``key`` with ``value`` """
        return self._add_obj(value, key, replace=True)

    def __contains__(self, value):
        """ True when ``value`` is a child or (when indexed) a known key """
        if self.__has_index:
            return value in self.__children or value in self.__obj_map
        else:
            return value in self.__children

    def __repr__(self):
        return repr([repr(c) for c in self])

    def __str__(self):
        return str([str(c) for c in self])

    def new(self, *args, **kwargs):
        """ Create a new object and add this this TokenList """
        # prefer kwargs to proto_kwargs, except for type
        if self.__proto_kwargs:
            for k, v in self.__proto_kwargs.items():
                if k == 'type':
                    # ``type`` is pinned by this list: the caller may only
                    # repeat the same type, never override it
                    if 'type' in kwargs and kwargs['type']:
                        if kwargs['type'] != self.__proto_kwargs['type']:
                            raise ValueError("Cannot construct new object due to conflicting types")
                    kwargs['type'] = self.__proto_kwargs['type']
                elif k not in kwargs:
                    kwargs[k] = self.__proto_kwargs[k]
        new_obj = self.__proto(*args, **kwargs)
        if self.__taglist_create_hook:
            self.__taglist_create_hook(new_obj)
        return self._add_obj(new_obj)

    def append(self, obj):
        """ Append an existing object to the end of this list """
        return self._add_obj(obj)

    def extend(self, values):
        """ Append every object in ``values`` to this list """
        for obj in values:
            self.append(obj)

    def insert(self, idx, obj):
        """ Insert an existing object at position ``idx`` """
        self._add_obj(obj, idx=idx)

    def index(self, *args, **kwargs):
        """ Same contract as list.index() over the children """
        return self.__children.index(*args, **kwargs)

    def _add_obj(self, obj, idx=None, replace=False):
        """ [Internal function] Add an existing object into this list

        Currently this function is only used for constructing structures from input streams.
        General users should NOT use this function as it is very likely to be removed in the future

        :param obj: a new object to add to this list
        :param idx: position to insert the new object to, or set to None to append the new object to the end of list.
        :param replace: replace an existing object at a given position instead of inserting
        """
        if self.__claim_hook:
            self.__claim_hook(obj)
        if self.__has_index:
            # only index objects whose key value is truthy
            if getattr(obj, self.__proto_key):
                self.__obj_map[getattr(obj, self.__proto_key)] = obj
        if idx is None:
            self.__children.append(obj)
        elif replace:
            # release the displaced object before storing the new one
            _old_obj = self.__children[idx]
            self._release_obj(_old_obj)
            self.__children[idx] = obj
        else:
            self.__children.insert(idx, obj)
        return obj

    def by_id(self, id: str, **kwargs):
        """ ID value has to be string

        Linear scan by the proto key; when no match is found, returns the
        provided ``default`` keyword argument or raises IndexError.
        """
        for _obj in self:
            if getattr(_obj, self.__proto_key) == id:
                return _obj
        if 'default' in kwargs:
            return kwargs['default']
        else:
            raise IndexError("No object could be found with the given index and no default value was provided")

    def remove(self, obj_ref):
        """ Remove an object (given directly or by its key) and return it """
        # remove from map
        if obj_ref in self.__obj_map:
            _obj = self.__obj_map[obj_ref]
        else:
            _obj = obj_ref
        # remove the object from this list
        if _obj in self.__children:
            self.__children.remove(_obj)
        return self._release_obj(_obj)

    def _release_obj(self, obj):
        """ [Internal] Drop ``obj`` from the index and fire the release hook """
        # remove the obj from obj map index
        if self.__has_index:
            key = getattr(obj, self.__proto_key)
            # NOTE(review): this pop raises KeyError when the object carries
            # a falsy key (such objects are never indexed by _add_obj) --
            # confirm whether that can happen in practice.
            self.__obj_map.pop(key)
        if self.__release_hook:
            self.__release_hook(obj)
        return obj

    def values(self):
        """ Compile a value list from all children """
        return [c.value for c in self.__children]
class TagSet(Generic[T]):
""" contains all tags of a linguistic object """
class TagMap:
def __init__(self, tagset):
self.__dict__["_TagMap__tagset"]: TagSet = tagset
def __getitem__(self, type) -> T:
""" Get the first tag object in the tag list of a given type if exist, else return None """
if type in self.__tagset and len(self.__tagset[type]) > 0:
return self.__tagset[type][0]
else:
return None
def __setitem__(self, type, value):
""" Set the first tag object in the tag list of a given type to key if exist, else create a new tag
:param type: type of the generic tag object being added
:param value: if value is a dict-like object, it will be unpacked into object constructor, otherwise it will be used as the tag value
"""
_old = self[type]
_kwargs = {}
if isinstance(value, Mapping):
_kwargs = value
if 'type' in _kwargs:
if _kwargs['type'] != type:
raise ValueError("Multiple values for type were provided")
else:
_kwargs.pop('type')
value = _kwargs.pop('value') if 'value' in _kwargs else ''
if not _old:
# create a new tag
self.__tagset.new(value, type=type, **_kwargs)
else:
# pop the old tag and replace it with a new one
self.__tagset.replace(_old, value, type=type, **_kwargs)
def __getattr__(self, type) -> T:
""" get the first tag object in the tag list of a given type if exist, else return None """
return self[type]
def __setattr__(self, type, value):
""" Set the first tag object in the tag list of a given type to key if exist, else create a new tag """
self[type] = value
def get_or_create(self, type, default=None, check_type=True):
""" Get an existing tag object with a specific type, or create a new one using defaulted values
:param default: A Tag object or a dict-like structure, both will be used to construct a new Tag object
If defaults is set to None then an empty tag of the given type will be created
:param check_type: Make sure that type information in defaults is not conflicting with querying type
:raises: ValueError
"""
if type in self.__tagset:
return self[type]
elif default is None:
return self.__tagset.new('', type=type)
elif isinstance(default, Tag):
if check_type and default.type and default.type != type:
raise ValueError(
f"Could not create new tag object due to type conflicting ({repr(type)} != {repr(default.type)})")
else:
return self.__tagset._append(default.clone(type=type))
elif isinstance(default, Mapping):
_kwargs = dict(default)
if 'value' in _kwargs:
_value = _kwargs.pop("value")
else:
_value = None
if 'type' in _kwargs:
if check_type and _kwargs['type'] != type:
raise ValueError(
f"Could not create new tag object due to type conflicting ({repr(type)} != {repr(_kwargs['type'])})")
_kwargs.pop("type")
return self.__tagset.new(_value, type=type, **_kwargs)
else:
# use defaults as the input value string
return self.__tagset.new(str(default), type=type)
def __init__(self, parent=None, **kwargs):
self.__parent = parent
self.__proto_kwargs = kwargs['proto_kwargs'] if 'proto_kwargs' in kwargs else {}
self.__proto = kwargs['proto'] if 'proto' in kwargs else Tag
self.__dict__["_TagSet__tags"] = []
self.__dict__["_TagSet__tagmap"] = TagSet.TagMap(self)
self.__dict__["_TagSet__tagsmap"] = dict()
@property
def gold(self):
""" Interact with first tag (gold) directly """
return self.__tagmap
def __len__(self):
""" Number of tags in this object """
return len(self.__tags)
def __getitem__(self, type) -> T:
""" Get the all tags of a given type """
if type not in self.__tagsmap:
self.__tagsmap[type] = ProtoList(proto=self.__proto,
proto_kwargs={'type': type},
taglist_create_hook=lambda x: self.__tags.append(x))
return self.__tagsmap[type]
def __getattr__(self, type) -> T:
""" Get the first tag of a given type if it exists"""
return self[type]
def __contains__(self, type):
""" Check if there is at least a tag with a type """
return type in self.__tagsmap
def __iter__(self) -> T:
""" Loop through all tags in this set """
return iter(self.__tags)
def items(self):
""" Return an iterator to loop through all (type, value_list) pairs in this TagSet """
return self.__tagsmap.items()
def _construct_obj(self, *args, **kwargs) -> T:
""" Construct a new tag object and notify parent if possible """
if self.__proto_kwargs:
# prioritise values in kwargs rather than in default constructor kwargs
for k, v in self.__proto_kwargs.items():
if k not in self.kwargs:
kwargs[k] = v
_tag = self.__proto(*args, **kwargs)
# TODO to review this _claim book design
if self.__parent is not None and self.__parent._claim:
self.__parent._claim(_tag)
return _tag
def new(self, value, type='', *args, **kwargs) -> T:
""" Create a new generic tag object """
if not value and not type:
raise ValueError("Concept value and type cannot be both empty")
_tag = self._construct_obj(value, type, *args, **kwargs)
return self._append(_tag)
def _append(self, tag):
    """ [Internal] Add an existing tag object into the list
    General users should NOT use this method as it is very likely to be removed in the future
    """
    self.__map_tag(tag)
    self.__tags.append(tag)
    return tag

def __map_tag(self, tag):
    # register the tag under its type in the per-type map
    self[tag.type].append(tag)
    return tag

def _replace_obj(self, old_obj, new_obj):
    """ [Internal] Swap old_obj for new_obj in both the flat list and the per-type map """
    self.__tags.remove(old_obj)
    self.__tags.append(new_obj)
    if old_obj.type == new_obj.type:
        # same type: replace in place to keep ordering within the type list
        _taglist = self.__tagsmap[old_obj.type]
        _taglist[_taglist.index(old_obj)] = new_obj
    else:
        self.__tagsmap[old_obj.type].remove(old_obj)
        self.__tagsmap[new_obj.type].append(new_obj)
    return new_obj

def replace(self, old_obj, value: str, type='', *args, **kwargs) -> T:
    """ Create a new tag to replace an existing tag object
    :param old_obj: Old object to be removed and replaced with a newly created object
    :param value: text value for the new tag object
    :param type: type for the new object, defaulted to an empty string
    """
    # NOTE(review): positional *args after the keyword arguments will raise
    # TypeError when both are supplied — confirm intended call pattern
    new_obj = self._construct_obj(value=value, type=type, *args, **kwargs)
    return self._replace_obj(old_obj, new_obj)

def remove(self, tag: T) -> T:
    """ Remove a generic tag object and return them """
    if tag is None:
        raise ValueError("Null tag object cannot be popped")
    elif tag.type not in self:
        raise ValueError("This tag object does not exist in this TagSet")
    else:
        self.__tagsmap[tag.type].remove(tag)
        self.__tags.remove(tag)
        return tag

def pop(self, idx: int) -> T:
    """ Remove a tag at a given position and return it """
    return self.remove(self.__tags[idx])

def index(self, obj):
    """ Return the position of a tag object within this TagSet """
    return self.__tags.index(obj)

def values(self, type=None):
    """ Get all values of tags with the specified type or all tags when type is None """
    return (t.value for t in (self[type] if type is not None else self))

def to_dict(self, *args, **kwargs):
    """ Create a list of dicts from all tag objects """
    return {"tags": [t.to_dict(*args, **kwargs) for t in self]}
class Token(DataObject):
    """ A sentence token (i.e. a word) """

    def __init__(self, text='', cfrom=-1, cto=-1, pos=None, lemma=None, comment=None, flag=None, **kwargs):
        """ Create a token (e.g. a word in a sentence)

        :param text: surface form of the token
        :param cfrom: start character offset within the sentence text (-1 = unknown)
        :param cto: end character offset within the sentence text (-1 = unknown)
        :param pos: part-of-speech tag
        :param lemma: dictionary (canonical) form
        :param comment: free-text comment
        :param flag: free-form flag value
        """
        super().__init__(**kwargs)
        self.__tags: TagSet[Tag] = TagSet[Tag](parent=self)
        self.cfrom = cfrom
        self.cto = cto
        self.__text = text  # original/surface form
        self.lemma = lemma  # dictionary form
        self.pos = pos
        self.comment = comment
        self.flag = flag

    def __getitem__(self, name):
        """ Return the gold value of the tag *name*, or None when absent """
        return self.tag[name].value if name in self.__tags else None

    def __setitem__(self, name, value):
        self.tag[name] = value

    def __getattr__(self, name):
        # Unknown attributes fall back to tag lookup.
        # NOTE(review): if __tags is missing (e.g. during unpickling) this can
        # recurse indefinitely — confirm not a problem for current usage
        return self[name]

    def __len__(self):
        """ Number of tags attached to this token """
        return len(self.__tags)

    def __iter__(self):
        """ Iterate over all tags of this token """
        return iter(self.__tags)

    def __repr__(self):
        return "`{l}`<{f}:{t}>".format(l=self.text, f=self.cfrom, t=self.cto)

    def __str__(self):
        return self.text

    @property
    def text(self):
        """ Text value of a Token object """
        return self.__text

    @text.setter
    def text(self, value):
        self.__text = value

    @property
    def value(self):
        """ Alias to Token.text """
        return self.text

    @value.setter
    def value(self, value):
        self.text = value

    @property
    def tag(self):
        """ Interact with first tag (gold) directly """
        # FIX: the original source defined this property twice (identical
        # bodies); the dead duplicate definition has been removed.
        return self.__tags.gold

    @property
    def tags(self):
        """ Tag manager object of this token (list access) """
        return self.__tags

    def surface(self):
        """ Get the surface form of this token """
        # Prioritise self.text, then fall back to slicing the sentence text
        if self.text:
            return self.text
        elif self.sent and self.sent.text:
            return self.sent.text[self.cfrom:self.cto]
        else:
            return ''

    def tag_map(self):
        """ Build a map from tagtype to list of tags """
        tm = dd(list)
        for tag in self.__tags:
            tm[tag.type].append(tag)
        return tm

    def to_dict(self):
        """ Create a JSON-ready dict for this token """
        token_json = {'cfrom': self.cfrom,
                      'cto': self.cto,
                      'text': self.text}
        if self.lemma:
            token_json['lemma'] = self.lemma
        if self.pos:
            token_json['pos'] = self.pos
        if self.comment:
            token_json['comment'] = self.comment
        if self.flag:
            token_json['flag'] = self.flag
        all_tags = [t.to_dict(default_cfrom=self.cfrom, default_cto=self.cto) for t in self.tags]
        if all_tags:
            token_json['tags'] = all_tags
        return token_json

    @staticmethod
    def from_dict(token_dict):
        """ Rebuild a Token from a dict produced by to_dict() """
        tk = Token()
        # FIX: 'flag' added to the key list so that to_dict()/from_dict()
        # round-trips completely (flag used to be written but never read back)
        tk.update(token_dict, 'cfrom', 'cto', 'text', 'lemma', 'pos', 'comment', 'flag')
        # rebuild tags
        for tag_json in token_dict.get('tags', []):
            tk.tags.new(**tag_json)
        return tk
class TokenList(list):
    """ A list of Token objects - accepts both token indices and Token objects """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.sent = None  # owning Sentence, used to resolve integer token indices

    def __eq__(self, other):
        """ Element-wise comparison against any list """
        if not isinstance(other, list):
            return False
        elif len(other) != len(self):
            return False
        else:
            for i1, i2 in zip(self, other):
                if i1 != i2:
                    return False
            return True

    def __add__(self, other):
        # FIX: extend() returns None, so the original returned None here
        self.extend(other)
        return self

    def __iadd__(self, other):
        # FIX: must return self, otherwise `tl += xs` rebinds tl to None
        self.extend(other)
        return self

    def __ensure_token(self, token):
        """ Normalise a token reference (Token object or index) to a Token """
        if isinstance(token, Token):
            return token
        elif isinstance(token, int):
            if self.sent is None:
                raise ValueError("Using token index in a TokenList without sentence ref")
            return self.sent[token]
        else:
            # FIX: balanced the closing parenthesis in the error message
            raise ValueError(f"Invalid token value: {token} (Only token index and Token objects are accepted)")

    def append(self, x):
        """ Add tokens to this concept """
        super().append(self.__ensure_token(x))

    def extend(self, iterable):
        """ Add all tokens from an iterable to this TokenList
        :param iterable: An iterable of int (for token indices) or Token list
        :raises: ValueError
        """
        super().extend(self.__ensure_token(t) for t in iterable)

    def insert(self, i, x):
        """ Insert a token at a given position """
        super().insert(i, self.__ensure_token(x))
class Concept(Tag):
    """ Represent a concept in an utterance, which may refers to multiple tokens """
    FLAG = 'flag'        # dict key used for the flag field in to_dict()
    COMMENT = 'comment'  # dict key used for the comment field in to_dict()
    NOT_MATCHED = 'E'    # flag value — presumably marks a concept that could not be matched (confirm)

    def __init__(self, value='', type=None, clemma=None, tokens=None, comment=None, flag=None, sent=None, **kwargs):
        """ Create a concept tag

        :param value: concept value (e.g. a sense ID)
        :param type: concept type
        :param clemma: concept lemma
        :param tokens: Token objects (or token indices when sent is given)
        :param comment: free-text comment
        :param flag: free-form flag value
        :param sent: owning Sentence (needed to resolve integer token indices)
        """
        super().__init__(value, type, **kwargs)
        self.__tokens = TokenList()
        # sent is assigned BEFORE extending tokens so that integer indices
        # can be resolved against the sentence
        self.sent = sent
        self.clemma = clemma
        if tokens:
            self.tokens.extend(tokens)
        self.comment = comment
        self.flag = flag

    @property
    def tokens(self):
        """ Tokens linked to this concept (TokenList) """
        return self.__tokens

    @tokens.setter
    def tokens(self, values):
        # replace the whole token list content in place
        self.__tokens.clear()
        self.__tokens.extend(values)

    @property
    def sent(self):
        """ The Sentence that owns this concept """
        return self.__sent

    @sent.setter
    def sent(self, value):
        self.__sent = value
        # keep the token list's sentence reference in sync for index lookups
        self.__tokens.sent = value

    def __getitem__(self, idx):
        """ Get the idx-th token of this concept """
        return self.__tokens[idx]

    def __iter__(self):
        """ Iterate through all tokens in this concept """
        return iter(self.__tokens)

    def __len__(self):
        """ Number of tokens belong to this concept """
        return len(self.__tokens)

    def __repr__(self):
        return f'<{self.type}:{self.value}:"{self.clemma}">'

    def __str__(self):
        return '<{t}:"{l}">({ws})'.format(l=self.clemma, t=self.value, ws=self.__tokens)

    def remove(self, token: Token):
        """ Remove a Token object from this concept """
        self.__tokens.remove(token)

    def pop(self, idx=None) -> Token:
        """ Remove a token from this concept and return it
        :param idx: the index of the token to be removed. If set to None (defaulted) idx of the last token will be used
        :type idx: int
        """
        if idx is None:
            return self.__tokens.pop()
        else:
            return self.__tokens.pop(idx)

    def to_dict(self, *args, **kwargs):
        """ Create a JSON-ready dict for this concept """
        concept_dict = super().to_dict(*args, **kwargs)
        if self.sent:
            # get token idx from sent
            concept_dict['tokens'] = [self.sent.tokens.index(t) for t in self.__tokens]
        else:
            # without a sentence the tokens are stored by surface text
            concept_dict['tokens'] = [t.text for t in self.__tokens]
        if self.clemma is not None:
            concept_dict['clemma'] = self.clemma
        if self.value:
            concept_dict['value'] = self.value
        if self.type:
            concept_dict['type'] = self.type
        if self.comment:
            concept_dict[Concept.COMMENT] = self.comment
        if self.flag:
            concept_dict[Concept.FLAG] = self.flag
        return concept_dict
class Sentence(DataObject):
    """ Represent an utterance (i.e. a sentence) """

    def __init__(self, text='', ID=None, tokens=None, **kwargs):
        """ Create a sentence from raw text, with an optional ID and token strings """
        super().__init__(text=text, **kwargs)
        self.text = text
        self.ID = ID
        self.__tags: TagSet[Tag] = TagSet[Tag](parent=self)
        # concepts and tokens are created with a back-reference to this sentence
        self.__concepts: TagSet[Concept] = TagSet[Concept](proto=Concept, proto_kwargs={'sent': self})
        self.__tokens: ProtoList = ProtoList(parent=self, proto=Token, proto_kwargs={'sent': self})
        if tokens:
            self.tokens = tokens

    @property
    def ID(self) -> str:
        """ ID string of a sentence """
        return self.__ID

    @ID.setter
    def ID(self, value: str):
        # IDs are always stored as strings (or None)
        self.__ID = str(value) if value is not None else None

    @property
    def text(self):
        """ Raw text of this sentence """
        return self.__text

    @text.setter
    def text(self, value):
        self.__text = value

    def __repr__(self):
        if self.ID:
            return f"Sentence(ID={repr(self.ID)}, text={repr(self.text)})"
        else:
            return f"Sentence({repr(self.text)})"

    def __str__(self):
        """ The text content of this sentence """
        return self.text

    def __getitem__(self, idx: int) -> Token:
        """ Get the token at a given position in this sentence """
        return self.__tokens[idx]

    def __len__(self):
        """ Number of tokens in this sentence """
        return len(self.__tokens)

    @property
    def tags(self):
        """ Tag manager object of this sentence (list access) """
        return self.__tags

    @property
    def tag(self):
        """ Interact with first tag (gold) directly """
        return self.__tags.gold

    @property
    def concepts(self):
        """ Concept manager object of this sentence (list access) """
        return self.__concepts

    @property
    def concept(self):
        """ Interact with gold concept (gold) directly """
        return self.__concepts.gold

    @property
    def tokens(self):
        """ Access token list of this sentence """
        return self.__tokens

    @tokens.setter
    def tokens(self, tokens):
        # tokens may only be imported once (keeps concept links consistent)
        if self.__tokens:
            raise Exception("Cannot import tokens as my token list is not empty")
        else:
            self._import_tokens(tokens)

    def surface(self, tag):
        """ Get surface string that is associated with a linguistic object """
        if tag.cfrom is not None and tag.cto is not None and tag.cfrom >= 0 and tag.cto >= 0:
            return self.text[tag.cfrom:tag.cto]
        else:
            return ''

    def tcmap(self, *concept_type):
        """ Create a token-concepts map
        :param concept_type: When provided, only concept with specified type(s) will be mapped
        """
        _tcmap = dd(list)
        for concept in self.__concepts:
            if concept_type and concept.type not in concept_type:
                continue
            else:
                for w in concept:
                    _tcmap[w].append(concept)
        return _tcmap

    def mwe(self, *concept_type):
        """ return an iterator of concepts that are linked to more than 1 token.

        # filter all Wordnet-based multi-word expressions
        >>> sent.mwe("WN")
        # filter senses from wordnets, Princeton Wordnet, and Open Multilingual Wordnet
        >>> sent.mwe("WN", "PWN", "OMW")
        # If you already have a type list, try to use Python unpack syntax with
        >>> types = ["WN", "PWN", "OMW"]
        >>> sent.mwe(*types)

        :param concept_type: When provided, only concept with specified type(s) will be considered
        """
        if concept_type:
            return (c for c in self.__concepts if len(c.tokens) > 1 and c.type in concept_type)
        else:
            return (c for c in self.__concepts if len(c.tokens) > 1)

    def msw(self, *concept_type):
        """ Return a generator of tokens with more than one concept.
        :param concept_type: When provided, only concept with specified type(s) will be considered
        """
        return (t for t, c in self.tcmap(*concept_type).items() if len(c) > 1)

    def _claim(self, obj):
        """ [Internal function] claim ownership of an object """
        obj.sent = self

    def _import_tokens(self, tokens, import_hook=None, ignorecase=True):
        """ [Internal function ] Import a list of string as tokens
        General users should NOT use this function as it's very likely to be changed in the future
        """
        text = self.text.lower() if ignorecase else self.text
        has_hooker = import_hook and callable(import_hook)
        cfrom = 0
        # resume searching after the last known token offset, if any
        if self.__tokens:
            for tk in self.__tokens:
                if tk.cfrom and tk.cfrom > cfrom:
                    cfrom = tk.cfrom
        for token in tokens:
            if has_hooker:
                import_hook(token)
            to_find = token.lower() if ignorecase else token
            start = text.find(to_find, cfrom)
            # stanford parser: `` and '' stand for a double-quote character
            if to_find == '``' or to_find == "''":
                start_dq = text.find('"', cfrom)
                if start_dq > -1 and (start == -1 or start > start_dq):
                    to_find = '"'
                    start = start_dq
            if to_find == '`' or to_find == "'":
                start_dq = text.find("'", cfrom)
                if start_dq > -1 and (start == -1 or start > start_dq):
                    to_find = "'"
                    start = start_dq
            if start == -1:
                raise LookupError('Cannot find token `{t}` in sent `{s}`({l}) from {i} ({p})'.format(t=token, s=self.text, l=len(self.text), i=cfrom, p=self.text[cfrom:cfrom + 20]))
            cfrom = start
            cto = cfrom + len(to_find)
            self.tokens.new(token, cfrom, cto)
            # NOTE(review): the next search starts one char BEFORE this token's
            # end — presumably to handle adjacent quote characters; confirm
            cfrom = cto - 1

    def fix_cfrom_cto(self, import_hook=None, ignorecase=True):
        """ Recompute cfrom/cto of every token by re-locating its text in the sentence """
        text = self.text.lower() if ignorecase else self.text
        has_hooker = import_hook and callable(import_hook)
        cfrom = 0
        for token in self.tokens:
            if has_hooker:
                import_hook(token.text)
            start = text.find(token.text.lower() if ignorecase else token.text, cfrom)
            if start == -1:
                raise LookupError('Cannot find token `{t}` in sent `{s}`({l}) from {i} ({p})'.format(t=token, s=self.text, l=len(self.text), i=cfrom, p=self.text[cfrom:cfrom + 20]))
            cfrom = start
            cto = cfrom + len(token.text)
            token.cfrom = cfrom
            token.cto = cto
            cfrom = cto - 1

    def to_dict(self, *args, **kwargs):
        """ Generate a JSON-ready dict that contains this sentence data
        """
        sent_dict = {'text': self.text}
        if self.tokens:
            sent_dict['tokens'] = [t.to_dict() for t in self.tokens]
        if self.concepts:
            sent_dict['concepts'] = [c.to_dict() for c in self.concepts]
        if self.ID is not None:
            sent_dict['ID'] = self.ID
        # flag/comment come from DataObject — presumably default to None (confirm)
        if self.flag is not None:
            sent_dict['flag'] = self.flag
        if self.comment is not None:
            sent_dict['comment'] = self.comment
        if self.__tags:
            sent_dict['tags'] = [t.to_dict() for t in self.__tags]
        return sent_dict

    @staticmethod
    def from_dict(json_sent):
        """ Rebuild a Sentence from a dict produced by to_dict() """
        sent = Sentence(json_sent['text'])
        sent.update(json_sent, 'ID', 'comment', 'flag')
        # import tokens (added directly via the ProtoList internal API)
        for json_token in json_sent.get('tokens', []):
            sent.tokens._add_obj(Token.from_dict(json_token))
        # import concepts
        for json_concept in json_sent.get('concepts', []):
            concept = sent.concepts.new(**json_concept)
            concept.update(json_concept, Concept.COMMENT, Concept.FLAG)
        # import sentence's tag
        for json_tag in json_sent.get('tags', []):
            sent.tags.new(**json_tag)
        return sent
class Document(DataObject):
    """ A named collection of sentences, indexed by sentence ID """

    def __init__(self, name='', path='.', **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.path = path
        self.__sents = ProtoList(parent=self, proto=Sentence, index_key=True, claim_hook=self.__on_claim)
        # the ID generator skips candidates that already exist in this document
        self.__idgen = IDGenerator(id_hook=lambda candidate: candidate in self)

    @property
    def sents(self):
        """ Sentence manager of this document (list access) """
        return self.__sents

    def __contains__(self, sent_id):
        """ True when a sentence with the given ID exists in this Document """
        return str(sent_id) in self.__sents

    def __len__(self):
        """ Number of sentences in this document """
        return len(self.__sents)

    def __getitem__(self, sent_ref) -> Sentence:
        """ Get a sentence object by ID (string) or position (int) """
        return self.__sents[sent_ref]

    def __iter__(self):
        """ Iterate over all sentences in this Document """
        return iter(self.__sents)

    def __on_claim(self, sent: Sentence):
        # auto-assign a fresh ID to sentences created without one
        if not sent.ID:
            sent.ID = next(self.__idgen)
class TxtReader(object):
    """ Read a TTL document from five TSV streams (sents/tokens/concepts/links/tags) """

    def __init__(self, sent_stream, token_stream, concept_stream, link_stream, tag_stream, doc_name='', doc_path='.'):
        """ Create a reader over the TTL component streams

        Any stream except sent_stream may be None; the corresponding
        information is simply not read.
        """
        self.sent_stream = sent_stream
        self.token_stream = token_stream
        self.concept_stream = concept_stream
        self.link_stream = link_stream
        self.tag_stream = tag_stream
        self.doc_name = doc_name
        self.doc_path = doc_path

    def sent_reader(self):
        """ TSV row iterator over the sentence stream (None when unavailable) """
        return iter_tsv_stream(self.sent_stream) if self.sent_stream else None

    def token_reader(self):
        """ TSV row iterator over the token stream (None when unavailable) """
        return iter_tsv_stream(self.token_stream) if self.token_stream else None

    def concept_reader(self):
        """ TSV row iterator over the concept stream (None when unavailable) """
        return iter_tsv_stream(self.concept_stream) if self.concept_stream else None

    def link_reader(self):
        """ TSV row iterator over the concept-token link stream (None when unavailable) """
        return iter_tsv_stream(self.link_stream) if self.link_stream else None

    def tag_reader(self):
        """ TSV row iterator over the tag stream (None when unavailable) """
        return iter_tsv_stream(self.tag_stream) if self.tag_stream else None

    @staticmethod
    def from_path(path):
        """ Create a TxtReader from a path prefix (directory + document name) """
        doc_path = os.path.dirname(path)
        doc_name = os.path.basename(path)
        doc = Document(name=doc_name, path=doc_path)
        return TxtReader.from_doc(doc)

    @staticmethod
    def from_doc(doc, encoding='utf-8'):
        """ Create a TxtReader for a Document using the standard file layout
        ({name}_sents.txt, {name}_tokens.txt, ...) """
        sent_path = os.path.join(doc.path, '{}_sents.txt'.format(doc.name))
        token_path = os.path.join(doc.path, '{}_tokens.txt'.format(doc.name))
        concept_path = os.path.join(doc.path, '{}_concepts.txt'.format(doc.name))
        link_path = os.path.join(doc.path, '{}_links.txt'.format(doc.name))
        tag_path = os.path.join(doc.path, '{}_tags.txt'.format(doc.name))
        reader = TxtReader(sent_stream=open(sent_path, mode='rt', encoding=encoding),
                           token_stream=open(token_path, mode='rt', encoding=encoding),
                           concept_stream=open(concept_path, mode='rt', encoding=encoding),
                           link_stream=open(link_path, mode='rt', encoding=encoding),
                           tag_stream=open(tag_path, mode='rt', encoding=encoding),
                           doc_name=doc.name,
                           doc_path=doc.path)
        return reader

    def close(self):
        """ Close all underlying streams """
        self.sent_stream.close()
        self.token_stream.close()
        self.concept_stream.close()
        self.link_stream.close()
        self.tag_stream.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def read(self, doc=None):
        """ Read tagged doc from multiple files (sents, tokens, concepts, links, tags) """
        if not self.sent_stream:
            raise Exception("There is no sentence data stream available")
        if doc is None:
            doc = Document(name=self.doc_name, path=self.doc_path)
        # sentence rows: (sid, text) or (sid, text, flag, comment)
        for row in self.sent_reader():
            if len(row) == 2:
                sid, text = row
                doc.sents.new(text.strip(), ID=sid)
            elif len(row) == 4:
                sid, text, flag, comment = row
                sent = doc.sents.new(text.strip(), ID=sid)
                sent.flag = flag
                sent.comment = comment
        # Read tokens if available
        if self.token_stream:
            # read all tokens first, grouped by sentence ID
            sent_tokens_map = dd(list)
            for token_row in self.token_reader():
                if len(token_row) == 6:
                    sid, wid, token, lemma, pos, comment = token_row
                else:
                    sid, wid, token, lemma, pos = token_row
                    comment = ''
                sent_tokens_map[sid].append((token, lemma, pos.strip(), wid, comment))
            # import tokens
            for sent in doc:
                sent_tokens = sent_tokens_map[sent.ID]
                sent.tokens = ([x[0] for x in sent_tokens])
                for ((tk, lemma, pos, wid, comment), token) in zip(sent_tokens, sent.tokens):
                    token.pos = pos
                    token.lemma = lemma
                    token.comment = comment
            # only read concepts if tokens are available
            if self.concept_stream:
                concept_map = {}
                # concept rows: 4, 5 or 6 columns
                for concept_row in self.concept_reader():
                    if len(concept_row) == 6:
                        sid, cid, clemma, value, _type, comment = concept_row
                    elif len(concept_row) == 5:
                        sid, cid, clemma, value, _type = concept_row
                        # BUGFIX: comment used to leak from the previous row here
                        comment = ''
                    else:
                        sid, cid, clemma, value = concept_row
                        comment = ''
                        _type = ''
                    if not value and not _type:
                        raise ValueError("Invalid concept line (concept value and type cannot be both zero)")
                    cid = int(cid)
                    sent = doc[sid]
                    c = sent.concepts.new(value.strip(), type=_type, clemma=clemma, comment=comment)
                    concept_map[(sid, cid)] = c
                # only read concept-token links if tokens and concepts are available
                for sid, cid, wid in self.link_reader():
                    sent = doc[sid]
                    concept = concept_map[(sid, int(cid.strip()))]
                    token = sent[int(wid.strip())]
                    concept.tokens.append(token)
        # read sentence level tags
        if self.tag_stream:
            for row in self.tag_reader():
                if len(row) == 5:
                    sid, cfrom, cto, value, _type = row
                    wid = None
                elif len(row) == 6:
                    sid, cfrom, cto, value, _type, wid = row
                else:
                    # BUGFIX: malformed rows used to silently reuse stale loop variables
                    raise ValueError("Invalid tag row (expected 5 or 6 columns): {}".format(row))
                if cfrom:
                    cfrom = int(cfrom)
                if cto:
                    cto = int(cto)
                if wid is None or wid == '':
                    # sentence-level tag
                    doc[sid].tags.new(value=value, type=_type, cfrom=cfrom, cto=cto)
                else:
                    # token-level tag
                    doc[sid][int(wid)].tags.new(value=value, type=_type, cfrom=cfrom, cto=cto)
        return doc
class TxtWriter(object):
    """ Save a TTL document into multiple TSV files (sents/tokens/concepts/links/tags) """
    STD_DIALECT = 'excel-tab'        # tab-separated values
    STD_QUOTING = csv.QUOTE_MINIMAL

    def __init__(self, sent_stream, token_stream, concept_stream, link_stream, tag_stream, id_seed=1,
                 csv_dialect=STD_DIALECT, csv_quoting=STD_QUOTING):
        """ Create a writer over the five TTL component streams

        :param id_seed: first auto-generated ID for sentences without one
        """
        self.sent_stream = sent_stream
        self.token_stream = token_stream
        self.concept_stream = concept_stream
        self.link_stream = link_stream
        self.tag_stream = tag_stream
        self.sent_writer = csv.writer(sent_stream, dialect=csv_dialect, quoting=csv_quoting)
        self.token_writer = csv.writer(token_stream, dialect=csv_dialect, quoting=csv_quoting)
        self.concept_writer = csv.writer(concept_stream, dialect=csv_dialect, quoting=csv_quoting)
        self.link_writer = csv.writer(link_stream, dialect=csv_dialect, quoting=csv_quoting)
        self.tag_writer = csv.writer(tag_stream, dialect=csv_dialect, quoting=csv_quoting)
        self.__idgen = IDGenerator(id_seed=id_seed)

    def write_sent(self, sent, **kwargs):
        """ Write one sentence together with its tokens, concepts, links and tags """
        flag = sent.flag if sent.flag is not None else ''
        comment = sent.comment if sent.comment is not None else ''
        sid = sent.ID if sent.ID is not None else next(self.__idgen)
        self.sent_writer.writerow((sid, sent.text, flag, comment))
        # write tokens
        for wid, token in enumerate(sent.tokens):
            self.token_writer.writerow((sid, wid, token.text or token.surface(), token.lemma, token.pos, token.comment))
        # write concepts & wclinks
        for cid, concept in enumerate(sent.concepts):
            # write concept
            self.concept_writer.writerow((sid, cid, concept.clemma, concept.value, concept.type, concept.comment))
            # write cwlinks
            for token in concept.tokens:
                wid = sent.tokens.index(token)
                self.link_writer.writerow((sid, cid, wid))
        # write tags (sentence-level tags carry an empty token-ID column)
        for tag in sent.tags:
            self.tag_writer.writerow((sid, tag.cfrom, tag.cto, tag.value, tag.type, ''))
        # write token-level tags
        for wid, token in enumerate(sent.tokens):
            for tag in token:
                self.tag_writer.writerow((sid, tag.cfrom, tag.cto, tag.value, tag.type, wid))

    def write_doc(self, doc, **kwargs):
        """ Write every sentence in a document """
        for sent in doc:
            self.write_sent(sent)

    def close(self):
        """ Close all underlying streams """
        self.sent_stream.close()
        self.token_stream.close()
        self.concept_stream.close()
        self.link_stream.close()
        self.tag_stream.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @staticmethod
    def from_doc(doc, encoding='utf-8', **kwargs):
        """ Create a TxtWriter for a Document using the standard file layout """
        sent_path = os.path.join(doc.path, '{}_sents.txt'.format(doc.name))
        token_path = os.path.join(doc.path, '{}_tokens.txt'.format(doc.name))
        concept_path = os.path.join(doc.path, '{}_concepts.txt'.format(doc.name))
        link_path = os.path.join(doc.path, '{}_links.txt'.format(doc.name))
        tag_path = os.path.join(doc.path, '{}_tags.txt'.format(doc.name))
        return TxtWriter(sent_stream=open(sent_path, mode='wt', encoding=encoding),
                         token_stream=open(token_path, mode='wt', encoding=encoding),
                         concept_stream=open(concept_path, mode='wt', encoding=encoding),
                         link_stream=open(link_path, mode='wt', encoding=encoding),
                         tag_stream=open(tag_path, mode='wt', encoding=encoding), **kwargs)

    @staticmethod
    def from_path(path, **kwargs):
        """ Create a TxtWriter from a path prefix (directory + document name) """
        doc_path = os.path.dirname(path)
        doc_name = os.path.basename(path)
        doc = Document(name=doc_name, path=doc_path)
        return TxtWriter.from_doc(doc, **kwargs)
class JSONWriter(object):
    """ Write TTL sentences/documents to a stream as JSON lines (one sentence per line) """

    def __init__(self, output_stream, id_seed=1, **kwargs):
        self.__output_stream = output_stream
        self.__idgen = IDGenerator(id_seed=id_seed)

    def write_sent(self, sent, ensure_ascii=False, **kwargs):
        """ Serialise one sentence as a single JSON line; auto-assign an ID when missing """
        if sent.ID is None:
            sent.ID = next(self.__idgen)
        line = json.dumps(sent.to_dict(), ensure_ascii=ensure_ascii)
        self.__output_stream.write(line)
        self.__output_stream.write('\n')

    def write_doc(self, doc, ensure_ascii=False, **kwargs):
        """ Write every sentence of a document """
        for sent in doc:
            self.write_sent(sent, ensure_ascii=ensure_ascii)

    def close(self):
        """ Flush and close the underlying stream (best effort, errors are logged) """
        try:
            stream = self.__output_stream
            if stream is not None:
                stream.flush()
                stream.close()
                self.__output_stream = None
        except Exception:
            logging.getLogger(__name__).exception("Could not close JSONWriter's output stream properly")

    @staticmethod
    def from_path(path, id_seed=1, **kwargs):
        """ Create a JSONWriter that writes to the file at *path* """
        return JSONWriter(output_stream=chio.open(path, mode='wt', **kwargs), id_seed=id_seed)

    @staticmethod
    def from_doc(doc, **kwargs):
        """ Create a JSONWriter for *doc* at {path}/{name}.ttl.json """
        doc_path = os.path.join(doc.path, doc.name + '.ttl.json')
        return JSONWriter.from_path(doc_path, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
def read_json_iter(path):
    """ Yield each sentence stored in a TTL/JSON file (one JSON object per line)

    :raises Exception: when *path* does not point to an existing file
    """
    if not os.path.isfile(path):
        raise Exception("Document file could not be found: {}".format(path))
    with chio.open(path) as infile:
        for line in infile:
            yield Sentence.from_dict(json.loads(line))
def read_json(path):
    """ Read a TTL Document in TTL-JSON format """
    if not os.path.isfile(path):
        raise Exception("Document file could not be found: {}".format(path))
    # document name = file name without extension, path = containing directory
    doc_name = os.path.splitext(os.path.basename(path))[0]
    doc_path = os.path.dirname(path)
    doc = Document(doc_name, path=doc_path)
    for sent in read_json_iter(path):
        # NOTE(review): Document does not define _add_sent_obj in this file;
        # elsewhere sentences are added through doc.sents (ProtoList._add_obj)
        # — verify this call against the Document/ProtoList API
        doc._add_sent_obj(sent)
    return doc
def write_json(path, doc, ensure_ascii=False, **kwargs):
    """ Save a TTL Document in JSON format """
    with JSONWriter.from_path(path) as _writer:
        _writer.write_doc(doc, ensure_ascii=ensure_ascii, **kwargs)
def read(path, mode=MODE_TSV):
    """ Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt)
    E.g. read('~/data/myfile') is the same as Document('myfile', '~/data/').read()

    :param path: path prefix of the document files (or the JSON file in JSON mode)
    :param mode: MODE_TSV (default) or MODE_JSON
    :raises Exception: when an unknown mode is given
    """
    # FIX: compare against the shared mode constants (consistent with write())
    # instead of hard-coded string literals
    if mode == MODE_TSV:
        doc_path = os.path.dirname(path)
        doc_name = os.path.basename(path)
        doc = Document(name=doc_name, path=doc_path)
        reader = TxtReader.from_doc(doc)
        doc = reader.read(doc=doc)
        reader.close()
        return doc
    elif mode == MODE_JSON:
        return read_json(path)
    else:
        raise Exception("Invalid mode - [{}] was provided".format(mode))
def write(path, doc, mode=MODE_TSV, **kwargs):
    """ Helper function to write doc to TTL-TXT format

    :param path: output path prefix (or the JSON file path in JSON mode)
    :param mode: MODE_TSV (default) or MODE_JSON
    :raises Exception: when an unknown mode is given
    """
    if mode == MODE_TSV:
        with TxtWriter.from_path(path) as writer:
            writer.write_doc(doc)
    elif mode == MODE_JSON:
        write_json(path, doc, **kwargs)
    else:
        # FIX: an invalid mode used to be a silent no-op; raise like read() does
        raise Exception("Invalid mode - [{}] was provided".format(mode))
| 36.528142 | 181 | 0.58425 |
3fa9da7743b9990abf1047fd9f9e79666e5042b9 | 11,150 | py | Python | pyboto3/shield.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/shield.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/shield.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
    """Check whether a client operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name (same as the client method name,
        e.g. ``create_foo``). If ``create_foo`` can be paginated you can use
        ``client.get_paginator('create_foo')``.
    """
    pass
def create_protection(Name=None, ResourceArn=None):
    """Enable AWS Shield Advanced for a specific AWS resource.

    The resource can be an Amazon CloudFront distribution, an Elastic Load
    Balancing load balancer, or an Amazon Route 53 hosted zone.
    See also: AWS API Documentation

    :type Name: string
    :param Name: [REQUIRED] Friendly name for the Protection being created.
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The ARN (Amazon Resource Name) of the resource to be protected.
    :rtype: dict
    :return: {'ProtectionId': 'string'}
    """
    pass
def create_subscription():
    """Activate AWS Shield Advanced for an account.

    See also: AWS API Documentation

    :rtype: dict
    :return: {}
    """
    pass
def delete_protection(ProtectionId=None):
    """Delete an AWS Shield Advanced Protection.

    See also: AWS API Documentation

    :type ProtectionId: string
    :param ProtectionId: [REQUIRED] The unique identifier (ID) of the Protection object to delete.
    :rtype: dict
    :return: {}
    """
    pass
def delete_subscription():
    """Remove AWS Shield Advanced from an account.

    See also: AWS API Documentation

    :rtype: dict
    :return: {}
    """
    pass
def describe_attack(AttackId=None):
    """Describe the details of a DDoS attack.

    See also: AWS API Documentation

    :type AttackId: string
    :param AttackId: [REQUIRED] The unique identifier (ID) of the attack to describe.
    :rtype: dict
    :return: A dict of the form ``{'Attack': {...}}`` where the attack detail
        contains 'AttackId', 'ResourceArn', 'SubResources' (each with 'Type'
        ('IP'|'URL'), 'Id', 'AttackVectors' and 'Counters'), 'StartTime',
        'EndTime', 'AttackCounters' (each counter with 'Name', 'Max',
        'Average', 'Sum', 'N' and 'Unit') and 'Mitigations' (each with
        'MitigationName').
    """
    pass
def describe_protection(ProtectionId=None):
    """List the details of a Protection object.

    See also: AWS API Documentation

    :type ProtectionId: string
    :param ProtectionId: [REQUIRED] The unique identifier (ID) of the Protection object to describe.
    :rtype: dict
    :return: {'Protection': {'Id': 'string', 'Name': 'string', 'ResourceArn': 'string'}}
    """
    pass
def describe_subscription():
    """Provide details about the AWS Shield Advanced subscription for an account.

    See also: AWS API Documentation

    :rtype: dict
    :return: {'Subscription': {'StartTime': datetime, 'TimeCommitmentInSeconds': int}}
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL is valid for
        (defaults to one hour, 3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL
        (defaults to whatever is used in the method's model).
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name (same as the client method name,
        e.g. ``create_foo``). If the ``create_foo`` operation can be paginated
        you can use ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """Placeholder for the client ``get_waiter`` operation (no-op stub)."""
    pass
def list_attacks(ResourceArns=None, StartTime=None, EndTime=None, NextToken=None, MaxResults=None):
    """Return all ongoing DDoS attacks, or all attacks within a time window.

    See also: AWS API Documentation

    :type ResourceArns: list
    :param ResourceArns: ARNs of the attacked resources; when left blank,
        every applicable resource for this account is included.
        (string) --
    :type StartTime: dict
    :param StartTime: The time period for the attacks.
        FromInclusive (datetime) -- start time, e.g. 2016-12-16T13:50Z.
        ToExclusive (datetime) -- end time, e.g. 2016-12-16T15:50Z.
    :type EndTime: dict
    :param EndTime: The end of the time period for the attacks; same shape
        as StartTime.
    :type NextToken: string
    :param NextToken: The NextMarker value from a previous ListAttacks
        call; pass null on the first call.
    :type MaxResults: integer
    :param MaxResults: Maximum number of AttackSummary objects to return;
        when left blank the first 20 results are returned.
    :rtype: dict
    :return: {
        'AttackSummaries': [
            {
                'AttackId': 'string',
                'ResourceArn': 'string',
                'StartTime': datetime(2015, 1, 1),
                'EndTime': datetime(2015, 1, 1),
                'AttackVectors': [
                    {'VectorType': 'string'},
                ]
            },
        ],
        'NextToken': 'string'
    }
    """
    # Generated stub: the real implementation is bound by botocore at runtime.
    return None
def list_protections(NextToken=None, MaxResults=None):
    """List all Protection objects for the account.

    See also: AWS API Documentation

    :type NextToken: string
    :param NextToken: The NextToken value from a previous ListProtections
        call; pass null on the first call.
    :type MaxResults: integer
    :param MaxResults: Maximum number of Protection objects to return;
        when left blank the first 20 results are returned.
    :rtype: dict
    :return: {
        'Protections': [
            {
                'Id': 'string',
                'Name': 'string',
                'ResourceArn': 'string'
            },
        ],
        'NextToken': 'string'
    }
    """
    # Generated stub: the real implementation is bound by botocore at runtime.
    return None
| 27.875 | 188 | 0.5513 |
e1d0b80474fabe461284f6b29986f79ab61930e6 | 4,882 | py | Python | code/training/train.py | SUPRIYO-MUKHERJEE/test_PipelineDF | 6eab94fcb05afaa64cd3862374ebd549219a1e8a | [
"MIT"
] | null | null | null | code/training/train.py | SUPRIYO-MUKHERJEE/test_PipelineDF | 6eab94fcb05afaa64cd3862374ebd549219a1e8a | [
"MIT"
] | null | null | null | code/training/train.py | SUPRIYO-MUKHERJEE/test_PipelineDF | 6eab94fcb05afaa64cd3862374ebd549219a1e8a | [
"MIT"
] | null | null | null | """
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# -*- coding: utf-8 -*-
"""Untitled1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/194NiIGq0HjdHgyk6a_jdtnRUFDPtq8wf
"""
import pickle
from azureml.core import Workspace
from azureml.core.run import Run
import os
import argparse
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import json
import subprocess
from typing import Tuple, List
import pandas as pd
import sklearn
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
#import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
# ---------------------------------------------------------------------------
# Command-line arguments: a datetime suffix used to name the run-id JSON file
# and the directory where intermediate JSON configs are written.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser("train")
parser.add_argument(
    "--config_suffix", type=str, help="Datetime suffix for json config files"
)
parser.add_argument(
    "--json_config",
    type=str,
    help="Directory to write all the intermediate json configs",
)
args = parser.parse_args()

print("Argument 1: %s" % args.config_suffix)
print("Argument 2: %s" % args.json_config)

if args.json_config is not None:
    os.makedirs(args.json_config, exist_ok=True)
    print("%s created" % args.json_config)

# Azure ML run context: gives access to the current experiment/workspace and
# lets us log metrics against this run.
run = Run.get_context()
exp = run.experiment
ws = run.experiment.workspace

data_path = 'model_data/data_processed.csv'
data = pd.read_csv(data_path)
print(data.shape)

# Feature/label split: 'fraud_reported' is the binary target column.
X = data.drop(['fraud_reported'], axis=1)
y = data['fraud_reported']
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.20, random_state=23)

model = RandomForestClassifier(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
run.log("accuracy", accuracy)

# FIX: the original assigned the result of confusion_matrix() to a variable
# named 'confusion_matrix' (shadowing the imported function) and then logged
# the *accuracy* value under the "confusion_matrix" metric name.
cm = confusion_matrix(y_test, y_pred)
run.log("confusion_matrix", str(cm.tolist()))

# Per-class recall/precision derived from the 2x2 confusion matrix
# (class 0 = negative, class 1 = positive).
tn, fp, fn, tp = cm.ravel()
recall_0 = tn / (tn + fp)
recall_1 = tp / (tp + fn)
run.log("recall_0", recall_0)
run.log("recall_1", recall_1)
precision_0 = tn / (tn + fn)
precision_1 = tp / (tp + fp)
run.log("precision_0", precision_0)
run.log("precision_1", precision_1)

# Save model as part of the run history.  joblib.dump creates the file
# itself, so the redundant open() wrapper around it was dropped (it opened
# the same path that joblib was writing to by name).
model_name = "testmodel.pkl"
joblib.dump(value=model, filename=model_name)

# upload the model file explicitly into artifacts
run.upload_file(name="./outputs/" + model_name, path_or_stream=model_name)
print("Uploaded the model {} to experiment {}".format(model_name, run.experiment.name))

dirpath = os.getcwd()
print(dirpath)
print("Following files are uploaded ")
print(run.get_file_names())

# Persist this run's identity so downstream pipeline steps can locate it.
run_id = {}
run_id["run_id"] = run.id
run_id["experiment_name"] = run.experiment.name
filename = "run_id_{}.json".format(args.config_suffix)
output_path = os.path.join(args.json_config, filename)
with open(output_path, "w") as outfile:
    json.dump(run_id, outfile)

run.complete()
| 32.986486 | 106 | 0.778984 |
b85be2904e778604735afaf33b25e26c4dc84f5f | 1,601 | py | Python | generate.py | DL-Circle/Mongolian-Script-Generator | ddc3eecae02b67612e615536d8fc2dd6829416ad | [
"MIT"
] | 2 | 2022-01-27T02:54:41.000Z | 2022-01-29T08:05:29.000Z | generate.py | DL-Circle/Mongolian-Script-Generator | ddc3eecae02b67612e615536d8fc2dd6829416ad | [
"MIT"
] | null | null | null | generate.py | DL-Circle/Mongolian-Script-Generator | ddc3eecae02b67612e615536d8fc2dd6829416ad | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
from diffusion_model.trainer import GaussianDiffusion, num_to_groups
from diffusion_model.unet import create_model
from torchvision import utils
import argparse
import torch
# Command-line interface: output file for the sample grid, the checkpoint to
# load, the device, and the model/sampling hyper-parameters (these must match
# the values the checkpoint was trained with).
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exportfile', type=str, default='sample.png')
parser.add_argument('-w', '--weightfile', type=str)
parser.add_argument('-d', '--device', type=str, default='cuda')
parser.add_argument('--input_size', type=int, default=128)
parser.add_argument('--num_channels', type=int, default=64)
parser.add_argument('--num_res_blocks', type=int, default=2)
parser.add_argument('--timesteps', type=int, default=1000)
parser.add_argument('-s', '--num_sample', type=int, default=16)
args = parser.parse_args()
exportfile = args.exportfile
weightfile = args.weightfile
input_size = args.input_size
num_channels = args.num_channels
num_res_blocks = args.num_res_blocks
num_sample = args.num_sample
device = args.device
# Rebuild the UNet + diffusion wrapper, then load weights from the
# checkpoint; only the 'ema' state dict is used for sampling.
model = create_model(input_size, num_channels, num_res_blocks)
diffusion = GaussianDiffusion(
    model,
    image_size = input_size,
    timesteps = args.timesteps, # number of steps
    loss_type = 'l1' # L1 or L2
).to(device)
weight = torch.load(weightfile)
diffusion.load_state_dict(weight['ema'])
print("Model Loaded!")
# NOTE(review): the mapped lambda ignores the group size `n` and always
# samples `num_sample` images per group; the upstream reference code passes
# batch_size=n here — confirm whether the extra samples are intentional.
batches = num_to_groups(4, num_sample)
imgs_list = list(map(lambda n: diffusion.sample(batch_size=num_sample), batches))
imgs = torch.cat(imgs_list, dim=0)
# Rescale from the model's [-1, 1] output range to [0, 1] before saving.
imgs = (imgs + 1) * 0.5
utils.save_image(imgs, exportfile, nrow = 4)
print("Done!")
# import matplotlib.pyplot as plt
# plt.imshow(imgs)
# plt.show()
| 32.02 | 81 | 0.751405 |
48bb529c5d5a0817b3c6e3353e857c62a73b8a16 | 91 | py | Python | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | from app.app import create_app
from config import BaseConfig
# Build the application via the create_app factory using the base
# configuration (presumably a Flask-style app factory — confirm in app.app).
app = create_app(BaseConfig)
| 18.2 | 30 | 0.824176 |
82d52950d9183776a13ff6ec1f323533f1f1ec5e | 2,402 | py | Python | tests/run_tests.py | Conqu3red/PB2-Leaderboards-Bot | 489285524937684af9b7b1e504fc631045838c0e | [
"MIT"
] | 4 | 2020-09-18T18:13:12.000Z | 2021-08-02T10:49:35.000Z | tests/run_tests.py | Conqu3red/PB2-Leaderboards-Bot | 489285524937684af9b7b1e504fc631045838c0e | [
"MIT"
] | 1 | 2021-10-01T12:14:56.000Z | 2021-10-01T12:14:56.000Z | tests/run_tests.py | Conqu3red/PB2-Leaderboards-Bot | 489285524937684af9b7b1e504fc631045838c0e | [
"MIT"
] | null | null | null | import unittest
import os
from pprint import pprint
from functions import *
class CheatedScoreTest(unittest.TestCase):
    """Reloading with a newer snapshot must prune the removed (cheated) score
    from the level's top history."""

    def test(self):
        fixture_dir = os.path.join("tests", "cheated_score_test")
        # Two consecutive leaderboard snapshots for the same level.
        with open(os.path.join(fixture_dir, "p1.json")) as fh:
            snapshot_before = json.load(fh)
        with open(os.path.join(fixture_dir, "p2.json")) as fh:
            snapshot_after = json.load(fh)

        level = Level(id="temp", name="temp", short_name="0-0", isTest=True)
        with open(f"data/{level.id}.json", "w") as fh:
            json.dump(snapshot_before, fh)
        level.reload_leaderboard(new_data=snapshot_after)
        print(level.leaderboard)

        any_board = level.leaderboard["any"]
        self.assertEqual(len(any_board["top_history"]), 1, "top history is not correct")
        self.assertEqual(any_board["top_history"][0]["owner"]["display_name"], "user2", "score on top history is not correct")
class SwitchPlaceTest(unittest.TestCase):
    """The oldest-scores leaderboard must report the expected timestamp for
    the switch-place fixture."""

    def test(self):
        fixture_dir = os.path.join("tests", "switch_place_test")
        with open(os.path.join(fixture_dir, "p1.json")) as fh:
            snapshot = json.load(fh)

        level = Level(id="temp", name="temp", short_name="0-0", isTest=True)
        with open(f"data/{level.id}.json", "w") as fh:
            json.dump(snapshot, fh)
        level.reload_leaderboard(new_data=snapshot)
        all_levels.levels = [level]

        result = get_oldest_scores_leaderboard()
        print("\n\nresult:")
        pprint(result)

        # Check the oldest entry's timestamp, rendered in the board format.
        formatted = datetime.datetime.fromtimestamp(result[0]["time"]).strftime("%d/%m/%Y-%H:%M")
        print(formatted)
        expected = "24/04/2021-00:20"
        self.assertEqual(formatted, expected, f"expected '{expected}' but got {formatted}")
class TieTest(unittest.TestCase):
    """With tied scores, the oldest-scores leaderboard must order entries by
    the expected achievement times."""

    def test(self):
        fixture_dir = os.path.join("tests", "tie_test")
        with open(os.path.join(fixture_dir, "p1.json")) as fh:
            snapshot = json.load(fh)

        level = Level(id="temp", name="temp", short_name="0-0", isTest=True)
        with open(f"data/{level.id}.json", "w") as fh:
            json.dump(snapshot, fh)
        level.reload_leaderboard(new_data=snapshot)
        all_levels.levels = [level]

        result = get_oldest_scores_leaderboard()
        print("\n\nresult:")
        # Render timestamps as strings both for the debug dump and the checks.
        for entry in result:
            entry["time"] = datetime.datetime.fromtimestamp(entry["time"]).strftime("%d/%m/%Y-%H:%M")
        pprint(result)

        self.assertEqual(result[0]["time"], "24/04/2021-01:00", f"expected '24/04/2021-01:00' but got {result[0]['time']}")
        self.assertEqual(result[1]["time"], "24/04/2021-02:00", f"expected '24/04/2021-02:00' but got {result[1]['time']}")
# Discover and run every TestCase defined above when this module executes.
unittest.main()
eda3a75ade23f09acab8a00afbb4b6fef4b9047e | 6,595 | py | Python | util/process_data.py | zhengshoujian/yolo_3d | 57cb0b0314e32161cde9033f48d63c5862db2b14 | [
"MIT"
] | 3 | 2019-10-14T23:21:08.000Z | 2020-08-01T09:02:02.000Z | util/process_data.py | zhengshoujian/yolo_3d | 57cb0b0314e32161cde9033f48d63c5862db2b14 | [
"MIT"
] | 1 | 2019-12-21T07:47:04.000Z | 2020-08-01T14:26:49.000Z | util/process_data.py | zhengshoujian/yolo_3d | 57cb0b0314e32161cde9033f48d63c5862db2b14 | [
"MIT"
] | null | null | null | import os
import numpy as np
import cv2
import copy
def parse_annotation(label_dir, cls_to_ind):
    """Parse KITTI-style label files into a flat list of object dicts.

    Parameters
    ----------
    label_dir : str
        Directory containing ``*.txt`` label files.  Must end with a path
        separator, because it is concatenated directly with each file name.
    cls_to_ind : dict
        Mapping from class name to class index; only classes present here
        are kept.

    Returns
    -------
    list of dict
        One dict per kept object with keys ``name``, ``image``,
        ``xmin``/``ymin``/``xmax``/``ymax``, ``dims`` (np.ndarray of 3
        floats) and ``theta_loc`` (angle wrapped into [0, 2*pi)).
    """
    all_objs = []
    for label_file in os.listdir(label_dir):
        # Labels and images share a basename; only the extension differs.
        image_file = label_file.replace('txt', 'png')
        # FIX: open the label file through a context manager so the handle
        # is closed deterministically (the original leaked descriptors by
        # iterating over a bare open(...)).
        with open(label_dir + label_file) as fh:
            lines = fh.readlines()
        for line in lines:
            line = line.strip().split(' ')
            truncated = np.abs(float(line[1]))
            occluded = np.abs(float(line[2]))
            cls = line[0]
            # Keep objects only when truncation < 0.3 and occlusion <= 1.
            if cls in cls_to_ind and truncated < 0.3 and occluded <= 1:
                # Negate and shift the raw angle, then wrap into [0, 2*pi).
                theta_loc = -float(line[3]) + 3 * np.pi / 2.
                theta_loc = theta_loc - np.floor(theta_loc / (2. * np.pi)) * (2. * np.pi)
                obj = {'name': cls,
                       'image': image_file,
                       'xmin': int(float(line[4])),
                       'ymin': int(float(line[5])),
                       'xmax': int(float(line[6])),
                       'ymax': int(float(line[7])),
                       'dims': np.array([float(number) for number in line[8:11]]),
                       'theta_loc': theta_loc
                       }
                all_objs.append(obj)
    return all_objs
def compute_anchors(angle, bin_num=6, overlap=0.1):
    """Map an angle to its ``[bin_index, angle_offset]`` anchor pair(s).

    The circle is split into ``bin_num`` equal wedges.  The angle is
    assigned to the wedge centre on each side of it that lies within
    ``wedge/2 * (1 + overlap/2)`` — so an angle near a wedge border yields
    two anchors, otherwise one.
    """
    wedge = 2. * np.pi / bin_num
    left = int(angle / wedge)
    right = left + 1
    half_span = wedge / 2 * (1 + overlap / 2)

    anchors = []
    if angle - left * wedge < half_span:
        anchors.append([left, angle - left * wedge])
    if right * wedge - angle < half_span:
        anchors.append([right % bin_num, angle - right * wedge])
    return anchors
def process_obj_attributes(objs, dims_avg, cls_to_ind, bin_num=6, overlap=0.1):
    """Convert parsed objects into regression targets, in place.

    For every object dict:
      * ``dims`` becomes the residual w.r.t. the class-average dimensions;
      * ``orient``/``conf`` encode the local angle as (cos, sin) per angle
        bin plus a normalised per-bin confidence;
      * ``orient_flipped``/``conf_flipped`` encode the mirrored angle
        ``2*pi - theta_loc`` used when the image patch is horizontally
        flipped.

    Returns the (mutated) ``objs`` list.
    """
    for obj in objs:
        # Dimension residual relative to the per-class average.
        obj['dims'] = obj['dims'] - dims_avg[cls_to_ind[obj['name']]]

        # Orientation / confidence targets for the un-flipped patch.
        orientation = np.zeros((bin_num, 2))
        confidence = np.zeros(bin_num)
        for bin_idx, offset in compute_anchors(obj['theta_loc'], bin_num, overlap):
            orientation[bin_idx] = np.array([np.cos(offset), np.sin(offset)])
            confidence[bin_idx] = 1.
        obj['orient'] = orientation
        obj['conf'] = confidence / np.sum(confidence)

        # Orientation / confidence targets for the horizontally flipped patch.
        orientation = np.zeros((bin_num, 2))
        confidence = np.zeros(bin_num)
        # FIX: the original dropped the `overlap` argument on this call, so a
        # caller supplying a custom overlap got inconsistent flipped anchors.
        for bin_idx, offset in compute_anchors(2. * np.pi - obj['theta_loc'], bin_num, overlap):
            orientation[bin_idx] = np.array([np.cos(offset), np.sin(offset)])
            confidence[bin_idx] = 1.
        obj['orient_flipped'] = orientation
        obj['conf_flipped'] = confidence / np.sum(confidence)
    return objs
def get_obj_patch(image_dir, obj, target_size = (224, 224)):
    """Crop an object's bounding box from its image and prepare it as a
    network input patch.

    Returns ``(img, is_flipped)``: ``img`` is a float32 array resized to
    ``target_size`` with fixed per-channel means subtracted, and
    ``is_flipped`` tells the caller whether a random horizontal flip was
    applied (so it can pick the matching flipped orientation targets).
    """
    ### Prepare image patch
    xmin = obj['xmin'] # + np.random.randint(-MAX_JIT, MAX_JIT+1)
    ymin = obj['ymin'] # + np.random.randint(-MAX_JIT, MAX_JIT+1)
    xmax = obj['xmax'] # + np.random.randint(-MAX_JIT, MAX_JIT+1)
    ymax = obj['ymax'] # + np.random.randint(-MAX_JIT, MAX_JIT+1)
    img = cv2.imread(image_dir + obj['image'])
    # Inclusive crop of the bounding box (hence the +1 on both ends).
    img = copy.deepcopy(img[ymin:ymax + 1, xmin:xmax + 1]).astype(np.float32)
    # flip the image: random horizontal flip with probability 0.5.
    flip = np.random.binomial(1, .5)
    is_flipped = False
    if flip > 0.5:
        img = cv2.flip(img, 1)
        is_flipped = True
    # resize the image to standard size
    img = cv2.resize(img, target_size)
    # Subtract the constant channel means [103.939, 116.779, 123.68] —
    # presumably Caffe/VGG-style BGR means for a pretrained backbone; confirm.
    img = img - np.array([[[103.939, 116.779, 123.68]]])
    return img, is_flipped
def load_and_process_annotation_data(label_dir,dims_avg,cls_to_ind):
    """Convenience wrapper: parse the label directory and convert the parsed
    objects into training targets in a single call."""
    objs = parse_annotation(label_dir,cls_to_ind)
    return process_obj_attributes(objs, dims_avg, cls_to_ind)
def train_data_gen(all_objs, image_dir, batch_size,bin_num=6):
    """Infinite generator yielding shuffled training batches.

    Each yield is ``(x_batch, [d_batch, o_batch, c_batch])`` where
    ``x_batch`` holds 224x224x3 image patches and the target arrays hold
    dimension residuals, per-bin orientations and per-bin confidences.
    Object order is reshuffled at the start of every epoch; the last batch
    of an epoch may be smaller than ``batch_size``.
    """
    num_obj = len(all_objs)
    keys = list(range(num_obj))
    np.random.shuffle(keys)
    # [l_bound, r_bound) is the slice of shuffled indices for this batch.
    l_bound = 0
    r_bound = batch_size if batch_size < num_obj else num_obj
    while True:
        if l_bound == r_bound:
            # Epoch finished: rewind to the start and reshuffle.
            l_bound = 0
            r_bound = batch_size if batch_size < num_obj else num_obj
            np.random.shuffle(keys)
        currt_inst = 0
        x_batch = np.zeros((r_bound - l_bound, 224, 224, 3))
        d_batch = np.zeros((r_bound - l_bound, 3))
        o_batch = np.zeros((r_bound - l_bound, bin_num, 2))
        c_batch = np.zeros((r_bound - l_bound, bin_num))
        for key in keys[l_bound:r_bound]:
            # get object patch and do augment
            obj = all_objs[key]
            image, is_flipped= get_obj_patch(image_dir,all_objs[key])
            # fix object's orientation and confidence: pick the targets that
            # match the (possibly flipped) patch returned above.
            if is_flipped:
                dimension, orientation, confidence = obj['dims'], obj['orient_flipped'], obj['conf_flipped']
            else:
                dimension, orientation, confidence = obj['dims'], obj['orient'], obj['conf']
            x_batch[currt_inst, :] = image
            d_batch[currt_inst, :] = dimension
            o_batch[currt_inst, :] = orientation
            c_batch[currt_inst, :] = confidence
            currt_inst += 1
        yield x_batch, [d_batch, o_batch, c_batch]
        # Advance the window, clamping the final (possibly short) batch.
        l_bound = r_bound
        r_bound = r_bound + batch_size
        if r_bound > num_obj: r_bound = num_obj
def get_cam_data(calib_file):
    """Read the camera-to-image projection matrix ('P2') from a KITTI-style
    calibration file.

    Parameters
    ----------
    calib_file : str
        Path to the calibration text file (space-separated values).

    Returns
    -------
    np.ndarray or None
        The 3x4 'P2' projection matrix from the first matching line, or
        None when no 'P2:' line exists (same as the original's implicit
        fall-through).
    """
    # FIX: open the file through a context manager — the original iterated
    # over a bare open(...) and leaked the file handle.
    with open(calib_file) as fh:
        for line in fh:
            if 'P2:' in line:
                values = line.strip().split(' ')
                cam_to_img = np.asarray([float(number) for number in values[1:]])
                return np.reshape(cam_to_img, (3, 4))
    return None
def get_dect2D_data(box2d_file,classes):
    """Split detections from a label file into 3D-drawable and 2D-only sets.

    Parameters
    ----------
    box2d_file : str
        Path to a space-separated label file (KITTI layout: class,
        truncation, occlusion, alpha, then the 2D box x1 y1 x2 y2).
    classes : iterable of str
        Class names to keep; other lines are ignored entirely.

    Returns
    -------
    (list, list)
        ``dect2D_data``: [class, box] pairs for objects with truncation
        < 0.3 and occlusion <= 1 (eligible for a full 3D box);
        ``box2d_reserved``: the remaining kept objects, to be drawn with
        their original 2D box only.  Boxes are float np.ndarrays of len 4.
    """
    dect2D_data = []
    box2d_reserved = []
    # FIX: context-managed open — the original leaked the file handle.
    with open(box2d_file) as fh:
        for line in fh:
            line = line.strip().split(' ')
            cls = line[0]
            truncated = np.abs(float(line[1]))
            occluded = np.abs(float(line[2]))
            if cls in classes:
                # FIX: the deprecated np.float alias was removed in
                # NumPy 1.24; the builtin float is the documented substitute.
                box_2D = np.asarray(line[4:8], dtype=float)
                # Draw a full 3D box only for mostly-visible objects; keep
                # the rest so their plain 2D box can still be drawn.
                if truncated < 0.3 and occluded <= 1:
                    dect2D_data.append([cls, box_2D])
                else:
                    box2d_reserved.append([cls, box_2D])
    return dect2D_data, box2d_reserved
| 33.308081 | 108 | 0.581501 |
246a1e506b52359922e1be99dc54d1eb7364b23c | 1,515 | py | Python | var/spack/repos/builtin/packages/help/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/help/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/help/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-15T02:37:59.000Z | 2020-09-21T04:34:38.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install help
#
# You can edit this file again by typing:
#
# spack edit help
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
from spack import *
class Help(Package):
    """Spack package recipe skeleton.

    NOTE(review): this is still the unmodified ``spack create`` boilerplate —
    the homepage, url, versions and checksums below are placeholders that
    must be filled in before the package can actually be installed.
    """

    # FIXME: Add a proper url for your package's homepage here.
    homepage = "https://www.example.com"
    url      = "help"

    # FIXME: Add a list of GitHub accounts to
    # notify when the package is updated.
    # maintainers = ['github_user1', 'github_user2']

    # FIXME: Add proper versions and checksums here.
    # version('1.2.3', '0123456789abcdef0123456789abcdef')

    # FIXME: Add dependencies if required.
    # depends_on('foo')

    def install(self, spec, prefix):
        # FIXME: Unknown build system.  Generic make-based install; assumes
        # the sources ship a Makefile with an 'install' target that honours
        # the spack prefix — TODO confirm once the real sources are known.
        make()
        make('install')
| 32.234043 | 78 | 0.621782 |
4cad9d93a7b49c037605cd57eec1e4e0d3b627f5 | 81,344 | py | Python | djstripe/migrations/0001_initial.py | HiddenClever/dj-stripe | cc64c084f2f5c92d9aca60cf81c064bfa1fcde7f | [
"MIT"
] | null | null | null | djstripe/migrations/0001_initial.py | HiddenClever/dj-stripe | cc64c084f2f5c92d9aca60cf81c064bfa1fcde7f | [
"MIT"
] | null | null | null | djstripe/migrations/0001_initial.py | HiddenClever/dj-stripe | cc64c084f2f5c92d9aca60cf81c064bfa1fcde7f | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-10 22:23
import uuid
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import djstripe.enums
import djstripe.fields
import djstripe.models
# Resolve the subscriber model for this migration: honour the optional
# DJSTRIPE_SUBSCRIBER_MODEL setting, falling back to the project's
# AUTH_USER_MODEL when it is not configured.
DJSTRIPE_SUBSCRIBER_MODEL = getattr(settings, "DJSTRIPE_SUBSCRIBER_MODEL", settings.AUTH_USER_MODEL)
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('business_name', djstripe.fields.StripeCharField(help_text='The publicly visible name of the business', max_length=255)),
('business_primary_color', djstripe.fields.StripeCharField(help_text='A CSS hex color value representing the primary branding color for this account', max_length=7, null=True)),
('business_url', djstripe.fields.StripeCharField(help_text='The publicly visible website of the business', max_length=200, null=True)),
('charges_enabled', djstripe.fields.StripeBooleanField(help_text='Whether the account can create live charges')),
('country', djstripe.fields.StripeCharField(help_text='The country of the account', max_length=2)),
('debit_negative_balances', djstripe.fields.StripeNullBooleanField(default=False, help_text='A Boolean indicating if Stripe should try to reclaim negative balances from an attached bank account.')),
('decline_charge_on', djstripe.fields.StripeJSONField(help_text='Account-level settings to automatically decline certain types of charges regardless of the decision of the card issuer', null=True)),
('default_currency', djstripe.fields.StripeCharField(help_text='The currency this account has chosen to use as the default', max_length=3)),
('details_submitted', djstripe.fields.StripeBooleanField(help_text='Whether account details have been submitted. Standard accounts cannot receive payouts before this is true.')),
('display_name', djstripe.fields.StripeCharField(help_text='The display name for this account. This is used on the Stripe Dashboard to differentiate between accounts.', max_length=255)),
('email', djstripe.fields.StripeCharField(help_text='The primary user’s email address.', max_length=255)),
('legal_entity', djstripe.fields.StripeJSONField(help_text='Information about the legal entity itself, including about the associated account representative', null=True)),
('payout_schedule', djstripe.fields.StripeJSONField(help_text='Details on when funds from charges are available, and when they are paid out to an external account.', null=True)),
('payout_statement_descriptor', djstripe.fields.StripeCharField(default='', help_text='The text that appears on the bank account statement for payouts.', max_length=255, null=True)),
('payouts_enabled', djstripe.fields.StripeBooleanField(help_text='Whether Stripe can send payouts to this account')),
('product_description', djstripe.fields.StripeCharField(help_text='Internal-only description of the product sold or service provided by the business. It’s used by Stripe for risk and underwriting purposes.', max_length=255, null=True)),
('statement_descriptor', djstripe.fields.StripeCharField(default='', help_text='The default text that appears on credit card statements when a charge is made directly on the account', max_length=255)),
('support_email', djstripe.fields.StripeCharField(help_text='A publicly shareable support email address for the business', max_length=255)),
('support_phone', djstripe.fields.StripeCharField(help_text='A publicly shareable support phone number for the business', max_length=255)),
('support_url', djstripe.fields.StripeCharField(help_text='A publicly shareable URL that provides support for this account', max_length=200)),
('timezone', djstripe.fields.StripeCharField(help_text='The timezone used in the Stripe Dashboard for this account.', max_length=50)),
('type', djstripe.fields.StripeEnumField(enum=djstripe.enums.AccountType, help_text='The Stripe account type.', max_length=8)),
('tos_acceptance', djstripe.fields.StripeJSONField(help_text='Details on the acceptance of the Stripe Services Agreement', null=True)),
('verification', djstripe.fields.StripeJSONField(help_text='Information on the verification state of the account, including what information is needed and by when', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BankAccount',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('account_holder_name', djstripe.fields.StripeCharField(help_text='The name of the person or business that owns the bank account.', max_length=5000, null=True)),
('account_holder_type', djstripe.fields.StripeEnumField(enum=djstripe.enums.BankAccountHolderType, help_text='The type of entity that holds the account.', max_length=10)),
('bank_name', djstripe.fields.StripeCharField(help_text='Name of the bank associated with the routing number (e.g., `WELLS FARGO`).', max_length=255)),
('country', djstripe.fields.StripeCharField(help_text='Two-letter ISO code representing the country the bank account is located in.', max_length=2)),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code', max_length=3)),
('default_for_currency', djstripe.fields.StripeNullBooleanField(help_text='Whether this external account is the default account for its currency.')),
('fingerprint', djstripe.fields.StripeCharField(help_text='Uniquely identifies this particular bank account. You can use this attribute to check whether two bank accounts are the same.', max_length=16)),
('last4', djstripe.fields.StripeCharField(max_length=4)),
('routing_number', djstripe.fields.StripeCharField(help_text='The routing transit number for the bank account.', max_length=255)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.BankAccountStatus, max_length=19)),
('account', models.ForeignKey(help_text='The account the charge was made on behalf of. Null here indicates that this value was never set.', on_delete=django.db.models.deletion.PROTECT, related_name='bank_account', to='djstripe.Account')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Card',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('address_city', djstripe.fields.StripeTextField(help_text='Billing address city.', null=True)),
('address_country', djstripe.fields.StripeTextField(help_text='Billing address country.', null=True)),
('address_line1', djstripe.fields.StripeTextField(help_text='Billing address (Line 1).', null=True)),
('address_line1_check', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardCheckResult, help_text='If `address_line1` was provided, results of the check.', max_length=11, null=True)),
('address_line2', djstripe.fields.StripeTextField(help_text='Billing address (Line 2).', null=True)),
('address_state', djstripe.fields.StripeTextField(help_text='Billing address state.', null=True)),
('address_zip', djstripe.fields.StripeTextField(help_text='Billing address zip code.', null=True)),
('address_zip_check', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardCheckResult, help_text='If `address_zip` was provided, results of the check.', max_length=11, null=True)),
('brand', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardBrand, help_text='Card brand.', max_length=16)),
('country', djstripe.fields.StripeCharField(help_text='Two-letter ISO code representing the country of the card.', max_length=2, null=True)),
('cvc_check', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardCheckResult, help_text='If a CVC was provided, results of the check.', max_length=11, null=True)),
('dynamic_last4', djstripe.fields.StripeCharField(help_text='(For tokenized numbers only.) The last four digits of the device account number.', max_length=4, null=True)),
('exp_month', djstripe.fields.StripeIntegerField(help_text='Card expiration month.')),
('exp_year', djstripe.fields.StripeIntegerField(help_text='Card expiration year.')),
('fingerprint', djstripe.fields.StripeTextField(help_text='Uniquely identifies this particular card number.', null=True)),
('funding', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardFundingType, help_text='Card funding type.', max_length=7)),
('last4', djstripe.fields.StripeCharField(help_text='Last four digits of Card number.', max_length=4)),
('name', djstripe.fields.StripeTextField(help_text='Cardholder name.', null=True)),
('tokenization_method', djstripe.fields.StripeEnumField(enum=djstripe.enums.CardTokenizationMethod, help_text='If the card number is tokenized, this is the method that was used.', max_length=11, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Charge',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Amount charged.', max_digits=8)),
('amount_refunded', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Amount refunded (can be less than the amount attribute on the charge if a partial refund was issued).', max_digits=8)),
('captured', djstripe.fields.StripeBooleanField(default=False, help_text='If the charge was created without capturing, this boolean represents whether or not it is still uncaptured or has since been captured.')),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code representing the currency in which the charge was made.', max_length=3)),
('failure_code', djstripe.fields.StripeEnumField(enum=djstripe.enums.ApiErrorCode, help_text='Error code explaining reason for charge failure if available.', max_length=20, null=True)),
('failure_message', djstripe.fields.StripeTextField(help_text='Message to user further explaining reason for charge failure if available.', null=True)),
('fraud_details', djstripe.fields.StripeJSONField(help_text='Hash with information on fraud assessments for the charge.')),
('outcome', djstripe.fields.StripeJSONField(help_text='Details about whether or not the payment was accepted, and why.')),
('paid', djstripe.fields.StripeBooleanField(default=False, help_text='True if the charge succeeded, or was successfully authorized for later capture, False otherwise.')),
('receipt_email', djstripe.fields.StripeCharField(help_text='The email address that the receipt for this charge was sent to.', max_length=800, null=True)),
('receipt_number', djstripe.fields.StripeCharField(help_text='The transaction number that appears on email receipts sent for this charge.', max_length=9, null=True)),
('refunded', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not the charge has been fully refunded. If the charge is only partially refunded, this attribute will still be false.')),
('shipping', djstripe.fields.StripeJSONField(help_text='Shipping information for the charge', null=True)),
('statement_descriptor', djstripe.fields.StripeCharField(help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The statement description may not include <>"\' characters, and will appear on your customer\'s statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.', max_length=22, null=True)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.ChargeStatus, help_text='The status of the payment.', max_length=9)),
('transfer_group', djstripe.fields.StripeCharField(blank=True, help_text='A string that identifies this transaction as part of a group.', max_length=255, null=True)),
('fee', djstripe.fields.StripeCurrencyField(decimal_places=2, max_digits=8, null=True)),
('fee_details', djstripe.fields.StripeJSONField(null=True)),
('source_type', djstripe.fields.StripeEnumField(enum=djstripe.enums.LegacySourceType, help_text='The payment source type. If the payment source is supported by dj-stripe, a corresponding model is attached to this Charge via a foreign key matching this field.', max_length=16, null=True)),
('source_stripe_id', djstripe.fields.StripeIdField(help_text='The payment source id.', max_length=255, null=True)),
('fraudulent', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not this charge was marked as fraudulent.')),
('receipt_sent', models.BooleanField(default=False, help_text='Whether or not a receipt was sent for this charge.')),
('account', models.ForeignKey(help_text='The account the charge was made on behalf of. Null here indicates that this value was never set.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='djstripe.Account')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Coupon',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('stripe_id', djstripe.fields.StripeIdField(max_length=500)),
('amount_off', djstripe.fields.StripeCurrencyField(blank=True, decimal_places=2, help_text='Amount that will be taken off the subtotal of any invoices for this customer.', max_digits=8, null=True)),
('currency', djstripe.fields.StripeCharField(blank=True, help_text='Three-letter ISO currency code', max_length=3, null=True)),
('duration', djstripe.fields.StripeEnumField(enum=djstripe.enums.CouponDuration, help_text='Describes how long a customer who applies this coupon will get the discount.', max_length=9)),
('duration_in_months', djstripe.fields.StripePositiveIntegerField(blank=True, help_text='If `duration` is `repeating`, the number of months the coupon applies.', null=True)),
('max_redemptions', djstripe.fields.StripePositiveIntegerField(blank=True, help_text='Maximum number of times this coupon can be redeemed, in total, before it is no longer valid.', null=True)),
('percent_off', djstripe.fields.StripePositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(100)])),
('redeem_by', djstripe.fields.StripeDateTimeField(blank=True, help_text='Date after which the coupon can no longer be redeemed. Max 5 years in the future.', null=True)),
('times_redeemed', djstripe.fields.StripePositiveIntegerField(default=0, editable=False, help_text='Number of times this coupon has been applied to a customer.')),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('account_balance', djstripe.fields.StripeIntegerField(help_text="Current balance, if any, being stored on the customer's account. If negative, the customer has credit to apply to the next invoice. If positive, the customer has an amount owed that will be added to thenext invoice. The balance does not refer to any unpaid invoices; it solely takes into account amounts that have yet to be successfullyapplied to any invoice. This balance is only taken into account for recurring billing purposes (i.e., subscriptions, invoices, invoice items).")),
('business_vat_id', djstripe.fields.StripeCharField(help_text="The customer's VAT identification number.", max_length=20, null=True)),
('currency', djstripe.fields.StripeCharField(help_text='The currency the customer can be charged in for recurring billing purposes (subscriptions, invoices, invoice items).', max_length=3, null=True)),
('delinquent', djstripe.fields.StripeBooleanField(help_text="Whether or not the latest charge for the customer's latest invoice has failed.")),
('coupon_start', djstripe.fields.StripeDateTimeField(editable=False, help_text='If a coupon is present, the date at which it was applied.', null=True)),
('coupon_end', djstripe.fields.StripeDateTimeField(editable=False, help_text='If a coupon is present and has a limited duration, the date that the discount will end.', null=True)),
('email', djstripe.fields.StripeTextField(null=True)),
('shipping', djstripe.fields.StripeJSONField(help_text='Shipping information associated with the customer.', null=True)),
('date_purged', models.DateTimeField(editable=False, null=True)),
('coupon', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djstripe.Coupon')),
],
),
migrations.CreateModel(
name='Dispute',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeIntegerField(help_text='Disputed amount. Usually the amount of the charge, but can differ (usually because of currency fluctuation or because only part of the order is disputed).')),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.', max_length=3)),
('evidence', djstripe.fields.StripeJSONField(help_text='Evidence provided to respond to a dispute.')),
('evidence_details', djstripe.fields.StripeJSONField(help_text='Information about the evidence submission.')),
('is_charge_refundable', djstripe.fields.StripeBooleanField(help_text='If true, it is still possible to refund the disputed payment. Once the payment has been fully refunded, no further funds will be withdrawn from your Stripe account as a result of this dispute.')),
('reason', djstripe.fields.StripeEnumField(enum=djstripe.enums.DisputeReason, max_length=25)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.DisputeStatus, max_length=22)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Event',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('api_version', djstripe.fields.StripeCharField(blank=True, help_text='the API version at which the event data was rendered. Blank for old entries only, all new entries will have this value', max_length=15)),
('data', djstripe.fields.StripeJSONField(help_text='data received at webhook. data should be considered to be garbage until validity check is run and valid flag is set')),
('request_id', djstripe.fields.StripeCharField(blank=True, help_text="Information about the request that triggered this event, for traceability purposes. If empty string then this is an old entry without that data. If Null then this is not an old entry, but a Stripe 'automated' event with no associated request.", max_length=50, null=True)),
('idempotency_key', djstripe.fields.StripeTextField(blank=True, null=True)),
('type', djstripe.fields.StripeCharField(help_text="Stripe's event description code", max_length=250)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FileUpload',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('filename', djstripe.fields.StripeCharField(help_text='A filename for the file, suitable for saving to a filesystem.', max_length=255)),
('purpose', djstripe.fields.StripeEnumField(enum=djstripe.enums.FileUploadPurpose, help_text='The purpose of the uploaded file.', max_length=24)),
('size', djstripe.fields.StripeIntegerField(help_text='The size in bytes of the file upload object.')),
('type', djstripe.fields.StripeEnumField(enum=djstripe.enums.FileUploadType, help_text='The type of the file returned.', max_length=4)),
('url', djstripe.fields.StripeCharField(help_text='A read-only URL where the uploaded file can be accessed.', max_length=200)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='IdempotencyKey',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('action', models.CharField(max_length=100)),
('livemode', models.BooleanField(help_text='Whether the key was used in live or test mode.')),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Invoice',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount_due', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text="Final amount due at this time for this invoice. If the invoice's total is smaller than the minimum charge amount, for example, or if there is account credit that can be applied to the invoice, the amount_due may be 0. If there is a positive starting_balance for the invoice (the customer owes money), the amount_due will also take that into account. The charge that gets generated for the invoice will be for the amount specified in amount_due.", max_digits=8)),
('amount_paid', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='The amount, in cents, that was paid.', max_digits=8, null=True)),
('amount_remaining', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='The amount, in cents, that was paid.', max_digits=8, null=True)),
('application_fee', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text="The fee in cents that will be applied to the invoice and transferred to the application owner's Stripe account when the invoice is paid.", max_digits=8, null=True)),
('attempt_count', djstripe.fields.StripeIntegerField(help_text='Number of payment attempts made for this invoice, from the perspective of the payment retry schedule. Any payment attempt counts as the first attempt, and subsequently only automatic retries increment the attempt count. In other words, manual payment attempts after the first attempt do not affect the retry schedule.')),
('attempted', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not an attempt has been made to pay the invoice. An invoice is not attempted until 1 hour after the ``invoice.created`` webhook, for example, so you might not want to display that invoice as unpaid to your users.')),
('billing', djstripe.fields.StripeEnumField(enum=djstripe.enums.InvoiceBilling, help_text='When charging automatically, Stripe will attempt to pay this invoiceusing the default source attached to the customer. When sending an invoice, Stripe will email this invoice to the customer with payment instructions.', max_length=20, null=True)),
('closed', djstripe.fields.StripeBooleanField(default=False, help_text="Whether or not the invoice is still trying to collect payment. An invoice is closed if it's either paid or it has been marked closed. A closed invoice will no longer attempt to collect payment.")),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.', max_length=3)),
('date', djstripe.fields.StripeDateTimeField(help_text='The date on the invoice.')),
('due_date', djstripe.fields.StripeDateTimeField(help_text='The date on which payment for this invoice is due. This value will be null for invoices where billing=charge_automatically.', null=True)),
('ending_balance', djstripe.fields.StripeIntegerField(help_text='Ending customer balance after attempting to pay invoice. If the invoice has not been attempted yet, this will be null.', null=True)),
('forgiven', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not the invoice has been forgiven. Forgiving an invoice instructs us to update the subscription status as if the invoice were successfully paid. Once an invoice has been forgiven, it cannot be unforgiven or reopened.')),
('hosted_invoice_url', djstripe.fields.StripeCharField(help_text='The URL for the hosted invoice page, which allows customers to view and pay an invoice. If the invoice has not been frozen yet, this will be null.', max_length=799, null=True)),
('invoice_pdf', djstripe.fields.StripeCharField(help_text='The link to download the PDF for the invoice. If the invoice has not been frozen yet, this will be null.', max_length=799, null=True)),
('next_payment_attempt', djstripe.fields.StripeDateTimeField(help_text='The time at which payment will next be attempted.', null=True)),
('number', djstripe.fields.StripeCharField(help_text='A unique, identifying string that appears on emails sent to the customer for this invoice. This starts with the customer’s unique invoice_prefix if it is specified.', max_length=64, null=True)),
('paid', djstripe.fields.StripeBooleanField(default=False, help_text='The time at which payment will next be attempted.')),
('period_end', djstripe.fields.StripeDateTimeField(help_text='End of the usage period during which invoice items were added to this invoice.')),
('period_start', djstripe.fields.StripeDateTimeField(help_text='Start of the usage period during which invoice items were added to this invoice.')),
('receipt_number', djstripe.fields.StripeCharField(help_text='This is the transaction number that appears on email receipts sent for this invoice.', max_length=64, null=True)),
('starting_balance', djstripe.fields.StripeIntegerField(help_text='Starting customer balance before attempting to pay invoice. If the invoice has not been attempted yet, this will be the current customer balance.')),
('statement_descriptor', djstripe.fields.StripeCharField(help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The statement description may not include <>"\' characters, and will appear on your customer\'s statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.', max_length=22, null=True)),
('subscription_proration_date', djstripe.fields.StripeDateTimeField(help_text='Only set for upcoming invoices that preview prorations. The time used to calculate prorations.', null=True)),
('subtotal', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Only set for upcoming invoices that preview prorations. The time used to calculate prorations.', max_digits=8)),
('tax', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='The amount of tax included in the total, calculated from ``tax_percent`` and the subtotal. If no ``tax_percent`` is defined, this value will be null.', max_digits=8, null=True)),
('tax_percent', djstripe.fields.StripePercentField(decimal_places=2, help_text="This percentage of the subtotal has been added to the total amount of the invoice, including invoice line items and discounts. This field is inherited from the subscription's ``tax_percent`` field, but can be changed before the invoice is paid. This field defaults to null.", max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)])),
('total', djstripe.fields.StripeCurrencyField(decimal_places=2, max_digits=8, verbose_name='Total after discount.')),
('webhooks_delivered_at', djstripe.fields.StripeDateTimeField(help_text='The time at which webhooks for this invoice were successfully delivered (if the invoice had no webhooks to deliver, this will match `date`). Invoice payment is delayed until webhooks are delivered, or until all webhook delivery attempts have been exhausted.', null=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='InvoiceItem',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Amount invoiced.', max_digits=8)),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.', max_length=3)),
('date', djstripe.fields.StripeDateTimeField(help_text='The date on the invoiceitem.')),
('discountable', djstripe.fields.StripeBooleanField(default=False, help_text='If True, discounts will apply to this invoice item. Always False for prorations.')),
('period', djstripe.fields.StripeJSONField()),
('period_end', djstripe.fields.StripeDateTimeField(help_text="Might be the date when this invoiceitem's invoice was sent.")),
('period_start', djstripe.fields.StripeDateTimeField(help_text='Might be the date when this invoiceitem was added to the invoice')),
('proration', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not the invoice item was created automatically as a proration adjustment when the customer switched plans.')),
('quantity', djstripe.fields.StripeIntegerField(help_text='If the invoice item is a proration, the quantity of the subscription for which the proration was computed.', null=True)),
('customer', models.ForeignKey(help_text='The customer associated with this invoiceitem.', on_delete=django.db.models.deletion.CASCADE, related_name='invoiceitems', to='djstripe.Customer')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.CharField(max_length=255, primary_key=True, serialize=False)),
('type', models.CharField(db_index=True, max_length=12)),
],
),
migrations.CreateModel(
name='Payout',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Amount to be transferred to your bank account or debit card.', max_digits=8)),
('arrival_date', djstripe.fields.StripeDateTimeField(help_text='Date the payout is expected to arrive in the bank. This factors in delays like weekends or bank holidays.')),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.', max_length=3)),
('failure_code', djstripe.fields.StripeEnumField(blank=True, enum=djstripe.enums.PayoutFailureCode, help_text='Error code explaining reason for transfer failure if available. See https://stripe.com/docs/api/python#transfer_failures.', max_length=23, null=True)),
('failure_message', djstripe.fields.StripeTextField(blank=True, help_text='Message to user further explaining reason for payout failure if available.', null=True)),
('method', djstripe.fields.StripeEnumField(enum=djstripe.enums.PayoutMethod, help_text='The method used to send this payout. `instant` is only supported for payouts to debit cards.', max_length=8)),
('statement_descriptor', djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a payout to be displayed on the user's bank statement.", max_length=255, null=True)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.PayoutStatus, help_text='Current status of the payout. A payout will be `pending` until it is submitted to the bank, at which point it becomes `in_transit`. I t will then change to paid if the transaction goes through. If it does not go through successfully, its status will change to `failed` or `canceled`.', max_length=10)),
('type', djstripe.fields.StripeEnumField(enum=djstripe.enums.PayoutType, max_length=12)),
('destination', models.ForeignKey(help_text='ID of the bank account or card the payout was sent to.', null=True, on_delete=django.db.models.deletion.PROTECT, to='djstripe.BankAccount')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Plan',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('aggregate_usage', djstripe.fields.StripeEnumField(enum=djstripe.enums.PlanAggregateUsage, help_text='Specifies a usage aggregation strategy for plans of usage_type=metered. Allowed values are `sum` for summing up all usage during a period, `last_during_period` for picking the last usage record reported within a period, `last_ever` for picking the last usage record ever (across period bounds) or max which picks the usage record with the maximum reported usage during a period. Defaults to `sum`.', max_length=18, null=True)),
('amount', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='Amount to be charged on the interval specified.', max_digits=8)),
('billing_scheme', djstripe.fields.StripeEnumField(enum=djstripe.enums.PlanBillingScheme, help_text='Describes how to compute the price per period. Either `per_unit` or `tiered`. `per_unit` indicates that the fixed amount (specified in amount) will be charged per unit in quantity (for plans with `usage_type=licensed`), or per unit of total usage (for plans with `usage_type=metered`). `tiered` indicates that the unit pricing will be computed using a tiering strategy as defined using the tiers and tiers_mode attributes.', max_length=8, null=True)),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code', max_length=3)),
('interval', djstripe.fields.StripeEnumField(enum=djstripe.enums.PlanInterval, help_text='The frequency with which a subscription should be billed.', max_length=5)),
('interval_count', djstripe.fields.StripeIntegerField(help_text='The number of intervals (specified in the interval property) between each subscription billing.', null=True)),
('nickname', djstripe.fields.StripeCharField(help_text='A brief description of the plan, hidden from customers.', max_length=5000, null=True)),
('tiers', djstripe.fields.StripeJSONField(help_text='Each element represents a pricing tier. This parameter requires `billing_scheme` to be set to `tiered`.', null=True)),
('tiers_mode', djstripe.fields.StripeEnumField(enum=djstripe.enums.PlanTiersMode, help_text='Defines if the tiering price should be `graduated` or `volume` based. In `volume`-based tiering, the maximum quantity within a period determines the per unit price, in `graduated` tiering pricing can successively change as the quantity grows.', max_length=9, null=True)),
('transform_usage', djstripe.fields.StripeJSONField(help_text='Apply a transformation to the reported usage or set quantity before computing the billed price. Cannot be combined with `tiers`.', null=True)),
('trial_period_days', djstripe.fields.StripeIntegerField(help_text='Number of trial period days granted when subscribing a customer to this plan. Null if the plan has no trial period.', null=True)),
('usage_type', djstripe.fields.StripeEnumField(default='licensed', enum=djstripe.enums.PlanUsageType, help_text='Configures how the quantity per period should be determined, can be either`metered` or `licensed`. `licensed` will automatically bill the `quantity` set for a plan when adding it to a subscription, `metered` will aggregate the total usage based on usage records. Defaults to `licensed`.', max_length=8)),
('name', djstripe.fields.StripeTextField(help_text='Name of the plan, to be displayed on invoices and in the web interface.', null=True)),
('statement_descriptor', djstripe.fields.StripeCharField(help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The statement description may not include <>"\' characters, and will appear on your customer\'s statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.', max_length=22, null=True)),
],
options={
'ordering': ['amount'],
},
),
migrations.CreateModel(
name='Product',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('name', djstripe.fields.StripeCharField(help_text="The product's name, meant to be displayable to the customer. Applicable to both `service` and `good` types.", max_length=5000)),
('type', djstripe.fields.StripeEnumField(enum=djstripe.enums.ProductType, help_text='The type of the product. The product is either of type `good`, which is eligible for use with Orders and SKUs, or `service`, which is eligible for use with Subscriptions and Plans.', max_length=7)),
('active', djstripe.fields.StripeNullBooleanField(help_text='Whether the product is currently available for purchase. Only applicable to products of `type=good`.')),
('attributes', djstripe.fields.StripeJSONField(help_text='A list of up to 5 attributes that each SKU can provide values for (e.g., `["color", "size"]`). Only applicable to products of `type=good`.', null=True)),
('caption', djstripe.fields.StripeCharField(help_text='A short one-line description of the product, meant to be displayableto the customer. Only applicable to products of `type=good`.', max_length=5000, null=True)),
('deactivate_on', djstripe.fields.StripeJSONField(blank=True, help_text='An array of connect application identifiers that cannot purchase this product. Only applicable to products of `type=good`.')),
('images', djstripe.fields.StripeJSONField(blank=True, help_text='A list of up to 8 URLs of images for this product, meant to be displayable to the customer. Only applicable to products of `type=good`.')),
('package_dimensions', djstripe.fields.StripeJSONField(help_text='The dimensions of this product for shipping purposes. A SKU associated with this product can override this value by having its own `package_dimensions`. Only applicable to products of `type=good`.', null=True)),
('shippable', djstripe.fields.StripeNullBooleanField(help_text='Whether this product is a shipped good. Only applicable to products of `type=good`.')),
('url', djstripe.fields.StripeCharField(help_text='A URL of a publicly-accessible webpage for this product. Only applicable to products of `type=good`.', max_length=799, null=True)),
('statement_descriptor', djstripe.fields.StripeCharField(help_text="Extra information about a product which will appear on your customer's credit card statement. In the case that multiple products are billed at once, the first statement descriptor will be used. Only available on products of type=`service`.", max_length=22, null=True)),
('unit_label', djstripe.fields.StripeCharField(max_length=12, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Refund',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeIntegerField(help_text='Amount, in cents.')),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code', max_length=3)),
('failure_reason', djstripe.fields.StripeEnumField(enum=djstripe.enums.RefundFailureReason, help_text='If the refund failed, the reason for refund failure if known.', max_length=24, null=True)),
('reason', djstripe.fields.StripeEnumField(enum=djstripe.enums.RefundReason, help_text='Reason for the refund.', max_length=21, null=True)),
('receipt_number', djstripe.fields.StripeCharField(help_text='The transaction number that appears on email receipts sent for this charge.', max_length=9, null=True)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.RefundFailureReason, help_text='Status of the refund.', max_length=24)),
('charge', models.ForeignKey(help_text='The charge that was refunded', on_delete=django.db.models.deletion.CASCADE, related_name='refunds', to='djstripe.Charge')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Source',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeCurrencyField(blank=True, decimal_places=2, help_text='Amount associated with the source. This is the amount for which the source will be chargeable once ready. Required for `single_use` sources.', max_digits=8, null=True)),
('client_secret', djstripe.fields.StripeCharField(help_text='The client secret of the source. Used for client-side retrieval using a publishable key.', max_length=255)),
('currency', djstripe.fields.StripeCharField(blank=True, help_text='Three-letter ISO currency code', max_length=3, null=True)),
('flow', djstripe.fields.StripeEnumField(enum=djstripe.enums.SourceFlow, help_text='The authentication flow of the source.', max_length=17)),
('owner', djstripe.fields.StripeJSONField(help_text='Information about the owner of the payment instrument that may be used or required by particular source types.')),
('statement_descriptor', djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a source. This will appear on your customer's statement every time you charge the source.", max_length=255, null=True)),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.SourceStatus, help_text='The status of the source. Only `chargeable` sources can be used to create a charge.', max_length=10)),
('type', djstripe.fields.StripeEnumField(enum=djstripe.enums.SourceType, help_text='The type of the source.', max_length=19)),
('usage', djstripe.fields.StripeEnumField(enum=djstripe.enums.SourceUsage, help_text='Whether this source should be reusable or not. Some source types may or may not be reusable by construction, while other may leave the option at creation.', max_length=10)),
('code_verification', djstripe.fields.StripeJSONField(blank=True, help_text='Information related to the code verification flow. Present if the source is authenticated by a verification code (`flow` is `code_verification`).', null=True)),
('receiver', djstripe.fields.StripeJSONField(blank=True, help_text='Information related to the receiver flow. Present if the source is a receiver (`flow` is `receiver`).', null=True)),
('redirect', djstripe.fields.StripeJSONField(blank=True, help_text='Information related to the redirect flow. Present if the source is authenticated by a redirect (`flow` is `redirect`).', null=True)),
('source_data', djstripe.fields.StripeJSONField(help_text='The data corresponding to the source type.')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sources_v3', to='djstripe.Customer')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Subscription',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('application_fee_percent', djstripe.fields.StripePercentField(blank=True, decimal_places=2, help_text="A positive decimal that represents the fee percentage of the subscription invoice amount that will be transferred to the application owner's Stripe account each billing period.", max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)])),
('billing', djstripe.fields.StripeEnumField(enum=djstripe.enums.InvoiceBilling, help_text='Either `charge_automatically`, or `send_invoice`. When charging automatically, Stripe will attempt to pay this subscription at the end of the cycle using the default source attached to the customer. When sending an invoice, Stripe will email your customer an invoice with payment instructions.', max_length=20)),
('billing_cycle_anchor', djstripe.fields.StripeDateTimeField(help_text='Determines the date of the first full invoice, and, for plans with `month` or `year` intervals, the day of the month for subsequent invoices.', null=True)),
('cancel_at_period_end', djstripe.fields.StripeBooleanField(default=False, help_text='If the subscription has been canceled with the ``at_period_end`` flag set to true, ``cancel_at_period_end`` on the subscription will be true. You can use this attribute to determine whether a subscription that has a status of active is scheduled to be canceled at the end of the current period.')),
('canceled_at', djstripe.fields.StripeDateTimeField(blank=True, help_text='If the subscription has been canceled, the date of that cancellation. If the subscription was canceled with ``cancel_at_period_end``, canceled_at will still reflect the date of the initial cancellation request, not the end of the subscription period when the subscription is automatically moved to a canceled state.', null=True)),
('current_period_end', djstripe.fields.StripeDateTimeField(help_text='End of the current period for which the subscription has been invoiced. At the end of this period, a new invoice will be created.')),
('current_period_start', djstripe.fields.StripeDateTimeField(help_text='Start of the current period for which the subscription has been invoiced.')),
('days_until_due', djstripe.fields.StripeIntegerField(help_text='Number of days a customer has to pay invoices generated by this subscription. This value will be `null` for subscriptions where `billing=charge_automatically`.', null=True)),
('ended_at', djstripe.fields.StripeDateTimeField(blank=True, help_text='If the subscription has ended (either because it was canceled or because the customer was switched to a subscription to a new plan), the date the subscription ended.', null=True)),
('quantity', djstripe.fields.StripeIntegerField(help_text='The quantity applied to this subscription.')),
('start', djstripe.fields.StripeDateTimeField(help_text='Date the subscription started.')),
('status', djstripe.fields.StripeEnumField(enum=djstripe.enums.SubscriptionStatus, help_text='The status of this subscription.', max_length=8)),
('tax_percent', djstripe.fields.StripePercentField(blank=True, decimal_places=2, help_text='A positive decimal (with at most two decimal places) between 1 and 100. This represents the percentage of the subscription invoice subtotal that will be calculated and added as tax to the final amount each billing period.', max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)])),
('trial_end', djstripe.fields.StripeDateTimeField(blank=True, help_text='If the subscription has a trial, the end of that trial.', null=True)),
('trial_start', djstripe.fields.StripeDateTimeField(blank=True, help_text='If the subscription has a trial, the beginning of that trial.', null=True)),
('customer', models.ForeignKey(help_text='The customer associated with this subscription.', on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='djstripe.Customer')),
('plan', models.ForeignKey(help_text='The plan associated with this subscription.', on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='djstripe.Plan')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Transfer',
fields=[
('djstripe_id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', djstripe.fields.StripeIdField(max_length=255, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(default=None, help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.')),
('created', djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True)),
('djstripe_created', models.DateTimeField(auto_now_add=True)),
('djstripe_updated', models.DateTimeField(auto_now=True)),
('amount', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='The amount transferred', max_digits=8)),
('amount_reversed', djstripe.fields.StripeCurrencyField(decimal_places=2, help_text='The amount reversed (can be less than the amount attribute on the transfer if a partial reversal was issued).', max_digits=8, null=True)),
('currency', djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.', max_length=3)),
('destination', djstripe.fields.StripeIdField(help_text='ID of the bank account, card, or Stripe account the transfer was sent to.', max_length=255)),
('destination_payment', djstripe.fields.StripeIdField(help_text='If the destination is a Stripe account, this will be the ID of the payment that the destination account received for the transfer.', max_length=255, null=True)),
('reversed', djstripe.fields.StripeBooleanField(default=False, help_text='Whether or not the transfer has been fully reversed. If the transfer is only partially reversed, this attribute will still be false.')),
('source_transaction', djstripe.fields.StripeIdField(help_text='ID of the charge (or other transaction) that was used to fund the transfer. If null, the transfer was funded from the available balance.', max_length=255, null=True)),
('source_type', djstripe.fields.StripeEnumField(enum=djstripe.enums.LegacySourceType, help_text='The source balance from which this transfer came.', max_length=16)),
('transfer_group', djstripe.fields.StripeCharField(blank=True, help_text='A string that identifies this transaction as part of a group.', max_length=255, null=True)),
('date', djstripe.fields.StripeDateTimeField(help_text="Date the transfer is scheduled to arrive in the bank. This doesn't factor in delays like weekends or bank holidays.")),
('destination_type', djstripe.fields.StripeCharField(blank=True, help_text='The type of the transfer destination.', max_length=14, null=True)),
('failure_code', djstripe.fields.StripeEnumField(blank=True, enum=djstripe.enums.PayoutFailureCode, help_text='Error code explaining reason for transfer failure if available. See https://stripe.com/docs/api/python#transfer_failures.', max_length=23, null=True)),
('failure_message', djstripe.fields.StripeTextField(blank=True, help_text='Message to user further explaining reason for transfer failure if available.', null=True)),
('statement_descriptor', djstripe.fields.StripeCharField(help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The statement description may not include <>"\' characters, and will appear on your customer\'s statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.', max_length=22, null=True)),
('status', djstripe.fields.StripeEnumField(blank=True, enum=djstripe.enums.PayoutStatus, help_text='The current status of the transfer. A transfer will be pending until it is submitted to the bank, at which point it becomes in_transit. It will then change to paid if the transaction goes through. If it does not go through successfully, its status will change to failed or canceled.', max_length=10, null=True)),
('fee', djstripe.fields.StripeCurrencyField(decimal_places=2, max_digits=8, null=True)),
('fee_details', djstripe.fields.StripeJSONField(null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='WebhookEventTrigger',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('remote_ip', models.GenericIPAddressField(help_text='IP address of the request client.')),
('headers', djstripe.fields.JSONField()),
('body', models.TextField(blank=True)),
('valid', models.BooleanField(default=False, help_text='Whether or not the webhook event has passed validation')),
('processed', models.BooleanField(default=False, help_text='Whether or not the webhook event has been successfully processed')),
('exception', models.CharField(blank=True, max_length=128)),
('traceback', models.TextField(blank=True, help_text='Traceback if an exception was thrown during processing')),
('djstripe_version', models.CharField(default=djstripe.models._get_version, help_text='The version of dj-stripe when the webhook was received', max_length=32)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('event', models.ForeignKey(blank=True, help_text='Event object contained in the (valid) Webhook', null=True, on_delete=django.db.models.deletion.SET_NULL, to='djstripe.Event')),
],
),
migrations.CreateModel(
name='UpcomingInvoice',
fields=[
('invoice_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='djstripe.Invoice')),
],
options={
'abstract': False,
},
bases=('djstripe.invoice',),
),
migrations.AddField(
model_name='plan',
name='product',
field=models.ForeignKey(help_text='The product whose pricing this plan determines.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='djstripe.Product'),
),
migrations.AddField(
model_name='invoiceitem',
name='invoice',
field=models.ForeignKey(help_text='The invoice to which this invoiceitem is attached.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoiceitems', to='djstripe.Invoice'),
),
migrations.AddField(
model_name='invoiceitem',
name='plan',
field=models.ForeignKey(help_text='If the invoice item is a proration, the plan of the subscription for which the proration was computed.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoiceitems', to='djstripe.Plan'),
),
migrations.AddField(
model_name='invoiceitem',
name='subscription',
field=models.ForeignKey(help_text='The subscription that this invoice item has been created for, if any.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoiceitems', to='djstripe.Subscription'),
),
migrations.AddField(
model_name='invoice',
name='charge',
field=models.OneToOneField(help_text='The latest charge generated for this invoice, if any.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='latest_invoice', to='djstripe.Charge'),
),
migrations.AddField(
model_name='invoice',
name='customer',
field=models.ForeignKey(help_text='The customer associated with this invoice.', on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='djstripe.Customer'),
),
migrations.AddField(
model_name='invoice',
name='subscription',
field=models.ForeignKey(help_text='The subscription that this invoice was prepared for, if any.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoices', to='djstripe.Subscription'),
),
migrations.AlterUniqueTogether(
name='idempotencykey',
unique_together={('action', 'livemode')},
),
migrations.AddField(
model_name='customer',
name='default_source',
field=djstripe.fields.PaymentMethodForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='customers', to='djstripe.PaymentMethod'),
),
migrations.AddField(
model_name='customer',
name='subscriber',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djstripe_customers', to=DJSTRIPE_SUBSCRIBER_MODEL),
),
migrations.AlterUniqueTogether(
name='coupon',
unique_together={('stripe_id', 'livemode')},
),
migrations.AddField(
model_name='charge',
name='customer',
field=models.ForeignKey(help_text='The customer associated with this charge.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='djstripe.Customer'),
),
migrations.AddField(
model_name='charge',
name='dispute',
field=models.ForeignKey(help_text='Details about the dispute if the charge has been disputed.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='charges', to='djstripe.Dispute'),
),
migrations.AddField(
model_name='charge',
name='invoice',
field=models.ForeignKey(help_text='The invoice this charge is for if one exists.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='charges', to='djstripe.Invoice'),
),
migrations.AddField(
model_name='charge',
name='source',
field=djstripe.fields.PaymentMethodForeignKey(help_text='The source used for this charge.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='charges', to='djstripe.PaymentMethod'),
),
migrations.AddField(
model_name='charge',
name='transfer',
field=models.ForeignKey(help_text='The transfer to the destination account (only applicable if the charge was created using the destination parameter).', null=True, on_delete=django.db.models.deletion.CASCADE, to='djstripe.Transfer'),
),
migrations.AddField(
model_name='card',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='djstripe.Customer'),
),
migrations.AddField(
model_name='bankaccount',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bank_account', to='djstripe.Customer'),
),
migrations.AddField(
model_name='account',
name='business_logo',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djstripe.FileUpload'),
),
migrations.AlterUniqueTogether(
name='customer',
unique_together={('subscriber', 'livemode')},
),
]
| 117.549133 | 568 | 0.701945 |
8bdb04418db0218afb8c8e7e448fab98e21ba0fd | 3,772 | py | Python | tests/keras/test_initializations.py | the-moliver/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 150 | 2017-01-15T15:32:23.000Z | 2021-11-23T15:07:55.000Z | tests/keras/test_initializations.py | wdw110/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 40 | 2017-01-15T15:41:05.000Z | 2020-11-16T13:15:50.000Z | tests/keras/test_initializations.py | wdw110/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 38 | 2017-01-15T22:04:06.000Z | 2019-11-01T22:35:35.000Z | import pytest
import numpy as np
from keras import initializations
from keras import backend as K
# 2D tensor test fixture: a plain fully-connected weight shape.
FC_SHAPE = (100, 100)
# 4D convolution in th order. This shape has the same effective shape as FC_SHAPE
# ("th" = Theano dim ordering; 25*2*2 = 100 effective fan per side).
CONV_SHAPE = (25, 25, 2, 2)
# The equivalent shape of both test fixtures; used to compute expected scales.
SHAPE = (100, 100)
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None):
    """Instantiate *init* for *shape* and check its summary statistics.

    Any target left as ``None`` is simply not checked. Tolerance is 1e-2
    for every statistic.
    """
    sample = K.get_value(init(shape))
    tolerance = 1e-2
    # Checks run in a fixed order (std, mean, max, min) so a failing
    # initializer always trips the same assertion first.
    if target_std is not None:
        assert abs(sample.std() - target_std) < tolerance
    if target_mean is not None:
        assert abs(sample.mean() - target_mean) < tolerance
    if target_max is not None:
        assert abs(sample.max() - target_max) < tolerance
    if target_min is not None:
        assert abs(sample.min() - target_min) < tolerance
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_uniform(tensor_shape):
    """Uniform init: zero mean, all samples bounded by +/-0.05."""
    _runner(initializations.uniform, tensor_shape,
            target_min=-0.05, target_max=0.05, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_normal(tensor_shape):
    """Normal init: zero mean with a 0.05 standard deviation."""
    _runner(initializations.normal, tensor_shape,
            target_std=0.05, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_lecun_uniform(tensor_shape):
    """LeCun uniform draws from +/- sqrt(3 / fan_in) with zero mean."""
    bound = np.sqrt(3. / SHAPE[0])
    _runner(initializations.lecun_uniform, tensor_shape,
            target_min=-bound, target_max=bound, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_glorot_uniform(tensor_shape):
    """Glorot uniform bound is sqrt(6 / (fan_in + fan_out)), zero mean."""
    fan_sum = SHAPE[0] + SHAPE[1]
    bound = np.sqrt(6. / fan_sum)
    _runner(initializations.glorot_uniform, tensor_shape,
            target_min=-bound, target_max=bound, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_glorot_normal(tensor_shape):
    """Glorot normal stddev is sqrt(2 / (fan_in + fan_out)), zero mean."""
    stddev = np.sqrt(2. / (SHAPE[0] + SHAPE[1]))
    _runner(initializations.glorot_normal, tensor_shape,
            target_std=stddev, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_he_uniform(tensor_shape):
    """He uniform bound is sqrt(6 / fan_in), zero mean."""
    bound = np.sqrt(6. / SHAPE[0])
    _runner(initializations.he_uniform, tensor_shape,
            target_min=-bound, target_max=bound, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_he_normal(tensor_shape):
    """He normal stddev is sqrt(2 / fan_in), zero mean."""
    stddev = np.sqrt(2. / SHAPE[0])
    _runner(initializations.he_normal, tensor_shape,
            target_std=stddev, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_orthogonal(tensor_shape):
    """Orthogonal init should be centred on zero."""
    _runner(initializations.orthogonal, tensor_shape, target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_identity(tensor_shape):
    """Identity init only makes sense for 2D shapes; 4D must raise."""
    expected_mean = 1. / SHAPE[0]
    if len(tensor_shape) <= 2:
        _runner(initializations.identity, tensor_shape,
                target_mean=expected_mean, target_max=1.)
    else:
        with pytest.raises(Exception):
            _runner(initializations.identity, tensor_shape,
                    target_mean=expected_mean, target_max=1.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_zero(tensor_shape):
    """All-zeros init: mean and max are both exactly 0."""
    _runner(initializations.zero, tensor_shape, target_max=0., target_mean=0.)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_one(tensor_shape):
    """All-ones init: mean and max are both exactly 1."""
    _runner(initializations.one, tensor_shape, target_max=1., target_mean=1.)
# Allow running this test module directly; delegates to pytest's own runner.
if __name__ == '__main__':
    pytest.main([__file__])
| 34.605505 | 84 | 0.692206 |
e6555fb3c7703dba771283562f37c0d6e2a391ae | 1,610 | py | Python | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | # ------------------------------------------------- -----
# -------------------- mplwidget.py --------------------
# -------------------------------------------------- ----
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtWidgets import QInputDialog, QLineEdit, QFileDialog, QGridLayout
from PyQt5.QtGui import QIcon
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import ( NavigationToolbar2QT as NavigationToolbar )
class MplWidget(QWidget):
    """Qt widget embedding a Matplotlib canvas with its navigation toolbar.

    Subplots are laid out as a single column of ``k`` rows: ``setAxes``
    fixes ``k`` and returns the first row; ``getAxes`` adds/selects
    individual rows afterwards.
    """

    def __init__(self, parent = None):
        """Build the canvas, toolbar and vertical layout (no axes yet)."""
        QWidget.__init__(self, parent)
        self.canvas = FigureCanvas(Figure(constrained_layout=True))
        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.vertical_layout = QVBoxLayout()
        self.vertical_layout.addStretch(1)
        #self.canvas.axes = self.canvas.figure.add_subplot(111)
        self.vertical_layout.addWidget(self.canvas, QtCore.Qt.AlignTop)
        # Number of subplot rows; stays None until setAxes() is called.
        self.k = None
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.vertical_layout.addWidget(self.toolbar, stretch=0)
        self.setLayout(self.vertical_layout)
        #self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self))

    def setAxes(self, k):
        """Declare a k-row subplot grid and return axes for the first row."""
        self.k = k
        self.canvas.axes = self.canvas.figure.add_subplot(k, 1, 1)
        return self.canvas.axes

    def getAxes(self, pos):
        """Return axes for 1-based row *pos* of the grid declared in setAxes.

        NOTE(review): assumes setAxes() ran first -- self.k is still None
        otherwise and add_subplot would fail. Confirm callers respect this.
        """
        self.canvas.axes = self.canvas.figure.add_subplot(self.k, 1, pos)
        return self.canvas.axes

    def getOnlyAxes(self):
        """Return the most recently created/selected axes (may be None)."""
        return self.canvas.axes

    def clearAxes(self):
        """Forget the current axes reference; does not redraw the figure."""
        self.canvas.axes = None
1fe3cb40139a13ef20a1915610c6e3ff0091e31b | 8,168 | py | Python | ScrapedIn/ScrapedIn.py | sudhanshu-jha/Scrapers | 1203c5ed3ebb4b0664af41e95bde3fc15662af64 | [
"MIT"
] | null | null | null | ScrapedIn/ScrapedIn.py | sudhanshu-jha/Scrapers | 1203c5ed3ebb4b0664af41e95bde3fc15662af64 | [
"MIT"
] | null | null | null | ScrapedIn/ScrapedIn.py | sudhanshu-jha/Scrapers | 1203c5ed3ebb4b0664af41e95bde3fc15662af64 | [
"MIT"
] | 1 | 2019-05-29T09:54:14.000Z | 2019-05-29T09:54:14.000Z | #!/usr/bin/python
import sys
import re
import time
import xlsxwriter
import json
import argparse
import requests
import subprocess
import urllib
import math
from thready import threaded
# Python 2 hack: reload() re-exposes sys.setdefaultencoding (hidden by
# site.py) so implicit str<->unicode conversions default to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
""" Setup Argument Parameters """
parser = argparse.ArgumentParser(description='Discovery LinkedIn')
parser.add_argument('-u', '--keywords', help='Keywords to search')
parser.add_argument('-o', '--output', help='Output file (do not include extentions)')
args = parser.parse_args()
def get_search():
# Fetch the initial page to get results/page counts
#url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List()&keywords=%s&origin=GLOBAL_SEARCH_HEADER&q=guided&searchId=1489295486936&start=0' % search
url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v-%%3EPEOPLE,facetGeoRegion-%%3Ear%%3A0)&keywords=%s&origin=FACETED_SEARCH&q=guided&start=0" % search
#url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->31752)&origin=GLOBAL_SEARCH_HEADER&q=guided&start=0'
#url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->31752)&origin=OTHER&q=guided&start=0"
#url = 'https://www.linkedin.com/search/results/people/?facetCurrentCompany=%5B"75769"%5D'
headers = {'Csrf-Token':'ajax:7736867257193100830'}
cookies['JSESSIONID'] = 'ajax:7736867257193100830'
cookies['X-RestLi-Protocol-Version'] = '2.0.0'
r = requests.get(url, cookies=cookies, headers=headers)
content = json.loads(r.text)
data_total = content['paging']['total']
# Calculate pages off final results at 40 results/page
pages = data_total / 40
if data_total % 40 == 0:
# Becuase we count 0... Subtract a page if there are no left over results on the last page
pages = pages - 1
if pages == 0:
pages = 1
print "[Info] %i Results Found" % data_total
if data_total > 1000:
pages = 24
print "[Notice] LinkedIn only allows 1000 results. Refine keywords to capture all data"
print "[Info] Fetching %i Pages" % pages
print
# Set record position for XLSX
recordpos = 1
for p in range(pages):
# Request results for each page using the start offset
url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List()&keywords=%s&origin=GLOBAL_SEARCH_HEADER&q=guided&searchId=1489295486936&start=%i" % (search, p*40)
url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v-%%3EPEOPLE,facetGeoRegion-%%3Ear%%3A0)&keywords=%s&origin=FACETED_SEARCH&q=guided&start=%i" % (search, p*40)
#url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->31752)&origin=GLOBAL_SEARCH_HEADER&q=guided&start=%i' % (p*40)
#url = "https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->%s)&origin=OTHER&q=guided&start=%i" % (p*40)
#url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(v->PEOPLE,facetCurrentCompany->75769)&keywords=%s&origin=GLOBAL_SEARCH_HEADER&q=guided&searchId=1489295486936&start=%i' % (search, p*40)
#url = 'https://www.linkedin.com/voyager/api/search/cluster?count=40&guides=List(facetGeoRegion-%%3Ear%%3A0)&keywords=%s&origin=GLOBAL_SEARCH_HEADER&q=guided&searchId=1489295486936&start=%i' % (search, p*40)
#print url
#print
r = requests.get(url, cookies=cookies, headers=headers)
content = r.text.encode('UTF-8')
content = json.loads(content)
print "[Info] Fetching page %i with %i results" % (p+1,len(content['elements'][0]['elements']))
for c in content['elements'][0]['elements']:
try:
if c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['headless'] == False:
try:
data_industry = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['industry']
except:
data_industry = ""
data_firstname = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['firstName']
data_lastname = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['lastName']
data_slug = "https://www.linkedin.com/in/%s" % c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['publicIdentifier']
data_occupation = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['occupation']
data_location = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['location']
data_skills = c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['skills']
try:
data_picture = "https://media.licdn.com/mpr/mpr/shrinknp_400_400%s" % c['hitInfo']['com.linkedin.voyager.search.SearchProfile']['miniProfile']['picture']['com.linkedin.voyager.common.MediaProcessorImage']['id']
except:
print "%s %s, %s" % (data_firstname, data_lastname, data_occupation ,data_skills)
# print "[Notice] No picture found for %s %s, %s" % (data_firstname, data_lastname, data_occupation)
data_picture = ""
# Write data to XLSX file
worksheet1.write('A%i' % recordpos, data_firstname)
worksheet1.write('B%i' % recordpos, data_lastname)
worksheet1.write('C%i' % recordpos, data_occupation)
worksheet1.write('D%i' % recordpos, data_location)
worksheet1.write('E%i' % recordpos, data_industry)
worksheet1.write('F%i' % recordpos, data_slug)
worksheet1.write('G%i' % recordpos, data_picture)
worksheet1.write('G%i' % recordpos, data_skills)
worksheet2.write('A%i' % recordpos, '=IMAGE(dataset!G%i)' % recordpos)
worksheet2.write('B%i' % recordpos, '=dataset!A%i&" "&dataset!B%i&"\n"&dataset!C%i&"\n"&dataset!D%i&"\n"&dataset!E%i' % (recordpos,recordpos,recordpos,recordpos,recordpos))
worksheet2.write('C%i' % recordpos, '=HYPERLINK(dataset!F%i)' % recordpos)
worksheet2.set_row(recordpos-1,125)
# Increment Record Position
recordpos = recordpos + 1
else:
print "[Notice] Headless profile found. Skipping"
except:
print "[Notice] Skipping"
continue
print
def authenticate():
    """Obtain a fresh LinkedIn ``li_at`` session cookie.

    Delegates the actual login to the external SI_login.py helper and
    returns a cookie dict suitable for requests.get(..., cookies=...).
    Exits the process on any failure.
    """
    try:
        # SI_login.py prints the li_at session token on stdout.
        session = subprocess.Popen(['python', 'SI_login.py'], stdout=subprocess.PIPE).communicate()[0].replace("\n","")
        if len(session) == 0:
            sys.exit("[Error] Unable to login to LinkedIn.com")
        print "[Info] Obtained new session: %s" % session
        cookies = dict(li_at=session)
    except Exception, e:
        sys.exit("[Fatal] Could not authenticate to linkedin. %s" % e)
    return cookies
if __name__ == '__main__':
    # Use CLI flags when given, otherwise prompt interactively.
    search = args.keywords if args.keywords!=None else raw_input("Enter search Keywords (use quotes for more percise results)\n")
    outfile = args.output if args.output!=None else raw_input("Enter filename for output (exclude file extension)\n")
    print
    # URL-encode the keywords for the query string.
    search = urllib.quote_plus(search)
    cookies = authenticate()
    # Create the XLSX workbook with a raw 'dataset' sheet and a 'report'
    # sheet that renders pictures and profile summaries via formulas.
    workbook = xlsxwriter.Workbook('results/%s.xlsx' % outfile)
    worksheet1 = workbook.add_worksheet('dataset')
    worksheet2 = workbook.add_worksheet('report')
    worksheet2.set_column(0,0, 25)
    worksheet2.set_column(1,2, 75)
    # Run the scrape; get_search() reads the globals set up above.
    get_search()
    # Close the XLSX file so all buffered rows are flushed to disk.
    workbook.close()
| 55.189189 | 234 | 0.635651 |
5fb7e29dd525816c207e25237e85c960c85c7ce1 | 90 | py | Python | inheritance/exercise/problem_05/beverage/hot_beverage.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | inheritance/exercise/problem_05/beverage/hot_beverage.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | inheritance/exercise/problem_05/beverage/hot_beverage.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | from problem_05.beverage.beverage import Beverage
class HotBeverage(Beverage):
    """Marker subclass for hot drinks; all behaviour comes from Beverage."""
    pass
| 15 | 49 | 0.8 |
6a29a6d79737ac95f738ef0985a0585cb9255d22 | 31,441 | py | Python | oidc_provider/tests/cases/test_authorize_endpoint.py | SelfHacked/django-oidc-provider | 421f7f58d311a7e875d9512c24f4cd5d41c29992 | [
"MIT"
] | 1 | 2020-07-23T21:37:03.000Z | 2020-07-23T21:37:03.000Z | oidc_provider/tests/cases/test_authorize_endpoint.py | SelfHacked/django-oidc-provider | 421f7f58d311a7e875d9512c24f4cd5d41c29992 | [
"MIT"
] | null | null | null | oidc_provider/tests/cases/test_authorize_endpoint.py | SelfHacked/django-oidc-provider | 421f7f58d311a7e875d9512c24f4cd5d41c29992 | [
"MIT"
] | 1 | 2021-06-03T12:01:56.000Z | 2021-06-03T12:01:56.000Z | from oidc_provider.lib.errors import RedirectUriError
try:
from urllib.parse import urlencode, quote
except ImportError:
from urllib import urlencode, quote
try:
from urllib.parse import parse_qs, urlsplit
except ImportError:
from urlparse import parse_qs, urlsplit
import uuid
from mock import patch, mock
from django.contrib.auth.models import AnonymousUser
from django.core.management import call_command
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.test import (
RequestFactory,
override_settings,
)
from django.test import TestCase
from jwkest.jwt import JWT
from oidc_provider import settings
from oidc_provider.tests.app.utils import (
create_fake_user,
create_fake_client,
FAKE_CODE_CHALLENGE,
is_code_valid,
)
from oidc_provider.lib.utils.authorize import strip_prompt_login
from oidc_provider.views import AuthorizeView
from oidc_provider.lib.endpoints.authorize import AuthorizeEndpoint
class AuthorizeEndpointMixin(object):
    """Helper mixin that fires GET/POST requests at the authorize endpoint."""

    def _auth_request(self, method, data=None, is_user_authenticated=False):
        """Dispatch an authorization request and return the view's response.

        ``data`` is sent as the query string (GET) or the form body (POST).
        The request user is ``self.user`` when ``is_user_authenticated`` is
        True, otherwise an anonymous user.
        """
        if data is None:
            data = {}
        url = reverse('oidc_provider:authorize')
        verb = method.lower()
        if verb == 'get':
            encoded = urlencode(data).replace('+', '%20')
            if encoded:
                url = url + '?' + encoded
            request = self.factory.get(url)
        elif verb == 'post':
            request = self.factory.post(url, data=data)
        else:
            raise Exception('Method unsupported for an Authorization Request.')
        # Attach the (possibly anonymous) user, then invoke the view directly.
        request.user = self.user if is_user_authenticated else AnonymousUser()
        return AuthorizeView.as_view()(request)
class AuthorizationCodeFlowTestCase(TestCase, AuthorizeEndpointMixin):
"""
Test cases for Authorize Endpoint using Code Flow.
"""
    def setUp(self):
        """Create the signing key, a user and one client per consent/visibility combo."""
        call_command('creatersakey')
        self.factory = RequestFactory()
        self.user = create_fake_user()
        # NOTE(review): ``self.client`` shadows Django TestCase's built-in
        # test client; these tests go through ``self.factory`` instead, so
        # it is harmless here -- but confirm before relying on self.client
        # as an HTTP client in new tests.
        self.client = create_fake_client(response_type='code')
        self.client_with_no_consent = create_fake_client(
            response_type='code', require_consent=False)
        self.client_public = create_fake_client(response_type='code', is_public=True)
        self.client_public_with_no_consent = create_fake_client(
            response_type='code', is_public=True, require_consent=False)
        # Fresh random values per test for the OAuth2 state / OIDC nonce.
        self.state = uuid.uuid4().hex
        self.nonce = uuid.uuid4().hex
def test_missing_parameters(self):
"""
If the request fails due to a missing, invalid, or mismatching
redirection URI, or if the client identifier is missing or invalid,
the authorization server SHOULD inform the resource owner of the error.
See: https://tools.ietf.org/html/rfc6749#section-4.1.2.1
"""
response = self._auth_request('get')
self.assertEqual(response.status_code, 200)
self.assertEqual(bool(response.content), True)
def test_invalid_response_type(self):
"""
The OP informs the RP by using the Error Response parameters defined
in Section 4.1.2.1 of OAuth 2.0.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthError
"""
# Create an authorize request with an unsupported response_type.
data = {
'client_id': self.client.client_id,
'response_type': 'something_wrong',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.has_header('Location'), True)
# Should be an 'error' component in query.
self.assertIn('error=', response['Location'])
def test_user_not_logged(self):
"""
The Authorization Server attempts to Authenticate the End-User by
redirecting to the login view.
See: http://openid.net/specs/openid-connect-core-1_0.html#Authenticates
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
# Check if user was redirected to the login view.
self.assertIn(settings.get('OIDC_LOGIN_URL'), response['Location'])
def test_user_consent_inputs(self):
"""
Once the End-User is authenticated, the Authorization Server MUST
obtain an authorization decision before releasing information to
the Client.
See: http://openid.net/specs/openid-connect-core-1_0.html#Consent
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
# PKCE parameters.
'code_challenge': FAKE_CODE_CHALLENGE,
'code_challenge_method': 'S256',
}
response = self._auth_request('get', data, is_user_authenticated=True)
# Check if hidden inputs exists in the form,
# also if their values are valid.
input_html = '<input name="{0}" type="hidden" value="{1}" />'
to_check = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': 'code',
'code_challenge': FAKE_CODE_CHALLENGE,
'code_challenge_method': 'S256',
}
for key, value in iter(to_check.items()):
is_input_ok = input_html.format(key, value) in response.content.decode('utf-8')
self.assertEqual(is_input_ok, True, msg='Hidden input for "' + key + '" fails.')
    def test_user_consent_response(self):
        """
        First,
        if the user denied the consent we must ensure that
        the error response parameters are added to the query component
        of the Redirection URI.
        Second,
        if the user allow the RP then the server MUST return
        the parameters defined in Section 4.1.2 of OAuth 2.0 [RFC6749]
        by adding them as query parameters to the redirect_uri.
        """
        data = {
            'client_id': self.client.client_id,
            'redirect_uri': self.client.default_redirect_uri,
            'response_type': 'code',
            'scope': 'openid email',
            'state': self.state,
            # PKCE parameters.
            'code_challenge': FAKE_CODE_CHALLENGE,
            'code_challenge_method': 'S256',
        }
        # Phase 1: no 'allow' key means the user denied the request, so an
        # error parameter must appear in the redirect query.
        response = self._auth_request('post', data, is_user_authenticated=True)
        self.assertIn('error=', response['Location'], msg='error param is missing in query.')
        self.assertIn(
            'access_denied', response['Location'], msg='"access_denied" code is missing in query.')
        # Phase 2: simulate user authorization ('allow' is the button value).
        data['allow'] = 'Accept'
        response = self._auth_request('post', data, is_user_authenticated=True)
        is_code_ok = is_code_valid(url=response['Location'],
                                   user=self.user,
                                   client=self.client)
        self.assertEqual(is_code_ok, True, msg='Code returned is invalid.')
        # The original state must be echoed back in the redirect.
        state = (response['Location'].split('state='))[1].split('&')[0]
        self.assertEqual(state, self.state, msg='State change or is missing.')
def test_user_consent_skipped(self):
"""
If users previously gave consent to some client (for a specific
list of scopes) and because they might be prompted for the same
authorization multiple times, the server skip it.
"""
data = {
'client_id': self.client_with_no_consent.client_id,
'redirect_uri': self.client_with_no_consent.default_redirect_uri,
'response_type': 'code',
'scope': 'openid email',
'state': self.state,
'allow': 'Accept',
}
request = self.factory.post(reverse('oidc_provider:authorize'),
data=data)
# Simulate that the user is logged.
request.user = self.user
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('code', response['Location'], msg='Code is missing in the returned url.')
response = self._auth_request('post', data, is_user_authenticated=True)
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client_with_no_consent)
self.assertEqual(is_code_ok, True, msg='Code returned is invalid.')
del data['allow']
response = self._auth_request('get', data, is_user_authenticated=True)
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client_with_no_consent)
self.assertEqual(is_code_ok, True, msg='Code returned is invalid or missing.')
def test_response_uri_is_properly_constructed(self):
"""
Check that the redirect_uri matches the one configured for the client.
Only 'state' and 'code' should be appended.
"""
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': 'code',
'scope': 'openid email',
'state': self.state,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
parsed = urlsplit(response['Location'])
params = parse_qs(parsed.query or parsed.fragment)
state = params['state'][0]
self.assertEquals(self.state, state, msg="State returned is invalid or missing")
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client)
self.assertTrue(is_code_ok, msg='Code returned is invalid or missing')
self.assertEquals(
set(params.keys()), {'state', 'code'},
msg='More than state or code appended as query params')
self.assertTrue(
response['Location'].startswith(self.client.default_redirect_uri),
msg='Different redirect_uri returned')
def test_unknown_redirect_uris_are_rejected(self):
"""
If a redirect_uri is not registered with the client the request must be rejected.
See http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest.
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': 'http://neverseenthis.com',
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
self.assertIn(
RedirectUriError.error, response.content.decode('utf-8'), msg='No redirect_uri error')
def test_manipulated_redirect_uris_are_rejected(self):
"""
If a redirect_uri does not exactly match the registered uri it must be rejected.
See http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest.
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': self.client.default_redirect_uri + "?some=query",
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
self.assertIn(
RedirectUriError.error, response.content.decode('utf-8'), msg='No redirect_uri error')
    def test_public_client_auto_approval(self):
        """
        It's recommended not auto-approving requests for non-confidential
        clients using Authorization Code.
        """
        data = {
            'client_id': self.client_public_with_no_consent.client_id,
            'response_type': 'code',
            'redirect_uri': self.client_public_with_no_consent.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }

        response = self._auth_request('get', data, is_user_authenticated=True)

        # Even though this client is configured to skip consent, a public
        # client using the code flow must still be shown the consent page.
        self.assertIn('Request for Permission', response.content.decode('utf-8'))
    def test_prompt_none_parameter(self):
        """
        ``prompt=none`` forbids any user interaction, so the server must
        answer with an error instead of showing a login or consent page.

        See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
        """
        data = {
            'client_id': self.client.client_id,
            'response_type': next(self.client.response_type_values()),
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
            'prompt': 'none'
        }

        response = self._auth_request('get', data)

        # An error is returned if an End-User is not already authenticated.
        self.assertIn('login_required', response['Location'])

        response = self._auth_request('get', data, is_user_authenticated=True)

        # An error is returned if the Client does not have pre-configured
        # consent for the requested Claims.
        self.assertIn('consent_required', response['Location'])
@patch('oidc_provider.views.django_user_logout')
def test_prompt_login_parameter(self, logout_function):
"""
Specifies whether the Authorization Server prompts the End-User for
reauthentication and consent.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
data = {
'client_id': self.client.client_id,
'response_type': next(self.client.response_type_values()),
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
'prompt': 'login'
}
response = self._auth_request('get', data)
self.assertIn(settings.get('OIDC_LOGIN_URL'), response['Location'])
self.assertNotIn(
quote('prompt=login'),
response['Location'],
"Found prompt=login, this leads to infinite login loop. See "
"https://github.com/juanifioren/django-oidc-provider/issues/197."
)
response = self._auth_request('get', data, is_user_authenticated=True)
self.assertIn(settings.get('OIDC_LOGIN_URL'), response['Location'])
self.assertTrue(logout_function.called_once())
self.assertNotIn(
quote('prompt=login'),
response['Location'],
"Found prompt=login, this leads to infinite login loop. See "
"https://github.com/juanifioren/django-oidc-provider/issues/197."
)
def test_prompt_login_none_parameter(self):
"""
Specifies whether the Authorization Server prompts the End-User for
reauthentication and consent.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
data = {
'client_id': self.client.client_id,
'response_type': next(self.client.response_type_values()),
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
'prompt': 'login none'
}
response = self._auth_request('get', data)
self.assertIn('login_required', response['Location'])
response = self._auth_request('get', data, is_user_authenticated=True)
self.assertIn('login_required', response['Location'])
@patch('oidc_provider.views.render')
def test_prompt_consent_parameter(self, render_patched):
"""
Specifies whether the Authorization Server prompts the End-User for
reauthentication and consent.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
data = {
'client_id': self.client.client_id,
'response_type': next(self.client.response_type_values()),
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
'prompt': 'consent'
}
response = self._auth_request('get', data)
self.assertIn(settings.get('OIDC_LOGIN_URL'), response['Location'])
response = self._auth_request('get', data, is_user_authenticated=True)
render_patched.assert_called_once()
self.assertTrue(
render_patched.call_args[0][1], settings.get('OIDC_TEMPLATES')['authorize'])
def test_prompt_consent_none_parameter(self):
"""
Specifies whether the Authorization Server prompts the End-User for
reauthentication and consent.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
data = {
'client_id': self.client.client_id,
'response_type': next(self.client.response_type_values()),
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
'prompt': 'consent none'
}
response = self._auth_request('get', data)
self.assertIn('login_required', response['Location'])
response = self._auth_request('get', data, is_user_authenticated=True)
self.assertIn('consent_required', response['Location'])
def test_strip_prompt_login(self):
"""
Test for helper method test_strip_prompt_login.
"""
# Original paths
path0 = 'http://idp.com/?prompt=login'
path1 = 'http://idp.com/?prompt=consent login none'
path2 = ('http://idp.com/?response_type=code&client' +
'_id=112233&prompt=consent login')
path3 = ('http://idp.com/?response_type=code&client' +
'_id=112233&prompt=login none&redirect_uri' +
'=http://localhost:8000')
self.assertNotIn('prompt', strip_prompt_login(path0))
self.assertIn('prompt', strip_prompt_login(path1))
self.assertIn('consent', strip_prompt_login(path1))
self.assertIn('none', strip_prompt_login(path1))
self.assertNotIn('login', strip_prompt_login(path1))
self.assertIn('prompt', strip_prompt_login(path2))
self.assertIn('consent', strip_prompt_login(path1))
self.assertNotIn('login', strip_prompt_login(path2))
self.assertIn('prompt', strip_prompt_login(path3))
self.assertIn('none', strip_prompt_login(path3))
self.assertNotIn('login', strip_prompt_login(path3))
class AuthorizationImplicitFlowTestCase(TestCase, AuthorizeEndpointMixin):
    """
    Test cases for Authorization Endpoint using Implicit Flow.
    """

    def setUp(self):
        call_command('creatersakey')
        self.factory = RequestFactory()
        self.user = create_fake_user()
        # Confidential and public clients covering every implicit
        # response_type combination exercised below.
        self.client = create_fake_client(response_type='id_token token')
        self.client_public = create_fake_client(response_type='id_token token', is_public=True)
        self.client_public_no_consent = create_fake_client(
            response_type='id_token token', is_public=True,
            require_consent=False)
        self.client_no_access = create_fake_client(response_type='id_token')
        self.client_public_no_access = create_fake_client(response_type='id_token', is_public=True)
        self.client_multiple_response_types = create_fake_client(
            response_type=('id_token', 'id_token token'))
        self.state = uuid.uuid4().hex
        self.nonce = uuid.uuid4().hex

    def test_missing_nonce(self):
        """
        The `nonce` parameter is REQUIRED if you use the Implicit Flow.
        """
        data = {
            'client_id': self.client.client_id,
            'response_type': next(self.client.response_type_values()),
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }

        response = self._auth_request('get', data, is_user_authenticated=True)

        # Implicit responses are delivered in the uri fragment.
        self.assertIn('#error=invalid_request', response['Location'])

    def test_idtoken_token_response(self):
        """
        Implicit client requesting `id_token token` receives both id token
        and access token as the result of the authorization request.
        """
        data = {
            'client_id': self.client.client_id,
            'redirect_uri': self.client.default_redirect_uri,
            'response_type': next(self.client.response_type_values()),
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])

        # Same request again for the public client.
        # BUG FIX: these assignments previously ended with stray trailing
        # commas, silently turning every value into a 1-tuple.
        data['client_id'] = self.client_public.client_id
        data['redirect_uri'] = self.client_public.default_redirect_uri
        data['response_type'] = next(self.client_public.response_type_values())

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])

    def test_idtoken_response(self):
        """
        Implicit client requesting `id_token` receives
        only an id token as the result of the authorization request.
        """
        data = {
            'client_id': self.client_no_access.client_id,
            'redirect_uri': self.client_no_access.default_redirect_uri,
            'response_type': next(self.client_no_access.response_type_values()),
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertNotIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])

        # Same request again for the public client.
        # BUG FIX: stray trailing commas removed here as well (see above).
        data['client_id'] = self.client_public_no_access.client_id
        data['redirect_uri'] = self.client_public_no_access.default_redirect_uri
        data['response_type'] = next(self.client_public_no_access.response_type_values())

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertNotIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])

    def test_idtoken_token_at_hash(self):
        """
        Implicit client requesting `id_token token` receives
        `at_hash` in `id_token`.
        """
        data = {
            'client_id': self.client.client_id,
            'redirect_uri': self.client.default_redirect_uri,
            'response_type': next(self.client.response_type_values()),
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertIn('id_token', response['Location'])

        # obtain `id_token` portion of Location
        components = urlsplit(response['Location'])
        fragment = parse_qs(components[4])
        id_token = JWT().unpack(fragment["id_token"][0].encode('utf-8')).payload()

        self.assertIn('at_hash', id_token)

    def test_idtoken_at_hash(self):
        """
        Implicit client requesting `id_token` should not receive
        `at_hash` in `id_token`.
        """
        data = {
            'client_id': self.client_no_access.client_id,
            'redirect_uri': self.client_no_access.default_redirect_uri,
            'response_type': next(self.client_no_access.response_type_values()),
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertIn('id_token', response['Location'])

        # obtain `id_token` portion of Location
        components = urlsplit(response['Location'])
        fragment = parse_qs(components[4])
        id_token = JWT().unpack(fragment["id_token"][0].encode('utf-8')).payload()

        self.assertNotIn('at_hash', id_token)

    def test_public_client_implicit_auto_approval(self):
        """
        Public clients using Implicit Flow should be able to reuse consent.
        """
        data = {
            'client_id': self.client_public_no_consent.client_id,
            'response_type': next(self.client_public_no_consent.response_type_values()),
            'redirect_uri': self.client_public_no_consent.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
        }

        response = self._auth_request('get', data, is_user_authenticated=True)
        response_text = response.content.decode('utf-8')
        # No consent page is rendered; tokens come straight back in the
        # redirect fragment.  (assertEquals is a deprecated alias.)
        self.assertEqual(response_text, '')

        components = urlsplit(response['Location'])
        fragment = parse_qs(components[4])
        self.assertIn('access_token', fragment)
        self.assertIn('id_token', fragment)
        self.assertIn('expires_in', fragment)

    def test_multiple_response_types(self):
        """
        Clients should be able to be configured for multiple response types.
        """
        data = {
            'client_id': self.client_multiple_response_types.client_id,
            'redirect_uri': self.client_multiple_response_types.default_redirect_uri,
            'response_type': 'id_token',
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertNotIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])

        # should also support "id_token token" response_type
        data['response_type'] = 'id_token token'

        response = self._auth_request('post', data, is_user_authenticated=True)

        self.assertIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])
class AuthorizationHybridFlowTestCase(TestCase, AuthorizeEndpointMixin):
    """
    Test cases for Authorization Endpoint using Hybrid Flow.
    """

    def setUp(self):
        call_command('creatersakey')
        self.factory = RequestFactory()
        self.user = create_fake_user()
        self.client_code_idtoken_token = create_fake_client(
            response_type='code id_token token', is_public=True)
        self.state = uuid.uuid4().hex
        self.nonce = uuid.uuid4().hex

        # Base data for the auth request.
        self.data = {
            'client_id': self.client_code_idtoken_token.client_id,
            'redirect_uri': self.client_code_idtoken_token.default_redirect_uri,
            'response_type': next(self.client_code_idtoken_token.response_type_values()),
            'scope': 'openid email',
            'state': self.state,
            'nonce': self.nonce,
            'allow': 'Accept',
        }

    def test_code_idtoken_token_response(self):
        """
        Hybrid client requesting `code id_token token` receives an
        authorization code together with the id token and access token,
        all delivered in the fragment of the redirect.
        """
        response = self._auth_request('post', self.data, is_user_authenticated=True)

        # Hybrid responses are delivered in the uri fragment, not the query.
        self.assertIn('#', response['Location'])
        self.assertIn('access_token', response['Location'])
        self.assertIn('id_token', response['Location'])
        self.assertIn('state', response['Location'])
        self.assertIn('code', response['Location'])

        # Validate code.
        is_code_ok = is_code_valid(url=response['Location'],
                                   user=self.user,
                                   client=self.client_code_idtoken_token)
        self.assertEqual(is_code_ok, True, msg='Code returned is invalid.')

    @override_settings(OIDC_TOKEN_EXPIRE=36000)
    def test_access_token_expiration(self):
        """
        Add ten hours of expiration to access_token. Check for the expires_in query in fragment.
        """
        response = self._auth_request('post', self.data, is_user_authenticated=True)

        self.assertIn('expires_in=36000', response['Location'])
class TestCreateResponseURI(TestCase):
    # Exercises AuthorizeEndpoint.create_response_uri directly, without
    # going through the view layer.

    def setUp(self):
        url = reverse('oidc_provider:authorize')
        user = create_fake_user()
        client = create_fake_client(response_type='code', is_public=True)

        # Base data to create a uri response
        data = {
            'client_id': client.client_id,
            'redirect_uri': client.default_redirect_uri,
            'response_type': next(client.response_type_values()),
        }

        factory = RequestFactory()
        self.request = factory.post(url, data=data)
        self.request.user = user

    @patch('oidc_provider.lib.endpoints.authorize.create_code')
    @patch('oidc_provider.lib.endpoints.authorize.logger.exception')
    def test_create_response_uri_logs_to_error(self, log_exception, create_code):
        """
        A lot can go wrong when creating a response uri and this is caught
        with a general Exception error. The information contained within this
        error should show up in the error log so production servers have something
        to work with when things don't work as expected.
        """
        exception = Exception("Something went wrong!")
        create_code.side_effect = exception

        authorization_endpoint = AuthorizeEndpoint(self.request)
        authorization_endpoint.validate_params()

        # The endpoint re-raises after logging.
        with self.assertRaises(Exception):
            authorization_endpoint.create_response_uri()

        # The caught exception must be logged with its original message.
        log_exception.assert_called_once_with(
            '[Authorize] Error when trying to create response uri: %s', exception)

    @override_settings(OIDC_SESSION_MANAGEMENT_ENABLE=True)
    def test_create_response_uri_generates_session_state_if_session_management_enabled(self):
        # RequestFactory doesn't support sessions, so we mock it
        self.request.session = mock.Mock(session_key=None)
        # mock `self.request.session.get('salt')`
        self.request.session.get.return_value = 'TEST'

        authorization_endpoint = AuthorizeEndpoint(self.request)
        authorization_endpoint.validate_params()

        uri = authorization_endpoint.create_response_uri()

        # Session management appends a `session_state` parameter to the uri.
        self.assertIn('session_state=', uri)
| 39.105721 | 99 | 0.63115 |
41522b3c9afa2534ad237a228db44f0e3a164a2b | 3,769 | py | Python | FrontEndTool/settings.py | ashanyue/FrontEndTool | 2a2fce2da466d62ced53b6543630abe37cb23861 | [
"MIT"
] | 1 | 2020-12-16T21:22:33.000Z | 2020-12-16T21:22:33.000Z | FrontEndTool/settings.py | ashanyue/FrontEndTool | 2a2fce2da466d62ced53b6543630abe37cb23861 | [
"MIT"
] | null | null | null | FrontEndTool/settings.py | ashanyue/FrontEndTool | 2a2fce2da466d62ced53b6543630abe37cb23861 | [
"MIT"
] | null | null | null | """
Django settings for FrontEndTool project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (or local_settings) before any real deployment.
SECRET_KEY = ')+y4$wte-zpngth7_mtdqx@k4_!%i+=jfd2gp4ew1lm2@t+tx('

# Password salt
PWD_SALT = 'zpngth7_mtdqx'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): '*' accepts any Host header; restrict this to the real
# hostnames in production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    "SysCore.apps.SyscoreConfig",
    'HomeBanner.apps.HomebannerConfig',
    'ckeditor',
    'PromoIntro.apps.PromointroConfig',
    'BigWin.apps.BigwinConfig',
    'SuperAdmin.apps.SuperadminConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'FrontEndTool.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'FrontEndTool.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'front.sqlite3'),
    }
}

# Allow arbitrary HTML content inside CKEditor rich-text fields.
CKEDITOR_CONFIGS = {
    'default': {
        'allowedContent': True
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'zh-Hans'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = (
    os.path.join(BASE_DIR, 'static')
)
# STATICFILES_DIRS = [
#     os.path.join(BASE_DIR, "static"),
#     # '/var/www/static/',
# ]

MEDIA_URL = '/media/'
MEDIA_ROOT = (
    os.path.join(BASE_DIR, 'static/media')
)

# Allow machine-specific overrides without editing this file.
try:
    from .local_settings import *
except ImportError:
    pass
| 26.173611 | 91 | 0.69143 |
401c5763d9eed6a14491b1a02db7f96db1cbc801 | 2,626 | py | Python | installer.py | aws-greengrass/aws-greengrass-cloudwatch-metrics | b428cb0ca78ff1f67e346d6d4cce7994f6462860 | [
"Apache-2.0"
] | 1 | 2022-03-23T16:25:48.000Z | 2022-03-23T16:25:48.000Z | installer.py | aws-greengrass/aws-greengrass-cloudwatch-metrics | b428cb0ca78ff1f67e346d6d4cce7994f6462860 | [
"Apache-2.0"
] | 1 | 2022-03-23T16:35:49.000Z | 2022-03-23T20:23:32.000Z | installer.py | aws-greengrass/aws-greengrass-cloudwatch-metrics | b428cb0ca78ff1f67e346d6d4cce7994f6462860 | [
"Apache-2.0"
] | null | null | null | import os
import logging
import subprocess
import sys

# Log to stdout so the component runner captures installer output.
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
logger.setLevel(logging.INFO)
logger.addHandler(handler)

# ALL_PROXY is set in the component environment when the device sits
# behind a proxy; its scheme decides how pip must be invoked.
proxy_url = os.environ.get("ALL_PROXY")
if proxy_url is not None:
    # Just try to check if proxy is http or https
    # This does not validate the full url format
    scheme = proxy_url.split(":")[0]
    if scheme == "https":
        logger.info("Creating certificate bundle with proxy root CA, and installing 'boto3', 'awsiotsdk' and 'urllib3>=1.26.7'")
        # Locate a certifi module for the default CA bundle; fall back to
        # the copy vendored inside pip.
        try:
            import certifi
        except ImportError:
            try:
                from pip._vendor import certifi
            except Exception:
                logger.exception(
                    "Error creating certificate bundle with proxy root CA. Python certifi module is not available on the device")
                sys.exit(1)
        # Concatenate certifi's bundle with the Greengrass root CA
        # (GG_ROOT_CA_PATH) so pip can trust the TLS-intercepting proxy.
        try:
            with open(certifi.where(), 'r') as certify_root_ca, open(os.environ.get("GG_ROOT_CA_PATH"), 'r') as gg_root_ca, open('./ca-bundle.crt', 'w') as custom_cert_bundle:
                custom_cert_bundle.write(certify_root_ca.read())
                custom_cert_bundle.write(gg_root_ca.read())
        except Exception:
            logger.exception("Error creating certificate bundle with proxy root CA")
            sys.exit(1)
        # Note: sys.exit(0) raises SystemExit (a BaseException), so the
        # broad `except Exception` below does not swallow a successful exit.
        try:
            subprocess.check_call(
                [sys.executable, '-m', 'pip', 'install', '--cert', './ca-bundle.crt', 'boto3', 'awsiotsdk', 'urllib3>=1.26.7', '--user'])
            sys.exit(0)
        except Exception:
            logger.exception(
                "Error installing dependencies. Please set 'UseInstaller' to 'False' and pre-install 'boto3', 'awsiotsdk' and 'urllib3>=1.26.7'")
            sys.exit(1)
    if scheme == "http":
        logger.info("Installing 'boto3', 'awsiotsdk' and 'urllib3>=1.26.7'")
        try:
            subprocess.check_call(
                [sys.executable, '-m', 'pip', 'install', 'boto3', 'awsiotsdk', 'urllib3>=1.26.7', '--user'])
            sys.exit(0)
        except Exception:
            logger.exception(
                "Error installing dependencies. Please set 'UseInstaller' to 'False' and pre-install 'boto3', 'awsiotsdk' and 'urllib3>=1.26.7'")
            sys.exit(1)

# No proxy (or an unrecognized scheme): plain install without the urllib3 pin.
logger.info("Installing 'boto3' and 'awsiotsdk'")
try:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'boto3', 'awsiotsdk', '--user'])
except Exception:
    logger.exception(
        "Error installing dependencies. Please set 'UseInstaller' to 'False' and pre-install 'boto3' and 'awsiotsdk'")
    sys.exit(1)
| 41.68254 | 175 | 0.611196 |
6319d6359111b8f09c0dcb517799092c2d2b6cff | 6,546 | py | Python | frontend_assets/templatetags/frontend_assets.py | avryhof/frontend_assets | 70c562524754b1f4a56266ada91a9bb7f5fdb035 | [
"MIT"
] | null | null | null | frontend_assets/templatetags/frontend_assets.py | avryhof/frontend_assets | 70c562524754b1f4a56266ada91a9bb7f5fdb035 | [
"MIT"
] | null | null | null | frontend_assets/templatetags/frontend_assets.py | avryhof/frontend_assets | 70c562524754b1f4a56266ada91a9bb7f5fdb035 | [
"MIT"
] | null | null | null | """
@copyright Amos Vryhof
"""
import json
import os
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from .utils import join_url, render_css, render_javascript, render_javascript_code
# Tag library registry required by Django's template engine.
register = template.Library()

# Site-wide configuration with sensible fall-backs.
static_url = getattr(settings, 'STATIC_URL', '/static/')
static_path = getattr(settings, "STATIC_ROOT", False)
use_cdn_default = getattr(settings, 'FRONTEND_USE_CDN', False)

# Load the CDN url/integrity map shipped with the package.  A context
# manager guarantees the file handle is closed even if json.load() raises
# (the original opened the file and only closed it on the success path).
with open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static', 'cdn.json')) as cdn_config_file:
    cdn_config = json.load(cdn_config_file)
@register.simple_tag
def fontawesome4_css(use_cdn=use_cdn_default):
    """Render the stylesheet tag for Font Awesome 4 (CDN or local copy)."""
    if not use_cdn:
        return render_css(join_url(static_url, 'css', 'font-awesome-4.min.css'))
    return render_css({
        'href': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css',
        'integrity': 'sha256-eZrrJcwDc/3uDhsdt61sL2oOBY362qM3lon1gyExkL0='
    })
@register.simple_tag
def fontawesome5_css(shim=False, use_cdn=use_cdn_default):
    """Render stylesheet tag(s) for Font Awesome 5, optionally with v4 shims."""
    if use_cdn:
        stylesheets = [{
            'href': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css',
            'integrity': 'sha256-BtbhCIbtfeVWGsqxk1vOHEYXS6qcvQvLMZqjtpWUEx8='
        }]
        shim_entry = {
            'href': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/v4-shims.min.css',
            'integrity': 'sha256-D48AdNzxAOgva7Z31xE1yn/NfdqzjqOAzg/5P3CK1QM='
        }
    else:
        stylesheets = [join_url(static_url, 'css', 'all.min.css')]
        shim_entry = join_url(static_url, 'css', 'v4-shims.min.css')
    if shim:
        stylesheets.append(shim_entry)
    return render_css(stylesheets)
@register.simple_tag
def fontawesome5_javascript(shim=False, use_cdn=use_cdn_default):
    """Render script tags for Font Awesome 5, optionally with v4 shims."""
    if use_cdn:
        scripts = [
            {
                'src': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/js/fontawesome.min.js',
                'integrity': 'sha256-KZjyrvXj0bZPo6kaV2/gP3h2LXakV/QALQ6UmBhzqD0='
            },
            {
                'src': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/js/all.min.js',
                'integrity': 'sha256-JgGtkjMEDh4dZj7UtWqSbUcftdwTFLNR3ih7FH80RHs='
            },
        ]
        if shim:
            scripts.append({
                'src': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/js/v4-shims.min.js',
                'integrity': 'sha256-Jk9FySjBvE0bRH9tO3VrPL8zuR+G6AhksO7bEdvXk5w='
            })
    else:
        scripts = [
            join_url(static_url, 'js', 'fontawesome.min.js'),
            join_url(static_url, 'js', 'all.min.js'),
        ]
        if shim:
            scripts.append(join_url(static_url, 'js', 'v4-shims.min.js'))
    return render_javascript(scripts)
@register.simple_tag
def jquery(slim=False, use_cdn=use_cdn_default):
    """Render the script tag for jQuery (slim or full build, CDN or local)."""
    if slim:
        if use_cdn:
            # NOTE(review): the slim build hard-codes jQuery 3.4.1 while the
            # full build reads its url from cdn.json — confirm the versions
            # are meant to differ.
            jquery_url = {
                'src': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.slim.min.js',
                'integrity': 'sha256-pasqAKBDmFT4eHoN2ndd6lN370kFiGUFyTiUHWhU7k8='
            }
        else:
            # NOTE(review): the bundled local copies are 3.3.1, older than
            # the CDN build above — verify this is intentional.
            jquery_url = join_url(static_url, 'js', 'jquery-3.3.1.slim.min.js')
    else:
        if use_cdn:
            cdn = cdn_config.get('jquery')
            jquery_url = {
                'src': cdn.get('javascript_url'),
                'integrity': cdn.get('javascript_integrity')
            }
        else:
            jquery_url = join_url(static_url, 'js', 'jquery-3.3.1.min.js')
    return render_javascript(jquery_url)
@register.simple_tag
def modernizr(use_cdn=use_cdn_default):
    """Render the script tag for Modernizr (CDN or local copy)."""
    if not use_cdn:
        return render_javascript(join_url(static_url, 'js', 'modernizr.js'))
    cdn = cdn_config.get('modernizr')
    return render_javascript({
        'src': cdn.get('javascript_url'),
        'integrity': cdn.get('javascript_integrity')
    })
@register.simple_tag
def ieshiv():
    """Render the script tag for the bundled IE shiv (local copy only)."""
    return render_javascript(join_url(static_url, 'js', 'ieshiv.js'))
@register.simple_tag
def leaflet_css(use_cdn=use_cdn_default):
    """Render the stylesheet tag for Leaflet (CDN or local copy)."""
    if not use_cdn:
        return render_css(join_url(static_url, 'css', 'leaflet.css'))
    cdn = cdn_config.get('leaflet')
    return render_css({
        'href': cdn.get('css_url'),
        'integrity': cdn.get('css_integrity')
    })
@register.simple_tag
def leaflet_javascript(use_cdn=use_cdn_default):
    """Render the script tag for Leaflet (CDN or local copy)."""
    if use_cdn:
        cdn = cdn_config.get('leaflet')
        script = {
            'src': cdn.get('javascript_url'),
            'integrity': cdn.get('javascript_integrity')
        }
    else:
        script = join_url(static_url, 'js', 'leaflet.js')
    return render_javascript([script])
@register.simple_tag
def leaflet_header(use_cdn=use_cdn_default):
    """Render the full Leaflet header block: stylesheet first, then script."""
    return leaflet_css(use_cdn) + leaflet_javascript(use_cdn)
@register.simple_tag
def leaflet_map(latitude=None, longitude=None, zoom=16, map_prefix='leaflet', map_tiles=False, map_attr=False):
    """Render a container div plus the inline JavaScript that initializes a Leaflet map."""
    if not map_tiles:
        # Fall back to the public OpenStreetMap tile server with attribution.
        map_tiles = 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
        map_attr = 'Map data © <a href="https://www.openstreetmap.org/">OpenStreetMap</a> contributors, ' \
                   '<a href="https://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, '

    map_id = '%s_map' % map_prefix
    container_div = '<div id="%s"></div>' % map_id
    # Three statements: coordinates, map construction, and the tile layer.
    coords_js = 'var %s_coords = [%s, %s];' % (map_prefix, latitude, longitude)
    map_js = 'var %s = L.map(\'%s\').setView(%s_coords, %s);' % (map_id, map_id, map_prefix, zoom)
    tiles_js = 'L.tileLayer(\'%s\', {maxZoom: 18, attribution: \'%s\', id: \'%s_streets\'}).addTo(%s);' % (
        map_tiles, map_attr, map_prefix, map_id)

    return mark_safe(container_div) + render_javascript_code([coords_js, map_js, tiles_js])
@register.simple_tag
def leaflet_marker(map_prefix='leaflet', latitude=None, longitude=None):
    """Render inline JavaScript that adds a marker to an existing Leaflet map."""
    target_map = '%s_map' % map_prefix
    marker_coords = 'var %s_marker_coords = [%s, %s];' % (map_prefix, latitude, longitude)
    add_marker = 'L.marker(%s_marker_coords).addTo(%s);' % (map_prefix, target_map)
    return render_javascript_code([marker_coords, add_marker])
| 32.567164 | 112 | 0.647877 |
2a870b5cb760e5741fbc0377a1f21acbfd6f234f | 275 | py | Python | training/training/doctype/induction_training_assembly_area_set_2/induction_training_assembly_area_set_2.py | vhrspvl/Minda-Training | 6d54d44b718506d6fe460abe5796bdee9e74d0ad | [
"MIT"
] | null | null | null | training/training/doctype/induction_training_assembly_area_set_2/induction_training_assembly_area_set_2.py | vhrspvl/Minda-Training | 6d54d44b718506d6fe460abe5796bdee9e74d0ad | [
"MIT"
] | null | null | null | training/training/doctype/induction_training_assembly_area_set_2/induction_training_assembly_area_set_2.py | vhrspvl/Minda-Training | 6d54d44b718506d6fe460abe5796bdee9e74d0ad | [
"MIT"
] | 1 | 2020-02-14T12:56:53.000Z | 2020-02-14T12:56:53.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Ramya and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class InductionTrainingAssemblyAreaSet2(Document):
    # Frappe DocType controller; all behavior comes from the base Document
    # class, so there is nothing to override here.
    pass
| 25 | 50 | 0.796364 |
7749c8bdf73525d87751335588b7a8e31e496b8a | 149 | py | Python | main/admin.py | MeNsaaH/project-web-app | b46d551e856af7ed7c214dd0a25b8d8d6c02e07e | [
"BSD-2-Clause"
] | null | null | null | main/admin.py | MeNsaaH/project-web-app | b46d551e856af7ed7c214dd0a25b8d8d6c02e07e | [
"BSD-2-Clause"
] | null | null | null | main/admin.py | MeNsaaH/project-web-app | b46d551e856af7ed7c214dd0a25b8d8d6c02e07e | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from main.models import Record, Prediction, Notification
# Register the app's models with the default admin site; register() accepts
# an iterable of models, so one call covers all three.
admin.site.register([Record, Prediction, Notification])
| 21.285714 | 56 | 0.812081 |
0500b4a5118e75f1019e7d8db859b77d920aa5d9 | 513 | py | Python | djblog/users/admin.py | NahlaBenAbdallah/djblog | dcb770af952309919c3c8f7b4b252753063d9746 | [
"MIT"
] | null | null | null | djblog/users/admin.py | NahlaBenAbdallah/djblog | dcb770af952309919c3c8f7b4b252753063d9746 | [
"MIT"
] | null | null | null | djblog/users/admin.py | NahlaBenAbdallah/djblog | dcb770af952309919c3c8f7b4b252753063d9746 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from djblog.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
    # Use the project's custom user forms instead of the stock auth forms.
    form = UserChangeForm
    add_form = UserCreationForm
    # Prepend a "User" section exposing the extra ``name`` field before the
    # standard auth admin fieldsets.
    fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
    list_display = ["username", "name", "is_superuser"]
    search_fields = ["name"]
| 28.5 | 83 | 0.746589 |
0c6df9cb0d37ac8b13fd3faeca5ef0f44c8698b5 | 1,133 | py | Python | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-01-30T13:21:30.000Z | 2018-01-30T13:21:30.000Z | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | null | null | null | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-08-29T13:26:50.000Z | 2018-08-29T13:26:50.000Z |
# Execution time : 0.001408 seconds
# Solution Explanation
# Let v = { v1, v2, ..., vn } be values
# We want to find in how many ways we can sum s with
# element of v if we can get as many items of v as we need
# ( We can choose as many times and element as we want )
# So, we can define the following recurrence
# sol(i,s) = if s<0 -> 0, if i <= n -> sol(i,s-v[i]) + sol(i+1,s), if i==n -> 1 if s=0 else 0
# So the answer is sol(1,200)
# And we note that there are overlapping cases in this recurrence
# So we can implement it using DP
import time
width = 40
def solution():
    """Count the ways to make 200p from the standard UK coin denominations.

    Uses bottom-up dynamic programming: for each coin in turn, ``ways[s]``
    accumulates the number of combinations summing to ``s`` using the coins
    processed so far.  This replaces the original memoised recursion, giving
    the same result without recursion depth or a 10x210 memo table.

    Returns:
        int: the number of distinct coin combinations summing to 200p.
    """
    target = 200
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    # ways[s] == number of ways to form sum s with the coins seen so far.
    ways = [0] * (target + 1)
    ways[0] = 1  # exactly one way to make 0: use no coins
    for coin in coins:
        for s in range(coin, target + 1):
            ways[s] += ways[s - coin]
    return ways[target]
if __name__=="__main__":
    # Time the solve and print the answer inside a dash-padded banner.
    start_ = time.time()
    # NOTE(review): .center() runs on the template *before* %-substitution,
    # so the printed line can exceed `width`; acceptable for a banner.
    print(' Answer -> %s '.center(width,'-') % ( solution() ))
    print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| 30.621622 | 93 | 0.595763 |
d3ef0c700af881736415469ca4301edc59cca6d5 | 1,118 | py | Python | tweetpik_cli/tweetpik_cli/utils/writer.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | 4 | 2022-02-23T11:03:32.000Z | 2022-03-07T20:01:42.000Z | tweetpik_cli/tweetpik_cli/utils/writer.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | null | null | null | tweetpik_cli/tweetpik_cli/utils/writer.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | 1 | 2022-02-23T11:03:32.000Z | 2022-02-23T11:03:32.000Z | """Write to disk asynchronously."""
# https://github.com/hackersandslackers/asyncio-tutorial/blob/0f4c99776b61ca3eafd850c43202bc7c52349552/asyncio_tutorial/part_II_aiohttp_aiofiles/writer.py
import aiofiles
import logging
from tweetpik_cli.dbx_logger import ( # noqa: E402
get_logger,
)
LOGGER = get_logger(__name__, provider="Writer", level=logging.DEBUG)
async def write_file(fname: str, body: bytes, filetype: str, directory: str):
    """
    Write contents of a fetched URL to a new file in a local directory.

    :param str fname: URL which was fetched (used as the file's base name).
    :param bytes body: Source HTML of a single fetched URL.
    :param str filetype: File extension to save fetched data as.
    :param str directory: Local directory to save exports to.
    :returns: Path of the (attempted) output file.
    """
    # Build the path up front so it is always bound, even if writing fails
    # (the original referenced `filename` in the return after assigning it
    # inside the try block).
    filename = f"{directory}/{fname}.{filetype}"
    try:
        LOGGER.info(f"writing file -> {filename} ....")
        # The async context manager closes the file; the original's explicit
        # `await f.close()` inside the `async with` was redundant.
        async with aiofiles.open(filename, mode="wb+") as f:
            await f.write(body)
    except Exception as e:
        # Best-effort: log and fall through so one bad URL does not abort a
        # whole batch of writes (preserves original behaviour).
        LOGGER.error(f"Unexpected error while writing from `{fname}`: {e}")
    return filename
| 39.928571 | 154 | 0.698569 |
64dfb20d7cc276421bcfbb4cddd10c8d49933dbf | 24,404 | py | Python | tests/app/views/test_marketplace.py | robot2051/dto-digitalmarketplace-buyer-frontend | cbb71123cb35ea8dc66b17ab4036d78269540b23 | [
"MIT"
] | null | null | null | tests/app/views/test_marketplace.py | robot2051/dto-digitalmarketplace-buyer-frontend | cbb71123cb35ea8dc66b17ab4036d78269540b23 | [
"MIT"
] | null | null | null | tests/app/views/test_marketplace.py | robot2051/dto-digitalmarketplace-buyer-frontend | cbb71123cb35ea8dc66b17ab4036d78269540b23 | [
"MIT"
] | null | null | null | # coding=utf-8
from flask.helpers import url_for
import mock
from nose.tools import assert_equal, assert_true, assert_in
from six import iteritems
from six.moves.urllib.parse import urlparse, parse_qs
from lxml import html
from ...helpers import BaseApplicationTest
from dmapiclient import APIError
import pytest
@pytest.mark.skipif(True, reason="not applicable to AU")
class TestHomepageAccountCreationVirtualPageViews(BaseApplicationTest):
    """Checks the analytics virtual-page-view markup emitted after signup.

    Skipped wholesale: this markup is not emitted by the AU deployment.
    """

    @mock.patch('app.main.views.marketplace.data_api_client')
    def test_data_analytics_track_page_view_is_shown_if_account_created_flag_flash_message(self, data_api_client):
        # Simulate the "account-created" flash set by the signup flow.
        with self.client.session_transaction() as session:
            session['_flashes'] = [('flag', 'account-created')]
        res = self.client.get("/")
        data = res.get_data(as_text=True)
        assert 'data-analytics="trackPageView" data-url="/vpv/?account-created=true"' in data

    @mock.patch('app.main.views.marketplace.data_api_client')
    def test_data_analytics_track_page_view_not_shown_if_no_account_created_flag_flash_message(self, data_api_client):
        res = self.client.get("/")
        data = res.get_data(as_text=True)
        assert 'data-analytics="trackPageView" data-url="/vpv/?account-created=true"' not in data
class TestHomepageBrowseList(BaseApplicationTest):
    """Checks the two audience headings rendered on the homepage."""

    @mock.patch('app.main.views.marketplace.data_api_client')
    def test_homepage_headers(self, data_api_client):
        res = self.client.get(self.expand_path('/'))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        # Collect the visible <h2> headings inside the main content article.
        headers = [
            item.text_content().strip() for item in document.cssselect('article#content h2')
        ]
        assert headers == [
            'Government buyers',
            'ICT sellers'
        ]
class TestStaticMarketplacePages(BaseApplicationTest):
    """Smoke tests for static (non-API-backed) marketplace pages."""

    def setup(self):
        super(TestStaticMarketplacePages, self).setup()

    def test_toc_page(self):
        res = self.client.get(self.expand_path('/terms-of-use'))
        assert_equal(200, res.status_code)
        # Whitespace is stripped so the check survives formatting changes.
        assert_true(
            'TermsofUse'
            in self._strip_whitespace(res.get_data(as_text=True))
        )
class TestBriefPage(BaseApplicationTest):
    """Tests for the public brief (opportunity) detail page.

    The data API client is patched for every test and returns a DOS brief
    fixture; individual tests mutate the fixture to exercise page states.
    """

    def setup(self):
        super(TestBriefPage, self).setup()
        self._data_api_client = mock.patch(
            'app.main.views.marketplace.data_api_client'
        ).start()
        self.brief = self._get_dos_brief_fixture_data()
        self._data_api_client.get_brief.return_value = self.brief

    def teardown(self):
        self._data_api_client.stop()

    def _assert_page_title(self, document):
        # NOTE(review): this helper computes values but asserts nothing —
        # the original assertions appear to have been removed; verify intent.
        brief_title = self.brief['briefs']['title']
        brief_organisation = self.brief['briefs']['organisation']
        page_heading = document.xpath('//header[@class="page-heading-smaller"]')[0]
        page_heading_h1 = page_heading.xpath('h1/text()')[0]
        page_heading_context = page_heading.xpath('p[@class="context"]/text()')[0]

    def test_dos_brief_404s_if_brief_is_draft(self):
        self.brief['briefs']['status'] = 'draft'
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}').format(brief_id))
        assert_equal(404, res.status_code)

    def test_dos_brief_has_correct_title(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}').format(brief_id))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_page_title(document)

    def test_dos_brief_pdf(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/opportunity_{}.pdf')
                              .format(brief_id))
        assert_equal(200, res.status_code)
        assert_equal(res.mimetype, 'application/pdf')

    @pytest.mark.skipif(True, reason="test failing on AU CI server")
    def test_dos_brief_has_important_dates(self):
        brief_id = self.brief['briefs']['id']
        self.brief['briefs']['clarificationQuestionsClosedAt'] = "2016-03-10T11:08:28.054129Z"
        self.brief['briefs']['applicationsClosedAt'] = "2016-03-11T11:08:28.054129Z"
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}').format(brief_id))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        brief_important_dates = document.xpath(
            '(//table[@class="summary-item-body"])[1]/tbody/tr')
        assert_equal(len(brief_important_dates), 3)
        # Fixed: these checks previously used assert_true(actual, expected),
        # where the second argument is only a failure *message*, so the
        # comparisons were never performed.  assert_equal does the intended
        # comparison.  (Test remains skipped on CI.)
        assert_equal(brief_important_dates[0].xpath(
            'td[@class="summary-item-field-first"]')[0].text_content().strip(), "Published")
        assert_equal(brief_important_dates[0].xpath(
            'td[@class="summary-item-field"]')[0].text_content().strip(), "Thursday 25 February 2016")
        assert_equal(brief_important_dates[1].xpath(
            'td[@class="summary-item-field-first"]')[0].text_content().strip(), "Deadline for asking questions")
        assert_equal(brief_important_dates[1].xpath(
            'td[@class="summary-item-field"]')[0].text_content().strip(), "Thursday 10 March 2016")
        assert_equal(brief_important_dates[2].xpath(
            'td[@class="summary-item-field-first"]')[0].text_content().strip(), "Closing date for applications")
        assert_equal(brief_important_dates[2].xpath(
            'td[@class="summary-item-field"]')[0].text_content().strip(), "Thursday 11 March 2016")

    def test_dos_brief_has_at_least_one_section(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}').format(brief_id))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        section_heading = document.xpath('//h2[@class="summary-item-heading"]')[0]
        section_attributes = section_heading.xpath('following-sibling::table[1]/tbody/tr')
        start_date_key = section_attributes[2].xpath('td[1]/span/text()')
        start_date_value = section_attributes[2].xpath('td[2]/span/text()')
        contract_length_key = section_attributes[3].xpath('td[1]/span/text()')
        contract_length_value = section_attributes[3].xpath('td[2]/span/text()')
        assert_equal(section_heading.get('id'), 'opportunity-attributes-1')
        assert_equal(section_heading.text.strip(), 'Overview')
        assert_equal(start_date_key[0], 'What is the latest start date?')
        assert_equal(start_date_value[0], '01/03/2016')
        assert_equal(contract_length_key[0], 'How long is the contract?')
        assert_equal(contract_length_value[0], '4 weeks')

    def test_dos_brief_has_question_and_answer_session_details_link(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}').format(brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        qa_session_url = self.expand_path('/sellers/opportunities/{}/question-and-answer-session'.format(brief_id))
        qa_session_link_text = document.xpath('//a[@href="{}"]/text()'.format(qa_session_url))[0].strip()
        assert qa_session_link_text == "Log in to view question and answer session details"

    def test_dos_brief_question_and_answer_session_details_hidden_when_questions_closed(self):
        self.brief['briefs']['clarificationQuestionsAreClosed'] = True
        brief_id = self.brief['briefs']['id']
        # Fixed: the URL string previously contained a stray ')' after the
        # placeholder ('.../{})'), requesting a non-existent path and making
        # the "not in" assertion pass vacuously.
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert "/question-and-answer-session" not in res.get_data(as_text=True)

    def test_dos_brief_question_and_answer_session_details_hidden_when_empty(self):
        del self.brief['briefs']['questionAndAnswerSessionDetails']
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert "/question-and-answer-session" not in res.get_data(as_text=True)

    def test_dos_brief_has_questions_and_answers(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        xpath = '//h2[@id="clarification-questions"]/following-sibling::table/tbody/tr'
        clarification_questions = document.xpath(xpath)
        number = clarification_questions[0].xpath('td[1]/span/span/text()')[0].strip()
        question = clarification_questions[0].xpath('td[1]/span/text()')[0].strip()
        answer = clarification_questions[0].xpath('td[2]/span/text()')[0].strip()
        qa_url = self.expand_path('/sellers/opportunities/{}/ask-a-question'.format(brief_id))
        qa_link_text = document.xpath('//a[@href="{}"]/text()'.format(qa_url))[0].strip()
        assert_equal(number, "1.")
        assert_equal(question, "Why?")
        assert_equal(answer, "Because")
        assert_equal(qa_link_text, "Log in to ask a question")

    def test_dos_brief_has_different_link_text_for_logged_in_supplier(self):
        self.login_as_supplier()
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        qa_url = self.expand_path('/sellers/opportunities/{}/ask-a-question'.format(brief_id))
        qa_link_text = document.xpath('//a[@href="{}"]/text()'.format(qa_url))[0]
        assert_equal(qa_link_text.strip(), "Ask a question")

    def test_can_apply_to_live_brief(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        brief_response_url = self.expand_path('/sellers/opportunities/{}/responses/create'.format(brief_id))
        apply_links = document.xpath('//a[@href="{}"]'.format(brief_response_url))
        assert len(apply_links) == 1

    def test_cannot_apply_to_closed_brief(self):
        brief = self.brief.copy()
        brief['briefs']['status'] = "closed"
        brief['briefs']['publishedAt'] = "2000-01-25T12:00:00.000000Z"
        brief['briefs']['applicationsClosedAt'] = "2000-02-25T12:00:00.000000Z"
        self._data_api_client.get_brief.return_value = brief
        brief_id = brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        brief_response_url = self.expand_path('/sellers/opportunities/{}/responses/create'.format(brief_id))
        apply_links = document.xpath('//a[@href="{}"]'.format(brief_response_url))
        assert len(apply_links) == 0
        assert '25 February 2000' in document.xpath('//div[@class="callout--info"]')[0][1].text_content()

    def test_dos_brief_specialist_role_displays_label(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        # The human-readable label, not the internal camelCase key, is shown.
        assert 'agileCoach' not in res.get_data(as_text=True)
        assert 'Agile Coach' in res.get_data(as_text=True)

    def _assert_start_application(self, document, brief_id):
        # Exactly one "Start application" link pointing at the create URL.
        brief_response_url = self.expand_path('/sellers/opportunities/{}/responses/create'.format(brief_id))
        assert len(document.xpath(
            '//a[@href="{0}"][contains(normalize-space(text()), normalize-space("{1}"))]'.format(
                brief_response_url,
                "Start application",
            )
        )) == 1

    def _assert_view_application(self, document, brief_id):
        # Exactly one "View your application" link pointing at the result URL.
        assert len(document.xpath(
            '//a[@href="{0}"][contains(normalize-space(text()), normalize-space("{1}"))]'.format(
                self.expand_path('/sellers/opportunities/{}/responses/result'.format(brief_id)),
                "View your application",
            )
        )) == 1

    def test_unauthenticated_start_application(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_buyer_start_application(self):
        self.login_as_buyer()
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_start_application(self):
        self.login_as_supplier()
        # mocking that we haven't applied
        self._data_api_client.find_brief_responses.return_value = {
            "briefResponses": [],
        }
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_applied_view_application(self):
        self.login_as_supplier()
        # mocking that we have applied
        self._data_api_client.find_brief_responses.return_value = {
            "briefResponses": [{"lazy": "mock"}],
        }
        brief_id = self.brief['briefs']['id']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities/{}'.format(brief_id)))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
class TestCatalogueOfBriefsPage(BaseApplicationTest):
    """Tests for the opportunities catalogue (brief listing) page.

    The data API client is patched with a multi-brief fixture and a framework
    with four lots, one of which ('lot-two') does not allow briefs and so must
    never be passed through to find_briefs or shown as a filter.
    """

    def setup(self):
        super(TestCatalogueOfBriefsPage, self).setup()
        self._data_api_client = mock.patch(
            'app.main.views.marketplace.data_api_client'
        ).start()
        self.briefs = self._get_dos_brief_fixture_data(multi=True)
        self._data_api_client.find_briefs.return_value = self.briefs
        self._data_api_client.get_framework.return_value = {'frameworks': {
            'name': "Digital Service Professionals",
            'slug': "digital-service-professionals",
            'lots': [
                {'name': 'Lot 1', 'slug': 'lot-one', 'allowsBrief': True},
                {'name': 'Lot 2', 'slug': 'lot-two', 'allowsBrief': False},
                {'name': 'Lot 3', 'slug': 'lot-three', 'allowsBrief': True},
                {'name': 'Lot 4', 'slug': 'lot-four', 'allowsBrief': True},
            ]
        }}

    def teardown(self):
        self._data_api_client.stop()

    def test_catalogue_of_briefs_page(self):
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities'))
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._data_api_client.get_framework.assert_called_once_with("digital-service-professionals")
        # Check keyword args other than status/lot exactly; status/lot are
        # comma-joined in unspecified order, so compare them as sets below.
        regular_args = {
            k: v for k, v in iteritems(self._data_api_client.find_briefs.call_args[1]) if k not in ("status", "lot",)
        }
        assert regular_args == {
            "framework": "digital-service-professionals",
            "page": 1,
        }
        assert set(self._data_api_client.find_briefs.call_args[1]["status"].split(",")) == {"live", "closed"}
        assert set(self._data_api_client.find_briefs.call_args[1]["lot"].split(",")) == {
            "lot-one",
            "lot-three",
            "lot-four",
        }
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Latest opportunities"
        # to-do: fix this test
        # assert 'lot 1, lot 3 and lot 4' in document.xpath(
        #     "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        # )
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == "%s opportunities" % \
            self.briefs['meta']['total']

    def test_catalogue_of_briefs_page_filtered(self):
        # url_for needs an application context outside a request.
        with self.app.app_context():
            original_url = url_for('main.list_opportunities', framework_slug='digital-service-professionals') + \
                "?page=2&status=live&lot=lot-one&lot=lot-three"
        res = self.client.get(original_url)
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._data_api_client.get_framework.assert_called_once_with("digital-service-professionals")
        regular_args = {
            k: v for k, v in iteritems(self._data_api_client.find_briefs.call_args[1]) if k not in ("status", "lot",)
        }
        assert regular_args == {
            "framework": "digital-service-professionals",
            "page": 2,
        }
        assert set(self._data_api_client.find_briefs.call_args[1]["status"].split(",")) == {"live"}
        assert set(self._data_api_client.find_briefs.call_args[1]["lot"].split(",")) == {"lot-one", "lot-three"}
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Latest opportunities"
        # to-do: fix this test
        # assert 'lot 1, lot 3 and lot 4' in document.xpath(
        #     "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        # )
        # Filter checkboxes must reflect the selected lots/statuses.
        lot_inputs = document.xpath("//form[@method='get']//input[@name='lot']")
        assert {
            element.get("value"): bool(element.get("checked"))
            for element in lot_inputs
        } == {
            "lot-one": True,
            "lot-three": True,
            "lot-four": False,
        }
        status_inputs = document.xpath("//form[@method='get']//input[@name='status']")
        assert {
            element.get("value"): bool(element.get("checked"))
            for element in status_inputs
        } == {
            "live": True,
            "closed": False,
        }
        # Pagination links must keep the same path (or be relative) and carry
        # the same query string apart from the page number.
        parsed_original_url = urlparse(original_url)
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert (parsed_original_url.path == parsed_prev_url.path == parsed_next_url.path) or \
            '' == parsed_prev_url.path == parsed_next_url.path
        # Compare query strings ignoring the "page" parameter.
        normalize_qs = lambda qs: {k: set(v) for k, v in iteritems(parse_qs(qs)) if k != "page"}
        assert normalize_qs(parsed_original_url.query) == \
            normalize_qs(parsed_next_url.query) == \
            normalize_qs(parsed_prev_url.query)
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == "3 results"

    def test_catalogue_of_briefs_page_filtered_all_options_selected(self):
        with self.app.app_context():
            original_url = url_for('main.list_opportunities', framework_slug='digital-service-professionals') + \
                "?page=2&status=live&lot=lot-one&lot=lot-three&status=closed&lot=lot-four"
        res = self.client.get(original_url)
        assert_equal(200, res.status_code)
        document = html.fromstring(res.get_data(as_text=True))
        self._data_api_client.get_framework.assert_called_once_with("digital-service-professionals")
        regular_args = {
            k: v for k, v in iteritems(self._data_api_client.find_briefs.call_args[1]) if k not in ("status", "lot",)
        }
        assert regular_args == {
            "framework": "digital-service-professionals",
            "page": 2,
        }
        assert set(self._data_api_client.find_briefs.call_args[1]["status"].split(",")) == {"live", "closed"}
        assert set(self._data_api_client.find_briefs.call_args[1]["lot"].split(",")) == {
            "lot-one",
            "lot-three",
            "lot-four",
        }
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Latest opportunities"
        # to-do: fix this test
        # assert 'lot 1, lot 3 and lot 4' in document.xpath(
        #     "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        # )
        lot_inputs = document.xpath("//form[@method='get']//input[@name='lot']")
        assert {
            element.get("value"): bool(element.get("checked"))
            for element in lot_inputs
        } == {
            "lot-one": True,
            "lot-three": True,
            "lot-four": True,
        }
        status_inputs = document.xpath("//form[@method='get']//input[@name='status']")
        assert {
            element.get("value"): bool(element.get("checked"))
            for element in status_inputs
        } == {
            "live": True,
            "closed": True,
        }
        parsed_original_url = urlparse(original_url)
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        # Compare query strings ignoring the "page" parameter.
        normalize_qs = lambda qs: {k: set(v) for k, v in iteritems(parse_qs(qs)) if k != "page"}
        assert normalize_qs(parsed_original_url.query) == normalize_qs(parsed_prev_url.query) == \
            normalize_qs(parsed_next_url.query)
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == "3 results"

    def test_catalogue_of_briefs_page_loads_as_buyer(self):
        self.login_as_buyer()
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities'))
        assert_equal(200, res.status_code)

    def test_catalogue_of_briefs_400_if_invalid_status(self):
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities'
                                               '?status=pining-for-fjords'))
        assert res.status_code == 400

    def test_catalogue_of_briefs_page_shows_pagination_if_more_pages(self):
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities?page=2'))
        assert_equal(200, res.status_code)
        page = res.get_data(as_text=True)
        document = html.fromstring(page)
        assert '<li class="previous">' in page
        assert '<li class="next">' in page
        prev_url = str(document.xpath('string(//li[@class="previous"]/a/@href)'))
        next_url = str(document.xpath('string(//li[@class="next"]/a/@href)'))
        assert prev_url.endswith('?page=1')
        assert next_url.endswith('?page=3')

    def test_no_pagination_if_no_more_pages(self):
        # One page holds everything, so no prev/next links should render.
        self.briefs['meta']['per_page'] = self.briefs['meta']['total']
        res = self.client.get(self.expand_path('/digital-service-professionals/opportunities'))
        assert_equal(200, res.status_code)
        page = res.get_data(as_text=True)
        assert '<li class="previous">' not in page
        assert '<li class="next">' not in page

    def test_catalogue_of_briefs_page_404_for_framework_that_does_not_exist(self):
        self._data_api_client.get_framework.return_value = {'frameworks': {}}
        res = self.client.get(self.expand_path('/digital-giraffes-and-monkeys/opportunities'))
        assert_equal(404, res.status_code)
        self._data_api_client.get_framework.assert_called_once_with('digital-giraffes-and-monkeys')
| 46.219697 | 118 | 0.652926 |
d4caa90293407d7ca5ef8fa79472a1cf2bc8b3fc | 3,041 | py | Python | research/bayesian_deeplab/core/uncertainty_metrics.py | omegafragger/models | 6518e3e78d898398aa7c19c8cfe7133a859e60e6 | [
"Apache-2.0"
] | null | null | null | research/bayesian_deeplab/core/uncertainty_metrics.py | omegafragger/models | 6518e3e78d898398aa7c19c8cfe7133a859e60e6 | [
"Apache-2.0"
] | null | null | null | research/bayesian_deeplab/core/uncertainty_metrics.py | omegafragger/models | 6518e3e78d898398aa7c19c8cfe7133a859e60e6 | [
"Apache-2.0"
] | null | null | null | r"""The file contains utility methods to compute the mean prediction
as well as uncertainty metrics given the Monte Carlo samples as input.
The current implementation contains methods to compute the predictive entropy
and mutual information uncertainty metrics.
Useful reference:
Y. Gal. Uncertainty in Deep Learning. PhD thesis, University of Cambridge, 2016.
"""
import tensorflow as tf
import math
# String identifiers naming the two supported uncertainty metrics.
PREDICTIVE_ENTROPY = 'predictive_entropy'
MUTUAL_INFORMATION = 'mutual_information'
def mean_prediction(monte_carlo_samples):
    """Average the Monte Carlo samples into a single prediction tensor.

    Args:
        monte_carlo_samples: A list of predicted tensors, each of shape
            [batch, height, width, num_classes].

    Returns:
        The element-wise mean over the samples, shape
        [batch, height, width, num_classes].  Apply argmax over the last
        axis to obtain per-pixel class predictions.
    """
    return tf.reduce_mean(monte_carlo_samples, axis=0)
def predictive_entropy(monte_carlo_samples):
    """Pixel-wise predictive entropy of the Monte Carlo samples.

    Args:
        monte_carlo_samples: A list of predicted tensors, each of shape
            [batch, height, width, num_classes].

    Returns:
        A tensor of shape [batch, height, width, 1] holding, per pixel, the
        entropy -sum_c p_c * log(p_c) of the mean class probabilities.
    """
    mean_probs = tf.reduce_mean(monte_carlo_samples, 0)
    # Additive jitter (exp(-6)) keeps log() finite for zero probabilities.
    jitter = tf.fill(mean_probs.get_shape().as_list(), math.exp(-6))
    entropy_terms = tf.multiply(mean_probs, tf.log(mean_probs + jitter))
    return -tf.expand_dims(tf.reduce_sum(entropy_terms, axis=3), 3)
def mutual_information(monte_carlo_samples):
    """Pixel-wise mutual information of the Monte Carlo samples.

    Computed as predictive entropy minus the expected per-sample entropy
    (equivalently, plus the mean of sum_c p_c * log(p_c) over samples).

    Args:
        monte_carlo_samples: A list of predicted tensors, each of shape
            [batch, height, width, num_classes].

    Returns:
        A tensor of shape [batch, height, width, 1] with the per-pixel
        mutual information obtained from the MC samples.
    """
    def p_log_p(sample):
        # p * log(p + eps); the exp(-6) jitter keeps the log finite.
        jitter = tf.fill(sample.get_shape().as_list(), math.exp(-6))
        return tf.multiply(sample, tf.log(sample + jitter))

    mean_trials = tf.reduce_mean([p_log_p(s) for s in monte_carlo_samples], 0)
    sum_term = tf.expand_dims(tf.reduce_sum(mean_trials, axis=3), 3)
    return predictive_entropy(monte_carlo_samples) + sum_term
38688a6e248bdfae8045cc2ab0d47996cc9d8d68 | 6,999 | py | Python | pettingzoo/mpe/_mpe_utils/core.py | vstark21/PettingZoo | 0ebd8fb540e195f9dd91d996f190e9a89dedcf26 | [
"Apache-2.0"
] | 4 | 2021-12-17T08:00:28.000Z | 2022-02-12T12:25:24.000Z | pettingzoo/mpe/_mpe_utils/core.py | vstark21/PettingZoo | 0ebd8fb540e195f9dd91d996f190e9a89dedcf26 | [
"Apache-2.0"
] | null | null | null | pettingzoo/mpe/_mpe_utils/core.py | vstark21/PettingZoo | 0ebd8fb540e195f9dd91d996f190e9a89dedcf26 | [
"Apache-2.0"
] | 1 | 2021-01-25T22:57:41.000Z | 2021-01-25T22:57:41.000Z | import numpy as np
class EntityState(object):
    """Physical/external base state shared by all entities."""

    def __init__(self):
        # Position and velocity start unset; presumably filled in with
        # numpy arrays by scenario reset code outside this module — confirm.
        self.p_pos = None
        self.p_vel = None
class AgentState(EntityState):
    """Entity state extended with an agent's communication utterance."""

    def __init__(self):
        # Inherit the physical position/velocity fields.
        super(AgentState, self).__init__()
        # Communication utterance broadcast by the agent (None until set).
        self.c = None
class Action(object):
    """Container for an agent's chosen actions."""

    def __init__(self):
        # u: physical (movement) action; c: communication action.
        self.u = None
        self.c = None
class Entity(object):
    """Common properties and state for every physical object in the world."""

    def __init__(self):
        # Identification / rendering.
        self.name = ''
        self.color = None
        # Geometry and physics parameters.
        self.size = 0.050
        self.density = 25.0          # material density (affects mass)
        self.initial_mass = 1.0
        self.max_speed = None        # None means unlimited
        self.accel = None
        # Behaviour flags: can it move / be pushed, does it collide?
        self.movable = False
        self.collide = True
        # Dynamic physical state (position / velocity).
        self.state = EntityState()

    @property
    def mass(self):
        """Mass of the entity (fixed at construction time)."""
        return self.initial_mass
class Landmark(Entity): # properties of landmark entities
    """A non-agent entity (e.g. goal/obstacle); inherits everything from Entity."""
    def __init__(self):
        super(Landmark, self).__init__()
class Agent(Entity):
    """An entity controlled by an external policy or a scripted callback."""

    def __init__(self):
        super(Agent, self).__init__()
        # Agents can move by default (unlike landmarks).
        self.movable = True
        # Capability flags: silent agents send no communication, blind
        # agents cannot observe the world.
        self.silent = False
        self.blind = False
        # Noise magnitudes for motor/communication actions (None disables).
        self.u_noise = None
        self.c_noise = None
        # Physical control range.
        self.u_range = 1.0
        # Dynamic state, chosen action, and optional scripted behaviour.
        self.state = AgentState()
        self.action = Action()
        self.action_callback = None
class World(object):
    """Multi-agent world: holds entities and advances the physics simulation."""

    def __init__(self):
        # list of agents and entities (can change at execution-time!)
        self.agents = []
        self.landmarks = []
        # communication channel dimensionality
        self.dim_c = 0
        # position dimensionality
        self.dim_p = 2
        # color dimensionality
        self.dim_color = 3
        # simulation timestep
        self.dt = 0.1
        # physical damping applied to velocities each step
        self.damping = 0.25
        # contact response parameters (force scale and softness margin)
        self.contact_force = 1e+2
        self.contact_margin = 1e-3

    @property
    def entities(self):
        """All entities in the world (agents followed by landmarks)."""
        return self.agents + self.landmarks

    @property
    def policy_agents(self):
        """Agents controllable by external policies (no scripted callback)."""
        return [agent for agent in self.agents if agent.action_callback is None]

    @property
    def scripted_agents(self):
        """Agents controlled by world scripts (have an action callback)."""
        return [agent for agent in self.agents if agent.action_callback is not None]

    def step(self):
        """Advance the world state by one timestep of length ``self.dt``."""
        # set actions for scripted agents
        for agent in self.scripted_agents:
            agent.action = agent.action_callback(agent, self)
        # gather forces applied to entities
        p_force = [None] * len(self.entities)
        # apply agent physical controls
        p_force = self.apply_action_force(p_force)
        # apply environment (collision) forces
        p_force = self.apply_environment_force(p_force)
        # integrate physical state
        self.integrate_state(p_force)
        # update agent communication state
        for agent in self.agents:
            self.update_agent_state(agent)

    def apply_action_force(self, p_force):
        """Write each movable agent's (optionally noisy) control force."""
        for i, agent in enumerate(self.agents):
            if agent.movable:
                noise = np.random.randn(*agent.action.u.shape) * agent.u_noise if agent.u_noise else 0.0
                p_force[i] = agent.action.u + noise
        return p_force

    def apply_environment_force(self, p_force):
        """Accumulate pairwise collision forces (simple, O(n^2) over pairs)."""
        for a, entity_a in enumerate(self.entities):
            for b, entity_b in enumerate(self.entities):
                if(b <= a):
                    continue  # each unordered pair handled exactly once
                [f_a, f_b] = self.get_collision_force(entity_a, entity_b)
                if(f_a is not None):
                    if(p_force[a] is None):
                        p_force[a] = 0.0
                    p_force[a] = f_a + p_force[a]
                if(f_b is not None):
                    if(p_force[b] is None):
                        p_force[b] = 0.0
                    p_force[b] = f_b + p_force[b]
        return p_force

    def integrate_state(self, p_force):
        """Semi-implicit Euler update of velocities then positions."""
        for i, entity in enumerate(self.entities):
            if not entity.movable:
                continue
            entity.state.p_vel = entity.state.p_vel * (1 - self.damping)
            if (p_force[i] is not None):
                entity.state.p_vel += (p_force[i] / entity.mass) * self.dt
            if entity.max_speed is not None:
                speed = np.sqrt(np.square(entity.state.p_vel[0]) + np.square(entity.state.p_vel[1]))
                if speed > entity.max_speed:
                    # Rescale to max_speed; reuse the norm computed above
                    # instead of recomputing it (identical result).
                    entity.state.p_vel = entity.state.p_vel / speed * entity.max_speed
            entity.state.p_pos += entity.state.p_vel * self.dt

    def update_agent_state(self, agent):
        """Update the agent's communication state from its chosen action."""
        # set communication state (directly for now)
        if agent.silent:
            agent.state.c = np.zeros(self.dim_c)
        else:
            noise = np.random.randn(*agent.action.c.shape) * agent.c_noise if agent.c_noise else 0.0
            agent.state.c = agent.action.c + noise

    def get_collision_force(self, entity_a, entity_b):
        """Soft collision forces for one pair; [None, None] when no contact applies."""
        if (not entity_a.collide) or (not entity_b.collide):
            return [None, None]  # not a collider
        if (entity_a is entity_b):
            return [None, None]  # don't collide against itself
        # compute actual distance between entities
        delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        # minimum allowable distance
        dist_min = entity_a.size + entity_b.size
        # soft (log-sum-exp) penetration keeps the response smooth at contact
        k = self.contact_margin
        penetration = np.logaddexp(0, -(dist - dist_min) / k) * k
        force = self.contact_force * delta_pos / dist * penetration
        force_a = +force if entity_a.movable else None
        force_b = -force if entity_b.movable else None
        return [force_a, force_b]
| 34.995 | 157 | 0.602229 |
4f3fe5a430e69d0dedf5cff9e22183da6d977785 | 3,579 | py | Python | tests/test_execution_order.py | celery/bootsteps | f2e788edb182d54037c5f2b9fad28dc81f701f8e | [
"BSD-3-Clause"
] | null | null | null | tests/test_execution_order.py | celery/bootsteps | f2e788edb182d54037c5f2b9fad28dc81f701f8e | [
"BSD-3-Clause"
] | 1 | 2019-10-24T16:46:50.000Z | 2019-10-24T16:46:50.000Z | tests/test_execution_order.py | celery/bootsteps | f2e788edb182d54037c5f2b9fad28dc81f701f8e | [
"BSD-3-Clause"
] | 1 | 2019-09-29T03:36:17.000Z | 2019-09-29T03:36:17.000Z | import itertools
import pytest
from hypothesis import HealthCheck, assume, given, settings
from hypothesis import strategies as st
from multiprocessing_generator import ParallelGenerator
from networkx import all_topological_sorts
from bootsteps.blueprint import ExecutionOrder
from tests.strategies import non_isomorphic_graph_builder
# graphs generated so far; the builder consults this list to avoid emitting
# isomorphic duplicates across hypothesis examples
previous_graphs = []
# strategy producing small, connected, non-isomorphic step-dependency graphs
steps_dependency_graph_builder = non_isomorphic_graph_builder(
    min_nodes=1,
    max_nodes=10,
    min_edges=0,
    self_loops=False,
    connected=True,
    previous_graphs=previous_graphs,
)
def test_initialization(bootsteps_graph):
    """A fresh ExecutionOrder mirrors the graph it was built from."""
    order = ExecutionOrder(bootsteps_graph)
    assert order._steps_dependency_graph == order._current_steps_dependency_graph
    assert order._current_steps_dependency_graph == bootsteps_graph
@given(
    steps_dependency_graph=steps_dependency_graph_builder,
    number_of_pending_nodes=st.integers(min_value=0, max_value=9),
)
@settings(suppress_health_check=(HealthCheck.too_slow, HealthCheck.filter_too_much))
def test_mark_as_pending_execution(steps_dependency_graph, number_of_pending_nodes):
    """Pending steps leave the live graph and are recorded as one order entry."""
    assume(number_of_pending_nodes <= len(steps_dependency_graph.nodes))
    execution_order = ExecutionOrder(steps_dependency_graph)
    # take the first N nodes, same selection as indexing into list(nodes)
    pending_steps = set(
        itertools.islice(steps_dependency_graph.nodes, number_of_pending_nodes)
    )
    execution_order.mark_as_pending_execution(pending_steps)
    remaining = execution_order._current_steps_dependency_graph.nodes
    assert not any(step in remaining for step in pending_steps)
    assert execution_order._execution_order == [pending_steps]
@given(
    steps_dependency_graph=steps_dependency_graph_builder.filter(
        lambda g: any(not any(g.neighbors(n)) for n in g.nodes)
    )
)
@settings(suppress_health_check=(HealthCheck.too_slow, HealthCheck.filter_too_much))
def test_steps_without_dependencies(steps_dependency_graph):
    """steps_without_dependencies() returns exactly the nodes with no successors."""
    expected = set()
    for step in steps_dependency_graph:
        if not any(steps_dependency_graph.neighbors(step)):
            expected.add(step)
    execution_order = ExecutionOrder(steps_dependency_graph)
    assert execution_order.steps_without_dependencies() == expected
@given(steps_dependency_graph=steps_dependency_graph_builder)
@settings(
    suppress_health_check=(HealthCheck.too_slow, HealthCheck.filter_too_much),
    max_examples=1000,
    deadline=1200000,
)
@pytest.mark.run(order=-1)
def test_execution_order(steps_dependency_graph, request):
    """The flattened execution order must equal some reversed topological sort.

    Enumerating all topological sorts is expensive, so sorts already seen for
    a given graph are memoized in the pytest cache, keyed by the graph's repr.
    """
    cached_topsorts = request.config.cache.get(repr(steps_dependency_graph), default=[])
    expected = (
        cached_topsorts
        if cached_topsorts
        else ParallelGenerator(
            (
                # execution runs dependencies-first, hence the reversal
                tuple(reversed(topsort))
                for topsort in all_topological_sorts(steps_dependency_graph)
            ),
            max_lookahead=2,
        )
    )
    execution_order = ExecutionOrder(steps_dependency_graph)
    # flatten the per-phase step groups into one linear order
    actual = tuple(itertools.chain.from_iterable(execution_order))
    if cached_topsorts:
        # cache stores lists, so compare list-to-list
        assert any(
            list(actual) == topsort for topsort in cached_topsorts
        ), f"{actual} not found in {cached_topsorts}"
    else:
        # enumerate lazily in a worker process, caching each candidate as we go
        with expected as topsorts:
            done = False
            for topsort in topsorts:
                cached_topsorts.append(topsort)
                if actual == topsort:
                    done = True
                    break
            request.config.cache.set(
                repr(steps_dependency_graph), tuple(cached_topsorts)
            )
            assert done
| 31.394737 | 88 | 0.725342 |
f93c2f84351d58612f03a5c6775bd77f92fdfe4a | 2,361 | py | Python | dependencies.py | JuninhoBoni/Sistema_de_Cadastro | d3adcbffdc0695aa202cf945cce3befa07bff677 | [
"Unlicense"
] | null | null | null | dependencies.py | JuninhoBoni/Sistema_de_Cadastro | d3adcbffdc0695aa202cf945cce3befa07bff677 | [
"Unlicense"
] | null | null | null | dependencies.py | JuninhoBoni/Sistema_de_Cadastro | d3adcbffdc0695aa202cf945cce3befa07bff677 | [
"Unlicense"
] | null | null | null | from services.validate import ValidateTokenData, ValidateUserDB
import os
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
# JWT signing configuration; the signing key must be supplied via the environment
SECRET_KEY = os.environ['SECRET_KEY']
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
# in-memory stand-in for a user store; the hash is a bcrypt digest of a test password
users_db = {
    "teste": {
        "username": "teste",
        "hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
    }
}
# bcrypt hashing context and the OAuth2 bearer scheme used to extract tokens
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token/authenticate")
def verify_password(plain_password, hashed_password):
    """Return True when *plain_password* matches the stored bcrypt hash."""
    return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
    """Return the bcrypt hash of *password* for storage."""
    return pwd_context.hash(password)
def get_user(db, username: str):
    """Look up *username* in *db* and wrap the record in the validation model.

    Returns None (implicitly) when the username is unknown.
    """
    # single dict lookup instead of `in` followed by indexing
    user_dict = db.get(username)
    if user_dict is not None:
        return ValidateUserDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
    """Return the matching user when credentials are valid, otherwise False."""
    user = get_user(fake_db, username)
    if user and verify_password(password, user.hashed_password):
        return user
    return False
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Build a signed JWT from *data*, adding an 'exp' expiry claim.

    Falls back to a 15-minute lifetime when no expires_delta is given.
    """
    to_encode = data.copy()
    lifetime = expires_delta if expires_delta else timedelta(minutes=15)
    to_encode.update({"exp": datetime.utcnow() + lifetime})
    return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """FastAPI dependency: resolve the bearer token to a user or raise 401."""
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        # decode the JWT, verifying signature and expiry
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
        token_data = ValidateTokenData(username=username)
    except JWTError:
        raise credentials_exception
    user = get_user(users_db, username=token_data.username)
    if user is None:
        raise credentials_exception
    return user
| 28.107143 | 90 | 0.722999 |
71ab4870f871ba4c5b457ec5abb6f8932a36c5b2 | 440 | py | Python | tests/doTesting.py | emirkmo/snpy | 2a0153c84477ba8a30310d7dbca3d5a8f24de3c6 | [
"MIT"
] | 6 | 2019-01-14T19:40:45.000Z | 2021-06-05T12:19:39.000Z | tests/doTesting.py | emirkmo/snpy | 2a0153c84477ba8a30310d7dbca3d5a8f24de3c6 | [
"MIT"
] | 3 | 2017-04-25T20:06:22.000Z | 2021-06-09T20:46:41.000Z | tests/doTesting.py | emirkmo/snpy | 2a0153c84477ba8a30310d7dbca3d5a8f24de3c6 | [
"MIT"
] | 8 | 2017-04-25T19:57:57.000Z | 2021-11-12T11:54:19.000Z | import unittest
import snpy
import os
class TestSNooPy(unittest.TestCase):
    """Smoke test: SNooPy can load a bundled supernova data file."""

    def setUp(self):
        """Locate the test-data directory shipped inside the snpy package."""
        # First, we make sure we have the test data
        self.base = os.path.dirname(snpy.__file__)
        self.data = os.path.join(self.base, 'tests')
        # (removed a stray dead `pass` statement that followed the assignments)

    def test_loadTxt(self):
        """Loading SN2006ax.txt yields an object with the matching SN name."""
        s = snpy.get_sn(os.path.join(self.data, 'SN2006ax.txt'))
        self.assertEqual(s.name, "SN2006ax")
if __name__ == '__main__':
    # run the test suite when executed as a script
    unittest.main()
| 22 | 62 | 0.663636 |
53c0619c91fdc96062650e179177f99345237e6a | 4,135 | py | Python | upload_studio/step_executor.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 9 | 2019-03-26T14:50:00.000Z | 2020-11-10T16:44:08.000Z | upload_studio/step_executor.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 22 | 2019-03-02T23:16:13.000Z | 2022-02-27T10:36:36.000Z | upload_studio/step_executor.py | jerryrwu/harvest | 6f405254fef59c84637bc976c252eef703b1cbc5 | [
"Apache-2.0"
] | 5 | 2019-04-24T00:51:30.000Z | 2020-11-06T18:31:49.000Z | import os
import shutil
from io import BytesIO
from django.db import IntegrityError, transaction
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from Harvest.path_utils import copytree_into
from Harvest.utils import get_logger
from monitoring.models import LogEntry
from upload_studio.models import ProjectStep, Project, ProjectStepWarning, ProjectStepError
from upload_studio.upload_metadata import MusicMetadata, MusicMetadataSerializer
logger = get_logger(__name__)
class StepAbortException(Exception):
    """Signals that a step must stop, carrying the status to record on it.

    When finish_project is True, the whole project is finalized as well.
    """

    def __init__(self, status, *args, finish_project=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.finish_project = finish_project
        self.status = status
class StepExecutor:
    """Base class for a single step in an upload-studio project pipeline.

    Subclasses set `name`/`description` and implement handle_run(); run()
    drives the lifecycle: reset the work area, execute, surface warnings
    and errors, then persist the step's status and metadata.
    """
    name = None
    description = None
    def __init__(self, project: Project, step: ProjectStep, prev_step: ProjectStep):
        self.project = project
        self.step = step
        self.prev_step = prev_step
        self.metadata = None
        if self.step.metadata_json:
            # deserialize and validate previously stored music metadata
            data = JSONParser().parse(BytesIO(self.step.metadata_json.encode()))
            serializer = MusicMetadataSerializer(data=data)
            serializer.is_valid(raise_exception=True)
            self.metadata = MusicMetadata(**serializer.validated_data)
    @property
    def completed_status(self):
        # status the step receives when handle_run() finishes cleanly
        return Project.STATUS_COMPLETE
    @transaction.atomic
    def add_warning(self, message, acked=False):
        """Record a warning on this step; duplicates are logged and ignored."""
        try:
            ProjectStepWarning.objects.create(
                step=self.step,
                message=message,
                acked=acked,
            )
            logger.warning('Project {} step({}) {} added warning {}.',
                           self.project.id, self.step.id, self.name, message)
        except IntegrityError:
            # the same (step, message) warning already exists
            logger.info('Project {} step({}) {} warning already added: {}.',
                        self.project.id, self.step.id, self.name, message)
    def raise_warnings(self):
        """Abort with STATUS_WARNINGS when any warning is still unacknowledged."""
        for warning in self.step.projectstepwarning_set.all():
            if not warning.acked:
                self.step.status = Project.STATUS_WARNINGS
                raise StepAbortException(Project.STATUS_WARNINGS)
    def raise_error(self, message, finish_project=False):
        """Record an error on this step and abort with STATUS_ERRORS."""
        logger.warning('Project {} step({}) {} raised error {}.',
                       self.project.id, self.step.id, self.name, message)
        ProjectStepError.objects.create(step=self.step, message=message)
        raise StepAbortException(
            Project.STATUS_ERRORS,
            finish_project=finish_project,
        )
    def clean_work_area(self):
        # wipe any leftovers from a previous run and recreate the data dir
        try:
            shutil.rmtree(self.step.path)
        except FileNotFoundError:
            pass
        os.makedirs(self.step.data_path)
    def copy_prev_step_area_files(self, area):
        # copy one named area verbatim from the previous step's workspace
        copytree_into(self.prev_step.get_area_path(area), self.step.get_area_path(area))
    def copy_prev_step_files(self, exclude_areas=None):
        """Copy all areas from the previous step, except those in exclude_areas."""
        if not self.prev_step:
            self.raise_error('No previous step to copy files from.')
        exclude_areas = exclude_areas if exclude_areas is not None else {}
        for area in self.prev_step.get_areas():
            if area in exclude_areas:
                continue
            self.copy_prev_step_area_files(area)
    def handle_run(self):
        # subclasses implement the actual work of the step here
        raise NotImplementedError()
    def run(self):
        """Execute the step and persist its resulting status/metadata."""
        self.step.projectsteperror_set.all().delete()
        try:
            self.clean_work_area()
            self.handle_run()
            self.raise_warnings()  # In case a warning was added after the last raise_warnings
            self.step.status = self.completed_status
        except StepAbortException as exc:
            self.step.status = exc.status
            if exc.finish_project:
                LogEntry.info('Finished upload studio {} due to error.'.format(self.project))
                self.project.finish()
        if self.metadata:
            data = MusicMetadataSerializer(self.metadata).data
            self.step.metadata_json = JSONRenderer().render(data).decode()
        self.step.save()
| 36.59292 | 94 | 0.650544 |
0230923a5b8917edd350a764b3700e69ff41f3f0 | 5,100 | py | Python | model-optimizer/extensions/front/GeLUMerger_Erf.py | guytamir/openvino | 6ad42182866cef1f2c8b19663f83638f86b1452a | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/GeLUMerger_Erf.py | guytamir/openvino | 6ad42182866cef1f2c8b19663f83638f86b1452a | [
"Apache-2.0"
] | 19 | 2021-03-26T08:11:00.000Z | 2022-02-21T13:06:26.000Z | model-optimizer/extensions/front/GeLUMerger_Erf.py | jayabs2020/openvino | 67a82a040faaf66f109035acf7de6e4b7568bc08 | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2017-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
from math import sqrt, fabs
from extensions.ops.gelu import GeLUOP
from mo.front.common.replacement import FrontReplacementPattern
from mo.graph.graph import Graph, rename_nodes
from mo.middle.pattern_match import apply_pattern
class GeLUMergerErf(FrontReplacementPattern):
    """Fuse an Erf-based GELU approximation subgraph into a single GeLU op.

    All three patterns implement f(x) = 0.5 * x * (1 + erf(x / sqrt(2)));
    they differ only in where the multiplication by 0.5 is applied.
    """
    enabled = True

    @staticmethod
    def _pattern_nodes():
        # node set shared by all three pattern variants (was triplicated)
        return [
            ('mul', dict(op='Mul')),
            ('mul0', dict(op='Mul')),
            ('div', dict(op='Div')),
            ('erf', dict(op='Erf')),
            ('add', dict(op='Add')),
            ('mul_param', dict(op='Const')),
            ('div_param', dict(op='Const')),
            ('add_param', dict(op='Const')),
        ]

    def pattern1(self):
        # (0.5 * x) * (1 + erf(x / sqrt(2))
        return dict(
            nodes=self._pattern_nodes(),
            edges=[
                ('mul', 'mul0'),
                ('div', 'erf'),
                ('erf', 'add'),
                ('add', 'mul0'),
                ('mul_param', 'mul'),
                ('div_param', 'div'),
                ('add_param', 'add'),
            ])

    def pattern2(self):
        # 0.5 * (x * (1 + erf(x / sqrt(2)))
        return dict(
            nodes=self._pattern_nodes(),
            edges=[
                ('div', 'erf'),
                ('erf', 'add'),
                ('add', 'mul'),
                ('mul', 'mul0'),
                ('mul_param', 'mul0'),
                ('div_param', 'div'),
                ('add_param', 'add'),
            ])

    def pattern3(self):
        # x * (0.5 * (1 + erf(x / sqrt(2)))
        return dict(
            nodes=self._pattern_nodes(),
            edges=[
                ('div', 'erf'),
                ('erf', 'add'),
                ('add', 'mul'),
                ('mul', 'mul0'),
                ('mul_param', 'mul'),
                ('div_param', 'div'),
                ('add_param', 'add'),
            ])

    def find_and_replace_pattern(self, graph: Graph):
        log.info('Enabled GeLU Merger replacement for approximation with Erf')
        apply_pattern(graph, **self.pattern1(), action=self.replace_gelu)
        apply_pattern(graph, **self.pattern2(), action=self.replace_gelu)
        apply_pattern(graph, **self.pattern3(), action=self.replace_gelu)

    def replace_gelu(self, graph: Graph, match: dict):
        """Verify the matched constants and splice in a fused GeLU node."""
        # Gaussian Error Linear Unit
        # f(x) = 0.5 * x * (1 + erf(x / sqrt(2))
        out_node = match['mul0']
        node_name = out_node.soft_get('name', out_node.id)
        div = match['div']
        inp_node = div.in_port(0).get_source().node
        # fixed: fall back to the input node's own id, not the output node's
        inp_name = inp_node.soft_get('name', inp_node.id)
        log.debug('Found potential Erf-based GeLU pattern after {} with name {}'.format(inp_node.op, inp_name))
        # take the values of the mul, add and div constants
        div_param = match['div_param']
        add_param = match['add_param']
        mul_param = match['mul_param']
        if add_param.value.size == 1 and mul_param.value.size == 1 and div_param.value.size == 1:
            mul_param = match['mul_param'].value.item()
            add_param = match['add_param'].value.item()
            div_param = match['div_param'].value.item()
            sqrt2 = sqrt(2.0)
            # check that the constants match the approximation:
            # divisor ~ sqrt(2), scale 0.5, bias 1.0
            if fabs(div_param - sqrt2) < 1e-06 and mul_param == 0.5 and add_param == 1.0:
                log.debug('Confirmed Erf-based GELU pattern after {} with name {}'.format(inp_node.op, inp_name))
                gelu = GeLUOP(graph, dict(name=inp_name + '/GELU_', approximation_mode='erf')).create_node()
                div.in_port(0).get_connection().set_destination(gelu.in_port(0))
                out_node.out_port(0).get_connection().set_source(gelu.out_port(0))
                rename_nodes([(out_node, node_name + '/TBD'), (gelu, node_name)])
ba88283243a06eb5b5a5e3373d12e4a78d1131a3 | 97 | py | Python | MSArticle/__init__.py | shanu2405/minemeld-msarticle | a13dc9c6a723f767d600e838906aa58b13855938 | [
"Apache-2.0"
] | null | null | null | MSArticle/__init__.py | shanu2405/minemeld-msarticle | a13dc9c6a723f767d600e838906aa58b13855938 | [
"Apache-2.0"
] | null | null | null | MSArticle/__init__.py | shanu2405/minemeld-msarticle | a13dc9c6a723f767d600e838906aa58b13855938 | [
"Apache-2.0"
] | 5 | 2019-01-07T19:50:11.000Z | 2022-02-03T23:52:16.000Z | def prototypes():
import os
return os.path.join(os.path.dirname(__file__), 'prototypes') | 24.25 | 64 | 0.701031 |
961ab8d80bd10a4274b8f79da9bdb499177d5523 | 5,860 | py | Python | docs/conf.py | ezadev/pdf-tables | 362942d1c866d07ef3f3dd9aeab17ea1d5cefc7f | [
"MIT"
] | 1,108 | 2018-10-22T16:51:26.000Z | 2022-03-28T22:23:06.000Z | docs/conf.py | ezadev/pdf-tables | 362942d1c866d07ef3f3dd9aeab17ea1d5cefc7f | [
"MIT"
] | 130 | 2018-10-20T19:40:36.000Z | 2022-03-17T17:42:44.000Z | docs/conf.py | ezadev/pdf-tables | 362942d1c866d07ef3f3dd9aeab17ea1d5cefc7f | [
"MIT"
] | 183 | 2018-11-09T05:39:58.000Z | 2022-03-02T06:36:01.000Z | #
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_themes'))
import excalibur
# -- Project information -----------------------------------------------------
project = 'Excalibur'
copyright = '2018, Camelot Developers'
author = 'Camelot Developers'
# The short X.Y version
version = excalibur.__version__
# The full version, including alpha/beta/rc tags
release = excalibur.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'flask_theme_support.FlaskyStyle'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'camelot-dev',
'github_repo': 'excalibur',
'github_banner': True,
'show_related': False,
'note_bg': '#FFF59C'
}
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
html_sidebars = {
'index': ['sidebarintro.html', 'relations.html', 'sourcelink.html',
'searchbox.html', 'hacks.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html', 'hacks.html']
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Excaliburdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Excalibur.tex', 'Excalibur Documentation',
'Camelot Developers', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Excalibur', 'Excalibur Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Excalibur', 'Excalibur Documentation',
author, 'Excalibur', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 31.170213 | 80 | 0.662116 |
1efdd8083eb3744a5a73bc2f282740ab814a4cb7 | 40 | py | Python | tests/test_tokenizer/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 18 | 2021-02-15T13:10:42.000Z | 2022-03-17T12:57:34.000Z | tests/test_tokenizer/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 81 | 2021-06-02T07:45:20.000Z | 2022-03-29T15:21:32.000Z | tests/test_tokenizer/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 29 | 2021-05-18T08:34:58.000Z | 2022-03-12T00:19:09.000Z | # coding: utf-8
# 2021/8/1 @ tongshiwei
| 13.333333 | 23 | 0.65 |
97496b4bc4d41e37ee99142dec56282607bf660e | 106 | py | Python | dgmc/utils/__init__.py | rlckd159/deep-graph-matching-consensus | 1656cdae27c705a0aa6d2912a24e566b8b86e1b0 | [
"MIT"
] | 194 | 2020-01-17T08:59:09.000Z | 2022-03-29T10:16:30.000Z | dgmc/utils/__init__.py | rlckd159/deep-graph-matching-consensus | 1656cdae27c705a0aa6d2912a24e566b8b86e1b0 | [
"MIT"
] | 17 | 2020-01-31T10:58:06.000Z | 2021-10-05T14:48:49.000Z | dgmc/utils/__init__.py | rlckd159/deep-graph-matching-consensus | 1656cdae27c705a0aa6d2912a24e566b8b86e1b0 | [
"MIT"
] | 38 | 2020-01-19T01:23:24.000Z | 2022-03-23T21:56:24.000Z | from .data import PairDataset, ValidPairDataset
__all__ = [
'PairDataset',
'ValidPairDataset',
]
| 15.142857 | 47 | 0.707547 |
c6ecbe8ba90514e82e1a99f4c1404ea7f67fc5b2 | 1,676 | py | Python | config/wsgi.py | paradigmadigital/Cookiecutter | 7ea675cd9e92e04b0541cf29ed9659b2b7e64ae4 | [
"MIT"
] | null | null | null | config/wsgi.py | paradigmadigital/Cookiecutter | 7ea675cd9e92e04b0541cf29ed9659b2b7e64ae4 | [
"MIT"
] | null | null | null | config/wsgi.py | paradigmadigital/Cookiecutter | 7ea675cd9e92e04b0541cf29ed9659b2b7e64ae4 | [
"MIT"
] | null | null | null | """
WSGI config for shop project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# shop directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'shop'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 40.878049 | 79 | 0.795346 |
437f267861e25c18217e69b720619b572f83e88f | 428 | py | Python | 000562HeadFirstPy/000562_02_01_p097_Lists_04_Vowels02_20200225.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000562HeadFirstPy/000562_02_01_p097_Lists_04_Vowels02_20200225.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000562HeadFirstPy/000562_02_01_p097_Lists_04_Vowels02_20200225.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | vowels = ['a', 'e', 'i', 'o', 'u']
word = 'Milliways'
found = [] # объявляется пустой массив, в который будут складываться результаты поиска
for letter in word: # в каком слове ищем, перебираем каждый элемент [letter] списка [vowels]
if letter in vowels:
if letter not in found:
found.append(letter)
for vowels in found:
print(vowels)
# [задание] вывести список гласных в слове на печать
print(found) | 38.909091 | 92 | 0.684579 |
608ad256dc38a6df85c02ca3b7937e181f4cd62a | 8,522 | py | Python | django/conf/__init__.py | samupl/django | 85ada61ac492ec6031db813463f4ea6820fee20b | [
"PSF-2.0",
"BSD-3-Clause"
] | 19 | 2015-07-07T02:08:59.000Z | 2021-11-08T11:05:40.000Z | django/conf/__init__.py | chenlw0106/django | 36300ef336e3f130a0dadc1143163ff3d23dc843 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-01-31T11:30:21.000Z | 2020-01-31T11:30:21.000Z | django/conf/__init__.py | chenlw0106/django | 36300ef336e3f130a0dadc1143163ff3d23dc843 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-21T05:45:42.000Z | 2020-11-21T05:45:42.000Z | """
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import traceback
import warnings
from pathlib import Path
import django
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango31Warning
from django.utils.functional import LazyObject, empty
# name of the environment variable that points at the settings module
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
# warning text emitted whenever FILE_CHARSET is read (see LazySettings.FILE_CHARSET)
FILE_CHARSET_DEPRECATED_MSG = (
    'The FILE_CHARSET setting is deprecated. Starting with Django 3.1, all '
    'files read from disk must be UTF-8 encoded.'
)
class SettingsReference(str):
    """
    String subclass which references a current settings value. It's treated as
    the value in memory but serializes to a settings.NAME attribute reference.
    """
    def __new__(cls, value, setting_name):
        # fixed: __new__ receives the class, so name the parameter `cls`
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        self.setting_name = setting_name
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if self._wrapped is empty:
self._setup(name)
val = getattr(self._wrapped, name)
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == '_wrapped':
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
    @property
    def FILE_CHARSET(self):
        """Deprecated setting; warn when it is read from outside Django itself."""
        stack = traceback.extract_stack()
        # Show a warning if the setting is used outside of Django.
        # Stack index: -1 this line, -2 the caller.
        filename, _line_number, _function_name, _text = stack[-2]
        if not filename.startswith(os.path.dirname(django.__file__)):
            warnings.warn(
                FILE_CHARSET_DEPRECATED_MSG,
                RemovedInDjango31Warning,
                stacklevel=2,
            )
        # Call __getattr__ explicitly: this property shadows the normal
        # lazy attribute lookup, so the cached/wrapped value must be fetched
        # by hand.
        return self.__getattr__('FILE_CHARSET')
class Settings:
    """Concrete settings holder backed by a user settings module.

    Defaults come from ``global_settings``; anything ALL_CAPS defined in the
    module named by ``settings_module`` overrides them.
    """
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting.isupper():
                setattr(self, setting, getattr(global_settings, setting))
        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module
        mod = importlib.import_module(self.SETTINGS_MODULE)
        # Settings that must be sequences; a bare string here is almost
        # always an accidental typo (e.g. INSTALLED_APPS = "myapp").
        tuple_settings = (
            "INSTALLED_APPS",
            "TEMPLATE_DIRS",
            "LOCALE_PATHS",
        )
        self._explicit_settings = set()
        for setting in dir(mod):
            if setting.isupper():
                setting_value = getattr(mod, setting)
                if (setting in tuple_settings and
                        not isinstance(setting_value, (list, tuple))):
                    raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
                setattr(self, setting, setting_value)
                # Remember which settings the user set explicitly, for
                # is_overridden() below.
                self._explicit_settings.add(setting)
        if not self.SECRET_KEY:
            raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
        if self.is_overridden('FILE_CHARSET'):
            warnings.warn(FILE_CHARSET_DEPRECATED_MSG, RemovedInDjango31Warning)
        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = Path('/usr/share/zoneinfo')
            zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split('/'))
            if zoneinfo_root.exists() and not zone_info_file.exists():
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()
    def is_overridden(self, setting):
        # True only for settings the user's module defined explicitly.
        return setting in self._explicit_settings
    def __repr__(self):
        return '<%(cls)s "%(settings_module)s">' % {
            'cls': self.__class__.__name__,
            'settings_module': self.SETTINGS_MODULE,
        }
class UserSettingsHolder:
    """Holder for user configured settings."""
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None

    def __init__(self, default_settings):
        """
        Wrap ``default_settings``: attribute lookups that miss on this object
        fall back to it (it only needs to support attribute access).
        """
        # Write through __dict__ directly so our own __setattr__ is not
        # triggered before _deleted exists.
        self.__dict__['_deleted'] = set()
        self.default_settings = default_settings

    def __getattr__(self, name):
        # A deleted setting must not resurface via the defaults.
        if name in self._deleted:
            raise AttributeError
        return getattr(self.default_settings, name)

    def __setattr__(self, name, value):
        # Re-assigning a setting cancels any earlier deletion of it.
        self._deleted.discard(name)
        if name == 'FILE_CHARSET':
            warnings.warn(FILE_CHARSET_DEPRECATED_MSG, RemovedInDjango31Warning)
        super().__setattr__(name, value)

    def __delattr__(self, name):
        # Record the deletion first so that fallback lookups stop succeeding
        # even when the name only existed on default_settings.
        self._deleted.add(name)
        if hasattr(self, name):
            super().__delattr__(name)

    def __dir__(self):
        names = [*self.__dict__, *dir(self.default_settings)]
        return sorted(n for n in names if n not in self._deleted)

    def is_overridden(self, setting):
        if setting in self._deleted or setting in self.__dict__:
            return True
        default_check = getattr(self.default_settings, 'is_overridden', lambda s: False)
        return default_check(setting)

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
# Module-level singleton through which settings are accessed lazily.
settings = LazySettings()
| 36.26383 | 102 | 0.646327 |
04cb4de91d930121e44a15f65c09a7881eacc75e | 1,508 | py | Python | UVa 11107 life forms/sample/main1.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | 1 | 2020-11-24T03:17:21.000Z | 2020-11-24T03:17:21.000Z | UVa 11107 life forms/sample/main1.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | null | null | null | UVa 11107 life forms/sample/main1.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | 1 | 2021-04-11T16:22:31.000Z | 2021-04-11T16:22:31.000Z | import karkkainen_sanders as tks
import karkkainen_sanders as tks
import sys

# Redirect stdin so the script can be run against a local test file.
sys.stdin = open('input.txt')

while True:
    N = int(input())
    if N == 0:
        break
    # Build one combined sequence: every character is shifted by +10000 and
    # each string is terminated with a unique sentinel (100 + line index), so
    # suffixes from different strings never share a sentinel value.
    sStr = []
    for i in range(N):
        # raw_input() (Python 2) replaced by input() for Python 3.
        line = input().strip()
        for c in line:
            sStr.append(ord(c) + 10000)
        sStr.append(i + 100)
    L = len(sStr)
    SA = tks.simple_kark_sort(sStr)
    LCP = tks.LCP(sStr, SA)
    # belongLine[i] = sentinel of the string that suffix SA[i] starts in
    # (scan forward to the first value that is a sentinel, i.e. <= 10000).
    belongLine = [0] * L
    for i, v in enumerate(SA[:L]):
        j = v
        while sStr[j] > 10000:
            j += 1
        belongLine[i] = sStr[j]
    # Slide a window over the suffix array until it covers suffixes from at
    # least (N+1)//2 distinct strings; the answer is the maximum LCP over
    # all such windows.
    result = 0
    maxStart = []
    for start in range(0, L - 1):
        usedLine = set()
        usedLine.add(belongLine[start])
        usedLine.add(belongLine[start + 1])
        end = start
        while len(usedLine) < (N + 1) // 2 and end < L:
            usedLine.add(belongLine[end])
            end += 1
        lcp = min(LCP[start:end + 1])
        if result < lcp:
            maxStart, result = [start], lcp
        elif result == lcp:
            maxStart.append(start)
    if result == 0:
        print('?')
    else:
        for start in maxStart:
            rStr = []
            for i in range(SA[start], SA[start] + result):
                # Shift back from the encoded alphabet to real characters;
                # chr() replaces the Python 2 unichr().
                rStr.append(chr(sStr[i] - 10000))
            print(''.join(rStr))
        print('')
| 25.559322 | 58 | 0.494032 |
e8614248b52b755d787c001d906261d28a9063f1 | 14,114 | py | Python | examples/contrib/cifar10/main.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | 1 | 2021-08-30T14:29:10.000Z | 2021-08-30T14:29:10.000Z | examples/contrib/cifar10/main.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | examples/contrib/cifar10/main.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import GradScaler, autocast
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config):
    """Per-process training entry point, launched by idist.Parallel.

    Sets up seeding, data loaders, model/optimizer, the trainer engine, two
    evaluators, rank-0-only logging/checkpointing, and then runs training.
    """
    rank = idist.get_rank()
    # Offset the seed per rank so data augmentation differs across workers.
    manual_seed(config["seed"] + rank)
    device = idist.device()
    logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
    log_basic_info(logger, config)
    output_path = config["output_path"]
    if rank == 0:
        # Only rank 0 creates the run folder and (optionally) ClearML task.
        if config["stop_iteration"] is None:
            now = datetime.now().strftime("%Y%m%d-%H%M%S")
        else:
            now = f"stop-on-{config['stop_iteration']}"
        folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
        output_path = Path(output_path) / folder_name
        if not output_path.exists():
            output_path.mkdir(parents=True)
        config["output_path"] = output_path.as_posix()
        logger.info(f"Output path: {config['output_path']}")
        if "cuda" in device.type:
            config["cuda device name"] = torch.cuda.get_device_name(local_rank)
        if config["with_clearml"]:
            try:
                from clearml import Task
            except ImportError:
                # Backwards-compatibility for legacy Trains SDK
                from trains import Task
            task = Task.init("CIFAR10-Training", task_name=output_path.stem)
            task.connect_configuration(config)
            # Log hyper parameters
            hyper_params = [
                "model",
                "batch_size",
                "momentum",
                "weight_decay",
                "num_epochs",
                "learning_rate",
                "num_warmup_epochs",
            ]
            task.connect({k: config[k] for k in hyper_params})
    # Setup dataflow, model, optimizer, criterion
    train_loader, test_loader = get_dataflow(config)
    # initialize() needs the epoch length to build the LR schedule.
    config["num_iters_per_epoch"] = len(train_loader)
    model, optimizer, criterion, lr_scheduler = initialize(config)
    # Create trainer for current task
    trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
    # Let's now setup evaluator engine to perform model's validation and compute metrics
    metrics = {
        "Accuracy": Accuracy(),
        "Loss": Loss(criterion),
    }
    # We define two evaluators as they wont have exactly similar roles:
    # - `evaluator` will save the best model based on validation score
    evaluator = create_evaluator(model, metrics=metrics, config=config)
    train_evaluator = create_evaluator(model, metrics=metrics, config=config)
    def run_validation(engine):
        # Evaluate on both splits and log the metrics for the current epoch.
        epoch = trainer.state.epoch
        state = train_evaluator.run(train_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
        state = evaluator.run(test_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
    trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
    if rank == 0:
        # Setup TensorBoard logging on trainer and evaluators. Logged values are:
        #  - Training metrics, e.g. running average loss values
        #  - Learning rate
        #  - Evaluation train/test metrics
        evaluators = {"training": train_evaluator, "test": evaluator}
        tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
        # Store 2 best models by validation accuracy starting from num_epochs / 2:
        best_model_handler = Checkpoint(
            {"model": model},
            get_save_handler(config),
            filename_prefix="best",
            n_saved=2,
            global_step_transform=global_step_from_engine(trainer),
            score_name="test_accuracy",
            score_function=Checkpoint.get_default_score_fn("Accuracy"),
        )
        evaluator.add_event_handler(
            Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler
        )
    # In order to check training resuming we can stop training on a given iteration
    if config["stop_iteration"] is not None:
        @trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
        def _():
            logger.info(f"Stop training on {trainer.state.iteration} iteration")
            trainer.terminate()
    try:
        trainer.run(train_loader, max_epochs=config["num_epochs"])
    except Exception as e:
        logger.exception("")
        raise e
    if rank == 0:
        tb_logger.close()
def run(
    seed=543,
    data_path="/tmp/cifar10",
    output_path="/tmp/output-cifar10/",
    model="resnet18",
    batch_size=512,
    momentum=0.9,
    weight_decay=1e-4,
    num_workers=12,
    num_epochs=24,
    learning_rate=0.4,
    num_warmup_epochs=4,
    validate_every=3,
    checkpoint_every=1000,
    backend=None,
    resume_from=None,
    log_every_iters=15,
    nproc_per_node=None,
    stop_iteration=None,
    with_clearml=False,
    with_amp=False,
    **spawn_kwargs,
):
    """Main entry to train a model on the CIFAR10 dataset.

    Args:
        seed (int): random state seed to set. Default, 543.
        data_path (str): input dataset path. Default, "/tmp/cifar10".
        output_path (str): output path. Default, "/tmp/output-cifar10".
        model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
        batch_size (int): total batch size. Default, 512.
        momentum (float): optimizer's momentum. Default, 0.9.
        weight_decay (float): weight decay. Default, 1e-4.
        num_workers (int): number of workers in the data loader. Default, 12.
        num_epochs (int): number of epochs to train the model. Default, 24.
        learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
        num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
        validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
        checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000.
        backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl",
            "xla-tpu", "gloo" etc. Default, None.
        nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
            when main python process is spawning training as child processes.
        resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
        log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
            It can be 0 to disable it. Default, 15.
        stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
        with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
        with_amp (bool): if True, enables native automatic mixed precision. Default, False.
        **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
    """
    # catch all local parameters
    # NOTE: locals() must be called before any other local variable is
    # defined, otherwise the extra names would leak into the config dict.
    config = locals()
    config.update(config["spawn_kwargs"])
    del config["spawn_kwargs"]
    spawn_kwargs["nproc_per_node"] = nproc_per_node
    if backend == "xla-tpu" and with_amp:
        raise RuntimeError("The value of with_amp should be False if backend is xla")
    # idist.Parallel spawns/joins worker processes as configured by `backend`.
    with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
        parallel.run(training, config)
def get_dataflow(config):
    """Build distributed-aware train/test data loaders from ``config``."""
    rank = idist.get_rank()
    # Double-barrier pattern: non-zero ranks wait while rank 0 downloads the
    # dataset, then rank 0 waits until everyone has passed the first barrier.
    if rank > 0:
        idist.barrier()
    train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
    if rank == 0:
        idist.barrier()

    num_workers = config["num_workers"]
    train_loader = idist.auto_dataloader(
        train_dataset,
        batch_size=config["batch_size"],
        num_workers=num_workers,
        shuffle=True,
        drop_last=True,
    )
    # Evaluation uses a doubled batch size (no gradients are kept).
    test_loader = idist.auto_dataloader(
        test_dataset,
        batch_size=2 * config["batch_size"],
        num_workers=num_workers,
        shuffle=False,
    )
    return train_loader, test_loader
def initialize(config):
    """Create the model, SGD optimizer, loss, and warmup+decay LR schedule."""
    model = idist.auto_model(utils.get_model(config["model"]))
    optimizer = idist.auto_optim(
        optim.SGD(
            model.parameters(),
            lr=config["learning_rate"],
            momentum=config["momentum"],
            weight_decay=config["weight_decay"],
            nesterov=True,
        )
    )
    criterion = nn.CrossEntropyLoss().to(idist.device())

    iters_per_epoch = config["num_iters_per_epoch"]
    # Piecewise-linear LR: ramp from 0 to the peak over the warmup epochs,
    # then decay linearly back to 0 by the final epoch.
    milestones = [
        (0, 0.0),
        (iters_per_epoch * config["num_warmup_epochs"], config["learning_rate"]),
        (iters_per_epoch * config["num_epochs"], 0.0),
    ]
    lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones)
    return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
    """Log one evaluation summary: epoch, wall time, and every metric value."""
    lines = "\n".join(f"\t{name}: {value}" for name, value in metrics.items())
    header = f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:"
    logger.info(f"{header}\n {lines}")
def log_basic_info(logger, config):
    """Log framework versions, GPU/CUDA info, the full config, and the
    distributed setup (when world size > 1)."""
    logger.info(f"Train {config['model']} on CIFAR10")
    logger.info(f"- PyTorch version: {torch.__version__}")
    logger.info(f"- Ignite version: {ignite.__version__}")
    if torch.cuda.is_available():
        # explicitly import cudnn as
        # torch.backends.cudnn can not be pickled with hvd spawning procs
        from torch.backends import cudnn
        logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
        logger.info(f"- CUDA version: {torch.version.cuda}")
        logger.info(f"- CUDNN version: {cudnn.version()}")
    logger.info("\n")
    logger.info("Configuration:")
    for key, value in config.items():
        logger.info(f"\t{key}: {value}")
    logger.info("\n")
    if idist.get_world_size() > 1:
        logger.info("\nDistributed setting:")
        logger.info(f"\tbackend: {idist.backend()}")
        logger.info(f"\tworld size: {idist.get_world_size()}")
        logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
    """Build the training Engine with AMP support, common handlers
    (NaN termination, LR scheduling, periodic checkpointing), and optional
    resume-from-checkpoint."""
    device = idist.device()
    # Setup Ignite trainer:
    # - let's define training step
    # - add other common handlers:
    #    - TerminateOnNan,
    #    - handler to setup learning rate scheduling,
    #    - ModelCheckpoint
    #    - RunningAverage` on `train_step` output
    #    - Two progress bars on epochs and optionally on iterations
    with_amp = config["with_amp"]
    # GradScaler is a no-op when AMP is disabled.
    scaler = GradScaler(enabled=with_amp)
    def train_step(engine, batch):
        # One optimization step: forward under autocast, scaled backward,
        # optimizer step, scaler update.
        x, y = batch[0], batch[1]
        if x.device != device:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
        model.train()
        with autocast(enabled=with_amp):
            y_pred = model(x)
            loss = criterion(y_pred, y)
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        return {
            "batch loss": loss.item(),
        }
    trainer = Engine(train_step)
    trainer.logger = logger
    # Objects persisted in (and restored from) training checkpoints.
    to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
    metric_names = [
        "batch loss",
    ]
    common.setup_common_training_handlers(
        trainer=trainer,
        train_sampler=train_sampler,
        to_save=to_save,
        save_every_iters=config["checkpoint_every"],
        save_handler=get_save_handler(config),
        lr_scheduler=lr_scheduler,
        output_names=metric_names if config["log_every_iters"] > 0 else None,
        with_pbars=False,
        clear_cuda_cache=False,
    )
    resume_from = config["resume_from"]
    if resume_from is not None:
        checkpoint_fp = Path(resume_from)
        assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
        logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
        # Load on CPU; idist/auto_model placed the live objects already.
        checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
        Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
    return trainer
def create_evaluator(model, metrics, config, tag="val"):
    """Build an evaluation Engine that runs the model under torch.no_grad()
    (and autocast when AMP is enabled) and attaches the given metrics."""
    with_amp = config["with_amp"]
    device = idist.device()
    @torch.no_grad()
    def evaluate_step(engine: Engine, batch):
        model.eval()
        x, y = batch[0], batch[1]
        if x.device != device:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
        with autocast(enabled=with_amp):
            output = model(x)
        # (prediction, target) pairs are what the attached metrics consume.
        return output, y
    evaluator = Engine(evaluate_step)
    for name, metric in metrics.items():
        metric.attach(evaluator, name)
    # Progress bar only on rank 0 and only when ClearML isn't capturing logs.
    if idist.get_rank() == 0 and (not config["with_clearml"]):
        common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)
    return evaluator
def get_save_handler(config):
    """Return a checkpoint saver: ClearML-backed if enabled, else local disk."""
    if not config["with_clearml"]:
        return DiskSaver(config["output_path"], require_empty=False)
    # Imported lazily so ClearML is only required when actually enabled.
    from ignite.contrib.handlers.clearml_logger import ClearMLSaver
    return ClearMLSaver(dirname=config["output_path"])
if __name__ == "__main__":
    # Expose `run` as a CLI command, e.g. `python main.py run --batch_size=256`.
    fire.Fire({"run": run})
| 36.005102 | 120 | 0.662463 |
c00e1e89ec6018068d5444b21ef5ca3a6d81c355 | 8,674 | py | Python | forms.py | sheffali/Django-project | ad3132a5c67aed5492766288e50a6789c885f13b | [
"MIT"
] | null | null | null | forms.py | sheffali/Django-project | ad3132a5c67aed5492766288e50a6789c885f13b | [
"MIT"
] | null | null | null | forms.py | sheffali/Django-project | ad3132a5c67aed5492766288e50a6789c885f13b | [
"MIT"
] | null | null | null | from django import forms
from test1.models import OTest
from django.core import validators
from test1.models import Regist
from test1.models import Test_r
# (value, label) pairs used as RadioSelect choices by the forms below.

# Screening-questionnaire answers (TestForm1).
ans = [
    ('2', 'YES'),
    ('1', 'Frequently'),
    ('idk', 'NO'),
]

# Gender options (Regist).
ans2 = [
    ('M', 'Male'),
    ('F', 'Female'),
    ('O', 'Others'),
]

# Yes/no/unsure answers (Test2).
yn = [
    ('y', 'yes'),
    ('n', 'no'),
    ('idk', 'i dont know '),
]

# Left/right answers (Test3).
lr = [
    ('l', 'left'),
    ('r', 'right'),
    ('idk', 'i dont know '),
]

# Before/after answers (Test4).
ba = [
    ('b', 'Before'),
    ('a', 'after'),
    ('idk', 'i dont knw'),
]
class Test5(forms.ModelForm):
    """Arithmetic quiz: ten free-text addition/subtraction answers."""
    class Meta():
        model=Test_r
        fields='__all__'
    name= forms.CharField(label="enter name")
    # Hidden fields populated server-side, not by the child.
    type= forms.CharField(widget = forms.HiddenInput(),required=False)
    ans1= forms.CharField(label="3 + 2=")
    ans2= forms.CharField(label="5 + 5=")
    ans3= forms.CharField(label="10 - 6=")
    ans4= forms.CharField(label="8 - 8=")
    ans5= forms.CharField(label="11 + 0=")
    ans6= forms.CharField(label="5 + 4= ")
    ans7= forms.CharField(label="8 - 3=")
    ans8= forms.CharField(label="0 + 0=")
    ans9= forms.CharField(label="0 - 0=")
    ans10= forms.CharField(label="3 - 3=")
    # Total score, filled in after grading.
    Tscore=forms.CharField(widget = forms.HiddenInput(),required=False)
class Test2(forms.ModelForm):
    """Number-comparison quiz: ten yes/no radio questions (choices ``yn``)."""
    class Meta():
        model=Test_r
        fields='__all__'
    name= forms.CharField(label="enter name")
    # Hidden fields populated server-side, not by the child.
    type= forms.CharField(widget = forms.HiddenInput(),required=False)
    ans1= forms.CharField(label="10 > 5",widget=forms.RadioSelect(choices=yn))
    ans2= forms.CharField(label="2 > 5",widget=forms.RadioSelect(choices=yn))
    ans3= forms.CharField(label="6 > 5",widget=forms.RadioSelect(choices=yn))
    ans4= forms.CharField(label="7 > 7",widget=forms.RadioSelect(choices=yn))
    ans5= forms.CharField(label="10 > 11",widget=forms.RadioSelect(choices=yn))
    ans6= forms.CharField(label="5 > 15",widget=forms.RadioSelect(choices=yn))
    ans7= forms.CharField(label="23 > 51",widget=forms.RadioSelect(choices=yn))
    ans8= forms.CharField(label="30 > 50",widget=forms.RadioSelect(choices=yn))
    ans9= forms.CharField(label="300 > 335",widget=forms.RadioSelect(choices=yn))
    ans10= forms.CharField(label="5 > 0",widget=forms.RadioSelect(choices=yn))
    # Total score, filled in after grading.
    Tscore=forms.CharField(widget = forms.HiddenInput(),required=False)
class Test3(forms.ModelForm):
    """Digit-recognition quiz: pick left or right symbol (choices ``lr``)."""
    class Meta():
        model=Test_r
        fields='__all__'
    name= forms.CharField(label="enter name")
    # Hidden fields populated server-side, not by the child.
    type= forms.CharField(widget = forms.HiddenInput(),required=False)
    ans1= forms.CharField(label="Which one is SIX? 6 or 9",widget=forms.RadioSelect(choices=lr))
    ans2= forms.CharField(label="Which one is SEVEN? 1 or 7",widget=forms.RadioSelect(choices=lr))
    ans3= forms.CharField(label="Which one is EIGHT? 8 or 0",widget=forms.RadioSelect(choices=lr))
    ans4= forms.CharField(label="Which one is FIVE? 6 or 5",widget=forms.RadioSelect(choices=lr))
    ans5= forms.CharField(label="Which one is THREE? 8 or 3",widget=forms.RadioSelect(choices=lr))
    ans6= forms.CharField(label="Which one is ONE? 9 or 1",widget=forms.RadioSelect(choices=lr))
    ans7= forms.CharField(label="Which one is NINE? 9 or 6",widget=forms.RadioSelect(choices=lr))
    ans8= forms.CharField(label="Which one is TWO? 7 or 2",widget=forms.RadioSelect(choices=lr))
    ans9= forms.CharField(label="Which one is FOUR? 5 OR 4",widget=forms.RadioSelect(choices=lr))
    ans10= forms.CharField(label="Which one is TEN? 10 or 01",widget=forms.RadioSelect(choices=lr))
    # Total score, filled in after grading.
    Tscore=forms.CharField(widget = forms.HiddenInput(),required=False)
class Test4(forms.ModelForm):
    """Ordering quiz: does the first number come before or after the second
    (choices ``ba``)."""
    class Meta():
        model=Test_r
        fields='__all__'
    name= forms.CharField(label="enter name")
    # Hidden fields populated server-side, not by the child.
    type= forms.CharField(widget = forms.HiddenInput(),required=False)
    ans1= forms.CharField(label="10 ___ 12",widget=forms.RadioSelect(choices=ba))
    ans2= forms.CharField(label="1 ___ 0",widget=forms.RadioSelect(choices=ba))
    ans3= forms.CharField(label="110 ___ 101",widget=forms.RadioSelect(choices=ba))
    ans4= forms.CharField(label="102 ___ 120",widget=forms.RadioSelect(choices=ba))
    ans5= forms.CharField(label="40 ___ 44",widget=forms.RadioSelect(choices=ba))
    ans6= forms.CharField(label="58 ___ 50",widget=forms.RadioSelect(choices=ba))
    ans7= forms.CharField(label="3 ___ 6",widget=forms.RadioSelect(choices=ba))
    ans8= forms.CharField(label="80 ___ 76",widget=forms.RadioSelect(choices=ba))
    ans9= forms.CharField(label="34 ___ 43",widget=forms.RadioSelect(choices=ba))
    ans10= forms.CharField(label="99 ___ 96",widget=forms.RadioSelect(choices=ba))
    # Total score, filled in after grading.
    Tscore=forms.CharField(widget = forms.HiddenInput(),required=False)
class Regist(forms.ModelForm):
    """Child registration form.

    NOTE(review): this form class shadows the ``Regist`` model imported at the
    top of the file. ``model=Regist`` below still resolves to the model,
    because the class name is not bound until the class body finishes
    executing, but renaming the form (e.g. ``RegistForm``) would be clearer.
    """
    class Meta():
        model=Regist
        fields='__all__'
    name= forms.CharField(label="Enter name of child")
    # Exactly 10 digits are enforced via min/max length validators.
    phone=forms.CharField(label="Parent's phone number",validators=[validators.MinLengthValidator(10),validators.MaxLengthValidator(10),])
    age= forms.IntegerField(label="enter age of child", validators=[validators.MaxValueValidator(10),validators.MinValueValidator(3)])
    password1 = forms.CharField(widget=forms.PasswordInput(),validators=[validators.MinLengthValidator(8)])
    paremail= forms.EmailField(label="Parent's email id")
    gender=forms.CharField(label="select gender",widget=forms.RadioSelect(choices=ans2))
class TestForm1(forms.ModelForm):
    """Parent screening questionnaire: fifteen radio questions (choices ``ans``)."""
    class Meta():
        model=OTest
        fields='__all__'
    ans1= forms.CharField(label="Did your child struggle to learn to count?",widget=forms.RadioSelect(choices=ans))
    ans2= forms.CharField(label="Does she say numbers out of order — long after peers have mastered this skill?",widget=forms.RadioSelect(choices=ans))
    ans3= forms.CharField(label="Does your child have difficulty writing numbers clearly or keeping his work neat when solving math problems?",widget=forms.RadioSelect(choices=ans))
    ans4= forms.CharField(label="Does your child not seem to understand the connection between the symbol “4” and the word “four?” Does he make mistakes when reading or following directions involving number words and symbols?",widget=forms.RadioSelect(choices=ans))
    ans5= forms.CharField(label="Does your child struggle to connect the concept of numbers to real-world items? When you ask him how many cookies are left, for example, does he seem confused by the question or answer incorrectly?",widget=forms.RadioSelect(choices=ans))
    ans6= forms.CharField(label="Does your child not seem to understand the difference between adding and subtracting? Does she confuse the + and – symbols when completing math problems? ",widget=forms.RadioSelect(choices=ans))
    ans7= forms.CharField(label="Does your child still count on his fingers past third grade?",widget=forms.RadioSelect(choices=ans))
    ans8= forms.CharField(label="Does your child have difficulty telling time on an analog clock?",widget=forms.RadioSelect(choices=ans))
    ans9= forms.CharField(label="Does your child struggle to understand money, and have difficulty making change or sticking to a budget?",widget=forms.RadioSelect(choices=ans))
    ans10= forms.CharField(label="Does your child get lost, even in familiar surroundings?",widget=forms.RadioSelect(choices=ans))
    ans11= forms.CharField(label="Does your child struggle to read graphs or charts without help",widget=forms.RadioSelect(choices=ans))
    ans12= forms.CharField(label="Does your child struggle to sort objects by shape, color, or size?",widget=forms.RadioSelect(choices=ans))
    ans13= forms.CharField(label="Does your child have difficulty applying fractions to real-world objects? Is she unable to determine that a dollar equals four quarters, for instance, or that one-half of the year is equal to six months?",widget=forms.RadioSelect(choices=ans))
    ans14= forms.CharField(label="Does your child ever get unnaturally upset or complain of feeling ill while completing math homework?",widget=forms.RadioSelect(choices=ans))
    ans15= forms.CharField(label="Does your child have trouble solving word problems or multi-step math problems? Does she struggle to articulate what strategies she’ll use along the way?",widget=forms.RadioSelect(choices=ans))
    # Total score, filled in after grading.
    Tscore=forms.CharField(widget = forms.HiddenInput(),required=False)
    #Result=forms.CharField(widget = forms.HiddenInput(),required=False)
class Login(forms.Form):
    """Simple email/password login form (plain Form, not a ModelForm)."""
    email=forms.EmailField()
    password1=forms.CharField(widget=forms.PasswordInput())
| 54.898734 | 278 | 0.707517 |
0f2d6b81d46846cb2936f993ded4536c4f86a5f6 | 919 | py | Python | setup.py | perfide/px-totp | 28d8851cd4b5cb54a479e6b0a6a75cb209c42413 | [
"BSD-2-Clause"
] | 1 | 2021-06-26T23:18:33.000Z | 2021-06-26T23:18:33.000Z | setup.py | perfide/px-totp | 28d8851cd4b5cb54a479e6b0a6a75cb209c42413 | [
"BSD-2-Clause"
] | null | null | null | setup.py | perfide/px-totp | 28d8851cd4b5cb54a479e6b0a6a75cb209c42413 | [
"BSD-2-Clause"
] | 1 | 2021-06-26T23:18:35.000Z | 2021-06-26T23:18:35.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright:
# 2020 P. H. <github.com/perfide>
# License:
# BSD-2-Clause (BSD 2-Clause "Simplified" License)
# https://spdx.org/licenses/BSD-2-Clause.html
"""Configuration script for setuptools to install px-totp"""
import setuptools

# Read the README with an explicit encoding so installation does not depend
# on the platform's default locale.
with open('README.md', 'r', encoding='utf-8') as fh:
    LONG_DESCRIPTION = fh.read()

setuptools.setup(
    name='px-totp',
    version='0.0.1',
    scripts=['px-totp'],
    author='P. H.',
    author_email='px-totp.perfide@safersignup.de',
    description='Time-based One-Time Password Generator',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/perfide/px-totp',
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
    ],
)
| 27.029412 | 60 | 0.658324 |
84c55110f0d24a84b133c03ddc65c6c52b578bc3 | 1,608 | py | Python | exerciciosAC3.py | rodrigolins92/exercicios-diversos | c2ea729b7ea6d522d73ef228a5ab59d883ee1617 | [
"Apache-2.0"
] | null | null | null | exerciciosAC3.py | rodrigolins92/exercicios-diversos | c2ea729b7ea6d522d73ef228a5ab59d883ee1617 | [
"Apache-2.0"
] | null | null | null | exerciciosAC3.py | rodrigolins92/exercicios-diversos | c2ea729b7ea6d522d73ef228a5ab59d883ee1617 | [
"Apache-2.0"
] | null | null | null | dicionario_teste = {111: 'Nome1' , 222: 'Nome2', 333: 'Nome3'}
class Contato:
    """Simple address-book entry: name, phone number and e-mail."""

    def __init__(self, nome, telefone, email):
        self.nome = nome
        self.telefone = telefone
        self.email = email

    def imprime(self):
        """Return a one-line, human-readable description of the contact."""
        template = "Nome: {} , Telefone: {} , E-mail: {}"
        return template.format(self.nome, self.telefone, self.email)

    def reset(self):
        """Blank out every field of the contact."""
        self.nome = self.telefone = self.email = None
def conta_elementos(dicionario):
    """Return how many key/value pairs ``dicionario`` holds."""
    return len(dicionario)
def verifica_presenca(dicionario, chave):
    """Return True when ``chave`` is a key of ``dicionario``, else False.

    Uses the ``in`` operator directly on the dict instead of the redundant
    ``chave in dicionario.keys()`` plus if/else-returning-literals pattern.
    """
    return chave in dicionario
def insere(dicionario, chave, objeto):
    """Insert ``objeto`` under ``chave`` only when the key is absent.

    Returns True when the pair was inserted, False when the key already
    existed (the stored value is left untouched). Membership is tested with
    ``in`` directly on the dict instead of ``dicionario.keys()``.
    """
    if chave in dicionario:
        return False
    dicionario[chave] = objeto
    return True
def remove(dicionario, chave):
    """Remove ``chave`` from ``dicionario``.

    Returns True when the key existed and was removed, False otherwise.
    The stray debug ``print(dicionario)`` from the original implementation
    has been removed: a mutator should not write to stdout.
    """
    if chave in dicionario:
        del dicionario[chave]
        return True
    return False
def imprime(dicionario):
    """Print every key/value pair, framed by start/end banner lines."""
    print("+ Iniciando - Imprimindo informacoes do dicionario:")
    for chave, valor in dicionario.items():
        print("Chave [{}] : valor [{}]".format(chave, valor))
    print("+ Finalizando - Imprimindo informacoes do dicionario:")
#TESTES DAS FUNÇÕES ABAIXO
#print ( remove(dicionario_teste, 222) )
#print(imprime(dicionario_teste))
#obj = Contato('Rodrigo', 3781, 'rodrigo@uol')
#obj.imprime()
#print( insere(dicionario_teste, 666, 'Nome3') )
#print(verifica_presenca(dicionario_teste, 242))
#print(conta_elementos(dicionario_teste)) | 25.125 | 99 | 0.620647 |
db2d45d87a8ce892954d36a0f9753b38279ee44d | 7,094 | py | Python | libs/hhr/utils/utils.py | pranav1812/CV_research_assignment_submission | 62a2032c16a2c446645f7f5020b160e18c477eed | [
"MIT"
] | 258 | 2020-07-02T07:08:06.000Z | 2022-03-31T09:19:02.000Z | libs/hhr/utils/utils.py | pranav1812/CV_research_assignment_submission | 62a2032c16a2c446645f7f5020b160e18c477eed | [
"MIT"
] | 48 | 2020-07-24T03:30:48.000Z | 2022-03-29T11:32:42.000Z | libs/hhr/utils/utils.py | pranav1812/CV_research_assignment_submission | 62a2032c16a2c446645f7f5020b160e18c477eed | [
"MIT"
] | 37 | 2020-07-03T03:47:37.000Z | 2022-03-25T01:11:11.000Z | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Shichao Li (nicholas.li@connect.ust.hk)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from collections import namedtuple
from pathlib import Path
import torch
import torch.optim as optim
import torch.nn as nn
def create_logger(cfg, cfg_name, phase='train'):
    """Create output/log/tensorboard directories and configure a root logger.

    Returns (logger, final_output_dir, tensorboard_log_dir) as strings.
    Note: configures the *root* logger via logging.basicConfig, which is a
    process-wide side effect.
    """
    root_output_dir = Path(cfg.OUTPUT_DIR)
    # set up logger
    if not root_output_dir.exists():
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()
    # Conditional expression: uses DATASET + '_' + HYBRID_JOINTS_TYPE when a
    # hybrid joints type is configured, otherwise just DATASET.
    dataset = cfg.DATASET.DATASET + '_' + cfg.DATASET.HYBRID_JOINTS_TYPE \
        if cfg.DATASET.HYBRID_JOINTS_TYPE else cfg.DATASET.DATASET
    # ':' appears in some dataset identifiers; not valid in directory names.
    dataset = dataset.replace(':', '_')
    model = cfg.MODEL.NAME
    # Strip directory and extension from the config file name.
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = root_output_dir / dataset / model / cfg_name
    print('=> creating {}'.format(final_output_dir))
    final_output_dir.mkdir(parents=True, exist_ok=True)
    time_str = time.strftime('%Y-%m-%d-%H-%M')
    log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
    final_log_file = final_output_dir / log_file
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=str(final_log_file),
                        format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Also echo log records to the console.
    console = logging.StreamHandler()
    logging.getLogger('').addHandler(console)
    tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
        (cfg_name + '_' + time_str)
    print('=> creating {}'.format(tensorboard_log_dir))
    tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
    return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_optimizer(cfg, model):
    """Build the optimizer named by ``cfg.TRAIN.OPTIMIZER``.

    Supports 'sgd' and 'adam'; any other name yields None (matching the
    original behavior of leaving the optimizer unset).
    """
    name = cfg.TRAIN.OPTIMIZER
    if name == 'sgd':
        return optim.SGD(
            model.parameters(),
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV
        )
    if name == 'adam':
        return optim.Adam(
            model.parameters(),
            lr=cfg.TRAIN.LR
        )
    return None
def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth'):
    """Save `states` to output_dir/filename and, when `is_best`, also
    snapshot the best weights to 'model_best.pth'.

    Bug fix: the guard previously tested for the 'state_dict' key while
    reading 'best_state_dict', so a best checkpoint either raised KeyError
    or was silently skipped whenever the two keys diverged.
    """
    torch.save(states, os.path.join(output_dir, filename))
    # Only copy the best weights out when they were actually provided.
    if is_best and 'best_state_dict' in states:
        torch.save(states['best_state_dict'],
                   os.path.join(output_dir, 'model_best.pth'))
def get_model_summary(model, *input_tensors, item_length=26, verbose=False):
    """Run one forward pass and summarise the model layer by layer.

    :param model: the module to inspect (switched to eval mode).
    :param input_tensors: example inputs forwarded through the model.
    :param item_length: column width of the verbose per-layer table.
    :param verbose: when True, include a per-layer table in the result.
    :return: formatted string with parameter totals, multiply-add totals
        (Conv/Linear layers only) and per-class layer counts.
    """

    summary = []

    ModuleDetails = namedtuple(
        "Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"])
    hooks = []
    # Per-class instance counter so layer names become e.g. "Conv2d_3".
    layer_instances = {}

    def add_hooks(module):

        def hook(module, input, output):
            class_name = str(module.__class__.__name__)

            instance_index = 1
            if class_name not in layer_instances:
                layer_instances[class_name] = instance_index
            else:
                instance_index = layer_instances[class_name] + 1
                layer_instances[class_name] = instance_index

            layer_name = class_name + "_" + str(instance_index)

            params = 0

            # Parameters are only tallied for Conv/BatchNorm/Linear layers.
            if class_name.find("Conv") != -1 or class_name.find("BatchNorm") != -1 or \
               class_name.find("Linear") != -1:
                for param_ in module.parameters():
                    params += param_.view(-1).size(0)

            # Multiply-adds: kernel volume x spatial output for convs,
            # output size x input features for linear layers.
            flops = "Not Available"
            if class_name.find("Conv") != -1 and hasattr(module, "weight"):
                flops = (
                    torch.prod(
                        torch.LongTensor(list(module.weight.data.size()))) *
                    torch.prod(
                        torch.LongTensor(list(output.size())[2:]))).item()
            elif isinstance(module, nn.Linear):
                flops = (torch.prod(torch.LongTensor(list(output.size()))) \
                         * input[0].size(1)).item()

            if isinstance(input[0], list):
                input = input[0]
            if isinstance(output, list):
                output = output[0]

            summary.append(
                ModuleDetails(
                    name=layer_name,
                    input_size=list(input[0].size()),
                    output_size=list(output.size()),
                    num_parameters=params,
                    multiply_adds=flops)
            )

        # Skip containers and the root module; hook everything else.
        if not isinstance(module, nn.ModuleList) \
           and not isinstance(module, nn.Sequential) \
           and module != model:
            hooks.append(module.register_forward_hook(hook))

    model.eval()
    model.apply(add_hooks)

    space_len = item_length

    # A single forward pass fires every hook and fills `summary`.
    model(*input_tensors)
    for hook in hooks:
        hook.remove()

    details = ''
    if verbose:
        details = "Model Summary" + \
            os.linesep + \
            "Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format(
                ' ' * (space_len - len("Name")),
                ' ' * (space_len - len("Input Size")),
                ' ' * (space_len - len("Output Size")),
                ' ' * (space_len - len("Parameters")),
                ' ' * (space_len - len("Multiply Adds (Flops)"))) \
            + os.linesep + '-' * space_len * 5 + os.linesep

    params_sum = 0
    flops_sum = 0
    for layer in summary:
        params_sum += layer.num_parameters
        if layer.multiply_adds != "Not Available":
            flops_sum += layer.multiply_adds
        if verbose:
            details += "{}{}{}{}{}{}{}{}{}{}".format(
                layer.name,
                ' ' * (space_len - len(layer.name)),
                layer.input_size,
                ' ' * (space_len - len(str(layer.input_size))),
                layer.output_size,
                ' ' * (space_len - len(str(layer.output_size))),
                layer.num_parameters,
                ' ' * (space_len - len(str(layer.num_parameters))),
                layer.multiply_adds,
                ' ' * (space_len - len(str(layer.multiply_adds)))) \
                + os.linesep + '-' * space_len * 5 + os.linesep

    details += os.linesep \
        + "Total Parameters: {:,}".format(params_sum) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    # NOTE(review): divides by 1024**3 while labelling the result GFLOPs.
    details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,} GFLOPs".format(flops_sum/(1024**3)) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    details += "Number of Layers" + os.linesep
    for layer in layer_instances:
        details += "{} : {} layers ".format(layer, layer_instances[layer])

    return details
| 34.604878 | 120 | 0.561178 |
ede2df3fe7c8bf8ecfce0823e8c37566cd20a316 | 1,428 | py | Python | check_configusage.py | ggmirandac/Template-Informe | 1fc16310ef4b6f4ba414c6c7efe4c0eb7c3e1049 | [
"MIT"
] | 72 | 2017-07-26T01:09:38.000Z | 2022-03-31T10:22:24.000Z | check_configusage.py | ggmirandac/Template-Informe | 1fc16310ef4b6f4ba414c6c7efe4c0eb7c3e1049 | [
"MIT"
] | 8 | 2018-04-21T18:09:36.000Z | 2021-12-03T15:44:07.000Z | check_configusage.py | ggmirandac/Template-Informe | 1fc16310ef4b6f4ba414c6c7efe4c0eb7c3e1049 | [
"MIT"
] | 41 | 2017-08-16T18:13:10.000Z | 2022-03-20T19:48:09.000Z | # Chequea el total de configuraciones utilizados
# uso (win): py -3 check_configusage.py ..\Template-Tesis
# uso (mac): python3 Template-Informe/check_configusage.py Template-Poster
import sys
assert len(sys.argv) == 2, 'Debe pasar path'
p = sys.argv[1]
import os
assert os.path.isdir(p), 'Argumento de función "{0}" debe ser una carpeta con el template'.format(p)
# Busca el archivo del template
os.chdir(p)
templf = 'template.tex'
conff = 'template_config.tex'
assert os.path.exists(templf), 'Archivo template.tex dentro de directorio no existe'
assert os.path.exists(conff), 'Archivo template_config.tex dentro de directorio no existe'
tdata = []
f = open(templf, 'r', encoding = 'utf8')
for j in f:
tdata.append(j.strip())
f.close()
def count_def(x) -> int:
    """Count how many cached template lines contain the substring `x`.

    Relies on the module-level `tdata` list populated from template.tex.
    """
    return sum(1 for line in tdata if x in line)
# Parse the config file: collect every macro declared via \def.
f = open(conff, 'r', encoding = 'utf8')
conf = []
for j in f:
    j = j.split('{')
    if len(j)>0:
        j = j[0]
    if '\\def' not in j:
        continue
    # Keep only the macro name: drop the \def keyword, backslashes, spaces.
    j = j.replace('\\def', '').replace('\\', '').replace(' ', '').strip()
    conf.append(j)
conf.sort()
# Finally, report the usage count of every configuration macro.
print('Resultados análisis para {0} ({1} configuraciones):'.format(p, len(conf)))
for j in conf:
    k = '\\{0}'.format(j).ljust(30)
    r = count_def(j)
print('{0}{1}{2}'.format(k, r, ' <===' if r == 0 else '')) | 29.142857 | 100 | 0.635154 |
1c79f1a0985f12ff32fae237e2face95e83c3617 | 2,379 | py | Python | imagepy/menus/Table/Statistic/statistic_plgs.py | muxevola/imagepy | d6d8cbf214f47a4a545a0d283ae393a6932c4c0f | [
"BSD-4-Clause"
] | null | null | null | imagepy/menus/Table/Statistic/statistic_plgs.py | muxevola/imagepy | d6d8cbf214f47a4a545a0d283ae393a6932c4c0f | [
"BSD-4-Clause"
] | null | null | null | imagepy/menus/Table/Statistic/statistic_plgs.py | muxevola/imagepy | d6d8cbf214f47a4a545a0d283ae393a6932c4c0f | [
"BSD-4-Clause"
] | null | null | null | from imagepy.core.engine import Table
import pandas as pd
from imagepy import IPy
class Statistic(Table):
    """Whole-table statistics (sum/mean/max/...) along rows or columns."""
    title = 'Table Statistic'
    note = ['snap', 'only_num', 'row_msk', 'col_msk']

    # Order matters: it fixes both the dialog layout and the result columns.
    _stats = ('sum', 'mean', 'max', 'min', 'var', 'std', 'skew', 'kurt')

    para = {'axis': 'Column', 'sum': True, 'mean': True, 'max': False,
            'min': False, 'var': False, 'std': False, 'skew': False, 'kurt': False}

    view = [(list, 'axis', ['Row', 'Column'], str, 'axis', '')] + \
           [(bool, name, name) for name in _stats]

    def run(self, tps, data, snap, para=None):
        """Compute every enabled statistic on `snap` and show the result."""
        axis = 1 if para['axis'] == 'Row' else 0
        rst = {}
        for name in self._stats:
            if para[name]:
                # DataFrame exposes each statistic as a method of same name.
                rst[name] = getattr(snap, name)(axis=axis)
        IPy.show_table(pd.DataFrame(rst), tps.title + '-statistic')
class GroupStatistic(Table):
    """Grouped statistics: aggregate selected fields by one or two keys."""
    title = 'Group Statistic'
    # NOTE(review): 'kurt' appears in para but has no view entry and is
    # never applied in run() — confirm whether that is intentional.
    para = {'major':None, 'minor':None, 'sum':True, 'mean':True,'max':False,
            'min':False,'var':False,'std':False,'skew':False,'kurt':False, 'cn':[]}
    view = [('fields', 'cn', 'field to statistic'),
            ('field', 'major', 'group by', 'major'),
            ('field', 'minor', 'group by', 'key'),
            (bool, 'sum', 'sum'),
            (bool, 'mean', 'mean'),
            (bool, 'max', 'max'),
            (bool, 'min', 'min'),
            (bool, 'var', 'var'),
            (bool, 'std', 'std'),
            (bool, 'skew', 'skew')]

    def run(self, tps, data, snap, para=None):
        """Group `data` by the chosen key(s), aggregate, and display."""
        # Drop unset group keys (the dialog encodes "unset" as 'None').
        by = [i for i in [para['major'], para['minor']] if i!='None']
        gp = data.groupby(by)[para['cn']]
        rst = []
        def post(a, fix):
            # Suffix each aggregated column with the statistic's name.
            a.columns = ['%s-%s'%(i,fix) for i in a.columns]
            return a
        if para['sum']:rst.append(post(gp.sum(), 'sum'))
        if para['mean']:rst.append(post(gp.mean(), 'mean'))
        if para['max']:rst.append(post(gp.max(), 'max'))
        if para['min']:rst.append(post(gp.min(), 'min'))
        if para['var']:rst.append(post(gp.var(), 'var'))
        if para['std']:rst.append(post(gp.std(), 'std'))
        if para['skew']:rst.append(post(gp.skew(), 'skew'))
        IPy.show_table(pd.concat(rst, axis=1), tps.title+'-statistic')
plgs = [Statistic, GroupStatistic] | 33.507042 | 74 | 0.575452 |
72ba23446198258d8424d44f196e3157cae44f9c | 17,386 | py | Python | PixivDownloadHandler.py | TimothyCSULB/PixivUtil2 | d8d99eb84b3b8eef2dbe9caa3abe852ee47c52f9 | [
"BSD-2-Clause"
] | null | null | null | PixivDownloadHandler.py | TimothyCSULB/PixivUtil2 | d8d99eb84b3b8eef2dbe9caa3abe852ee47c52f9 | [
"BSD-2-Clause"
] | null | null | null | PixivDownloadHandler.py | TimothyCSULB/PixivUtil2 | d8d99eb84b3b8eef2dbe9caa3abe852ee47c52f9 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import codecs
import gc
import os
import sys
import time
import traceback
import urllib
import mechanize
import PixivBrowserFactory
import PixivConstant
from PixivException import PixivException
import PixivHelper
import PixivConfig
def download_image(caller,
                   url,
                   filename,
                   referer,
                   overwrite,
                   max_retry,
                   backup_old_file=False,
                   image=None,
                   page=None,
                   notifier=None):
    '''Download `url` to `filename` with duplicate checks and retries.

    Returns a (result_code, filename) tuple where result_code is one of
    the PixivConstant.PIXIVUTIL_* codes; filename may be None on failure.
    Re-raises the last error once `max_retry` attempts are exhausted.
    '''
    # caller function/method
    # TODO: ideally to be removed or passed as argument
    db = caller.__dbManager__
    config: PixivConfig = caller.__config__
    if notifier is None:
        notifier = PixivHelper.dummy_notifier
    temp_error_code = None
    retry_count = 0

    # Issue #548
    filename_save = filename

    # Probe whether the filesystem accepts UTF-8 names; test once and
    # cache the result on the caller.
    if caller.UTF8_FS is None:
        filename_test = os.path.dirname(filename_save) + os.sep + "あいうえお"
        try:
            PixivHelper.makeSubdirs(filename_test)
            test_utf = open(filename_test + '.test', "wb")
            test_utf.close()
            os.remove(filename_test + '.test')
            caller.UTF8_FS = True
        except UnicodeEncodeError:
            caller.UTF8_FS = False

    if not caller.UTF8_FS:
        filename_save = filename.encode('utf-8')  # For file operations, force the usage of a utf-8 encoded filename

    while retry_count <= max_retry:
        res = None
        req = None
        try:
            try:
                # Cheap existence short-circuit when size checking is off.
                if not overwrite and not config.alwaysCheckFileSize:
                    PixivHelper.print_and_log(None, '\rChecking local filename...', newline=False)
                    if os.path.isfile(filename_save):
                        PixivHelper.print_and_log('info', f"\rLocal file exists: (unknown)")
                        return (PixivConstant.PIXIVUTIL_SKIP_DUPLICATE, filename_save)

                # Issue #807: skip when mtime matches the remote work date.
                if config.checkLastModified and os.path.isfile(filename_save) and image is not None:
                    local_timestamp = os.path.getmtime(filename_save)
                    remote_timestamp = time.mktime(image.worksDateDateTime.timetuple())
                    if local_timestamp == remote_timestamp:
                        PixivHelper.print_and_log('info', f"\rLocal file timestamp match with remote: (unknown) => {image.worksDateDateTime}")
                        return (PixivConstant.PIXIVUTIL_SKIP_DUPLICATE, filename_save)

                remote_file_size = get_remote_filesize(url, referer, config, notifier)

                # 837: -1 means the remote size could not be determined.
                if config.skipUnknownSize and os.path.isfile(filename_save) and remote_file_size == -1:
                    PixivHelper.print_and_log('info', f"\rSkipped because file exists and cannot get remote file size for: (unknown)")
                    return (PixivConstant.PIXIVUTIL_SKIP_DUPLICATE, filename_save)

                # 576: enforce configured min/max size limits.
                if remote_file_size > 0:
                    if config.minFileSize != 0 and remote_file_size <= config.minFileSize:
                        result = PixivConstant.PIXIVUTIL_SIZE_LIMIT_SMALLER
                        return (result, filename_save)
                    if config.maxFileSize != 0 and remote_file_size >= config.maxFileSize:
                        result = PixivConstant.PIXIVUTIL_SIZE_LIMIT_LARGER
                        return (result, filename_save)

                # check if existing ugoira file exists
                if filename.endswith(".zip"):
                    # non-converted zip (no animation.json)
                    if os.path.isfile(filename_save):
                        old_size = os.path.getsize(filename_save)
                        # update for #451, always return identical?
                        check_result = PixivHelper.check_file_exists(overwrite, filename_save, remote_file_size, old_size, backup_old_file)
                        if config.createUgoira:
                            handle_ugoira(image, filename_save, config, notifier)
                        return (check_result, filename)
                    # converted to ugoira (has animation.json)
                    ugo_name = filename[:-4] + ".ugoira"
                    if os.path.isfile(ugo_name):
                        old_size = PixivHelper.get_ugoira_size(ugo_name)
                        check_result = PixivHelper.check_file_exists(overwrite, ugo_name, remote_file_size, old_size, backup_old_file)
                        if check_result != PixivConstant.PIXIVUTIL_OK:
                            # try to convert existing file.
                            handle_ugoira(image, filename_save, config, notifier)
                        return (check_result, filename)
                elif os.path.isfile(filename_save):
                    # other image? files
                    old_size = os.path.getsize(filename_save)
                    check_result = PixivHelper.check_file_exists(overwrite, filename, remote_file_size, old_size, backup_old_file)
                    if check_result != PixivConstant.PIXIVUTIL_OK:
                        return (check_result, filename)

                # check based on filename stored in DB using image id
                if image is not None:
                    db_filename = None
                    if page is not None:
                        row = db.selectImageByImageIdAndPage(image.imageId, page)
                        if row is not None:
                            db_filename = row[2]
                    else:
                        row = db.selectImageByImageId(image.imageId)
                        if row is not None:
                            db_filename = row[3]
                    if db_filename is not None and os.path.isfile(db_filename):
                        old_size = os.path.getsize(db_filename)
                        # if file_size < 0:
                        #     file_size = get_remote_filesize(url, referer)
                        check_result = PixivHelper.check_file_exists(overwrite, db_filename, remote_file_size, old_size, backup_old_file)
                        if check_result != PixivConstant.PIXIVUTIL_OK:
                            ugo_name = None
                            if db_filename.endswith(".zip"):
                                ugo_name = filename[:-4] + ".ugoira"
                                if config.createUgoira:
                                    handle_ugoira(image, db_filename, config, notifier)
                            if db_filename.endswith(".ugoira"):
                                ugo_name = db_filename
                                handle_ugoira(image, db_filename, config, notifier)
                            return (check_result, db_filename)

                # actual download
                notifier(type="DOWNLOAD", message=f"Start downloading {url} to {filename_save}")
                (downloadedSize, filename_save) = perform_download(url, remote_file_size, filename_save, overwrite, config, referer)
                # set last-modified and last-accessed timestamp
                if image is not None and config.setLastModified and filename_save is not None and os.path.isfile(filename_save):
                    ts = time.mktime(image.worksDateDateTime.timetuple())
                    os.utime(filename_save, (ts, ts))

                # check the downloaded file size again
                if remote_file_size > 0 and downloadedSize != remote_file_size:
                    raise PixivException(f"Incomplete Downloaded for {url}", PixivException.DOWNLOAD_FAILED_OTHER)
                elif config.verifyImage and filename_save.endswith((".jpg", ".png", ".gif")):
                    # Verify the image decodes; delete and re-raise on failure.
                    fp = None
                    try:
                        from PIL import Image, ImageFile
                        fp = open(filename_save, "rb")
                        # Fix Issue #269, refer to https://stackoverflow.com/a/42682508
                        ImageFile.LOAD_TRUNCATED_IMAGES = True
                        img = Image.open(fp)
                        img.load()
                        fp.close()
                        PixivHelper.print_and_log('info', ' Image verified.')
                    except BaseException:
                        if fp is not None:
                            fp.close()
                        PixivHelper.print_and_log('info', ' Image invalid, deleting...')
                        os.remove(filename_save)
                        raise
                elif config.verifyImage and filename_save.endswith((".ugoira", ".zip")):
                    # Verify archive integrity with zipfile's CRC test.
                    fp = None
                    try:
                        import zipfile
                        fp = open(filename_save, "rb")
                        zf = zipfile.ZipFile(fp)
                        check_result = None
                        try:
                            check_result = zf.testzip()
                        except RuntimeError as e:
                            if 'encrypted' in str(e):
                                PixivHelper.print_and_log('info', ' archive is encrypted, cannot verify.')
                            else:
                                raise
                        fp.close()
                        if check_result is None:
                            PixivHelper.print_and_log('info', ' Image verified.')
                        else:
                            PixivHelper.print_and_log('info', f' Corrupted file in archive: {check_result}.')
                            raise PixivException(f"Incomplete Downloaded for {url}", PixivException.DOWNLOAD_FAILED_OTHER)
                    except BaseException:
                        if fp is not None:
                            fp.close()
                        PixivHelper.print_and_log('info', ' Image invalid, deleting...')
                        os.remove(filename_save)
                        raise
                else:
                    PixivHelper.print_and_log('info', ' done.')

                # write to downloaded lists
                if caller.start_iv or config.createDownloadLists:
                    dfile = codecs.open(caller.dfilename, 'a+', encoding='utf-8')
                    dfile.write(filename + "\n")
                    dfile.close()

                return (PixivConstant.PIXIVUTIL_OK, filename)

            except urllib.error.HTTPError as httpError:
                PixivHelper.print_and_log('error', f'[download_image()] HTTP Error: {httpError} at {url}')
                if httpError.code == 404 or httpError.code == 502 or httpError.code == 500:
                    return (PixivConstant.PIXIVUTIL_NOT_OK, None)
                temp_error_code = PixivException.DOWNLOAD_FAILED_NETWORK
                raise
            except urllib.error.URLError as urlError:
                PixivHelper.print_and_log('error', f'[download_image()] URL Error: {urlError} at {url}')
                temp_error_code = PixivException.DOWNLOAD_FAILED_NETWORK
                raise
            except IOError as ioex:
                # errno 28 == ENOSPC (disk full): let the user free space.
                if ioex.errno == 28:
                    PixivHelper.print_and_log('error', str(ioex))
                    input("Press Enter to retry.")
                    return (PixivConstant.PIXIVUTIL_NOT_OK, None)
                temp_error_code = PixivException.DOWNLOAD_FAILED_IO
                raise
            except KeyboardInterrupt:
                PixivHelper.print_and_log('info', 'Aborted by user request => Ctrl-C')
                return (PixivConstant.PIXIVUTIL_ABORTED, None)
            finally:
                if res is not None:
                    del res
                if req is not None:
                    del req

        except BaseException:
            # Record an error code, log, then retry or re-raise when the
            # retry budget is exhausted.
            if temp_error_code is None:
                temp_error_code = PixivException.DOWNLOAD_FAILED_OTHER
            caller.ERROR_CODE = temp_error_code
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            PixivHelper.print_and_log('error', f'Error at download_image(): {sys.exc_info()} at {url} ({caller.ERROR_CODE})')
            if retry_count < max_retry:
                retry_count = retry_count + 1
                PixivHelper.print_and_log(None, f"\rRetrying [{retry_count}]...", newline=False)
                PixivHelper.print_delay(config.retryWait)
            else:
                raise
def perform_download(url, file_size, filename, overwrite, config, referer=None, notifier=None):
    """Open `url` and stream it to `filename`.

    Returns (downloaded_size, filename); `filename` may have been adjusted
    by the helper that performs the actual write.
    """
    if notifier is None:
        notifier = PixivHelper.dummy_notifier
    if referer is None:
        referer = config.referer

    # actual download
    PixivHelper.print_and_log(None, '\rStart downloading...', newline=False)

    request = PixivHelper.create_custom_request(url, config, referer)
    browser = PixivBrowserFactory.getBrowser(config=config)
    response = browser.open_novisit(request)

    # Late size probe so the progress bar has a total when the caller
    # did not already know the remote size.
    if file_size < 0:
        try:
            length_header = response.info()['Content-Length']
            if length_header is not None:
                file_size = int(length_header)
        except KeyError:
            file_size = -1
            PixivHelper.print_and_log('info', "\tNo file size information!")

    downloaded_size, filename = PixivHelper.download_image(url, filename, response, file_size, overwrite)
    response.close()
    gc.collect()
    return (downloaded_size, filename)
# issue #299
# issue #299
def get_remote_filesize(url, referer, config, notifier=None):
    """Return the remote Content-Length for `url`, or -1 when unknown."""
    if notifier is None:
        notifier = PixivHelper.dummy_notifier

    PixivHelper.print_and_log(None, 'Getting remote filesize...')
    # open with HEAD method, might be expensive
    req = PixivHelper.create_custom_request(url, config, referer, head=True)
    file_size = -1

    try:
        br = PixivBrowserFactory.getBrowser(config=config)
        res = br.open_novisit(req)
        content_length = res.info()['Content-Length']
        if content_length is not None:
            file_size = int(content_length)
        else:
            PixivHelper.print_and_log('info', "\tNo file size information!")
        res.close()
    except KeyError:
        # Header missing entirely: treat the size as unknown (-1).
        PixivHelper.print_and_log('info', "\tNo file size information!")
    except mechanize.HTTPError as e:
        # fix Issue #503
        # handle http errors explicit by code; 404/500 just mean
        # "size unknown", anything else is propagated to the caller.
        if int(e.code) in (404, 500):
            PixivHelper.print_and_log('info', "\tNo file size information!")
        else:
            raise

    PixivHelper.print_and_log(None, f"Remote filesize = {PixivHelper.size_in_str(file_size)} ({file_size} Bytes)")
    return file_size
def handle_ugoira(image, zip_filename, config, notifier):
    """Create a .ugoira archive from a downloaded zip and convert it to
    every animation format enabled in `config` (gif/apng/webm/webp),
    then optionally delete the intermediate files.
    """
    if notifier is None:
        notifier = PixivHelper.dummy_notifier

    # Derive the .ugoira path; a non-zip input is already the archive.
    if zip_filename.endswith(".zip"):
        ugo_name = zip_filename[:-4] + ".ugoira"
    else:
        ugo_name = zip_filename

    if not os.path.exists(ugo_name):
        PixivHelper.print_and_log('info', f"Creating ugoira archive => {ugo_name}")
        image.create_ugoira(zip_filename)
        # set last-modified and last-accessed timestamp
        if config.setLastModified and ugo_name is not None and os.path.isfile(ugo_name):
            ts = time.mktime(image.worksDateDateTime.timetuple())
            os.utime(ugo_name, (ts, ts))

    stem = ugo_name[:-7]  # strip the ".ugoira" suffix

    if config.createGif:
        target = stem + ".gif"
        if not os.path.exists(target):
            PixivHelper.ugoira2gif(ugo_name, target, image=image)

    if config.createApng:
        target = stem + ".png"
        if not os.path.exists(target):
            PixivHelper.ugoira2apng(ugo_name, target, image=image)

    if config.createWebm:
        target = stem + "." + config.ffmpegExt
        if not os.path.exists(target):
            PixivHelper.ugoira2webm(ugo_name,
                                    target,
                                    config.ffmpeg,
                                    config.ffmpegCodec,
                                    config.ffmpegParam,
                                    config.ffmpegExt,
                                    image)

    if config.createWebp:
        target = stem + ".webp"
        if not os.path.exists(target):
            PixivHelper.ugoira2webm(ugo_name,
                                    target,
                                    config.ffmpeg,
                                    config.webpCodec,
                                    config.webpParam,
                                    "webp",
                                    image)

    # Clean up intermediates when configured to do so.
    if config.deleteZipFile and os.path.exists(zip_filename) and zip_filename.endswith(".zip"):
        PixivHelper.print_and_log('info', f"Deleting zip file => {zip_filename}")
        os.remove(zip_filename)

    if config.deleteUgoira and os.path.exists(ugo_name) and ugo_name.endswith(".ugoira"):
        PixivHelper.print_and_log('info', f"Deleting ugoira file => {ugo_name}")
        os.remove(ugo_name)
| 46.486631 | 143 | 0.552226 |
8bb4808f351e53087ca54398c6df591e2eefb31e | 199 | py | Python | docs/materials/osg/files/wn-geoip/src/geoip2/__init__.py | eharstad/virtual-school-pilot-2020 | d27b77f9e2bd51c5b602c2306329c97265c0c361 | [
"CC-BY-4.0"
] | 2 | 2020-11-26T06:07:28.000Z | 2021-09-17T01:33:56.000Z | docs/materials/osg/files/wn-geoip/src/geoip2/__init__.py | eharstad/virtual-school-pilot-2020 | d27b77f9e2bd51c5b602c2306329c97265c0c361 | [
"CC-BY-4.0"
] | 21 | 2020-07-09T12:53:43.000Z | 2021-01-21T16:24:46.000Z | docs/materials/osg/files/wn-geoip/src/geoip2/__init__.py | eharstad/virtual-school-pilot-2020 | d27b77f9e2bd51c5b602c2306329c97265c0c361 | [
"CC-BY-4.0"
] | 12 | 2020-07-02T19:02:23.000Z | 2020-07-20T22:39:14.000Z | # pylint:disable=C0111
# Distribution metadata for the geoip2 package.
__title__ = 'geoip2'
__version__ = '3.0.0'
__author__ = 'Gregory Oschwald'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright (c) 2013-2019 Maxmind, Inc.'
| 24.875 | 55 | 0.733668 |
ca5466b91581713d678d0cb045d377cb00497b22 | 230 | py | Python | sanic/sanic-0.1.9/sanic/log.py | Mu-L/annotated-py-projects | a5ce6a3c87d13243f745c334bab43cd77455ff3f | [
"MIT"
] | 315 | 2019-03-07T02:10:17.000Z | 2022-03-31T09:00:20.000Z | sanic-0.1.9/sanic/log.py | LeeXN/annotated-py-sanic | 710364474b6a8fbc30bec9bb587d33775d9b0b70 | [
"MIT"
] | 1 | 2020-03-07T12:05:47.000Z | 2021-05-28T07:34:31.000Z | sanic-0.1.9/sanic/log.py | LeeXN/annotated-py-sanic | 710364474b6a8fbc30bec9bb587d33775d9b0b70 | [
"MIT"
] | 96 | 2019-03-08T06:56:36.000Z | 2022-03-21T04:07:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging

#
# Logging configuration:
# - log level and record format
#
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s: %(levelname)s: %(message)s")

#
# Module-level logger instance shared across the package.
#
log = logging.getLogger(__name__)
| 14.375 | 73 | 0.634783 |
00a8b641897b43f33d30afa41d1dd331cd7a404f | 2,875 | py | Python | storage/nox.py | butla/google-cloud-python | 2f24f859a6691a7ee316706def587bd19e4e7cd9 | [
"Apache-2.0"
] | null | null | null | storage/nox.py | butla/google-cloud-python | 2f24f859a6691a7ee316706def587bd19e4e7cd9 | [
"Apache-2.0"
] | null | null | null | storage/nox.py | butla/google-cloud-python | 2f24f859a6691a7ee316706def587bd19e4e7cd9 | [
"Apache-2.0"
] | 1 | 2022-03-24T01:37:10.000Z | 2022-03-24T01:37:10.000Z | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
# Sibling packages installed from source alongside this one.
LOCAL_DEPS = ('../core/',)
@nox.session
@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6'])
def unit_tests(session, python_version):
    """Run the unit test suite on every supported interpreter."""
    session.interpreter = 'python{}'.format(python_version)

    # Test dependencies first, then this package as an editable install.
    session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
    session.install('-e', '.')

    # Unit tests with coverage collection and a hard coverage floor.
    session.run(
        'py.test', '--quiet',
        '--cov=google.cloud.storage', '--cov=tests.unit', '--cov-append',
        '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97',
        'tests/unit',
    )
@nox.session
@nox.parametrize('python_version', ['2.7', '3.6'])
def system_tests(session, python_version):
    """Run the system test suite against live services."""
    # Sanity check: only run system tests when credentials are configured.
    if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
        return

    # System tests run on the latest Python 2 and Python 3 only.
    session.interpreter = 'python{}'.format(python_version)

    # Install every test dependency, then this package itself.
    session.install('mock', 'pytest', *LOCAL_DEPS)
    session.install('../test_utils/')
    session.install('.')

    session.run('py.test', '--quiet', 'tests/system.py')
@nox.session
def lint(session):
    """Run flake8; fail on lint errors or serious code-quality issues."""
    session.interpreter = 'python3.6'

    session.install('flake8', *LOCAL_DEPS)
    session.install('.')

    session.run('flake8', 'google/cloud/storage')
@nox.session
def cover(session):
    """Aggregate unit-test coverage, enforce 100%, then erase the data."""
    session.interpreter = 'python3.6'

    session.install('coverage', 'pytest-cov')

    # Report coverage accumulated by the unit-test sessions, then reset.
    session.run('coverage', 'report', '--show-missing', '--fail-under=100')
    session.run('coverage', 'erase')
| 31.593407 | 77 | 0.68487 |
61c145e865b3bc803499cad65b46864cd2437d49 | 2,176 | py | Python | utilities/get_top_gainers.py | csmith1210/kucoin-volatility-trading-bot | a5da02c112cf34c08ecdf67ee8dfad486dd269ef | [
"MIT"
] | null | null | null | utilities/get_top_gainers.py | csmith1210/kucoin-volatility-trading-bot | a5da02c112cf34c08ecdf67ee8dfad486dd269ef | [
"MIT"
] | null | null | null | utilities/get_top_gainers.py | csmith1210/kucoin-volatility-trading-bot | a5da02c112cf34c08ecdf67ee8dfad486dd269ef | [
"MIT"
] | null | null | null | # get the top 30 coinmarketcap gainers for the past 7d
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from kucoin.client import Market
import os, json, yaml, time
if os.path.exists('./signalsample.txt'):
os.remove('./signalsample.txt')
def load_config(file):
    """Parse the YAML file at `file`; terminate the process on any failure."""
    try:
        with open(file) as handle:
            return yaml.load(handle, Loader=yaml.FullLoader)
    except FileNotFoundError:
        exit(f'Could not find {file}')
    except Exception as e:
        exit(f'Encountered exception...\n {e}')
CREDS_FILEPATH = './creds.yml' # replace with path to your creds file
parsed_creds = load_config(CREDS_FILEPATH)
coinmarketcap_API_key = parsed_creds['cmc']['key']

# Symbols excluded from the signal list (pegged coins never "gain").
stable_coins = ['USDT','USDC','BUSD','DAI','BTCB','UST','TUSD','PAX','HUSD','RSR','USDN','GUSD','FEI','LUSD','FRAX','VAI','EURS','QC', 'USDJ','SUSD']

# set up API call
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
  'start':'1',
  'limit':'1000', # number of tickers to return
  'sort':'percent_change_7d', # sort based on % change in 7d and sort desc automatically
}
headers = {
  'Accepts': 'application/json',
  'X-CMC_PRO_API_KEY': coinmarketcap_API_key,
}
session = Session()
session.headers.update(headers)

# call API
try:
    response = session.get(url, params=parameters)
    tickers = json.loads(response.text)['data']
except (ConnectionError, Timeout, TooManyRedirects) as e:
    print(e)

market = Market(url='https://api.kucoin.com')
tracker = 0
for ticker in tickers:
    # Stop after 30 sufficiently liquid tickers have been accepted.
    if tracker >= 30:
        break
    symbol = ticker['symbol'] + '-USDT'
    kucoin_ticker = market.get_24h_stats(symbol)
    # Only consider pairs with >= 500k USDT of 24h volume on KuCoin.
    # NOTE(review): 'not x == None' should read 'x is not None'; also a
    # stable coin still consumes one of the 30 slots — confirm intended.
    if not kucoin_ticker['volValue'] == None and float(kucoin_ticker['volValue']) >= 500000:
        tracker += 1
        if ticker['symbol'] not in stable_coins:
            with open('./signalsample.txt','a+') as f1:
                f1.write(ticker['symbol'] + '\n')
            # Append to tickers.txt only when the symbol is not already listed.
            with open('./tickers.txt','a+') as f2:
                f2.seek(0, os.SEEK_SET)
                line_found = any(ticker['symbol'] in line for line in f2)
                if not line_found:
                    f2.seek(0, os.SEEK_END)
f2.write(ticker['symbol'] + '\n') | 35.672131 | 149 | 0.674632 |
e848bd6fc993a8da80af210666bb365ea78f954e | 11,536 | py | Python | conans/test/functional/generators/components/pkg_config_test.py | ssaavedra/conan | e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c | [
"MIT"
] | 1 | 2021-08-05T15:33:08.000Z | 2021-08-05T15:33:08.000Z | conans/test/functional/generators/components/pkg_config_test.py | ssaavedra/conan | e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c | [
"MIT"
] | null | null | null | conans/test/functional/generators/components/pkg_config_test.py | ssaavedra/conan | e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c | [
"MIT"
] | null | null | null | import os
import textwrap
import unittest
import pytest
from conans.client.tools import PkgConfig, environment_append
from conans.model.ref import ConanFileReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
@pytest.mark.tool_compiler
class PkgConfigGeneratorWithComponentsTest(unittest.TestCase):
@staticmethod
def _create_greetings(client, custom_names=False, components=True):
conanfile_greetings = textwrap.dedent("""
from conans import ConanFile, CMake
class GreetingsConan(ConanFile):
name = "greetings"
version = "0.0.1"
settings = "os", "compiler", "build_type", "arch"
def package_info(self):
%s
""")
if components:
info = textwrap.dedent("""
self.cpp_info.components["hello"].libs = ["hello"]
self.cpp_info.components["bye"].libs = ["bye"]
""")
if custom_names:
info += textwrap.dedent("""
self.cpp_info.names["pkg_config"] = "Greetings"
self.cpp_info.components["hello"].names["pkg_config"] = "Hello"
self.cpp_info.components["bye"].names["pkg_config"] = "Bye"
""")
else:
info = textwrap.dedent("""
self.cpp_info.libs = ["hello", "bye"]
""")
wrapper = textwrap.TextWrapper(width=81, initial_indent=" ", subsequent_indent=" ")
conanfile_greetings = conanfile_greetings % wrapper.fill(info)
client.save({"conanfile.py": conanfile_greetings})
client.run("create .")
@staticmethod
def _create_world(client, conanfile=None):
_conanfile_world = textwrap.dedent("""
from conans import ConanFile, CMake
class WorldConan(ConanFile):
name = "world"
version = "0.0.1"
settings = "os", "compiler", "build_type", "arch"
requires = "greetings/0.0.1"
def package_info(self):
self.cpp_info.components["helloworld"].requires = ["greetings::hello"]
self.cpp_info.components["helloworld"].libs = ["helloworld"]
self.cpp_info.components["worldall"].requires = ["helloworld",
"greetings::greetings"]
self.cpp_info.components["worldall"].libs = ["worldall"]
""")
client.save({"conanfile.py": conanfile or _conanfile_world})
client.run("create .")
@staticmethod
def _get_libs_from_pkg_config(library, folder):
with environment_append({"PKG_CONFIG_PATH": folder}):
pconfig = PkgConfig(library)
libs = pconfig.libs_only_l
return libs
def test_basic(self):
client = TestClient()
self._create_greetings(client)
self._create_world(client)
client.run("install world/0.0.1@ -g pkg_config")
self.assertNotIn("Requires:", client.load("hello.pc"))
self.assertNotIn("Requires:", client.load("bye.pc"))
self.assertIn("Requires: bye hello", client.load("greetings.pc"))
for f in ["hello.pc", "bye.pc", "greetings.pc", "world.pc", "helloworld.pc", "worldall.pc"]:
self.assertIn("Version: 0.0.1", client.load(f))
libs = self._get_libs_from_pkg_config("greetings", client.current_folder)
self.assertListEqual(["-lbye", "-lhello"], libs)
    def test_pkg_config_general(self):
        # Custom pkg_config names: generated .pc files and transitive link
        # flags must use the overridden names (Hello, Bye, World, ...).
        client = TestClient()
        self._create_greetings(client, custom_names=True)
        conanfile = textwrap.dedent("""
            from conans import ConanFile, CMake
            class WorldConan(ConanFile):
                name = "world"
                version = "0.0.1"
                settings = "os", "compiler", "build_type", "arch"
                requires = "greetings/0.0.1"
                def package_info(self):
                    self.cpp_info.names["pkg_config"] = "World"
                    self.cpp_info.components["helloworld"].names["pkg_config"] = "Helloworld"
                    self.cpp_info.components["helloworld"].requires = ["greetings::hello"]
                    self.cpp_info.components["helloworld"].libs = ["Helloworld"]
                    self.cpp_info.components["worldall"].names["pkg_config"] = "Worldall"
                    self.cpp_info.components["worldall"].requires = ["greetings::bye", "helloworld"]
                    self.cpp_info.components["worldall"].libs = ["Worldall"]
            """)
        self._create_world(client, conanfile=conanfile)
        client.run("install world/0.0.1@ -g pkg_config")
        libs = self._get_libs_from_pkg_config("Worldall", client.current_folder)
        self.assertListEqual(["-lWorldall", "-lbye", "-lHelloworld", "-lhello"], libs)
        libs = self._get_libs_from_pkg_config("Helloworld", client.current_folder)
        self.assertListEqual(["-lHelloworld", "-lhello"], libs)
        libs = self._get_libs_from_pkg_config("World", client.current_folder)
        self.assertListEqual(["-lWorldall", "-lbye", "-lHelloworld", "-lhello"], libs)
        for f in ["Hello.pc", "Bye.pc", "Greetings.pc", "World.pc", "Helloworld.pc", "Worldall.pc"]:
            self.assertIn("Version: 0.0.1", client.load(f))
    def test_pkg_config_components(self):
        # Component-level requires: link flags must be resolved transitively
        # and the package .pc must require its own components.
        client = TestClient()
        self._create_greetings(client)
        conanfile2 = textwrap.dedent("""
            from conans import ConanFile, CMake
            class WorldConan(ConanFile):
                name = "world"
                version = "0.0.1"
                settings = "os", "compiler", "build_type", "arch"
                requires = "greetings/0.0.1"
                def package_info(self):
                    self.cpp_info.components["helloworld"].requires = ["greetings::hello"]
                    self.cpp_info.components["helloworld"].libs = ["helloworld"]
                    self.cpp_info.components["worldall"].requires = ["helloworld", "greetings::bye"]
                    self.cpp_info.components["worldall"].libs = ["worldall"]
            """)
        self._create_world(client, conanfile=conanfile2)
        client.run("install world/0.0.1@ -g pkg_config")
        libs = self._get_libs_from_pkg_config("helloworld", client.current_folder)
        self.assertListEqual(["-lhelloworld", "-lhello"], libs)
        libs = self._get_libs_from_pkg_config("worldall", client.current_folder)
        self.assertListEqual(["-lworldall", "-lhelloworld", "-lhello", "-lbye"], libs)
        world_pc = client.load("world.pc")
        self.assertIn("Requires: helloworld worldall", world_pc)
        libs = self._get_libs_from_pkg_config("world", client.current_folder)
        self.assertListEqual(["-lworldall", "-lhelloworld", "-lhello", "-lbye"], libs)
        for f in ["hello.pc", "bye.pc", "greetings.pc", "world.pc", "helloworld.pc", "worldall.pc"]:
            self.assertIn("Version: 0.0.1", client.load(f))
    def test_recipe_with_components_requiring_recipe_without_components(self):
        # greetings declares NO components here: only greetings.pc exists
        # (no hello.pc / bye.pc) and consumers depend on the whole package.
        client = TestClient()
        self._create_greetings(client, components=False)
        conanfile = textwrap.dedent("""
            from conans import ConanFile, CMake
            class WorldConan(ConanFile):
                name = "world"
                version = "0.0.1"
                settings = "os", "compiler", "build_type", "arch"
                requires = "greetings/0.0.1"
                def package_info(self):
                    self.cpp_info.components["helloworld"].requires = ["greetings::greetings"]
                    self.cpp_info.components["helloworld"].libs = ["helloworld"]
                    self.cpp_info.components["worldall"].requires = ["helloworld",
                                                                     "greetings::greetings"]
                    self.cpp_info.components["worldall"].libs = ["worldall"]
            """)
        self._create_world(client, conanfile=conanfile)
        client.run("install world/0.0.1@ -g pkg_config")
        self.assertFalse(os.path.isfile(os.path.join(client.current_folder, "hello.pc")))
        self.assertFalse(os.path.isfile(os.path.join(client.current_folder, "bye.pc")))
        greetings_pc = client.load("greetings.pc")
        self.assertNotIn("Requires:", greetings_pc)
        libs = self._get_libs_from_pkg_config("greetings", client.current_folder)
        self.assertListEqual(["-lhello", "-lbye"], libs)
        libs = self._get_libs_from_pkg_config("world", client.current_folder)
        self.assertListEqual(["-lworldall", "-lhelloworld", "-lhello", "-lbye"], libs)
        libs = self._get_libs_from_pkg_config("helloworld", client.current_folder)
        self.assertListEqual(["-lhelloworld", "-lhello", "-lbye"], libs)
        libs = self._get_libs_from_pkg_config("worldall", client.current_folder)
        self.assertListEqual(["-lworldall", "-lhelloworld", "-lhello", "-lbye"], libs)
        for f in ["greetings.pc", "world.pc", "helloworld.pc", "worldall.pc"]:
            self.assertIn("Version: 0.0.1", client.load(f))
    def test_same_names(self):
        # A component renamed to match its package ("hello") must collapse
        # into a single hello.pc with no self-Requires.
        client = TestClient()
        conanfile_greetings = textwrap.dedent("""
            from conans import ConanFile, CMake
            class HelloConan(ConanFile):
                name = "hello"
                version = "0.0.1"
                settings = "os", "compiler", "build_type", "arch"
                def package_info(self):
                    self.cpp_info.components["global"].name = "hello"
                    self.cpp_info.components["global"].libs = ["hello"]
            """)
        client.save({"conanfile.py": conanfile_greetings})
        client.run("create .")
        client.run("install hello/0.0.1@ -g pkg_config")
        self.assertNotIn("Requires:", client.load("hello.pc"))
        self.assertIn("Version: 0.0.1", client.load("hello.pc"))
    def test_component_not_found_same_name_as_pkg_require(self):
        # "mypkg::zlib" names a component of mypkg that does not exist, even
        # though a package called zlib is also required: the install must
        # fail with a clear "component not found" error.
        zlib = GenConanfile("zlib", "0.1").with_setting("build_type")\
                                          .with_generator("pkg_config")
        mypkg = GenConanfile("mypkg", "0.1").with_setting("build_type")\
                                            .with_generator("pkg_config")
        final = GenConanfile("final", "0.1").with_setting("build_type")\
                                            .with_generator("pkg_config")\
                                            .with_require(ConanFileReference("zlib", "0.1", None, None))\
                                            .with_require(ConanFileReference("mypkg", "0.1", None, None))\
                                            .with_package_info(cpp_info={"components": {"cmp": {"requires": ["mypkg::zlib",
                                                                                                             "zlib::zlib"]}}},
                                                               env_info={})
        consumer = GenConanfile("consumer", "0.1").with_setting("build_type")\
                                                  .with_generator("pkg_config")\
                                                  .with_requirement(ConanFileReference("final", "0.1", None, None))
        client = TestClient()
        client.save({"zlib.py": zlib, "mypkg.py": mypkg, "final.py": final, "consumer.py": consumer})
        client.run("create zlib.py")
        client.run("create mypkg.py")
        client.run("create final.py")
        client.run("install consumer.py", assert_error=True)
        self.assertIn("Component 'mypkg::zlib' not found in 'mypkg' package requirement", client.out)
| 49.51073 | 101 | 0.583564 |
070a8be9de0f7d82f12733aa991ed56d7d1d8061 | 1,872 | py | Python | read_datasets.py | pulkitag/dataset_utils | c9aab6c713e2001bc8a5d14aa7fea5c86d49ed8d | [
"BSD-3-Clause"
] | null | null | null | read_datasets.py | pulkitag/dataset_utils | c9aab6c713e2001bc8a5d14aa7fea5c86d49ed8d | [
"BSD-3-Clause"
] | null | null | null | read_datasets.py | pulkitag/dataset_utils | c9aab6c713e2001bc8a5d14aa7fea5c86d49ed8d | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import struct
from os import path as osp
from easydict import EasyDict as edict
DATASET_PATH = '/data0/pulkitag/data_sets'
class DataSet(object):
def get_images(self, setName='train'):
pass
def get_labels(self, setName='train'):
pass
def get_images_nd_labels(self, setName='train'):
pass
class MNISTData(DataSet):
def __init__(self):
mnistPath = osp.join(DATASET_PATH, 'mnist')
self.pths_ = edict()
self.pths_.train = edict()
self.pths_.test = edict()
self.pths_.train.ims = osp.join(mnistPath, 'train-images-idx3-ubyte')
self.pths_.train.lb = osp.join(mnistPath, 'train-labels-idx1-ubyte')
self.pths_.test.ims = osp.join(mnistPath, 't10k-images-idx3-ubyte')
self.pths_.test.lb = osp.join(mnistPath, 't10k-labels-idx1-ubyte')
def get_images(self, setName='train'):
return self._load_images(self.pths_[setName].ims)
def get_labels(self, setName='train'):
return self._load_images(self.pths_[setName].lb)
def _load_images(self, fileName):
print (fileName)
f = open(fileName,'rb')
magicNum = struct.unpack('>i',f.read(4))
N = struct.unpack('>i',f.read(4))[0]
nr = struct.unpack('>i',f.read(4))[0]
nc = struct.unpack('>i',f.read(4))[0]
print "Num Images: %d, numRows: %d, numCols: %d" % (N,nr,nc)
im = np.zeros((N,nr,nc),dtype=np.uint8)
for i in range(N):
for r in range(nr):
for c in range(nc):
im[i,r,c] = struct.unpack('>B',f.read(1))[0]
f.close()
return im
def _load_labels(self, labelFile):
f = open(labelFile,'rb')
magicNum = struct.unpack('>i',f.read(4))
N = struct.unpack('>i',f.read(4))[0]
print "Number of labels found: %d" % N
label = np.zeros((N,1),dtype=np.uint8)
for i in range(N):
label[i] = struct.unpack('>B',f.read(1))[0]
f.close()
return label
| 30.193548 | 73 | 0.636752 |
c73c088d6e6f61ceab228148eb741299292d9290 | 1,939 | py | Python | loader.py | MartorSkull/NTRDB | 409bcb9b0c1fad1b3d539afec52ebf17c172c4e6 | [
"MIT"
] | 1 | 2019-03-09T21:21:32.000Z | 2019-03-09T21:21:32.000Z | loader.py | MartorSkull/NTRDB | 409bcb9b0c1fad1b3d539afec52ebf17c172c4e6 | [
"MIT"
] | null | null | null | loader.py | MartorSkull/NTRDB | 409bcb9b0c1fad1b3d539afec52ebf17c172c4e6 | [
"MIT"
] | null | null | null | from os import listdir
from base64 import b64encode
# Load every template/resource the web app serves, once, at import time.
# The repeated `with open(...)` boilerplate is factored into three helpers.

def _read_text(path):
    # Return the full text contents of *path* (platform default encoding,
    # as the original open() calls used).
    with open(path) as f:
        return f.read()

def _read_bytes(path):
    # Return the raw byte contents of *path*.
    with open(path, 'rb') as f:
        return f.read()

def _read_b64(path):
    # Return the base64 text encoding of *path*'s bytes (for inline images).
    return str(b64encode(_read_bytes(path)), 'utf-8')

themes = {}
favicon = _read_bytes('resources/favicon.png')
icon = _read_bytes('resources/icon.png')
error = _read_text('html/error.html')
index = _read_text('html/index.html')
desc = _read_text('html/desc.html')
base = _read_text('html/base.html')
messagehtml = _read_text('html/message.html')
addfile = _read_text('html/addfile.html')
links = _read_text('html/links.html')
links_mng = _read_text('html/links_manage.html')
nbar_loggedin = _read_text('html/nbar_loggedin.html')
nbar_login = _read_text('html/nbar_login.html')
actmsg = _read_text('resources/MailRegText.txt')
editpage = _read_text('html/edit.html')
managepage = _read_text('html/manage.html')
removal = _read_text('html/removal.html')
links_mod = _read_text('html/links_mod.html')
mod = _read_text('html/mod.html')
iany = _read_b64('resources/Icon_Any.png')
inew = _read_b64('resources/Icon_New.png')
iold = _read_b64('resources/Icon_Old.png')
# Theme stylesheets, keyed by filename without its 4-char extension (".css").
for item in listdir('html/themes'):
    themes[item[:-4]] = _read_bytes('html/themes/%s' % item)
adminmenu = _read_text('html/adminmenu.html')
links_adminmenu = _read_text('html/links_adminmenu.html')
thememenu = _read_text('html/thememenu.html')
links_thememenu = _read_text('html/links_thememenu.html')
robots = _read_bytes('resources/robots.txt')
print("Pages loaded")
| 32.316667 | 50 | 0.640536 |
518e958cad483ecef5f1c742d940d7c1b62f99e7 | 9,073 | py | Python | src/sage/tests/books/computational-mathematics-with-sagemath/integration_doctest.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/tests/books/computational-mathematics-with-sagemath/integration_doctest.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/tests/books/computational-mathematics-with-sagemath/integration_doctest.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | ## -*- encoding: utf-8 -*-
"""
This file (./integration_doctest.sage) was *autogenerated* from ./integration.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./integration_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./integration.tex, line 73::
sage: x = var('x'); f(x) = exp(-x^2) * log(x)
sage: result = integrate(f, x, 1, 3)
...
sage: N(result) # abs tol 1e-14
0.03586029499126769
Sage example in ./integration.tex, line 78::
sage: plot(f, 1, 3, fill='axis')
Graphics object consisting of 2 graphics primitives
Sage example in ./integration.tex, line 104::
sage: N(integrate(sin(x^2)/(x^2), x, 1, infinity)) # abs tol 2e-15
0.285736646322853 - 6.93889390390723e-18*I
Sage example in ./integration.tex, line 108::
sage: plot(sin(x^2)/(x^2), x, 1, 10, fill='axis')
Graphics object consisting of 2 graphics primitives
Sage example in ./integration.tex, line 162::
sage: fp = plot(f, 1, 3, color='red')
sage: n = 4
sage: interp_points = [(1+2*u/(n-1), N(f(1+2*u/(n-1))))
....: for u in range(n)]
sage: A = PolynomialRing(RR, 'x')
sage: pp = plot(A.lagrange_polynomial(interp_points), 1, 3, fill='axis')
sage: fp+pp
Graphics object consisting of 3 graphics primitives
Sage example in ./integration.tex, line 522::
sage: N(integrate(cos(log(cos(x))), x, 0, pi/4)) # rel tol 2e-12
0.7766520331543109
Sage example in ./integration.tex, line 536::
sage: integrate(log(1+x)*x, x, 0, 1)
1/4
sage: N(integrate(log(1+x)*x, x, 0, 1))
0.250000000000000
Sage example in ./integration.tex, line 562::
sage: numerical_integral(cos(log(cos(x))), 0, pi/4) # rel tol 2e-11
(0.7766520331543109, 8.622569693298564e-15)
Sage example in ./integration.tex, line 600::
sage: numerical_integral(exp(-x^100), 0, 1.1)
(0.99432585119150..., 4.0775730...e-09)
sage: numerical_integral(exp(-x^100), 0, 1.1, algorithm='qng') # abs tol 2e-12
(0.9943275385765319, 0.016840666914705607)
Sage example in ./integration.tex, line 612::
sage: integrate(cos(log(cos(x))), x, 0, pi/4)
integrate(cos(log(cos(x))), x, 0, 1/4*pi)
Sage example in ./integration.tex, line 622::
sage: N(integrate(cos(log(cos(x))), x, 0, pi/4), digits=60) # abs tol 2e-12
0.7766520331543109
Sage example in ./integration.tex, line 628::
sage: N(integrate(sin(x)*exp(cos(x)), x, 0, pi), digits=60)
2.35040238728760291376476370119120163031143596266819174045913
Sage example in ./integration.tex, line 644::
sage: sage.calculus.calculus.nintegral(sin(sin(x)), x, 0, 1)
(0.430606103120690..., 4.78068810228705...e-15, 21, 0)
Sage example in ./integration.tex, line 654::
sage: g = sin(sin(x))
sage: g.nintegral(x, 0, 1)
(0.430606103120690..., 4.78068810228705...e-15, 21, 0)
Ensure consistent results on 32-bit and 64-bit systems by using the same
precision::
sage: _ = gp.default('realprecision', 38)
Sage example in ./integration.tex, line 703::
sage: gp('intnum(x=17, 20, exp(-x^2)*log(x))')
2.5657285005610514829173563961304785900 E-127
Sage example in ./integration.tex, line 717::
sage: gp('intnum(x=0, 1, sin(sin(x)))')
0.43060610312069060491237735524846578643
sage: old_prec = gp.set_precision(50)
sage: gp('intnum(x=0, 1, sin(sin(x)))')
0.43060610312069060491237735524846578643360804182200
Sage example in ./integration.tex, line 746::
sage: p = gp.set_precision(old_prec) # we reset the default precision
sage: gp('intnum(x=0, 1, x^(-99/100))') # rel tol 1e-9
73.629142577870966597465391764897770039
Sage example in ./integration.tex, line 754::
sage: gp('intnum(x=[0, -99/100], 1, x^(-99/100))')
100.00000000000000000000000000000000000
Sage example in ./integration.tex, line 766::
sage: gp('intnum(x=[0, -1/42], 1, x^(-99/100))') # rel tol 1e-9
74.472749314025559405335761513474670714
Sage example in ./integration.tex, line 785::
sage: import mpmath
sage: mpmath.mp.prec = 53
sage: mpmath.quad(lambda x: mpmath.sin(mpmath.sin(x)), [0, 1])
mpf('0.43060610312069059')
Sage example in ./integration.tex, line 795::
sage: a = RDF(pi); b = mpmath.mpf(a); b
mpf('3.1415926535897931')
sage: c = RDF(b); c
3.141592653589793
Sage example in ./integration.tex, line 824::
sage: mpmath.mp.prec = 113
sage: mpmath.quad(lambda x: mpmath.sin(mpmath.sin(x)), [0, 1])
mpf('0.430606103120690604912377355248465809')
Sage example in ./integration.tex, line 846::
sage: f(x) = sin(sin(x))
sage: mpmath.quad(f, [0, 1])
Traceback (most recent call last):
...
TypeError: no canonical coercion from <class 'sage.libs.mpmath.ext_main.mpf'> to ...
Sage example in ./integration.tex, line 866::
sage: g(x) = max_symbolic(sin(x), cos(x))
sage: mpmath.mp.prec = 100
sage: mpmath.quadts(lambda x: g(N(x, 100)), [0, 1])
mpf('0.873912416263035435957979086252')
Sage example in ./integration.tex, line 878::
sage: mpmath.mp.prec = 170
sage: mpmath.quadts(lambda x: g(N(x, 190)), [0, 1])
mpf('0.87391090757400975205393005981962476344054148354188794')
sage: N(sqrt(2) - cos(1), 100)
0.87391125650495533140075211677
Sage example in ./integration.tex, line 892::
sage: mpmath.quadts(lambda x: g(N(x, 170)), [0, mpmath.pi / 4, 1])
mpf('0.87391125650495533140075211676672147483736145475902551')
Sage example in ./integration.tex, line 979::
sage: y = var('y'); integrate(exp(y*sin(x)), (x, 0, sqrt(y))) # long time
integrate(e^(y*sin(x)), x, 0, sqrt(y))
Sage example in ./integration.tex, line 990::
sage: f = lambda y: numerical_integral(lambda x: exp(y*sin(x)), \
0, sqrt(y))[0]
sage: f(0.0), f(0.5), f(1.0) # abs tol 2e-15
(0.0, 0.8414895067661431, 1.6318696084180513)
Sage example in ./integration.tex, line 998::
sage: numerical_integral(f, 0, 1) # abs tol 2e-16
(0.8606791942204567, 6.301207560882073e-07)
Sage example in ./integration.tex, line 1008::
sage: f = lambda y: sage.calculus.calculus.nintegral(exp(y*sin(x)), \
x, 0, sqrt(y))[0]
sage: numerical_integral(f, 0, 1) # abs tol 2e-16
(0.8606791942204567, 6.301207560882096e-07)
Sage example in ./integration.tex, line 1016::
sage: f = lambda y: RDF(mpmath.quad(lambda x: mpmath.exp(y*mpmath.sin(x)), \
[0, sqrt(y)]))
sage: numerical_integral(f, 0, 1) # abs tol 2e-16
(0.8606791942204567, 6.301207561187562e-07)
Sage example in ./integration.tex, line 1027::
sage: mpmath.mp.dps = 60
sage: f = lambda x, y: mpmath.exp(y*mpmath.sin(x))
sage: mpmath.quad(f, [0,1], [0,1])
mpf('1.28392205755238471754385917646324675741664250325189751108716305')
Sage example in ./integration.tex, line 1044::
sage: def evalI(n):
....: f = lambda y: numerical_integral(lambda x: exp(y*sin(x)),
....: 0, sqrt(y), algorithm='qng', max_points=n)[0]
....: return numerical_integral(f, 0, 1, algorithm='qng', max_points=n)
sage: evalI(100) # abs tol 2e-12
(0.8606792028826138, 5.553962923506737e-07)
Sage example in ./integration.tex, line 1228::
sage: T = ode_solver()
Sage example in ./integration.tex, line 1244::
sage: def f_1(t,y,params): return [y[1],params[0]*(1-y[0]^2)*y[1]-y[0]]
sage: T.function = f_1
Sage example in ./integration.tex, line 1266::
sage: def j_1(t,y,params):
....: return [[0, 1],
....: [-2*params[0]*y[0]*y[1]-1, params[0]*(1-y[0]^2)],
....: [0,0]]
sage: T.jacobian = j_1
Sage example in ./integration.tex, line 1279::
sage: T.algorithm = "rk8pd"
sage: T.ode_solve(y_0=[1,0], t_span=[0,100], params=[10],
....: num_points=1000)
sage: f = T.interpolate_solution()
Sage example in ./integration.tex, line 1302::
sage: plot(f, 0, 100)
Graphics object consisting of 1 graphics primitive
Sage example in ./integration.tex, line 1363::
sage: t, y = var('t, y')
sage: desolve_rk4(t*y*(2-y), y, ics=[0,1], end_points=[0, 1], step=0.5)
[[0, 1], [0.5, 1.12419127424558], [1.0, 1.461590162288825]]
Sage example in ./integration.tex, line 1399::
sage: import mpmath
sage: mpmath.mp.prec = 53
sage: sol = mpmath.odefun(lambda t, y: y, 0, 1)
sage: sol(1)
mpf('2.7182818284590451')
sage: mpmath.mp.prec = 100
sage: sol(1)
mpf('2.7182818284590452353602874802307')
sage: N(exp(1), 100)
2.7182818284590452353602874714
Sage example in ./integration.tex, line 1436::
sage: mpmath.mp.prec = 53
sage: f = mpmath.odefun(lambda t, y: [-y[1], y[0]], 0, [1, 0])
sage: f(3)
[mpf('-0.98999249660044542'), mpf('0.14112000805986721')]
sage: (cos(3.), sin(3.))
(-0.989992496600445, 0.141120008059867)
Sage example in ./integration.tex, line 1497::
sage: mpmath.mp.prec = 10
sage: sol = mpmath.odefun(lambda t, y: y, 0, 1)
sage: sol(1)
mpf('2.7148')
sage: mpmath.mp.prec = 100
sage: sol(1)
mpf('2.7135204235459511323824699502438')
"""
| 30.96587 | 86 | 0.655902 |
e06c06b1f83481990e24d23779d730d28fb8aa21 | 1,633 | py | Python | aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/GenerateHumanSketchStyleRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/GenerateHumanSketchStyleRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/GenerateHumanSketchStyleRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class GenerateHumanSketchStyleRequest(RpcRequest):
	"""RPC request wrapper for the facebody GenerateHumanSketchStyle API
	(version 2019-12-30); generated Alibaba Cloud SDK boilerplate."""
	def __init__(self):
		RpcRequest.__init__(self, 'facebody', '2019-12-30', 'GenerateHumanSketchStyle','facebody')
		self.set_method('POST')
		# Wire region/endpoint resolution tables when the core supports them.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_ReturnType(self):
		# 'ReturnType' body parameter: desired result format of the sketch.
		return self.get_body_params().get('ReturnType')

	def set_ReturnType(self,ReturnType):
		self.add_body_params('ReturnType', ReturnType)

	def get_ImageURL(self):
		# 'ImageURL' body parameter: URL of the input portrait image.
		return self.get_body_params().get('ImageURL')

	def set_ImageURL(self,ImageURL):
		self.add_body_params('ImageURL', ImageURL)
18b1d427bb8053ba4c9b31a781c00e614b952bb0 | 541 | py | Python | pollapp/polls/models.py | JG-Mike/pollapp | b01b893c718326815454ff446d28684fed7e03ac | [
"MIT"
] | null | null | null | pollapp/polls/models.py | JG-Mike/pollapp | b01b893c718326815454ff446d28684fed7e03ac | [
"MIT"
] | null | null | null | pollapp/polls/models.py | JG-Mike/pollapp | b01b893c718326815454ff446d28684fed7e03ac | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models. CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| 24.590909 | 68 | 0.678373 |
5f85adf6664841bfe1d4f631d29f73d9bd0e4e0b | 3,355 | py | Python | iterative.py | kazerthekeen/Chaos-Dynamical-Systems | 1aded2e5b9af2126cbd96a8e747e72a0eb76caed | [
"MIT"
] | null | null | null | iterative.py | kazerthekeen/Chaos-Dynamical-Systems | 1aded2e5b9af2126cbd96a8e747e72a0eb76caed | [
"MIT"
] | null | null | null | iterative.py | kazerthekeen/Chaos-Dynamical-Systems | 1aded2e5b9af2126cbd96a8e747e72a0eb76caed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cobweb plot gotten primarily from https://scipython.com/blog/cobweb-plots/
and modified to work with my data set.
@author: kazer
"""
import math
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
# Use LaTeX throughout the figure for consistency
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 16})
rc('text', usetex=True)
# Figure dpi
dpi = 72
def colatz(ar):
    """Iterate the 5x+1 map (n -> n/2 if even, 5n+1 if odd) until a repeat.

    Accepts either a starting integer or a partially-built orbit list.
    Returns the orbit as a list whose final element is the first value seen
    twice (so it also appears earlier in the list).
    """
    if not isinstance(ar, list):  # allow colatz(num) as well as colatz([num])
        ar = [ar]
    seen = set(ar[:-1])
    # Iterative loop: the original recursion hit the recursion limit on long
    # orbits, and its `value in ar[:-1]` scan was O(n) per step; a set gives
    # O(1) repeat detection.
    while ar[-1] not in seen:
        cur = ar[-1]
        seen.add(cur)
        # cur // 2 keeps exact integer arithmetic; the old int(cur / 2) went
        # through a float and silently lost precision beyond 2**53.
        ar.append(cur // 2 if cur % 2 == 0 else cur * 5 + 1)
    return ar
def newtons(f, df, ar, n=20):
    """Run Newton's method x_{k+1} = x_k - f(x_k)/df(x_k).

    f  -- the function whose root is sought.
    df -- its derivative.
    ar -- starting point, or an existing list of iterates (extended in place).
    n  -- total number of iterates to hold before stopping.
    """
    if not isinstance(ar, list):
        ar = [ar]
    # Iterative form. Bug fix: the original recursion called
    # `newtons(f, df, ar)` without forwarding `n`, so any caller-supplied n
    # was ignored after the first step and 20 iterates were always produced.
    while len(ar) < n:
        x = ar[-1]
        ar.append(x - f(x) / df(x))
    return ar
def plot_cobweb(f, x0, name, iterations=20, bound=None):
    """Make a cobweb plot.

    Plot y = f(x) and y = x over the plotting window, and illustrate the
    behaviour of iterating x = f(x) starting at x = x0.

    f          -- callable; expected to be an AnnotatedFunction so that
                  f.latex_label can label the y-axis.
    x0         -- initial value of the iteration.
    name       -- plot title, also used for the output file cobweb_<name>.png.
    iterations -- number of map applications to trace.
    bound      -- optional [lo, hi] plotting window; autoscaled when omitted.
    """
    # Bug fix: the default used to be the mutable literal `bound=[]` (shared
    # across calls); `None` plus an explicit check is the safe equivalent,
    # and a caller-supplied [] still triggers autoscaling as before.
    if bound is None:
        bound = []
    nmax = iterations*2
    fig = plt.figure(figsize=(600/dpi, 450/dpi), dpi=dpi)
    ax = fig.add_subplot(111)
    # Iterate x = f(x) for nmax steps, starting at (x0, 0).
    px, py = np.empty((2,nmax+1,2))
    px[0], py[0] = x0, 0
    x = []  # textual trace of the iterates (see the commented print below)
    for n in range(1, nmax, 2):
        x.append("{:.5} ".format(px[n-1][0]))
        px[n] = px[n-1]
        py[n] = f(px[n-1])
        px[n+1] = py[n]
        py[n+1] = py[n]
    if bound == []:
        # Autoscale the window to the visited points, with a 20% margin.
        m = np.amax(px)*1.2
        n = np.amin(px)*1.2
    else:
        n = bound[0]
        m = bound[1]
    r = np.linspace(n,m,100)
    if n < 0 < m:
        # Draw the coordinate axes when the window straddles the origin.
        ax.plot(r,np.zeros((100)), c='#444444', lw=1)
        ax.plot(np.zeros((100)),r, c='#444444', lw=1)
    # Plot y = f(x) and y = x
    ax.plot(r, r, c='#777777', lw=2)
    ax.plot(r, f(r), c='#000000', lw=2)
    # Plot the path traced out by the iteration.
    ax.plot(px, py, c='b', alpha=0.7)
    # Annotate and tidy the plot.
    ax.minorticks_on()
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.5)
    ax.set_aspect('equal')
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(f.latex_label)
    ax.set_title(name)
    plt.savefig('cobweb_{:}.png'.format(name), dpi=dpi)
    # print(*x, sep=", ")
class AnnotatedFunction:
    """Callable wrapper pairing a function with its LaTeX label.

    Instances behave exactly like the wrapped Python function when called,
    while also carrying a `latex_label` string for plot annotation.
    """
    def __init__(self, func, latex_label):
        self.latex_label = latex_label
        self.func = func

    def __call__(self, *args, **kwargs):
        # Delegate straight to the wrapped callable.
        return self.func(*args, **kwargs)
#Newtons
#func = AnnotatedFunction(lambda x: x**2-2, '$x_{n+1} = 6/x_n -1$')
#df = AnnotatedFunction(lambda x: 2*x, '$x_{n+1} = 6/x_n -1$')
#
#print(newtons(func,df, 1.5))
| 27.056452 | 89 | 0.556781 |
161661ef63e5781ee8eade216319dcbfa3939a43 | 6,718 | py | Python | python/LA-MCTS_replaceSVM/lamcts/Node.py | winie-the-pooh/LA-MCTS-in-CPP | 75cadc0b283108ac6106b2e12e42e9b4058c1c52 | [
"Apache-2.0"
] | null | null | null | python/LA-MCTS_replaceSVM/lamcts/Node.py | winie-the-pooh/LA-MCTS-in-CPP | 75cadc0b283108ac6106b2e12e42e9b4058c1c52 | [
"Apache-2.0"
] | null | null | null | python/LA-MCTS_replaceSVM/lamcts/Node.py | winie-the-pooh/LA-MCTS-in-CPP | 75cadc0b283108ac6106b2e12e42e9b4058c1c52 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .Classifier import Classifier
import json
import numpy as np
import math
import operator
class Node:
obj_counter = 0
# If a leave holds >= SPLIT_THRESH, we split into two new nodes.
def __init__(self, parent = None, dims = 0, reset_id = False, kernel_type = "rbf", gamma_type = "auto"):
# Note: every node is initialized as a leaf,
# only internal nodes equip with classifiers to make decisions
# if not is_root:
# assert type( parent ) == type( self )
self.dims = dims
self.x_bar = float('inf')
self.n = 0
self.uct = 0
self.classifier = Classifier( [], self.dims, kernel_type, gamma_type )
#insert curt into the kids of parent
self.parent = parent
self.kids = [] # 0:good, 1:bad
self.bag = []
self.is_svm_splittable = False
if reset_id:
Node.obj_counter = 0
self.id = Node.obj_counter
#data for good and bad kids, respectively
Node.obj_counter += 1
def update_kids(self, good_kid, bad_kid):
assert len(self.kids) == 0
self.kids.append( good_kid )
self.kids.append( bad_kid )
assert self.kids[0].classifier.get_mean() > self.kids[1].classifier.get_mean()
def is_good_kid(self):
if self.parent is not None:
if self.parent.kids[0] == self:
return True
else:
return False
else:
return False
def is_leaf(self):
if len(self.kids) == 0:
return True
else:
return False
def visit(self):
self.n += 1
def print_bag(self):
sorted_bag = sorted(self.bag.items(), key=operator.itemgetter(1))
print("BAG"+"#"*10)
for item in sorted_bag:
print(item[0],"==>", item[1])
print("BAG"+"#"*10)
print('\n')
def update_bag(self, samples):
assert len(samples) > 0
self.bag.clear()
self.bag.extend( samples )
self.classifier.update_samples( self.bag )
if len(self.bag) <= 2:
self.is_svm_splittable = False
else:
self.is_svm_splittable = self.classifier.is_splittable_svm()
self.x_bar = self.classifier.get_mean()
self.n = len( self.bag )
def clear_data(self):
self.bag.clear()
def get_name(self):
# state is a list of jsons
return "node" + str(self.id)
def pad_str_to_8chars(self, ins, total):
if len(ins) <= total:
ins += ' '*(total - len(ins) )
return ins
else:
return ins[0:total]
def get_rand_sample_from_bag(self):
if len( self.bag ) > 0:
upeer_boundary = len(list(self.bag))
rand_idx = np.random.randint(0, upeer_boundary)
return self.bag[rand_idx][0]
else:
return None
def get_parent_str(self):
return self.parent.get_name()
def propose_samples_bo(self, num_samples, path, lb, ub, samples):
proposed_X = self.classifier.propose_samples_bo(num_samples, path, lb, ub, samples)
return proposed_X
def propose_samples_turbo(self, num_samples, path, func):
proposed_X, fX = self.classifier.propose_samples_turbo(num_samples, path, func)
return proposed_X, fX
def propose_samples_rand(self, num_samples):
assert num_samples > 0
samples = self.classifier.propose_samples_rand(num_samples)
return samples
def __str__(self):
name = self.get_name()
name = self.pad_str_to_8chars(name, 7)
name += ( self.pad_str_to_8chars( 'is good:' + str(self.is_good_kid() ), 15 ) )
name += ( self.pad_str_to_8chars( 'is leaf:' + str(self.is_leaf() ), 15 ) )
val = 0
name += ( self.pad_str_to_8chars( ' val:{0:.4f} '.format(round(self.get_xbar(), 3) ), 20 ) )
name += ( self.pad_str_to_8chars( ' uct:{0:.4f} '.format(round(self.get_uct(), 3) ), 20 ) )
name += self.pad_str_to_8chars( 'sp/n:'+ str(len(self.bag))+"/"+str(self.n), 15 )
upper_bound = np.around( np.max(self.classifier.X, axis = 0), decimals=2 )
lower_bound = np.around( np.min(self.classifier.X, axis = 0), decimals=2 )
boundary = ''
for idx in range(0, self.dims):
boundary += str(lower_bound[idx])+'>'+str(upper_bound[idx])+' '
#name += ( self.pad_str_to_8chars( 'bound:' + boundary, 60 ) )
parent = '----'
if self.parent is not None:
parent = self.parent.get_name()
parent = self.pad_str_to_8chars(parent, 10)
name += (' parent:' + parent)
kids = ''
kid = ''
for k in self.kids:
kid = self.pad_str_to_8chars( k.get_name(), 10 )
kids += kid
name += (' kids:' + kids)
return name
def get_uct(self, Cp = 10 ):
if self.parent == None:
return float('inf')
if self.n == 0:
return float('inf')
return self.x_bar + 2*Cp*math.sqrt( 2* np.power(self.parent.n, 0.5) / self.n )
def get_xbar(self):
return self.x_bar
def get_n(self):
return self.n
def train_and_split(self):
assert len(self.bag) >= 2
self.classifier.update_samples( self.bag )
good_kid_data, bad_kid_data = self.classifier.split_data()
assert len( good_kid_data ) + len( bad_kid_data ) == len( self.bag )
return good_kid_data, bad_kid_data
def plot_samples_and_boundary(self, func):
name = self.get_name() + ".pdf"
self.classifier.plot_samples_and_boundary(func, name)
def sample_arch(self):
    """Pop one architecture at random from the bag.

    Returns the JSON-decoded architecture, or None when the bag is empty.
    The chosen entry is removed from ``self.bag`` (a dict keyed by the
    JSON-encoded architecture string).
    """
    # Truthiness idiom instead of len(...) == 0.
    if not self.bag:
        return None
    net_str = np.random.choice(list(self.bag.keys()))
    del self.bag[net_str]
    return json.loads(net_str)
# print(root)
#
# with open('features.json', 'r') as infile:
# data=json.loads( infile.read() )
# samples = {}
# for d in data:
# samples[ json.dumps(d['feature']) ] = d['acc']
# n1 = Node(samples, root)
# print(n1)
#
# n1 =
| 32.61165 | 108 | 0.547484 |
992e2c76029f65312f586574807d1d25f25f04a1 | 823 | py | Python | pygenomeworks/genomeworks/cuda/__init__.py | r-mafi/ClaraGenomicsAnalysis | a33d606af7902f27d5cf86a80099061bb1a34603 | [
"Apache-2.0"
] | null | null | null | pygenomeworks/genomeworks/cuda/__init__.py | r-mafi/ClaraGenomicsAnalysis | a33d606af7902f27d5cf86a80099061bb1a34603 | [
"Apache-2.0"
] | 1 | 2020-07-17T13:57:56.000Z | 2020-07-17T13:57:56.000Z | pygenomeworks/genomeworks/cuda/__init__.py | r-mafi/ClaraGenomicsAnalysis | a33d606af7902f27d5cf86a80099061bb1a34603 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Init file for cuda package."""
from genomeworks.cuda.cuda import CudaRuntimeError, CudaStream
from genomeworks.cuda.cuda import cuda_get_device_count, cuda_set_device, cuda_get_device, cuda_get_mem_info
# Public API of the cuda package, re-exported for `from genomeworks.cuda import *`.
__all__ = ["CudaRuntimeError",
           "CudaStream",
           "cuda_get_device_count",
           "cuda_set_device",
           "cuda_get_device",
           "cuda_get_mem_info"]
| 37.409091 | 108 | 0.754557 |
e41b691a5c218165970a5fce6df309047b5455ea | 9,880 | py | Python | oncogemini/gemini_loh.py | fakedrtom/cancer_gemini | e2dd5cf056004409b58f1e2d98589f847a7e0e2f | [
"MIT"
] | 8 | 2019-12-18T12:38:25.000Z | 2021-07-14T02:11:42.000Z | oncogemini/gemini_loh.py | fakedrtom/cancer_gemini | e2dd5cf056004409b58f1e2d98589f847a7e0e2f | [
"MIT"
] | 1 | 2021-05-27T21:48:55.000Z | 2021-05-27T21:48:55.000Z | oncogemini/gemini_loh.py | fakedrtom/cancer_gemini | e2dd5cf056004409b58f1e2d98589f847a7e0e2f | [
"MIT"
] | 1 | 2021-01-15T03:57:12.000Z | 2021-01-15T03:57:12.000Z | #!/usr/bin/env python
from __future__ import absolute_import
from . import GeminiQuery
from . import gemini_utils as utils
import sys
# LOH mutations are categorized as being heterozygous
# in a normal tissue sample, but have risen homozygous
# levels in the tumor samples.
# Here we allow for a maximum and minimum allele
# frequency in the normal sample to set the boundaries
# for the heterozygous calls in the normal.
# A minimum allele frequency for the tumor samples is
# also used to define homozygous calls in tumor samples.
def loh(parser, args):
# create a connection to the database that was
# passed in as an argument via the command line
gq = GeminiQuery.GeminiQuery(args.db)
# get paramters from the args for filtering
if args.patient is not None:
patient = args.patient
else:
patient = 'none'
if args.minDP is None:
minDP = int(-1)
else:
minDP = int(args.minDP)
if args.minGQ is None:
minGQ = int(-1)
else:
minGQ = int(args.minGQ)
if args.samples is None or args.samples.lower() == 'all':
samples = 'All'
else:
samples = args.samples.split(',')
if args.specific is None:
somatic = 'none'
else:
somatic = args.specific
if args.cancers is None:
cancers = 'none'
else:
query = "pragma table_info(variants)"
gq.run(query)
utils.check_cancer_annotations(gq)
cancers = args.cancers.split(',')
if args.maxNorm is None:
maxNorm = float(0.7)
else:
maxNorm = float(args.maxNorm)
if args.minNorm is None:
minNorm = float(0.3)
else:
minNorm = float(args.minNorm)
if args.minTumor is None:
minTumor = float(0.8)
else:
minTumor = float(args.minTumor)
if args.purity:
query = "select name, purity from samples"
purity = {}
gq.run(query)
utils.get_purity(gq, purity)
# define sample search query
query = "select patient_id, name, time from samples"
# execute the sample search query
gq.run(query)
# designating which patient to perform the query on
# if no patient is specified at the command line
# and only 1 patient is present in the database
# that patient will be used
# also verify that patient is among possible patient_ids
# sample names are saved to patient specific dict
patients = []
names = {}
utils.get_names(gq,patients,names)
patient = utils.get_patient(patient,patients)
samples = utils.get_samples(patient,names,samples)
if somatic != 'none' and somatic not in samples:
sys.exit("Error: Specified sample name with --specific is not found, make sure a single sample only is provided and check the sample manifest file for available sample names")
# iterate again through each sample and save which sample is the normal
# non-normal, tumor sample names are saved to a list
# establish which timepoints belong to which samples names
# this is done for the specified --patient and --samples
# designate the last and first time points
gq.run(query)
normal_samples = []
tumor_samples = []
timepoints = {}
samples_tps = {}
utils.sort_samples(gq,normal_samples,tumor_samples,timepoints,samples_tps,patient,samples)
startpoint = min(timepoints.keys())
# if only sample included with --specific is the first timepoint, --specific won't work
if somatic != 'none':
if samples_tps[somatic] == startpoint:
sys.exit("Error: Specified sample with --specific is the first timepoint, specify a sample that has a preceding sample")
# check arrays to see if samples have been added
# if arrays are empty there is probably a problem in samples
# check the ped file being loaded into the db
if len(normal_samples) == 0 and somatic == 'none':
sys.exit("Error: There are no normal samples; check the sample manifest file for proper format and loading")
if len(tumor_samples) == 0 and somatic == 'none':
sys.exit("Error: There are no tumor samples; check the sample manifest file for proper format and loading")
# create a new connection to the database that includes the genotype columns
# using the database passed in as an argument via the command line
gq = GeminiQuery.GeminiQuery(args.db, include_gt_cols=True)
# define the loh query
if args.columns is not None:
columns = args.columns
if cancers != 'none':
columns = args.columns + ",civic_gene_abbreviations,cgi_gene_abbreviations"
else:
columns = args.columns
if args.filter is not None:
filter = args.filter
else:
filter = str(1)
query = utils.make_query(columns,filter)
# execute a new query to process the variants
gq.run(query)#, gt_filter)
# get the sample index numbers so we can get sample specific GT info (AFs, DPs, etc.)
smp2idx = gq.sample_to_idx
# print header and add the AFs of included samples
addHeader = []
header = gq.header.split('\t')
if cancers != 'none':
addHeader.extend(header[:len(header)-2])
else:
addHeader.extend(header)
if somatic == 'none':
for key in timepoints:
for s in timepoints[key]:
if s in samples:
af = 'alt_AF.' + s
addHeader.append(af)
if args.purity:
raw = 'raw.alt_AF.' + s
addHeader.append(raw)
elif somatic != 'none':
preceding = samples_tps[somatic] - 1
for s in timepoints[preceding]:
if s in samples:
af = 'alt_AF.' + s
addHeader.append(af)
if args.purity:
raw = 'raw.alt_AF.' + s
addHeader.append(raw)
af = 'alt_AF.' + somatic
addHeader.append(af)
if args.purity:
raw = 'raw.alt_AF.' + somatic
addHeader.append(raw)
print '\t'.join(addHeader)
# iterate through each row of the results and print
for row in gq:
output = []
out = str(row).split('\t')
if cancers != 'none':
output.extend(out[:len(out)-2])
else:
output.extend(out)
normAFs = []
tumsAFs = []
depths = []
quals = []
addEnd = []
if somatic == 'none':
for key in timepoints:
for s in timepoints[key]:
if s in samples:
smpidx = smp2idx[s]
if args.purity:
sampleAF = utils.purityAF(row['gt_alt_freqs'][smpidx],purity[s])
rawAF = row['gt_alt_freqs'][smpidx]
else:
sampleAF = row['gt_alt_freqs'][smpidx]
if s in normal_samples and sampleAF >= 0:
normAFs.append(sampleAF)
if s in tumor_samples and sampleAF >= 0:
tumsAFs.append(sampleAF)
sampleDP = row['gt_depths'][smpidx]
depths.append(sampleDP)
sampleGQ = row['gt_quals'][smpidx]
quals.append(sampleGQ)
addEnd.append(str(sampleAF))
if args.purity:
addEnd.append(str(rawAF))
elif somatic != 'none':
preceding = samples_tps[somatic] - 1
for s in timepoints[preceding]:
smpidx = smp2idx[s]
if args.purity:
sampleAF = utils.purityAF(row['gt_alt_freqs'][smpidx],purity[s])
rawAF = row['gt_alt_freqs'][smpidx]
else:
sampleAF = row['gt_alt_freqs'][smpidx]
if sampleAF >= 0:
normAFs.append(sampleAF)
sampleDP = row['gt_depths'][smpidx]
depths.append(sampleDP)
sampleGQ = row['gt_quals'][smpidx]
quals.append(sampleGQ)
addEnd.append(str(sampleAF))
if args.purity:
addEnd.append(str(rawAF))
smpidx = smp2idx[somatic]
if args.purity:
sampleAF = utils.purityAF(row['gt_alt_freqs'][smpidx],purity[s])
rawAF = row['gt_alt_freqs'][smpidx]
else:
sampleAF = row['gt_alt_freqs'][smpidx]
if sampleAF >= 0:
tumsAFs.append(sampleAF)
sampleDP = row['gt_depths'][smpidx]
depths.append(sampleDP)
sampleGQ = row['gt_quals'][smpidx]
quals.append(sampleGQ)
addEnd.append(str(sampleAF))
if args.purity:
addEnd.append(str(rawAF))
#check that requirements have been met
if min(depths) < minDP or min(quals) < minGQ:
continue
if len(normAFs) == 0 or len(tumsAFs) == 0:
continue
if any(af < minNorm or af > maxNorm for af in normAFs):
continue
if any(af < minTumor for af in tumsAFs):
continue
# print results that meet the requirements
# if args.cancer has been used, filter results to cancer matches
# add selected sample AFs
output.extend(addEnd)
if cancers != 'none':
abbrevs = str(row['civic_gene_abbreviations']).split(',') + str(row['cgi_gene_abbreviations']).split(',')
include = 0
for c in cancers:
if c in abbrevs:
include += 1
if include > 0:
print '\t'.join(output)
else:
print '\t'.join(output)
| 37.854406 | 183 | 0.574494 |
8fb3ebe4feff406e2a7ef763af3a7e2e26dde436 | 1,488 | py | Python | kangradb/interface.py | manparvesh/kangraDB | ac1417febc3a2933d2892f2e23a576040f7427e0 | [
"MIT"
] | null | null | null | kangradb/interface.py | manparvesh/kangraDB | ac1417febc3a2933d2892f2e23a576040f7427e0 | [
"MIT"
] | null | null | null | kangradb/interface.py | manparvesh/kangraDB | ac1417febc3a2933d2892f2e23a576040f7427e0 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
The main interface that will be exported for use
"""
from kangradb.binary_tree import BinaryTree
from kangradb.physical_storage import Storage
class KangraDB(object):
    """Dict-like key/value database backed by an on-disk binary tree."""

    def __init__(self, f):
        self._storage = Storage(f)
        self._tree = BinaryTree(self._storage)

    def _assert_not_closed(self):
        # Guard used by every public operation.
        if self._storage.closed:
            raise ValueError('Database closed.')

    def close(self):
        """Close the underlying storage."""
        self._storage.close()

    def commit(self):
        """Flush pending tree changes to storage."""
        self._assert_not_closed()
        self._tree.commit()

    def __getitem__(self, key):
        """Support ``db[key]`` lookups."""
        self._assert_not_closed()
        return self._tree.get(key)

    def __setitem__(self, key, value):
        """Support ``db[key] = value`` assignments."""
        self._assert_not_closed()
        return self._tree.set(key, value)

    def __delitem__(self, key):
        """Support ``del db[key]``."""
        self._assert_not_closed()
        return self._tree.pop(key)

    def __contains__(self, key):
        """Support ``key in db`` via an EAFP lookup."""
        try:
            self[key]
        except KeyError:
            return False
        return True

    def __len__(self):
        """Number of entries currently in the tree."""
        return len(self._tree)
| 22.208955 | 54 | 0.568548 |
4a7e27b3369f0c9d30226ebcf03fd7a1028117b8 | 8,949 | py | Python | repos/wavegrad/learner.py | Iamgoofball/VocoderComparisons | 3dfc5cb604ccf3756321e2cdf9934aa933314145 | [
"MIT"
] | 6 | 2020-12-09T07:53:34.000Z | 2021-12-10T09:59:29.000Z | repos/wavegrad/learner.py | Iamgoofball/VocoderComparisons | 3dfc5cb604ccf3756321e2cdf9934aa933314145 | [
"MIT"
] | 1 | 2021-12-14T18:48:59.000Z | 2021-12-14T19:48:18.000Z | repos/wavegrad/learner.py | Iamgoofball/VocoderComparisons | 3dfc5cb604ccf3756321e2cdf9934aa933314145 | [
"MIT"
] | 1 | 2020-11-02T08:22:28.000Z | 2020-11-02T08:22:28.000Z | # Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torch.nn as nn
from glob import glob
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from wavegrad.dataset import from_path as dataset_from_path
from wavegrad.model import WaveGrad
def _nested_map(struct, map_fn):
if isinstance(struct, tuple):
return tuple(_nested_map(x, map_fn) for x in struct)
if isinstance(struct, list):
return [_nested_map(x, map_fn) for x in struct]
if isinstance(struct, dict):
return { k: _nested_map(v, map_fn) for k, v in struct.items() }
return map_fn(struct)
class WaveGradLearner:
    """Training loop for WaveGrad: diffusion training step, mixed precision,
    checkpoint save/restore, and TensorBoard summaries.

    Fix: the ``filename`` parameter of :meth:`save_to_checkpoint` and
    :meth:`restore_from_checkpoint` was ignored -- every checkpoint path
    hardcoded the literal ``(unknown)``.  Paths now honor ``filename``
    (default ``'weights'``).
    """

    def __init__(self, model_dir, model, dataset, optimizer, params, *args, **kwargs):
        os.makedirs(model_dir, exist_ok=True)
        self.model_dir = model_dir
        self.model = model
        self.dataset = dataset
        self.optimizer = optimizer
        self.params = params
        # AMP helpers; both are no-ops unless fp16=True is passed.
        self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
        self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
        self.step = 0
        self.is_master = True
        # Precompute the diffusion noise levels sqrt(cumprod(1 - beta)),
        # prepended with 1.0 for the zero-noise level.
        beta = np.array(self.params.noise_schedule)
        noise_level = np.cumprod(1 - beta)**0.5
        noise_level = np.concatenate([[1.0], noise_level], axis=0)
        self.noise_level = torch.tensor(noise_level.astype(np.float32))
        self.loss_fn = nn.L1Loss()
        self.summary_writer = None

    def _unwrapped_model(self):
        # DistributedDataParallel wraps the real model in `.module`.
        if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
            return self.model.module
        return self.model

    def state_dict(self):
        """Return a CPU-resident snapshot of model/optimizer/scaler state."""
        model_state = self._unwrapped_model().state_dict()
        return {
            'step': self.step,
            'model': {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items()},
            'optimizer': {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in self.optimizer.state_dict().items()},
            'params': dict(self.params),
            'scaler': self.scaler.state_dict(),
        }

    def load_state_dict(self, state_dict):
        """Load a checkpoint, tolerating name/shape mismatches (warm start)."""
        target = self._unwrapped_model()
        current_model_dict = target.state_dict()
        # Keep only weights whose name and shape match the current model.
        safe_dict = {k: v for k, v in state_dict['model'].items()
                     if k in current_model_dict.keys() and v.shape == current_model_dict[k].shape}
        invalid_dict = {k: v for k, v in state_dict['model'].items()
                        if not (k in current_model_dict.keys() and v.shape == current_model_dict[k].shape)}
        current_model_dict.update(safe_dict)
        target.load_state_dict(current_model_dict)
        del current_model_dict
        # Any mismatched weight means we are warm-starting a different
        # architecture, so the saved optimizer state would be invalid.
        warm_started = bool(len(invalid_dict.keys()))
        if 'optimizer' in state_dict.keys() and state_dict['optimizer'] is not None and not warm_started:
            self.optimizer.load_state_dict(state_dict['optimizer'])
        if 'scaler' in state_dict.keys() and state_dict['scaler'] is not None:
            if len(state_dict['scaler'].keys()):
                self.scaler.load_state_dict(state_dict['scaler'])
        if 'step' in state_dict.keys() and state_dict['step'] is not None:
            self.step = state_dict['step']

    def save_to_checkpoint(self, filename='weights', n_models_to_keep=2):
        """Save a step-stamped checkpoint and refresh the ``<filename>.pt`` link."""
        save_basename = f'{filename}-{self.step:08}.pt'
        save_name = f'{self.model_dir}/{save_basename}'
        link_name = f'{self.model_dir}/{filename}.pt'
        torch.save(self.state_dict(), save_name)
        if os.name == 'nt':
            # Windows symlinks need elevated privileges; write a full copy.
            torch.save(self.state_dict(), link_name)
        else:
            if os.path.islink(link_name):
                os.unlink(link_name)
            os.symlink(save_basename, link_name)
        # find and delete old checkpoints
        cp_list = sorted(glob(f'{self.model_dir}/{filename}-????????.pt'))
        if len(cp_list) > n_models_to_keep:
            for cp in cp_list[:-n_models_to_keep]:
                open(cp, 'w').close()  # empty file contents
                os.unlink(cp)  # delete file (move to trash when using Colab)

    def restore_from_checkpoint(self, filename='weights'):
        """Load the most recent checkpoint; return True on success."""
        try:
            cp_list = sorted(glob(f'{self.model_dir}/{filename}-????????.pt'))
            if len(cp_list) < 1:
                return False
            checkpoint = torch.load(cp_list[-1])
            self.load_state_dict(checkpoint)
            return True
        except FileNotFoundError:
            return False

    def train(self, max_steps=None, checkpoint_interval=5000, n_models_to_keep=2):
        """Run the training loop until ``max_steps`` (forever if None)."""
        device = next(self.model.parameters()).device
        while True:
            for features in tqdm(self.dataset, desc=f'Epoch {self.step // len(self.dataset)} Iter {self.step} ') if self.is_master else self.dataset:
                if max_steps is not None and self.step >= max_steps:
                    return
                features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
                loss = self.train_step(features)
                if torch.isnan(loss).any():
                    raise RuntimeError(f'Detected NaN loss at step {self.step}.')
                if self.is_master:
                    if self.step % 100 == 0:
                        self._write_summary(self.step, features, loss)
                    if self.step % checkpoint_interval == 0:
                        self.save_to_checkpoint(n_models_to_keep=n_models_to_keep)
                self.step += 1

    def train_step(self, features):
        """One diffusion training step; returns the L1 noise-prediction loss."""
        for param in self.model.parameters():
            param.grad = None
        audio = features['audio']
        spectrogram = features['spectrogram']
        N, T = audio.shape
        S = 1000  # number of discrete noise-schedule steps to sample from
        device = audio.device
        self.noise_level = self.noise_level.to(device)
        with self.autocast:
            # Sample a continuous noise scale between two adjacent levels.
            s = torch.randint(1, S + 1, [N], device=audio.device)
            l_a, l_b = self.noise_level[s-1], self.noise_level[s]
            noise_scale = l_a + torch.rand(N, device=audio.device) * (l_b - l_a)
            noise_scale = noise_scale.unsqueeze(1)
            noise = torch.randn_like(audio)
            noisy_audio = noise_scale * audio + (1.0 - noise_scale**2)**0.5 * noise
            predicted = self.model(noisy_audio, spectrogram, noise_scale.squeeze(1))
            loss = self.loss_fn(noise, predicted.squeeze(1))
        self.scaler.scale(loss).backward()
        self.scaler.unscale_(self.optimizer)
        self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm)
        self.scaler.step(self.optimizer)
        self.scaler.update()
        return loss

    def _write_summary(self, step, features, loss):
        # Lazily create the writer and reuse it across calls.
        writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
        writer.add_audio('audio/reference', features['audio'][0], step, sample_rate=self.params.sample_rate)
        writer.add_scalar('train/loss', loss, step)
        writer.add_scalar('train/grad_norm', self.grad_norm, step)
        writer.flush()
        self.summary_writer = writer
def _train_impl(replica_id, model, dataset, args, params):
    """Shared training driver for the single-GPU and distributed entry points."""
    torch.backends.cudnn.benchmark = True
    optimizer = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
    learner = WaveGradLearner(args.model_dir, model, dataset, optimizer, params, fp16=args.fp16)
    # Only replica 0 logs summaries and writes checkpoints.
    learner.is_master = replica_id == 0
    learner.restore_from_checkpoint()
    learner.train(
        max_steps=args.max_steps,
        checkpoint_interval=args.checkpoint_interval,
        n_models_to_keep=args.n_models_to_keep)
def train(args, params):
    """Single-process training entry point (replica 0, default CUDA device)."""
    model = WaveGrad(params).cuda()
    _train_impl(0, model, dataset_from_path(args.data_dirs, params), args, params)
def train_distributed(replica_id, replica_count, port, args, params):
    # One process per GPU: join the NCCL process group first, pin this
    # replica to its own device, then wrap the model in DDP. Env vars must
    # be set before init_process_group (env:// rendezvous).
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(port)
    torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)
    device = torch.device('cuda', replica_id)
    torch.cuda.set_device(device)
    model = WaveGrad(params).to(device)
    model = DistributedDataParallel(model, device_ids=[replica_id])
    _train_impl(replica_id, model, dataset_from_path(args.data_dirs, params, is_distributed=True), args, params)
| 42.212264 | 150 | 0.692927 |
4dc8a4c2ea70bbd6b714a2d9a0436c8b1ca6707d | 2,881 | py | Python | tests/devices/_internal/test_candidate_device.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | tests/devices/_internal/test_candidate_device.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | tests/devices/_internal/test_candidate_device.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import pathlib
from unittest import TestCase
from mbed_tools.devices._internal.candidate_device import CandidateDevice
def build_candidate_data(**overrides):
    """Return valid CandidateDevice kwargs, with *overrides* replacing defaults."""
    base = {
        "product_id": "0x1234",
        "vendor_id": "0x5678",
        "mount_points": (pathlib.Path("./foo"),),
        "serial_number": "qwer",
        "serial_port": "COM1",
    }
    base.update(overrides)
    return base
class TestCandidateDevice(TestCase):
    """Validation behaviour of the CandidateDevice constructor."""

    def test_produces_a_valid_candidate(self):
        data = build_candidate_data()
        candidate = CandidateDevice(**data)
        for field in ("product_id", "vendor_id", "mount_points", "serial_number", "serial_port"):
            self.assertEqual(getattr(candidate, field), data[field])

    def test_raises_when_product_id_is_empty(self):
        with self.assertRaisesRegex(ValueError, "product_id"):
            CandidateDevice(**build_candidate_data(product_id=""))

    def test_raises_when_product_id_is_not_hex(self):
        with self.assertRaisesRegex(ValueError, "product_id"):
            CandidateDevice(**build_candidate_data(product_id="TEST"))

    def test_prefixes_product_id_hex_value(self):
        candidate = CandidateDevice(**build_candidate_data(product_id="ff01"))
        self.assertEqual(candidate.product_id, "0xff01")

    def test_raises_when_vendor_id_is_empty(self):
        with self.assertRaisesRegex(ValueError, "vendor_id"):
            CandidateDevice(**build_candidate_data(vendor_id=""))

    def test_raises_when_vendor_id_is_not_hex(self):
        with self.assertRaisesRegex(ValueError, "vendor_id"):
            CandidateDevice(**build_candidate_data(vendor_id="TEST"))

    def test_prefixes_vendor_id_hex_value(self):
        candidate = CandidateDevice(**build_candidate_data(vendor_id="cbad"))
        self.assertEqual(candidate.vendor_id, "0xcbad")

    def test_raises_when_mount_points_are_empty(self):
        with self.assertRaisesRegex(ValueError, "mount_points"):
            CandidateDevice(product_id="1234", vendor_id="1234", mount_points=[], serial_number="1234")

    def test_raises_when_serial_number_is_empty(self):
        with self.assertRaisesRegex(ValueError, "serial_number"):
            CandidateDevice(**build_candidate_data(serial_number=""))
| 41.157143 | 103 | 0.722666 |
904a7ea4522745bb5c57ecd95ef6a0576989fe8d | 4,866 | py | Python | nlpaug/augmenter/sentence/lambada.py | techthiyanes/nlpaug | bb2fc63349bf949f6f6047ff447a0efb16983c0a | [
"MIT"
] | 3,121 | 2019-04-21T07:02:47.000Z | 2022-03-31T22:17:36.000Z | nlpaug/augmenter/sentence/lambada.py | techthiyanes/nlpaug | bb2fc63349bf949f6f6047ff447a0efb16983c0a | [
"MIT"
] | 186 | 2019-05-31T18:18:13.000Z | 2022-03-28T10:11:05.000Z | nlpaug/augmenter/sentence/lambada.py | techthiyanes/nlpaug | bb2fc63349bf949f6f6047ff447a0efb16983c0a | [
"MIT"
] | 371 | 2019-03-17T17:59:56.000Z | 2022-03-31T01:45:15.000Z | """
Augmenter that apply operation (sentence level) to textual input based on abstractive summarization.
"""
import os
import json
from nlpaug.augmenter.sentence import SentenceAugmenter
import nlpaug.model.lang_models as nml
from nlpaug.util import Action, Doc
LAMBADA_MODELS = {}
def init_lambada_model(model_dir, threshold, min_length, max_length, batch_size,
                       temperature, top_k, top_p, repetition_penalty, device, force_reload):
    """Return a process-wide cached Lambada model for (model_dir, device),
    constructing it on first use or when *force_reload* is set."""
    global LAMBADA_MODELS
    cache_key = '_'.join([os.path.basename(model_dir), str(device)])
    if cache_key in LAMBADA_MODELS and not force_reload:
        # Refresh the tunable generation/classification settings on the
        # cached instance so repeated calls honor the latest arguments.
        cached = LAMBADA_MODELS[cache_key]
        cached.threshold = threshold
        cached.min_length = min_length
        cached.max_length = max_length
        cached.batch_size = batch_size
        cached.temperature = temperature
        cached.top_k = top_k
        cached.top_p = top_p
        cached.repetition_penalty = repetition_penalty
        return cached
    model = nml.Lambada(
        cls_model_dir=os.path.join(model_dir, 'cls'), gen_model_dir=os.path.join(model_dir, 'gen'),
        threshold=threshold, max_length=max_length, min_length=min_length, temperature=temperature,
        top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, batch_size=batch_size,
        device=device)
    LAMBADA_MODELS[cache_key] = model
    return model
class LambadaAug(SentenceAugmenter):
    """
    Augmenter that leverage contextual word embeddings to find top n similar word for augmentation.

    :param str model_dir: Directory of model. It is generated from train_lambada.sh under scritps folders.n
    :param float threshold: The threshold of classification probabilty for accpeting generated text. Return all result if threshold
        is None.
    :param int batch_size: Batch size.
    :param int min_length: The min length of output text.
    :param int max_length: The max length of output text.
    :param float temperature: The value used to module the next token probabilities.
    :param int top_k: The number of highest probability vocabulary tokens to keep for top-k-filtering.
    :param float top_p: If set to float < 1, only the most probable tokens with probabilities that add up to `top_p` or
        higher are kept for generation.
    :param float repetition_penalty : The parameter for repetition penalty. 1.0 means no penalty.
    :param str device: Default value is CPU. If value is CPU, it uses CPU for processing. If value is CUDA, it uses GPU
        for processing. Possible values include 'cuda' and 'cpu'.
    :param bool force_reload: Force reload the contextual word embeddings model to memory when initialize the class.
        Default value is False and suggesting to keep it as False if performance is the consideration.
    :param str name: Name of this augmenter

    >>> import nlpaug.augmenter.sentence as nas
    >>> aug = nas.LambadaAug()
    """

    def __init__(self, model_dir, threshold=None, min_length=100, max_length=300,
                 batch_size=16, temperature=1.0, top_k=50, top_p=0.9, repetition_penalty=1.0,
                 name='Lambada_Aug', device='cpu', force_reload=False, verbose=0):
        super().__init__(
            action=Action.INSERT, name=name, tokenizer=None, stopwords=None, device=device,
            include_detail=False, verbose=verbose)
        self.model_dir = model_dir
        # Label -> id mapping produced during classifier training; insert()
        # only accepts labels present in this mapping.
        with open(os.path.join(model_dir, 'cls', 'label_encoder.json')) as json_file:
            self.label2id = json.load(json_file)
        self.model = self.get_model(
            model_dir=model_dir, threshold=threshold, max_length=max_length, min_length=min_length,
            temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty,
            batch_size=batch_size, device=device, force_reload=force_reload)
        # The model reports the device actually in use (may differ from the arg).
        self.device = self.model.get_device()

    def insert(self, data, n=10):
        # `data` is a class label (or list of labels); generates up to n
        # sentences per label via the underlying Lambada model.
        if not data:
            return data
        if isinstance(data, list):
            all_data = data
        else:
            if data.strip() == '':
                return data
            all_data = [data]
        # Validate every requested label before generating anything.
        for d in all_data:
            if d not in self.label2id:
                raise Exception('Label {} does not exist. Possible labels are {}'.format(d, self.label2id.keys()))
        return self.model.predict(all_data, n=n)

    @classmethod
    def get_model(cls, model_dir, threshold, min_length, max_length, batch_size,
                  temperature, top_k, top_p, repetition_penalty, device='cuda', force_reload=False):
        # Delegates to the module-level cache so multiple augmenters share one model.
        return init_lambada_model(model_dir, threshold, min_length, max_length, batch_size,
                                  temperature, top_k, top_p, repetition_penalty, device, force_reload)
| 45.90566 | 131 | 0.708385 |
f811be657325d6732d36d4f2dc3042837ee3fa8c | 43,524 | py | Python | lib/sqlalchemy/testing/requirements.py | immerrr/sqlalchemy | 995fb577a64061a9cbab62b481c65a4c4d3e5a67 | [
"MIT"
] | 1 | 2020-07-21T16:06:40.000Z | 2020-07-21T16:06:40.000Z | lib/sqlalchemy/testing/requirements.py | taogeYT/sqlalchemy | e88dc004e6bcd1418cb8eb811d0aa580c2a44b8f | [
"MIT"
] | 4 | 2020-04-23T19:00:28.000Z | 2021-09-28T18:14:58.000Z | lib/sqlalchemy/testing/requirements.py | taogeYT/sqlalchemy | e88dc004e6bcd1418cb8eb811d0aa580c2a44b8f | [
"MIT"
] | null | null | null | # testing/requirements.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
import platform
from . import exclusions
from . import only_on
from .. import create_engine
from .. import util
from ..pool import QueuePool
class Requirements:
    """Empty base class for requirement collections (SuiteRequirements below
    builds on it)."""
    pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def table_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for tables."""
return exclusions.closed()
@property
def index_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for indexes."""
return exclusions.closed()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def table_value_constructor(self):
"""Database / dialect supports a query like::
SELECT * FROM VALUES ( (c1, c2), (c1, c2), ...)
AS some_table(col1, col2)
SQLAlchemy generates this with the :func:`_sql.values` function.
"""
return exclusions.closed()
@property
def standard_cursor_sql(self):
"""Target database passes SQL-92 style statements to cursor.execute()
when a statement like select() or insert() is run.
A very small portion of dialect-level tests will ensure that certain
conditions are present in SQL strings, and these tests use very basic
SQL that will work on any SQL-like platform in order to assert results.
It's normally a given for any pep-249 DBAPI that a statement like
"SELECT id, name FROM table WHERE some_table.id=5" will work.
However, there are dialects that don't actually produce SQL Strings
and instead may work with symbolic objects instead, or dialects that
aren't working with SQL, so for those this requirement can be marked
as excluded.
"""
return exclusions.open()
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled
or self.deferrable_fks.enabled
)
@property
def queue_pool(self):
"""target database is using QueuePool"""
def go(config):
return isinstance(config.db.pool, QueuePool)
return exclusions.only_if(go)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def sql_expression_limit_offset(self):
    """target database can render LIMIT and/or OFFSET with a complete
    SQL expression, such as one that uses the addition operator.
    """
    return exclusions.closed()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullable_booleans(self):
"""Target database allows boolean columns to store NULL."""
return exclusions.open()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.open()
@property
def standalone_null_binds_whereclause(self):
"""target database/driver supports bound parameters with NULL in the
WHERE clause, in situations where it has to be typed.
"""
return exclusions.open()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def ctes(self):
"""Target database supports CTEs"""
return exclusions.closed()
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return exclusions.closed()
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def group_by_complex_expression(self):
"""target platform supports SQL expressions in GROUP BY
e.g.
SELECT x + y AS somelabel FROM table GROUP BY x + y
"""
return exclusions.open()
@property
def sane_rowcount(self):
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_sane_rowcount,
"driver doesn't support 'sane' rowcount",
)
@property
def sane_multi_rowcount(self):
return exclusions.fails_if(
lambda config: not config.db.dialect.supports_sane_multi_rowcount,
"driver %(driver)s %(doesnt_support)s 'sane' multi row count",
)
@property
def sane_rowcount_w_returning(self):
return exclusions.fails_if(
lambda config: not (
config.db.dialect.supports_sane_rowcount_returning
),
"driver doesn't support 'sane' rowcount when returning is on",
)
@property
def empty_inserts(self):
    """target platform supports INSERT with no values, i.e.
    INSERT DEFAULT VALUES or equivalent."""

    def supports_no_value_insert(config):
        # any of the three dialect capabilities enables the feature
        dialect = config.db.dialect
        return bool(
            dialect.supports_empty_insert
            or dialect.supports_default_values
            or dialect.supports_default_metavalue
        )

    return exclusions.only_if(
        supports_no_value_insert, "empty inserts not supported"
    )
@property
def empty_inserts_executemany(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent, within executemany()"""
return self.empty_inserts
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def full_returning(self):
"""target platform supports RETURNING completely, including
multiple rows returned.
"""
return exclusions.only_if(
lambda config: config.db.dialect.full_returning,
"%(database)s %(does_support)s 'RETURNING of multiple rows'",
)
@property
def insert_executemany_returning(self):
"""target platform supports RETURNING when INSERT is used with
executemany(), e.g. multiple parameter sets, indicating
as many rows come back as do parameter sets were passed.
"""
return exclusions.only_if(
lambda config: config.db.dialect.insert_executemany_returning,
"%(database)s %(does_support)s 'RETURNING of "
"multiple rows with INSERT executemany'",
)
@property
def returning(self):
"""target platform supports RETURNING for at least one row.
.. seealso::
:attr:`.Requirements.full_returning`
"""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'RETURNING of a single row'",
)
@property
def tuple_in(self):
"""Target platform supports the syntax
"(x, y) IN ((x1, y1), (x2, y2), ...)"
"""
return exclusions.closed()
@property
def tuple_in_w_empty(self):
"""Target platform tuple IN w/ empty set"""
return self.tuple_in
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names.",
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts.",
)
@property
def implements_get_lastrowid(self):
"""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
"""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def emulated_lastrowid_even_with_sequences(self):
"""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes, even if the table has a
Sequence on it.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
"""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return only_on(lambda config: config.db.dialect.supports_schemas)
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema
foreign keys"""
return exclusions.closed()
@property
def foreign_key_constraint_name_reflection(self):
"""Target supports refleciton of FOREIGN KEY constraints and
will return the name of the constraint that was used in the
"CONSTRAINT <name> FOREIGN KEY" DDL.
MySQL prior to version 8 and MariaDB prior to version 10.5
don't support this.
"""
return exclusions.closed()
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return exclusions.closed()
@property
def default_schema_name_switch(self):
"""target dialect implements provisioning module including
set_default_schema_on_connection"""
return exclusions.closed()
@property
def server_side_cursors(self):
"""Target dialect must support server side cursors."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_server_side_cursors],
"no server side cursors support",
)
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_sequences],
"no sequence support",
)
@property
def no_sequences(self):
"""the opposite of "sequences", DB does not support sequences at
all."""
return exclusions.NotPredicate(self.sequences)
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if(
[
lambda config: config.db.dialect.supports_sequences
and config.db.dialect.sequences_optional
],
"no sequence support, or sequences not optional",
)
@property
def supports_lastrowid(self):
"""target database / driver supports cursor.lastrowid as a means
of retrieving the last inserted primary key value.
note that if the target DB supports sequences also, this is still
assumed to work. This is a new use case brought on by MariaDB 10.3.
"""
return exclusions.only_if(
[lambda config: config.db.dialect.postfetch_lastrowid]
)
@property
def no_lastrowid_support(self):
"""the opposite of supports_lastrowid"""
return exclusions.only_if(
[lambda config: not config.db.dialect.postfetch_lastrowid]
)
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
"""target database has general support for table reflection"""
return exclusions.open()
@property
def reflect_tables_no_columns(self):
"""target database supports creation and reflection of tables with no
columns, or at least tables that seem to have no columns."""
return exclusions.closed()
@property
def comment_reflection(self):
return exclusions.closed()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW
definition."""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return exclusions.closed()
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_reflect_indexes(self):
return self.temp_table_reflection
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def index_reflects_included_columns(self):
return exclusions.closed()
@property
def indexes_with_ascdesc(self):
"""target database supports CREATE INDEX with per-column ASC/DESC."""
return exclusions.open()
@property
def indexes_with_expressions(self):
"""target database supports CREATE INDEX against SQL expressions."""
return exclusions.closed()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def check_constraint_reflection(self):
"""target dialect supports reflection of check constraints"""
return exclusions.closed()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def symbol_names_w_double_quote(self):
"""Target driver can create tables with a name like 'some " table'"""
return exclusions.open()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return exclusions.closed()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return exclusions.closed()
@property
def isolation_level(self):
"""target dialect supports general isolation level settings.
Note that this requirement, when enabled, also requires that
the get_isolation_levels() method be implemented.
"""
return exclusions.closed()
def get_isolation_levels(self, config):
    """Return the isolation levels supported by the dialect under test.

    The structure tells the testing suite which level the backend
    starts out with and which other values it accepts.  The returned
    dictionary has two keys: "default" and "supported"; the
    "supported" list includes AUTOCOMMIT when the dialect supports it.

    Returns None when the dialect does not implement
    ``get_isolation_level_values()``; a real value is only expected
    when the :meth:`.DefaultRequirements.isolation_level`
    requirement is open.

    E.g.::

        >>> testing.requirements.get_isolation_levels()
        {
            "default": "READ_COMMITTED",
            "supported": [
                "SERIALIZABLE", "READ UNCOMMITTED",
                "READ COMMITTED", "REPEATABLE READ",
                "AUTOCOMMIT"
            ]
        }
    """
    with config.db.connect() as conn:
        dialect = conn.dialect
        try:
            levels = dialect.get_isolation_level_values(
                conn.connection.dbapi_connection
            )
        except NotImplementedError:
            return None
        return {
            "default": dialect.default_isolation_level,
            "supported": levels,
        }
@property
def get_isolation_level_values(self):
    """target dialect supports the
    :meth:`_engine.Dialect.get_isolation_level_values`
    method added in SQLAlchemy 2.0.
    """

    def supports_level_values(config):
        with config.db.connect() as conn:
            try:
                conn.dialect.get_isolation_level_values(
                    conn.connection.dbapi_connection
                )
            except NotImplementedError:
                return False
        return True

    return exclusions.only_if(supports_level_values)
@property
def dialect_level_isolation_level_param(self):
    """test that the dialect allows the 'isolation_level' argument
    to be handled by DefaultDialect"""

    def go(config):
        # Narrowed from a bare ``except:``: a bare clause would also
        # swallow KeyboardInterrupt / SystemExit raised while the
        # throwaway engine is being constructed.  Any ordinary failure
        # to accept the argument still reads as "not supported".
        try:
            e = create_engine(
                config.db.url, isolation_level="READ COMMITTED"
            )
        except Exception:
            return False
        else:
            return (
                e.dialect._on_connect_isolation_level == "READ COMMITTED"
            )

    return exclusions.only_if(go)
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
"""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def json_index_supplementary_unicode_element(self):
return exclusions.open()
@property
def legacy_unconditional_json_extract(self):
"""Backend has a JSON_EXTRACT or similar function that returns a
valid JSON string in all cases.
Used to test a legacy feature and is not needed.
"""
return exclusions.closed()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.open()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def cast_precision_numerics_many_significant_digits(self):
"""same as precision_numerics_many_significant_digits but within the
context of a CAST statement (hello MySQL)
"""
return self.precision_numerics_many_significant_digits
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select(literal(expr))
)
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def numeric_received_as_decimal_untyped(self):
"""target backend will return result columns that are explicitly
against NUMERIC or similar precision-numeric datatypes (not including
FLOAT or INT types) as Python Decimal objects, and not as floats
or ints, including when no SQLAlchemy-side typing information is
associated with the statement (e.g. such as a raw SQL string).
This should be enabled if either the DBAPI itself returns Decimal
objects, or if the dialect has set up DBAPI-specific return type
handlers such that Decimal objects come back automatically.
"""
return exclusions.open()
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate
"""
return exclusions.open()
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key
"""
return exclusions.open()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def infinity_floats(self):
"""The Float type can persist and load float('inf'), float('-inf')."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
    """Target database must support an unbounded Text()
    type such as TEXT or CLOB."""
    return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
"""
return exclusions.open()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including PostgreSQL don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def order_by_collation(self):
    """target backend supports a collation usable in ORDER BY; enabled
    by subclasses implementing :meth:`.get_order_by_collation`."""

    def lacks_collation(config):
        # the base get_order_by_collation() raises NotImplementedError,
        # which means "skip"; a subclass override means "run".
        try:
            self.get_order_by_collation(config)
        except NotImplementedError:
            return True
        return False

    return exclusions.skip_if(lacks_collation)
def get_order_by_collation(self, config):
    """Return a backend-specific collation name for ORDER BY tests.

    Hook for dialect-specific subclasses.  The default implementation
    raises NotImplementedError, which causes the ``order_by_collation``
    requirement to be marked as skipped.
    """
    raise NotImplementedError()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections
)
@property
def no_windows(self):
return exclusions.skip_if(self._running_on_windows())
def _running_on_windows(self):
return exclusions.LambdaPredicate(
lambda: platform.system() == "Windows",
description="running on Windows",
)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Stability issues with coverage",
)
@property
def sqlalchemy2_stubs(self):
    """tests require the sqlalchemy2-stubs package to be installed."""

    def check(config):
        # NOTE(review): "sqlalchemy-stubs.ext.mypy" is not a valid
        # Python identifier path (hyphenated name); unless the stub
        # distribution exposes a runtime-importable package under
        # exactly this name, __import__ always raises and this check
        # always returns False -- confirm the intended module path.
        try:
            __import__("sqlalchemy-stubs.ext.mypy")
        except ImportError:
            return False
        else:
            return True

    return exclusions.only_if(check)
@property
def python38(self):
return exclusions.only_if(
lambda: util.py38, "Python 3.8 or above required"
)
@property
def cpython(self):
return exclusions.only_if(
lambda: util.cpython, "cPython interpreter needed"
)
@property
def patch_library(self):
def check_lib():
try:
__import__("patch")
except ImportError:
return False
else:
return True
return exclusions.only_if(check_lib, "patch library needed")
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as PostgreSQL notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled",
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not util.has_compiled_ext(), "C extensions not installed"
)
def _has_sqlite(self):
    # Probe for a usable sqlite driver by constructing a throwaway
    # engine; a missing DBAPI surfaces as ImportError.
    from sqlalchemy import create_engine

    try:
        create_engine("sqlite://")
    except ImportError:
        return False
    return True
@property
def async_dialect(self):
"""dialect makes use of await_() to invoke operations on the DBAPI."""
return exclusions.closed()
@property
def greenlet(self):
    """tests require the greenlet library to be importable."""

    def greenlet_installed(config):
        try:
            import greenlet  # noqa: F401
        except ImportError:
            return False
        return True

    return exclusions.only_if(greenlet_installed)
@property
def computed_columns(self):
"Supports computed columns"
return exclusions.closed()
@property
def computed_columns_stored(self):
"Supports computed columns with `persisted=True`"
return exclusions.closed()
@property
def computed_columns_virtual(self):
"Supports computed columns with `persisted=False`"
return exclusions.closed()
@property
def computed_columns_default_persisted(self):
"""If the default persistence is virtual or stored when `persisted`
is omitted"""
return exclusions.closed()
@property
def computed_columns_reflect_persisted(self):
"""If persistence information is returned by the reflection of
computed columns"""
return exclusions.closed()
@property
def supports_distinct_on(self):
"""If a backend supports the DISTINCT ON in a select"""
return exclusions.closed()
@property
def supports_is_distinct_from(self):
"""Supports some form of "x IS [NOT] DISTINCT FROM y" construct.
Different dialects will implement their own flavour, e.g.,
sqlite will emit "x IS NOT y" instead of "x IS DISTINCT FROM y".
.. seealso::
:meth:`.ColumnOperators.is_distinct_from`
"""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_is_distinct_from,
"driver doesn't support an IS DISTINCT FROM construct",
)
@property
def identity_columns(self):
"""If a backend supports GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY"""
return exclusions.closed()
@property
def identity_columns_standard(self):
"""If a backend supports GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY with a standard syntax.
This is mainly to exclude MSSql.
"""
return exclusions.closed()
@property
def regexp_match(self):
"""backend supports the regexp_match operator."""
return exclusions.closed()
@property
def regexp_replace(self):
"""backend supports the regexp_replace operator."""
return exclusions.closed()
@property
def fetch_first(self):
"""backend supports the fetch first clause."""
return exclusions.closed()
@property
def fetch_percent(self):
"""backend supports the fetch first clause with percent."""
return exclusions.closed()
@property
def fetch_ties(self):
"""backend supports the fetch first clause with ties."""
return exclusions.closed()
@property
def fetch_no_order_by(self):
"""backend supports the fetch first without order by"""
return exclusions.closed()
@property
def fetch_offset_with_options(self):
"""backend supports the offset when using fetch first with percent
or ties. basically this is "not mssql"
"""
return exclusions.closed()
@property
def fetch_expression(self):
"""backend supports fetch / offset with expression in them, like
SELECT * FROM some_table
OFFSET 1 + 1 ROWS FETCH FIRST 1 + 1 ROWS ONLY
"""
return exclusions.closed()
@property
def autoincrement_without_sequence(self):
"""If autoincrement=True on a column does not require an explicit
sequence. This should be false only for oracle.
"""
return exclusions.open()
@property
def generic_classes(self):
"If X[Y] can be implemented with ``__class_getitem__``. py3.7+"
return exclusions.open()
@property
def json_deserializer_binary(self):
    "indicates if the json_deserializer function is called with bytes"
    # Default: requirement not met (exclusions.closed()).
    return exclusions.closed()
| 28.900398 | 81 | 0.636385 |
72d4b6cbdb2a52554573dbc3af40824bb2010ce3 | 747 | py | Python | Lesson4/flowers.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | 2 | 2021-06-01T13:24:04.000Z | 2021-06-01T13:27:47.000Z | Lesson4/flowers.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | null | null | null | Lesson4/flowers.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | null | null | null | number = int(input())
# Track the three largest flower counts seen so far (max1 >= max2 >= max3)
# and the flower kinds they belong to.
max1 = 0
max2 = 0
max3 = 0
kind1 = 'a'
kind2 = 'b'
kind3 = 'c'
# NOTE(review): ``number`` is read from stdin just above this chunk;
# the loop re-prompts until more than three flowers are entered.
while number <= 3:
    print('Количество цветов должно быть больше трёх!')
    number = int(input('Введите количество цветов еще раз: '))
# Read one (kind, count) pair per flower and keep the three largest counts.
for k in range(number):
    kind = input()
    NumberKind = int(input())
    if NumberKind > max1:
        # New overall maximum: shift the previous leaders down one place.
        max3 = max2
        max2 = max1
        max1 = NumberKind
        kind3 = kind2
        kind2 = kind1
        kind1 = kind
    else:
        if NumberKind > max2:
            # New second place: demote the old runner-up to third.
            max3 = max2
            max2 = NumberKind
            kind3 = kind2
            kind2 = kind
        else:
            if NumberKind > max3:
                # New third place.
                max3 = NumberKind
                kind3 = kind
print(kind1, kind2, kind3, sep = '\n') | 24.096774 | 62 | 0.522088 |
d0455ffd7e22ff5be24195344ff465f3b03bd8e1 | 2,271 | py | Python | .history/postImages/index_20201006213946.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | 2 | 2020-10-21T22:14:15.000Z | 2020-10-21T22:14:16.000Z | .history/postImages/index_20201006213946.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | .history/postImages/index_20201006213946.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | import csv
import requests
# NOTE(review): files opened here are never closed; fine for a one-shot
# script, but a ``with`` block would be safer.
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
obj = {}
# Index the bridge-image rows by project code (column 1).
for i in finalCsv:
    x = i.split(',')
    obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
# Merge the image URLs into the records from final.csv, keyed by column 6.
for i in finCsv:
    x = i.split(',')
    id = x[6]  # NOTE(review): shadows the builtin ``id``
    finalObj[id]= {}
    if x[6] in obj:
        finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
        finalObj[x[6]]['after_img'] = obj[x[6]]['after_img'][0:-1]
# finalObj[x[6]]['district'] = x[1]
# finalObj[x[6]]['sector'] = x[2]
# finalObj[x[6]]['cell'] = x[3]
# finalObj[x[6]]['bridge_site'] = x[4]
# finalObj[x[6]]['stage'] = x[5]
# finalObj[x[6]]['id'] = int(x[6])
# finalObj[x[6]]['type'] = x[7]
# finalObj[x[6]]['latt'] = float(x[8])
# finalObj[x[6]]['long'] = float(x[9])
# try:
# serv = float(x[10])
# except:
# serv = x[10]
# sv = x[13].split(' ')[2]
# finalObj[x[6]]['served'] = serv
# finalObj[x[6]]['community_served'] = x[14]
# try:
# pId = int(x[15])
# except :
# pId = x[15]
# finalObj[x[6]]['provId'] = pId
# finalObj[x[6]]['districtId'] = int(x[16])
# finalObj[x[6]]['sectorId'] = int(x[17])
# finalObj[x[6]]['cellId'] = int(x[18][0:-1])
# print(id)
# Build field names from the final.csv header row.
# NOTE(review): ``id`` and ``x`` below are leftovers from the LAST iteration
# of the loop above, so only that single record receives these fields —
# this loop probably belongs inside the loop above; confirm intent.
row = fin[0].split(',')
for i in range(len(row)):
    key = row[i].replace(' ',"_")
    key = key.strip()
    if i == 8:
        key = 'latitude'
    if i == 9:
        key = 'longitude'
    if i == 11:
        continue  # column 11 deliberately skipped — TODO confirm why
    finalObj[id][key.lower()] = x[i]
print(finalObj['1013351'])
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
| 31.109589 | 97 | 0.468516 |
0104a09e89241637e3a45794eab842599831997c | 16,673 | py | Python | pipenv/patched/notpip/_internal/req/req_uninstall.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | 1 | 2019-09-04T15:17:41.000Z | 2019-09-04T15:17:41.000Z | pipenv/patched/notpip/_internal/req/req_uninstall.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | 3 | 2020-06-05T18:53:36.000Z | 2021-06-10T20:47:05.000Z | pipenv/patched/notpip/_internal/req/req_uninstall.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | 1 | 2018-07-17T07:39:47.000Z | 2018-07-17T07:39:47.000Z | from __future__ import absolute_import
import csv
import functools
import logging
import os
import sys
import sysconfig
from pipenv.patched.notpip._vendor import pkg_resources
from pipenv.patched.notpip._internal.compat import WINDOWS, cache_from_source, uses_pycache
from pipenv.patched.notpip._internal.exceptions import UninstallationError
from pipenv.patched.notpip._internal.locations import bin_py, bin_user
from pipenv.patched.notpip._internal.utils.logging import indent_log
from pipenv.patched.notpip._internal.utils.misc import (
FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local,
normalize_path, renames,
)
from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory
logger = logging.getLogger(__name__)
def _script_names(dist, script_name, is_gui):
    """Create the fully qualified name of the files created by
    {console,gui}_scripts for the given ``dist``.

    Returns the list of file names
    """
    if dist_in_usersite(dist):
        script_root = bin_user
    else:
        script_root = bin_py
    base = os.path.join(script_root, script_name)
    names = [base]
    if WINDOWS:
        # Windows installs wrapper executables plus helper files.
        names.extend([base + '.exe', base + '.exe.manifest'])
        names.append(base + ('-script.pyw' if is_gui else '-script.py'))
    return names
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc

    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.

    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    reader = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for record_row in reader:
        entry = os.path.join(dist.location, record_row[0])
        yield entry
        if entry.endswith('.py'):
            directory, filename = os.path.split(entry)
            # legacy (non-__pycache__) .pyc next to the source file
            yield os.path.join(directory, filename[:-3] + '.pyc')
def compact(paths):
    """Compact a path set to contain the minimal number of paths
    necessary to contain all paths in the set. If /a/path/ and
    /a/path/to/a/file.txt are both in the set, leave only the
    shorter path."""
    sep = os.path.sep
    kept = set()
    # Shortest paths first, so parents are kept before their children
    # are considered.
    for candidate in sorted(paths, key=len):
        covered = False
        for prefix in kept:
            stem = prefix.rstrip("*")
            if (candidate.startswith(stem) and
                    candidate[len(stem.rstrip(sep))] == sep):
                covered = True
                break
        if not covered:
            kept.add(candidate)
    return kept
def compress_for_output_listing(paths):
    """Returns a tuple of 2 sets of which paths to display to user

    The first set contains paths that would be deleted. Files of a package
    are not added and the top-level directory of the package has a '*' added
    at the end - to signify that all it's contents are removed.

    The second set contains files that would have been skipped in the above
    folders.
    """
    all_paths = list(paths)
    skipped = set()

    # Partition the input into package folders and plain files.
    folder_set = set()
    file_set = set()
    for entry in all_paths:
        if entry.endswith(".pyc"):
            continue
        if entry.endswith("__init__.py") or ".dist-info" in entry:
            folder_set.add(os.path.dirname(entry))
        file_set.add(entry)

    folder_set = compact(folder_set)

    # Walk each folder on disk so files that are present but not in the
    # input set (and would therefore be left behind) can be reported.
    for folder in folder_set:
        for dirpath, _, filenames in os.walk(folder):
            for filename in filenames:
                if filename.endswith(".pyc"):
                    continue
                candidate = os.path.normcase(os.path.join(dirpath, filename))
                if os.path.isfile(candidate) and candidate not in file_set:
                    skipped.add(candidate)

    removed = file_set | {os.path.join(folder, "*") for folder in folder_set}
    return removed, skipped
class UninstallPathSet(object):
    """A set of file paths to be removed in the uninstallation of a
    requirement."""
    def __init__(self, dist):
        # Files/directories scheduled for removal.
        self.paths = set()
        # Paths we are not permitted to touch (outside the local env).
        self._refuse = set()
        # Mapping: .pth file path -> UninstallPthEntries to edit.
        self.pth = {}
        self.dist = dist
        # Removed files are moved into this temp dir so remove() can be
        # rolled back until commit() is called.
        self.save_dir = TempDirectory(kind="uninstall")
        self._moved_paths = []
    def _permitted(self, path):
        """
        Return True if the given path is one we are permitted to
        remove/modify, False otherwise.
        """
        return is_local(path)
    def add(self, path):
        """Schedule ``path`` for removal (or record it as refused)."""
        head, tail = os.path.split(path)
        # we normalize the head to resolve parent directory symlinks, but not
        # the tail, since we only want to uninstall symlinks, not their targets
        path = os.path.join(normalize_path(head), os.path.normcase(tail))
        if not os.path.exists(path):
            return
        if self._permitted(path):
            self.paths.add(path)
        else:
            self._refuse.add(path)
        # __pycache__ files can show up after 'installed-files.txt' is created,
        # due to imports
        if os.path.splitext(path)[1] == '.py' and uses_pycache:
            self.add(cache_from_source(path))
    def add_pth(self, pth_file, entry):
        """Schedule removal of ``entry`` from the given .pth file."""
        pth_file = normalize_path(pth_file)
        if self._permitted(pth_file):
            if pth_file not in self.pth:
                self.pth[pth_file] = UninstallPthEntries(pth_file)
            self.pth[pth_file].add(entry)
        else:
            self._refuse.add(pth_file)
    def _stash(self, path):
        # Mirror ``path`` (minus any drive letter) under the stash dir.
        return os.path.join(
            self.save_dir.path, os.path.splitdrive(path)[1].lstrip(os.path.sep)
        )
    def remove(self, auto_confirm=False, verbose=False):
        """Remove paths in ``self.paths`` with confirmation (unless
        ``auto_confirm`` is True)."""
        if not self.paths:
            logger.info(
                "Can't uninstall '%s'. No files were found to uninstall.",
                self.dist.project_name,
            )
            return
        dist_name_version = (
            self.dist.project_name + "-" + self.dist.version
        )
        logger.info('Uninstalling %s:', dist_name_version)
        with indent_log():
            if auto_confirm or self._allowed_to_proceed(verbose):
                self.save_dir.create()
                for path in sorted(compact(self.paths)):
                    # Move (don't delete) so rollback() can restore.
                    new_path = self._stash(path)
                    logger.debug('Removing file or directory %s', path)
                    self._moved_paths.append(path)
                    renames(path, new_path)
                for pth in self.pth.values():
                    pth.remove()
                logger.info('Successfully uninstalled %s', dist_name_version)
    def _allowed_to_proceed(self, verbose):
        """Display which files would be deleted and prompt for confirmation
        """
        def _display(msg, paths):
            if not paths:
                return
            logger.info(msg)
            with indent_log():
                for path in sorted(compact(paths)):
                    logger.info(path)
        if not verbose:
            will_remove, will_skip = compress_for_output_listing(self.paths)
        else:
            # In verbose mode, display all the files that are going to be
            # deleted.
            will_remove = list(self.paths)
            will_skip = set()
        _display('Would remove:', will_remove)
        _display('Would not remove (might be manually added):', will_skip)
        _display('Would not remove (outside of prefix):', self._refuse)
        return ask('Proceed (y/n)? ', ('y', 'n')) == 'y'
    def rollback(self):
        """Rollback the changes previously made by remove()."""
        if self.save_dir.path is None:
            logger.error(
                "Can't roll back %s; was not uninstalled",
                self.dist.project_name,
            )
            return False
        logger.info('Rolling back uninstall of %s', self.dist.project_name)
        for path in self._moved_paths:
            # Move each stashed file back to its original location.
            tmp_path = self._stash(path)
            logger.debug('Replacing %s', path)
            renames(tmp_path, path)
        for pth in self.pth.values():
            pth.rollback()
    def commit(self):
        """Remove temporary save dir: rollback will no longer be possible."""
        self.save_dir.cleanup()
        self._moved_paths = []
    @classmethod
    def from_dist(cls, dist):
        """Build an UninstallPathSet covering everything installed for
        ``dist``, handling the different install layouts (.egg-info,
        .dist-info, .egg, develop/egg-link, distutils)."""
        dist_path = normalize_path(dist.location)
        if not dist_is_local(dist):
            logger.info(
                "Not uninstalling %s at %s, outside environment %s",
                dist.key,
                dist_path,
                sys.prefix,
            )
            return cls(dist)
        if dist_path in {p for p in {sysconfig.get_path("stdlib"),
                                     sysconfig.get_path("platstdlib")}
                         if p}:
            logger.info(
                "Not uninstalling %s at %s, as it is in the standard library.",
                dist.key,
                dist_path,
            )
            return cls(dist)
        paths_to_remove = cls(dist)
        develop_egg_link = egg_link_path(dist)
        develop_egg_link_egg_info = '{}.egg-info'.format(
            pkg_resources.to_filename(dist.project_name))
        egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
        # Special case for distutils installed package
        distutils_egg_info = getattr(dist._provider, 'path', None)
        # Uninstall cases order do matter as in the case of 2 installs of the
        # same package, pip needs to uninstall the currently detected version
        if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
                not dist.egg_info.endswith(develop_egg_link_egg_info)):
            # if dist.egg_info.endswith(develop_egg_link_egg_info), we
            # are in fact in the develop_egg_link case
            paths_to_remove.add(dist.egg_info)
            if dist.has_metadata('installed-files.txt'):
                for installed_file in dist.get_metadata(
                        'installed-files.txt').splitlines():
                    path = os.path.normpath(
                        os.path.join(dist.egg_info, installed_file)
                    )
                    paths_to_remove.add(path)
            # FIXME: need a test for this elif block
            # occurs with --single-version-externally-managed/--record outside
            # of pip
            elif dist.has_metadata('top_level.txt'):
                if dist.has_metadata('namespace_packages.txt'):
                    namespaces = dist.get_metadata('namespace_packages.txt')
                else:
                    namespaces = []
                for top_level_pkg in [
                        p for p
                        in dist.get_metadata('top_level.txt').splitlines()
                        if p and p not in namespaces]:
                    path = os.path.join(dist.location, top_level_pkg)
                    paths_to_remove.add(path)
                    paths_to_remove.add(path + '.py')
                    paths_to_remove.add(path + '.pyc')
                    paths_to_remove.add(path + '.pyo')
        elif distutils_egg_info:
            raise UninstallationError(
                "Cannot uninstall {!r}. It is a distutils installed project "
                "and thus we cannot accurately determine which files belong "
                "to it which would lead to only a partial uninstall.".format(
                    dist.project_name,
                )
            )
        elif dist.location.endswith('.egg'):
            # package installed by easy_install
            # We cannot match on dist.egg_name because it can slightly vary
            # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
            paths_to_remove.add(dist.location)
            easy_install_egg = os.path.split(dist.location)[1]
            easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
        elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
            # wheel-style install: files listed in RECORD
            for path in uninstallation_paths(dist):
                paths_to_remove.add(path)
        elif develop_egg_link:
            # develop egg
            with open(develop_egg_link, 'r') as fh:
                link_pointer = os.path.normcase(fh.readline().strip())
            assert (link_pointer == dist.location), (
                'Egg-link %s does not match installed location of %s '
                '(at %s)' % (link_pointer, dist.project_name, dist.location)
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, dist.location)
        else:
            logger.debug(
                'Not sure how to uninstall: %s - Check: %s',
                dist, dist.location,
            )
        # find distutils scripts= scripts
        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
            for script in dist.metadata_listdir('scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
        # find console_scripts
        _scripts_to_remove = []
        console_scripts = dist.get_entry_map(group='console_scripts')
        for name in console_scripts.keys():
            _scripts_to_remove.extend(_script_names(dist, name, False))
        # find gui_scripts
        gui_scripts = dist.get_entry_map(group='gui_scripts')
        for name in gui_scripts.keys():
            _scripts_to_remove.extend(_script_names(dist, name, True))
        for s in _scripts_to_remove:
            paths_to_remove.add(s)
        return paths_to_remove
class UninstallPthEntries(object):
    """Tracks entries to be removed from a single .pth file, keeping the
    original contents so the edit can be rolled back."""
    def __init__(self, pth_file):
        if not os.path.isfile(pth_file):
            raise UninstallationError(
                "Cannot remove entries from nonexistent file %s" % pth_file
            )
        self.file = pth_file
        self.entries = set()
        # Original file lines, captured by remove() for rollback().
        self._saved_lines = None
    def add(self, entry):
        """Record ``entry`` for later removal from the .pth file."""
        entry = os.path.normcase(entry)
        # On Windows, os.path.normcase converts the entry to use
        # backslashes. This is correct for entries that describe absolute
        # paths outside of site-packages, but all the others use forward
        # slashes.
        if WINDOWS and not os.path.splitdrive(entry)[0]:
            entry = entry.replace('\\', '/')
        self.entries.add(entry)
    def remove(self):
        """Rewrite the .pth file with the recorded entries removed."""
        logger.debug('Removing pth entries from %s:', self.file)
        with open(self.file, 'rb') as fh:
            # windows uses '\r\n' with py3k, but uses '\n' with py2.x
            lines = fh.readlines()
            self._saved_lines = lines
        if any(b'\r\n' in line for line in lines):
            endline = '\r\n'
        else:
            endline = '\n'
        # handle missing trailing newline
        if lines and not lines[-1].endswith(endline.encode("utf-8")):
            lines[-1] = lines[-1] + endline.encode("utf-8")
        for entry in self.entries:
            try:
                logger.debug('Removing entry: %s', entry)
                lines.remove((entry + endline).encode("utf-8"))
            except ValueError:
                # entry not present in the file; nothing to remove
                pass
        with open(self.file, 'wb') as fh:
            fh.writelines(lines)
    def rollback(self):
        """Restore the .pth file to the contents saved by remove().

        Returns False (and logs) if remove() was never called.
        """
        if self._saved_lines is None:
            logger.error(
                'Cannot roll back changes to %s, none were made', self.file
            )
            return False
        logger.debug('Rolling %s back to previous state', self.file)
        with open(self.file, 'wb') as fh:
            fh.writelines(self._saved_lines)
        return True
| 36.563596 | 91 | 0.584118 |
c890c1197e931ab7273f535803815b6bde1d7895 | 674 | py | Python | trails/feeds/turris.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | 1 | 2021-01-10T14:35:36.000Z | 2021-01-10T14:35:36.000Z | trails/feeds/turris.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | 29 | 2018-10-18T20:14:49.000Z | 2019-07-08T07:45:08.000Z | trails/feeds/turris.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | 2 | 2018-11-29T22:50:57.000Z | 2019-04-12T03:35:35.000Z | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
# Feed configuration read by the feed-loading framework.
__url__ = "https://www.turris.cz/greylist-data/greylist-latest.csv"
__check__ = ".1"  # sanity substring that must appear in a valid payload
__info__ = "bad reputation"
__reference__ = "turris.cz"
def fetch():
    """Download the Turris greylist CSV and return a mapping of
    address -> (info, reference) for each listed entry."""
    results = {}
    content = retrieve_content(__url__)
    if __check__ not in content:
        # payload failed the sanity check; return nothing
        return results
    for raw in content.split('\n'):
        raw = raw.strip()
        # skip blanks, comments, and lines without a dotted address
        if not raw or raw.startswith('#') or '.' not in raw:
            continue
        results[raw.split(',')[0].strip()] = (__info__, __reference__)
    return results
| 24.962963 | 74 | 0.637982 |
d3a035e88034369cb75f0e4f977e90c13beb13e7 | 3,781 | py | Python | tests/bandits/test_epsilon_greedy.py | pm3310/pulpo | 5bd8209c38115394efc286848ec5297e31ce7369 | [
"MIT"
] | 2 | 2020-05-11T18:21:34.000Z | 2020-05-15T14:51:46.000Z | tests/bandits/test_epsilon_greedy.py | pm3310/pulpo | 5bd8209c38115394efc286848ec5297e31ce7369 | [
"MIT"
] | 3 | 2020-03-26T11:08:50.000Z | 2020-05-11T21:17:19.000Z | tests/bandits/test_epsilon_greedy.py | pm3310/pulpo | 5bd8209c38115394efc286848ec5297e31ce7369 | [
"MIT"
] | 2 | 2020-05-12T03:34:02.000Z | 2020-05-15T14:52:02.000Z | import random
from unittest import TestCase
from pulpo.bandits.dataclasses import BanditConfig
from pulpo.bandits.dataclasses import Feedback, EpsilonGreedyArm
from pulpo.bandits.epsilon_greedy import EGreedy
class EGreedyTest(TestCase):
    """Unit tests for the ``EGreedy`` epsilon-greedy bandit."""
    def test_should_choose_an_arm(self):
        # choose() must return one of the configured arms.
        arm_names = ['arm1', 'arm2']
        arm = [EpsilonGreedyArm(name, n=1, reward_sum=1) for name in arm_names]
        egreedy = EGreedy('my_bandit', arm, epsilon=0.1)
        chosen_arm = egreedy.choose()
        assert chosen_arm.arm_id == 'arm1' or chosen_arm.arm_id == 'arm2'
    def test_should_update_an_arm_with_feedback(self):
        # update() adds the reward and increments the pull count.
        arm_names = ['arm1', 'arm2']
        arm = [EpsilonGreedyArm(name, n=1, reward_sum=1) for name in arm_names]
        egreedy = EGreedy('my_bandit', arm, epsilon=0.1)
        feedback = Feedback('arm1', reward=1)
        egreedy.update(feedback)
        assert egreedy.arms_dict['arm1'].reward_sum == 2 and egreedy.arms_dict['arm1'].n == 2
    def test_should_run_for_several_iterations_and_sample_all_arms(self):
        # With epsilon=0.1 (mostly exploring), 100 steps should visit
        # every arm at least once. NOTE(review): unseeded randomness makes
        # this probabilistic rather than deterministic.
        arm_names = ['arm1', 'arm2', 'arm3']
        arm = [EpsilonGreedyArm(name, n=1, reward_sum=1) for name in arm_names]
        egreedy = EGreedy('my_bandit', arm, epsilon=0.1)
        num_steps = 100
        sampled_arm_ids = set()
        for _ in range(num_steps):
            arm = egreedy.choose()
            sampled_arm_ids.add(arm.arm_id)
            reward = 1.0 if random.random() < 0.2 else 0.0
            egreedy.update(Feedback(arm.arm_id, reward))
        assert sampled_arm_ids == set(arm_names)
    def test_should_reset_value_to_default(self):
        # reset() zeroes rewards and restores n to its small default (0.001).
        arm_names = ['arm1', 'arm2', 'arm3']
        arm = [EpsilonGreedyArm(name, n=1, reward_sum=1) for name in arm_names]
        egreedy = EGreedy('my_bandit', arm, epsilon=0.1)
        num_steps = 20
        for _ in range(num_steps):
            arm = egreedy.choose()
            reward = 1.0 if random.random() < 0.2 else 0.0
            egreedy.update(Feedback(arm_id=arm.arm_id, reward=reward))
        egreedy.reset()
        assert all([arm.reward_sum == 0 and arm.n == 0.001 for arm in egreedy.arms_dict.values()])
    def test_should_always_select_winner_with_eps_1(self):
        # With epsilon=1.0 the bandit always exploits the best mean reward.
        loosing_arm = EpsilonGreedyArm('loosing_arm', n=1, reward_sum=1)
        winning_arm = EpsilonGreedyArm('winning_arm', n=1, reward_sum=1000.0)
        egreedy = EGreedy('my_bandit', [loosing_arm, winning_arm], epsilon=1.0)
        for _ in range(100):
            assert egreedy.choose().arm_id == 'winning_arm'
    def test_should_be_constructed_from_config(self):
        # Explicit priors/parameters from config are applied to every arm.
        config = BanditConfig(
            bandit_id="test_bandit",
            arm_ids=["arm1", "arm2", "arm3"],
            priors={'n': 2, 'reward_sum': 2},
            parameters={'epsilon': 0.80}
        )
        egreedy: EGreedy = EGreedy.make_from_bandit_config(config)
        assert egreedy.bandit_id == "test_bandit"
        assert egreedy.epsilon == 0.80
        assert egreedy.arms_dict == {"arm1": EpsilonGreedyArm("arm1", 2, 2),
                                     "arm2": EpsilonGreedyArm("arm2", 2, 2),
                                     "arm3": EpsilonGreedyArm("arm3", 2, 2)}
    def test_should_be_constructed_from_config_with_default_fallbacks(self):
        # Without priors/parameters, defaults are epsilon=0.90, n=1, sum=0.
        config = BanditConfig(
            bandit_id="test_bandit",
            arm_ids=["arm1", "arm2", "arm3"]
        )
        egreedy: EGreedy = EGreedy.make_from_bandit_config(config)
        assert egreedy.bandit_id == "test_bandit"
        assert egreedy.epsilon == 0.90
        assert egreedy.arms_dict == {"arm1": EpsilonGreedyArm("arm1", 1, 0),
                                     "arm2": EpsilonGreedyArm("arm2", 1, 0),
                                     "arm3": EpsilonGreedyArm("arm3", 1, 0)}
| 37.81 | 98 | 0.617561 |
0febddb1043caa3aaba954b058b030bfd261a2f3 | 1,774 | py | Python | promql2csv.py | daviddetorres/promql2csv | dad560631623b15e4484be9772b96747806a66f9 | [
"Apache-2.0"
] | null | null | null | promql2csv.py | daviddetorres/promql2csv | dad560631623b15e4484be9772b96747806a66f9 | [
"Apache-2.0"
] | null | null | null | promql2csv.py | daviddetorres/promql2csv | dad560631623b15e4484be9772b96747806a66f9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import configargparse
import logging
import time
from extractor import extractor
VERSION = '0.0.1'
# Setup logger
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Setup command line arguments
# Each option can also be supplied through the environment variable named
# in ``env_var`` (configargparse behaviour).
parser = configargparse.ArgParser()
parser.add('-o',
           '--output',
           required=False,
           env_var='OUTPUT',
           default='output.csv',
           dest='output',
           help='Output file')
parser.add('-u',
           '--url',
           required=False,
           env_var='URL',
           default='localhost',
           dest='url',
           help='Prometheus server URL')
parser.add('-p',
           '--port',
           required=False,
           env_var='PORT',
           default='9090',
           dest='port',
           help='Prometheus server port')
parser.add('-q',
           '--query',
           required=True,
           env_var='QUERY',
           default='',
           dest='query',
           help='PromQL query')
parser.add('-t',
           '--time',
           required=False,
           env_var='TIME',
           default=300,
           dest='time',
           help='Seconds to query')
parser.add('-s',
           '--step',
           required=False,
           env_var='STEP',
           default=60,
           dest='step',
           help='Interval between samples (in seconds)')
def main():
    """Parse command line arguments and run the extractor."""
    try:
        args = parser.parse_args()
    except SystemExit:
        # configargparse (like argparse) raises SystemExit on invalid
        # arguments. The original bare ``except:`` also swallowed
        # unrelated failures such as KeyboardInterrupt, hiding real bugs.
        parser.print_help()
        exit(1)
    logger.info('Starting promql2csv v{}'.format(VERSION))
    logger.debug('Arguments: \n{}'.format(parser.format_values()))
    extractor.extract(args, logger)
    logger.info('Finished')
if __name__ == '__main__':
main() | 22.455696 | 67 | 0.534949 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.