hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f722a056832cd666350d8f658c55c7138da5a34c | 272 | py | Python | fiasco/io/tests/test_datalayer.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | 14 | 2017-08-19T17:58:09.000Z | 2022-03-22T17:46:34.000Z | fiasco/io/tests/test_datalayer.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | 161 | 2017-09-01T16:16:37.000Z | 2022-03-20T18:51:48.000Z | fiasco/io/tests/test_datalayer.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | 10 | 2017-09-01T16:15:12.000Z | 2022-03-22T17:46:42.000Z | """
Tests for the HDF5 datalayer
"""
import pytest
from fiasco.io import DataIndexer
from fiasco.util.exceptions import MissingDatabaseError
def test_missingdatabase():
with pytest.raises(MissingDatabaseError):
DataIndexer.create_indexer('foo/bar.h5', '/')
| 20.923077 | 55 | 0.761029 | import pytest
from fiasco.io import DataIndexer
from fiasco.util.exceptions import MissingDatabaseError
def test_missingdatabase():
with pytest.raises(MissingDatabaseError):
DataIndexer.create_indexer('foo/bar.h5', '/')
| true | true |
f722a122fe64136b0f5328d8f244de8bf959f947 | 7,267 | py | Python | lib/modules/composites_simulator.py | yuzhd/Text2Scene | a357b7d869f559f7d09a5ac6002757ec705b2a76 | [
"MIT",
"Unlicense"
] | 109 | 2019-06-14T01:00:29.000Z | 2022-01-23T16:03:17.000Z | lib/modules/composites_simulator.py | yuzhd/Text2Scene | a357b7d869f559f7d09a5ac6002757ec705b2a76 | [
"MIT",
"Unlicense"
] | 10 | 2019-10-16T15:40:40.000Z | 2021-08-16T00:53:57.000Z | lib/modules/composites_simulator.py | yuzhd/Text2Scene | a357b7d869f559f7d09a5ac6002757ec705b2a76 | [
"MIT",
"Unlicense"
] | 24 | 2019-06-26T05:21:59.000Z | 2021-11-01T00:33:21.000Z | #!/usr/bin/env python
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
from nntable import AllCategoriesTables
from composites_utils import *
class simulator(object):
def __init__(self, db, batch_size=None, nn_table=None):
self.db = db
self.cfg = db.cfg
self.batch_size = batch_size if batch_size is not None else self.cfg.batch_size
if nn_table is None:
self.nn_table = AllCategoriesTables(db)
self.nn_table.build_nntables_for_all_categories()
else:
self.nn_table = nn_table
def reset(self):
self.scenes = []
frames = []
if self.cfg.use_color_volume:
channel_dim = 3 * self.cfg.output_vocab_size
else:
channel_dim = 4 + self.cfg.output_vocab_size
for i in range(self.batch_size):
scene = {}
scene['out_inds'] = []
scene['out_vecs'] = []
scene['out_patches'] = []
frame = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0],
channel_dim
)
)
scene['last_frame'] = frame
scene['last_label'] = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0]
), dtype=np.int32
)
scene['last_mask'] = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0]
), dtype=np.float32
)
self.scenes.append(scene)
frames.append(frame)
frames = np.stack(frames, axis=0)
return torch.from_numpy(frames)
def batch_render_to_pytorch(self, out_inds, out_vecs):
assert(len(out_inds) == self.batch_size)
outputs = []
for i in range(self.batch_size):
frame = self.update_scene(self.scenes[i],
{'out_inds': out_inds[i], 'out_vec': out_vecs[i]})
outputs.append(frame)
outputs = np.stack(outputs, 0)
return torch.from_numpy(outputs)
def batch_redraw(self, return_sequence=False):
out_frames, out_noises, out_masks, out_labels, out_scenes = [], [], [], [], []
for i in range(len(self.scenes)):
predicted_scene = self.db.prediction_outputs_to_scene(self.scenes[i], self.nn_table)
predicted_scene['patches'] = self.scenes[i]['out_patches']
frames, noises, masks, labels = self.render_predictions_as_output(predicted_scene, return_sequence)
if not return_sequence:
frames = frames[None, ...]
noises = noises[None, ...]
masks = masks[None, ...]
labels = labels[None, ...]
out_frames.append(frames)
out_noises.append(noises)
out_masks.append(masks)
out_labels.append(labels)
out_scenes.append(predicted_scene)
return out_frames, out_noises, out_masks, out_labels, out_scenes
def render_predictions_as_output(self, scene, return_sequence):
width = scene['width']
height = scene['height']
clses = scene['clses']
boxes = scene['boxes']
patches = scene['patches']
if self.cfg.use_color_volume:
channel_dim = 3 * self.cfg.output_vocab_size
else:
channel_dim = 4 + self.cfg.output_vocab_size
frame = np.zeros((height, width, channel_dim))
noise = np.zeros((height, width, channel_dim))
label = np.zeros((height, width), dtype=np.int32)
mask = np.zeros((height, width), dtype=np.float32)
out_frames, out_noises, out_labels, out_masks = [], [], [], []
for i in range(len(clses)):
cls_ind = clses[i]
xywh = boxes[i]
patch = patches[i]
xyxy = xywh_to_xyxy(xywh, width, height)
if self.cfg.use_color_volume:
frame[:,:,3*cls_ind:3*(cls_ind+1)], mask, _, label, noise[:,:,3*cls_ind:3*(cls_ind+1)] = \
patch_compose_and_erose(frame[:,:,3*cls_ind:3*(cls_ind+1)], mask, label, \
xyxy, patch, self.db, noise[:,:,3*cls_ind:3*(cls_ind+1)])
else:
frame[:,:,-3:], mask, _, label, noise[:,:,-3:] = \
patch_compose_and_erose(frame[:,:,-3:], mask, label, xyxy, patch, self.db, noise[:,:,-3:])
frame[:,:,-4] = np.maximum(mask*255, frame[:,:,-4])
frame[:,:,cls_ind] = np.maximum(mask*255, frame[:,:,cls_ind])
out_frames.append(frame.copy())
out_noises.append(noise.copy())
out_labels.append(label.copy())
out_masks.append(mask.copy())
if len(clses) == 0:
out_frames.append(frame.copy())
out_noises.append(noise.copy())
out_labels.append(label.copy())
out_masks.append(mask.copy())
if return_sequence:
return np.stack(out_frames, 0), np.stack(out_noises, 0), np.stack(out_masks, 0), np.stack(out_labels, 0)
else:
return out_frames[-1], out_noises[-1], out_masks[-1], out_labels[-1]
def update_scene(self, scene, step_prediction):
##############################################################
# Update the scene and the last instance of the scene
##############################################################
out_inds = step_prediction['out_inds'].flatten()
out_vec = step_prediction['out_vec'].flatten()
scene['out_inds'].append(out_inds)
scene['out_vecs'].append(out_vec)
scene['last_frame'], scene['last_mask'], scene['last_label'], current_patch = \
self.update_frame(scene['last_frame'], scene['last_mask'], scene['last_label'], out_inds, out_vec)
scene['out_patches'].append(current_patch)
return scene['last_frame']
def update_frame(self, input_frame, input_mask, input_label, input_inds, input_vec):
if input_inds[0] <= self.cfg.EOS_idx:
return input_frame, input_mask, input_label, None
w = input_frame.shape[-2]
h = input_frame.shape[-3]
cls_ind = input_inds[0]
xywh = self.db.index2box(input_inds[1:])
xywh = xywh * np.array([w, h, w, h])
xyxy = xywh_to_xyxy(xywh, w, h)
patch = self.nn_table.retrieve(cls_ind, input_vec)[0]
# print(patch)
# print(patch['name'])
# update the frame
if self.cfg.use_color_volume:
input_frame[:,:,3*cls_ind:3*(cls_ind+1)], input_mask, _, input_label, _ = \
patch_compose_and_erose(input_frame[:,:,3*cls_ind:3*(cls_ind+1)], input_mask, input_label, xyxy, patch, self.db)
else:
input_frame[:,:,-3:], input_mask, _, input_label, _ = \
patch_compose_and_erose(input_frame[:,:,-3:], input_mask, input_label, xyxy, patch, self.db)
input_frame[:,:,-4] = np.maximum(255*input_mask, input_frame[:,:,-4])
input_frame[:,:,cls_ind] = np.maximum(255*input_mask, input_frame[:,:,cls_ind])
return input_frame, input_mask, input_label, patch
| 43.255952 | 128 | 0.564194 |
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
from nntable import AllCategoriesTables
from composites_utils import *
class simulator(object):
def __init__(self, db, batch_size=None, nn_table=None):
self.db = db
self.cfg = db.cfg
self.batch_size = batch_size if batch_size is not None else self.cfg.batch_size
if nn_table is None:
self.nn_table = AllCategoriesTables(db)
self.nn_table.build_nntables_for_all_categories()
else:
self.nn_table = nn_table
def reset(self):
self.scenes = []
frames = []
if self.cfg.use_color_volume:
channel_dim = 3 * self.cfg.output_vocab_size
else:
channel_dim = 4 + self.cfg.output_vocab_size
for i in range(self.batch_size):
scene = {}
scene['out_inds'] = []
scene['out_vecs'] = []
scene['out_patches'] = []
frame = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0],
channel_dim
)
)
scene['last_frame'] = frame
scene['last_label'] = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0]
), dtype=np.int32
)
scene['last_mask'] = np.zeros(
( self.cfg.input_image_size[1],
self.cfg.input_image_size[0]
), dtype=np.float32
)
self.scenes.append(scene)
frames.append(frame)
frames = np.stack(frames, axis=0)
return torch.from_numpy(frames)
def batch_render_to_pytorch(self, out_inds, out_vecs):
assert(len(out_inds) == self.batch_size)
outputs = []
for i in range(self.batch_size):
frame = self.update_scene(self.scenes[i],
{'out_inds': out_inds[i], 'out_vec': out_vecs[i]})
outputs.append(frame)
outputs = np.stack(outputs, 0)
return torch.from_numpy(outputs)
def batch_redraw(self, return_sequence=False):
out_frames, out_noises, out_masks, out_labels, out_scenes = [], [], [], [], []
for i in range(len(self.scenes)):
predicted_scene = self.db.prediction_outputs_to_scene(self.scenes[i], self.nn_table)
predicted_scene['patches'] = self.scenes[i]['out_patches']
frames, noises, masks, labels = self.render_predictions_as_output(predicted_scene, return_sequence)
if not return_sequence:
frames = frames[None, ...]
noises = noises[None, ...]
masks = masks[None, ...]
labels = labels[None, ...]
out_frames.append(frames)
out_noises.append(noises)
out_masks.append(masks)
out_labels.append(labels)
out_scenes.append(predicted_scene)
return out_frames, out_noises, out_masks, out_labels, out_scenes
def render_predictions_as_output(self, scene, return_sequence):
width = scene['width']
height = scene['height']
clses = scene['clses']
boxes = scene['boxes']
patches = scene['patches']
if self.cfg.use_color_volume:
channel_dim = 3 * self.cfg.output_vocab_size
else:
channel_dim = 4 + self.cfg.output_vocab_size
frame = np.zeros((height, width, channel_dim))
noise = np.zeros((height, width, channel_dim))
label = np.zeros((height, width), dtype=np.int32)
mask = np.zeros((height, width), dtype=np.float32)
out_frames, out_noises, out_labels, out_masks = [], [], [], []
for i in range(len(clses)):
cls_ind = clses[i]
xywh = boxes[i]
patch = patches[i]
xyxy = xywh_to_xyxy(xywh, width, height)
if self.cfg.use_color_volume:
frame[:,:,3*cls_ind:3*(cls_ind+1)], mask, _, label, noise[:,:,3*cls_ind:3*(cls_ind+1)] = \
patch_compose_and_erose(frame[:,:,3*cls_ind:3*(cls_ind+1)], mask, label, \
xyxy, patch, self.db, noise[:,:,3*cls_ind:3*(cls_ind+1)])
else:
frame[:,:,-3:], mask, _, label, noise[:,:,-3:] = \
patch_compose_and_erose(frame[:,:,-3:], mask, label, xyxy, patch, self.db, noise[:,:,-3:])
frame[:,:,-4] = np.maximum(mask*255, frame[:,:,-4])
frame[:,:,cls_ind] = np.maximum(mask*255, frame[:,:,cls_ind])
out_frames.append(frame.copy())
out_noises.append(noise.copy())
out_labels.append(label.copy())
out_masks.append(mask.copy())
if len(clses) == 0:
out_frames.append(frame.copy())
out_noises.append(noise.copy())
out_labels.append(label.copy())
out_masks.append(mask.copy())
if return_sequence:
return np.stack(out_frames, 0), np.stack(out_noises, 0), np.stack(out_masks, 0), np.stack(out_labels, 0)
else:
return out_frames[-1], out_noises[-1], out_masks[-1], out_labels[-1]
def update_scene(self, scene, step_prediction):
| true | true |
f722a1e34c5f98d0bb0b83434b0a51c8ec1d9d91 | 11,356 | py | Python | gtsfm/utils/viz.py | PratyushaMaiti/gtsfm | 0d03dca0b6fb9293c9a3fb619a2141903168269a | [
"Apache-2.0"
] | null | null | null | gtsfm/utils/viz.py | PratyushaMaiti/gtsfm | 0d03dca0b6fb9293c9a3fb619a2141903168269a | [
"Apache-2.0"
] | null | null | null | gtsfm/utils/viz.py | PratyushaMaiti/gtsfm | 0d03dca0b6fb9293c9a3fb619a2141903168269a | [
"Apache-2.0"
] | null | null | null | """Functions to visualize outputs at different stages of GTSFM.
Authors: Ayush Baid
"""
import os
from typing import List, Optional, Tuple
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from gtsam import Pose3
from matplotlib.axes._axes import Axes
import gtsfm.utils.geometry_comparisons as comp_utils
import gtsfm.utils.images as image_utils
import gtsfm.utils.io as io_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
COLOR_RED = (255, 0, 0)
COLOR_GREEN = (0, 255, 0)
def set_axes_equal(ax: Axes):
"""
Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. This is one
possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Ref: https://github.com/borglab/gtsam/blob/develop/python/gtsam/utils/plot.py#L13
Args:
ax: axis for the plot.
"""
# get the min and max value for each of (x, y, z) axes as 3x2 matrix.
# This gives us the bounds of the minimum volume cuboid encapsulating all
# data.
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
# find the centroid of the cuboid
centroid = np.mean(limits, axis=1)
# pick the largest edge length for this cuboid
largest_edge_length = np.max(np.abs(limits[:, 1] - limits[:, 0]))
# set new limits to draw a cube using the largest edge length
radius = 0.5 * largest_edge_length
ax.set_xlim3d([centroid[0] - radius, centroid[0] + radius])
ax.set_ylim3d([centroid[1] - radius, centroid[1] + radius])
ax.set_zlim3d([centroid[2] - radius, centroid[2] + radius])
def draw_circle_cv2(image: Image, x: int, y: int, color: Tuple[int, int, int], circle_size: int = 10) -> Image:
"""Draw a solid circle on the image.
Args:
image: image to draw the circle on.
x: x coordinate of the center of the circle.
y: y coordinate of the center of the circle.
color: RGB color of the circle.
circle_size (optional): the size of the circle (in pixels). Defaults to 10.
Returns:
Image: image with the circle drawn on it.
"""
return Image(
cv.circle(image.value_array, center=(x, y), radius=circle_size, color=color, thickness=-1) # solid circle
)
def draw_line_cv2(
image: Image, x1: int, y1: int, x2: int, y2: int, line_color: Tuple[int, int, int], line_thickness: int = 10,
) -> Image:
"""Draw a line on the image from coordinates (x1, y1) to (x2, y2).
Args:
image: image to draw the line on.
x1: x coordinate of start of the line.
y1: y coordinate of start of the line.
x2: x coordinate of end of the line.
y2: y coordinate of end of the line.
line_color: color of the line.
line_thickness (optional): line thickness. Defaults to 10.
Returns:
Image: image with the line drawn on it.
"""
return Image(cv.line(image.value_array, (x1, y1), (x2, y2), line_color, line_thickness, cv.LINE_AA))
def plot_twoview_correspondences(
image_i1: Image,
image_i2: Image,
kps_i1: Keypoints,
kps_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
inlier_mask: Optional[np.ndarray] = None,
dot_color: Optional[Tuple[int, int, int]] = None,
max_corrs: Optional[int] = 50,
) -> Image:
"""Plot correspondences between two images as lines between two circles.
Args:
image_i1: first image.
image_i2: second image.
kps_i1: keypoints for image_i1.
kps_i2: keypoints for image_i2.
corr_idxs_i1i2: indices of correspondences between i1 and i2.
inlier_mask (optional): inlier mask for correspondences as boolean array. Defaults to None.
dot_color (optional): color for keypoints. Defaults to (0, 0, 0).
max_corrs (optional): max number of correspondences to plot. Defaults to 50.
Returns:
image visualizing correspondences between two images.
"""
image_i1, image_i2, scale_i1, scale_i2 = image_utils.match_image_widths(image_i1, image_i2)
result = image_utils.vstack_image_pair(image_i1, image_i2)
if max_corrs is not None and corr_idxs_i1i2.shape[0] > max_corrs:
# subsample matches
corr_idxs_i1i2 = corr_idxs_i1i2[np.random.choice(corr_idxs_i1i2.shape[0], max_corrs)]
for corr_idx in range(corr_idxs_i1i2.shape[0]):
# mark the points in both images as circles, and draw connecting line
idx_i1, idx_i2 = corr_idxs_i1i2[corr_idx]
x_i1 = (kps_i1.coordinates[idx_i1, 0] * scale_i1[0]).astype(np.int32)
y_i1 = (kps_i1.coordinates[idx_i1, 1] * scale_i1[1]).astype(np.int32)
x_i2 = (kps_i2.coordinates[idx_i2, 0] * scale_i2[0]).astype(np.int32)
y_i2 = (kps_i2.coordinates[idx_i2, 1] * scale_i2[1]).astype(np.int32) + image_i1.height
# drawing correspondences with optional inlier mask
if inlier_mask is None:
line_color = tuple([int(c) for c in np.random.randint(0, 255 + 1, 3)])
elif inlier_mask[corr_idx]:
line_color = COLOR_GREEN
else:
line_color = COLOR_RED
result = draw_line_cv2(result, x_i1, y_i1, x_i2, y_i2, line_color, line_thickness=2)
if dot_color is None:
dot_color = line_color
result = draw_circle_cv2(result, x_i1, y_i1, dot_color, circle_size=2)
result = draw_circle_cv2(result, x_i2, y_i2, dot_color, circle_size=2)
return result
def plot_sfm_data_3d(sfm_data: GtsfmData, ax: Axes, max_plot_radius: float = 50) -> None:
"""Plot the camera poses and landmarks in 3D matplotlib plot.
Args:
sfm_data: SfmData object with camera and tracks.
ax: axis to plot on.
max_plot_radius: maximum distance threshold away from any camera for which a point
will be plotted
"""
camera_poses = [sfm_data.get_camera(i).pose() for i in sfm_data.get_valid_camera_indices()]
plot_poses_3d(camera_poses, ax)
num_tracks = sfm_data.number_tracks()
# Restrict 3d points to some radius of camera poses
points_3d = np.array([list(sfm_data.get_track(j).point3()) for j in range(num_tracks)])
nearby_points_3d = comp_utils.get_points_within_radius_of_cameras(camera_poses, points_3d, max_plot_radius)
# plot 3D points
for landmark in nearby_points_3d:
ax.plot(landmark[0], landmark[1], landmark[2], "g.", markersize=1)
def plot_poses_3d(
wTi_list: List[Pose3], ax: Axes, center_marker_color: str = "k", label_name: Optional[str] = None
) -> None:
"""Plot poses in 3D as dots for centers and lines denoting the orthonormal
coordinate system for each camera.
Color convention: R -> x axis, G -> y axis, B -> z axis.
Args:
wTi_list: list of poses to plot.
ax: axis to plot on.
center_marker_color (optional): color for camera center marker. Defaults to "k".
name:
"""
spec = "{}.".format(center_marker_color)
for i, wTi in enumerate(wTi_list):
x, y, z = wTi.translation().squeeze()
if i > 0:
# for the first loop iteration, add the label to the plot
# for the rest of iterations, set label to None (otherwise would be duplicated in legend)
label_name = None
ax.plot(x, y, z, spec, markersize=10, label=label_name)
R = wTi.rotation().matrix()
# getting the direction of the coordinate system (x, y, z axes)
default_axis_length = 0.5
v1 = R[:, 0] * default_axis_length
v2 = R[:, 1] * default_axis_length
v3 = R[:, 2] * default_axis_length
ax.plot3D([x, x + v1[0]], [y, y + v1[1]], [z, z + v1[2]], c="r")
ax.plot3D([x, x + v2[0]], [y, y + v2[1]], [z, z + v2[2]], c="g")
ax.plot3D([x, x + v3[0]], [y, y + v3[1]], [z, z + v3[2]], c="b")
def plot_and_compare_poses_3d(wTi_list: List[Pose3], wTi_list_: List[Pose3]) -> None:
"""Plots two sets poses in 3D with different markers to compare.
The markers are colored black (k) and cyan (c) for the two lists.
Args:
wTi_list: first set of poses.
wTi_list_: second set of poses.
"""
fig = plt.figure()
ax = fig.gca(projection="3d")
plot_poses_3d(wTi_list, ax, center_marker_color="k")
plot_poses_3d(wTi_list_, ax, center_marker_color="c")
set_axes_equal(ax)
plt.show()
def save_twoview_correspondences_viz(
image_i1: Image,
image_i2: Image,
keypoints_i1: Keypoints,
keypoints_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
file_path: str,
) -> None:
"""Visualize correspondences between pairs of images.
Args:
image_i1: image #i1.
image_i2: image #i2.
keypoints_i1: detected Keypoints for image #i1.
keypoints_i2: detected Keypoints for image #i2.
corr_idxs_i1i2: correspondence indices.
file_path: file path to save the visualization.
"""
plot_img = plot_twoview_correspondences(image_i1, image_i2, keypoints_i1, keypoints_i2, corr_idxs_i1i2)
io_utils.save_image(plot_img, file_path)
def save_sfm_data_viz(sfm_data: GtsfmData, folder_name: str) -> None:
"""Visualize the camera poses and 3d points in SfmData.
Args:
sfm_data: data to visualize.
folder_name: folder to save the visualization at.
"""
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
plot_sfm_data_3d(sfm_data, ax)
set_axes_equal(ax)
# save the 3D plot in the original view
fig.savefig(os.path.join(folder_name, "3d.png"))
# save the BEV representation
default_camera_elevation = 100 # in metres above ground
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "bev.png"))
plt.close(fig)
def save_camera_poses_viz(
pre_ba_sfm_data: GtsfmData, post_ba_sfm_data: GtsfmData, gt_pose_graph: Optional[List[Pose3]], folder_name: str
) -> None:
"""Visualize the camera pose and save to disk.
Args:
pre_ba_sfm_data: data input to bundle adjustment.
post_ba_sfm_data: output of bundle adjustment.
gt_pose_graph: ground truth poses.
folder_name: folder to save the visualization at.
"""
# extract camera poses
pre_ba_poses = []
for i in pre_ba_sfm_data.get_valid_camera_indices():
pre_ba_poses.append(pre_ba_sfm_data.get_camera(i).pose())
post_ba_poses = []
for i in post_ba_sfm_data.get_valid_camera_indices():
post_ba_poses.append(post_ba_sfm_data.get_camera(i).pose())
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
if gt_pose_graph is not None:
plot_poses_3d(gt_pose_graph, ax, center_marker_color="m", label_name="GT")
plot_poses_3d(pre_ba_poses, ax, center_marker_color="c", label_name="Pre-BA")
plot_poses_3d(post_ba_poses, ax, center_marker_color="k", label_name="Post-BA")
ax.legend(loc="upper left")
set_axes_equal(ax)
# save the 3D plot in the original view
fig.savefig(os.path.join(folder_name, "poses_3d.png"))
# save the BEV representation
default_camera_elevation = 100 # in metres above ground
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "poses_bev.png"))
plt.close(fig)
| 35.4875 | 115 | 0.672244 | import os
from typing import List, Optional, Tuple
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from gtsam import Pose3
from matplotlib.axes._axes import Axes
import gtsfm.utils.geometry_comparisons as comp_utils
import gtsfm.utils.images as image_utils
import gtsfm.utils.io as io_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
COLOR_RED = (255, 0, 0)
COLOR_GREEN = (0, 255, 0)
def set_axes_equal(ax: Axes):
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
centroid = np.mean(limits, axis=1)
largest_edge_length = np.max(np.abs(limits[:, 1] - limits[:, 0]))
radius = 0.5 * largest_edge_length
ax.set_xlim3d([centroid[0] - radius, centroid[0] + radius])
ax.set_ylim3d([centroid[1] - radius, centroid[1] + radius])
ax.set_zlim3d([centroid[2] - radius, centroid[2] + radius])
def draw_circle_cv2(image: Image, x: int, y: int, color: Tuple[int, int, int], circle_size: int = 10) -> Image:
return Image(
cv.circle(image.value_array, center=(x, y), radius=circle_size, color=color, thickness=-1)
)
def draw_line_cv2(
image: Image, x1: int, y1: int, x2: int, y2: int, line_color: Tuple[int, int, int], line_thickness: int = 10,
) -> Image:
return Image(cv.line(image.value_array, (x1, y1), (x2, y2), line_color, line_thickness, cv.LINE_AA))
def plot_twoview_correspondences(
image_i1: Image,
image_i2: Image,
kps_i1: Keypoints,
kps_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
inlier_mask: Optional[np.ndarray] = None,
dot_color: Optional[Tuple[int, int, int]] = None,
max_corrs: Optional[int] = 50,
) -> Image:
image_i1, image_i2, scale_i1, scale_i2 = image_utils.match_image_widths(image_i1, image_i2)
result = image_utils.vstack_image_pair(image_i1, image_i2)
if max_corrs is not None and corr_idxs_i1i2.shape[0] > max_corrs:
corr_idxs_i1i2 = corr_idxs_i1i2[np.random.choice(corr_idxs_i1i2.shape[0], max_corrs)]
for corr_idx in range(corr_idxs_i1i2.shape[0]):
idx_i1, idx_i2 = corr_idxs_i1i2[corr_idx]
x_i1 = (kps_i1.coordinates[idx_i1, 0] * scale_i1[0]).astype(np.int32)
y_i1 = (kps_i1.coordinates[idx_i1, 1] * scale_i1[1]).astype(np.int32)
x_i2 = (kps_i2.coordinates[idx_i2, 0] * scale_i2[0]).astype(np.int32)
y_i2 = (kps_i2.coordinates[idx_i2, 1] * scale_i2[1]).astype(np.int32) + image_i1.height
if inlier_mask is None:
line_color = tuple([int(c) for c in np.random.randint(0, 255 + 1, 3)])
elif inlier_mask[corr_idx]:
line_color = COLOR_GREEN
else:
line_color = COLOR_RED
result = draw_line_cv2(result, x_i1, y_i1, x_i2, y_i2, line_color, line_thickness=2)
if dot_color is None:
dot_color = line_color
result = draw_circle_cv2(result, x_i1, y_i1, dot_color, circle_size=2)
result = draw_circle_cv2(result, x_i2, y_i2, dot_color, circle_size=2)
return result
def plot_sfm_data_3d(sfm_data: GtsfmData, ax: Axes, max_plot_radius: float = 50) -> None:
camera_poses = [sfm_data.get_camera(i).pose() for i in sfm_data.get_valid_camera_indices()]
plot_poses_3d(camera_poses, ax)
num_tracks = sfm_data.number_tracks()
points_3d = np.array([list(sfm_data.get_track(j).point3()) for j in range(num_tracks)])
nearby_points_3d = comp_utils.get_points_within_radius_of_cameras(camera_poses, points_3d, max_plot_radius)
for landmark in nearby_points_3d:
ax.plot(landmark[0], landmark[1], landmark[2], "g.", markersize=1)
def plot_poses_3d(
wTi_list: List[Pose3], ax: Axes, center_marker_color: str = "k", label_name: Optional[str] = None
) -> None:
spec = "{}.".format(center_marker_color)
for i, wTi in enumerate(wTi_list):
x, y, z = wTi.translation().squeeze()
if i > 0:
label_name = None
ax.plot(x, y, z, spec, markersize=10, label=label_name)
R = wTi.rotation().matrix()
default_axis_length = 0.5
v1 = R[:, 0] * default_axis_length
v2 = R[:, 1] * default_axis_length
v3 = R[:, 2] * default_axis_length
ax.plot3D([x, x + v1[0]], [y, y + v1[1]], [z, z + v1[2]], c="r")
ax.plot3D([x, x + v2[0]], [y, y + v2[1]], [z, z + v2[2]], c="g")
ax.plot3D([x, x + v3[0]], [y, y + v3[1]], [z, z + v3[2]], c="b")
def plot_and_compare_poses_3d(wTi_list: List[Pose3], wTi_list_: List[Pose3]) -> None:
fig = plt.figure()
ax = fig.gca(projection="3d")
plot_poses_3d(wTi_list, ax, center_marker_color="k")
plot_poses_3d(wTi_list_, ax, center_marker_color="c")
set_axes_equal(ax)
plt.show()
def save_twoview_correspondences_viz(
image_i1: Image,
image_i2: Image,
keypoints_i1: Keypoints,
keypoints_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
file_path: str,
) -> None:
plot_img = plot_twoview_correspondences(image_i1, image_i2, keypoints_i1, keypoints_i2, corr_idxs_i1i2)
io_utils.save_image(plot_img, file_path)
def save_sfm_data_viz(sfm_data: GtsfmData, folder_name: str) -> None:
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
plot_sfm_data_3d(sfm_data, ax)
set_axes_equal(ax)
fig.savefig(os.path.join(folder_name, "3d.png"))
default_camera_elevation = 100
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "bev.png"))
plt.close(fig)
def save_camera_poses_viz(
pre_ba_sfm_data: GtsfmData, post_ba_sfm_data: GtsfmData, gt_pose_graph: Optional[List[Pose3]], folder_name: str
) -> None:
pre_ba_poses = []
for i in pre_ba_sfm_data.get_valid_camera_indices():
pre_ba_poses.append(pre_ba_sfm_data.get_camera(i).pose())
post_ba_poses = []
for i in post_ba_sfm_data.get_valid_camera_indices():
post_ba_poses.append(post_ba_sfm_data.get_camera(i).pose())
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
if gt_pose_graph is not None:
plot_poses_3d(gt_pose_graph, ax, center_marker_color="m", label_name="GT")
plot_poses_3d(pre_ba_poses, ax, center_marker_color="c", label_name="Pre-BA")
plot_poses_3d(post_ba_poses, ax, center_marker_color="k", label_name="Post-BA")
ax.legend(loc="upper left")
set_axes_equal(ax)
fig.savefig(os.path.join(folder_name, "poses_3d.png"))
default_camera_elevation = 100
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "poses_bev.png"))
plt.close(fig)
| true | true |
f722a36ec11a8705c0cce95ff0216ab9e221ca46 | 4,676 | py | Python | .Config/FslFontHelper/FormatPluginFontBasicKerningUtil.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2019-01-19T20:21:24.000Z | 2021-08-10T02:11:32.000Z | .Config/FslFontHelper/FormatPluginFontBasicKerningUtil.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | .Config/FslFontHelper/FormatPluginFontBasicKerningUtil.py | alejandrolozano2/OpenGL_DemoFramework | 5fd85f05c98cc3d0c0a68bac438035df8cabaee7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-08-10T02:11:33.000Z | 2021-08-10T02:11:33.000Z | #!/usr/bin/env python3
#****************************************************************************************************************************************************
#* BSD 3-Clause License
#*
#* Copyright (c) 2015, Mana Battery
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#*
#* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
#* documentation and/or other materials provided with the distribution.
#* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
#* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#****************************************************************************************************************************************************
def AddHeader(list, version):
    """Append the file header: the magic number 'BTA\\0' followed by the
    format version, both as little-endian uint32 values."""
    for word in (0x00415442, version):
        AddUInt32(list, word)
def AddByteToList(list, value):
    """Append *value* to *list* as a single byte.

    Values outside the 0-255 byte range raise an Exception.
    """
    if not (0 <= value <= 255):
        raise Exception("Invalid value")
    list.append(value & 0xFF)
def SetByteInList(list, index, value):
    """Overwrite ``list[index]`` with *value* as a single byte.

    Values outside the 0-255 byte range raise an Exception.
    """
    if not (0 <= value <= 255):
        raise Exception("Invalid value")
    list[index] = value & 0xFF
def SetUInt32(list, index, value):
    """Overwrite ``list[index..index+3]`` with *value* encoded as four
    little-endian bytes."""
    for i in range(4):
        SetByteInList(list, index + i, (value >> (8 * i)) & 0xFF)
def AddUInt32(list, value):
    """Append *value* to *list* as four little-endian bytes."""
    for shift in (0, 8, 16, 24):
        AddByteToList(list, (value >> shift) & 0xFF)
def AddInt32(list, value):
    """Append *value* to *list* as four little-endian bytes.

    Negative values are stored in two's-complement form via masking,
    so the byte layout matches AddUInt32 for non-negative input.
    """
    for shift in (0, 8, 16, 24):
        AddByteToList(list, (value >> shift) & 0xFF)
def AddEncodedInt32(list, value):
    """ZigZag-encode the signed *value* and append it via AddEncodedUInt32.

    ZigZag maps 0,-1,1,-2,... to 0,1,2,3,... so small magnitudes encode
    into few bytes. Returns the number of bytes written.
    """
    zigzag = (value << 1) ^ (~0 if value < 0 else 0)
    return AddEncodedUInt32(list, zigzag)
# Encodes a integer into a variable length encoding where the length can be determined from the first byte.
# in a way that favors small values.
def AddEncodedUInt32(list, value):
    """Append *value* in a variable-length encoding whose total byte count
    can be determined from the first byte alone (leading marker bits:
    0xxxxxxx=1, 10xxxxxx=2, 110xxxxx=3, 1110xxxx=4, 11110xxx=5 bytes).
    Small values stay small. Returns the number of bytes written.
    """
    if value <= 0x7F:
        # <=7 bits: stored as-is (out-of-range input is rejected by
        # AddByteToList, matching the behavior of the other helpers).
        AddByteToList(list, value)
        return 1
    # (upper limit, marker bits, payload bits in first byte, total bytes)
    layouts = ((0x3FFF, 0x80, 6, 2),
               (0x1FFFFF, 0xC0, 5, 3),
               (0xFFFFFFF, 0xE0, 4, 4))
    for limit, marker, firstbits, size in layouts:
        if value <= limit:
            AddByteToList(list, marker | (value & ((1 << firstbits) - 1)))
            rest = value >> firstbits
            for _ in range(size - 1):
                AddByteToList(list, rest & 0xFF)
                rest >>= 8
            return size
    # >28 bits: 5-byte form carries only 3 payload bits in the first byte.
    AddByteToList(list, 0xF0 | (value & 0x07))
    AddByteToList(list, (value >> 3) & 0xFF)
    AddByteToList(list, (value >> 11) & 0xFF)
    AddByteToList(list, (value >> 19) & 0xFF)
    AddByteToList(list, (value >> 27) & 0x1F)
    return 5
def AddString(list, value):
    """Append *value* as UTF-8 bytes prefixed by their encoded length."""
    encoded = value.encode('utf-8')
    AddEncodedUInt32(list, len(encoded))
    for byte in bytearray(encoded):
        AddByteToList(list, byte)
| 42.126126 | 149 | 0.634303 |
def AddHeader(list, version):
AddUInt32(list, 0x00415442)
AddUInt32(list, version)
def AddByteToList(list, value):
if( value < 0 or value > 255 ):
raise Exception("Invalid value")
list.append(value & 0xFF)
def SetByteInList(list, index, value):
if( value < 0 or value > 255 ):
raise Exception("Invalid value")
list[index] = value & 0xFF
def SetUInt32(list, index, value):
SetByteInList(list, index, value & 0xFF)
SetByteInList(list, index + 1, (value & 0xFF00) >> 8)
SetByteInList(list, index + 2, (value & 0xFF0000) >> 16)
SetByteInList(list, index + 3, (value & 0xFF000000) >> 24)
def AddUInt32(list, value):
AddByteToList(list, value & 0xFF)
AddByteToList(list, (value & 0xFF00) >> 8)
AddByteToList(list, (value & 0xFF0000) >> 16)
AddByteToList(list, (value & 0xFF000000) >> 24)
def AddInt32(list, value):
AddByteToList(list, value & 0xFF)
AddByteToList(list, (value & 0xFF00) >> 8)
AddByteToList(list, (value & 0xFF0000) >> 16)
AddByteToList(list, (value & 0xFF000000) >> 24)
def AddEncodedInt32(list, value):
if value >= 0:
return AddEncodedUInt32(list, value << 1);
else:
return AddEncodedUInt32(list, (value << 1) ^ (~0))
def AddEncodedUInt32(list, value):
if (value <= 0x7F):
AddByteToList(list, value)
return 1
elif (value <= 0x3FFF):
AddByteToList(list, 0x80 | (value & 0x3F))
AddByteToList(list, (value & 0x3FC0) >> 6)
return 2
elif (value <= 0x1FFFFF):
AddByteToList(list, 0xC0 | (value & 0x1F))
AddByteToList(list, (value & 0x001FE0) >> 5)
AddByteToList(list, (value & 0x1FE000) >> 13)
return 3
elif (value <= 0xFFFFFFF):
AddByteToList(list, 0xE0 | (value & 0x0F))
AddByteToList(list, (value & 0x00000FF0) >> 4)
AddByteToList(list, (value & 0x000FF000) >> 12)
AddByteToList(list, (value & 0x0FF00000) >> 20)
return 4
else:
AddByteToList(list, 0xF0 | (value & 0x07))
AddByteToList(list, (value & 0x000007F8) >> 3)
AddByteToList(list, (value & 0x0007F800) >> 11)
AddByteToList(list, (value & 0x07F80000) >> 19)
AddByteToList(list, (value & 0xF8000000) >> 27)
return 5;
def AddString(list, value):
value = bytearray(value.encode('utf-8'))
AddEncodedUInt32(list, len(value))
for entry in value:
AddByteToList(list, entry)
| true | true |
f722a588e3102203b44de3453078b22826ce6fb2 | 3,002 | py | Python | dataset.py | aidiary/freesound-audio-tagging | 71093fdc838214f4ec2dc5b29b00e7de72ad36d0 | [
"MIT"
] | 1 | 2019-01-04T16:14:16.000Z | 2019-01-04T16:14:16.000Z | dataset.py | aidiary/freesound-audio-tagging | 71093fdc838214f4ec2dc5b29b00e7de72ad36d0 | [
"MIT"
] | null | null | null | dataset.py | aidiary/freesound-audio-tagging | 71093fdc838214f4ec2dc5b29b00e7de72ad36d0 | [
"MIT"
] | 3 | 2018-07-11T03:41:57.000Z | 2020-04-21T09:49:36.000Z | import os
import librosa
import numpy as np
import torch.utils.data
def random_crop(y, max_length=176400):
"""音声波形を固定長にそろえる
max_lengthより長かったらランダムに切り取る
max_lengthより短かったらランダムにパディングする
"""
if len(y) > max_length:
max_offset = len(y) - max_length
offset = np.random.randint(max_offset)
y = y[offset:max_length + offset]
else:
if max_length > len(y):
max_offset = max_length - len(y)
offset = np.random.randint(max_offset)
else:
offset = 0
y = np.pad(y, (offset, max_length - len(y) - offset), 'constant')
return y
class AudioDataset(torch.utils.data.Dataset):
    """Dataset yielding fixed-length MFCC / mel-spectrogram features from
    wav files (plus the label index when not in test mode).

    :param df: table with a ``fname`` column and, in train mode, ``label_idx``.
    :param wav_dir: directory containing the wav files.
    :param test: when True, ``__getitem__`` returns features only (no label).
    :param sr: target sample rate for librosa.load (None = file's native rate).
    :param max_length: clip length in seconds after random crop/pad.
    :param window_size: STFT window size in seconds.
    :param hop_size: STFT hop size in seconds.
    :param n_feature: number of MFCC coefficients / mel bands.
    :param feature: 'mfcc' or 'melgram'.
    :param model_type: 'alex2d'/'resnet' -> (1, features, frames) tensors;
                       'alex1d'/'lstm' -> (features, frames) tensors.
    :raises ValueError: if wav_dir does not exist, or feature/model_type is
                        not one of the supported names.
    """
    def __init__(self, df, wav_dir, test=False,
                 sr=None, max_length=4.0, window_size=0.02, hop_size=0.01,
                 n_feature=64, feature='mfcc', model_type='alex2d'):
        if not os.path.exists(wav_dir):
            # Raise instead of print + exit(1): sys.exit inside library code
            # (notably inside DataLoader worker processes) aborts the whole
            # process instead of surfacing a catchable error.
            raise ValueError('not found %s' % wav_dir)
        self.df = df
        self.wav_dir = wav_dir
        self.test = test
        self.sr = sr
        self.max_length = max_length    # sec
        self.window_size = window_size  # sec
        self.hop_size = hop_size        # sec
        self.n_feature = n_feature
        self.feature = feature
        self.model_type = model_type

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        fpath = os.path.join(self.wav_dir, self.df.fname[index])
        y, sr = librosa.load(fpath, sr=self.sr)
        if sr is None:
            # Defensive fallback; librosa normally reports the native rate.
            print('WARNING:', fpath)
            sr = 44100
        # Fixed-length random crop / zero-pad.
        y = random_crop(y, int(self.max_length * sr))
        # Feature extraction.
        n_fft = int(self.window_size * sr)
        hop_length = int(self.hop_size * sr)
        if self.feature == 'mfcc':
            feature = librosa.feature.mfcc(y=y, sr=sr, n_fft=n_fft,
                                           hop_length=hop_length,
                                           n_mfcc=self.n_feature)
        elif self.feature == 'melgram':
            # Pass y by keyword: the positional audio argument was removed
            # in librosa 0.10.
            feature = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=n_fft,
                                                     hop_length=hop_length,
                                                     n_mels=self.n_feature)
        else:
            raise ValueError('Invalid feature name: %s' % self.feature)
        data = torch.from_numpy(feature).float()
        if self.model_type == 'alex2d' or self.model_type == 'resnet':
            # Conv2d expects (channel, features, frames); unsqueeze replaces
            # the deprecated in-place resize_ used purely as a reshape.
            data = data.unsqueeze(0)
        elif self.model_type == 'alex1d' or self.model_type == 'lstm':
            # Conv1d / LSTM expect (features, frames) - already that shape.
            pass
        else:
            raise ValueError('Invalid conv type: %s' % self.model_type)
        # Per-sample standardization (skip division for constant input).
        mean = data.mean()
        std = data.std()
        if std != 0:
            data.add_(-mean)
            data.div_(std)
        if self.test:
            # Test mode: no ground-truth label available.
            return data
        else:
            return data, self.df.label_idx[index]
| 30.632653 | 121 | 0.562292 | import os
import librosa
import numpy as np
import torch.utils.data
def random_crop(y, max_length=176400):
if len(y) > max_length:
max_offset = len(y) - max_length
offset = np.random.randint(max_offset)
y = y[offset:max_length + offset]
else:
if max_length > len(y):
max_offset = max_length - len(y)
offset = np.random.randint(max_offset)
else:
offset = 0
y = np.pad(y, (offset, max_length - len(y) - offset), 'constant')
return y
class AudioDataset(torch.utils.data.Dataset):
def __init__(self, df, wav_dir, test=False,
sr=None, max_length=4.0, window_size=0.02, hop_size=0.01,
n_feature=64, feature='mfcc', model_type='alex2d'):
if not os.path.exists(wav_dir):
print('ERROR: not found %s' % wav_dir)
exit(1)
self.df = df
self.wav_dir = wav_dir
self.test = test
self.sr = sr
self.max_length = max_length
self.window_size = window_size
self.hop_size = hop_size
self.n_feature = n_feature
self.feature = feature
self.model_type = model_type
def __len__(self):
return len(self.df)
def __getitem__(self, index):
fpath = os.path.join(self.wav_dir, self.df.fname[index])
y, sr = librosa.load(fpath, sr=self.sr)
if sr is None:
print('WARNING:', fpath)
sr = 44100
y = random_crop(y, int(self.max_length * sr))
n_fft = int(self.window_size * sr)
hop_length = int(self.hop_size * sr)
if self.feature == 'mfcc':
feature = librosa.feature.mfcc(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mfcc=self.n_feature)
elif self.feature == 'melgram':
feature = librosa.feature.melspectrogram(y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=self.n_feature)
else:
print('Invalid feature name: %s' % self.feature)
exit(1)
data = torch.from_numpy(feature).float()
s = data.size()
if self.model_type == 'alex2d' or self.model_type == 'resnet':
data.resize_(1, s[0], s[1])
elif self.model_type == 'alex1d' or self.model_type == 'lstm':
data.resize_(s[0], s[1])
else:
print('Invalid conv type: %s' % self.model_type)
exit(1)
mean = data.mean()
std = data.std()
if std != 0:
data.add_(-mean)
data.div_(std)
if self.test:
return data
else:
label = self.df.label_idx[index]
return data, label
| true | true |
f722a6f28981d75409c2f21d5d019c10ec2481cf | 5,630 | py | Python | flask_aiohttp/__init__.py | Hardtack/Flask-aiohttp | 50cfc8c1f71bae9e10a4a475b06b5f6bd124e33b | [
"MIT"
] | 142 | 2015-04-05T06:28:11.000Z | 2021-05-05T14:40:04.000Z | flask_aiohttp/__init__.py | Hardtack/Flask-aiohttp | 50cfc8c1f71bae9e10a4a475b06b5f6bd124e33b | [
"MIT"
] | 9 | 2015-06-01T18:45:35.000Z | 2018-08-29T16:39:01.000Z | flask_aiohttp/__init__.py | chgeonu/Flask-aiohttp | 50cfc8c1f71bae9e10a4a475b06b5f6bd124e33b | [
"MIT"
] | 20 | 2015-04-09T15:09:10.000Z | 2021-11-04T12:24:59.000Z | """:mod:`flask_aiohttp` --- Asynchronous Flask with aiohttp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides Flask extension for asynchronous I/O.
With this extension, we can use `asyncio.coroutine` as Flask's view function.
So, we can add
asyncio-redis <https://github.com/jonathanslenders/asyncio-redis>`_, or
websocket support to your application.
To make view asynchronous, just simply add :func:`helper.async` decorator to
your view function ::
@app.route('/foo')
@async
def lazy():
yield from asyncio.sleep(3)
return 'Done'
You have to run your flask application with :class:`AioHTTP` ::
aio = AioHTTP(app)
aio.run(app)
And you can also use gunicorn ::
aio = AioHTTP(flask_app)
app = aio.create_aiohttp_app(flask_app)
# Run gunicorn by
#
# gunicorn your_module:app -k aiohttp.worker.GunicornWebWorker
# -b localhost:8080
You can even use aiohttp's websocket in your Flask application using
:func:`helper.websocket` ::
aio = AioHTTP(flask_app)
@app.route('echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
"""
import os
import asyncio
import logging
import flask
import aiohttp.web
from flask import request
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .helper import async, websocket, has_websocket, wrap_wsgi_middleware
from .handler import WSGIHandlerBase, WSGIWebSocketHandler
__all__ = ['AioHTTP', 'async', 'websocket', 'has_websocket',
'wrap_wsgi_middleware']
class AioHTTP(object):
    """Flask middleware for aiohttp.

    Wraps a Flask app in an aiohttp web application so asyncio coroutines
    (and websockets) can be used inside Flask views.
    """
    def __init__(self, app: flask.Flask=None, *,
                 handler_factory=WSGIWebSocketHandler):
        """
        :param app:
            Flask application (may be omitted and supplied later via
            :meth:`init_app`, following the usual Flask extension pattern)
        :param handler_factory:
            aiohttp request handler factory. Factory should accept a single
            flask application.
        """
        self.handler_factory = handler_factory
        if app is not None:
            self.init_app(app)
    def init_app(self, app: flask.Flask):
        """Init Flask app
        :param app: Flask application
        """
        # Attach the aiohttp application to the Flask app; run() requires it.
        app.aiohttp_app = self.create_aiohttp_app(app)
    def create_aiohttp_app(self, app: flask.Flask) -> aiohttp.web.Application:
        """Create aiohttp web application from Flask application
        :param app: Flask application
        :returns: aiohttp web application
        """
        # aiohttp web application instance
        aio_app = aiohttp.web.Application()
        # WSGI handler for aiohttp
        wsgi_handler = self.handler_factory(app)
        # aiohttp's router should accept any possible HTTP method of request,
        # on any path - Flask's own routing does the real dispatching.
        aio_app.router.add_route('*', r'/{path:.*}', wsgi_handler)
        return aio_app
    @staticmethod
    def run(app: flask.Flask, *,
            host='127.0.0.1', port=None, debug=False, loop=None):
        """Run Flask application on aiohttp
        :param app: Flask application
        :param host: host name or ip
        :param port: port (default is 5000, or the port embedded in
            app.config['SERVER_NAME'] when present)
        :param debug: debug?  Enables the werkzeug debugger and reloader.
        :param loop: asyncio event loop to serve on (defaults to the
            current event loop)
        """
        # Check initialization status of flask app.
        if getattr(app, 'aiohttp_app', None) is None:
            raise RuntimeError(
                "This application is not initialized for Flask-aiohttp. "
                "Please initialize the app by `aio.init_app(app)`.")
        # Configure args
        if port is None:
            # Derive the port from SERVER_NAME ("host:port") when possible.
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[-1])
            else:
                port = 5000
        loop = loop or asyncio.get_event_loop()
        # Define run_server
        def run_server():
            # run_server can be called in another thread (the werkzeug
            # reloader does this), so re-bind the loop to that thread.
            asyncio.set_event_loop(loop)
            coroutine = loop.create_server(
                app.aiohttp_app.make_handler(), host, port)
            loop.run_until_complete(coroutine)
            try:
                loop.run_forever()
            except KeyboardInterrupt:
                pass
        # Configure logging
        file_handler = logging.StreamHandler()
        app.logger.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        if debug:
            # Logging
            app.logger.setLevel(logging.DEBUG)
            # Wrap WSGI app with werkzeug debugger.
            app.wsgi_app = wrap_wsgi_middleware(DebuggedApplication)(
                app.wsgi_app)
            # WERKZEUG_RUN_MAIN is set in the reloader's child process;
            # only the parent announces the listening address.
            if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
                app.logger.info(' * Running on http://{}:{}/'
                                .format(host, port))
            # Run with reloader
            run_with_reloader(run_server)
        else:
            app.logger.info(' * Running on http://{}:{}/'.format(host, port))
            run_server()
    @property
    def ws(self) -> aiohttp.web.WebSocketResponse:
        """Websocket response of aiohttp for the current request.

        :raises RuntimeError: when the current request is not a websocket
            request (no 'wsgi.websocket' in the WSGI environ).
        """
        ws = request.environ.get('wsgi.websocket', None)
        if ws is None:
            raise RuntimeError('Request context is not a WebSocket context.')
        return ws
| 29.631579 | 78 | 0.601243 | """:mod:`flask_aiohttp` --- Asynchronous Flask with aiohttp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides Flask extension for asynchronous I/O.
With this extension, we can use `asyncio.coroutine` as Flask's view function.
So, we can add
asyncio-redis <https://github.com/jonathanslenders/asyncio-redis>`_, or
websocket support to your application.
To make view asynchronous, just simply add :func:`helper.async` decorator to
your view function ::
@app.route('/foo')
@async
def lazy():
yield from asyncio.sleep(3)
return 'Done'
You have to run your flask application with :class:`AioHTTP` ::
aio = AioHTTP(app)
aio.run(app)
And you can also use gunicorn ::
aio = AioHTTP(flask_app)
app = aio.create_aiohttp_app(flask_app)
# Run gunicorn by
#
# gunicorn your_module:app -k aiohttp.worker.GunicornWebWorker
# -b localhost:8080
You can even use aiohttp's websocket in your Flask application using
:func:`helper.websocket` ::
aio = AioHTTP(flask_app)
@app.route('echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
"""
import os
import asyncio
import logging
import flask
import aiohttp.web
from flask import request
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .helper import async, websocket, has_websocket, wrap_wsgi_middleware
from .handler import WSGIHandlerBase, WSGIWebSocketHandler
__all__ = ['AioHTTP', 'async', 'websocket', 'has_websocket',
'wrap_wsgi_middleware']
class AioHTTP(object):
"""Flask middleware for aiohttp"""
def __init__(self, app: flask.Flask=None, *,
handler_factory=WSGIWebSocketHandler):
"""
:param app:
Flask application
:param handler_factory:
aiohttp request handler factory. Factory should accept a single
flask application.
"""
self.handler_factory = handler_factory
if app is not None:
self.init_app(app)
def init_app(self, app: flask.Flask):
"""Init Flask app
:param app: Flask application
"""
app.aiohttp_app = self.create_aiohttp_app(app)
def create_aiohttp_app(self, app: flask.Flask) -> aiohttp.web.Application:
"""Create aiohttp web application from Flask application
:param app: Flask application
:returns: aiohttp web application
"""
aio_app = aiohttp.web.Application()
wsgi_handler = self.handler_factory(app)
aio_app.router.add_route('*', r'/{path:.*}', wsgi_handler)
return aio_app
@staticmethod
def run(app: flask.Flask, *,
host='127.0.0.1', port=None, debug=False, loop=None):
"""Run Flask application on aiohttp
:param app: Flask application
:param host: host name or ip
:param port: port (default is 5000)
:param debug: debug?
"""
# Check initialization status of flask app.
if getattr(app, 'aiohttp_app', None) is None:
raise RuntimeError(
"This application is not initialized for Flask-aiohttp. "
"Please initialize the app by `aio.init_app(app)`.")
# Configure args
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[-1])
else:
port = 5000
loop = loop or asyncio.get_event_loop()
# Define run_server
def run_server():
# run_server can be called in another thread
asyncio.set_event_loop(loop)
coroutine = loop.create_server(
app.aiohttp_app.make_handler(), host, port)
loop.run_until_complete(coroutine)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Configure logging
file_handler = logging.StreamHandler()
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
if debug:
# Logging
app.logger.setLevel(logging.DEBUG)
# Wrap WSGI app with werkzeug debugger.
app.wsgi_app = wrap_wsgi_middleware(DebuggedApplication)(
app.wsgi_app)
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
app.logger.info(' * Running on http://{}:{}/'
.format(host, port))
# Run with reloader
run_with_reloader(run_server)
else:
app.logger.info(' * Running on http://{}:{}/'.format(host, port))
run_server()
@property
def ws(self) -> aiohttp.web.WebSocketResponse:
"""Websocket response of aiohttp"""
ws = request.environ.get('wsgi.websocket', None)
if ws is None:
raise RuntimeError('Request context is not a WebSocket context.')
return ws
| false | true |
f722a782ae9258889684d63a291930f09d41f5ed | 29,165 | py | Python | src/redfish/ris/ris.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | null | null | null | src/redfish/ris/ris.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | null | null | null | src/redfish/ris/ris.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | null | null | null | ###
# Copyright 2016 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
"""RIS implementation"""
#---------Imports---------
import re
import sys
import logging
import threading
import urlparse2 #pylint warning disable
from Queue import Queue
from collections import (OrderedDict)
import jsonpath_rw
import jsonpointer
from jsonpointer import set_pointer
import redfish.rest.v1
from redfish.ris.sharedtypes import Dictable
#---------End of imports---------
#---------Debug logger---------
LOGGER = logging.getLogger(__name__)
#---------End of debug logger---------
class BiosUnregisteredError(Exception):
    """Error raised when the BIOS provider has not been registered
    correctly in iLO."""
    pass
class SessionExpiredRis(Exception):
    """Error raised when the RIS session is no longer valid (expired)."""
    pass
class RisMonolithMemberBase(Dictable):
    """RIS monolith member base class.

    Marker base for monolith members; inherits dict-conversion behavior
    from :class:`Dictable` (defined in redfish.ris.sharedtypes).
    """
    pass
class RisMonolithMemberv100(RisMonolithMemberBase):
    """Wrapper around RestResponse that adds the monolith data:
    type bookkeeping, recorded patches, and (de)serialization helpers."""
    def __init__(self, restresp, isredfish):
        """
        :param restresp: REST response to wrap.
        :param isredfish: True selects the Redfish type key (@odata.type),
                          False the legacy REST key (Type).
        :type isredfish: boolean.
        """
        self._resp = restresp
        self._patches = list()
        self._type = None
        if isredfish:
            self._typestring = u'@odata.type'
        else:
            self._typestring = u'Type'
    def _get_type(self):
        """Return type from monolith (falls back to lowercase 'type')."""
        if self._typestring in self._resp.dict:
            return self._resp.dict[self._typestring]
        elif u'type' in self._resp.dict:
            return self._resp.dict[u'type']
        return None
    type = property(_get_type, None)
    def _get_maj_type(self):
        """Return major type (the type string minus its 4-char minor
        version suffix)."""
        if self.type:
            return self.type[:-4]
        return None
    maj_type = property(_get_maj_type, None)
    def _get_resp(self):
        """Return the wrapped RestResponse."""
        return self._resp
    resp = property(_get_resp, None)
    def _get_patches(self):
        """Return the patches recorded against this member."""
        return self._patches
    patches = property(_get_patches, None)
    def to_dict(self):
        """Convert this monolith member to a serializable dict."""
        result = OrderedDict()
        if self.type:
            result[u'Type'] = self.type
            if self.maj_type == u'Collection.1' and \
                                            u'MemberType' in self._resp.dict:
                result[u'MemberType'] = self._resp.dict[u'MemberType']
            result[u'links'] = OrderedDict()
            result[u'links'][u'href'] = ''
            headers = dict()
            for header in self._resp.getheaders():
                headers[header[0]] = header[1]
            result[u'Headers'] = headers
            if 'etag' in headers:
                result[u'ETag'] = headers['etag']
            result[u'OriginalUri'] = self._resp.request.path
            result[u'Content'] = self._resp.dict
            result[u'Patches'] = self._patches
        return result
    def load_from_dict(self, src):
        """Load variables from dict monolith
        :param src: source to load from
        :type src: dict
        """
        if u'Type' in src:
            self._type = src[u'Type']
            restreq = redfish.rest.v1.RestRequest(method='GET', \
                                                path=src[u'OriginalUri'])
            src['restreq'] = restreq
            self._resp = redfish.rest.v1.StaticRestResponse(**src)
            self._patches = src[u'Patches']
    def _reducer(self, indict, breadcrumbs=None, outdict=None):
        """Flatten indict into outdict keyed by '/'-joined breadcrumbs.
        :param indict: input dictionary.
        :type indict: dict.
        :param breadcrumbs: key path accumulated so far.
        :type breadcrumbs: list.
        :param outdict: output dict; a fresh OrderedDict when omitted.
        :type outdict: dict.
        :returns: returns outdict
        """
        # BUGFIX: outdict used to be a mutable default argument
        # (outdict=OrderedDict()), so flattened entries leaked between
        # calls and between instances.
        if breadcrumbs is None:
            breadcrumbs = []
        if outdict is None:
            outdict = OrderedDict()
        if isinstance(indict, dict):
            for key, val in indict.items():
                breadcrumbs.append(key)  # push
                if isinstance(val, (list, tuple)):
                    for i in range(0, len(val)):
                        breadcrumbs.append(u'%s' % i)  # push
                        self._reducer(val[i], breadcrumbs, outdict)
                        del breadcrumbs[-1]  # pop
                else:
                    # dicts recurse further; scalars fall through to the
                    # leaf branch below. (The original's separate tuple
                    # branch was unreachable and has been removed.)
                    self._reducer(val, breadcrumbs, outdict)
                del breadcrumbs[-1]  # pop
        else:
            outkey = '/'.join(breadcrumbs)
            outdict[outkey] = indict
        return outdict
    def _jsonpath_reducer(self, indict, breadcrumbs=None, outdict=None):
        """Flatten indict into outdict keyed in JSONpath-like notation.
        :param indict: input dictionary.
        :type indict: dict.
        :param breadcrumbs: key path accumulated so far.
        :type breadcrumbs: list.
        :param outdict: output dict; a fresh OrderedDict when omitted.
        :type outdict: dict.
        :returns: returns outdict
        """
        # BUGFIX: outdict used to be a shared mutable default argument.
        if breadcrumbs is None:
            breadcrumbs = []
        if outdict is None:
            outdict = OrderedDict()
        if isinstance(indict, dict):
            for key, val in indict.items():
                breadcrumbs.append(key)  # push
                if isinstance(val, (list, tuple)):
                    for i in range(0, len(val)):
                        breadcrumbs.append(u'[%s]' % i)  # push
                        # NOTE(review): delegation to _reducer (rather than
                        # recursing into _jsonpath_reducer) is kept from the
                        # original, so nested leaves use _reducer's '/'
                        # key formatting - confirm whether this is intended.
                        self._reducer(val[i], breadcrumbs, outdict)
                        del breadcrumbs[-1]  # pop
                else:
                    self._reducer(val, breadcrumbs, outdict)
                del breadcrumbs[-1]  # pop
        else:
            outkey = '.'.join(breadcrumbs)
            outkey = outkey.replace(u'.[', u'[')
            outdict[outkey] = indict
        return outdict
    def reduce(self):
        """Returns a "flatten" dict with nested data represented in
        JSONpath notation"""
        result = OrderedDict()
        if self.type:
            result[u'Type'] = self.type
            if self.maj_type == u'Collection.1' and \
                                            u'MemberType' in self._resp.dict:
                result[u'MemberType'] = self._resp.dict[u'MemberType']
            # A redundant discarded call to self._reducer was removed here;
            # it only appeared meaningful because of the shared-default bug.
            result[u'OriginalUri'] = self._resp.request.path
            result[u'Content'] = self._reducer(self._resp.dict)
        return result
class RisMonolithv100(Dictable):
"""Monolithic cache of RIS data"""
def __init__(self, client):
"""Initialize RisMonolith
:param client: client to utilize
:type client: RmcClient object
"""
self._client = client
self.name = u"Monolithic output of RIS Service"
self.types = OrderedDict()
self._visited_urls = list()
self._current_location = '/' # "root"
self.queue = Queue()
self._type = None
self._name = None
self.progress = 0
self.reload = False
self.is_redfish = client._rest_client.is_redfish
if self.is_redfish:
self._resourcedir = '/redfish/v1/ResourceDirectory/'
self._typestring = u'@odata.type'
self._hrefstring = u'@odata.id'
else:
self._resourcedir = '/rest/v1/ResourceDirectory'
self._typestring = u'Type'
self._hrefstring = u'href'
    def _get_type(self):
        """Return monolith version type"""
        # Fixed schema-version tag for this monolith implementation.
        return u"Monolith.1.0.0"
    type = property(_get_type, None)  # read-only version identifier
def update_progress(self):
"""Simple function to increment the dot progress"""
if self.progress % 6 == 0:
sys.stdout.write('.')
    def get_visited_urls(self):
        """Return the visited URLS"""
        # Returns the live list (not a copy); mutations are visible to
        # the monolith.
        return self._visited_urls
    def set_visited_urls(self, visited_urls):
        """Set visited URLS to given list.

        :param visited_urls: list of URL strings already crawled; stored
            by reference (not copied).
        """
        self._visited_urls = visited_urls
    def load(self, path=None, includelogs=False, skipinit=False, \
                        skipcrawl=False, loadtype='href', loadcomplete=False):
        """Walk entire RIS model and cache all responses in self.
        :param path: path to start load from.
        :type path: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean
        """
        if not skipinit:
            # 40 == logging.ERROR: when logging is effectively silenced,
            # fall back to raw stdout progress output.
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Discovering data...")
            else:
                LOGGER.warning("Discovering data...")
            self.name = self.name + u' at %s' % self._client.base_url
            if not self.types:
                self.types = OrderedDict()
        # Spin up five daemon worker threads (at most once) to drain
        # self.queue. NOTE(review): SuperDuperWorker is defined elsewhere
        # in this file; assumed to consume (resp, path, skipinit, monolith)
        # tuples queued by _load - confirm against its run() method.
        if not threading.active_count() >= 6:
            for _ in range(5):
                workhand = SuperDuperWorker(self.queue)
                workhand.setDaemon(True)
                workhand.start()
        # Default to the client's service root when no start path is given.
        selectivepath = path
        if not selectivepath:
            selectivepath = self._client._rest_client.default_prefix
        self._load(selectivepath, skipcrawl=skipcrawl, includelogs=includelogs,\
             skipinit=skipinit, loadtype=loadtype, loadcomplete=loadcomplete)
        # Block until every queued response has been processed by a worker.
        self.queue.join()
        if not skipinit:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Done\n")
            else:
                LOGGER.warning("Done\n")
    def _load(self, path, skipcrawl=False, originaluri=None, includelogs=False,\
                         skipinit=False, loadtype='href', loadcomplete=False):
        """Helper function to main load function.

        Fetches *path*, queues the response for the monolith workers, and
        (unless skipcrawl) recursively follows every href/@odata.id link.
        :param path: path to start load from.
        :type path: str.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param originaluri: variable to assist in determining originating path.
        :type originaluri: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean
        """
        # Page-1 links duplicate the unpaged resource; log resources are
        # only crawled on request (they can be very large).
        if path.endswith("?page=1"):
            return
        elif not includelogs:
            if "/Logs/" in path:
                return
        #TODO: need to find a better way to support non ascii characters
        path = path.replace("|", "%7C")
        #remove fragments
        newpath = urlparse2.urlparse(path)
        newpath.fragment = ''
        path = urlparse2.urlunparse(newpath)
        LOGGER.debug(u'_loading %s', path)
        # Skip URLs already crawled in this session unless a reload was
        # explicitly requested.
        if not self.reload:
            if path.lower() in self._visited_urls:
                return
        resp = self._client.get(path)
        # A failing BIOS path means the BIOS provider never registered.
        if resp.status != 200 and path.lower() == self._client.typepath.defs.\
                                                                    biospath:
            raise BiosUnregisteredError()
        elif resp.status != 200:
            # Retry once with a trailing slash (some services require it).
            path = path + '/'
            resp = self._client.get(path)
            if resp.status == 401:
                raise SessionExpiredRis("Invalid session. Please logout and "\
                                        "log back in or include credentials.")
            elif resp.status != 200:
                return
        # 'ref' loads resolve JSON-schema $ref indirections in place.
        if loadtype == "ref":
            self.parse_schema(resp)
        # Hand the response to the worker threads for monolith insertion.
        self.queue.put((resp, path, skipinit, self))
        if loadtype == 'href':
            #follow all the href attributes
            if self.is_redfish:
                jsonpath_expr = jsonpath_rw.parse(u"$..'@odata.id'")
            else:
                jsonpath_expr = jsonpath_rw.parse(u'$..href')
            matches = jsonpath_expr.find(resp.dict)
            # Follow server-side pagination via links.NextPage, preserving
            # the uri that the pagination chain originated from.
            if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
                if originaluri:
                    next_link_uri = originaluri + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = u'%s' % next_link_uri
                    self._load(href, originaluri=originaluri, \
                               includelogs=includelogs, skipcrawl=skipcrawl, \
                               skipinit=skipinit)
                else:
                    next_link_uri = path + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = u'%s' % next_link_uri
                    self._load(href, originaluri=path, includelogs=includelogs,\
                                        skipcrawl=skipcrawl, skipinit=skipinit)
            # NOTE(review): check_for_directory is defined outside this
            # chunk; it appears to detect a resource-directory shortcut so
            # the full tree walk can be skipped - confirm its contract.
            (newversion, dirmatch) = self.check_for_directory(matches)
            if not newversion and not skipcrawl:
                for match in matches:
                    # Never descend into the schema/registry stores here;
                    # they are huge and handled separately.
                    if path == "/rest/v1":
                        if str(match.full_path) == "links.Schemas.href" or \
                                str(match.full_path) == "links.Registries.href":
                            continue
                    else:
                        if str(match.full_path) == "Registries.@odata.id" or \
                                str(match.full_path) == "JsonSchemas.@odata.id":
                            continue
                    # Avoid trivial self-references.
                    if match.value == path:
                        continue
                    href = u'%s' % match.value
                    self._load(href, skipcrawl=skipcrawl, \
                           originaluri=originaluri, includelogs=includelogs, \
                                                            skipinit=skipinit)
            elif not skipcrawl:
                # Resource directory available: crawl it instead of the tree.
                href = u'%s' % dirmatch.value
                self._load(href, skipcrawl=skipcrawl, originaluri=originaluri, \
                                    includelogs=includelogs, skipinit=skipinit)
            if loadcomplete:
                # Full-monolith download: follow every match unconditionally.
                for match in matches:
                    self._load(match.value, skipcrawl=skipcrawl, originaluri=\
                       originaluri, includelogs=includelogs, skipinit=skipinit)
def parse_schema(self, resp):
"""Function to get and replace schema $ref with data
:param resp: response data containing ref items.
:type resp: str.
"""
#pylint: disable=maybe-no-member
jsonpath_expr = jsonpath_rw.parse(u'$.."$ref"')
matches = jsonpath_expr.find(resp.dict)
respcopy = resp.dict
listmatch = None
if matches:
for match in matches:
fullpath = str(match.full_path)
jsonfile = match.value.split('#')[0]
jsonpath = match.value.split('#')[1]
if '@odata' in fullpath:
schemapath = '/' + fullpath.replace('@odata.', '~').\
replace('.', '/').replace('~', '@odata.')
else:
schemapath = '/' + fullpath.replace('.', '/')
if '.json' in jsonfile:
itempath = schemapath
if self.is_redfish:
if resp.request.path[-1] == '/':
newpath = '/'.join(resp.request.path.split('/')\
[:-2]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')\
[:-1]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')[:-1]) \
+ '/' + jsonfile
if 'href.json' in newpath:
continue
if not newpath.lower() in self._visited_urls:
self.load(newpath, skipcrawl=True, includelogs=False, \
skipinit=True, loadtype='ref')
instance = list()
if u'st' in self.types:
for stitem in self.types[u'st'][u'Instances']:
instance.append(stitem)
if u'ob' in self.types:
for obitem in self.types[u'ob'][u'Instances']:
instance.append(obitem)
for item in instance:
if jsonfile in item.resp._rest_request._path:
if 'anyOf' in fullpath:
break
dictcopy = item.resp.dict
listmatch = re.search('[[][0-9]+[]]', itempath)
if listmatch:
start = listmatch.regs[0][0]
end = listmatch.regs[0][1]
newitempath = [itempath[:start], itempath[end:]]
start = jsonpointer.JsonPointer(newitempath[0])
end = jsonpointer.JsonPointer(newitempath[1])
del start.parts[-1], end.parts[-1]
vals = start.resolve(respcopy)
count = 0
for val in vals:
try:
if '$ref' in end.resolve(val).iterkeys():
end.resolve(val).pop('$ref')
end.resolve(val).update(dictcopy)
replace_pointer = jsonpointer.\
JsonPointer(end.path + jsonpath)
data = replace_pointer.resolve(val)
set_pointer(val, end.path, data)
start.resolve(respcopy)[count].\
update(val)
break
except:
count += 1
else:
itempath = jsonpointer.JsonPointer(itempath)
del itempath.parts[-1]
if '$ref' in itempath.resolve(respcopy).\
iterkeys():
itempath.resolve(respcopy).pop('$ref')
itempath.resolve(respcopy).update(dictcopy)
if jsonpath:
if 'anyOf' in fullpath:
continue
if not jsonfile:
replacepath = jsonpointer.JsonPointer(jsonpath)
schemapath = schemapath.replace('/$ref', '')
schemapath = jsonpointer.JsonPointer(schemapath)
data = replacepath.resolve(respcopy)
if '$ref' in schemapath.resolve(respcopy):
schemapath.resolve(respcopy).pop('$ref')
schemapath.resolve(respcopy).update(data)
else:
if not listmatch:
schemapath = schemapath.replace('/$ref', '')
replacepath = schemapath + jsonpath
replace_pointer = jsonpointer.\
JsonPointer(replacepath)
data = replace_pointer.resolve(respcopy)
set_pointer(respcopy, schemapath, data)
resp.json(respcopy)
else:
resp.json(respcopy)
def check_for_directory(self, matches):
"""Function to allow checking for new directory
:param matches: current found matches.
:type matches: dict.
"""
for match in matches:
if match.value == self._resourcedir:
return (True, match)
return (False, None)
def branch_worker(self, resp, path, skipinit):
"""Helper for load function, creates threaded worker
:param resp: response received.
:type resp: str.
:param path: path correlating to the response.
:type path: str.
:param skipinit: flag to determine if progress bar should be updated.
:type skipinit: boolean.
"""
self._visited_urls.append(path.lower())
member = RisMonolithMemberv100(resp, self.is_redfish)
if not member.type:
return
self.update_member(member)
if not skipinit:
self.progress += 1
if LOGGER.getEffectiveLevel() == 40:
self.update_progress()
def update_member(self, member):
"""Adds member to this monolith. If the member already exists the
data is updated in place.
:param member: Ris monolith member object made by branch worker.
:type member: RisMonolithMemberv100.
"""
if member.maj_type not in self.types:
self.types[member.maj_type] = OrderedDict()
self.types[member.maj_type][u'Instances'] = list()
found = False
for indices in xrange(len(self.types[member.maj_type][u'Instances'])):
inst = self.types[member.maj_type][u'Instances'][indices]
if inst.resp.request.path == member.resp.request.path:
self.types[member.maj_type][u'Instances'][indices] = member
self.types[member.maj_type][u'Instances'][indices].patches.\
extend([patch for patch in inst.patches])
found = True
break
if not found:
self.types[member.maj_type][u'Instances'].append(member)
def load_from_dict(self, src):
"""Load data to monolith from dict
:param src: data receive from rest operation.
:type src: str.
"""
self._type = src[u'Type']
self._name = src[u'Name']
self.types = OrderedDict()
for typ in src[u'Types']:
for inst in typ[u'Instances']:
member = RisMonolithMemberv100(None, self.is_redfish)
member.load_from_dict(inst)
self.update_member(member)
return
def to_dict(self):
"""Convert data to monolith from dict"""
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
type_entry[u'Instances'] = list()
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'].append(inst.to_dict())
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def reduce(self):
"""Reduce monolith data"""
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'] = inst.reduce()
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def _jsonpath2jsonpointer(self, instr):
"""Convert json path to json pointer
:param instr: input path to be converted to pointer.
:type instr: str.
"""
outstr = instr.replace('.[', '[')
outstr = outstr.replace('[', '/')
outstr = outstr.replace(']', '/')
if outstr.endswith('/'):
outstr = outstr[:-1]
return outstr
def _get_current_location(self):
"""Return current location"""
return self._current_location
def _set_current_location(self, newval):
"""Set current location"""
self._current_location = newval
location = property(_get_current_location, _set_current_location)
def list(self, lspath=None):
"""Function for list command
:param lspath: path list.
:type lspath: list.
"""
results = list()
path_parts = [u'Types'] # Types is always assumed
if isinstance(lspath, list) and len(lspath) > 0:
lspath = lspath[0]
path_parts.extend(lspath.split(u'/'))
elif not lspath:
lspath = u'/'
else:
path_parts.extend(lspath.split(u'/'))
currpos = self.to_dict()
for path_part in path_parts:
if not path_part:
continue
if isinstance(currpos, RisMonolithMemberv100):
break
elif isinstance(currpos, dict) and path_part in currpos:
currpos = currpos[path_part]
elif isinstance(currpos, list):
for positem in currpos:
if u'Type' in positem and path_part == positem[u'Type']:
currpos = positem
break
results.append(currpos)
return results
def killthreads(self):
"""Function to kill threads on logout"""
#TODO: revisit to make sure this is correct
threads = []
for thread in threading.enumerate():
if isinstance(thread, SuperDuperWorker):
self.queue.put(('KILL', 'KILL', 'KILL', 'KILL'))
threads.append(thread)
for thread in threads:
thread.join()
class RisMonolith(RisMonolithv100):
"""Latest implementation of RisMonolith"""
def __init__(self, client):
"""Initialize Latest RisMonolith
:param client: client to utilize
:type client: RmcClient object
"""
super(RisMonolith, self).__init__(client)
class SuperDuperWorker(threading.Thread):
"""Recursive worker implementation"""
def __init__(self, queue):
"""Initialize SuperDuperWorker
:param queue: queue for worker
:type queue: Queue object
"""
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Thread creator"""
while True:
(resp, path, skipinit, thobj) = self.queue.get()
if resp == 'KILL' and path == 'KILL' and skipinit == 'KILL' and\
thobj == 'KILL':
break
thobj.branch_worker(resp, path, skipinit)
self.queue.task_done()
| 36.365337 | 82 | 0.506189 |
import re
import sys
import logging
import threading
import urlparse2
from Queue import Queue
from collections import (OrderedDict)
import jsonpath_rw
import jsonpointer
from jsonpointer import set_pointer
import redfish.rest.v1
from redfish.ris.sharedtypes import Dictable
LOGGER = logging.getLogger(__name__)
class BiosUnregisteredError(Exception):
pass
class SessionExpiredRis(Exception):
pass
class RisMonolithMemberBase(Dictable):
pass
class RisMonolithMemberv100(RisMonolithMemberBase):
def __init__(self, restresp, isredfish):
self._resp = restresp
self._patches = list()
self._type = None
if isredfish:
self._typestring = u'@odata.type'
else:
self._typestring = u'Type'
def _get_type(self):
if self._typestring in self._resp.dict:
return self._resp.dict[self._typestring]
elif u'type' in self._resp.dict:
return self._resp.dict[u'type']
return None
type = property(_get_type, None)
def _get_maj_type(self):
if self.type:
return self.type[:-4]
return None
maj_type = property(_get_maj_type, None)
def _get_resp(self):
return self._resp
resp = property(_get_resp, None)
def _get_patches(self):
return self._patches
patches = property(_get_patches, None)
def to_dict(self):
result = OrderedDict()
if self.type:
result[u'Type'] = self.type
if self.maj_type == u'Collection.1' and \
u'MemberType' in self._resp.dict:
result[u'MemberType'] = self._resp.dict[u'MemberType']
result[u'links'] = OrderedDict()
result[u'links'][u'href'] = ''
headers = dict()
for header in self._resp.getheaders():
headers[header[0]] = header[1]
result[u'Headers'] = headers
if 'etag' in headers:
result[u'ETag'] = headers['etag']
result[u'OriginalUri'] = self._resp.request.path
result[u'Content'] = self._resp.dict
result[u'Patches'] = self._patches
return result
def load_from_dict(self, src):
if u'Type' in src:
self._type = src[u'Type']
restreq = redfish.rest.v1.RestRequest(method='GET', \
path=src[u'OriginalUri'])
src['restreq'] = restreq
self._resp = redfish.rest.v1.StaticRestResponse(**src)
self._patches = src[u'Patches']
def _reducer(self, indict, breadcrumbs=None, outdict=OrderedDict()):
if breadcrumbs is None:
breadcrumbs = []
if isinstance(indict, dict):
for key, val in indict.items():
breadcrumbs.append(key)
if isinstance(val, dict):
self._reducer(val, breadcrumbs, outdict)
elif isinstance(val, list) or isinstance(val, tuple):
for i in range(0, len(val)):
breadcrumbs.append(u'%s' % i)
self._reducer(val[i], breadcrumbs, outdict)
del breadcrumbs[-1]
elif isinstance(val, tuple):
self._reducer(val, breadcrumbs, outdict)
else:
self._reducer(val, breadcrumbs, outdict)
del breadcrumbs[-1]
else:
outkey = '/'.join(breadcrumbs)
outdict[outkey] = indict
return outdict
def _jsonpath_reducer(self, indict, breadcrumbs=None, \
outdict=OrderedDict()):
if breadcrumbs is None:
breadcrumbs = []
if isinstance(indict, dict):
for key, val in indict.items():
breadcrumbs.append(key)
if isinstance(val, dict):
self._reducer(val, breadcrumbs, outdict)
elif isinstance(val, list) or isinstance(val, tuple):
for i in range(0, len(val)):
breadcrumbs.append(u'[%s]' % i)
self._reducer(val[i], breadcrumbs, outdict)
del breadcrumbs[-1]
elif isinstance(val, tuple):
self._reducer(val, breadcrumbs, outdict)
else:
self._reducer(val, breadcrumbs, outdict)
del breadcrumbs[-1]
else:
outkey = '.'.join(breadcrumbs)
outkey = outkey.replace(u'.[', u'[')
outdict[outkey] = indict
return outdict
def reduce(self):
result = OrderedDict()
if self.type:
result[u'Type'] = self.type
if self.maj_type == u'Collection.1' and \
u'MemberType' in self._resp.dict:
result[u'MemberType'] = self._resp.dict[u'MemberType']
self._reducer(self._resp.dict)
result[u'OriginalUri'] = self._resp.request.path
result[u'Content'] = self._reducer(self._resp.dict)
return result
class RisMonolithv100(Dictable):
def __init__(self, client):
self._client = client
self.name = u"Monolithic output of RIS Service"
self.types = OrderedDict()
self._visited_urls = list()
self._current_location = '/'
self.queue = Queue()
self._type = None
self._name = None
self.progress = 0
self.reload = False
self.is_redfish = client._rest_client.is_redfish
if self.is_redfish:
self._resourcedir = '/redfish/v1/ResourceDirectory/'
self._typestring = u'@odata.type'
self._hrefstring = u'@odata.id'
else:
self._resourcedir = '/rest/v1/ResourceDirectory'
self._typestring = u'Type'
self._hrefstring = u'href'
def _get_type(self):
return u"Monolith.1.0.0"
type = property(_get_type, None)
def update_progress(self):
if self.progress % 6 == 0:
sys.stdout.write('.')
def get_visited_urls(self):
return self._visited_urls
def set_visited_urls(self, visited_urls):
self._visited_urls = visited_urls
def load(self, path=None, includelogs=False, skipinit=False, \
skipcrawl=False, loadtype='href', loadcomplete=False):
if not skipinit:
if LOGGER.getEffectiveLevel() == 40:
sys.stdout.write("Discovering data...")
else:
LOGGER.warning("Discovering data...")
self.name = self.name + u' at %s' % self._client.base_url
if not self.types:
self.types = OrderedDict()
if not threading.active_count() >= 6:
for _ in range(5):
workhand = SuperDuperWorker(self.queue)
workhand.setDaemon(True)
workhand.start()
selectivepath = path
if not selectivepath:
selectivepath = self._client._rest_client.default_prefix
self._load(selectivepath, skipcrawl=skipcrawl, includelogs=includelogs,\
skipinit=skipinit, loadtype=loadtype, loadcomplete=loadcomplete)
self.queue.join()
if not skipinit:
if LOGGER.getEffectiveLevel() == 40:
sys.stdout.write("Done\n")
else:
LOGGER.warning("Done\n")
def _load(self, path, skipcrawl=False, originaluri=None, includelogs=False,\
skipinit=False, loadtype='href', loadcomplete=False):
if path.endswith("?page=1"):
return
elif not includelogs:
if "/Logs/" in path:
return
path = path.replace("|", "%7C")
newpath = urlparse2.urlparse(path)
newpath.fragment = ''
path = urlparse2.urlunparse(newpath)
LOGGER.debug(u'_loading %s', path)
if not self.reload:
if path.lower() in self._visited_urls:
return
resp = self._client.get(path)
if resp.status != 200 and path.lower() == self._client.typepath.defs.\
biospath:
raise BiosUnregisteredError()
elif resp.status != 200:
path = path + '/'
resp = self._client.get(path)
if resp.status == 401:
raise SessionExpiredRis("Invalid session. Please logout and "\
"log back in or include credentials.")
elif resp.status != 200:
return
if loadtype == "ref":
self.parse_schema(resp)
self.queue.put((resp, path, skipinit, self))
if loadtype == 'href':
if self.is_redfish:
jsonpath_expr = jsonpath_rw.parse(u"$..'@odata.id'")
else:
jsonpath_expr = jsonpath_rw.parse(u'$..href')
matches = jsonpath_expr.find(resp.dict)
if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
if originaluri:
next_link_uri = originaluri + '?page=' + \
str(resp.dict['links']['NextPage']['page'])
href = u'%s' % next_link_uri
self._load(href, originaluri=originaluri, \
includelogs=includelogs, skipcrawl=skipcrawl, \
skipinit=skipinit)
else:
next_link_uri = path + '?page=' + \
str(resp.dict['links']['NextPage']['page'])
href = u'%s' % next_link_uri
self._load(href, originaluri=path, includelogs=includelogs,\
skipcrawl=skipcrawl, skipinit=skipinit)
(newversion, dirmatch) = self.check_for_directory(matches)
if not newversion and not skipcrawl:
for match in matches:
if path == "/rest/v1":
if str(match.full_path) == "links.Schemas.href" or \
str(match.full_path) == "links.Registries.href":
continue
else:
if str(match.full_path) == "Registries.@odata.id" or \
str(match.full_path) == "JsonSchemas.@odata.id":
continue
if match.value == path:
continue
href = u'%s' % match.value
self._load(href, skipcrawl=skipcrawl, \
originaluri=originaluri, includelogs=includelogs, \
skipinit=skipinit)
elif not skipcrawl:
href = u'%s' % dirmatch.value
self._load(href, skipcrawl=skipcrawl, originaluri=originaluri, \
includelogs=includelogs, skipinit=skipinit)
if loadcomplete:
for match in matches:
self._load(match.value, skipcrawl=skipcrawl, originaluri=\
originaluri, includelogs=includelogs, skipinit=skipinit)
def parse_schema(self, resp):
jsonpath_expr = jsonpath_rw.parse(u'$.."$ref"')
matches = jsonpath_expr.find(resp.dict)
respcopy = resp.dict
listmatch = None
if matches:
for match in matches:
fullpath = str(match.full_path)
jsonfile = match.value.split('#')[0]
jsonpath = match.value.split('#')[1]
if '@odata' in fullpath:
schemapath = '/' + fullpath.replace('@odata.', '~').\
replace('.', '/').replace('~', '@odata.')
else:
schemapath = '/' + fullpath.replace('.', '/')
if '.json' in jsonfile:
itempath = schemapath
if self.is_redfish:
if resp.request.path[-1] == '/':
newpath = '/'.join(resp.request.path.split('/')\
[:-2]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')\
[:-1]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')[:-1]) \
+ '/' + jsonfile
if 'href.json' in newpath:
continue
if not newpath.lower() in self._visited_urls:
self.load(newpath, skipcrawl=True, includelogs=False, \
skipinit=True, loadtype='ref')
instance = list()
if u'st' in self.types:
for stitem in self.types[u'st'][u'Instances']:
instance.append(stitem)
if u'ob' in self.types:
for obitem in self.types[u'ob'][u'Instances']:
instance.append(obitem)
for item in instance:
if jsonfile in item.resp._rest_request._path:
if 'anyOf' in fullpath:
break
dictcopy = item.resp.dict
listmatch = re.search('[[][0-9]+[]]', itempath)
if listmatch:
start = listmatch.regs[0][0]
end = listmatch.regs[0][1]
newitempath = [itempath[:start], itempath[end:]]
start = jsonpointer.JsonPointer(newitempath[0])
end = jsonpointer.JsonPointer(newitempath[1])
del start.parts[-1], end.parts[-1]
vals = start.resolve(respcopy)
count = 0
for val in vals:
try:
if '$ref' in end.resolve(val).iterkeys():
end.resolve(val).pop('$ref')
end.resolve(val).update(dictcopy)
replace_pointer = jsonpointer.\
JsonPointer(end.path + jsonpath)
data = replace_pointer.resolve(val)
set_pointer(val, end.path, data)
start.resolve(respcopy)[count].\
update(val)
break
except:
count += 1
else:
itempath = jsonpointer.JsonPointer(itempath)
del itempath.parts[-1]
if '$ref' in itempath.resolve(respcopy).\
iterkeys():
itempath.resolve(respcopy).pop('$ref')
itempath.resolve(respcopy).update(dictcopy)
if jsonpath:
if 'anyOf' in fullpath:
continue
if not jsonfile:
replacepath = jsonpointer.JsonPointer(jsonpath)
schemapath = schemapath.replace('/$ref', '')
schemapath = jsonpointer.JsonPointer(schemapath)
data = replacepath.resolve(respcopy)
if '$ref' in schemapath.resolve(respcopy):
schemapath.resolve(respcopy).pop('$ref')
schemapath.resolve(respcopy).update(data)
else:
if not listmatch:
schemapath = schemapath.replace('/$ref', '')
replacepath = schemapath + jsonpath
replace_pointer = jsonpointer.\
JsonPointer(replacepath)
data = replace_pointer.resolve(respcopy)
set_pointer(respcopy, schemapath, data)
resp.json(respcopy)
else:
resp.json(respcopy)
def check_for_directory(self, matches):
for match in matches:
if match.value == self._resourcedir:
return (True, match)
return (False, None)
def branch_worker(self, resp, path, skipinit):
self._visited_urls.append(path.lower())
member = RisMonolithMemberv100(resp, self.is_redfish)
if not member.type:
return
self.update_member(member)
if not skipinit:
self.progress += 1
if LOGGER.getEffectiveLevel() == 40:
self.update_progress()
def update_member(self, member):
if member.maj_type not in self.types:
self.types[member.maj_type] = OrderedDict()
self.types[member.maj_type][u'Instances'] = list()
found = False
for indices in xrange(len(self.types[member.maj_type][u'Instances'])):
inst = self.types[member.maj_type][u'Instances'][indices]
if inst.resp.request.path == member.resp.request.path:
self.types[member.maj_type][u'Instances'][indices] = member
self.types[member.maj_type][u'Instances'][indices].patches.\
extend([patch for patch in inst.patches])
found = True
break
if not found:
self.types[member.maj_type][u'Instances'].append(member)
def load_from_dict(self, src):
self._type = src[u'Type']
self._name = src[u'Name']
self.types = OrderedDict()
for typ in src[u'Types']:
for inst in typ[u'Instances']:
member = RisMonolithMemberv100(None, self.is_redfish)
member.load_from_dict(inst)
self.update_member(member)
return
def to_dict(self):
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
type_entry[u'Instances'] = list()
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'].append(inst.to_dict())
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def reduce(self):
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'] = inst.reduce()
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def _jsonpath2jsonpointer(self, instr):
outstr = instr.replace('.[', '[')
outstr = outstr.replace('[', '/')
outstr = outstr.replace(']', '/')
if outstr.endswith('/'):
outstr = outstr[:-1]
return outstr
def _get_current_location(self):
return self._current_location
def _set_current_location(self, newval):
self._current_location = newval
location = property(_get_current_location, _set_current_location)
def list(self, lspath=None):
results = list()
path_parts = [u'Types']
if isinstance(lspath, list) and len(lspath) > 0:
lspath = lspath[0]
path_parts.extend(lspath.split(u'/'))
elif not lspath:
lspath = u'/'
else:
path_parts.extend(lspath.split(u'/'))
currpos = self.to_dict()
for path_part in path_parts:
if not path_part:
continue
if isinstance(currpos, RisMonolithMemberv100):
break
elif isinstance(currpos, dict) and path_part in currpos:
currpos = currpos[path_part]
elif isinstance(currpos, list):
for positem in currpos:
if u'Type' in positem and path_part == positem[u'Type']:
currpos = positem
break
results.append(currpos)
return results
def killthreads(self):
threads = []
for thread in threading.enumerate():
if isinstance(thread, SuperDuperWorker):
self.queue.put(('KILL', 'KILL', 'KILL', 'KILL'))
threads.append(thread)
for thread in threads:
thread.join()
class RisMonolith(RisMonolithv100):
def __init__(self, client):
super(RisMonolith, self).__init__(client)
class SuperDuperWorker(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
(resp, path, skipinit, thobj) = self.queue.get()
if resp == 'KILL' and path == 'KILL' and skipinit == 'KILL' and\
thobj == 'KILL':
break
thobj.branch_worker(resp, path, skipinit)
self.queue.task_done()
| true | true |
f722a82c58bac35d0593073d6622630648eef805 | 2,950 | py | Python | openbb_terminal/portfolio/reportlab_helpers.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 255 | 2022-03-29T16:43:51.000Z | 2022-03-31T23:57:08.000Z | openbb_terminal/portfolio/reportlab_helpers.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 14 | 2022-03-29T14:20:33.000Z | 2022-03-31T23:39:20.000Z | openbb_terminal/portfolio/reportlab_helpers.py | tehcoderer/GamestonkTerminal | 54a1b6f545a0016c576e9e00eef5c003d229dacf | [
"MIT"
] | 24 | 2022-03-29T15:28:56.000Z | 2022-03-31T23:54:15.000Z | """Reportlab Helpers"""
__docformat__ = "numpy"
from datetime import datetime
from typing import List
from reportlab.lib import colors
from reportlab.pdfgen import canvas
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.platypus import Paragraph, Table, TableStyle
def base_format(report: canvas.Canvas, header: str) -> None:
"""Applies a base page format to each page
Parameters
----------
report : canvas.Canvas
The report to be formatted
header : str
The header for the page
"""
report.setLineWidth(0.3)
report.setFont("Helvetica", 12)
report.drawString(30, 760, "OpenBB Terminal")
report.drawString(500, 760, datetime.now().strftime("%Y/%m/%d"))
report.drawString(275, 750, "Annual Report")
report.line(50, 730, 580, 730)
report.setFont("Helvetica", 20)
report.drawString(50, 705, header)
report.setFont("Helvetica", 12)
def draw_paragraph(
report: canvas.Canvas, msg: str, x: int, y: int, max_width: int, max_height: int
) -> None:
"""Draws a given paragraph
Parameters
----------
report : canvas.Canvas
The report to be formatted
msg : str
The contents of the paragraph
x : int
The x coordinate for the paragraph
y : int
The y coordinate for the paragraph
max_width : int
The maximum width allowed for the paragraph
max_height : int
The maximum height allowed for the paragraph
"""
message_style = ParagraphStyle("Normal")
message = msg.replace("\n", "<br />")
paragraph = Paragraph(message, style=message_style)
_, h = paragraph.wrap(max_width, max_height)
paragraph.drawOn(report, x, y - h)
def draw_table(
report: canvas.Canvas,
header_txt: str,
aW: int,
aH: int,
x: int,
data: List[List[str]],
) -> None:
"""Draw a table at given coordinates
Parameters
----------
report : canvas.Canvas
The report to be formatted
header_txt : str
The header for the table
aW : int
The width for the table
aH : int
The height for the table
x : int
The x coordinate for the table
data : List[List[str]]
Data to show
"""
style = getSampleStyleSheet()["BodyText"]
header = Paragraph(f"<bold><font size=14>{header_txt}</font></bold>", style)
t = Table(data)
t.setStyle(
TableStyle(
[
("BOX", (0, 0), (-1, -1), 0.25, colors.black),
("INNERGRID", (0, 0), (-1, -1), 0.25, colors.black),
]
)
)
for each in range(len(data)):
bg_color = colors.whitesmoke if each % 2 == 0 else colors.lightgrey
t.setStyle(TableStyle([("BACKGROUND", (0, each), (-1, each), bg_color)]))
_, h = header.wrap(aW, aH)
header.drawOn(report, x, aH)
aH = aH - h
_, h = t.wrap(aW, aH)
t.drawOn(report, x, aH - h)
| 27.314815 | 84 | 0.610169 | __docformat__ = "numpy"
from datetime import datetime
from typing import List
from reportlab.lib import colors
from reportlab.pdfgen import canvas
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.platypus import Paragraph, Table, TableStyle
def base_format(report: canvas.Canvas, header: str) -> None:
report.setLineWidth(0.3)
report.setFont("Helvetica", 12)
report.drawString(30, 760, "OpenBB Terminal")
report.drawString(500, 760, datetime.now().strftime("%Y/%m/%d"))
report.drawString(275, 750, "Annual Report")
report.line(50, 730, 580, 730)
report.setFont("Helvetica", 20)
report.drawString(50, 705, header)
report.setFont("Helvetica", 12)
def draw_paragraph(
report: canvas.Canvas, msg: str, x: int, y: int, max_width: int, max_height: int
) -> None:
message_style = ParagraphStyle("Normal")
message = msg.replace("\n", "<br />")
paragraph = Paragraph(message, style=message_style)
_, h = paragraph.wrap(max_width, max_height)
paragraph.drawOn(report, x, y - h)
def draw_table(
report: canvas.Canvas,
header_txt: str,
aW: int,
aH: int,
x: int,
data: List[List[str]],
) -> None:
style = getSampleStyleSheet()["BodyText"]
header = Paragraph(f"<bold><font size=14>{header_txt}</font></bold>", style)
t = Table(data)
t.setStyle(
TableStyle(
[
("BOX", (0, 0), (-1, -1), 0.25, colors.black),
("INNERGRID", (0, 0), (-1, -1), 0.25, colors.black),
]
)
)
for each in range(len(data)):
bg_color = colors.whitesmoke if each % 2 == 0 else colors.lightgrey
t.setStyle(TableStyle([("BACKGROUND", (0, each), (-1, each), bg_color)]))
_, h = header.wrap(aW, aH)
header.drawOn(report, x, aH)
aH = aH - h
_, h = t.wrap(aW, aH)
t.drawOn(report, x, aH - h)
| true | true |
f722a879937c014080a753f5ec6fbc90db70abc0 | 12,619 | py | Python | libcloud/common/base.py | cloudkick/libcloud | 9c8605e1518c6b5e2511f0780e1946089a7256dd | [
"Apache-2.0"
] | 4 | 2015-11-04T10:32:52.000Z | 2020-04-06T03:39:01.000Z | libcloud/common/base.py | cloudkick/libcloud | 9c8605e1518c6b5e2511f0780e1946089a7256dd | [
"Apache-2.0"
] | null | null | null | libcloud/common/base.py | cloudkick/libcloud | 9c8605e1518c6b5e2511f0780e1946089a7256dd | [
"Apache-2.0"
] | 6 | 2015-01-05T23:33:17.000Z | 2020-04-06T03:39:34.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import urllib
import time
import hashlib
import StringIO
import ssl
import os
import socket
import struct
from pipes import quote as pquote
import libcloud
from libcloud.httplib_ssl import LibcloudHTTPSConnection
from httplib import HTTPConnection as LibcloudHTTPConnection
class RawResponse(object):
def __init__(self, response=None):
self._status = None
self._response = None
self._headers = {}
self._error = None
self._reason = None
@property
def response(self):
if not self._response:
self._response = self.connection.connection.getresponse()
return self._response
@property
def status(self):
if not self._status:
self._status = self.response.status
return self._status
@property
def headers(self):
if not self._headers:
self._headers = dict(self.response.getheaders())
return self._headers
@property
def reason(self):
if not self._reason:
self._reason = self.response.reason
return self._reason
class Response(object):
"""
A Base Response class to derive from.
"""
NODE_STATE_MAP = {}
object = None
body = None
status = httplib.OK
headers = {}
error = None
connection = None
def __init__(self, response):
self.body = response.read()
self.status = response.status
self.headers = dict(response.getheaders())
self.error = response.reason
if not self.success():
raise Exception(self.parse_error())
self.object = self.parse_body()
def parse_body(self):
"""
Parse response body.
Override in a provider's subclass.
@return: Parsed body.
"""
return self.body
def parse_error(self):
"""
Parse the error messages.
Override in a provider's subclass.
@return: Parsed error.
"""
return self.body
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
@return: C{True} or C{False}
"""
return self.status == httplib.OK or self.status == httplib.CREATED
#TODO: Move this to a better location/package
class LoggingConnection():
    """
    Debug class to log all HTTP(s) requests as they could be made
    with the C{curl} command.

    @cvar log: file-like object that logs entries are written to.
    """
    log = None

    def _log_response(self, r):
        # Serialize the response as raw HTTP text for the log, then rebuild
        # a replayable response object, because reading the body here
        # consumes the original one.
        rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
        ht = ""
        v = r.version
        if r.version == 10:
            v = "HTTP/1.0"
        if r.version == 11:
            v = "HTTP/1.1"
        ht += "%s %s %s\r\n" % (v, r.status, r.reason)
        body = r.read()
        for h in r.getheaders():
            ht += "%s: %s\r\n" % (h[0].title(), h[1])
        ht += "\r\n"
        # Minimal fake socket: just enough for httplib.HTTPResponse to
        # re-parse the serialized text via makefile().
        class fakesock:
            def __init__(self, s):
                self.s = s
            def makefile(self, mode, foo):
                return StringIO.StringIO(self.s)
        rr = r
        if r.chunked:
            # Re-encode the already-read body as a single chunk followed by
            # the zero-length terminating chunk.
            ht += "%x\r\n" % (len(body))
            ht += body
            ht += "\r\n0\r\n"
        else:
            ht += body
        # Build a fresh HTTPResponse from the serialized text so the caller
        # can still read the body after logging.
        rr = httplib.HTTPResponse(fakesock(ht),
                                  method=r._method,
                                  debuglevel=r.debuglevel)
        rr.begin()
        rv += ht
        rv += ("\n# -------- end %d:%d response ----------\n"
               % (id(self), id(r)))
        return (rr, rv)

    def _log_curl(self, method, url, body, headers):
        # Build an equivalent shell-quoted curl command line for the request.
        cmd = ["curl", "-i"]
        cmd.extend(["-X", pquote(method)])
        for h in headers:
            cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])
        # TODO: in python 2.6, body can be a file-like object.
        if body is not None and len(body) > 0:
            cmd.extend(["--data-binary", pquote(body)])
        # NOTE(review): assumes an HTTPS endpoint regardless of the actual
        # connection class -- confirm before relying on the logged command.
        cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))])
        return " ".join(cmd)
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
    """
    Utility Class for logging HTTPS connections
    """

    def getresponse(self):
        """Fetch the response; when logging is enabled, log its raw HTTP
        text and return the replayable copy produced by _log_response."""
        r = LibcloudHTTPSConnection.getresponse(self)
        if self.log is not None:
            # _log_response consumes the body, so substitute the rebuilt
            # response it returns for the original.
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        """Issue a request, tagging it with a per-connection request id and
        optionally logging it as an equivalent curl command."""
        # Bug fix: ``headers`` defaults to None but was mutated
        # unconditionally, raising AttributeError when omitted.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = "# -------- begin %d request ----------\n" % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPSConnection.request(self, method, url, body, headers)
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
    """
    Utility Class for logging HTTP connections
    """

    def getresponse(self):
        """Fetch the response; when logging is enabled, log its raw HTTP
        text and return the replayable copy produced by _log_response."""
        r = LibcloudHTTPConnection.getresponse(self)
        if self.log is not None:
            # _log_response consumes the body, so substitute the rebuilt
            # response it returns for the original.
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        """Issue a request, tagging it with a per-connection request id and
        optionally logging it as an equivalent curl command."""
        # Bug fix: ``headers`` defaults to None but was mutated
        # unconditionally, raising AttributeError when omitted.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = "# -------- begin %d request ----------\n" % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPConnection.request(self, method, url,
                                              body, headers)
class ConnectionKey(object):
    """
    A Base Connection class to derive from.

    Holds the API key, selects an HTTP or HTTPS connection class based on
    the ``secure`` flag, and provides the generic :meth:`request` wrapper
    that drivers build on.
    """
    #conn_classes = (LoggingHTTPSConnection)
    # Indexed by ``self.secure`` (0 -> plain HTTP, 1 -> HTTPS).
    conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)

    responseCls = Response
    rawResponseCls = RawResponse
    connection = None
    host = '127.0.0.1'
    # Default ports as (http, https), also indexed by ``self.secure``.
    port = (80, 443)
    secure = 1
    driver = None
    action = None

    def __init__(self, key, secure=True, host=None, force_port=None):
        """
        Initialize `key`; set `secure` to an C{int} based on the
        passed value.
        """
        self.key = key
        # NOTE(review): ``and/or`` trick coerces the flag to 1/0 so it can
        # index the port/connection-class tuples.
        self.secure = secure and 1 or 0
        self.ua = []
        if host:
            self.host = host
        if force_port:
            # A forced port is used for both the insecure and secure slot.
            self.port = (force_port, force_port)

    def connect(self, host=None, port=None):
        """
        Establish a connection with the API server.

        @type host: C{str}
        @param host: Optional host to override our default

        @type port: C{int}
        @param port: Optional port to override our default

        @returns: A connection
        """
        host = host or self.host
        port = port or self.port[self.secure]
        kwargs = {'host': host, 'port': port}
        connection = self.conn_classes[self.secure](**kwargs)
        # You can uncomment this line, if you setup a reverse proxy server
        # which proxies to your endpoint, and lets you easily capture
        # connections in cleartext when you setup the proxy to do SSL
        # for you
        #connection = self.conn_classes[False]("127.0.0.1", 8080)
        self.connection = connection

    def _user_agent(self):
        # Compose the User-Agent from the library version, driver name and
        # any tokens registered via user_agent_append().
        return 'libcloud/%s (%s)%s' % (
            libcloud.__version__,
            self.driver.name,
            "".join([" (%s)" % x for x in self.ua]))

    def user_agent_append(self, token):
        """
        Append a token to a user agent string.

        Users of the library should call this to uniquely identify their
        requests to a provider.

        @type token: C{str}
        @param token: Token to add to the user agent.
        """
        self.ua.append(token)

    def request(self,
                action,
                params=None,
                data='',
                headers=None,
                method='GET',
                raw=False):
        """
        Request a given `action`.

        Basically a wrapper around the connection
        object's `request` that does some helpful pre-processing.

        @type action: C{str}
        @param action: A path

        @type params: C{dict}
        @param params: Optional mapping of additional parameters to send. If
            None, leave as an empty C{dict}.

        @type data: C{unicode}
        @param data: A body of data to send with the request.

        @type headers: C{dict}
        @param headers: Extra headers to add to the request. If
            None, leave as an empty C{dict}.

        @type method: C{str}
        @param method: An HTTP method such as "GET" or "POST".

        @type raw: C{bool}
        @param raw: When True, only the request line and headers are sent
            (via putrequest/putheader) and a I{rawResponseCls} is returned,
            leaving the body for the caller to stream.

        @return: An instance of type I{responseCls}
        """
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        self.action = action
        self.method = method
        # Extend default parameters
        params = self.add_default_params(params)
        # Extend default headers
        headers = self.add_default_headers(headers)
        # We always send a content length and user-agent header
        headers.update({'User-Agent': self._user_agent()})
        headers.update({'Host': self.host})
        # Encode data if necessary
        # NOTE(review): ``!= None`` should be ``is not None``; kept as-is to
        # avoid touching behavior in a documentation pass.
        if data != '' and data != None:
            data = self.encode_data(data)
        if data is not None:
            headers.update({'Content-Length': str(len(data))})
        if params:
            url = '?'.join((action, urllib.urlencode(params)))
        else:
            url = action
        # Removed terrible hack...this a less-bad hack that doesn't execute a
        # request twice, but it's still a hack.
        self.connect()
        try:
            # @TODO: Should we just pass File object as body to request method
            # instead of dealing with splitting and sending the file ourselves?
            if raw:
                # Raw mode: send only request line + headers; the caller
                # streams the body itself over self.connection.
                self.connection.putrequest(method, action)
                for key, value in headers.iteritems():
                    self.connection.putheader(key, value)
                self.connection.endheaders()
            else:
                self.connection.request(method=method, url=url, body=data,
                                        headers=headers)
        except ssl.SSLError, e:
            # Re-raise with a plain string message to normalize the error.
            raise ssl.SSLError(str(e))
        if raw:
            # RawResponse fetches the actual response lazily via
            # ``response.connection`` (set just below).
            response = self.rawResponseCls()
        else:
            response = self.responseCls(self.connection.getresponse())
        response.connection = self
        return response

    def add_default_params(self, params):
        """
        Adds default parameters (such as API key, version, etc.)
        to the passed `params`

        Should return a dictionary.
        """
        return params

    def add_default_headers(self, headers):
        """
        Adds default headers (such as Authorization, X-Foo-Bar)
        to the passed `headers`

        Should return a dictionary.
        """
        return headers

    def encode_data(self, data):
        """
        Encode body data.

        Override in a provider's subclass.
        """
        return data
class ConnectionUserAndKey(ConnectionKey):
    """
    Base connection for drivers that authenticate with a user id in
    addition to an API key.
    """

    user_id = None

    def __init__(self, user_id, key, secure=True, host=None, port=None):
        """Store ``user_id`` and delegate everything else to ConnectionKey."""
        super(ConnectionUserAndKey, self).__init__(key, secure=secure,
                                                   host=host,
                                                   force_port=port)
        self.user_id = user_id
| 29.973872 | 81 | 0.571044 |
import httplib
import urllib
import time
import hashlib
import StringIO
import ssl
import os
import socket
import struct
from pipes import quote as pquote
import libcloud
from libcloud.httplib_ssl import LibcloudHTTPSConnection
from httplib import HTTPConnection as LibcloudHTTPConnection
class RawResponse(object):
    """
    Lazily-evaluated response wrapper: nothing is read from the connection
    until one of the properties is first accessed.
    """

    def __init__(self, response=None):
        # NOTE(review): the ``response`` argument is accepted but never
        # stored; the real response is fetched lazily through
        # ``self.connection`` (assigned by the caller after construction,
        # see ConnectionKey.request).
        self._status = None
        self._response = None
        self._headers = {}
        self._error = None
        self._reason = None

    @property
    def response(self):
        # Fetch and cache the underlying response on first access.
        # ``not`` (rather than ``is None``) also re-fetches falsy values.
        if not self._response:
            self._response = self.connection.connection.getresponse()
        return self._response

    @property
    def status(self):
        if not self._status:
            self._status = self.response.status
        return self._status

    @property
    def headers(self):
        # An empty header dict is falsy, so it would be re-fetched until a
        # non-empty result is seen.
        if not self._headers:
            self._headers = dict(self.response.getheaders())
        return self._headers

    @property
    def reason(self):
        if not self._reason:
            self._reason = self.response.reason
        return self._reason
class Response(object):
"""
A Base Response class to derive from.
"""
NODE_STATE_MAP = {}
object = None
body = None
status = httplib.OK
headers = {}
error = None
connection = None
def __init__(self, response):
self.body = response.read()
self.status = response.status
self.headers = dict(response.getheaders())
self.error = response.reason
if not self.success():
raise Exception(self.parse_error())
self.object = self.parse_body()
def parse_body(self):
"""
Parse response body.
Override in a provider's subclass.
@return: Parsed body.
"""
return self.body
def parse_error(self):
"""
Parse the error messages.
Override in a provider's subclass.
@return: Parsed error.
"""
return self.body
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
@return: C{True} or C{False}
"""
return self.status == httplib.OK or self.status == httplib.CREATED
class LoggingConnection():
"""
Debug class to log all HTTP(s) requests as they could be made
with the C{curl} command.
@cvar log: file-like object that logs entries are written to.
"""
log = None
def _log_response(self, r):
rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
ht = ""
v = r.version
if r.version == 10:
v = "HTTP/1.0"
if r.version == 11:
v = "HTTP/1.1"
ht += "%s %s %s\r\n" % (v, r.status, r.reason)
body = r.read()
for h in r.getheaders():
ht += "%s: %s\r\n" % (h[0].title(), h[1])
ht += "\r\n"
class fakesock:
def __init__(self, s):
self.s = s
def makefile(self, mode, foo):
return StringIO.StringIO(self.s)
rr = r
if r.chunked:
ht += "%x\r\n" % (len(body))
ht += body
ht += "\r\n0\r\n"
else:
ht += body
rr = httplib.HTTPResponse(fakesock(ht),
method=r._method,
debuglevel=r.debuglevel)
rr.begin()
rv += ht
rv += ("\n# -------- end %d:%d response ----------\n"
% (id(self), id(r)))
return (rr, rv)
def _log_curl(self, method, url, body, headers):
cmd = ["curl", "-i"]
cmd.extend(["-X", pquote(method)])
for h in headers:
cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])
if body is not None and len(body) > 0:
cmd.extend(["--data-binary", pquote(body)])
cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))])
return " ".join(cmd)
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
"""
Utility Class for logging HTTPS connections
"""
def getresponse(self):
r = LibcloudHTTPSConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
def request(self, method, url, body=None, headers=None):
headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
return LibcloudHTTPSConnection.request(self, method, url, body, headers)
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
"""
Utility Class for logging HTTP connections
"""
def getresponse(self):
r = LibcloudHTTPConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
def request(self, method, url, body=None, headers=None):
headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
return LibcloudHTTPConnection.request(self, method, url,
body, headers)
class ConnectionKey(object):
"""
A Base Connection class to derive from.
"""
conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)
responseCls = Response
rawResponseCls = RawResponse
connection = None
host = '127.0.0.1'
port = (80, 443)
secure = 1
driver = None
action = None
def __init__(self, key, secure=True, host=None, force_port=None):
"""
Initialize `user_id` and `key`; set `secure` to an C{int} based on
passed value.
"""
self.key = key
self.secure = secure and 1 or 0
self.ua = []
if host:
self.host = host
if force_port:
self.port = (force_port, force_port)
def connect(self, host=None, port=None):
"""
Establish a connection with the API server.
@type host: C{str}
@param host: Optional host to override our default
@type port: C{int}
@param port: Optional port to override our default
@returns: A connection
"""
host = host or self.host
port = port or self.port[self.secure]
kwargs = {'host': host, 'port': port}
connection = self.conn_classes[self.secure](**kwargs)
self.connection = connection
def _user_agent(self):
return 'libcloud/%s (%s)%s' % (
libcloud.__version__,
self.driver.name,
"".join([" (%s)" % x for x in self.ua]))
def user_agent_append(self, token):
"""
Append a token to a user agent string.
Users of the library should call this to uniquely identify thier requests
to a provider.
@type token: C{str}
@param token: Token to add to the user agent.
"""
self.ua.append(token)
def request(self,
action,
params=None,
data='',
headers=None,
method='GET',
raw=False):
"""
Request a given `action`.
Basically a wrapper around the connection
object's `request` that does some helpful pre-processing.
@type action: C{str}
@param action: A path
@type params: C{dict}
@param params: Optional mapping of additional parameters to send. If
None, leave as an empty C{dict}.
@type data: C{unicode}
@param data: A body of data to send with the request.
@type headers: C{dict}
@param headers: Extra headers to add to the request
None, leave as an empty C{dict}.
@type method: C{str}
@param method: An HTTP method such as "GET" or "POST".
@return: An instance of type I{responseCls}
"""
if params is None:
params = {}
if headers is None:
headers = {}
self.action = action
self.method = method
# Extend default parameters
params = self.add_default_params(params)
# Extend default headers
headers = self.add_default_headers(headers)
# We always send a content length and user-agent header
headers.update({'User-Agent': self._user_agent()})
headers.update({'Host': self.host})
# Encode data if necessary
if data != '' and data != None:
data = self.encode_data(data)
if data is not None:
headers.update({'Content-Length': str(len(data))})
if params:
url = '?'.join((action, urllib.urlencode(params)))
else:
url = action
# Removed terrible hack...this a less-bad hack that doesn't execute a
self.connect()
try:
# @TODO: Should we just pass File object as body to request method
# instead of dealing with splitting and sending the file ourselves?
if raw:
self.connection.putrequest(method, action)
for key, value in headers.iteritems():
self.connection.putheader(key, value)
self.connection.endheaders()
else:
self.connection.request(method=method, url=url, body=data,
headers=headers)
except ssl.SSLError, e:
raise ssl.SSLError(str(e))
if raw:
response = self.rawResponseCls()
else:
response = self.responseCls(self.connection.getresponse())
response.connection = self
return response
def add_default_params(self, params):
"""
Adds default parameters (such as API key, version, etc.)
to the passed `params`
Should return a dictionary.
"""
return params
def add_default_headers(self, headers):
"""
Adds default headers (such as Authorization, X-Foo-Bar)
to the passed `headers`
Should return a dictionary.
"""
return headers
def encode_data(self, data):
"""
Encode body data.
Override in a provider's subclass.
"""
return data
class ConnectionUserAndKey(ConnectionKey):
"""
Base connection which accepts a user_id and key
"""
user_id = None
def __init__(self, user_id, key, secure=True, host=None, port=None):
super(ConnectionUserAndKey, self).__init__(key, secure, host, port)
self.user_id = user_id
| false | true |
f722a90d046be88487290038647fbe8c9ea42441 | 10,411 | py | Python | tests/contrib/test_fixers.py | regragui-cobra/werkzeug | 710b25614d089157c8baa7b46518d38e33861b5f | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/test_fixers.py | regragui-cobra/werkzeug | 710b25614d089157c8baa7b46518d38e33861b5f | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/test_fixers.py | regragui-cobra/werkzeug | 710b25614d089157c8baa7b46518d38e33861b5f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
tests.fixers
~~~~~~~~~~~~
Server / Browser fixers.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
from werkzeug.contrib import fixers
from werkzeug.datastructures import ResponseCacheControl
from werkzeug.http import parse_cache_control_header
from werkzeug.routing import Map, Rule
from werkzeug.test import Client, create_environ
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response
@Request.application
def path_check_app(request):
    """Echo the request's PATH_INFO and SCRIPT_NAME for test inspection."""
    environ = request.environ
    report = 'PATH_INFO: {0}\nSCRIPT_NAME: {1}'.format(
        environ.get('PATH_INFO', ''),
        environ.get('SCRIPT_NAME', ''),
    )
    return Response(report)
class TestServerFixer(object):
    """Tests for the server-side WSGI fixers in werkzeug.contrib.fixers."""

    def test_cgi_root_fix(self):
        # Default app_root collapses SCRIPT_NAME to the empty root.
        app = fixers.CGIRootFix(path_check_app)
        response = Response.from_app(
            app,
            dict(create_environ(),
                 SCRIPT_NAME='/foo',
                 PATH_INFO='/bar'))
        assert response.get_data() == b'PATH_INFO: /bar\nSCRIPT_NAME: '

    def test_cgi_root_fix_custom_app_root(self):
        # A custom app_root replaces SCRIPT_NAME (stripped of slashes).
        app = fixers.CGIRootFix(path_check_app, app_root='/baz/')
        response = Response.from_app(
            app,
            dict(create_environ(),
                 SCRIPT_NAME='/foo',
                 PATH_INFO='/bar'))
        assert response.get_data() == b'PATH_INFO: /bar\nSCRIPT_NAME: baz'

    def test_path_info_from_request_uri_fix(self):
        # The fixer should derive PATH_INFO from any of the three server
        # variables, percent-decoding it and dropping the query string.
        app = fixers.PathInfoFromRequestUriFix(path_check_app)
        for key in 'REQUEST_URI', 'REQUEST_URL', 'UNENCODED_URL':
            env = dict(create_environ(), SCRIPT_NAME='/test', PATH_INFO='/?????')
            env[key] = '/test/foo%25bar?drop=this'
            response = Response.from_app(app, env)
            assert response.get_data() == b'PATH_INFO: /foo%bar\nSCRIPT_NAME: /test'

    # Each case: ProxyFix kwargs, environ overrides, expected url_root.
    @pytest.mark.parametrize(('kwargs', 'base', 'url_root'), (
        pytest.param({}, {
            'REMOTE_ADDR': '192.168.0.2',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.1',
        }, 'http://spam/', id='for'),
        pytest.param({'x_proto': 1}, {
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_PROTO': 'https',
        }, 'https://spam/', id='proto'),
        pytest.param({'x_host': 1}, {
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_HOST': 'eggs',
        }, 'http://eggs/', id='host'),
        pytest.param({'x_port': 1}, {
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_PORT': '8080',
        }, 'http://spam:8080/', id='port, host without port'),
        pytest.param({'x_port': 1}, {
            'HTTP_HOST': 'spam:9000',
            'HTTP_X_FORWARDED_PORT': '8080',
        }, 'http://spam:8080/', id='port, host with port'),
        pytest.param({'x_port': 1}, {
            'SERVER_NAME': 'spam',
            'SERVER_PORT': '9000',
            'HTTP_X_FORWARDED_PORT': '8080',
        }, 'http://spam:8080/', id='port, name'),
        pytest.param({'x_prefix': 1}, {
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_PREFIX': '/eggs',
        }, 'http://spam/eggs/', id='prefix'),
        pytest.param({
            'x_for': 1, 'x_proto': 1, 'x_host': 1, 'x_port': 1, 'x_prefix': 1
        }, {
            'REMOTE_ADDR': '192.168.0.2',
            'HTTP_HOST': 'spam:9000',
            'HTTP_X_FORWARDED_FOR': '192.168.0.1',
            'HTTP_X_FORWARDED_PROTO': 'https',
            'HTTP_X_FORWARDED_HOST': 'eggs',
            'HTTP_X_FORWARDED_PORT': '443',
            'HTTP_X_FORWARDED_PREFIX': '/ham',
        }, 'https://eggs/ham/', id='all'),
        pytest.param({'x_for': 2}, {
            'REMOTE_ADDR': '192.168.0.3',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.1, 192.168.0.2',
        }, 'http://spam/', id='multiple for'),
        pytest.param({'x_for': 0}, {
            'REMOTE_ADDR': '192.168.0.1',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.2',
        }, 'http://spam/', id='ignore 0'),
        pytest.param({'x_for': 3}, {
            'REMOTE_ADDR': '192.168.0.1',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.3, 192.168.0.2',
        }, 'http://spam/', id='ignore len < trusted'),
        pytest.param({}, {
            'REMOTE_ADDR': '192.168.0.2',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.3, 192.168.0.1',
        }, 'http://spam/', id='ignore untrusted'),
        pytest.param({'x_for': 2}, {
            'REMOTE_ADDR': '192.168.0.1',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': ', 192.168.0.3'
        }, 'http://spam/', id='ignore empty'),
        pytest.param({'x_for': 2, 'x_prefix': 1}, {
            'REMOTE_ADDR': '192.168.0.2',
            'HTTP_HOST': 'spam',
            'HTTP_X_FORWARDED_FOR': '192.168.0.1, 192.168.0.3',
            'HTTP_X_FORWARDED_PREFIX': '/ham, /eggs',
        }, 'http://spam/eggs/', id='prefix < for')
    ))
    def test_proxy_fix_new(self, kwargs, base, url_root):
        @Request.application
        def app(request):
            # for header
            assert request.remote_addr == '192.168.0.1'
            # proto, host, port, prefix headers
            assert request.url_root == url_root

            urls = url_map.bind_to_environ(request.environ)
            # build includes prefix
            assert urls.build('parrot') == '/'.join((
                request.script_root, 'parrot'))
            # match doesn't include prefix
            assert urls.match('/parrot')[0] == 'parrot'

            return Response('success')

        url_map = Map([Rule('/parrot', endpoint='parrot')])
        app = fixers.ProxyFix(app, **kwargs)

        base.setdefault('REMOTE_ADDR', '192.168.0.1')
        environ = create_environ(environ_overrides=base)
        # host is always added, remove it if the test doesn't set it
        if 'HTTP_HOST' not in base:
            del environ['HTTP_HOST']

        # ensure app request has correct headers
        response = Response.from_app(app, environ)
        assert response.get_data() == b'success'

        # ensure redirect location is correct
        redirect_app = redirect(
            url_map.bind_to_environ(environ).build('parrot'))
        response = Response.from_app(redirect_app, environ)
        location = response.headers['Location']
        assert location == url_root + 'parrot'

    def test_proxy_fix_deprecations(self):
        # num_proxies / positional count is the deprecated spelling of x_for.
        app = pytest.deprecated_call(fixers.ProxyFix, None, 2)
        assert app.x_for == 2

        with pytest.deprecated_call():
            assert app.num_proxies == 2

        with pytest.deprecated_call():
            assert app.get_remote_addr(['spam', 'eggs']) == 'spam'

    def test_header_rewriter_fix(self):
        @Request.application
        def application(request):
            return Response("", headers=[
                ('X-Foo', 'bar')
            ])

        # Remove X-Foo, inject X-Bar on every response.
        application = fixers.HeaderRewriterFix(application, ('X-Foo',), (('X-Bar', '42'),))
        response = Response.from_app(application, create_environ())
        assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
        assert 'X-Foo' not in response.headers
        assert response.headers['X-Bar'] == '42'
class TestBrowserFixer(object):
    """Tests for the browser-side InternetExplorerFix."""

    def test_ie_fixes(self):
        @fixers.InternetExplorerFix
        @Request.application
        def application(request):
            response = Response('binary data here', mimetype='application/vnd.ms-excel')
            response.headers['Vary'] = 'Cookie'
            response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
            return response

        c = Client(application, Response)
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])

        # IE gets no vary
        assert response.get_data() == b'binary data here'
        assert 'vary' not in response.headers
        assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
        assert response.headers['content-type'] == 'application/vnd.ms-excel'

        # other browsers do
        c = Client(application, Response)
        response = c.get('/')
        assert response.get_data() == b'binary data here'
        assert 'vary' in response.headers

        cc = ResponseCacheControl()
        cc.no_cache = True

        # The closure below reads ``pragma`` and ``cc``, which are mutated
        # between requests to exercise different header combinations.
        @fixers.InternetExplorerFix
        @Request.application
        def application(request):
            response = Response('binary data here', mimetype='application/vnd.ms-excel')
            response.headers['Pragma'] = ', '.join(pragma)
            response.headers['Cache-Control'] = cc.to_header()
            response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
            return response

        # IE has no pragma or cache control
        pragma = ('no-cache',)
        c = Client(application, Response)
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])
        assert response.get_data() == b'binary data here'
        assert 'pragma' not in response.headers
        assert 'cache-control' not in response.headers
        assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'

        # IE has simplified pragma
        pragma = ('no-cache', 'x-foo')
        cc.proxy_revalidate = True
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])
        assert response.get_data() == b'binary data here'
        assert response.headers['pragma'] == 'x-foo'
        assert response.headers['cache-control'] == 'proxy-revalidate'
        assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'

        # regular browsers get everything
        response = c.get('/')
        assert response.get_data() == b'binary data here'
        assert response.headers['pragma'] == 'no-cache, x-foo'
        cc = parse_cache_control_header(response.headers['cache-control'],
                                        cls=ResponseCacheControl)
        assert cc.no_cache
        assert cc.proxy_revalidate
        assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
| 39.585551 | 91 | 0.57324 |
import pytest
from werkzeug.contrib import fixers
from werkzeug.datastructures import ResponseCacheControl
from werkzeug.http import parse_cache_control_header
from werkzeug.routing import Map, Rule
from werkzeug.test import Client, create_environ
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response
@Request.application
def path_check_app(request):
return Response('PATH_INFO: %s\nSCRIPT_NAME: %s' % (
request.environ.get('PATH_INFO', ''),
request.environ.get('SCRIPT_NAME', '')
))
class TestServerFixer(object):
def test_cgi_root_fix(self):
app = fixers.CGIRootFix(path_check_app)
response = Response.from_app(
app,
dict(create_environ(),
SCRIPT_NAME='/foo',
PATH_INFO='/bar'))
assert response.get_data() == b'PATH_INFO: /bar\nSCRIPT_NAME: '
def test_cgi_root_fix_custom_app_root(self):
app = fixers.CGIRootFix(path_check_app, app_root='/baz/')
response = Response.from_app(
app,
dict(create_environ(),
SCRIPT_NAME='/foo',
PATH_INFO='/bar'))
assert response.get_data() == b'PATH_INFO: /bar\nSCRIPT_NAME: baz'
def test_path_info_from_request_uri_fix(self):
app = fixers.PathInfoFromRequestUriFix(path_check_app)
for key in 'REQUEST_URI', 'REQUEST_URL', 'UNENCODED_URL':
env = dict(create_environ(), SCRIPT_NAME='/test', PATH_INFO='/?????')
env[key] = '/test/foo%25bar?drop=this'
response = Response.from_app(app, env)
assert response.get_data() == b'PATH_INFO: /foo%bar\nSCRIPT_NAME: /test'
@pytest.mark.parametrize(('kwargs', 'base', 'url_root'), (
pytest.param({}, {
'REMOTE_ADDR': '192.168.0.2',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.1',
}, 'http://spam/', id='for'),
pytest.param({'x_proto': 1}, {
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_PROTO': 'https',
}, 'https://spam/', id='proto'),
pytest.param({'x_host': 1}, {
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_HOST': 'eggs',
}, 'http://eggs/', id='host'),
pytest.param({'x_port': 1}, {
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_PORT': '8080',
}, 'http://spam:8080/', id='port, host without port'),
pytest.param({'x_port': 1}, {
'HTTP_HOST': 'spam:9000',
'HTTP_X_FORWARDED_PORT': '8080',
}, 'http://spam:8080/', id='port, host with port'),
pytest.param({'x_port': 1}, {
'SERVER_NAME': 'spam',
'SERVER_PORT': '9000',
'HTTP_X_FORWARDED_PORT': '8080',
}, 'http://spam:8080/', id='port, name'),
pytest.param({'x_prefix': 1}, {
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_PREFIX': '/eggs',
}, 'http://spam/eggs/', id='prefix'),
pytest.param({
'x_for': 1, 'x_proto': 1, 'x_host': 1, 'x_port': 1, 'x_prefix': 1
}, {
'REMOTE_ADDR': '192.168.0.2',
'HTTP_HOST': 'spam:9000',
'HTTP_X_FORWARDED_FOR': '192.168.0.1',
'HTTP_X_FORWARDED_PROTO': 'https',
'HTTP_X_FORWARDED_HOST': 'eggs',
'HTTP_X_FORWARDED_PORT': '443',
'HTTP_X_FORWARDED_PREFIX': '/ham',
}, 'https://eggs/ham/', id='all'),
pytest.param({'x_for': 2}, {
'REMOTE_ADDR': '192.168.0.3',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.1, 192.168.0.2',
}, 'http://spam/', id='multiple for'),
pytest.param({'x_for': 0}, {
'REMOTE_ADDR': '192.168.0.1',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.2',
}, 'http://spam/', id='ignore 0'),
pytest.param({'x_for': 3}, {
'REMOTE_ADDR': '192.168.0.1',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.3, 192.168.0.2',
}, 'http://spam/', id='ignore len < trusted'),
pytest.param({}, {
'REMOTE_ADDR': '192.168.0.2',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.3, 192.168.0.1',
}, 'http://spam/', id='ignore untrusted'),
pytest.param({'x_for': 2}, {
'REMOTE_ADDR': '192.168.0.1',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': ', 192.168.0.3'
}, 'http://spam/', id='ignore empty'),
pytest.param({'x_for': 2, 'x_prefix': 1}, {
'REMOTE_ADDR': '192.168.0.2',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': '192.168.0.1, 192.168.0.3',
'HTTP_X_FORWARDED_PREFIX': '/ham, /eggs',
}, 'http://spam/eggs/', id='prefix < for')
))
def test_proxy_fix_new(self, kwargs, base, url_root):
@Request.application
def app(request):
assert request.remote_addr == '192.168.0.1'
assert request.url_root == url_root
urls = url_map.bind_to_environ(request.environ)
assert urls.build('parrot') == '/'.join((
request.script_root, 'parrot'))
assert urls.match('/parrot')[0] == 'parrot'
return Response('success')
url_map = Map([Rule('/parrot', endpoint='parrot')])
app = fixers.ProxyFix(app, **kwargs)
base.setdefault('REMOTE_ADDR', '192.168.0.1')
environ = create_environ(environ_overrides=base)
# host is always added, remove it if the test doesn't set it
if 'HTTP_HOST' not in base:
del environ['HTTP_HOST']
response = Response.from_app(app, environ)
assert response.get_data() == b'success'
redirect_app = redirect(
url_map.bind_to_environ(environ).build('parrot'))
response = Response.from_app(redirect_app, environ)
location = response.headers['Location']
assert location == url_root + 'parrot'
def test_proxy_fix_deprecations(self):
app = pytest.deprecated_call(fixers.ProxyFix, None, 2)
assert app.x_for == 2
with pytest.deprecated_call():
assert app.num_proxies == 2
with pytest.deprecated_call():
assert app.get_remote_addr(['spam', 'eggs']) == 'spam'
def test_header_rewriter_fix(self):
@Request.application
def application(request):
return Response("", headers=[
('X-Foo', 'bar')
])
application = fixers.HeaderRewriterFix(application, ('X-Foo',), (('X-Bar', '42'),))
response = Response.from_app(application, create_environ())
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
assert 'X-Foo' not in response.headers
assert response.headers['X-Bar'] == '42'
class TestBrowserFixer(object):
def test_ie_fixes(self):
@fixers.InternetExplorerFix
@Request.application
def application(request):
response = Response('binary data here', mimetype='application/vnd.ms-excel')
response.headers['Vary'] = 'Cookie'
response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
return response
c = Client(application, Response)
response = c.get('/', headers=[
('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
])
assert response.get_data() == b'binary data here'
assert 'vary' not in response.headers
assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
assert response.headers['content-type'] == 'application/vnd.ms-excel'
c = Client(application, Response)
response = c.get('/')
assert response.get_data() == b'binary data here'
assert 'vary' in response.headers
cc = ResponseCacheControl()
cc.no_cache = True
@fixers.InternetExplorerFix
@Request.application
def application(request):
response = Response('binary data here', mimetype='application/vnd.ms-excel')
response.headers['Pragma'] = ', '.join(pragma)
response.headers['Cache-Control'] = cc.to_header()
response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
return response
pragma = ('no-cache',)
c = Client(application, Response)
response = c.get('/', headers=[
('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
])
assert response.get_data() == b'binary data here'
assert 'pragma' not in response.headers
assert 'cache-control' not in response.headers
assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
pragma = ('no-cache', 'x-foo')
cc.proxy_revalidate = True
response = c.get('/', headers=[
('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
])
assert response.get_data() == b'binary data here'
assert response.headers['pragma'] == 'x-foo'
assert response.headers['cache-control'] == 'proxy-revalidate'
assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
response = c.get('/')
assert response.get_data() == b'binary data here'
assert response.headers['pragma'] == 'no-cache, x-foo'
cc = parse_cache_control_header(response.headers['cache-control'],
cls=ResponseCacheControl)
assert cc.no_cache
assert cc.proxy_revalidate
assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
| true | true |
f722a95789db98a5095805a06f7ae3f2d1775ae4 | 2,249 | py | Python | Packs/CommonScripts/Scripts/ExtractDomainAndFQDNFromUrlAndEmail/ExtractDomainAndFQDNFromUrlAndEmail_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | 1 | 2020-04-19T11:05:42.000Z | 2020-04-19T11:05:42.000Z | Packs/CommonScripts/Scripts/ExtractDomainAndFQDNFromUrlAndEmail/ExtractDomainAndFQDNFromUrlAndEmail_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | null | null | null | Packs/CommonScripts/Scripts/ExtractDomainAndFQDNFromUrlAndEmail/ExtractDomainAndFQDNFromUrlAndEmail_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | 1 | 2020-05-27T15:26:48.000Z | 2020-05-27T15:26:48.000Z | # -*- coding: utf-8 -*-
from ExtractDomainAndFQDNFromUrlAndEmail import extract_fqdn_or_domain
import pytest
@pytest.mark.parametrize('input,fqdn,domain', [ # noqa: E501 disable-secrets-detection
('http://this.is.test.com', 'this.is.test.com', 'test.com'),
('https://caseapi.phishlabs.com', 'caseapi.phishlabs.com', 'phishlabs.com'),
('www.bücher.de', 'www.bücher.de', 'bücher.de'),
('https://urldefense.proofpoint.com/v2/url?u=http-3A__go.getpostman.com_y4wULsdG0h0DDMY0Dv00100&d=DwMFaQ&c=ywDJJevdGcjv4rm9P3FcNg&r=s5kA2oIAQRXsacJiBKmTORIWyRN39ZKhobje2GyRgNs&m=vN1dVSiZvEoM9oExtQqEptm9Dbvq9tnjACDZzrBLaWI&s=zroN7KQdBCPBOfhOmv5SP1DDzZKZ1y9I3x4STS5PbHA&e=', 'go.getpostman.com', 'getpostman.com'), # noqa: E501
('hxxps://www[.]demisto[.]com', 'www.demisto.com', 'demisto.com'),
('https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftwitter.com%2FPhilipsBeLux&data=02|01||cb2462dc8640484baf7608d638d2a698|1a407a2d76754d178692b3ac285306e4|0|0|636758874714819880&sdata=dnJiphWFhnAKsk5Ps0bj0p%2FvXVo8TpidtGZcW6t8lDQ%3D&reserved=0%3E%5bcid:image003.gif@01CF4D7F.1DF62650%5d%3C', '', 'twitter.com'), # noqa: E501 disable-secrets-detection
('dummy@recipient.com', '', 'recipient.com'),
('content-test-service-acc@content-test-236508.iam.gserviceaccount.com', 'content-test-236508.iam.gserviceaccount.com', 'gserviceaccount.com'), # noqa: E501
('CAJaFoefy_acEKaqSMGfojbLzKoUnzfpPcnNemuD6K0oQZ2PikQ@mail.gmail.com', 'mail.gmail.com', 'gmail.com'),
('5be9245893ff486d98c3640879bb2657.protect@whoisguard.com', '', 'whoisguard.com'),
('test@www.bücher.de', 'www.bücher.de', 'bücher.de'),
('test@www[.]demisto[.]com', 'www.demisto.com', 'demisto.com'),
]) # noqa: E124
def test_extract_fqdn_or_domain(input, fqdn, domain):
extracted_fqdn = extract_fqdn_or_domain(input, is_fqdn=True)
extracted_domain = extract_fqdn_or_domain(input, is_domain=True)
assert extracted_fqdn == fqdn
assert extracted_domain == domain
| 86.5 | 394 | 0.66474 |
from ExtractDomainAndFQDNFromUrlAndEmail import extract_fqdn_or_domain
import pytest
@pytest.mark.parametrize('input,fqdn,domain', [
('http://this.is.test.com', 'this.is.test.com', 'test.com'),
('https://caseapi.phishlabs.com', 'caseapi.phishlabs.com', 'phishlabs.com'),
('www.bücher.de', 'www.bücher.de', 'bücher.de'),
('https://urldefense.proofpoint.com/v2/url?u=http-3A__go.getpostman.com_y4wULsdG0h0DDMY0Dv00100&d=DwMFaQ&c=ywDJJevdGcjv4rm9P3FcNg&r=s5kA2oIAQRXsacJiBKmTORIWyRN39ZKhobje2GyRgNs&m=vN1dVSiZvEoM9oExtQqEptm9Dbvq9tnjACDZzrBLaWI&s=zroN7KQdBCPBOfhOmv5SP1DDzZKZ1y9I3x4STS5PbHA&e=', 'go.getpostman.com', 'getpostman.com'),
('hxxps://www[.]demisto[.]com', 'www.demisto.com', 'demisto.com'),
('https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftwitter.com%2FPhilipsBeLux&data=02|01||cb2462dc8640484baf7608d638d2a698|1a407a2d76754d178692b3ac285306e4|0|0|636758874714819880&sdata=dnJiphWFhnAKsk5Ps0bj0p%2FvXVo8TpidtGZcW6t8lDQ%3D&reserved=0%3E%5bcid:image003.gif@01CF4D7F.1DF62650%5d%3C', '', 'twitter.com'),
('dummy@recipient.com', '', 'recipient.com'),
('content-test-service-acc@content-test-236508.iam.gserviceaccount.com', 'content-test-236508.iam.gserviceaccount.com', 'gserviceaccount.com'),
('CAJaFoefy_acEKaqSMGfojbLzKoUnzfpPcnNemuD6K0oQZ2PikQ@mail.gmail.com', 'mail.gmail.com', 'gmail.com'),
('5be9245893ff486d98c3640879bb2657.protect@whoisguard.com', '', 'whoisguard.com'),
('test@www.bücher.de', 'www.bücher.de', 'bücher.de'),
('test@www[.]demisto[.]com', 'www.demisto.com', 'demisto.com'),
])
def test_extract_fqdn_or_domain(input, fqdn, domain):
extracted_fqdn = extract_fqdn_or_domain(input, is_fqdn=True)
extracted_domain = extract_fqdn_or_domain(input, is_domain=True)
assert extracted_fqdn == fqdn
assert extracted_domain == domain
| true | true |
f722a9e3cdb88d56bf510523dd7e8e4563fa59e4 | 2,705 | py | Python | study-drills/sdex19.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | study-drills/sdex19.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | study-drills/sdex19.py | dark-teal-coder/book-learn-python-the-hard-way | e63abddde8c29dcb1c24d8a98116a78b05be67eb | [
"MIT"
] | null | null | null | # Study Drills 19
# 1. Go back through the script and type a comment above each line explaining in English what it does.
# 2. Start at the bottom and read each line backward, saying all the important characters.
# 3. Write at least one more function of your own design, and run it 10 different ways.
# Define a function with the name cheese_and_crackers.
# Create two argument slots called cheese_count and boxes_of_crackers.
def cheese_and_crackers(cheese_count, boxes_of_crackers):
# Print an f-string with an embeded variable cheese_count.
print(f"You have {cheese_count} cheeses!")
# Print an f-string with an embeded variable boxes_of_crackers.
print(f"You have {boxes_of_crackers} boxes of crackers!")
# Print a string.
print("Man that's enough for a party!")
# Print a string.
print("Get a blanket.\n")
# Different ways to give the function the values it needs:
# Print a string.
print("We can just give the function numbers directly:")
# Call the function cheese_and_crackers and pass 2 number arguments to it.
cheese_and_crackers(20, 30)
# Print a string.
print("OR, we can use variables from our script:")
# Assign 10 to the variable amount_of_cheese.
amount_of_cheese = 10
# Assign 50 to the variable amount_of_crackers.
amount_of_crackers = 50
# The variables in your function are not connected to the variables in your script.
# Call the function cheese_and_crackers and pass 2 variable arguments to it.
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# Print a string.
print("We can even do math inside too:")
# Call the function cheese_and_crackers.
# Evaluate the Math expressions in the argument slots before.
# Pass the resulted values to the function.
cheese_and_crackers(10 + 20, 5 + 6)
# Print a string.
print("And we can combine the two, variables and math:")
# Call the function cheese_and_crackers.
# Take the values the variables amount_of_cheese and amount_of_crackers refer to.
# Evaluate the Math expressions in the argument slots before.
# Pass the resulted values to the function.
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
# My own function:
def sum(*args):
argsList = list(args)
sum = 0
for arg in argsList:
argsList.insert(argsList.index(arg), arg)
del argsList[argsList.index(arg) + 1]
for num in argsList:
sum += num
# print(sum)
return sum
num1 = 4
num2 = 1
print(sum(1, 2))
print(sum(1, 2, 3))
print(sum(1, 2 + 3))
print(sum(1 + 2, 3 + 4))
print(sum(num1, num2))
print(sum(1, num1 + 2))
print(sum(1, 2 + 3, num1))
print(sum(1, sum(2, 4)))
print(sum(sum(1, 4), sum(2, 4)))
print(sum(1, 2 + 3, num1, num1 + num2, sum(2, 4, num1))) | 35.12987 | 102 | 0.728281 |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print(f"You have {cheese_count} cheeses!")
print(f"You have {boxes_of_crackers} boxes of crackers!")
print("Man that's enough for a party!")
# Print a string.
print("Get a blanket.\n")
# Different ways to give the function the values it needs:
# Print a string.
print("We can just give the function numbers directly:")
# Call the function cheese_and_crackers and pass 2 number arguments to it.
cheese_and_crackers(20, 30)
# Print a string.
print("OR, we can use variables from our script:")
# Assign 10 to the variable amount_of_cheese.
amount_of_cheese = 10
# Assign 50 to the variable amount_of_crackers.
amount_of_crackers = 50
# The variables in your function are not connected to the variables in your script.
# Call the function cheese_and_crackers and pass 2 variable arguments to it.
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# Print a string.
print("We can even do math inside too:")
# Call the function cheese_and_crackers.
# Evaluate the Math expressions in the argument slots before.
# Pass the resulted values to the function.
cheese_and_crackers(10 + 20, 5 + 6)
# Print a string.
print("And we can combine the two, variables and math:")
# Call the function cheese_and_crackers.
# Take the values the variables amount_of_cheese and amount_of_crackers refer to.
# Evaluate the Math expressions in the argument slots before.
# Pass the resulted values to the function.
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
# My own function:
def sum(*args):
argsList = list(args)
sum = 0
for arg in argsList:
argsList.insert(argsList.index(arg), arg)
del argsList[argsList.index(arg) + 1]
for num in argsList:
sum += num
# print(sum)
return sum
num1 = 4
num2 = 1
print(sum(1, 2))
print(sum(1, 2, 3))
print(sum(1, 2 + 3))
print(sum(1 + 2, 3 + 4))
print(sum(num1, num2))
print(sum(1, num1 + 2))
print(sum(1, 2 + 3, num1))
print(sum(1, sum(2, 4)))
print(sum(sum(1, 4), sum(2, 4)))
print(sum(1, 2 + 3, num1, num1 + num2, sum(2, 4, num1))) | true | true |
f722aa562864d281669358c4022ca282d5b67d7b | 691 | py | Python | emadiff.py | prashlkam/money-spinner | 65b97c2c91f947300a228674620bc508a0bf7cda | [
"Apache-2.0"
] | 1 | 2022-03-26T13:32:28.000Z | 2022-03-26T13:32:28.000Z | emadiff.py | prashlkam/money-spinner | 65b97c2c91f947300a228674620bc508a0bf7cda | [
"Apache-2.0"
] | null | null | null | emadiff.py | prashlkam/money-spinner | 65b97c2c91f947300a228674620bc508a0bf7cda | [
"Apache-2.0"
] | 1 | 2022-03-26T21:51:34.000Z | 2022-03-26T21:51:34.000Z | import math
class emadiff:
def twe_hund_ema_diff(twema, hundema):
return abs(twema - hundema)
def wrapper_Least_ema_diff(stocks, twemas, hundemas):
Stocks_short_list = []
threshold = 0.25
stock_list_size = len(stocks)
if stock_list_size == len(twemas) and stock_list_size == len(hundemas):
for i in range(stock_list_size):
tempdiff = twe_hund_ema_diff(twemas[i], hundemas[i])
if tempdiff <= threshold:
Stocks_short_list.append(stocks[i])
else:
print('List length mismatch - ensure that all 3 lists are of the same size...')
return Stocks_short_list
| 36.368421 | 91 | 0.620839 | import math
class emadiff:
def twe_hund_ema_diff(twema, hundema):
return abs(twema - hundema)
def wrapper_Least_ema_diff(stocks, twemas, hundemas):
Stocks_short_list = []
threshold = 0.25
stock_list_size = len(stocks)
if stock_list_size == len(twemas) and stock_list_size == len(hundemas):
for i in range(stock_list_size):
tempdiff = twe_hund_ema_diff(twemas[i], hundemas[i])
if tempdiff <= threshold:
Stocks_short_list.append(stocks[i])
else:
print('List length mismatch - ensure that all 3 lists are of the same size...')
return Stocks_short_list
| true | true |
f722acf1289eda2268f41fb2f472157f675e873a | 7,784 | py | Python | python/cohorte/composer/node/criteria/reliability/crashing.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | [
"Apache-2.0"
] | 6 | 2015-04-28T16:51:08.000Z | 2017-07-12T11:29:00.000Z | python/cohorte/composer/node/criteria/reliability/crashing.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | [
"Apache-2.0"
] | 29 | 2015-02-24T11:11:26.000Z | 2017-08-25T08:30:18.000Z | qualifier/deploy/cohorte-home/repo/cohorte/composer/node/criteria/reliability/crashing.py | isandlaTech/cohorte-devtools | 9ba9021369188d2f0ad5c845ef242fd5a7097b57 | [
"Apache-2.0"
] | 1 | 2015-08-24T13:23:43.000Z | 2015-08-24T13:23:43.000Z | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Votes according to components stability (crashes vs time)
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import operator
import time
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Invalidate, Validate
# Composer
import cohorte.composer
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__=cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_NODE_CRITERION_RELIABILITY)
@Instantiate('cohorte-composer-node-criterion-crash')
class CrashCriterion(object):
"""
Votes for the isolate that will host a component according to the
configuration
"""
def __init__(self):
"""
Sets up members
"""
# Component name -> Rating
self._ratings = {}
# Component name -> Time of last crash
self._last_crash = {}
# Unstable components names
self._unstable = set()
def __str__(self):
"""
String representation
"""
return "Components reliability rating"
@Validate
def validate(self, context):
"""
Component validated
"""
# TODO: load initial ratings
self._ratings.clear()
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
self._ratings.clear()
self._last_crash.clear()
self._unstable.clear()
def _update_rating(self, component, delta):
"""
Updates the rating of the component with the given delta
:param component: A component name
:param delta: Rating modification
"""
# Normalize the new rating
new_rating = self._ratings.setdefault(component, 50) + delta
if new_rating < 0:
new_rating = 0
elif new_rating > 100:
new_rating = 100
# Store it
self._ratings[component] = new_rating
if new_rating < 5:
# Lower threshold reached: components are incompatible
self._unstable.add(component)
def handle_event(self, event):
"""
Does nothing: this elector only cares about what is written in
configuration files
"""
# Get the implicated components
components = sorted(set(component.name
for component in event.components))
if event.kind == 'timer':
self.on_timer(components)
elif event.kind == 'isolate.lost':
self.on_crash(components)
def on_crash(self, components):
"""
An isolate has been lost
:param components: Names of the components in the crashed isolate
"""
# Get the time of the crash
now = time.time()
# Update their stability ratings
for name in components:
if name not in self._unstable:
# Get the last crash information
last_crash = self._last_crash.get(name, 0)
time_since_crash = now - last_crash
if time_since_crash < 60:
# Less than 60s since the last crash
self._update_rating(name, -10)
else:
# More than 60s
self._update_rating(name, -5)
# Update the last crash information
self._last_crash[name] = now
def on_timer(self, components):
"""
The timer ticks: some components have been OK before last tick and now
:param components: Names of the components that well behaved
"""
# Get the tick time
now = time.time()
# Update their stability ratings
for name in components:
if name not in self._unstable:
# Get the last crash information
last_crash = self._last_crash.get(name, 0)
time_since_crash = now - last_crash
if time_since_crash > 120:
# More than 120s since the last crash
self._update_rating(name, +8)
elif time_since_crash > 60:
# More than 60s since the last crash
self._update_rating(name, +4)
# do nothing the minute right after a crash
def compute_stats(self, components):
"""
Computes statistics about the components of an isolate
:param components: Components already assigned to the isolate
"""
# Get the components names
names = set(component.name for component in components)
# TODO: compute variance too ?
# Mean rating
return sum(self._ratings.setdefault(name, 90)
for name in names) / len(names)
def vote(self, candidates, subject, ballot):
"""
Votes the isolate that matches the best the stability of the given
component
:param candidates: Isolates to vote for
:param subject: The component to place
:param ballot: The vote ballot
"""
# Get/Set the rating of the component
rating = self._ratings.setdefault(subject.name, 50.0)
# Distance with other components
distances = []
for candidate in candidates:
if candidate.components:
if len(candidate.components) == 1 \
and subject in candidate.components:
# Single one in the isolate where we were
distances.append((0, candidate))
elif subject.name in self._unstable:
# Don't try to go with other components...
ballot.append_against(candidate)
elif rating > 20:
# Only accept to work with other components if the given
# one is stable enough (> 20% stability rating)
# Compute the mean and variance of the current components
# ratings
mean = self.compute_stats(candidate.components)
distance = abs(mean - rating)
if distance < 20:
# Prefer small distances
distances.append((distance, candidate))
else:
# Prefer non-"neutral" isolates
if not candidate.name:
distances.append((20, candidate))
else:
# First component of this isolate
distances.append((5, candidate))
# Sort computed distances (lower is better)
distances.sort(key=operator.itemgetter(0))
# Use them as our vote
ballot.set_for(distance[1] for distance in distances)
ballot.lock()
| 31.51417 | 80 | 0.568602 |
import logging
import operator
import time
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Invalidate, Validate
import cohorte.composer
import cohorte.version
__version__=cohorte.version.__version__
_logger = logging.getLogger(__name__)
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_NODE_CRITERION_RELIABILITY)
@Instantiate('cohorte-composer-node-criterion-crash')
class CrashCriterion(object):
def __init__(self):
self._ratings = {}
self._last_crash = {}
self._unstable = set()
def __str__(self):
return "Components reliability rating"
@Validate
def validate(self, context):
self._ratings.clear()
@Invalidate
def invalidate(self, context):
self._ratings.clear()
self._last_crash.clear()
self._unstable.clear()
def _update_rating(self, component, delta):
new_rating = self._ratings.setdefault(component, 50) + delta
if new_rating < 0:
new_rating = 0
elif new_rating > 100:
new_rating = 100
self._ratings[component] = new_rating
if new_rating < 5:
self._unstable.add(component)
def handle_event(self, event):
components = sorted(set(component.name
for component in event.components))
if event.kind == 'timer':
self.on_timer(components)
elif event.kind == 'isolate.lost':
self.on_crash(components)
def on_crash(self, components):
now = time.time()
for name in components:
if name not in self._unstable:
last_crash = self._last_crash.get(name, 0)
time_since_crash = now - last_crash
if time_since_crash < 60:
self._update_rating(name, -10)
else:
self._update_rating(name, -5)
self._last_crash[name] = now
def on_timer(self, components):
now = time.time()
for name in components:
if name not in self._unstable:
last_crash = self._last_crash.get(name, 0)
time_since_crash = now - last_crash
if time_since_crash > 120:
self._update_rating(name, +8)
elif time_since_crash > 60:
self._update_rating(name, +4)
def compute_stats(self, components):
names = set(component.name for component in components)
return sum(self._ratings.setdefault(name, 90)
for name in names) / len(names)
def vote(self, candidates, subject, ballot):
rating = self._ratings.setdefault(subject.name, 50.0)
distances = []
for candidate in candidates:
if candidate.components:
if len(candidate.components) == 1 \
and subject in candidate.components:
distances.append((0, candidate))
elif subject.name in self._unstable:
ballot.append_against(candidate)
elif rating > 20:
# Only accept to work with other components if the given
# one is stable enough (> 20% stability rating)
# Compute the mean and variance of the current components
# ratings
mean = self.compute_stats(candidate.components)
distance = abs(mean - rating)
if distance < 20:
# Prefer small distances
distances.append((distance, candidate))
else:
# Prefer non-"neutral" isolates
if not candidate.name:
distances.append((20, candidate))
else:
# First component of this isolate
distances.append((5, candidate))
# Sort computed distances (lower is better)
distances.sort(key=operator.itemgetter(0))
# Use them as our vote
ballot.set_for(distance[1] for distance in distances)
ballot.lock()
| true | true |
f722ad1d65fd5ca1d2053ced68e3a0488392960f | 23,354 | py | Python | frappe/tests/test_db.py | itsdave-de/frappe | b193194484e0de7a2909899457f13579e4026c56 | [
"MIT"
] | null | null | null | frappe/tests/test_db.py | itsdave-de/frappe | b193194484e0de7a2909899457f13579e4026c56 | [
"MIT"
] | null | null | null | frappe/tests/test_db.py | itsdave-de/frappe | b193194484e0de7a2909899457f13579e4026c56 | [
"MIT"
] | null | null | null | # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import datetime
import inspect
import unittest
from random import choice
from unittest.mock import patch
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.database import savepoint
from frappe.database.database import Database
from frappe.query_builder import Field
from frappe.query_builder.functions import Concat_ws
from frappe.tests.test_query_builder import db_type_is, run_only_if
from frappe.utils import add_days, cint, now, random_string
from frappe.utils.testutils import clear_custom_fields
class TestDB(unittest.TestCase):
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", {"name": ["=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["like", "Admin%"]}), "Administrator")
self.assertNotEqual(frappe.db.get_value("User", {"name": ["!=", "Guest"]}), "Guest")
self.assertEqual(frappe.db.get_value("User", {"name": ["<", "Adn"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["<=", "Administrator"]}), "Administrator")
self.assertEqual(
frappe.db.get_value("User", {}, ["Max(name)"], order_by=None),
frappe.db.sql("SELECT Max(name) FROM tabUser")[0][0],
)
self.assertEqual(
frappe.db.get_value("User", {}, "Min(name)", order_by=None),
frappe.db.sql("SELECT Min(name) FROM tabUser")[0][0],
)
self.assertIn(
"for update",
frappe.db.get_value(
"User", Field("name") == "Administrator", for_update=True, run=False
).lower(),
)
user_doctype = frappe.qb.DocType("User")
self.assertEqual(
frappe.qb.from_(user_doctype).select(user_doctype.name, user_doctype.email).run(),
frappe.db.get_values(
user_doctype,
filters={},
fieldname=[user_doctype.name, user_doctype.email],
order_by=None,
),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name > 's' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">", "s"]}),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name >= 't' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">=", "t"]}),
)
self.assertEqual(
frappe.db.get_values(
"User",
filters={"name": "Administrator"},
distinct=True,
fieldname="email",
),
frappe.qb.from_(user_doctype)
.where(user_doctype.name == "Administrator")
.select("email")
.distinct()
.run(),
)
self.assertIn(
"concat_ws",
frappe.db.get_value(
"User",
filters={"name": "Administrator"},
fieldname=Concat_ws(" ", "LastName"),
run=False,
).lower(),
)
self.assertEqual(
frappe.db.sql("select email from tabUser where name='Administrator' order by modified DESC"),
frappe.db.get_values("User", filters=[["name", "=", "Administrator"]], fieldname="email"),
)
def test_get_value_limits(self):
# check both dict and list style filters
filters = [{"enabled": 1}, [["enabled", "=", 1]]]
for filter in filters:
self.assertEqual(1, len(frappe.db.get_values("User", filters=filter, limit=1)))
# count of last touched rows as per DB-API 2.0 https://peps.python.org/pep-0249/#rowcount
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
self.assertEqual(2, len(frappe.db.get_values("User", filters=filter, limit=2)))
self.assertGreaterEqual(2, cint(frappe.db._cursor.rowcount))
# without limits length == count
self.assertEqual(
len(frappe.db.get_values("User", filters=filter)), frappe.db.count("User", filter)
)
frappe.db.get_value("User", filters=filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
frappe.db.exists("User", filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
def test_escape(self):
frappe.db.escape("香港濟生堂製藥有限公司 - IT".encode("utf-8"))
def test_get_single_value(self):
# setup
values_dict = {
"Float": 1.5,
"Int": 1,
"Percent": 55.5,
"Currency": 12.5,
"Data": "Test",
"Date": datetime.datetime.now().date(),
"Datetime": datetime.datetime.now(),
"Time": datetime.timedelta(hours=9, minutes=45, seconds=10),
}
test_inputs = [
{"fieldtype": fieldtype, "value": value} for fieldtype, value in values_dict.items()
]
for fieldtype in values_dict.keys():
create_custom_field(
"Print Settings",
{
"fieldname": f"test_{fieldtype.lower()}",
"label": f"Test {fieldtype}",
"fieldtype": fieldtype,
},
)
# test
for inp in test_inputs:
fieldname = f"test_{inp['fieldtype'].lower()}"
frappe.db.set_value("Print Settings", "Print Settings", fieldname, inp["value"])
self.assertEqual(frappe.db.get_single_value("Print Settings", fieldname), inp["value"])
# teardown
clear_custom_fields("Print Settings")
def test_log_touched_tables(self):
frappe.flags.in_migrate = True
frappe.flags.touched_tables = set()
frappe.db.set_value("System Settings", "System Settings", "backup_limit", 5)
self.assertIn("tabSingles", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo = frappe.get_doc({"doctype": "ToDo", "description": "Random Description"})
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.description = "Another Description"
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
if frappe.db.db_type != "postgres":
frappe.flags.touched_tables = set()
frappe.db.sql("UPDATE tabToDo SET description = 'Updated Description'")
self.assertNotIn("tabToDo SET", frappe.flags.touched_tables)
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.delete()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
create_custom_field("ToDo", {"label": "ToDo Custom Field"})
self.assertIn("tabToDo", frappe.flags.touched_tables)
self.assertIn("tabCustom Field", frappe.flags.touched_tables)
frappe.flags.in_migrate = False
frappe.flags.touched_tables.clear()
def test_db_keywords_as_fields(self):
"""Tests if DB keywords work as docfield names. If they're wrapped with grave accents."""
# Using random.choices, picked out a list of 40 keywords for testing
all_keywords = {
"mariadb": [
"CHARACTER",
"DELAYED",
"LINES",
"EXISTS",
"YEAR_MONTH",
"LOCALTIME",
"BOTH",
"MEDIUMINT",
"LEFT",
"BINARY",
"DEFAULT",
"KILL",
"WRITE",
"SQL_SMALL_RESULT",
"CURRENT_TIME",
"CROSS",
"INHERITS",
"SELECT",
"TABLE",
"ALTER",
"CURRENT_TIMESTAMP",
"XOR",
"CASE",
"ALL",
"WHERE",
"INT",
"TO",
"SOME",
"DAY_MINUTE",
"ERRORS",
"OPTIMIZE",
"REPLACE",
"HIGH_PRIORITY",
"VARBINARY",
"HELP",
"IS",
"CHAR",
"DESCRIBE",
"KEY",
],
"postgres": [
"WORK",
"LANCOMPILER",
"REAL",
"HAVING",
"REPEATABLE",
"DATA",
"USING",
"BIT",
"DEALLOCATE",
"SERIALIZABLE",
"CURSOR",
"INHERITS",
"ARRAY",
"TRUE",
"IGNORE",
"PARAMETER_MODE",
"ROW",
"CHECKPOINT",
"SHOW",
"BY",
"SIZE",
"SCALE",
"UNENCRYPTED",
"WITH",
"AND",
"CONVERT",
"FIRST",
"SCOPE",
"WRITE",
"INTERVAL",
"CHARACTER_SET_SCHEMA",
"ADD",
"SCROLL",
"NULL",
"WHEN",
"TRANSACTION_ACTIVE",
"INT",
"FORTRAN",
"STABLE",
],
}
created_docs = []
# edit by rushabh: added [:1]
# don't run every keyword! - if one works, they all do
fields = all_keywords[frappe.conf.db_type][:1]
test_doctype = "ToDo"
def add_custom_field(field):
create_custom_field(
test_doctype,
{
"fieldname": field.lower(),
"label": field.title(),
"fieldtype": "Data",
},
)
# Create custom fields for test_doctype
for field in fields:
add_custom_field(field)
# Create documents under that doctype and query them via ORM
for _ in range(10):
docfields = {key.lower(): random_string(10) for key in fields}
doc = frappe.get_doc({"doctype": test_doctype, "description": random_string(20), **docfields})
doc.insert()
created_docs.append(doc.name)
random_field = choice(fields).lower()
random_doc = choice(created_docs)
random_value = random_string(20)
# Testing read
self.assertEqual(
list(frappe.get_all("ToDo", fields=[random_field], limit=1)[0])[0], random_field
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"`{random_field}` as total"], limit=1)[0])[0], "total"
)
# Testing read for distinct and sql functions
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}` as total"],
distinct=True,
limit=1,
)[0]
)[0],
"total",
)
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}`"],
distinct=True,
limit=1,
)[0]
)[0],
random_field,
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"count(`{random_field}`)"], limit=1)[0])[0],
"count" if frappe.conf.db_type == "postgres" else f"count(`{random_field}`)",
)
# Testing update
frappe.db.set_value(test_doctype, random_doc, random_field, random_value)
self.assertEqual(frappe.db.get_value(test_doctype, random_doc, random_field), random_value)
# Cleanup - delete records and remove custom fields
for doc in created_docs:
frappe.delete_doc(test_doctype, doc)
clear_custom_fields(test_doctype)
def test_savepoints(self):
frappe.db.rollback()
save_point = "todonope"
created_docs = []
failed_docs = []
for _ in range(5):
frappe.db.savepoint(save_point)
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
frappe.db.rollback(save_point=save_point)
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_savepoints_wrapper(self):
frappe.db.rollback()
class SpecificExc(Exception):
pass
created_docs = []
failed_docs = []
for _ in range(5):
with savepoint(catch=SpecificExc):
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
with savepoint(catch=SpecificExc):
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
raise SpecificExc
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_transaction_writes_error(self):
from frappe.database.database import Database
frappe.db.rollback()
frappe.db.MAX_WRITES_PER_TRANSACTION = 1
note = frappe.get_last_doc("ToDo")
note.description = "changed"
with self.assertRaises(frappe.TooManyWritesError) as tmw:
note.save()
frappe.db.MAX_WRITES_PER_TRANSACTION = Database.MAX_WRITES_PER_TRANSACTION
def test_transaction_write_counting(self):
note = frappe.get_doc(doctype="Note", title="transaction counting").insert()
writes = frappe.db.transaction_writes
frappe.db.set_value("Note", note.name, "content", "abc")
self.assertEqual(1, frappe.db.transaction_writes - writes)
writes = frappe.db.transaction_writes
frappe.db.sql(
"""
update `tabNote`
set content = 'abc'
where name = %s
""",
note.name,
)
self.assertEqual(1, frappe.db.transaction_writes - writes)
def test_pk_collision_ignoring(self):
# note has `name` generated from title
for _ in range(3):
frappe.get_doc(doctype="Note", title="duplicate name").insert(ignore_if_duplicate=True)
with savepoint():
self.assertRaises(
frappe.DuplicateEntryError, frappe.get_doc(doctype="Note", title="duplicate name").insert
)
# recover transaction to continue other tests
raise Exception
def test_exists(self):
dt, dn = "User", "Administrator"
self.assertEqual(frappe.db.exists(dt, dn, cache=True), dn)
self.assertEqual(frappe.db.exists(dt, dn), dn)
self.assertEqual(frappe.db.exists(dt, {"name": ("=", dn)}), dn)
filters = {"doctype": dt, "name": ("like", "Admin%")}
self.assertEqual(frappe.db.exists(filters), dn)
self.assertEqual(filters["doctype"], dt) # make sure that doctype was not removed from filters
self.assertEqual(frappe.db.exists(dt, [["name", "=", dn]]), dn)
@run_only_if(db_type_is.MARIADB)
class TestDDLCommandsMaria(unittest.TestCase):
	"""DDL helpers (rename_table, describe, change_column_type, add_index)
	exercised against MariaDB on a scratch table `tabTestNotes`."""

	test_table_name = "TestNotes"

	def setUp(self) -> None:
		# DDL in MariaDB implicitly commits, so commit first to avoid
		# sweeping unrelated pending work into that implicit commit.
		frappe.db.commit()
		frappe.db.sql(
			f"""
			CREATE TABLE `tab{self.test_table_name}` (`id` INT NULL, content TEXT, PRIMARY KEY (`id`));
			"""
		)

	def tearDown(self) -> None:
		frappe.db.sql(f"DROP TABLE tab{self.test_table_name};")
		# Reset in case a test (e.g. test_rename) repointed this at a renamed table.
		self.test_table_name = "TestNotes"

	def test_rename(self) -> None:
		"""rename_table must move the table to `tab<new_name>`."""
		new_table_name = f"{self.test_table_name}_new"
		frappe.db.rename_table(self.test_table_name, new_table_name)
		check_exists = frappe.db.sql(
			f"""
			SELECT * FROM INFORMATION_SCHEMA.TABLES
			WHERE TABLE_NAME = N'tab{new_table_name}';
			"""
		)
		self.assertGreater(len(check_exists), 0)
		self.assertIn(f"tab{new_table_name}", check_exists[0])
		# * so this table is deleted after the rename
		self.test_table_name = new_table_name

	def test_describe(self) -> None:
		"""describe() should return MariaDB's DESC rows for the table."""
		self.assertEqual(
			(
				("id", "int(11)", "NO", "PRI", None, ""),
				("content", "text", "YES", "", None, ""),
			),
			frappe.db.describe(self.test_table_name),
		)

	def test_change_type(self) -> None:
		"""change_column_type must alter the column's SQL type in place."""
		frappe.db.change_column_type("TestNotes", "id", "varchar(255)")
		test_table_description = frappe.db.sql(f"DESC tab{self.test_table_name};")
		self.assertGreater(len(test_table_description), 0)
		self.assertIn("varchar(255)", test_table_description[0])

	def test_add_index(self) -> None:
		"""A composite index over two columns yields two SHOW INDEX rows."""
		index_name = "test_index"
		frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
		indexs_in_table = frappe.db.sql(
			f"""
			SHOW INDEX FROM tab{self.test_table_name}
			WHERE Key_name = '{index_name}';
			"""
		)
		self.assertEqual(len(indexs_in_table), 2)
class TestDBSetValue(unittest.TestCase):
	"""Behaviour of frappe.db.set_value across single and multiple rows,
	modified-timestamp options, row locking, cache invalidation, and the
	deprecated update() alias."""

	@classmethod
	def setUpClass(cls):
		cls.todo1 = frappe.get_doc(doctype="ToDo", description="test_set_value 1").insert()
		cls.todo2 = frappe.get_doc(doctype="ToDo", description="test_set_value 2").insert()

	def test_update_single_doctype_field(self):
		"""Toggling a Single-doctype field must persist via set_value with an
		explicit name, with None as the name, and via set_single_value."""
		value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
		changed_value = not value
		frappe.db.set_value(
			"System Settings", "System Settings", "deny_multiple_sessions", changed_value
		)
		current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
		self.assertEqual(current_value, changed_value)

		changed_value = not current_value
		frappe.db.set_value("System Settings", None, "deny_multiple_sessions", changed_value)
		current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
		self.assertEqual(current_value, changed_value)

		changed_value = not current_value
		frappe.db.set_single_value("System Settings", "deny_multiple_sessions", changed_value)
		current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
		self.assertEqual(current_value, changed_value)

	def test_update_single_row_single_column(self):
		"""A (doctype, name, field, value) call updates exactly that field."""
		frappe.db.set_value("ToDo", self.todo1.name, "description", "test_set_value change 1")
		updated_value = frappe.db.get_value("ToDo", self.todo1.name, "description")
		self.assertEqual(updated_value, "test_set_value change 1")

	def test_update_single_row_multiple_columns(self):
		"""Passing a dict of fields updates all of them in one statement."""
		description, status = "Upated by test_update_single_row_multiple_columns", "Closed"
		frappe.db.set_value(
			"ToDo",
			self.todo1.name,
			{
				"description": description,
				"status": status,
			},
			update_modified=False,
		)
		updated_desciption, updated_status = frappe.db.get_value(
			"ToDo", filters={"name": self.todo1.name}, fieldname=["description", "status"]
		)
		self.assertEqual(description, updated_desciption)
		self.assertEqual(status, updated_status)

	def test_update_multiple_rows_single_column(self):
		"""Dict filters select multiple rows; all get the new value."""
		frappe.db.set_value(
			"ToDo", {"description": ("like", "%test_set_value%")}, "description", "change 2"
		)
		self.assertEqual(frappe.db.get_value("ToDo", self.todo1.name, "description"), "change 2")
		self.assertEqual(frappe.db.get_value("ToDo", self.todo2.name, "description"), "change 2")

	def test_update_multiple_rows_multiple_columns(self):
		"""Dict filters combined with a dict of fields update every match."""
		todos_to_update = frappe.get_all(
			"ToDo",
			filters={"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
			pluck="name",
		)
		frappe.db.set_value(
			"ToDo",
			{"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
			{"status": "Closed", "priority": "High"},
		)
		test_result = frappe.get_all(
			"ToDo", filters={"name": ("in", todos_to_update)}, fields=["status", "priority"]
		)
		# Assert the field values themselves. The previous form
		# `all(x for x in test_result if x[...] == ...)` filtered first and
		# then checked truthiness of dicts, which is vacuously True.
		self.assertTrue(all(x["status"] == "Closed" for x in test_result))
		self.assertTrue(all(x["priority"] == "High" for x in test_result))

	def test_update_modified_options(self):
		"""update_modified=False must leave `modified` untouched; explicit
		modified/modified_by values must be written verbatim."""
		self.todo2.reload()

		todo = self.todo2
		updated_description = f"{todo.description} - by `test_update_modified_options`"
		custom_modified = datetime.datetime.fromisoformat(add_days(now(), 10))
		custom_modified_by = "user_that_doesnt_exist@example.com"

		frappe.db.set_value("ToDo", todo.name, "description", updated_description, update_modified=False)
		self.assertEqual(updated_description, frappe.db.get_value("ToDo", todo.name, "description"))
		self.assertEqual(todo.modified, frappe.db.get_value("ToDo", todo.name, "modified"))

		frappe.db.set_value(
			"ToDo",
			todo.name,
			"description",
			"test_set_value change 1",
			modified=custom_modified,
			modified_by=custom_modified_by,
		)
		self.assertTupleEqual(
			(custom_modified, custom_modified_by),
			frappe.db.get_value("ToDo", todo.name, ["modified", "modified_by"]),
		)

	def test_for_update(self):
		"""set_value must SELECT ... FOR UPDATE before issuing the UPDATE."""
		self.todo1.reload()

		with patch.object(Database, "sql") as sql_called:
			frappe.db.set_value(
				self.todo1.doctype,
				self.todo1.name,
				"description",
				f"{self.todo1.description}-edit by `test_for_update`",
			)
			first_query = sql_called.call_args_list[0].args[0]
			second_query = sql_called.call_args_list[1].args[0]
			self.assertTrue(sql_called.call_count == 2)
			self.assertTrue("FOR UPDATE" in first_query)
			if frappe.conf.db_type == "postgres":
				from frappe.database.postgres.database import modify_query

				self.assertTrue(modify_query("UPDATE `tabToDo` SET") in second_query)
			if frappe.conf.db_type == "mariadb":
				self.assertTrue("UPDATE `tabToDo` SET" in second_query)

	def test_cleared_cache(self):
		"""set_value must invalidate the document cache for the changed doc."""
		self.todo2.reload()

		with patch.object(frappe, "clear_document_cache") as clear_cache:
			frappe.db.set_value(
				self.todo2.doctype,
				self.todo2.name,
				"description",
				f"{self.todo2.description}-edit by `test_cleared_cache`",
			)
			clear_cache.assert_called()

	def test_update_alias(self):
		"""frappe.db.update is a thin alias that forwards to set_value."""
		args = (self.todo1.doctype, self.todo1.name, "description", "Updated by `test_update_alias`")
		kwargs = {
			"for_update": False,
			"modified": None,
			"modified_by": None,
			"update_modified": True,
			"debug": False,
		}

		self.assertTrue("return self.set_value(" in inspect.getsource(frappe.db.update))

		with patch.object(Database, "set_value") as set_value:
			frappe.db.update(*args, **kwargs)
			set_value.assert_called_once()
			set_value.assert_called_with(*args, **kwargs)

	@classmethod
	def tearDownClass(cls):
		frappe.db.rollback()
@run_only_if(db_type_is.POSTGRES)
class TestDDLCommandsPost(unittest.TestCase):
	"""DDL helpers plus MariaDB→Postgres query/value translation, run
	against Postgres on a scratch table `tabTestNotes`."""

	test_table_name = "TestNotes"

	def setUp(self) -> None:
		frappe.db.sql(
			f"""
			CREATE TABLE "tab{self.test_table_name}" ("id" INT NULL, content text, PRIMARY KEY ("id"))
			"""
		)

	def tearDown(self) -> None:
		frappe.db.sql(f'DROP TABLE "tab{self.test_table_name}"')
		# Reset in case a test (e.g. test_rename) repointed this at a renamed table.
		self.test_table_name = "TestNotes"

	def test_rename(self) -> None:
		"""rename_table must move the table to `tab<new_name>`."""
		new_table_name = f"{self.test_table_name}_new"
		frappe.db.rename_table(self.test_table_name, new_table_name)
		check_exists = frappe.db.sql(
			f"""
			SELECT EXISTS (
			SELECT FROM information_schema.tables
			WHERE table_name = 'tab{new_table_name}'
			);
			"""
		)
		self.assertTrue(check_exists[0][0])
		# * so this table is deleted after the rename
		self.test_table_name = new_table_name

	def test_describe(self) -> None:
		"""On Postgres, describe() returns only the column names."""
		self.assertEqual([("id",), ("content",)], frappe.db.describe(self.test_table_name))

	def test_change_type(self) -> None:
		"""change_column_type must alter the column's SQL type in place."""
		frappe.db.change_column_type(self.test_table_name, "id", "varchar(255)")
		check_change = frappe.db.sql(
			f"""
			SELECT
				table_name,
				column_name,
				data_type
			FROM
				information_schema.columns
			WHERE
				table_name = 'tab{self.test_table_name}'
			"""
		)
		self.assertGreater(len(check_change), 0)
		self.assertIn("character varying", check_change[0])

	def test_add_index(self) -> None:
		"""add_index must create one named index covering both columns."""
		index_name = "test_index"
		frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
		indexs_in_table = frappe.db.sql(
			f"""
			SELECT indexname
			FROM pg_indexes
			WHERE tablename = 'tab{self.test_table_name}'
			AND indexname = '{index_name}' ;
			""",
		)
		self.assertEqual(len(indexs_in_table), 1)

	@run_only_if(db_type_is.POSTGRES)
	def test_modify_query(self):
		"""modify_query must translate MariaDB syntax (backticks, numeric
		literal quoting, locate()) into Postgres-compatible SQL."""
		from frappe.database.postgres.database import modify_query

		query = "select * from `tabtree b` where lft > 13 and rgt <= 16 and name =1.0 and parent = 4134qrsdc and isgroup = 1.00045"
		self.assertEqual(
			"select * from \"tabtree b\" where lft > '13' and rgt <= '16' and name = '1' and parent = 4134qrsdc and isgroup = 1.00045",
			modify_query(query),
		)

		query = (
			'select locate(".io", "frappe.io"), locate("3", cast(3 as varchar)), locate("3", 3::varchar)'
		)
		self.assertEqual(
			'select strpos( "frappe.io", ".io"), strpos( cast(3 as varchar), "3"), strpos( 3::varchar, "3")',
			modify_query(query),
		)

	@run_only_if(db_type_is.POSTGRES)
	def test_modify_values(self):
		"""modify_values must stringify integral numbers (int or x.0 float)
		in dict and sequence parameters, leaving other values alone."""
		from frappe.database.postgres.database import modify_values

		self.assertEqual(
			{"abcd": "23", "efgh": "23", "ijkl": 23.0345, "mnop": "wow"},
			modify_values({"abcd": 23, "efgh": 23.0, "ijkl": 23.0345, "mnop": "wow"}),
		)
		self.assertEqual(["23", "23", 23.00004345, "wow"], modify_values((23, 23.0, 23.00004345, "wow")))

	def test_sequence_table_creation(self):
		"""An autoincrement-named doctype must create a backing sequence
		(information_schema.sequences on Postgres, a SEQUENCE table on MariaDB)."""
		from frappe.core.doctype.doctype.test_doctype import new_doctype

		dt = new_doctype("autoinc_dt_seq_test", autoname="autoincrement").insert(ignore_permissions=True)

		if frappe.db.db_type == "postgres":
			self.assertTrue(
				frappe.db.sql(
					"""select sequence_name FROM information_schema.sequences
					where sequence_name ilike 'autoinc_dt_seq_test%'"""
				)[0][0]
			)
		else:
			self.assertTrue(
				frappe.db.sql(
					"""select data_type FROM information_schema.tables
					where table_type = 'SEQUENCE' and table_name like 'autoinc_dt_seq_test%'"""
				)[0][0]
			)

		dt.delete(ignore_permissions=True)
| 29.941026 | 126 | 0.689646 |
import datetime
import inspect
import unittest
from random import choice
from unittest.mock import patch
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.database import savepoint
from frappe.database.database import Database
from frappe.query_builder import Field
from frappe.query_builder.functions import Concat_ws
from frappe.tests.test_query_builder import db_type_is, run_only_if
from frappe.utils import add_days, cint, now, random_string
from frappe.utils.testutils import clear_custom_fields
class TestDB(unittest.TestCase):
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", {"name": ["=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["like", "Admin%"]}), "Administrator")
self.assertNotEqual(frappe.db.get_value("User", {"name": ["!=", "Guest"]}), "Guest")
self.assertEqual(frappe.db.get_value("User", {"name": ["<", "Adn"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["<=", "Administrator"]}), "Administrator")
self.assertEqual(
frappe.db.get_value("User", {}, ["Max(name)"], order_by=None),
frappe.db.sql("SELECT Max(name) FROM tabUser")[0][0],
)
self.assertEqual(
frappe.db.get_value("User", {}, "Min(name)", order_by=None),
frappe.db.sql("SELECT Min(name) FROM tabUser")[0][0],
)
self.assertIn(
"for update",
frappe.db.get_value(
"User", Field("name") == "Administrator", for_update=True, run=False
).lower(),
)
user_doctype = frappe.qb.DocType("User")
self.assertEqual(
frappe.qb.from_(user_doctype).select(user_doctype.name, user_doctype.email).run(),
frappe.db.get_values(
user_doctype,
filters={},
fieldname=[user_doctype.name, user_doctype.email],
order_by=None,
),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name > 's' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">", "s"]}),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name >= 't' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">=", "t"]}),
)
self.assertEqual(
frappe.db.get_values(
"User",
filters={"name": "Administrator"},
distinct=True,
fieldname="email",
),
frappe.qb.from_(user_doctype)
.where(user_doctype.name == "Administrator")
.select("email")
.distinct()
.run(),
)
self.assertIn(
"concat_ws",
frappe.db.get_value(
"User",
filters={"name": "Administrator"},
fieldname=Concat_ws(" ", "LastName"),
run=False,
).lower(),
)
self.assertEqual(
frappe.db.sql("select email from tabUser where name='Administrator' order by modified DESC"),
frappe.db.get_values("User", filters=[["name", "=", "Administrator"]], fieldname="email"),
)
def test_get_value_limits(self):
filters = [{"enabled": 1}, [["enabled", "=", 1]]]
for filter in filters:
self.assertEqual(1, len(frappe.db.get_values("User", filters=filter, limit=1)))
assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
self.assertEqual(2, len(frappe.db.get_values("User", filters=filter, limit=2)))
self.assertGreaterEqual(2, cint(frappe.db._cursor.rowcount))
self.assertEqual(
len(frappe.db.get_values("User", filters=filter)), frappe.db.count("User", filter)
)
frappe.db.get_value("User", filters=filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
frappe.db.exists("User", filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
def test_escape(self):
frappe.db.escape("香港濟生堂製藥有限公司 - IT".encode("utf-8"))
def test_get_single_value(self):
values_dict = {
"Float": 1.5,
"Int": 1,
"Percent": 55.5,
"Currency": 12.5,
"Data": "Test",
"Date": datetime.datetime.now().date(),
"Datetime": datetime.datetime.now(),
"Time": datetime.timedelta(hours=9, minutes=45, seconds=10),
}
test_inputs = [
{"fieldtype": fieldtype, "value": value} for fieldtype, value in values_dict.items()
]
for fieldtype in values_dict.keys():
create_custom_field(
"Print Settings",
{
"fieldname": f"test_{fieldtype.lower()}",
"label": f"Test {fieldtype}",
"fieldtype": fieldtype,
},
)
for inp in test_inputs:
fieldname = f"test_{inp['fieldtype'].lower()}"
frappe.db.set_value("Print Settings", "Print Settings", fieldname, inp["value"])
self.assertEqual(frappe.db.get_single_value("Print Settings", fieldname), inp["value"])
clear_custom_fields("Print Settings")
def test_log_touched_tables(self):
frappe.flags.in_migrate = True
frappe.flags.touched_tables = set()
frappe.db.set_value("System Settings", "System Settings", "backup_limit", 5)
self.assertIn("tabSingles", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo = frappe.get_doc({"doctype": "ToDo", "description": "Random Description"})
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.description = "Another Description"
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
if frappe.db.db_type != "postgres":
frappe.flags.touched_tables = set()
frappe.db.sql("UPDATE tabToDo SET description = 'Updated Description'")
self.assertNotIn("tabToDo SET", frappe.flags.touched_tables)
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.delete()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
create_custom_field("ToDo", {"label": "ToDo Custom Field"})
self.assertIn("tabToDo", frappe.flags.touched_tables)
self.assertIn("tabCustom Field", frappe.flags.touched_tables)
frappe.flags.in_migrate = False
frappe.flags.touched_tables.clear()
def test_db_keywords_as_fields(self):
all_keywords = {
"mariadb": [
"CHARACTER",
"DELAYED",
"LINES",
"EXISTS",
"YEAR_MONTH",
"LOCALTIME",
"BOTH",
"MEDIUMINT",
"LEFT",
"BINARY",
"DEFAULT",
"KILL",
"WRITE",
"SQL_SMALL_RESULT",
"CURRENT_TIME",
"CROSS",
"INHERITS",
"SELECT",
"TABLE",
"ALTER",
"CURRENT_TIMESTAMP",
"XOR",
"CASE",
"ALL",
"WHERE",
"INT",
"TO",
"SOME",
"DAY_MINUTE",
"ERRORS",
"OPTIMIZE",
"REPLACE",
"HIGH_PRIORITY",
"VARBINARY",
"HELP",
"IS",
"CHAR",
"DESCRIBE",
"KEY",
],
"postgres": [
"WORK",
"LANCOMPILER",
"REAL",
"HAVING",
"REPEATABLE",
"DATA",
"USING",
"BIT",
"DEALLOCATE",
"SERIALIZABLE",
"CURSOR",
"INHERITS",
"ARRAY",
"TRUE",
"IGNORE",
"PARAMETER_MODE",
"ROW",
"CHECKPOINT",
"SHOW",
"BY",
"SIZE",
"SCALE",
"UNENCRYPTED",
"WITH",
"AND",
"CONVERT",
"FIRST",
"SCOPE",
"WRITE",
"INTERVAL",
"CHARACTER_SET_SCHEMA",
"ADD",
"SCROLL",
"NULL",
"WHEN",
"TRANSACTION_ACTIVE",
"INT",
"FORTRAN",
"STABLE",
],
}
created_docs = []
fields = all_keywords[frappe.conf.db_type][:1]
test_doctype = "ToDo"
def add_custom_field(field):
create_custom_field(
test_doctype,
{
"fieldname": field.lower(),
"label": field.title(),
"fieldtype": "Data",
},
)
# Create custom fields for test_doctype
for field in fields:
add_custom_field(field)
# Create documents under that doctype and query them via ORM
for _ in range(10):
docfields = {key.lower(): random_string(10) for key in fields}
doc = frappe.get_doc({"doctype": test_doctype, "description": random_string(20), **docfields})
doc.insert()
created_docs.append(doc.name)
random_field = choice(fields).lower()
random_doc = choice(created_docs)
random_value = random_string(20)
# Testing read
self.assertEqual(
list(frappe.get_all("ToDo", fields=[random_field], limit=1)[0])[0], random_field
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"`{random_field}` as total"], limit=1)[0])[0], "total"
)
# Testing read for distinct and sql functions
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}` as total"],
distinct=True,
limit=1,
)[0]
)[0],
"total",
)
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}`"],
distinct=True,
limit=1,
)[0]
)[0],
random_field,
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"count(`{random_field}`)"], limit=1)[0])[0],
"count" if frappe.conf.db_type == "postgres" else f"count(`{random_field}`)",
)
# Testing update
frappe.db.set_value(test_doctype, random_doc, random_field, random_value)
self.assertEqual(frappe.db.get_value(test_doctype, random_doc, random_field), random_value)
# Cleanup - delete records and remove custom fields
for doc in created_docs:
frappe.delete_doc(test_doctype, doc)
clear_custom_fields(test_doctype)
def test_savepoints(self):
frappe.db.rollback()
save_point = "todonope"
created_docs = []
failed_docs = []
for _ in range(5):
frappe.db.savepoint(save_point)
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
frappe.db.rollback(save_point=save_point)
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_savepoints_wrapper(self):
frappe.db.rollback()
class SpecificExc(Exception):
pass
created_docs = []
failed_docs = []
for _ in range(5):
with savepoint(catch=SpecificExc):
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
with savepoint(catch=SpecificExc):
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
raise SpecificExc
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_transaction_writes_error(self):
from frappe.database.database import Database
frappe.db.rollback()
frappe.db.MAX_WRITES_PER_TRANSACTION = 1
note = frappe.get_last_doc("ToDo")
note.description = "changed"
with self.assertRaises(frappe.TooManyWritesError) as tmw:
note.save()
frappe.db.MAX_WRITES_PER_TRANSACTION = Database.MAX_WRITES_PER_TRANSACTION
def test_transaction_write_counting(self):
note = frappe.get_doc(doctype="Note", title="transaction counting").insert()
writes = frappe.db.transaction_writes
frappe.db.set_value("Note", note.name, "content", "abc")
self.assertEqual(1, frappe.db.transaction_writes - writes)
writes = frappe.db.transaction_writes
frappe.db.sql(
"""
update `tabNote`
set content = 'abc'
where name = %s
""",
note.name,
)
self.assertEqual(1, frappe.db.transaction_writes - writes)
def test_pk_collision_ignoring(self):
# note has `name` generated from title
for _ in range(3):
frappe.get_doc(doctype="Note", title="duplicate name").insert(ignore_if_duplicate=True)
with savepoint():
self.assertRaises(
frappe.DuplicateEntryError, frappe.get_doc(doctype="Note", title="duplicate name").insert
)
# recover transaction to continue other tests
raise Exception
def test_exists(self):
dt, dn = "User", "Administrator"
self.assertEqual(frappe.db.exists(dt, dn, cache=True), dn)
self.assertEqual(frappe.db.exists(dt, dn), dn)
self.assertEqual(frappe.db.exists(dt, {"name": ("=", dn)}), dn)
filters = {"doctype": dt, "name": ("like", "Admin%")}
self.assertEqual(frappe.db.exists(filters), dn)
self.assertEqual(filters["doctype"], dt) # make sure that doctype was not removed from filters
self.assertEqual(frappe.db.exists(dt, [["name", "=", dn]]), dn)
@run_only_if(db_type_is.MARIADB)
class TestDDLCommandsMaria(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.commit()
frappe.db.sql(
f"""
CREATE TABLE `tab{self.test_table_name}` (`id` INT NULL, content TEXT, PRIMARY KEY (`id`));
"""
)
def tearDown(self) -> None:
frappe.db.sql(f"DROP TABLE tab{self.test_table_name};")
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT * FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME = N'tab{new_table_name}';
"""
)
self.assertGreater(len(check_exists), 0)
self.assertIn(f"tab{new_table_name}", check_exists[0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual(
(
("id", "int(11)", "NO", "PRI", None, ""),
("content", "text", "YES", "", None, ""),
),
frappe.db.describe(self.test_table_name),
)
def test_change_type(self) -> None:
frappe.db.change_column_type("TestNotes", "id", "varchar(255)")
test_table_description = frappe.db.sql(f"DESC tab{self.test_table_name};")
self.assertGreater(len(test_table_description), 0)
self.assertIn("varchar(255)", test_table_description[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SHOW INDEX FROM tab{self.test_table_name}
WHERE Key_name = '{index_name}';
"""
)
self.assertEqual(len(indexs_in_table), 2)
class TestDBSetValue(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.todo1 = frappe.get_doc(doctype="ToDo", description="test_set_value 1").insert()
cls.todo2 = frappe.get_doc(doctype="ToDo", description="test_set_value 2").insert()
def test_update_single_doctype_field(self):
value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
changed_value = not value
frappe.db.set_value(
"System Settings", "System Settings", "deny_multiple_sessions", changed_value
)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_value("System Settings", None, "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_single_value("System Settings", "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
def test_update_single_row_single_column(self):
frappe.db.set_value("ToDo", self.todo1.name, "description", "test_set_value change 1")
updated_value = frappe.db.get_value("ToDo", self.todo1.name, "description")
self.assertEqual(updated_value, "test_set_value change 1")
def test_update_single_row_multiple_columns(self):
description, status = "Upated by test_update_single_row_multiple_columns", "Closed"
frappe.db.set_value(
"ToDo",
self.todo1.name,
{
"description": description,
"status": status,
},
update_modified=False,
)
updated_desciption, updated_status = frappe.db.get_value(
"ToDo", filters={"name": self.todo1.name}, fieldname=["description", "status"]
)
self.assertEqual(description, updated_desciption)
self.assertEqual(status, updated_status)
def test_update_multiple_rows_single_column(self):
frappe.db.set_value(
"ToDo", {"description": ("like", "%test_set_value%")}, "description", "change 2"
)
self.assertEqual(frappe.db.get_value("ToDo", self.todo1.name, "description"), "change 2")
self.assertEqual(frappe.db.get_value("ToDo", self.todo2.name, "description"), "change 2")
def test_update_multiple_rows_multiple_columns(self):
todos_to_update = frappe.get_all(
"ToDo",
filters={"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
pluck="name",
)
frappe.db.set_value(
"ToDo",
{"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
{"status": "Closed", "priority": "High"},
)
test_result = frappe.get_all(
"ToDo", filters={"name": ("in", todos_to_update)}, fields=["status", "priority"]
)
self.assertTrue(all(x for x in test_result if x["status"] == "Closed"))
self.assertTrue(all(x for x in test_result if x["priority"] == "High"))
def test_update_modified_options(self):
self.todo2.reload()
todo = self.todo2
updated_description = f"{todo.description} - by `test_update_modified_options`"
custom_modified = datetime.datetime.fromisoformat(add_days(now(), 10))
custom_modified_by = "user_that_doesnt_exist@example.com"
frappe.db.set_value("ToDo", todo.name, "description", updated_description, update_modified=False)
self.assertEqual(updated_description, frappe.db.get_value("ToDo", todo.name, "description"))
self.assertEqual(todo.modified, frappe.db.get_value("ToDo", todo.name, "modified"))
frappe.db.set_value(
"ToDo",
todo.name,
"description",
"test_set_value change 1",
modified=custom_modified,
modified_by=custom_modified_by,
)
self.assertTupleEqual(
(custom_modified, custom_modified_by),
frappe.db.get_value("ToDo", todo.name, ["modified", "modified_by"]),
)
def test_for_update(self):
self.todo1.reload()
with patch.object(Database, "sql") as sql_called:
frappe.db.set_value(
self.todo1.doctype,
self.todo1.name,
"description",
f"{self.todo1.description}-edit by `test_for_update`",
)
first_query = sql_called.call_args_list[0].args[0]
second_query = sql_called.call_args_list[1].args[0]
self.assertTrue(sql_called.call_count == 2)
self.assertTrue("FOR UPDATE" in first_query)
if frappe.conf.db_type == "postgres":
from frappe.database.postgres.database import modify_query
self.assertTrue(modify_query("UPDATE `tabToDo` SET") in second_query)
if frappe.conf.db_type == "mariadb":
self.assertTrue("UPDATE `tabToDo` SET" in second_query)
def test_cleared_cache(self):
self.todo2.reload()
with patch.object(frappe, "clear_document_cache") as clear_cache:
frappe.db.set_value(
self.todo2.doctype,
self.todo2.name,
"description",
f"{self.todo2.description}-edit by `test_cleared_cache`",
)
clear_cache.assert_called()
def test_update_alias(self):
args = (self.todo1.doctype, self.todo1.name, "description", "Updated by `test_update_alias`")
kwargs = {
"for_update": False,
"modified": None,
"modified_by": None,
"update_modified": True,
"debug": False,
}
self.assertTrue("return self.set_value(" in inspect.getsource(frappe.db.update))
with patch.object(Database, "set_value") as set_value:
frappe.db.update(*args, **kwargs)
set_value.assert_called_once()
set_value.assert_called_with(*args, **kwargs)
@classmethod
def tearDownClass(cls):
frappe.db.rollback()
@run_only_if(db_type_is.POSTGRES)
class TestDDLCommandsPost(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.sql(
f"""
CREATE TABLE "tab{self.test_table_name}" ("id" INT NULL, content text, PRIMARY KEY ("id"))
"""
)
def tearDown(self) -> None:
frappe.db.sql(f'DROP TABLE "tab{self.test_table_name}"')
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_name = 'tab{new_table_name}'
);
"""
)
self.assertTrue(check_exists[0][0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual([("id",), ("content",)], frappe.db.describe(self.test_table_name))
def test_change_type(self) -> None:
frappe.db.change_column_type(self.test_table_name, "id", "varchar(255)")
check_change = frappe.db.sql(
f"""
SELECT
table_name,
column_name,
data_type
FROM
information_schema.columns
WHERE
table_name = 'tab{self.test_table_name}'
"""
)
self.assertGreater(len(check_change), 0)
self.assertIn("character varying", check_change[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SELECT indexname
FROM pg_indexes
WHERE tablename = 'tab{self.test_table_name}'
AND indexname = '{index_name}' ;
""",
)
self.assertEqual(len(indexs_in_table), 1)
@run_only_if(db_type_is.POSTGRES)
def test_modify_query(self):
from frappe.database.postgres.database import modify_query
query = "select * from `tabtree b` where lft > 13 and rgt <= 16 and name =1.0 and parent = 4134qrsdc and isgroup = 1.00045"
self.assertEqual(
"select * from \"tabtree b\" where lft > '13' and rgt <= '16' and name = '1' and parent = 4134qrsdc and isgroup = 1.00045",
modify_query(query),
)
query = (
'select locate(".io", "frappe.io"), locate("3", cast(3 as varchar)), locate("3", 3::varchar)'
)
self.assertEqual(
'select strpos( "frappe.io", ".io"), strpos( cast(3 as varchar), "3"), strpos( 3::varchar, "3")',
modify_query(query),
)
@run_only_if(db_type_is.POSTGRES)
def test_modify_values(self):
from frappe.database.postgres.database import modify_values
self.assertEqual(
{"abcd": "23", "efgh": "23", "ijkl": 23.0345, "mnop": "wow"},
modify_values({"abcd": 23, "efgh": 23.0, "ijkl": 23.0345, "mnop": "wow"}),
)
self.assertEqual(["23", "23", 23.00004345, "wow"], modify_values((23, 23.0, 23.00004345, "wow")))
def test_sequence_table_creation(self):
from frappe.core.doctype.doctype.test_doctype import new_doctype
dt = new_doctype("autoinc_dt_seq_test", autoname="autoincrement").insert(ignore_permissions=True)
if frappe.db.db_type == "postgres":
self.assertTrue(
frappe.db.sql(
"""select sequence_name FROM information_schema.sequences
where sequence_name ilike 'autoinc_dt_seq_test%'"""
)[0][0]
)
else:
self.assertTrue(
frappe.db.sql(
"""select data_type FROM information_schema.tables
where table_type = 'SEQUENCE' and table_name like 'autoinc_dt_seq_test%'"""
)[0][0]
)
dt.delete(ignore_permissions=True)
| true | true |
f722adb301574075281fd134e69896190119db40 | 2,232 | py | Python | samples/refresh_tasks.py | rrobertsrgare/server-client-python | 3f4db45584fb352e9f21c870027a2302cb824909 | [
"CC0-1.0",
"MIT"
] | 1 | 2021-02-17T18:39:09.000Z | 2021-02-17T18:39:09.000Z | samples/refresh_tasks.py | jorwoods/server-client-python | fefd6f18d8a6617829c6323879d2c3ed77a4cda6 | [
"CC0-1.0",
"MIT"
] | 1 | 2021-02-24T00:26:46.000Z | 2021-02-24T00:26:46.000Z | samples/refresh_tasks.py | jorwoods/server-client-python | fefd6f18d8a6617829c6323879d2c3ed77a4cda6 | [
"CC0-1.0",
"MIT"
] | 1 | 2021-02-17T18:39:10.000Z | 2021-02-17T18:39:10.000Z | ####
# This script demonstrates how to use the Tableau Server Client
# to query extract refresh tasks and run them as needed.
#
# To run the script, you must have installed Python 3.5 or later.
####
import argparse
import getpass
import logging
import tableauserverclient as TSC
def handle_run(server, args):
    """Trigger the extract refresh task identified by ``args.id`` and print the server's response."""
    target = server.tasks.get_by_id(args.id)
    result = server.tasks.run(target)
    print(result)
def handle_list(server, _):
    """Print every extract refresh task on the server, one per line."""
    all_tasks, _pagination = server.tasks.get()
    for item in all_tasks:
        print("{}".format(item))
def handle_info(server, args):
    """Print the details of the single refresh task looked up by ``args.id``."""
    details = server.tasks.get_by_id(args.id)
    print("{}".format(details))
def main():
    """CLI entry point: parse arguments, sign in to Tableau Server, dispatch a subcommand."""
    parser = argparse.ArgumentParser(description='Get all of the refresh tasks available on a server')
    parser.add_argument('--server', '-s', required=True, help='server address')
    parser.add_argument('--username', '-u', required=True, help='username to sign into server')
    parser.add_argument('--site', '-S', default=None)
    parser.add_argument('-p', default=None)
    parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
                        help='desired logging level (set to error by default)')
    # Subcommands: 'list' prints every refresh task; 'run'/'info' act on one task id.
    subcommands = parser.add_subparsers()
    list_arguments = subcommands.add_parser('list')
    list_arguments.set_defaults(func=handle_list)
    run_arguments = subcommands.add_parser('run')
    run_arguments.add_argument('id', default=None)
    run_arguments.set_defaults(func=handle_run)
    info_arguments = subcommands.add_parser('info')
    info_arguments.add_argument('id', default=None)
    info_arguments.set_defaults(func=handle_info)
    args = parser.parse_args()
    # Prompt interactively when the password was not supplied via -p.
    if args.p is None:
        password = getpass.getpass("Password: ")
    else:
        password = args.p
    # Set logging level based on user input, or error by default
    logging_level = getattr(logging, args.logging_level.upper())
    logging.basicConfig(level=logging_level)
    # SIGN IN
    tableau_auth = TSC.TableauAuth(args.username, password, args.site)
    server = TSC.Server(args.server)
    # The tasks endpoints used here require at least REST API version 2.6.
    server.version = '2.6'
    with server.auth.sign_in(tableau_auth):
        # Invoke the handler bound by set_defaults for the chosen subcommand.
        args.func(server, args)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 29.76 | 102 | 0.692652 | t argparse
import getpass
import logging
import tableauserverclient as TSC
def handle_run(server, args):
task = server.tasks.get_by_id(args.id)
print(server.tasks.run(task))
def handle_list(server, _):
tasks, pagination = server.tasks.get()
for task in tasks:
print("{}".format(task))
def handle_info(server, args):
task = server.tasks.get_by_id(args.id)
print("{}".format(task))
def main():
parser = argparse.ArgumentParser(description='Get all of the refresh tasks available on a server')
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--username', '-u', required=True, help='username to sign into server')
parser.add_argument('--site', '-S', default=None)
parser.add_argument('-p', default=None)
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
subcommands = parser.add_subparsers()
list_arguments = subcommands.add_parser('list')
list_arguments.set_defaults(func=handle_list)
run_arguments = subcommands.add_parser('run')
run_arguments.add_argument('id', default=None)
run_arguments.set_defaults(func=handle_run)
info_arguments = subcommands.add_parser('info')
info_arguments.add_argument('id', default=None)
info_arguments.set_defaults(func=handle_info)
args = parser.parse_args()
if args.p is None:
password = getpass.getpass("Password: ")
else:
password = args.p
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
tableau_auth = TSC.TableauAuth(args.username, password, args.site)
server = TSC.Server(args.server)
server.version = '2.6'
with server.auth.sign_in(tableau_auth):
args.func(server, args)
if __name__ == '__main__':
main()
| true | true |
f722af34017e91c4b0048d9a3ded31d6e006fbea | 4,146 | py | Python | numba/knn/GPU/base_knn.py | Hardcode84/dpbench | e6bc1fc6493cb80a1b5a2ffcca4cc1348dd3ad99 | [
"BSD-2-Clause"
] | null | null | null | numba/knn/GPU/base_knn.py | Hardcode84/dpbench | e6bc1fc6493cb80a1b5a2ffcca4cc1348dd3ad99 | [
"BSD-2-Clause"
] | null | null | null | numba/knn/GPU/base_knn.py | Hardcode84/dpbench | e6bc1fc6493cb80a1b5a2ffcca4cc1348dd3ad99 | [
"BSD-2-Clause"
] | null | null | null | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import sys
import numpy as np
# Feature-vector width used by gen_data_x's default.
DATA_DIM = 16

# Prefer Intel's itimer when available; otherwise fall back to timeit's
# default_timer with a lambda that returns (MOPS, elapsed-seconds).
try:
    import itimer as it
    now = it.itime
    get_mops = it.itime_mops_now
except:
    from timeit import default_timer
    now = default_timer
    get_mops = lambda t0, t1, n: (n / (t1 - t0),t1-t0)

######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################

# make xrange available in python 3
try:
    xrange
except NameError:
    xrange = range

###############################################
def gen_data_x(nopt, data_dim=DATA_DIM):
    """Generate ``nopt`` random feature rows of width ``data_dim``, uniform in [0, 1)."""
    # rand(n, d) is an alias for random_sample((n, d)); same distribution and stream.
    return np.random.random_sample((nopt, data_dim))
def gen_data_y(nopt, classes_num=3):
    """Generate ``nopt`` integer class labels drawn uniformly from [0, classes_num)."""
    # Explicit low/high form of randint(classes_num, size=nopt).
    return np.random.randint(0, classes_num, size=nopt)
##############################################
def run(name, alg, sizes=10, step=2, nopt=2**10):
    """Benchmark driver: time ``alg`` over geometrically growing test-data sizes.

    ``alg(x_train, y_train, x_test, k=...)`` is the kNN implementation under
    test.  MOPS go to perf_output.csv and wall-clock times to runtimes.csv.
    ``sizes``/``step``/``nopt`` only seed the CLI defaults; the command line wins.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--steps', type=int, default=sizes,
                        help='Number of steps')
    parser.add_argument('--step', type=int, default=step,
                        help='Factor for each step')
    parser.add_argument('--size', type=int, default=nopt,
                        help='Initial data size')
    parser.add_argument('--repeat', type=int, default=100,
                        help='Iterations inside measured region')
    parser.add_argument('--text', default='', help='Print with each result')
    args = parser.parse_args()
    nopt = args.size
    repeat = args.repeat
    # Training set size is fixed; only the test set grows across steps.
    train_data_size = 2**10
    # buffering=1 (line-buffered) so partial results survive an aborted run.
    with open('perf_output.csv', 'w', 1) as fd, open("runtimes.csv", 'w', 1) as fd2:
        for _ in xrange(args.steps):
            print("TRAIN_DATA_SIZE: ", train_data_size)
            print("TEST_DATA_SIZE: ", nopt)
            x_train, y_train = gen_data_x(train_data_size), gen_data_y(train_data_size)
            x_test = gen_data_x(nopt)
            n_neighbors = 5
            print('ERF: {}: Size: {}'.format(name, nopt), end=' ', flush=True)
            sys.stdout.flush()
            predictions = alg(x_train, y_train, x_test, k=n_neighbors) # warmup
            t0 = now()
            for _ in xrange(repeat):
                predictions = alg(x_train, y_train, x_test, k=n_neighbors)
            # get_mops returns (MOPS-per-call, total elapsed seconds for the loop).
            mops, time = get_mops(t0, now(), nopt)
            result_mops = mops * repeat
            print('MOPS:', result_mops, args.text)
            fd.write('{},{}\n'.format(nopt, result_mops))
            fd2.write('{},{}\n'.format(nopt, time))
            print("TIME: ", time)
            # Grow the problem and shrink the repeat count for the next step.
            nopt *= args.step
            repeat = max(repeat - args.step, 1)
| 36.052174 | 87 | 0.601785 |
import argparse
import sys
import numpy as np
DATA_DIM = 16
try:
import itimer as it
now = it.itime
get_mops = it.itime_mops_now
except:
from timeit import default_timer
now = default_timer
get_mops = lambda t0, t1, n: (n / (t1 - t0),t1-t0)
| true | true |
f722af587574ebe501b894988e417a8183396162 | 2,561 | py | Python | src/cutty/templates/adapters/cookiecutter/config.py | cjolowicz/cutty | 3a183fb06f5f521eaf1909514cb8c3d9e5b9c193 | [
"MIT"
] | 1 | 2021-11-15T20:27:59.000Z | 2021-11-15T20:27:59.000Z | src/cutty/templates/adapters/cookiecutter/config.py | cjolowicz/cutty | 3a183fb06f5f521eaf1909514cb8c3d9e5b9c193 | [
"MIT"
] | 171 | 2020-07-24T07:30:20.000Z | 2022-03-31T14:05:45.000Z | src/cutty/templates/adapters/cookiecutter/config.py | cjolowicz/cutty | 3a183fb06f5f521eaf1909514cb8c3d9e5b9c193 | [
"MIT"
] | null | null | null | """Cookiecutter loader."""
import json
from collections.abc import Iterator
from typing import Any
from cutty.filesystems.domain.path import Path
from cutty.templates.domain.config import Config
from cutty.templates.domain.variables import Variable
def loadvalue(value: Any) -> Any:
    """Normalize a raw cookiecutter.json value: scalars become strings."""
    # Strings and dicts pass through untouched; the scalar and pass-through
    # type sets are disjoint, so checking them in either order is equivalent.
    if isinstance(value, (str, dict)):
        return value
    if isinstance(value, (bool, int, float)):
        return str(value)
    raise RuntimeError(f"unsupported value type {type(value)}")  # pragma: no cover
def loadvariable(name: str, value: Any) -> Variable:
    """Build a Variable from a cookiecutter.json entry; lists become choice variables."""
    if isinstance(value, list):
        options = tuple(loadvalue(entry) for entry in value)
        # All choices must share a single type; unpacking asserts that.
        [options_type] = {type(option) for option in options}
        return Variable(
            name=name,
            description=name,
            type=options_type,
            default=options[0],
            choices=options,
            interactive=True,
        )

    scalar = loadvalue(value)
    return Variable(
        name=name,
        description=name,
        type=type(scalar),
        default=scalar,
        choices=(),
        interactive=True,
    )
def loadcookiecutterconfig(template: str, path: Path) -> Config:
    """Parse cookiecutter.json into a Config of settings and variables.

    Keys starting with an underscore are settings; all others become
    template variables.  ``_template`` defaults to the template location.
    """
    data = json.loads((path / "cookiecutter.json").read_text())
    assert isinstance(data, dict) and all(  # noqa: S101
        isinstance(name, str) for name in data
    )
    data.setdefault("_template", template)

    settings = {key: val for key, val in data.items() if key.startswith("_")}
    variables = tuple(
        loadvariable(key, val)
        for key, val in data.items()
        if not key.startswith("_")
    )
    return Config(settings, variables)
def findcookiecutterpaths(path: Path, config: Config) -> Iterator[Path]:
    """Yield the single templated project directory inside a Cookiecutter template."""
    for entry in path.iterdir():
        # The project directory is the one named like '{{ cookiecutter.x }}'.
        if all(token in entry.name for token in ("{{", "cookiecutter", "}}")):
            yield entry
            return
    raise RuntimeError("template directory not found")  # pragma: no cover
def findcookiecutterhooks(path: Path) -> Iterator[Path]:
    """Yield pre/post generation hook scripts from the template's hooks directory."""
    hook_names = {"pre_gen_project", "post_gen_project"}
    directory = path / "hooks"
    if not directory.is_dir():
        return
    for entry in directory.iterdir():
        # Skip directories and editor backup files ('~' suffix).
        if entry.is_file() and not entry.name.endswith("~") and entry.stem in hook_names:
            yield entry
| 29.102273 | 85 | 0.634908 | import json
from collections.abc import Iterator
from typing import Any
from cutty.filesystems.domain.path import Path
from cutty.templates.domain.config import Config
from cutty.templates.domain.variables import Variable
def loadvalue(value: Any) -> Any:
if isinstance(value, (bool, int, float)):
return str(value)
if isinstance(value, (str, dict)):
return value
raise RuntimeError(f"unsupported value type {type(value)}")
def loadvariable(name: str, value: Any) -> Variable:
if isinstance(value, list):
choices = tuple(loadvalue(choice) for choice in value)
[valuetype] = set(type(choice) for choice in choices)
return Variable(
name=name,
description=name,
type=valuetype,
default=choices[0],
choices=choices,
interactive=True,
)
value = loadvalue(value)
return Variable(
name=name,
description=name,
type=type(value),
default=value,
choices=(),
interactive=True,
)
def loadcookiecutterconfig(template: str, path: Path) -> Config:
text = (path / "cookiecutter.json").read_text()
data = json.loads(text)
assert isinstance(data, dict) and all(
isinstance(name, str) for name in data
)
data.setdefault("_template", template)
settings = {name: value for name, value in data.items() if name.startswith("_")}
variables = tuple(
loadvariable(name, value)
for name, value in data.items()
if not name.startswith("_")
)
return Config(settings, variables)
def findcookiecutterpaths(path: Path, config: Config) -> Iterator[Path]:
for template_dir in path.iterdir():
if all(token in template_dir.name for token in ("{{", "cookiecutter", "}}")):
break
else:
raise RuntimeError("template directory not found")
yield template_dir
def findcookiecutterhooks(path: Path) -> Iterator[Path]:
hooks = {"pre_gen_project", "post_gen_project"}
hookdir = path / "hooks"
if hookdir.is_dir():
for path in hookdir.iterdir():
if path.is_file() and not path.name.endswith("~") and path.stem in hooks:
yield path
| true | true |
f722b02b0ef579f244024488c3e668d8720b0fef | 726 | py | Python | chat_dump.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | 1 | 2020-09-18T20:48:10.000Z | 2020-09-18T20:48:10.000Z | chat_dump.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | chat_dump.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | import heroprotocol, sys, os, os.path, pprint
from heroprotocol.mpyq import mpyq
sys.path.append(os.path.join(os.getcwd(), "heroprotocol"))
from heroprotocol import protocol29406
archive = mpyq.MPQArchive(sys.argv[-1])
contents = archive.header['user_data_header']['content']
header = protocol29406.decode_replay_header(contents)
baseBuild = header['m_version']['m_baseBuild']
try:
protocol = __import__('protocol%s' % (baseBuild,))
except:
print >> sys.stderr, 'Unsupported base build: %d' % baseBuild
sys.exit(1)
message_events = protocol.decode_replay_message_events(archive.read_file('replay.message.events'))
for message in message_events:
if 'm_string' in message:
print(message['m_string']) | 33 | 98 | 0.750689 | import heroprotocol, sys, os, os.path, pprint
from heroprotocol.mpyq import mpyq
sys.path.append(os.path.join(os.getcwd(), "heroprotocol"))
from heroprotocol import protocol29406
archive = mpyq.MPQArchive(sys.argv[-1])
contents = archive.header['user_data_header']['content']
header = protocol29406.decode_replay_header(contents)
baseBuild = header['m_version']['m_baseBuild']
try:
protocol = __import__('protocol%s' % (baseBuild,))
except:
print >> sys.stderr, 'Unsupported base build: %d' % baseBuild
sys.exit(1)
message_events = protocol.decode_replay_message_events(archive.read_file('replay.message.events'))
for message in message_events:
if 'm_string' in message:
print(message['m_string']) | true | true |
f722b139debca8b53a4596ee33968474cbe0e8ce | 2,296 | py | Python | tketris/__init__.py | andydevs/tketris | 054914a9ad815b009b6792067bd689e36095e8de | [
"MIT"
] | 1 | 2020-09-13T21:29:12.000Z | 2020-09-13T21:29:12.000Z | tketris/__init__.py | andydevs/tketris | 054914a9ad815b009b6792067bd689e36095e8de | [
"MIT"
] | null | null | null | tketris/__init__.py | andydevs/tketris | 054914a9ad815b009b6792067bd689e36095e8de | [
"MIT"
] | null | null | null | """
Tketris
Tetris using tkinter
Author: Anshul Kharbanda
Created: 10 - 11 - 2018
"""
from .game import GameLogic
from .view.board import Board
from .view.side_menu import SideMenu
from tkinter import *
class Tketris(Frame, GameLogic):
    """Top-level Tketris application frame, mixing in the GameLogic rules."""

    DEBUG = False

    def __init__(self, master=None, debug=False):
        """Build the UI, bind the keys, and start a new game."""
        super(Tketris, self).__init__(master)
        self.DEBUG = debug
        self.init_ui()
        self.init_events()
        self.init_game()
        self.start_game()

    def init_ui(self):
        """Lay out the play board on the left and the side menu on the right."""
        self.board = Board(self)
        self.board.pack(side=LEFT, fill=Y)
        self.side_menu = SideMenu(self)
        self.side_menu.pack(side=RIGHT, fill=Y)

    def init_events(self):
        """Bind the arrow keys to the piece-movement handlers."""
        bindings = {
            '<Up>': self.rotate,
            '<Left>': self.move_left,
            '<Right>': self.move_right,
            '<Down>': self.move_down,
        }
        for key, handler in bindings.items():
            self.master.bind(key, handler)

    def reset_game(self):
        """Begin a fresh game, but only once the current one has ended."""
        if not self.game_continue:
            self.start_game()

    def toggle_game_resume(self):
        """Flip between paused and running, and reflect the state in the menu."""
        self.game_resume = not self.game_resume
        self.side_menu.display_game_resume_state(self.game_resume)

    def run_clock(self):
        """Tick the game once per second for as long as the game is alive."""
        if not self.game_continue:
            return
        if self.game_resume:
            self.clock_tick_update()
        self.master.after(1000, self.run_clock)

    def on_start_game(self):
        """Show the in-game menu and kick off the clock."""
        self.side_menu.select_playing()
        self.run_clock()

    def on_game_over(self):
        """Switch to the game-over menu and show the final score."""
        self.side_menu.select_game_over()
        self.side_menu.display_final_score(self.score)

    def on_update_score(self):
        """Refresh the score shown in the in-game menu."""
        self.side_menu.display_score(self.score)
| 25.230769 | 68 | 0.591028 | from .game import GameLogic
from .view.board import Board
from .view.side_menu import SideMenu
from tkinter import *
class Tketris(Frame, GameLogic):
DEBUG = False
def __init__(self, master=None, debug=False):
super(Tketris, self).__init__(master)
self.DEBUG = debug
self.init_ui()
self.init_events()
self.init_game()
self.start_game()
def init_ui(self):
self.board = Board(self)
self.board.pack(side=LEFT, fill=Y)
self.side_menu = SideMenu(self)
self.side_menu.pack(side=RIGHT, fill=Y)
def init_events(self):
self.master.bind('<Up>', self.rotate)
self.master.bind('<Left>', self.move_left)
self.master.bind('<Right>', self.move_right)
self.master.bind('<Down>', self.move_down)
def reset_game(self):
if not self.game_continue:
self.start_game()
def toggle_game_resume(self):
self.game_resume = not self.game_resume
self.side_menu.display_game_resume_state(self.game_resume)
def run_clock(self):
if self.game_continue:
if self.game_resume:
self.clock_tick_update()
self.master.after(1000, self.run_clock)
def on_start_game(self):
self.side_menu.select_playing()
self.run_clock()
def on_game_over(self):
self.side_menu.select_game_over()
self.side_menu.display_final_score(self.score)
def on_update_score(self):
self.side_menu.display_score(self.score)
| true | true |
f722b202bfa2ecdcd28d900f7e723d89f993c74b | 2,840 | py | Python | lambda/comprehend-realtime-text-classification-lambda.py | aws-samples/amazon-comprehend-active-learning-framework | edf01c415a1d0cd52a779c3be4bdabb6c3f3fa00 | [
"MIT-0"
] | 1 | 2021-12-18T00:13:57.000Z | 2021-12-18T00:13:57.000Z | lambda/comprehend-realtime-text-classification-lambda.py | aws-samples/amazon-comprehend-active-learning-framework | edf01c415a1d0cd52a779c3be4bdabb6c3f3fa00 | [
"MIT-0"
] | 1 | 2021-11-22T12:24:01.000Z | 2021-11-22T12:24:01.000Z | lambda/comprehend-realtime-text-classification-lambda.py | aws-samples/amazon-comprehend-active-learning-framework | edf01c415a1d0cd52a779c3be4bdabb6c3f3fa00 | [
"MIT-0"
] | 5 | 2021-01-28T18:55:39.000Z | 2021-09-08T05:56:40.000Z | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
import json
import boto3
from urllib.parse import unquote_plus
import urllib
import time
import os
# AWS clients created once at module load so warm Lambda invocations reuse them.
client = boto3.client(service_name='comprehend', region_name=os.environ['AWS_REGION'], use_ssl=True)
kinesis = boto3.client('firehose', region_name=os.environ['AWS_REGION'])
ssm = boto3.client('ssm', region_name=os.environ['AWS_REGION'])
# Configuration from Lambda environment variables.
kinesis_delivery_stream = os.environ['kinesis_delivery_stream']
comprehend_endpoint = os.environ['comprehend_endpoint_name']
# Empty ssm_key_name means "use comprehend_endpoint directly, skip the SSM lookup".
ssm_key_name = os.environ['ssm_key_name'] or ''
score_threshold = os.environ['score_threshold']
def lambda_handler(event, context):
    """Classify one sentence with a Comprehend custom-classifier endpoint.

    Expects event['body'] to be a JSON string with 'classifier' and 'sentence'
    keys.  Predictions scoring below ``score_threshold`` are also forwarded to
    the Kinesis Firehose stream for later human labeling.  Always returns
    HTTP 200 with CORS headers; the body is the prediction JSON on success or
    the literal string 'failed' on any error.
    """
    print(event)
    body = json.loads(event['body'])
    classifier = body['classifier']
    sentence = body['sentence']
    # Resolve the endpoint ARN: fixed env value, or an SSM parameter lookup.
    if not ssm_key_name:
        comprehend_endpoint_arn = comprehend_endpoint
    else:
        parameter = ssm.get_parameter(Name=ssm_key_name)
        comprehend_endpoint_arn = parameter['Parameter']['Value']
    endpointArn = comprehend_endpoint_arn
    print(endpointArn)
    try:
        response = client.classify_document(Text=sentence,EndpointArn=endpointArn)
        # Take the top-ranked class and its confidence score.
        p = response['Classes'][0]['Name']
        score = response['Classes'][0]['Score']
        #print(f"S:{sentence}, Score:{score}")
        response = {}
        response['utterance']=sentence
        response['prediction']=p
        response['confidence'] = score
        # NOTE: lowconfidencepair aliases response here, so adding 'classifier'
        # below also puts it in the JSON body returned to the caller.
        lowconfidencepair = response
        lowconfidencepair['classifier']=classifier
        score_threshold_float = float(score_threshold)
        lowconfidencepair = json.dumps(lowconfidencepair)+"\n"
        if score < score_threshold_float:
            # write the low-confidence-pair to firehose
            kinesis.put_record(
                DeliveryStreamName=kinesis_delivery_stream,
                Record={"Data":bytes(lowconfidencepair, 'utf-8')})
        return {'statusCode': 200,'headers' : {"X-Requested-With": '*',"Access-Control-Allow-Headers": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',"Access-Control-Allow-Origin": '*',"Access-Control-Allow-Methods": 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'},'body': json.dumps(response)}
    except Exception as e:
        # Best-effort error path: log and still return 200 so CORS headers reach the browser.
        print(e)
        print('Failed')
        return {'statusCode': 200,'headers' : {"X-Requested-With": '*',"Access-Control-Allow-Headers": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',"Access-Control-Allow-Origin": '*',"Access-Control-Allow-Methods": 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'},'body': 'failed'}
| 36.883117 | 315 | 0.684507 |
import json
import boto3
from urllib.parse import unquote_plus
import urllib
import time
import os
client = boto3.client(service_name='comprehend', region_name=os.environ['AWS_REGION'], use_ssl=True)
kinesis = boto3.client('firehose', region_name=os.environ['AWS_REGION'])
ssm = boto3.client('ssm', region_name=os.environ['AWS_REGION'])
kinesis_delivery_stream = os.environ['kinesis_delivery_stream']
comprehend_endpoint = os.environ['comprehend_endpoint_name']
ssm_key_name = os.environ['ssm_key_name'] or ''
score_threshold = os.environ['score_threshold']
def lambda_handler(event, context):
print(event)
body = json.loads(event['body'])
classifier = body['classifier']
sentence = body['sentence']
if not ssm_key_name:
comprehend_endpoint_arn = comprehend_endpoint
else:
parameter = ssm.get_parameter(Name=ssm_key_name)
comprehend_endpoint_arn = parameter['Parameter']['Value']
endpointArn = comprehend_endpoint_arn
print(endpointArn)
try:
response = client.classify_document(Text=sentence,EndpointArn=endpointArn)
p = response['Classes'][0]['Name']
score = response['Classes'][0]['Score']
response = {}
response['utterance']=sentence
response['prediction']=p
response['confidence'] = score
lowconfidencepair = response
lowconfidencepair['classifier']=classifier
score_threshold_float = float(score_threshold)
lowconfidencepair = json.dumps(lowconfidencepair)+"\n"
if score < score_threshold_float:
kinesis.put_record(
DeliveryStreamName=kinesis_delivery_stream,
Record={"Data":bytes(lowconfidencepair, 'utf-8')})
return {'statusCode': 200,'headers' : {"X-Requested-With": '*',"Access-Control-Allow-Headers": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',"Access-Control-Allow-Origin": '*',"Access-Control-Allow-Methods": 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'},'body': json.dumps(response)}
except Exception as e:
print(e)
print('Failed')
return {'statusCode': 200,'headers' : {"X-Requested-With": '*',"Access-Control-Allow-Headers": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',"Access-Control-Allow-Origin": '*',"Access-Control-Allow-Methods": 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT'},'body': 'failed'}
| true | true |
f722b2ad5211380a3cd9cba27dee50ff13d03247 | 863 | py | Python | world_creator/data_structures.py | lilSpeedwagon/zaWRka-project | e40c6520183c802e9c61faabeaa079bced4e8b00 | [
"MIT"
] | 1 | 2019-12-11T17:18:42.000Z | 2019-12-11T17:18:42.000Z | world_creator/data_structures.py | lilSpeedwagon/zaWRka-project | e40c6520183c802e9c61faabeaa079bced4e8b00 | [
"MIT"
] | null | null | null | world_creator/data_structures.py | lilSpeedwagon/zaWRka-project | e40c6520183c802e9c61faabeaa079bced4e8b00 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from copy import deepcopy
class Vector2D:
    """A simple mutable 2-D vector with componentwise subtraction and scalar division."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __truediv__(self, other):
        """Return a copy of this vector with both components divided by scalar ``other``."""
        # deepcopy keeps the concrete subclass (Point2D/Size2D) in the result.
        answer = deepcopy(self)
        answer.x = self.x / other
        answer.y = self.y / other
        return answer

    def __sub__(self, other):
        """Return the componentwise difference ``self - other`` as a copy of ``self``."""
        answer = deepcopy(self)
        answer.x = self.x - other.x
        answer.y = self.y - other.y
        return answer

    def __str__(self):
        return "[{0}, {1}]".format(self.x, self.y)

    def __eq__(self, other):
        """Componentwise equality; non-vector operands are delegated back to Python."""
        # Fixed: previously comparing against a non-vector raised AttributeError
        # instead of evaluating to False.
        if not isinstance(other, Vector2D):
            return NotImplemented
        return self.x == other.x and self.y == other.y

    @staticmethod
    def from_list(data: list):
        """Build a Vector2D from a two-element sequence [x, y]."""
        # NOTE: always returns a plain Vector2D, even when called on a subclass.
        return Vector2D(data[0], data[1])

    def as_list(self):
        """Return the components as a plain [x, y] list."""
        return list([self.x, self.y])
class Point2D(Vector2D):
    """A 2-D position; identical to Vector2D, the name conveys intent."""
    pass
class Size2D(Vector2D):
    """A 2-D extent (width/height); identical to Vector2D, the name conveys intent."""
    pass
| 22.128205 | 54 | 0.551564 |
from copy import deepcopy
class Vector2D:
def __init__(self, x, y):
self.x = x
self.y = y
def __truediv__(self, other):
answer = deepcopy(self)
answer.x = self.x / other
answer.y = self.y / other
return answer
def __sub__(self, other):
answer = deepcopy(self)
answer.x = self.x - other.x
answer.y = self.y - other.y
return answer
def __str__(self):
return "[{0}, {1}]".format(self.x, self.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
@staticmethod
def from_list(data: list):
return Vector2D(data[0], data[1])
def as_list(self):
return list([self.x, self.y])
class Point2D(Vector2D):
pass
class Size2D(Vector2D):
pass
| true | true |
f722b3e29e34689cf05526ca96e5fb3e45c1cb35 | 318 | py | Python | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | from gym.envs.registration import register
# Expose the Jiminy cart-pole simulation under the Gym environment registry.
register(
    id='jiminy-cartpole-v0',
    entry_point='gym_jiminy.envs:JiminyCartPoleEnv',
    reward_threshold=10000.0,
)
register(
id='jiminy-acrobot-v0',
entry_point='gym_jiminy.envs:JiminyAcrobotEnv',
max_episode_steps=12000,
reward_threshold=-3000.0
) | 22.714286 | 52 | 0.742138 | from gym.envs.registration import register
register(
id='jiminy-cartpole-v0',
entry_point='gym_jiminy.envs:JiminyCartPoleEnv',
reward_threshold=10000.0,
)
register(
id='jiminy-acrobot-v0',
entry_point='gym_jiminy.envs:JiminyAcrobotEnv',
max_episode_steps=12000,
reward_threshold=-3000.0
) | true | true |
f722b4a8133853ddcde5fa36ed0487ffd06f2773 | 37,136 | py | Python | container/google/cloud/container_v1beta1/proto/cluster_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 2 | 2021-11-26T07:08:43.000Z | 2022-03-07T20:20:04.000Z | container/google/cloud/container_v1beta1/proto/cluster_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | container/google/cloud/container_v1beta1/proto/cluster_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 2 | 2019-07-18T00:05:31.000Z | 2019-11-27T14:17:22.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.container_v1beta1.proto import (
cluster_service_pb2 as google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ClusterManagerStub(object):
    """Google Kubernetes Engine Cluster Manager v1beta1
    """

    # (RPC name, response message name) for every unary-unary method of the
    # ClusterManager service.  Each request message in cluster_service.proto is
    # named "<RPC name>Request", so it is derived instead of being listed; a
    # response name of None stands for google.protobuf.Empty.
    _METHODS = (
        ("ListClusters", "ListClustersResponse"),
        ("GetCluster", "Cluster"),
        ("CreateCluster", "Operation"),
        ("UpdateCluster", "Operation"),
        ("UpdateNodePool", "Operation"),
        ("SetNodePoolAutoscaling", "Operation"),
        ("SetLoggingService", "Operation"),
        ("SetMonitoringService", "Operation"),
        ("SetAddonsConfig", "Operation"),
        ("SetLocations", "Operation"),
        ("UpdateMaster", "Operation"),
        ("SetMasterAuth", "Operation"),
        ("DeleteCluster", "Operation"),
        ("ListOperations", "ListOperationsResponse"),
        ("GetOperation", "Operation"),
        ("CancelOperation", None),
        ("GetServerConfig", "ServerConfig"),
        ("ListNodePools", "ListNodePoolsResponse"),
        ("GetNodePool", "NodePool"),
        ("CreateNodePool", "Operation"),
        ("DeleteNodePool", "Operation"),
        ("RollbackNodePoolUpgrade", "Operation"),
        ("SetNodePoolManagement", "Operation"),
        ("SetLabels", "Operation"),
        ("SetLegacyAbac", "Operation"),
        ("StartIPRotation", "Operation"),
        ("CompleteIPRotation", "Operation"),
        ("SetNodePoolSize", "Operation"),
        ("SetNetworkPolicy", "Operation"),
        ("SetMaintenancePolicy", "Operation"),
        ("ListUsableSubnetworks", "ListUsableSubnetworksResponse"),
        ("ListLocations", "ListLocationsResponse"),
    )

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        messages = (
            google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2
        )
        for rpc_name, response_name in self._METHODS:
            if response_name is None:
                response_cls = google_dot_protobuf_dot_empty__pb2.Empty
            else:
                response_cls = getattr(messages, response_name)
            request_cls = getattr(messages, rpc_name + "Request")
            # One callable attribute per RPC, e.g. self.ListClusters(...).
            setattr(
                self,
                rpc_name,
                channel.unary_unary(
                    "/google.container.v1beta1.ClusterManager/" + rpc_name,
                    request_serializer=request_cls.SerializeToString,
                    response_deserializer=response_cls.FromString,
                ),
            )
class ClusterManagerServicer(object):
    """Google Kubernetes Engine Cluster Manager v1beta1
    """

    @staticmethod
    def _unimplemented(context):
        """Report UNIMPLEMENTED on *context* and raise NotImplementedError.

        Shared by every stub method below; subclasses are expected to
        override the individual RPC methods with real implementations.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListClusters(self, request, context):
        """Lists all clusters owned by a project in either the specified zone
        or all zones.
        """
        self._unimplemented(context)

    def GetCluster(self, request, context):
        """Gets the details for a specific cluster.
        """
        self._unimplemented(context)

    def CreateCluster(self, request, context):
        """Creates a cluster, consisting of the specified number and type of
        Google Compute Engine instances.

        By default the cluster is created in the project's default network;
        one firewall is added, per-node routes are created so containers can
        reach all other instances in the cluster, and an entry is added to
        the project's global metadata recording the CIDR range the cluster
        uses.
        """
        self._unimplemented(context)

    def UpdateCluster(self, request, context):
        """Updates the settings for a specific cluster.
        """
        self._unimplemented(context)

    def UpdateNodePool(self, request, context):
        """Updates the version and/or image type of a specific node pool.
        """
        self._unimplemented(context)

    def SetNodePoolAutoscaling(self, request, context):
        """Sets the autoscaling settings of a specific node pool.
        """
        self._unimplemented(context)

    def SetLoggingService(self, request, context):
        """Sets the logging service for a specific cluster.
        """
        self._unimplemented(context)

    def SetMonitoringService(self, request, context):
        """Sets the monitoring service for a specific cluster.
        """
        self._unimplemented(context)

    def SetAddonsConfig(self, request, context):
        """Sets the addons for a specific cluster.
        """
        self._unimplemented(context)

    def SetLocations(self, request, context):
        """Sets the locations for a specific cluster.
        """
        self._unimplemented(context)

    def UpdateMaster(self, request, context):
        """Updates the master for a specific cluster.
        """
        self._unimplemented(context)

    def SetMasterAuth(self, request, context):
        """Used to set master auth materials.

        Currently supports changing the admin password for a specific
        cluster, either via password generation or an explicitly provided
        password; basic_auth.csv is modified and the K8S API server reset.
        """
        self._unimplemented(context)

    def DeleteCluster(self, request, context):
        """Deletes the cluster, including the Kubernetes endpoint and all
        worker nodes.

        Firewalls and routes configured during cluster creation are also
        deleted.  Other Google Compute Engine resources in use by the
        cluster (e.g. load balancer resources) are not deleted if they were
        not present at the initial create time.
        """
        self._unimplemented(context)

    def ListOperations(self, request, context):
        """Lists all operations in a project in a specific zone or all zones.
        """
        self._unimplemented(context)

    def GetOperation(self, request, context):
        """Gets the specified operation.
        """
        self._unimplemented(context)

    def CancelOperation(self, request, context):
        """Cancels the specified operation.
        """
        self._unimplemented(context)

    def GetServerConfig(self, request, context):
        """Returns configuration info about the Kubernetes Engine service.
        """
        self._unimplemented(context)

    def ListNodePools(self, request, context):
        """Lists the node pools for a cluster.
        """
        self._unimplemented(context)

    def GetNodePool(self, request, context):
        """Retrieves the node pool requested.
        """
        self._unimplemented(context)

    def CreateNodePool(self, request, context):
        """Creates a node pool for a cluster.
        """
        self._unimplemented(context)

    def DeleteNodePool(self, request, context):
        """Deletes a node pool from a cluster.
        """
        self._unimplemented(context)

    def RollbackNodePoolUpgrade(self, request, context):
        """Rolls back a previously Aborted or Failed NodePool upgrade.

        This is a no-op if the last upgrade completed successfully.
        """
        self._unimplemented(context)

    def SetNodePoolManagement(self, request, context):
        """Sets the NodeManagement options for a node pool.
        """
        self._unimplemented(context)

    def SetLabels(self, request, context):
        """Sets labels on a cluster.
        """
        self._unimplemented(context)

    def SetLegacyAbac(self, request, context):
        """Enables or disables the ABAC authorization mechanism on a cluster.
        """
        self._unimplemented(context)

    def StartIPRotation(self, request, context):
        """Start master IP rotation.
        """
        self._unimplemented(context)

    def CompleteIPRotation(self, request, context):
        """Completes master IP rotation.
        """
        self._unimplemented(context)

    def SetNodePoolSize(self, request, context):
        """Sets the size for a specific node pool.
        """
        self._unimplemented(context)

    def SetNetworkPolicy(self, request, context):
        """Enables/Disables Network Policy for a cluster.
        """
        self._unimplemented(context)

    def SetMaintenancePolicy(self, request, context):
        """Sets the maintenance policy for a cluster.
        """
        self._unimplemented(context)

    def ListUsableSubnetworks(self, request, context):
        """Lists subnetworks that are usable for creating clusters in a
        project.
        """
        self._unimplemented(context)

    def ListLocations(self, request, context):
        """Used to fetch locations that offer GKE.
        """
        self._unimplemented(context)
def add_ClusterManagerServicer_to_server(servicer, server):
    """Registers every ClusterManager RPC handler of *servicer* on *server*."""
    messages = (
        google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2
    )
    # (RPC name, response message name); each request message is named
    # "<RPC name>Request", and a response name of None stands for
    # google.protobuf.Empty.
    method_specs = (
        ("ListClusters", "ListClustersResponse"),
        ("GetCluster", "Cluster"),
        ("CreateCluster", "Operation"),
        ("UpdateCluster", "Operation"),
        ("UpdateNodePool", "Operation"),
        ("SetNodePoolAutoscaling", "Operation"),
        ("SetLoggingService", "Operation"),
        ("SetMonitoringService", "Operation"),
        ("SetAddonsConfig", "Operation"),
        ("SetLocations", "Operation"),
        ("UpdateMaster", "Operation"),
        ("SetMasterAuth", "Operation"),
        ("DeleteCluster", "Operation"),
        ("ListOperations", "ListOperationsResponse"),
        ("GetOperation", "Operation"),
        ("CancelOperation", None),
        ("GetServerConfig", "ServerConfig"),
        ("ListNodePools", "ListNodePoolsResponse"),
        ("GetNodePool", "NodePool"),
        ("CreateNodePool", "Operation"),
        ("DeleteNodePool", "Operation"),
        ("RollbackNodePoolUpgrade", "Operation"),
        ("SetNodePoolManagement", "Operation"),
        ("SetLabels", "Operation"),
        ("SetLegacyAbac", "Operation"),
        ("StartIPRotation", "Operation"),
        ("CompleteIPRotation", "Operation"),
        ("SetNodePoolSize", "Operation"),
        ("SetNetworkPolicy", "Operation"),
        ("SetMaintenancePolicy", "Operation"),
        ("ListUsableSubnetworks", "ListUsableSubnetworksResponse"),
        ("ListLocations", "ListLocationsResponse"),
    )
    rpc_method_handlers = {}
    for rpc_name, response_name in method_specs:
        if response_name is None:
            response_cls = google_dot_protobuf_dot_empty__pb2.Empty
        else:
            response_cls = getattr(messages, response_name)
        rpc_method_handlers[rpc_name] = grpc.unary_unary_rpc_method_handler(
            getattr(servicer, rpc_name),
            request_deserializer=getattr(
                messages, rpc_name + "Request"
            ).FromString,
            response_serializer=response_cls.SerializeToString,
        )
    generic_handler = grpc.method_handlers_generic_handler(
        "google.container.v1beta1.ClusterManager", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| 61.585406 | 156 | 0.775986 |
import grpc
from google.cloud.container_v1beta1.proto import (
cluster_service_pb2 as google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ClusterManagerStub(object):
    """Client-side stub for the ``google.container.v1beta1.ClusterManager`` service.

    Each RPC of the service is exposed as an attribute holding a grpc
    unary-unary multi-callable bound to the channel given at construction.
    """

    def __init__(self, channel):
        """Create one unary-unary multi-callable per RPC on *channel*.

        Args:
            channel: A ``grpc.Channel`` connected to the service.
        """
        pb2 = google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2
        empty_pb2 = google_dot_protobuf_dot_empty__pb2
        prefix = "/google.container.v1beta1.ClusterManager/"
        # (rpc name, request message, response message), in service order.
        rpc_specs = [
            ("ListClusters", pb2.ListClustersRequest, pb2.ListClustersResponse),
            ("GetCluster", pb2.GetClusterRequest, pb2.Cluster),
            ("CreateCluster", pb2.CreateClusterRequest, pb2.Operation),
            ("UpdateCluster", pb2.UpdateClusterRequest, pb2.Operation),
            ("UpdateNodePool", pb2.UpdateNodePoolRequest, pb2.Operation),
            ("SetNodePoolAutoscaling", pb2.SetNodePoolAutoscalingRequest, pb2.Operation),
            ("SetLoggingService", pb2.SetLoggingServiceRequest, pb2.Operation),
            ("SetMonitoringService", pb2.SetMonitoringServiceRequest, pb2.Operation),
            ("SetAddonsConfig", pb2.SetAddonsConfigRequest, pb2.Operation),
            ("SetLocations", pb2.SetLocationsRequest, pb2.Operation),
            ("UpdateMaster", pb2.UpdateMasterRequest, pb2.Operation),
            ("SetMasterAuth", pb2.SetMasterAuthRequest, pb2.Operation),
            ("DeleteCluster", pb2.DeleteClusterRequest, pb2.Operation),
            ("ListOperations", pb2.ListOperationsRequest, pb2.ListOperationsResponse),
            ("GetOperation", pb2.GetOperationRequest, pb2.Operation),
            ("CancelOperation", pb2.CancelOperationRequest, empty_pb2.Empty),
            ("GetServerConfig", pb2.GetServerConfigRequest, pb2.ServerConfig),
            ("ListNodePools", pb2.ListNodePoolsRequest, pb2.ListNodePoolsResponse),
            ("GetNodePool", pb2.GetNodePoolRequest, pb2.NodePool),
            ("CreateNodePool", pb2.CreateNodePoolRequest, pb2.Operation),
            ("DeleteNodePool", pb2.DeleteNodePoolRequest, pb2.Operation),
            ("RollbackNodePoolUpgrade", pb2.RollbackNodePoolUpgradeRequest, pb2.Operation),
            ("SetNodePoolManagement", pb2.SetNodePoolManagementRequest, pb2.Operation),
            ("SetLabels", pb2.SetLabelsRequest, pb2.Operation),
            ("SetLegacyAbac", pb2.SetLegacyAbacRequest, pb2.Operation),
            ("StartIPRotation", pb2.StartIPRotationRequest, pb2.Operation),
            ("CompleteIPRotation", pb2.CompleteIPRotationRequest, pb2.Operation),
            ("SetNodePoolSize", pb2.SetNodePoolSizeRequest, pb2.Operation),
            ("SetNetworkPolicy", pb2.SetNetworkPolicyRequest, pb2.Operation),
            ("SetMaintenancePolicy", pb2.SetMaintenancePolicyRequest, pb2.Operation),
            ("ListUsableSubnetworks", pb2.ListUsableSubnetworksRequest, pb2.ListUsableSubnetworksResponse),
            ("ListLocations", pb2.ListLocationsRequest, pb2.ListLocationsResponse),
        ]
        for rpc_name, request_cls, response_cls in rpc_specs:
            setattr(
                self,
                rpc_name,
                channel.unary_unary(
                    prefix + rpc_name,
                    request_serializer=request_cls.SerializeToString,
                    response_deserializer=response_cls.FromString,
                ),
            )
class ClusterManagerServicer(object):
    """Server-side skeleton for the ClusterManager service.

    Every RPC aborts with ``UNIMPLEMENTED`` by default; subclasses override
    the methods they actually support.
    """

    @staticmethod
    def _unimplemented(context):
        # Common behaviour of every default handler: mark the grpc call as
        # unimplemented, then raise locally as well.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListClusters(self, request, context):
        self._unimplemented(context)

    def GetCluster(self, request, context):
        self._unimplemented(context)

    def CreateCluster(self, request, context):
        self._unimplemented(context)

    def UpdateCluster(self, request, context):
        self._unimplemented(context)

    def UpdateNodePool(self, request, context):
        self._unimplemented(context)

    def SetNodePoolAutoscaling(self, request, context):
        self._unimplemented(context)

    def SetLoggingService(self, request, context):
        self._unimplemented(context)

    def SetMonitoringService(self, request, context):
        self._unimplemented(context)

    def SetAddonsConfig(self, request, context):
        self._unimplemented(context)

    def SetLocations(self, request, context):
        self._unimplemented(context)

    def UpdateMaster(self, request, context):
        self._unimplemented(context)

    def SetMasterAuth(self, request, context):
        self._unimplemented(context)

    def DeleteCluster(self, request, context):
        self._unimplemented(context)

    def ListOperations(self, request, context):
        self._unimplemented(context)

    def GetOperation(self, request, context):
        self._unimplemented(context)

    def CancelOperation(self, request, context):
        self._unimplemented(context)

    def GetServerConfig(self, request, context):
        self._unimplemented(context)

    def ListNodePools(self, request, context):
        self._unimplemented(context)

    def GetNodePool(self, request, context):
        self._unimplemented(context)

    def CreateNodePool(self, request, context):
        self._unimplemented(context)

    def DeleteNodePool(self, request, context):
        self._unimplemented(context)

    def RollbackNodePoolUpgrade(self, request, context):
        self._unimplemented(context)

    def SetNodePoolManagement(self, request, context):
        self._unimplemented(context)

    def SetLabels(self, request, context):
        self._unimplemented(context)

    def SetLegacyAbac(self, request, context):
        self._unimplemented(context)

    def StartIPRotation(self, request, context):
        self._unimplemented(context)

    def CompleteIPRotation(self, request, context):
        self._unimplemented(context)

    def SetNodePoolSize(self, request, context):
        self._unimplemented(context)

    def SetNetworkPolicy(self, request, context):
        self._unimplemented(context)

    def SetMaintenancePolicy(self, request, context):
        self._unimplemented(context)

    def ListUsableSubnetworks(self, request, context):
        self._unimplemented(context)

    def ListLocations(self, request, context):
        self._unimplemented(context)
def add_ClusterManagerServicer_to_server(servicer, server):
    """Register *servicer*'s handlers for every ClusterManager RPC on *server*."""
    pb2 = google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2
    empty_pb2 = google_dot_protobuf_dot_empty__pb2
    # (rpc name, request message, response message), in service order.
    rpc_specs = [
        ("ListClusters", pb2.ListClustersRequest, pb2.ListClustersResponse),
        ("GetCluster", pb2.GetClusterRequest, pb2.Cluster),
        ("CreateCluster", pb2.CreateClusterRequest, pb2.Operation),
        ("UpdateCluster", pb2.UpdateClusterRequest, pb2.Operation),
        ("UpdateNodePool", pb2.UpdateNodePoolRequest, pb2.Operation),
        ("SetNodePoolAutoscaling", pb2.SetNodePoolAutoscalingRequest, pb2.Operation),
        ("SetLoggingService", pb2.SetLoggingServiceRequest, pb2.Operation),
        ("SetMonitoringService", pb2.SetMonitoringServiceRequest, pb2.Operation),
        ("SetAddonsConfig", pb2.SetAddonsConfigRequest, pb2.Operation),
        ("SetLocations", pb2.SetLocationsRequest, pb2.Operation),
        ("UpdateMaster", pb2.UpdateMasterRequest, pb2.Operation),
        ("SetMasterAuth", pb2.SetMasterAuthRequest, pb2.Operation),
        ("DeleteCluster", pb2.DeleteClusterRequest, pb2.Operation),
        ("ListOperations", pb2.ListOperationsRequest, pb2.ListOperationsResponse),
        ("GetOperation", pb2.GetOperationRequest, pb2.Operation),
        ("CancelOperation", pb2.CancelOperationRequest, empty_pb2.Empty),
        ("GetServerConfig", pb2.GetServerConfigRequest, pb2.ServerConfig),
        ("ListNodePools", pb2.ListNodePoolsRequest, pb2.ListNodePoolsResponse),
        ("GetNodePool", pb2.GetNodePoolRequest, pb2.NodePool),
        ("CreateNodePool", pb2.CreateNodePoolRequest, pb2.Operation),
        ("DeleteNodePool", pb2.DeleteNodePoolRequest, pb2.Operation),
        ("RollbackNodePoolUpgrade", pb2.RollbackNodePoolUpgradeRequest, pb2.Operation),
        ("SetNodePoolManagement", pb2.SetNodePoolManagementRequest, pb2.Operation),
        ("SetLabels", pb2.SetLabelsRequest, pb2.Operation),
        ("SetLegacyAbac", pb2.SetLegacyAbacRequest, pb2.Operation),
        ("StartIPRotation", pb2.StartIPRotationRequest, pb2.Operation),
        ("CompleteIPRotation", pb2.CompleteIPRotationRequest, pb2.Operation),
        ("SetNodePoolSize", pb2.SetNodePoolSizeRequest, pb2.Operation),
        ("SetNetworkPolicy", pb2.SetNetworkPolicyRequest, pb2.Operation),
        ("SetMaintenancePolicy", pb2.SetMaintenancePolicyRequest, pb2.Operation),
        ("ListUsableSubnetworks", pb2.ListUsableSubnetworksRequest, pb2.ListUsableSubnetworksResponse),
        ("ListLocations", pb2.ListLocationsRequest, pb2.ListLocationsResponse),
    ]
    rpc_method_handlers = {
        rpc_name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, rpc_name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for rpc_name, request_cls, response_cls in rpc_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.container.v1beta1.ClusterManager", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| true | true |
f722b5a2cf06650e9185d063de48a9d6666ad3ed | 7,042 | py | Python | graph-kernels2/calculate_kernels.py | OminiaVincit/scale-variant-topo | 6945bc42aacd0d71a6fb472c87e09da223821e1e | [
"MIT"
] | 8 | 2018-11-09T21:59:59.000Z | 2020-07-22T19:02:10.000Z | graph-kernels2/calculate_kernels.py | OminiaVincit/scale-variant-topo | 6945bc42aacd0d71a6fb472c87e09da223821e1e | [
"MIT"
] | null | null | null | graph-kernels2/calculate_kernels.py | OminiaVincit/scale-variant-topo | 6945bc42aacd0d71a6fb472c87e09da223821e1e | [
"MIT"
] | null | null | null | import numpy as np
import os
import time
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
from grakel import datasets
from grakel import GraphKernel
from grakel.kernels import VertexHistogram, ShortestPath, WeisfeilerLehman, RandomWalkLabeled, MultiscaleLaplacianFast
from six import itervalues, iteritems
import argparse
def sec_to_time(sec):
    """Format a duration given in seconds as a human-readable string.

    Examples: ``90061`` -> ``"1 d 1 h 1 m 1 s"``; ``0.5`` -> ``"0.5 s"``.
    The sub-minute remainder is rounded to two decimals.  A zero (or
    negative) duration is rendered as ``"0 s"`` instead of the empty
    string the previous implementation returned.
    """
    parts = []
    # Peel off whole days, hours and minutes in turn.
    for unit, suffix in ((86400, " d"), (3600, " h"), (60, " m")):
        count = int(sec // unit)
        if count > 0:
            sec -= unit * count
            parts.append(str(count) + suffix)
    if sec > 0:
        # round() keeps ints as ints, so e.g. "1 s" rather than "1.0 s".
        parts.append(str(round(sec, 2)) + " s")
    return " ".join(parts) if parts else "0 s"
# Kernel configurations for datasets whose graphs carry discrete node
# labels.  The dict format ("name", plus kernel-specific options) matches
# grakel's GraphKernel spec lists — presumably passed as
# GraphKernel(kernel=...) further down; confirm against the caller.
lb_kernels = {
    "GraphletSampling": [{"name": "graphlet_sampling", "sampling": {"n_samples": 150}}],
    "WL-Subtree": [{"name": "weisfeiler_lehman", "niter": 5}, {"name": "subtree_wl"}],
    "WL-ShortestPath": [{"name": "weisfeiler_lehman", "niter": 5}, {"name": "shortest_path"}]
}
# Kernel configurations for unlabeled datasets ("with_labels": False where
# the kernel supports it).
ulb_kernels = {
    "ShortestPath" : [{"name": "shortest_path", "with_labels": False}],
    "GraphletSampling": [{"name": "graphlet_sampling", "sampling": {"n_samples": 150}}],
    "GeometricRandomWalk" : [{"name": "random_walk", "method_type": "fast", "with_labels": False, "kernel_type": "geometric"}], # ill-defined; do not normalize
    "ExponentialRandomWalk" : [{"name": "random_walk", "method_type": "fast", "with_labels": False, "kernel_type": "exponential"}],
    # Requires node attributes: "MultiScaleLaplacianFast" : [{"name": "multiscale_laplacian", "which": "fast"}],
    "LovaszTheta" : [{"name": "lovasz_theta"}], # slow
    #"SvmTheta" : [{"name": "svm_theta"}] # fast
}
# Example kernel objects, kept for reference:
#gk = WeisfeilerLehman(niter=1, normalize=True, base_kernel=VertexHistogram)
#gk = VertexHistogram(normalize=True)
def save_kernel(G, gk, outpath, dataname, kername, b, handle=False):
    """Compute the Gram matrix of graph kernel ``gk`` over ``G`` and save it.

    Args:
        G: sequence of grakel-style graphs.
        gk: grakel kernel object exposing ``fit_transform``/``transform``.
        outpath: directory the matrix file is written into (must exist).
        dataname: dataset name, used in the output file name.
        kername: kernel name, used in the output file name and log lines.
        b: batch size for the blockwise path (``handle=True``).
        handle: when True, fill the (n, n) matrix block by block to bound
            memory use; otherwise run ``fit_transform`` on all of G at once.

    Side effects:
        Writes ``<outpath>/<dataname>_<kername>.txt`` and prints progress.
    """
    start = time.time()
    print('Compute kernel {} use handle = {}'.format(kername, handle))
    n = len(G)
    #TODO: Let's use multi-processing but need to handle with large memory consuming problem
    K = np.zeros((n, n))
    if handle:
        # Blockwise construction: fit on a diagonal block, then fill the
        # corresponding off-diagonal blocks via transform() against the
        # currently fitted block.
        for i in range(0, n, b):
            ib = min(n, i + b)
            K[i:ib, i:ib] = gk.fit_transform(G[i:ib])
            for j in range(ib, n, b):
                jb = min(n, j + b)
                # transform() yields shape (jb - j, ib - i): rows are the
                # new graphs, columns the fitted ones.
                Kn = gk.transform(G[j:jb])
                K[i:ib, j:jb] = Kn.T
                K[j:jb, i:ib] = Kn
                elapse = sec_to_time(round(time.time() - start, 2))
                print('i={}, j={}, b={}, {}'.format(i, j, b, elapse))
    else:
        K = gk.fit_transform(G)
    outfile = os.path.join(outpath, '{}_{}.txt'.format(dataname, kername))
    elapse = sec_to_time(round(time.time() - start, 2))
    print('Calculate kernel {} in {} '.format(kername, elapse))
    np.savetxt(outfile, K)
    print('Saved kernel ', kername, K.shape)
    print('')
def to_one_hot(G):
# Index all discrete labels
mp = {dl: i for (i, dl) in enumerate(set(l for g in G for l in itervalues(g[1])))}
def make_vec(k):
vec = np.zeros((len(mp),), dtype=float)
vec[k] = 1.0
return vec
return [(g[0], {i: make_vec(mp[k]) for (i, k) in iteritems(g[1])}) for g in G]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exppath', '-e', type=str, required=True)
parser.add_argument('--folder', '-f', type=str, default='gkernel')
parser.add_argument('--njobs', '-j', type=int, default=-1)
parser.add_argument('--norm', '-n', type=int, default=1)
parser.add_argument('--handle', type=int, default=0)
parser.add_argument('--batchsize', '-b', type=int, default=128)
parser.add_argument('--label', type=int, default=0)
parser.add_argument('--dataname', '-d', type=str, default='')
parser.add_argument('--kername', '-k', type=str, default='')
args = parser.parse_args()
print(args)
njobs = None
norm, handle, b, label = args.norm, args.handle, args.batchsize, args.label
if args.njobs > 0:
njobs = args.njobs
dname, kname = args.dataname, args.kername
lb_datalist = ['MUTAG', 'BZR', 'COX2', 'DHFR', 'ENZYMES', 'PROTEINS', 'NCI1', 'NCI109', 'DD', 'MSRC_9']
ulb_datalist = ['IMDB-BINARY', 'IMDB-MULTI', 'REDDIT-BINARY','FRANKENSTEIN', 'COLLAB']
if label > 0:
datalist = lb_datalist
kernels = lb_kernels
else:
datalist = ulb_datalist
kernels = ulb_kernels
rows = sorted(list(kernels.keys()))
if dname != '' and dname not in datalist:
raise ValueError('Not found specified data: {}'.format(dname))
if kname != '' and kname not in kernels:
raise ValueError('Not found specified kernel: {}'.format(kname))
for dataname in datalist:
if dname != '' and dataname != dname:
continue
outpath = os.path.join(args.exppath,dataname)
outpath = os.path.join(outpath, args.folder)
if not os.path.isdir(outpath):
os.makedirs(outpath)
dat = datasets.fetch_dataset(dataname, as_graphs=True)
G, y = dat.data, dat.target
print(dataname, y.shape)
# Need to run each of below kernels separately
if False and label > 0:
gk = VertexHistogram(normalize=norm, n_jobs=njobs)
save_kernel(G, gk, outpath, dataname, 'VertexHist', b, handle=handle)
gk = ShortestPath(normalize=norm, n_jobs=njobs)
save_kernel(G, gk, outpath, dataname, 'ShortestPath', b, handle=handle)
if False and label > 0:
gk = WeisfeilerLehman(niter=5, normalize=norm, base_kernel=VertexHistogram, n_jobs=None)
save_kernel(G, gk, outpath, dataname, 'WL-VertexHist', b, handle=handle)
# if False:
# for rwtype in ['geometric', 'exponential']:
# gk = RandomWalkLabeled(normalize=True, kernel_type=rwtype)
# save_kernel(G, gk, outpath, dataname, 'randomwalk_{}'.format(rwtype))
if True:
for (i, kername) in enumerate(rows):
if kname != '' and kername != kname:
continue
print(kername, end=" ")
gk = GraphKernel(kernel=kernels[kername], normalize=norm, n_jobs=njobs)
print("", end=".")
use_handy = False
if 'WL' in kername and len(G) > 256:
use_handy = True
save_kernel(G, gk, outpath, dataname, kername.replace('/', '-'), b, handle=use_handy)
| 37.657754 | 157 | 0.592161 | import numpy as np
import os
import time
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
from grakel import datasets
from grakel import GraphKernel
from grakel.kernels import VertexHistogram, ShortestPath, WeisfeilerLehman, RandomWalkLabeled, MultiscaleLaplacianFast
from six import itervalues, iteritems
import argparse
def sec_to_time(sec):
dt = list()
days = int(sec // 86400)
if days > 0:
sec -= 86400*days
dt.append(str(days) + " d")
hrs = int(sec // 3600)
if hrs > 0:
sec -= 3600*hrs
dt.append(str(hrs) + " h")
mins = int(sec // 60)
if mins > 0:
sec -= 60*mins
dt.append(str(mins) + " m")
if sec > 0:
dt.append(str(round(sec, 2)) + " s")
return " ".join(dt)
lb_kernels = {
"GraphletSampling": [{"name": "graphlet_sampling", "sampling": {"n_samples": 150}}],
"WL-Subtree": [{"name": "weisfeiler_lehman", "niter": 5}, {"name": "subtree_wl"}],
"WL-ShortestPath": [{"name": "weisfeiler_lehman", "niter": 5}, {"name": "shortest_path"}]
}
ulb_kernels = {
"ShortestPath" : [{"name": "shortest_path", "with_labels": False}],
"GraphletSampling": [{"name": "graphlet_sampling", "sampling": {"n_samples": 150}}],
"GeometricRandomWalk" : [{"name": "random_walk", "method_type": "fast", "with_labels": False, "kernel_type": "geometric"}],
"ExponentialRandomWalk" : [{"name": "random_walk", "method_type": "fast", "with_labels": False, "kernel_type": "exponential"}],
"LovaszTheta" : [{"name": "lovasz_theta"}],
def save_kernel(G, gk, outpath, dataname, kername, b, handle=False):
start = time.time()
print('Compute kernel {} use handle = {}'.format(kername, handle))
n = len(G)
K = np.zeros((n,n))
if handle == True:
for i in range(0, n, b):
ib = min(n, i+b)
Gs = G[i:ib]
Ks = gk.fit_transform(Gs)
K[i:ib, i:ib] = Ks
for j in range(ib, n, b):
jb = min(n, j+b)
Gn = G[j:jb]
Kn = gk.transform(Gn)
K[i:ib, j:jb] = Kn.T
K[j:jb, i:ib] = Kn
elapse = sec_to_time(round(time.time()-start, 2))
print('i={}, j={}, b={}, {}'.format(i, j, b, elapse))
else:
K = gk.fit_transform(G)
# P = gk.fit_transform(G)
# print('K')
# print(K)
# print('P')
# print(P)
# print('K-P')
# print(np.max(np.abs(P-K)))
outfile = os.path.join(outpath, '{}_{}.txt'.format(dataname, kername))
end = time.time()
elapse = sec_to_time(round(end-start, 2))
print('Calculate kernel {} in {} '.format(kername, elapse))
np.savetxt(outfile, K)
print('Saved kernel ', kername, K.shape)
print('')
def to_one_hot(G):
# Index all discrete labels
mp = {dl: i for (i, dl) in enumerate(set(l for g in G for l in itervalues(g[1])))}
def make_vec(k):
vec = np.zeros((len(mp),), dtype=float)
vec[k] = 1.0
return vec
return [(g[0], {i: make_vec(mp[k]) for (i, k) in iteritems(g[1])}) for g in G]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exppath', '-e', type=str, required=True)
parser.add_argument('--folder', '-f', type=str, default='gkernel')
parser.add_argument('--njobs', '-j', type=int, default=-1)
parser.add_argument('--norm', '-n', type=int, default=1)
parser.add_argument('--handle', type=int, default=0)
parser.add_argument('--batchsize', '-b', type=int, default=128)
parser.add_argument('--label', type=int, default=0)
parser.add_argument('--dataname', '-d', type=str, default='')
parser.add_argument('--kername', '-k', type=str, default='')
args = parser.parse_args()
print(args)
njobs = None
norm, handle, b, label = args.norm, args.handle, args.batchsize, args.label
if args.njobs > 0:
njobs = args.njobs
dname, kname = args.dataname, args.kername
lb_datalist = ['MUTAG', 'BZR', 'COX2', 'DHFR', 'ENZYMES', 'PROTEINS', 'NCI1', 'NCI109', 'DD', 'MSRC_9']
ulb_datalist = ['IMDB-BINARY', 'IMDB-MULTI', 'REDDIT-BINARY','FRANKENSTEIN', 'COLLAB']
if label > 0:
datalist = lb_datalist
kernels = lb_kernels
else:
datalist = ulb_datalist
kernels = ulb_kernels
rows = sorted(list(kernels.keys()))
if dname != '' and dname not in datalist:
raise ValueError('Not found specified data: {}'.format(dname))
if kname != '' and kname not in kernels:
raise ValueError('Not found specified kernel: {}'.format(kname))
for dataname in datalist:
if dname != '' and dataname != dname:
continue
outpath = os.path.join(args.exppath,dataname)
outpath = os.path.join(outpath, args.folder)
if not os.path.isdir(outpath):
os.makedirs(outpath)
dat = datasets.fetch_dataset(dataname, as_graphs=True)
G, y = dat.data, dat.target
print(dataname, y.shape)
# Need to run each of below kernels separately
if False and label > 0:
gk = VertexHistogram(normalize=norm, n_jobs=njobs)
save_kernel(G, gk, outpath, dataname, 'VertexHist', b, handle=handle)
gk = ShortestPath(normalize=norm, n_jobs=njobs)
save_kernel(G, gk, outpath, dataname, 'ShortestPath', b, handle=handle)
if False and label > 0:
gk = WeisfeilerLehman(niter=5, normalize=norm, base_kernel=VertexHistogram, n_jobs=None)
save_kernel(G, gk, outpath, dataname, 'WL-VertexHist', b, handle=handle)
# if False:
# for rwtype in ['geometric', 'exponential']:
# gk = RandomWalkLabeled(normalize=True, kernel_type=rwtype)
# save_kernel(G, gk, outpath, dataname, 'randomwalk_{}'.format(rwtype))
if True:
for (i, kername) in enumerate(rows):
if kname != '' and kername != kname:
continue
print(kername, end=" ")
gk = GraphKernel(kernel=kernels[kername], normalize=norm, n_jobs=njobs)
print("", end=".")
use_handy = False
if 'WL' in kername and len(G) > 256:
use_handy = True
save_kernel(G, gk, outpath, dataname, kername.replace('/', '-'), b, handle=use_handy)
| true | true |
f722b5d5ad1cb0a99fd0a2b120d5a42a953925ac | 374 | py | Python | sql_extender/__init__.py | MiConnell/sql-extender | 32a5f839c3b7d5fa7abdf7b02c7aa5702cf31a69 | [
"MIT"
] | null | null | null | sql_extender/__init__.py | MiConnell/sql-extender | 32a5f839c3b7d5fa7abdf7b02c7aa5702cf31a69 | [
"MIT"
] | null | null | null | sql_extender/__init__.py | MiConnell/sql-extender | 32a5f839c3b7d5fa7abdf7b02c7aa5702cf31a69 | [
"MIT"
] | null | null | null | # type: ignore[attr-defined]
"""Python package to extend sql functionality"""
try:
from importlib.metadata import PackageNotFoundError, version
except ImportError: # pragma: no cover
from importlib_metadata import PackageNotFoundError, version
try:
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
| 26.714286 | 64 | 0.764706 |
try:
from importlib.metadata import PackageNotFoundError, version
except ImportError:
from importlib_metadata import PackageNotFoundError, version
try:
__version__ = version(__name__)
except PackageNotFoundError:
__version__ = "unknown"
| true | true |
f722b63dddf18e587faafd9a5459737bf43c110f | 540 | py | Python | python/ray/serve/examples/echo.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | 1 | 2020-08-20T16:22:38.000Z | 2020-08-20T16:22:38.000Z | python/ray/serve/examples/echo.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | 1 | 2020-06-23T07:54:44.000Z | 2020-06-23T08:04:47.000Z | python/ray/serve/examples/echo.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | null | null | null | """
Example service that prints out http context.
"""
import time
import requests
from ray import serve
from ray.serve.utils import pformat_color_json
def echo(flask_request):
return "hello " + flask_request.args.get("name", "serve!")
serve.init()
serve.create_backend("echo:v1", echo)
serve.create_endpoint("my_endpoint", backend="echo:v1", route="/echo")
while True:
resp = requests.get("http://127.0.0.1:8000/echo").json()
print(pformat_color_json(resp))
print("...Sleeping for 2 seconds...")
time.sleep(2)
| 19.285714 | 70 | 0.701852 |
import time
import requests
from ray import serve
from ray.serve.utils import pformat_color_json
def echo(flask_request):
return "hello " + flask_request.args.get("name", "serve!")
serve.init()
serve.create_backend("echo:v1", echo)
serve.create_endpoint("my_endpoint", backend="echo:v1", route="/echo")
while True:
resp = requests.get("http://127.0.0.1:8000/echo").json()
print(pformat_color_json(resp))
print("...Sleeping for 2 seconds...")
time.sleep(2)
| true | true |
f722b64048e9d3433b07297d01920a9d40b3caad | 2,066 | py | Python | backend/app/db/models/sales.py | Laende/anleggsmaskin-salgsprisprekisjon | d1e448d9ee081ba055c3ac05c1b1883294f19c99 | [
"MIT"
] | null | null | null | backend/app/db/models/sales.py | Laende/anleggsmaskin-salgsprisprekisjon | d1e448d9ee081ba055c3ac05c1b1883294f19c99 | [
"MIT"
] | null | null | null | backend/app/db/models/sales.py | Laende/anleggsmaskin-salgsprisprekisjon | d1e448d9ee081ba055c3ac05c1b1883294f19c99 | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy import Column, Date, DateTime, Float, Integer, String
from sqlalchemy.sql.sqltypes import Boolean
from app.db.base_class import Base
class Sales(Base):
id = Column(Integer, primary_key=True)
saleprice = Column(Integer, nullable=True)
model_id = Column(Integer, nullable=True)
data_source = Column(Integer, nullable=True)
auctioneer_id = Column(String, nullable=True)
year_made = Column(Integer, nullable=True, default=1996)
machine_hours_current_meter = Column(Integer, nullable=True, default=3213)
saledate = Column(Date, nullable=False)
fi_model_desc = Column(String, nullable=True)
fi_base_model = Column(String, nullable=True)
fi_secondary_desc = Column(String, nullable=True)
fi_model_series = Column(String, nullable=True)
fi_model_descriptor = Column(String, nullable=True)
product_size = Column(String, nullable=True)
state = Column(String, nullable=True)
product_group = Column(String, nullable=True)
product_group_desc = Column(String, nullable=True)
drive_system = Column(String, nullable=True)
enclosure = Column(String, nullable=True)
ride_control = Column(String, nullable=True)
stick = Column(String, nullable=True)
transmission = Column(String, nullable=True)
engine_horsepower_desc = Column(String, nullable=True)
hydraulics = Column(String, nullable=True)
ripper = Column(String, nullable=True)
tire_size = Column(Float, nullable=True)
coupler = Column(String, nullable=True)
hydraulics_flow = Column(String, nullable=True)
track_type = Column(String, nullable=True)
undercarriage_pad_width = Column(Float, nullable=True)
stick_length = Column(String, nullable=True)
grouser_type = Column(String, nullable=True)
blade_type = Column(String, nullable=True)
differential_type = Column(String, nullable=True)
steering_controls = Column(String, nullable=True)
engine_horsepower = Column(Float, nullable=True)
is_new = Column(Boolean, default=False)
| 38.981132 | 78 | 0.742498 | from datetime import datetime
from sqlalchemy import Column, Date, DateTime, Float, Integer, String
from sqlalchemy.sql.sqltypes import Boolean
from app.db.base_class import Base
class Sales(Base):
id = Column(Integer, primary_key=True)
saleprice = Column(Integer, nullable=True)
model_id = Column(Integer, nullable=True)
data_source = Column(Integer, nullable=True)
auctioneer_id = Column(String, nullable=True)
year_made = Column(Integer, nullable=True, default=1996)
machine_hours_current_meter = Column(Integer, nullable=True, default=3213)
saledate = Column(Date, nullable=False)
fi_model_desc = Column(String, nullable=True)
fi_base_model = Column(String, nullable=True)
fi_secondary_desc = Column(String, nullable=True)
fi_model_series = Column(String, nullable=True)
fi_model_descriptor = Column(String, nullable=True)
product_size = Column(String, nullable=True)
state = Column(String, nullable=True)
product_group = Column(String, nullable=True)
product_group_desc = Column(String, nullable=True)
drive_system = Column(String, nullable=True)
enclosure = Column(String, nullable=True)
ride_control = Column(String, nullable=True)
stick = Column(String, nullable=True)
transmission = Column(String, nullable=True)
engine_horsepower_desc = Column(String, nullable=True)
hydraulics = Column(String, nullable=True)
ripper = Column(String, nullable=True)
tire_size = Column(Float, nullable=True)
coupler = Column(String, nullable=True)
hydraulics_flow = Column(String, nullable=True)
track_type = Column(String, nullable=True)
undercarriage_pad_width = Column(Float, nullable=True)
stick_length = Column(String, nullable=True)
grouser_type = Column(String, nullable=True)
blade_type = Column(String, nullable=True)
differential_type = Column(String, nullable=True)
steering_controls = Column(String, nullable=True)
engine_horsepower = Column(Float, nullable=True)
is_new = Column(Boolean, default=False)
| true | true |
f722b6fb9a5e79b3ad00ffa5fce2548d02a0141f | 3,564 | py | Python | code/Algorithms/Algorithms.py | BogyMitutoyoCTL/Snake-AI-2021.1 | dfbc556e74628e670db3fc4891b50203c466ae16 | [
"MIT"
] | null | null | null | code/Algorithms/Algorithms.py | BogyMitutoyoCTL/Snake-AI-2021.1 | dfbc556e74628e670db3fc4891b50203c466ae16 | [
"MIT"
] | null | null | null | code/Algorithms/Algorithms.py | BogyMitutoyoCTL/Snake-AI-2021.1 | dfbc556e74628e670db3fc4891b50203c466ae16 | [
"MIT"
] | null | null | null | from GameData import GameData
from RewardSystem import RewardSystem
from TrainingData import TrainingData
from Visualization import Visualization
from Field import Field
from typing import Optional
class Algorithm:
def __init__(self):
self.model = None
self.reward_system: Optional[RewardSystem] = None
"""
This is the base class / interface for an algorithm.
"""
def decide(self, info: GameData) -> str:
"""
This method needs to decide for an action to be taken given the current state of the game.
:param info: The current state of the snake game before the action is performed.
:return: an action that is accepted by the Gym.
"""
pass
def epochfinished(self) -> (object, float):
"""
When an epoch has completed, this method can be used to retrieve the model and fitness.
:return: Tupel of model and fitness.
The model could be a trained model in case of an AI algorithm, but could be None in case of a classic Algorithm
"""
return None, 0.0
def train(self, info: GameData, action: str, reward: float) -> None:
"""
This method can be used for training / optimizing the algorithm.
Non-learning algorithms can ignore this method.
:param info:
:param action: the action that has lead to the state.
Note: when using an ɛ-approach for randomization, the action passed here is not necessarily the one last
returned by the train() method.
:param reward: A reward for the action.
:return: None.
"""
pass
def visualize(self, data: GameData, training: TrainingData) -> Field:
"""
Can be used to visualize the thought process of the algorithm.
It is not needed to visualize the game state. This can be done by the Gym alone.
:param data: the state of the game, which may be needed to calculate the visualization.
:param training: statistical information about the training
:return: None.
"""
pass
def epsilon(self, epoch: int, maxepochs: int) -> float:
"""
Calculates the randomness that shall be applied for training the algorithm.
:param epoch: Number of the currently trained epoch
:param maxepochs: Maximum number of epochs, the training lasts
:return: Number between 0 and 1, representing a chance of 0% ... 100% random choice
"""
return 0.0
class Visual(Algorithm):
"""
This class is a default visualizer that can be used to wrap any algorithm for visualization.
It basically delegates all methods to the algorithm passed in the constructor but paints the
game data on its way.
"""
def __init__(self, algorithm: Algorithm):
super().__init__()
self.decider: Algorithm = algorithm
self.vis = Visualization(20, Field(10, 20))
def decide(self, info: GameData) -> str:
return self.decider.decide(info)
def epochfinished(self) -> (object, float):
return self.decider.epochfinished()
def train(self, info: GameData, action, reward) -> None:
return self.decider.train(info, action, reward)
def visualize(self, data: GameData, training: TrainingData):
layer = self.decider.visualize(data, training)
self.vis.reset()
self.vis.display_visualization_stats()
self.vis.display_training(training)
self.vis.display_game(data)
self.vis.add_layer(layer)
self.vis.tick()
| 37.125 | 119 | 0.658249 | from GameData import GameData
from RewardSystem import RewardSystem
from TrainingData import TrainingData
from Visualization import Visualization
from Field import Field
from typing import Optional
class Algorithm:
def __init__(self):
self.model = None
self.reward_system: Optional[RewardSystem] = None
def decide(self, info: GameData) -> str:
pass
def epochfinished(self) -> (object, float):
return None, 0.0
def train(self, info: GameData, action: str, reward: float) -> None:
pass
def visualize(self, data: GameData, training: TrainingData) -> Field:
pass
def epsilon(self, epoch: int, maxepochs: int) -> float:
return 0.0
class Visual(Algorithm):
def __init__(self, algorithm: Algorithm):
super().__init__()
self.decider: Algorithm = algorithm
self.vis = Visualization(20, Field(10, 20))
def decide(self, info: GameData) -> str:
return self.decider.decide(info)
def epochfinished(self) -> (object, float):
return self.decider.epochfinished()
def train(self, info: GameData, action, reward) -> None:
return self.decider.train(info, action, reward)
def visualize(self, data: GameData, training: TrainingData):
layer = self.decider.visualize(data, training)
self.vis.reset()
self.vis.display_visualization_stats()
self.vis.display_training(training)
self.vis.display_game(data)
self.vis.add_layer(layer)
self.vis.tick()
| true | true |
f722b721e7eab69ffbf8ddc2e18bb83c2eacb719 | 797 | py | Python | tests/test_utils.py | marteinn/wagtail-meta-preview | c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b | [
"MIT"
] | 22 | 2020-08-02T07:37:48.000Z | 2022-03-23T08:10:42.000Z | tests/test_utils.py | marteinn/wagtail-meta-preview | c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b | [
"MIT"
] | 2 | 2020-08-02T19:27:12.000Z | 2020-08-16T10:02:59.000Z | tests/test_utils.py | marteinn/wagtail-meta-preview | c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b | [
"MIT"
] | 2 | 2020-08-02T18:55:44.000Z | 2020-08-03T12:01:54.000Z | from io import BytesIO
import PIL.Image
from django.test import TestCase
from django.core.files.images import ImageFile
from wagtail.tests.utils import WagtailTestUtils
from wagtail.images.models import Image
from wagtail_meta_preview.utils import get_focal
# Taken from wagtail.images.test.utils
def get_test_image_file(filename="test.png", colour="white", size=(640, 480)):
f = BytesIO()
image = PIL.Image.new("RGBA", size, colour)
image.save(f, "PNG")
return ImageFile(f, name=filename)
class TestUtils(TestCase, WagtailTestUtils):
def setUp(self):
self.image = Image(
title="Test image", file=get_test_image_file(colour="white"),
)
def test_focal(self):
self.assertEqual(get_focal(self.image), {"x": "50.00%", "y": "50.00%"})
| 28.464286 | 79 | 0.705144 | from io import BytesIO
import PIL.Image
from django.test import TestCase
from django.core.files.images import ImageFile
from wagtail.tests.utils import WagtailTestUtils
from wagtail.images.models import Image
from wagtail_meta_preview.utils import get_focal
def get_test_image_file(filename="test.png", colour="white", size=(640, 480)):
f = BytesIO()
image = PIL.Image.new("RGBA", size, colour)
image.save(f, "PNG")
return ImageFile(f, name=filename)
class TestUtils(TestCase, WagtailTestUtils):
def setUp(self):
self.image = Image(
title="Test image", file=get_test_image_file(colour="white"),
)
def test_focal(self):
self.assertEqual(get_focal(self.image), {"x": "50.00%", "y": "50.00%"})
| true | true |
f722b7c07bc7436fa8077d981c5440bed42bce81 | 2,307 | py | Python | _unittests/ut_onnxrt/test_rt_valid_model_gaussian_mixture.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | [
"MIT"
] | 32 | 2018-03-04T23:33:30.000Z | 2022-03-10T19:15:06.000Z | _unittests/ut_onnxrt/test_rt_valid_model_gaussian_mixture.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | [
"MIT"
] | 184 | 2017-11-30T14:10:35.000Z | 2022-02-21T08:29:31.000Z | _unittests/ut_onnxrt/test_rt_valid_model_gaussian_mixture.py | sdpython/mlprodic | 9367dacc91d35ec670c8a8a76708300a75bbc993 | [
"MIT"
] | 9 | 2019-07-24T13:18:00.000Z | 2022-03-07T04:08:07.000Z | """
@brief test log(time=16s)
"""
import unittest
from logging import getLogger
from pandas import DataFrame
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pandashelper import df2rst
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets, summary_report
from mlprodict.onnxrt.doc.doc_write_helper import split_columns_subsets
class TestRtValidateGaussianMixture(ExtTestCase):
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_GaussianMixture_python(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
debug = False
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"GaussianMixture"}, opset_min=9,
opset_max=11, fLOG=myprint,
runtime='python', debug=debug,
filter_exp=lambda m, p: 'mix' in p))
self.assertGreater(len(rows), 1)
self.assertIn('skl_nop', rows[-1])
keys = set()
for row in rows:
keys.update(set(row))
self.assertIn('onx_size', keys)
piv = summary_report(DataFrame(rows))
opset = [c for c in piv.columns if 'opset' in c]
self.assertTrue('opset11' in opset or 'opset10' in opset)
self.assertGreater(len(buffer), 1 if debug else 0)
common, subsets = split_columns_subsets(piv)
try:
conv = df2rst(piv, split_col_common=common, # pylint: disable=E1123
split_col_subsets=subsets)
self.assertIn('| GaussianMixture |', conv)
except TypeError as e:
if "got an unexpected keyword argument 'split_col_common'" in str(e):
return
raise e
if __name__ == "__main__":
unittest.main()
| 36.619048 | 89 | 0.673602 | import unittest
from logging import getLogger
from pandas import DataFrame
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pandashelper import df2rst
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets, summary_report
from mlprodict.onnxrt.doc.doc_write_helper import split_columns_subsets
class TestRtValidateGaussianMixture(ExtTestCase):
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_GaussianMixture_python(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
debug = False
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"GaussianMixture"}, opset_min=9,
opset_max=11, fLOG=myprint,
runtime='python', debug=debug,
filter_exp=lambda m, p: 'mix' in p))
self.assertGreater(len(rows), 1)
self.assertIn('skl_nop', rows[-1])
keys = set()
for row in rows:
keys.update(set(row))
self.assertIn('onx_size', keys)
piv = summary_report(DataFrame(rows))
opset = [c for c in piv.columns if 'opset' in c]
self.assertTrue('opset11' in opset or 'opset10' in opset)
self.assertGreater(len(buffer), 1 if debug else 0)
common, subsets = split_columns_subsets(piv)
try:
conv = df2rst(piv, split_col_common=common,
split_col_subsets=subsets)
self.assertIn('| GaussianMixture |', conv)
except TypeError as e:
if "got an unexpected keyword argument 'split_col_common'" in str(e):
return
raise e
if __name__ == "__main__":
unittest.main()
| true | true |
f722b84767f8fa5c57fce1563a9d9b91a26ec9ed | 270 | py | Python | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
string = str(input())
methods = [".isalnum()", ".isalpha()", ".isdigit()",
".islower()", ".isupper()"]
for i, method in enumerate(methods):
print(eval("any(alpha{0} for alpha in string)".format(method)))
| 27 | 71 | 0.548148 | if __name__ == '__main__':
string = str(input())
methods = [".isalnum()", ".isalpha()", ".isdigit()",
".islower()", ".isupper()"]
for i, method in enumerate(methods):
print(eval("any(alpha{0} for alpha in string)".format(method)))
| true | true |
f722bab0afcdaf0269ac288c12cb80d2a596631f | 82 | py | Python | books/PRML/PRML-master-Python/prml/linear/classifier.py | iamfaith/DeepLearning | 80ce429d0e9e448cf84e7d51129ef4e0077048a2 | [
"Apache-2.0"
] | 7,581 | 2018-04-26T04:29:30.000Z | 2022-03-31T15:35:39.000Z | books/PRML/PRML-master-Python/prml/linear/classifier.py | lizhenchen2019/DeepLearning | 467c73e2d0435f0a05255e5b5e00454260d01f27 | [
"Apache-2.0"
] | 8 | 2019-05-22T02:27:35.000Z | 2022-03-03T03:53:05.000Z | books/PRML/PRML-master-Python/prml/linear/classifier.py | lizhenchen2019/DeepLearning | 467c73e2d0435f0a05255e5b5e00454260d01f27 | [
"Apache-2.0"
] | 2,340 | 2018-04-26T04:28:11.000Z | 2022-03-31T02:28:25.000Z | class Classifier(object):
"""
Base class for classifiers
"""
pass
| 13.666667 | 30 | 0.585366 | class Classifier(object):
pass
| true | true |
f722bc57a9f5970f9cf73611c7fa64c8e152b05e | 3,454 | py | Python | datalabeling/create_instruction.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | 1 | 2020-03-05T18:24:45.000Z | 2020-03-05T18:24:45.000Z | datalabeling/create_instruction.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | 1 | 2022-01-14T23:28:27.000Z | 2022-01-14T23:28:27.000Z | datalabeling/create_instruction.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | 2 | 2020-06-07T07:03:00.000Z | 2021-04-10T18:22:25.000Z | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.api_core.client_options import ClientOptions
# [START datalabeling_create_instruction_beta]
def create_instruction(project_id, data_type, instruction_gcs_uri):
    """Create a Data Labeling PDF instruction in the given Google Cloud project.

    The instruction PDF must already have been uploaded to Google Cloud
    Storage; ``instruction_gcs_uri`` is its ``gs://`` location.

    Args:
        project_id: ID of the Google Cloud project.
        data_type: a ``datalabeling.enums.DataType`` value (IMAGE, VIDEO,
            TEXT or AUDIO).
        instruction_gcs_uri: Cloud Storage URI of the instruction PDF.

    Returns:
        The created ``Instruction`` resource.
    """
    from google.cloud import datalabeling_v1beta1 as datalabeling
    client = datalabeling.DataLabelingServiceClient()
    # [END datalabeling_create_instruction_beta]
    # If provided, use a provided test endpoint - this will prevent tests on
    # this snippet from triggering any action by a real human
    endpoint = os.environ.get('DATALABELING_ENDPOINT')
    if endpoint is not None:
        client = datalabeling.DataLabelingServiceClient(
            client_options=ClientOptions(api_endpoint=endpoint))
    # [START datalabeling_create_instruction_beta]

    parent = client.project_path(project_id)
    instruction = datalabeling.types.Instruction(
        display_name='YOUR_INSTRUCTION_DISPLAY_NAME',
        description='YOUR_DESCRIPTION',
        data_type=data_type,
        pdf_instruction=datalabeling.types.PdfInstruction(
            gcs_file_uri=instruction_gcs_uri),
    )

    # create_instruction returns a long-running operation; wait for it.
    result = client.create_instruction(parent, instruction).result()

    # The format of the resource name:
    # project_id/{project_id}/instruction/{instruction_id}
    print('The instruction resource name: {}\n'.format(result.name))
    print('Display name: {}'.format(result.display_name))
    print('Description: {}'.format(result.description))
    print('Create time:')
    print('\tseconds: {}'.format(result.create_time.seconds))
    print('\tnanos: {}'.format(result.create_time.nanos))
    print('Data type: {}'.format(
        datalabeling.enums.DataType(result.data_type).name))
    print('Pdf instruction:')
    print('\tGcs file uri: {}'.format(result.pdf_instruction.gcs_file_uri))

    return result
# [END datalabeling_create_instruction_beta]
if __name__ == '__main__':
    # Command-line entry point: collect the three required flags and run
    # the sample.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    for flag, flag_help in (
            ('--project-id',
             'Project ID. Required.'),
            ('--data-type',
             'Data type. Only support IMAGE, VIDEO, TEXT and AUDIO. Required.'),
            ('--instruction-gcs-uri',
             'The URI of Google Cloud Storage of the instruction. Required.')):
        arg_parser.add_argument(flag, help=flag_help, required=True)
    parsed_args = arg_parser.parse_args()
    create_instruction(parsed_args.project_id,
                       parsed_args.data_type,
                       parsed_args.instruction_gcs_uri)
| 33.533981 | 79 | 0.713955 |
import argparse
import os
from google.api_core.client_options import ClientOptions
def create_instruction(project_id, data_type, instruction_gcs_uri):
    """Create a Data Labeling PDF instruction for the given project.

    The instruction PDF is expected to be uploaded to Google Cloud Storage
    already; ``instruction_gcs_uri`` points at it.
    """
    from google.cloud import datalabeling_v1beta1 as datalabeling
    client = datalabeling.DataLabelingServiceClient()
    # Optional test endpoint: keeps sample tests from triggering real
    # human-labeling actions.
    if 'DATALABELING_ENDPOINT' in os.environ:
        opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
        client = datalabeling.DataLabelingServiceClient(client_options=opts)
    project_path = client.project_path(project_id)
    pdf_instruction = datalabeling.types.PdfInstruction(
        gcs_file_uri=instruction_gcs_uri)
    instruction = datalabeling.types.Instruction(
        display_name='YOUR_INSTRUCTION_DISPLAY_NAME',
        description='YOUR_DESCRIPTION',
        data_type=data_type,
        pdf_instruction=pdf_instruction
    )
    # create_instruction returns a long-running operation; block until done.
    operation = client.create_instruction(project_path, instruction)
    result = operation.result()
    # Resource name format: project_id/{project_id}/instruction/{instruction_id}
    print('The instruction resource name: {}\n'.format(result.name))
    print('Display name: {}'.format(result.display_name))
    print('Description: {}'.format(result.description))
    print('Create time:')
    print('\tseconds: {}'.format(result.create_time.seconds))
    print('\tnanos: {}'.format(result.create_time.nanos))
    print('Data type: {}'.format(
        datalabeling.enums.DataType(result.data_type).name))
    print('Pdf instruction:')
    print('\tGcs file uri: {}'.format(
        result.pdf_instruction.gcs_file_uri))
    return result
if __name__ == '__main__':
    # Command-line entry point: all three flags are required.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--project-id',
        help='Project ID. Required.',
        required=True
    )
    parser.add_argument(
        '--data-type',
        help='Data type. Only support IMAGE, VIDEO, TEXT and AUDIO. Required.',
        required=True
    )
    parser.add_argument(
        '--instruction-gcs-uri',
        help='The URI of Google Cloud Storage of the instruction. Required.',
        required=True
    )
    args = parser.parse_args()
    create_instruction(
        args.project_id,
        args.data_type,
        args.instruction_gcs_uri
    )
| true | true |
f722bcfd8c23f4dcfdc59a006cd1fccacb2edff4 | 1,310 | py | Python | tests/test_test_utils.py | tomasaschan/scantree | d6e432cb4d71b32341ee0668c90499e957c2bb89 | [
"MIT"
] | 8 | 2020-06-03T08:23:03.000Z | 2022-02-23T21:08:55.000Z | tests/test_test_utils.py | tomasaschan/scantree | d6e432cb4d71b32341ee0668c90499e957c2bb89 | [
"MIT"
] | 10 | 2019-03-13T01:07:06.000Z | 2022-01-09T06:58:24.000Z | tests/test_test_utils.py | tomasaschan/scantree | d6e432cb4d71b32341ee0668c90499e957c2bb89 | [
"MIT"
] | 3 | 2020-07-18T04:39:49.000Z | 2021-11-10T12:46:02.000Z | from __future__ import print_function, division
import pytest
import attr
from scantree.test_utils import assert_dir_entry_equal
from scantree import DirEntryReplacement
class MockStat(object):
    """Minimal stand-in for ``os.stat_result`` carrying only ``st_ino``."""

    def __init__(self, st_ino=None):
        # st_ino: fake inode number used to distinguish stat results in tests.
        self.st_ino = st_ino
class TestAssertDirEntryEqual(object):
    """Tests for ``assert_dir_entry_equal`` from ``scantree.test_utils``."""

    def get_mock_dir_entry(self):
        """Build a DirEntryReplacement fixture with all attributes populated."""
        entry = DirEntryReplacement(path='/path/to/mock', name='mock')
        fixture_attributes = {
            '_is_dir': True,
            '_is_file': False,
            '_is_symlink': False,
            '_stat_sym': MockStat(1),
            '_stat_nosym': MockStat(0),
        }
        for attribute, value in fixture_attributes.items():
            setattr(entry, attribute, value)
        return entry

    def test_equal(self):
        # Comparing an entry with itself must not raise.
        entry = self.get_mock_dir_entry()
        assert_dir_entry_equal(entry, entry)

    @pytest.mark.parametrize(
        'kwargs',
        [
            {'path': 'other/path'},
            {'name': 'other_name'},
            {'_is_dir': False},
            {'_is_file': True},
            {'_is_symlink': True},
            {'_stat_sym': MockStat(11)},
            {'_stat_nosym': MockStat(22)},
        ]
    )
    def test_not_equal(self, kwargs):
        # Changing any single attribute must make the equality assertion fail.
        entry = self.get_mock_dir_entry()
        mutated = attr.evolve(entry)
        for attribute, value in kwargs.items():
            setattr(mutated, attribute, value)
        with pytest.raises(AssertionError):
            assert_dir_entry_equal(entry, mutated)
| 25.192308 | 67 | 0.60229 | from __future__ import print_function, division
import pytest
import attr
from scantree.test_utils import assert_dir_entry_equal
from scantree import DirEntryReplacement
class MockStat(object):
    """Minimal stand-in for ``os.stat_result`` carrying only ``st_ino``."""

    def __init__(self, st_ino=None):
        # st_ino: fake inode number used to distinguish stat results in tests.
        self.st_ino = st_ino
class TestAssertDirEntryEqual(object):
    """Tests for ``scantree.test_utils.assert_dir_entry_equal``."""
    def get_mock_dir_entry(self):
        """Return a DirEntryReplacement with directory-entry attributes set."""
        de = DirEntryReplacement(path='/path/to/mock', name='mock')
        de._is_dir = True
        de._is_file = False
        de._is_symlink = False
        de._stat_sym = MockStat(1)
        de._stat_nosym = MockStat(0)
        return de
    def test_equal(self):
        """Comparing an entry with itself does not raise."""
        de = self.get_mock_dir_entry()
        assert_dir_entry_equal(de, de)
    @pytest.mark.parametrize(
        'kwargs',
        [
            {'path': 'other/path'},
            {'name': 'other_name'},
            {'_is_dir': False},
            {'_is_file': True},
            {'_is_symlink': True},
            {'_stat_sym': MockStat(11)},
            {'_stat_nosym': MockStat(22)},
        ]
    )
    def test_not_equal(self, kwargs):
        """Changing any single attribute makes the assertion fail."""
        de = self.get_mock_dir_entry()
        de_different = attr.evolve(de)
        for k, v in kwargs.items():
            setattr(de_different, k, v)
        with pytest.raises(AssertionError):
            assert_dir_entry_equal(de, de_different)
| true | true |
f722bd3b55caa4b1c165ca71f3cbabe696b4b303 | 13,630 | py | Python | bindings/python/cntk/train/trainer.py | KeyanAndrewLi/CNTK | 4b9f8739c72068d70279f91b4b59923b2ae1fc3a | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/train/trainer.py | KeyanAndrewLi/CNTK | 4b9f8739c72068d70279f91b4b59923b2ae1fc3a | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/train/trainer.py | KeyanAndrewLi/CNTK | 4b9f8739c72068d70279f91b4b59923b2ae1fc3a | [
"RSA-MD"
] | null | null | null |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from .. import cntk_py
from ..device import use_default_device
from cntk.internal import sanitize_var_map, sanitize_function, typemap, \
_value_as_sequence_or_array
from cntk.internal.utils import _py_dict_to_cntk_dict
from ..io import MinibatchData
__doc__ = '''\
A trainer encapsulates the overall training process and employs one or more
:mod:`~cntk.learners` to tune the parameters of a specified model
using gradients of parameters w.r.t. a training objective.
'''
class Trainer(cntk_py.Trainer):
    '''
    Class for training the model parameters of a models' specified loss function, using the
    specified set of ``parameter_learners`` for updating the model's parameters
    using computed gradients.
    An optional specified metric function, which can be non-differentiable,
    can be used for tracking the trained model's quality.

    Args:
       model (:class:`~cntk.ops.functions.Function`): root node of the function to train
       criterion (tuple of :class:`~cntk.ops.functions.Function` or :class:`~cntk.variables.Variable`):
        Function with one or two outputs, representing loss and, if given, evaluation metric (in this order).
        Alternatively, a tuple(loss Function, evaluation Function) is also accepted.
       parameter_learners (list): list of learners from :mod:`cntk.learners`
       progress_writers (list): optionally, list of progress writers from :mod:`cntk.utils` to automatically track
        training progress.

    Todo:
       Allow to skip some parameters that should not be updated.
    '''

    @staticmethod
    def _get_loss_metric(criterion):
        '''Helper to interpret the ``criterion`` parameter; returns a
        (loss Function, metric Function or None) pair.'''
        if isinstance(criterion, cntk_py.Function):  # input can be a tuple of Functions or a tuple-valued Function
            criterion = criterion.outputs  # break up tuple-valued Function into tuple of Functions
        # map Variable to Function
        from cntk import combine
        criterion = tuple([combine([output], output.name) if isinstance(output, cntk_py.Variable) else output for output in criterion])
        if len(criterion) == 1:
            criterion = criterion + (None,)  # tuple of 1 value: pad with None
        elif len(criterion) != 2:
            raise ValueError("criterion parameter must be a singleton or a tuple of 2 elements")
        return criterion

    def __init__(self, model, criterion, parameter_learners, progress_writers=None):
        loss_function, eval_function = Trainer._get_loss_metric(criterion)
        # TODO sanitizing should be removed once Swig's typemaps are in place
        if model is not None:  # None means dummy model that is, e.g., the same as a criterion
            model = sanitize_function(model)
        loss_function = sanitize_function(loss_function)
        if eval_function is not None:
            eval_function = sanitize_function(eval_function)
        if not isinstance(parameter_learners, list):
            parameter_learners = [parameter_learners]
        if progress_writers is None:
            progress_writers = []
        elif not isinstance(progress_writers, list):
            progress_writers = [progress_writers]

        trainer = cntk_py.trainer_impl(model, loss_function, eval_function, parameter_learners, progress_writers)
        # transplant into this class instance
        self.__dict__ = trainer.__dict__

    # TODO: bring this back once the design has been settled
    def _train_test_mb_map_args(self, *args, **kwargs):
        '''helper function for mimicking Python calling convention in train/test_minibatch()'''
        # one argument, which is an arg map or a (map, bool) tuple
        if len(args) == 1 and isinstance(args[0], (dict, tuple)):
            return args[0]
        # map to function arguments
        args = self.loss_function.argument_map(*args, **kwargs)
        # in this use case, all must have the same inputs (subsets of loss) since they are all called as a single combined function
        if self.model:
            for arg in self.model.arguments:
                if arg not in self.loss_function.arguments:
                    raise ValueError("model function must share its arguments with the loss function")
        if self.evaluation_function:
            for arg in self.evaluation_function.arguments:
                if arg not in self.loss_function.arguments:
                    raise ValueError("evaluation function must have the same signature and inputs as the loss function")
        return args

    def train_minibatch(self, arguments, outputs=None, device=None):
        '''
        Optimize model parameters using the specified 'arguments' minibatch of training samples.

        Args:
            arguments: maps variables to their input data. Empty map signifies
                end of local training data.
                The interpretation depends on the input type:

                * `dict`: keys are input variable or names, and values are the input data.
                * any other type: if node has an unique input, ``arguments`` is mapped to this input.
                  For nodes with more than one input, only `dict` is allowed.

                In both cases, every sample in the data will be interpreted
                as a new sequence. To mark samples as continuations of the
                previous sequence, specify ``arguments`` as `tuple`: the
                first element will be used as ``arguments``, and the second one will
                be used as a list of bools, denoting whether a sequence is a new
                one (`True`) or a continuation of the previous one (`False`).
                Data should be either NumPy arrays or a
                :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable): outputs to fetch values for.
            device (:class:`~cntk.device.DeviceDescriptor`): the device descriptor that
                contains the type and id of the device on which the computation is
                to be performed.

        Note:
             See :meth:`~cntk.ops.functions.Function.forward` for examples on
             passing input data.

        Returns:
            `bool` or `tuple`:
            If ``outputs`` have not been provided, the returned value is `True`
            if updates have been performed, `False` if all parameter learners
            indicate end of learning (through their update). Otherwise, the
            return value is a tuple of the that `bool` and a dictionary that
            maps the variables in `outputs` to their respective NumPy arrays.
        '''
        if not device:
            device = use_default_device()

        if arguments:  # arguments must feed all inputs (model, loss, eval)
            all_args = set(self.loss_function.arguments)
            if self.model:
                all_args |= set(self.model.arguments)
            if self.evaluation_function:
                all_args |= set(self.evaluation_function.arguments)
            arguments = sanitize_var_map(tuple(all_args), arguments,
                extract_values_from_minibatch_data = False, device=device)

        # MinibatchData inputs are dispatched to a dedicated overload.
        contains_minibatch_data = False
        if (len(arguments) > 0):
            value = next(iter(arguments.values()))
            contains_minibatch_data = isinstance(value, MinibatchData)

        if outputs:
            output_map = {v: None for v in outputs}

            if contains_minibatch_data:
                updated = super(Trainer, self).train_minibatch_overload_for_minibatchdata(
                    arguments, output_map, device)
            else:
                updated = super(Trainer, self).train_minibatch(arguments,
                    output_map, device)

            for k, v in output_map.items():
                output_map[k] = _value_as_sequence_or_array(v, k)

            return updated, output_map
        else:
            if contains_minibatch_data:
                updated = super(Trainer, self).train_minibatch_overload_for_minibatchdata(
                    arguments, device)
            else:
                updated = super(Trainer, self).train_minibatch(arguments,
                    device)

            return updated

    def test_minibatch(self, arguments, device=None):
        '''
        Test the model on the specified batch of samples using the evaluation
        Function specified during construction of the Trainer.

        Args:
            arguments: maps variables to their
                input data. The interpretation depends on the input type:

                * `dict`: keys are input variable or names, and values are the input data.
                  See :meth:`~cntk.ops.functions.Function.forward` for details on passing input data.
                * any other type: if node has an unique input, ``arguments`` is mapped to this input.
                  For nodes with more than one input, only `dict` is allowed.

                In both cases, every sample in the data will be interpreted
                as a new sequence. To mark samples as continuations of the
                previous sequence, specify ``arguments`` as `tuple`: the
                first element will be used as ``arguments``, and the second one will
                be used as a list of bools, denoting whether a sequence is a new
                one (`True`) or a continuation of the previous one (`False`).
                Data should be either NumPy arrays or a
                :class:`~cntk.io.MinibatchData` instance.
            device (:class:`~cntk.device.DeviceDescriptor`): the device descriptor that
                contains the type and id of the device on which the computation is
                to be performed.

        Note:
             See :meth:`~cntk.ops.functions.Function.forward` for examples on
             passing input data.

        Returns:
            `float`: the average evaluation criterion value per sample for the
            tested minibatch.
        '''
        if not device:
            device = use_default_device()
        # pass all args of all parts (model, loss, eval)
        all_args = set(self.loss_function.arguments)
        if self.model:
            all_args |= set(self.model.arguments)
        if self.evaluation_function:
            all_args |= set(self.evaluation_function.arguments)
        arguments = sanitize_var_map(tuple(all_args), arguments)

        return super(Trainer, self).test_minibatch(arguments, device)

    def save_checkpoint(self, filename, external_state=None):
        '''
        Saves a checkpoint of the model and other Trainer state at the
        specified file location.

        In distributed environment the checkpointing is done by
        the main worker.

        Args:
            filename (str): filename to store the checkpoint.
            external_state (dict): optional additional state to store with
                the checkpoint; defaults to an empty dict.
        '''
        # None (not {}) is the default to avoid the shared mutable-default
        # -argument pitfall; substitute a fresh empty dict per call instead.
        if external_state is None:
            external_state = {}
        super(Trainer, self).save_checkpoint(filename, _py_dict_to_cntk_dict(external_state))

    def restore_from_checkpoint(self, filename):
        '''
        Restores a checkpoint of the model and Trainer state from the
        specified file location.

        Args:
            filename (str): filename to restore the checkpoint from
        '''
        super(Trainer, self).restore_from_checkpoint(filename)

    @property
    @typemap
    def model(self):
        '''
        The model that the trainer is training.
        '''
        return super(Trainer, self).model()

    @property
    @typemap
    def loss_function(self):
        '''
        The loss function that the trainer is using.
        '''
        return super(Trainer, self).loss_function()

    @property
    @typemap
    def evaluation_function(self):
        '''
        The evaluation function that the trainer is using.
        '''
        return super(Trainer, self).evaluation_function()

    @property
    @typemap
    def parameter_learners(self):
        '''
        The parameter learners that the trainer is using.
        '''
        return super(Trainer, self).parameter_learners()

    @property
    def previous_minibatch_loss_average(self):
        '''
        The average training loss per sample for the last minibatch trained
        '''
        return super(Trainer, self).previous_minibatch_loss_average()

    @property
    def previous_minibatch_evaluation_average(self):
        '''
        The average evaluation criterion value per sample for the last minibatch trained
        '''
        return super(Trainer, self).previous_minibatch_evaluation_average()

    @property
    def previous_minibatch_sample_count(self):
        '''
        The number of samples in the last minibatch trained with
        '''
        return super(Trainer, self).previous_minibatch_sample_count()

    @property
    def total_number_of_samples_seen(self):
        '''
        The number of samples seen globally between all workers from the beginning of training.
        '''
        return super(Trainer, self).total_number_of_samples_seen()

    def summarize_training_progress(self):
        '''
        Updates the progress writers with the summary of training progress since start and resets the internal
        accumulators.
        '''
        return super(Trainer, self).summarize_training_progress()

    def summarize_test_progress(self):
        '''
        Updates the progress writers with the summary of test progress since start and resets the internal
        accumulators.
        '''
        return super(Trainer, self).summarize_test_progress()
| 42.461059 | 135 | 0.644241 |
from .. import cntk_py
from ..device import use_default_device
from cntk.internal import sanitize_var_map, sanitize_function, typemap, \
_value_as_sequence_or_array
from cntk.internal.utils import _py_dict_to_cntk_dict
from ..io import MinibatchData
__doc__ = '''\
A trainer encapsulates the overall training process and employs one or more
:mod:`~cntk.learners` to tune the parameters of a specified model
using gradients of parameters w.r.t. a training objective.
'''
class Trainer(cntk_py.Trainer):
    """Drives training of a model's loss function with a set of learners.

    An optional (possibly non-differentiable) metric function tracks the
    trained model's quality.
    """

    @staticmethod
    def _get_loss_metric(criterion):
        """Interpret ``criterion`` as a (loss, metric-or-None) Function pair."""
        if isinstance(criterion, cntk_py.Function):
            criterion = criterion.outputs
        from cntk import combine
        criterion = tuple([combine([output], output.name) if isinstance(output, cntk_py.Variable) else output for output in criterion])
        if len(criterion) == 1:
            criterion = criterion + (None,)
        elif len(criterion) != 2:
            raise ValueError("criterion parameter must be a singleton or a tuple of 2 elements")
        return criterion

    def __init__(self, model, criterion, parameter_learners, progress_writers=None):
        loss_function, eval_function = Trainer._get_loss_metric(criterion)
        if model is not None: # None means dummy model that is, e.g., the same as a criterion
            model = sanitize_function(model)
        loss_function = sanitize_function(loss_function)
        if eval_function is not None:
            eval_function = sanitize_function(eval_function)
        if not isinstance(parameter_learners, list):
            parameter_learners = [parameter_learners]
        if progress_writers is None:
            progress_writers = []
        elif not isinstance(progress_writers, list):
            progress_writers = [progress_writers]
        trainer = cntk_py.trainer_impl(model, loss_function, eval_function, parameter_learners, progress_writers)
        # transplant into this class instance
        self.__dict__ = trainer.__dict__

    # TODO: bring this back once the design has been settled
    def _train_test_mb_map_args(self, *args, **kwargs):
        """Mimic the Python calling convention for train/test_minibatch()."""
        # one argument, which is an arg map or a (map, bool) tuple
        if len(args) == 1 and isinstance(args[0], (dict, tuple)):
            return args[0]
        # map to function arguments
        args = self.loss_function.argument_map(*args, **kwargs)
        # in this use case, all must have the same inputs (subsets of loss) since they are all called as a single combined function
        if self.model:
            for arg in self.model.arguments:
                if arg not in self.loss_function.arguments:
                    raise ValueError("model function must share its arguments with the loss function")
        if self.evaluation_function:
            for arg in self.evaluation_function.arguments:
                if arg not in self.loss_function.arguments:
                    raise ValueError("evaluation function must have the same signature and inputs as the loss function")
        return args

    def train_minibatch(self, arguments, outputs=None, device=None):
        """Optimize model parameters on one minibatch of training samples.

        Returns whether updates were performed; with ``outputs`` given, also
        returns a map of those variables to their computed values.
        """
        if not device:
            device = use_default_device()
        if arguments: # arguments must feed all inputs (model, loss, eval)
            all_args = set(self.loss_function.arguments)
            if self.model:
                all_args |= set(self.model.arguments)
            if self.evaluation_function:
                all_args |= set(self.evaluation_function.arguments)
            arguments = sanitize_var_map(tuple(all_args), arguments,
                extract_values_from_minibatch_data = False, device=device)
        # MinibatchData inputs are dispatched to a dedicated overload.
        contains_minibatch_data = False
        if (len(arguments) > 0):
            value = next(iter(arguments.values()))
            contains_minibatch_data = isinstance(value, MinibatchData)
        if outputs:
            output_map = {v: None for v in outputs}
            if contains_minibatch_data:
                updated = super(Trainer, self).train_minibatch_overload_for_minibatchdata(
                    arguments, output_map, device)
            else:
                updated = super(Trainer, self).train_minibatch(arguments,
                    output_map, device)
            for k, v in output_map.items():
                output_map[k] = _value_as_sequence_or_array(v, k)
            return updated, output_map
        else:
            if contains_minibatch_data:
                updated = super(Trainer, self).train_minibatch_overload_for_minibatchdata(
                    arguments, device)
            else:
                updated = super(Trainer, self).train_minibatch(arguments,
                    device)
            return updated

    def test_minibatch(self, arguments, device=None):
        """Evaluate the model on one minibatch; returns the average metric
        value per sample."""
        if not device:
            device = use_default_device()
        # pass all args of all parts (model, loss, eval)
        all_args = set(self.loss_function.arguments)
        if self.model:
            all_args |= set(self.model.arguments)
        if self.evaluation_function:
            all_args |= set(self.evaluation_function.arguments)
        arguments = sanitize_var_map(tuple(all_args), arguments)
        return super(Trainer, self).test_minibatch(arguments, device)

    def save_checkpoint(self, filename, external_state=None):
        """Save model and Trainer state to ``filename``.

        ``external_state`` defaults to an empty dict; ``None`` (not ``{}``)
        is used as the default to avoid the mutable-default-argument pitfall.
        """
        if external_state is None:
            external_state = {}
        super(Trainer, self).save_checkpoint(filename, _py_dict_to_cntk_dict(external_state))

    def restore_from_checkpoint(self, filename):
        """Restore model and Trainer state from ``filename``."""
        super(Trainer, self).restore_from_checkpoint(filename)

    @property
    @typemap
    def model(self):
        """The model being trained."""
        return super(Trainer, self).model()

    @property
    @typemap
    def loss_function(self):
        """The loss function in use."""
        return super(Trainer, self).loss_function()

    @property
    @typemap
    def evaluation_function(self):
        """The evaluation function in use."""
        return super(Trainer, self).evaluation_function()

    @property
    @typemap
    def parameter_learners(self):
        """The parameter learners in use."""
        return super(Trainer, self).parameter_learners()

    @property
    def previous_minibatch_loss_average(self):
        """Average training loss per sample for the last minibatch."""
        return super(Trainer, self).previous_minibatch_loss_average()

    @property
    def previous_minibatch_evaluation_average(self):
        """Average evaluation value per sample for the last minibatch."""
        return super(Trainer, self).previous_minibatch_evaluation_average()

    @property
    def previous_minibatch_sample_count(self):
        """Number of samples in the last minibatch trained with."""
        return super(Trainer, self).previous_minibatch_sample_count()

    @property
    def total_number_of_samples_seen(self):
        """Samples seen globally across all workers since training began."""
        return super(Trainer, self).total_number_of_samples_seen()

    def summarize_training_progress(self):
        """Flush the training-progress summary to the progress writers."""
        return super(Trainer, self).summarize_training_progress()

    def summarize_test_progress(self):
        """Flush the test-progress summary to the progress writers."""
        return super(Trainer, self).summarize_test_progress()
| true | true |
f722bd62f7f88b7d727e32c81e84c320ebafaa0b | 755 | py | Python | unicef_schools_attribute_cleaning/models/EducationLevel.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | null | null | null | unicef_schools_attribute_cleaning/models/EducationLevel.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | 3 | 2020-10-08T15:28:38.000Z | 2020-10-15T14:37:00.000Z | unicef_schools_attribute_cleaning/models/EducationLevel.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | null | null | null | """
Enumerated type for education level.
"""
from .FuzzyMatchingEnum import FuzzyMatchingEnum
class EducationLevel(FuzzyMatchingEnum):
    """
    EducationLevel enumerated type with fuzzy matching.

    Several member names map onto one canonical label (e.g. Spanish terms and
    local school-system abbreviations), so most members are Enum aliases of
    the first member carrying that value.
    """
    pre_primary = "Pre-Primary"
    pre_básica = "Pre-Primary"  # Spanish-language source spelling
    primary = "Primary"
    básica = "Primary"  # Spanish-language source spelling
    middle = "Middle"
    media = "Middle"  # Spanish-language source spelling
    secondary = "Secondary"
    # NOTE(review): "_9_ybe"/"_12_ybe" (years of basic education) mapping to
    # "Pre-Primary" looks inconsistent with the surrounding mappings — confirm.
    _9_ybe = "Pre-Primary"
    _12_ybe = "Pre-Primary"
    a_level = "Secondary"
    b_level = "Secondary"
    polytechnic = "Polytechnic"
    college = "University"
    university = "University"
    technical = "Polytechnic"
    vocational = "Polytechnic"
    vtc = "Polytechnic"  # presumably "vocational training centre" — TODO confirm
    tss = "Polytechnic"  # presumably "technical secondary school" — TODO confirm
    disability = "Polytechnic"
    nursing = "Polytechnic"
| 23.59375 | 55 | 0.658278 | from .FuzzyMatchingEnum import FuzzyMatchingEnum
class EducationLevel(FuzzyMatchingEnum):
    """Education-level enum with fuzzy matching; many members are aliases
    sharing one canonical label."""
    pre_primary = "Pre-Primary"
    pre_básica = "Pre-Primary"  # Spanish-language source spelling
    primary = "Primary"
    básica = "Primary"  # Spanish-language source spelling
    middle = "Middle"
    media = "Middle"  # Spanish-language source spelling
    secondary = "Secondary"
    # NOTE(review): 9/12 YBE mapped to "Pre-Primary" looks inconsistent — confirm.
    _9_ybe = "Pre-Primary"
    _12_ybe = "Pre-Primary"
    a_level = "Secondary"
    b_level = "Secondary"
    polytechnic = "Polytechnic"
    college = "University"
    university = "University"
    technical = "Polytechnic"
    vocational = "Polytechnic"
    vtc = "Polytechnic"
    tss = "Polytechnic"
    disability = "Polytechnic"
    nursing = "Polytechnic"
| true | true |
f722be26b41a4cd19397bdae99ece132a731f927 | 1,136 | py | Python | mindhome_alpha/erpnext/patches/v11_0/add_default_email_template_for_leave.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v11_0/add_default_email_template_for_leave.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v11_0/add_default_email_template_for_leave.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe, os
from frappe import _
def execute():
	"""Create the default leave e-mail templates if they are missing.

	Inserts the 'Leave Approval Notification' and 'Leave Status Notification'
	Email Template records, both rendered from the leave-application HTML
	template shipped with erpnext.
	"""
	frappe.reload_doc("email", "doctype", "email_template")
	# Both templates share the same response body and use their (translated)
	# name as the subject, so one helper covers both.
	for template_name in (_('Leave Approval Notification'),
			_('Leave Status Notification')):
		_make_leave_email_template(template_name)

def _make_leave_email_template(template_name):
	"""Insert the Email Template *template_name* unless it already exists."""
	if frappe.db.exists("Email Template", template_name):
		return
	base_path = frappe.get_app_path("erpnext", "hr", "doctype")
	response = frappe.read_file(
		os.path.join(base_path, "leave_application/leave_application_email_template.html"))
	frappe.get_doc({
		'doctype': 'Email Template',
		'name': template_name,
		'response': response,
		'subject': template_name,
		'owner': frappe.session.user,
	}).insert(ignore_permissions=True)
| 36.645161 | 113 | 0.729754 | from __future__ import unicode_literals
import frappe, os
from frappe import _
def execute():
	"""Patch: create two default leave-notification Email Templates if missing."""
	frappe.reload_doc("email", "doctype", "email_template")
	if not frappe.db.exists("Email Template", _('Leave Approval Notification')):
		# Template body is the leave-application HTML shipped with erpnext.
		base_path = frappe.get_app_path("erpnext", "hr", "doctype")
		response = frappe.read_file(os.path.join(base_path, "leave_application/leave_application_email_template.html"))
		frappe.get_doc({
			'doctype': 'Email Template',
			'name': _("Leave Approval Notification"),
			'response': response,
			'subject': _("Leave Approval Notification"),
			'owner': frappe.session.user,
		}).insert(ignore_permissions=True)
	if not frappe.db.exists("Email Template", _('Leave Status Notification')):
		base_path = frappe.get_app_path("erpnext", "hr", "doctype")
		response = frappe.read_file(os.path.join(base_path, "leave_application/leave_application_email_template.html"))
		frappe.get_doc({
			'doctype': 'Email Template',
			'name': _("Leave Status Notification"),
			'response': response,
			'subject': _("Leave Status Notification"),
			'owner': frappe.session.user,
		}).insert(ignore_permissions=True)
| true | true |
f722be643b31ce8c40ee6ceee4e09c86f2a2faba | 386 | py | Python | Dataset/Leetcode/train/69/621.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/69/621.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/69/621.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, x: int) -> int:
if x == 1:
return 1
left = 0
right = x
while right - left > 1:
mid = (left + right) // 2
a = mid ** 2
if a == x:
return mid
if a > x:
right = mid
else:
left = mid
return left
| 21.444444 | 37 | 0.339378 | class Solution:
    def XXX(self, x: int) -> int:
        # Floor integer square root of x via binary search on [0, x].
        if x == 1:
            # right starts at x, so the loop below never runs for x == 1;
            # handle it explicitly.
            return 1
        left = 0
        right = x
        while right - left > 1:
            mid = (left + right) // 2
            a = mid ** 2
            if a == x:
                return mid  # exact square found
            if a > x:
                right = mid
            else:
                left = mid
        return left  # largest value whose square does not exceed x
| true | true |
f722bec17844490f751c3d9dc7d43b531ddd3f98 | 2,608 | py | Python | resnet/official/utils/misc/distribution_utils_test.py | biolins/frivolous_dnns | 23d9a057ac517770cdfe9d8ac71543c328fcf76d | [
"MIT"
] | null | null | null | resnet/official/utils/misc/distribution_utils_test.py | biolins/frivolous_dnns | 23d9a057ac517770cdfe9d8ac71543c328fcf76d | [
"MIT"
] | null | null | null | resnet/official/utils/misc/distribution_utils_test.py | biolins/frivolous_dnns | 23d9a057ac517770cdfe9d8ac71543c328fcf76d | [
"MIT"
] | 2 | 2021-05-31T23:08:13.000Z | 2021-12-28T19:11:08.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Tests for distribution util functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.misc import distribution_utils
class GetDistributionStrategyTest(tf.test.TestCase):
  """Tests for get_distribution_strategy."""

  def test_one_device_strategy_cpu(self):
    # num_gpus=0: expect a single replica placed on the CPU.
    ds = distribution_utils.get_distribution_strategy(num_gpus=0)
    # assertEqual replaces the deprecated assertEquals alias (removed in
    # Python 3.12).
    self.assertEqual(ds.num_replicas_in_sync, 1)
    self.assertEqual(len(ds.extended.worker_devices), 1)
    self.assertIn('CPU', ds.extended.worker_devices[0])

  def test_one_device_strategy_gpu(self):
    # num_gpus=1: expect a single replica placed on the GPU.
    ds = distribution_utils.get_distribution_strategy(num_gpus=1)
    self.assertEqual(ds.num_replicas_in_sync, 1)
    self.assertEqual(len(ds.extended.worker_devices), 1)
    self.assertIn('GPU', ds.extended.worker_devices[0])

  def test_mirrored_strategy(self):
    # num_gpus>1: expect one replica per GPU.
    ds = distribution_utils.get_distribution_strategy(num_gpus=5)
    self.assertEqual(ds.num_replicas_in_sync, 5)
    self.assertEqual(len(ds.extended.worker_devices), 5)
    for device in ds.extended.worker_devices:
      self.assertIn('GPU', device)
class PerReplicaBatchSizeTest(tf.test.TestCase):
"""Tests for per_replica_batch_size."""
def test_batch_size(self):
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=0), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=1), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=7), 21)
def test_batch_size_with_remainder(self):
with self.assertRaises(ValueError):
distribution_utils.per_replica_batch_size(147, num_gpus=5)
if __name__ == "__main__":
tf.test.main()
| 38.925373 | 80 | 0.715874 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.utils.misc import distribution_utils
class GetDistributionStrategyTest(tf.test.TestCase):
def test_one_device_strategy_cpu(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=0)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('CPU', ds.extended.worker_devices[0])
def test_one_device_strategy_gpu(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=1)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('GPU', ds.extended.worker_devices[0])
def test_mirrored_strategy(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=5)
self.assertEquals(ds.num_replicas_in_sync, 5)
self.assertEquals(len(ds.extended.worker_devices), 5)
for device in ds.extended.worker_devices:
self.assertIn('GPU', device)
class PerReplicaBatchSizeTest(tf.test.TestCase):
def test_batch_size(self):
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=0), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=1), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=7), 21)
def test_batch_size_with_remainder(self):
with self.assertRaises(ValueError):
distribution_utils.per_replica_batch_size(147, num_gpus=5)
if __name__ == "__main__":
tf.test.main()
| true | true |
f722bf6a876701e79bdf77fe164cedf55ae66037 | 3,622 | py | Python | simpletransformers/conv_ai/conv_ai_utils.py | manueltonneau/simpletransformers | 7374b786857008e023604789e89c1690ad8bde97 | [
"Apache-2.0"
] | 1 | 2021-04-04T12:21:11.000Z | 2021-04-04T12:21:11.000Z | simpletransformers/conv_ai/conv_ai_utils.py | manueltonneau/simpletransformers | 7374b786857008e023604789e89c1690ad8bde97 | [
"Apache-2.0"
] | null | null | null | simpletransformers/conv_ai/conv_ai_utils.py | manueltonneau/simpletransformers | 7374b786857008e023604789e89c1690ad8bde97 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import socket
import tarfile
import tempfile
from datetime import datetime
from multiprocessing import Pool
import torch
from tqdm.auto import tqdm
from transformers import cached_path
PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json"
HF_FINETUNED_MODEL = (
"https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/gpt_personachat_cache.tar.gz" # noqa
)
logger = logging.getLogger(__file__)
def download_pretrained_model():
""" Download and extract finetuned model from S3 """
resolved_archive_file = cached_path(HF_FINETUNED_MODEL)
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, "r:gz") as archive:
archive.extractall(tempdir)
return tempdir
def tokenize_multi(data):
obj, tokenizer = data
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize_multi((o, tokenizer))) for n, o in obj.items())
return list(tokenize_multi((o, tokenizer)) for o in obj)
def get_dataset(
tokenizer,
dataset_path,
dataset_cache,
process_count,
proxies,
evaluate=False,
interact=False,
no_cache=False,
args=None,
):
""" Get tokenized PERSONACHAT dataset from S3 or cache."""
dataset_path = dataset_path or PERSONACHAT_URL
mode = "eval" if evaluate else "train"
if interact:
mode = "interact"
dataset_cache = (
dataset_cache + "_" + type(tokenizer).__name__ + "_" + mode
) # To avoid using GPT cache for GPT-2 and vice-versa
if dataset_cache and os.path.isfile(dataset_cache) and not no_cache:
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path, proxies=proxies)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
data = [(d, tokenizer) for d in obj]
if args.multiprocessing_chunksize == -1:
chunksize = max(len(data) // (args.process_count * 2), 500)
else:
chunksize = args.multiprocessing_chunksize
with Pool(process_count) as p:
tokenized_data = list(tqdm(p.imap(tokenize_multi, data, chunksize=chunksize), total=len(data)))
return tokenized_data
if not interact and dataset_path == PERSONACHAT_URL:
if not evaluate:
dataset = dataset["train"]
else:
dataset = dataset["valid"]
dataset = tokenize(dataset)
torch.save(dataset, dataset_cache)
return dataset
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
| 33.537037 | 115 | 0.6709 |
import json
import logging
import os
import socket
import tarfile
import tempfile
from datetime import datetime
from multiprocessing import Pool
import torch
from tqdm.auto import tqdm
from transformers import cached_path
PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json"
HF_FINETUNED_MODEL = (
"https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/gpt_personachat_cache.tar.gz"
)
logger = logging.getLogger(__file__)
def download_pretrained_model():
resolved_archive_file = cached_path(HF_FINETUNED_MODEL)
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, "r:gz") as archive:
archive.extractall(tempdir)
return tempdir
def tokenize_multi(data):
obj, tokenizer = data
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize_multi((o, tokenizer))) for n, o in obj.items())
return list(tokenize_multi((o, tokenizer)) for o in obj)
def get_dataset(
tokenizer,
dataset_path,
dataset_cache,
process_count,
proxies,
evaluate=False,
interact=False,
no_cache=False,
args=None,
):
dataset_path = dataset_path or PERSONACHAT_URL
mode = "eval" if evaluate else "train"
if interact:
mode = "interact"
dataset_cache = (
dataset_cache + "_" + type(tokenizer).__name__ + "_" + mode
)
if dataset_cache and os.path.isfile(dataset_cache) and not no_cache:
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path, proxies=proxies)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
data = [(d, tokenizer) for d in obj]
if args.multiprocessing_chunksize == -1:
chunksize = max(len(data) // (args.process_count * 2), 500)
else:
chunksize = args.multiprocessing_chunksize
with Pool(process_count) as p:
tokenized_data = list(tqdm(p.imap(tokenize_multi, data, chunksize=chunksize), total=len(data)))
return tokenized_data
if not interact and dataset_path == PERSONACHAT_URL:
if not evaluate:
dataset = dataset["train"]
else:
dataset = dataset["valid"]
dataset = tokenize(dataset)
torch.save(dataset, dataset_cache)
return dataset
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
| true | true |
f722c0d72f83743569ce6bd8e0b43ff07382e0b5 | 2,315 | py | Python | zwave_js_server/model/firmware.py | firstof9/zwave-js-server-python | 728d1e44277b8c69fac06eba1f8362c281762bf7 | [
"Apache-2.0"
] | 53 | 2021-01-09T18:47:34.000Z | 2022-03-16T21:54:41.000Z | zwave_js_server/model/firmware.py | firstof9/zwave-js-server-python | 728d1e44277b8c69fac06eba1f8362c281762bf7 | [
"Apache-2.0"
] | 130 | 2021-01-06T21:34:46.000Z | 2022-03-29T18:44:14.000Z | zwave_js_server/model/firmware.py | firstof9/zwave-js-server-python | 728d1e44277b8c69fac06eba1f8362c281762bf7 | [
"Apache-2.0"
] | 17 | 2021-01-07T21:55:29.000Z | 2022-03-29T08:08:50.000Z | """Provide a model for Z-Wave firmware."""
from enum import IntEnum
from typing import TYPE_CHECKING, Optional, TypedDict
if TYPE_CHECKING:
from .node import Node
class FirmwareUpdateStatus(IntEnum):
"""Enum with all Firmware update status values.
https://zwave-js.github.io/node-zwave-js/#/api/node?id=status
"""
ERROR_TIMEOUT = -1
ERROR_CHECKSUM = 0
ERROR_TRANSMISSION_FAILED = 1
ERROR_INVALID_MANUFACTURER_ID = 2
ERROR_INVALID_FIRMWARE_ID = 3
ERROR_INVALID_FIRMWARE_TARGET = 4
ERROR_INVALID_HEADER_INFORMATION = 5
ERROR_INVALID_HEADER_FORMAT = 6
ERROR_INSUFFICIENT_MEMORY = 7
ERROR_INVALID_HARDWARE_VERSION = 8
OK_WAITING_FOR_ACTIVATION = 253
OK_NO_RESTART = 254
OK_RESTART_PENDING = 255
class FirmwareUpdateProgressDataType(TypedDict):
"""Represent a firmware update progress event dict type."""
sentFragments: int # required
totalFragments: int # required
class FirmwareUpdateProgress:
"""Model for firmware update progress event."""
def __init__(self, node: "Node", data: FirmwareUpdateProgressDataType) -> None:
"""Initialize."""
self.data = data
self.node = node
@property
def sent_fragments(self) -> int:
"""Return the number of fragments sent to the device so far."""
return self.data["sentFragments"]
@property
def total_fragments(self) -> int:
"""Return the total number of fragments that need to be sent to the device."""
return self.data["totalFragments"]
class FirmwareUpdateFinishedDataType(TypedDict, total=False):
"""Represent a firmware update finished event dict type."""
status: int # required
waitTime: int
class FirmwareUpdateFinished:
"""Model for firmware update finished event."""
def __init__(self, node: "Node", data: FirmwareUpdateFinishedDataType) -> None:
"""Initialize."""
self.data = data
self.node = node
@property
def status(self) -> FirmwareUpdateStatus:
"""Return the firmware update status."""
return FirmwareUpdateStatus(self.data["status"])
@property
def wait_time(self) -> Optional[int]:
"""Return the wait time in seconds before the device is functional again."""
return self.data.get("waitTime")
| 28.9375 | 86 | 0.690713 | from enum import IntEnum
from typing import TYPE_CHECKING, Optional, TypedDict
if TYPE_CHECKING:
from .node import Node
class FirmwareUpdateStatus(IntEnum):
ERROR_TIMEOUT = -1
ERROR_CHECKSUM = 0
ERROR_TRANSMISSION_FAILED = 1
ERROR_INVALID_MANUFACTURER_ID = 2
ERROR_INVALID_FIRMWARE_ID = 3
ERROR_INVALID_FIRMWARE_TARGET = 4
ERROR_INVALID_HEADER_INFORMATION = 5
ERROR_INVALID_HEADER_FORMAT = 6
ERROR_INSUFFICIENT_MEMORY = 7
ERROR_INVALID_HARDWARE_VERSION = 8
OK_WAITING_FOR_ACTIVATION = 253
OK_NO_RESTART = 254
OK_RESTART_PENDING = 255
class FirmwareUpdateProgressDataType(TypedDict):
sentFragments: int
totalFragments: int
class FirmwareUpdateProgress:
def __init__(self, node: "Node", data: FirmwareUpdateProgressDataType) -> None:
self.data = data
self.node = node
@property
def sent_fragments(self) -> int:
return self.data["sentFragments"]
@property
def total_fragments(self) -> int:
return self.data["totalFragments"]
class FirmwareUpdateFinishedDataType(TypedDict, total=False):
status: int
waitTime: int
class FirmwareUpdateFinished:
def __init__(self, node: "Node", data: FirmwareUpdateFinishedDataType) -> None:
self.data = data
self.node = node
@property
def status(self) -> FirmwareUpdateStatus:
return FirmwareUpdateStatus(self.data["status"])
@property
def wait_time(self) -> Optional[int]:
return self.data.get("waitTime")
| true | true |
f722c138b3d31728a6e3144034675629954da58f | 917 | py | Python | template/main/migrations/0001_add_profile.py | vidalmatheus/f-square | 0a79fd6181e702b05821cd01b7f1cfc87ffa8699 | [
"MIT"
] | 2 | 2021-11-30T14:59:05.000Z | 2022-01-17T02:14:46.000Z | template/main/migrations/0001_add_profile.py | vidalmatheus/f-square | 0a79fd6181e702b05821cd01b7f1cfc87ffa8699 | [
"MIT"
] | null | null | null | template/main/migrations/0001_add_profile.py | vidalmatheus/f-square | 0a79fd6181e702b05821cd01b7f1cfc87ffa8699 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-02 18:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_blocked', models.BooleanField(default=False)),
('block_reason', models.TextField(blank=True, null=True)),
('photo_url', models.CharField(blank=True, max_length=1024, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.75 | 121 | 0.63795 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_blocked', models.BooleanField(default=False)),
('block_reason', models.TextField(blank=True, null=True)),
('photo_url', models.CharField(blank=True, max_length=1024, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f722c28f10583fe5c63ddf1e423f2b3a0843dff0 | 3,549 | py | Python | bindings/python/ensmallen/datasets/string/streptomycescatenulae.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-09-10T18:31:58.000Z | 2022-03-24T04:28:04.000Z | bindings/python/ensmallen/datasets/string/streptomycescatenulae.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/streptomycescatenulae.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Streptomyces catenulae.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StreptomycesCatenulae(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Streptomyces catenulae graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Streptomyces catenulae graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesCatenulae",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 32.861111 | 223 | 0.677656 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def StreptomycesCatenulae(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesCatenulae",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f722c46bb06665aae4a50164c96b98d2168500c2 | 54,813 | py | Python | radiomics/imageoperations.py | laurentletg/pyradiomics | b30a7fe086417999481bc6792dced4bf3dc3de32 | [
"BSD-3-Clause"
] | 1 | 2019-10-07T12:48:20.000Z | 2019-10-07T12:48:20.000Z | radiomics/imageoperations.py | laurentletg/pyradiomics | b30a7fe086417999481bc6792dced4bf3dc3de32 | [
"BSD-3-Clause"
] | null | null | null | radiomics/imageoperations.py | laurentletg/pyradiomics | b30a7fe086417999481bc6792dced4bf3dc3de32 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import logging
import numpy
import pywt
import SimpleITK as sitk
import six
from six.moves import range
logger = logging.getLogger(__name__)
def getMask(mask, **kwargs):
  """
  Resolve and validate the mask volume to use for feature extraction.

  Three concerns are handled here:

  1. If ``mask`` is a SimpleITK Vector image (a segmentation object, which allows
     overlapping labelmaps), the labelmap at channel ``label_channel`` (default 0) is
     extracted and treated as the scalar mask volume.
  2. The mask is force-cast to UInt32 to guarantee a consistent pixel datatype.
  3. The resulting volume is checked for the ROI identified by ``label`` (default 1);
     a ValueError is raised when the mask contains no segmentation at all, or when the
     requested label is absent (the error lists the labels that are present).

  :param mask: SimpleITK Image object representing the mask. Can be a vector image to
    allow for overlapping masks.
  :param kwargs: keyword arguments; only ``label`` and ``label_channel`` are used here.
  :return: SimpleITK.Image with pixel type UInt32 representing the mask volume
  """
  global logger
  requested_label = kwargs.get('label', 1)
  requested_channel = kwargs.get('label_channel', 0)

  if 'vector' in mask.GetPixelIDTypeAsString().lower():
    logger.debug('Mask appears to be a segmentation object (=stored as vector image).')
    component_count = mask.GetNumberOfComponentsPerPixel()
    assert requested_channel < component_count, \
      "Mask %i requested, but segmentation object only contains %i objects" % (requested_channel, component_count)

    logger.info('Extracting mask at index %i', requested_channel)
    channel_selector = sitk.VectorIndexSelectionCastImageFilter()
    channel_selector.SetIndex(requested_channel)
    mask = channel_selector.Execute(mask)

  logger.debug('Force casting mask to UInt32 to ensure correct datatype.')
  mask = sitk.Cast(mask, sitk.sitkUInt32)

  found_labels = numpy.unique(sitk.GetArrayFromImage(mask))
  # A single unique value means only background is present (nothing segmented).
  if found_labels.size == 1:
    raise ValueError('No labels found in this mask (i.e. nothing is segmented)!')
  if requested_label not in found_labels:
    raise ValueError('Label (%g) not present in mask. Choose from %s' % (requested_label, found_labels[found_labels != 0]))

  return mask
def getBinEdges(parameterValues, **kwargs):
  r"""
  Compute the bin edges used to discretize the gray values in ``parameterValues``
  (1D array of all segmented voxels in the image).

  Two discretization modes are supported:

  **Fixed bin count** (used when ``binCount`` is given): the edges are those computed by
  ``numpy.histogram`` for ``binCount`` bins, with the topmost edge increased by 1. The
  increase ensures that the maximum gray value is assigned to the last bin by
  ``numpy.digitize``, which treats all bins as half-open (unlike ``numpy.histogram``,
  which closes the final bin).

  **Fixed bin width** (default, width taken from ``binWidth``, default 25): the edges are
  equally spaced multiples of the bin width. The lowest edge is the largest multiple of
  the bin width that does not exceed the minimum gray value, ensuring the bins are
  equally spaced from 0 and that the minimum discretized gray level is always 1. The
  edges extend 2 bin widths past the maximum, so that the maximum value always falls
  inside a half-open bin (consistent with ``numpy.digitize``).

  A completely flat region (one unique value that is evenly dividable by the bin width)
  would otherwise yield a single edge; in that case a single bin of width 1 centered on
  that value is returned, simulating ``numpy.histogram`` with ``bins=1``.

  The returned edges, a list/array of length N(bins) + 1, can be passed directly to
  ``numpy.histogram`` or ``numpy.digitize``. See also :py:func:`binImage()`.

  References

  - Leijenaar RTH, Nalbantov G, Carvalho S, et al. The effect of SUV discretization in
    quantitative FDG-PET Radiomics: the need for standardized methodology in tumor
    texture analysis. Sci Rep. 2015;5(August):11075.
  """
  global logger
  bin_width = kwargs.get('binWidth', 25)
  bin_count = kwargs.get('binCount')

  if bin_count is None:
    # Fixed bin width mode.
    val_min = min(parameterValues)
    val_max = max(parameterValues)

    # Lowest edge: largest multiple of the bin width at or below the minimum value.
    first_edge = val_min - (val_min % bin_width)
    # Stop 2 bin widths past the maximum: guarantees numpy.arange produces an edge
    # above the maximum value, keeping all bins half-open (numpy.histogram otherwise
    # closes the last bin, and numpy.digitize assigns len(bins) + 1 to values equal
    # to the rightmost edge).
    stop = val_max + 2 * bin_width

    binEdges = numpy.arange(first_edge, stop, bin_width)

    # Flat region (only possible when a single edge is produced): fall back to one
    # bin of width 1, matching the edges numpy.histogram would return for bins=1.
    # numpy.digitize requires at least 2 edges, so a bare bin count will not do.
    if len(binEdges) == 1:
      binEdges = [binEdges[0] - .5, binEdges[0] + .5]
  else:
    # Fixed bin count mode.
    binEdges = numpy.histogram(parameterValues, bin_count)[1]
    # Widen the top edge so the maximum value lands inside the last (half-open) bin
    # when using numpy.digitize.
    binEdges[-1] += 1

  logger.debug('Calculated %d bins for bin width %g with edges: %s)', len(binEdges) - 1, bin_width, binEdges)

  return binEdges
def binImage(parameterMatrix, parameterMatrixCoordinates=None, **kwargs):
  r"""
  Discretize the gray levels in ``parameterMatrix`` using the bin edges calculated by
  :py:func:`getBinEdges`.

  When ``parameterMatrixCoordinates`` is provided (a tuple of index arrays defining the
  segmentation, as returned by ``numpy.where``), only the segmented voxels are used to
  compute the histogram edges and only those voxels are discretized; voxels outside the
  segmentation are set to 0. When it is omitted, the full matrix is histogrammed and
  discretized.

  :param parameterMatrix: numpy array of gray level intensities.
  :param parameterMatrixCoordinates: optional tuple of coordinate arrays identifying the
    segmented voxels.
  :param kwargs: discretization settings, passed on to :py:func:`getBinEdges`
    (``binWidth``, ``binCount``).
  :return: tuple of (discretized matrix, bin edges used).
  """
  global logger
  logger.debug('Discretizing gray levels inside ROI')

  if parameterMatrixCoordinates is None:
    # No segmentation supplied: numpy.digitize produces the complete result directly,
    # so no zero-initialized output matrix is needed (the original code allocated one
    # here and immediately discarded it).
    binEdges = getBinEdges(parameterMatrix.flatten(), **kwargs)
    discretizedParameterMatrix = numpy.digitize(parameterMatrix, binEdges)
  else:
    binEdges = getBinEdges(parameterMatrix[parameterMatrixCoordinates], **kwargs)
    # Voxels outside the segmentation keep the initialization value 0.
    discretizedParameterMatrix = numpy.zeros(parameterMatrix.shape, dtype='int')
    discretizedParameterMatrix[parameterMatrixCoordinates] = \
      numpy.digitize(parameterMatrix[parameterMatrixCoordinates], binEdges)

  return discretizedParameterMatrix, binEdges
def checkMask(imageNode, maskNode, **kwargs):
  """
  Checks whether the Region of Interest (ROI) defined in the mask size and dimensions match constraints, specified in
  settings. The following checks are performed.

  1. Check whether the mask corresponds to the image (i.e. has a similar size, spacing, direction and origin). **N.B.
     This check is performed by SimpleITK, if it fails, an error is logged, with additional error information from
     SimpleITK logged with level DEBUG (i.e. logging-level has to be set to debug to store this information in the log
     file).** The tolerance can be increased using the ``geometryTolerance`` parameter. Alternatively, if the
     ``correctMask`` parameter is ``True``, PyRadiomics will check if the mask contains a valid ROI (inside image
     physical area) and if so, resample the mask to image geometry. See :ref:`radiomics-settings-label` for more info.

  2. Check if the label is present in the mask
  3. Count the number of dimensions in which the size of the ROI > 1 (i.e. does the ROI represent a single voxel (0), a
     line (1), a surface (2) or a volume (3)) and compare this to the minimum number of dimension required (specified in
     ``minimumROIDimensions``).
  4. Optional. Check if there are at least N voxels in the ROI. N is defined in ``minimumROISize``, this test is skipped
     if ``minimumROISize = None``.

  This function returns a tuple of two items. The first item is the bounding box of the mask. The second item is the
  mask that has been corrected by resampling to the input image geometry (if that resampling was successful), or None
  if no resampling was needed.

  If a check fails, a ValueError is raised. No features will be extracted for this mask.
  If the mask passes all tests, this function returns the bounding box, which is used in the :py:func:`cropToTumorMask`
  function.

  The bounding box is calculated during (1.) and used for the subsequent checks. The bounding box is
  calculated by SimpleITK.LabelStatisticsImageFilter() and returned as a tuple of indices: (L_x, U_x, L_y, U_y, L_z,
  U_z), where 'L' and 'U' are lower and upper bound, respectively, and 'x', 'y' and 'z' the three image dimensions.

  By reusing the bounding box calculated here, calls to SimpleITK.LabelStatisticsImageFilter() are reduced, improving
  performance.

  Uses the following settings:

  - minimumROIDimensions [1]: Integer, range 1-3, specifies the minimum dimensions (1D, 2D or 3D, respectively).
    Single-voxel segmentations are always excluded.
  - minimumROISize [None]: Integer, > 0, specifies the minimum number of voxels required. Test is skipped if
    this parameter is set to None.

  .. note::

    If the first check fails there are generally 2 possible causes:

     1. The image and mask are matched, but there is a slight difference in origin, direction or spacing. The exact
        cause, difference and used tolerance are stored with level DEBUG in a log (if enabled). For more information on
        setting up logging, see ":ref:`setting up logging <radiomics-logging-label>`" and the helloRadiomics examples
        (located in the ``pyradiomics/examples`` folder). This problem can be fixed by changing the global tolerance
        (``geometryTolerance`` parameter) or enabling mask correction (``correctMask`` parameter).
     2. The image and mask do not match, but the ROI contained within the mask does represent a physical volume
        contained within the image. If this is the case, resampling is needed to ensure matching geometry between image
        and mask before features can be extracted. This can be achieved by enabling mask correction using the
        ``correctMask`` parameter.
  """
  global logger
  # Remains None unless the mask had to be resampled to the image geometry below.
  correctedMask = None

  label = kwargs.get('label', 1)
  minDims = kwargs.get('minimumROIDimensions', 2)
  minSize = kwargs.get('minimumROISize', None)

  logger.debug('Checking mask with label %d', label)
  logger.debug('Calculating bounding box')
  # Determine bounds
  lsif = sitk.LabelStatisticsImageFilter()
  try:
    lsif.Execute(imageNode, maskNode)

    # If lsif fails, and mask is corrected, it includes a check whether the label is present. Therefore, perform
    # this test here only if lsif does not fail on the first attempt.
    if label not in lsif.GetLabels():
      raise ValueError('Label (%g) not present in mask' % label)
  except RuntimeError as e:
    # If correctMask = True, try to resample the mask to the image geometry, otherwise return None ("fail")
    if not kwargs.get('correctMask', False):
      # NOTE(review): the failure modes below are distinguished by matching on the exact
      # error-message text raised by SimpleITK; if SimpleITK ever changes these messages,
      # the unhandled 'raise e' branch is taken instead.
      if "Both images for LabelStatisticsImageFilter don't match type or dimension!" in e.args[0]:
        logger.debug('Additional information on error.', exc_info=True)
        raise ValueError('Image/Mask datatype or size mismatch. Potential fix: enable correctMask, see '
                         'Documentation:Usage:Customizing the Extraction:Settings:correctMask for more information')
      elif "Inputs do not occupy the same physical space!" in e.args[0]:
        logger.debug('Additional information on error.', exc_info=True)
        raise ValueError('Image/Mask geometry mismatch. Potential fix: increase tolerance using geometryTolerance, '
                         'see Documentation:Usage:Customizing the Extraction:Settings:geometryTolerance for more '
                         'information')
      else:
        raise e  # unhandled error

    logger.warning('Image/Mask geometry mismatch, attempting to correct Mask')

    correctedMask = _correctMask(imageNode, maskNode, **kwargs)  # Raises Value error if ROI outside image physical space

    # Resampling successful, try to calculate boundingbox
    try:
      lsif.Execute(imageNode, correctedMask)
    except RuntimeError:
      logger.debug('Bounding box calculation with resampled mask failed', exc_info=True)
      raise ValueError('Calculation of bounding box failed, for more information run with DEBUG logging and check log')

  # LBound and UBound of the bounding box, as (L_X, U_X, L_Y, U_Y, L_Z, U_Z)
  boundingBox = numpy.array(lsif.GetBoundingBox(label))

  logger.debug('Checking minimum number of dimensions requirements (%d)', minDims)
  # Count dimensions where the ROI spans more than 1 voxel (0 = single voxel,
  # 1 = line, 2 = surface, 3 = volume).
  ndims = numpy.sum((boundingBox[1::2] - boundingBox[0::2] + 1) > 1)  # UBound - LBound + 1 = Size
  if ndims == 0:
    raise ValueError('mask only contains 1 segmented voxel! Cannot extract features for a single voxel.')
  elif ndims < minDims:
    raise ValueError('mask has too few dimensions (number of dimensions %d, minimum required %d)' % (ndims, minDims))

  if minSize is not None:
    logger.debug('Checking minimum size requirements (minimum size: %d)', minSize)
    roiSize = lsif.GetCount(label)
    if roiSize <= minSize:
      raise ValueError('Size of the ROI is too small (minimum size: %g, ROI size: %g' % (minSize, roiSize))

  return boundingBox, correctedMask
def _correctMask(imageNode, maskNode, **kwargs):
  """
  Resample the mask onto the image geometry when the two do not match.

  The mask is first validated via :py:func:`_checkROI` (i.e. the mask must contain an ROI with the
  specified label value that lies fully inside the physical bounds of the image); a ValueError is
  raised by that check if the ROI is invalid. A valid mask is then resampled with the image as the
  reference grid, using nearest neighbor interpolation to preserve label values.

  :return: The mask resampled to the image geometry (SimpleITK image instance).
  """
  global logger
  logger.debug('Resampling mask to image geometry')

  # Raises a ValueError if the ROI is missing or extends outside the image's physical space
  _checkROI(imageNode, maskNode, **kwargs)

  resampler = sitk.ResampleImageFilter()
  resampler.SetReferenceImage(imageNode)
  # Nearest neighbor: label maps must not be smoothed/interpolated into non-label values
  resampler.SetInterpolator(sitk.sitkNearestNeighbor)

  logger.debug('Resampling...')

  return resampler.Execute(maskNode)
def _checkROI(imageNode, maskNode, **kwargs):
  """
  Check whether maskNode contains a valid ROI defined by label:

  1. Check whether the label value is present in the maskNode.
  2. Check whether the ROI defined by the label does not include an area outside the physical area of the image.

  For the second check, a tolerance of 1e-3 is allowed.

  If the ROI is valid, the bounding box (lower bounds, followed by size in all dimensions (X, Y, Z ordered)) is
  returned. Otherwise, a ValueError is raised.
  """
  global logger
  label = kwargs.get('label', 1)

  logger.debug('Checking ROI validity')

  # Determine bounds of cropped volume in terms of original Index coordinate space
  lssif = sitk.LabelShapeStatisticsImageFilter()
  lssif.Execute(maskNode)

  logger.debug('Checking if label %d is present in the mask', label)
  if label not in lssif.GetLabels():
    # Bug fix: the original passed `label` as a second ValueError argument, leaving the
    # '%d' placeholder unexpanded in the exception message. Apply %-formatting instead.
    raise ValueError('Label (%d) not present in mask' % label)

  # LBound and size of the bounding box, as (L_X, L_Y, [L_Z], S_X, S_Y, [S_Z])
  bb = numpy.array(lssif.GetBoundingBox(label))
  Nd = maskNode.GetDimension()

  # Determine if the ROI is within the physical space of the image
  logger.debug('Comparing physical space of bounding box to physical space of image')
  # Step 1: Get the origin and UBound corners of the bounding box in physical space
  # The additional 0.5 represents the difference between the voxel center and the voxel corner
  # Upper bound index of ROI = bb[:Nd] + bb[Nd:] - 1 (LBound + Size - 1), .5 is added to get corner
  ROIBounds = (maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] - .5),  # Origin
               maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] + bb[Nd:] - 0.5))  # UBound
  # Step 2: Translate the ROI physical bounds to the image coordinate space
  ROIBounds = (imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[0]),  # Origin
               imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[1]))

  logger.debug('ROI bounds (image coordinate space): %s', ROIBounds)

  # Check if any of the ROI bounds are outside the image indices (i.e. -0.5 < ROI < Im.Size -0.5)
  # The additional 0.5 is to allow for different spacings (defines the edges, not the centers of the edge-voxels)
  tolerance = 1e-3  # Define a tolerance to correct for machine precision errors
  if numpy.any(numpy.min(ROIBounds, axis=0) < (- .5 - tolerance)) or \
      numpy.any(numpy.max(ROIBounds, axis=0) > (numpy.array(imageNode.GetSize()) - .5 + tolerance)):
    raise ValueError('Bounding box of ROI is larger than image space:\n\t'
                     'ROI bounds (x, y, z image coordinate space) %s\n\tImage Size %s' %
                     (ROIBounds, imageNode.GetSize()))

  logger.debug('ROI valid, calculating resampling grid')

  return bb
def cropToTumorMask(imageNode, maskNode, boundingBox, **kwargs):
  """
  Crop image and mask to a cuboid equal to the ijk boundaries of the label, optionally
  extended by ``padDistance`` voxels on each side.

  :param boundingBox: The bounding box used to crop the image. This is the bounding box as returned by
    :py:func:`checkMask`.
  :param label: [1], value of the label, onto which the image and mask must be cropped.
  :return: Cropped image and mask (SimpleITK image instances).
  """
  global logger
  padDistance = kwargs.get('padDistance', 0)

  maskSize = numpy.array(maskNode.GetSize())

  # Crop sizes: number of voxels removed from the low / high end of each dimension
  lowerCrop = boundingBox[0::2] - padDistance
  upperCrop = maskSize - boundingBox[1::2] - padDistance - 1

  # Clamp to 0 so padding never extends the crop outside the original image bounds
  lowerCrop = numpy.maximum(lowerCrop, 0)
  upperCrop = numpy.maximum(upperCrop, 0)

  # Crop Image
  logger.debug('Cropping to size %s', (boundingBox[1::2] - boundingBox[0::2]) + 1)
  cropFilter = sitk.CropImageFilter()
  try:
    cropFilter.SetLowerBoundaryCropSize(lowerCrop)
    cropFilter.SetUpperBoundaryCropSize(upperCrop)
  except TypeError:
    # newer versions of SITK/python want a tuple or list
    cropFilter.SetLowerBoundaryCropSize(lowerCrop.tolist())
    cropFilter.SetUpperBoundaryCropSize(upperCrop.tolist())

  return cropFilter.Execute(imageNode), cropFilter.Execute(maskNode)
def resampleImage(imageNode, maskNode, **kwargs):
  """
  Resamples image and mask to the specified pixel spacing (The default interpolator is Bspline).

  Resampling can be enabled using the settings 'interpolator' and 'resampledPixelSpacing' in the parameter file or as
  part of the settings passed to the feature extractor. See also
  :ref:`feature extractor <radiomics-featureextractor-label>`.

  'imageNode' and 'maskNode' are SimpleITK Objects, and 'resampledPixelSpacing' is the output pixel spacing (sequence of
  3 elements).

  If only in-plane resampling is required, set the output pixel spacing for the out-of-plane dimension (usually the last
  dimension) to 0. Spacings with a value of 0 are replaced by the spacing as it is in the original mask.

  Only part of the image and labelmap are resampled. The resampling grid is aligned to the input origin, but only voxels
  covering the area of the image ROI (defined by the bounding box) and the padDistance are resampled. This results in a
  resampled and partially cropped image and mask. Additional padding is required as some filters also sample voxels
  outside of segmentation boundaries. For feature calculation, image and mask are cropped to the bounding box without
  any additional padding, as the feature classes do not need the gray level values outside the segmentation.

  The resampling grid is calculated using only the input mask. Even when image and mask have different directions, both
  the cropped image and mask will have the same direction (equal to direction of the mask). Spacing and size are
  determined by settings and bounding box of the ROI.

  .. note::
    Before resampling the bounds of the non-padded ROI are compared to the bounds. If the ROI bounding box includes
    areas outside of the physical space of the image, an error is logged and (None, None) is returned. No features will
    be extracted. This enables the input image and mask to have different geometry, so long as the ROI defines an area
    within the image.

  .. note::
    The additional padding is adjusted, so that only the physical space within the mask is resampled. This is done to
    prevent resampling outside of the image. Please note that this assumes the image and mask to image the same physical
    space. If this is not the case, it is possible that voxels outside the image are included in the resampling grid,
    these will be assigned a value of 0. It is therefore recommended, but not enforced, to use an input mask which has
    the same or a smaller physical space than the image.

  :raises ValueError: if either input is None, or (via ``_checkROI``) if the mask does not define a valid ROI.
  :raises AssertionError: if ``resampledPixelSpacing`` does not match the mask dimensionality.
  :return: Resampled (and partially cropped) image and mask (SimpleITK image instances).
  """
  global logger
  resampledPixelSpacing = kwargs['resampledPixelSpacing']  # required setting; KeyError if absent
  interpolator = kwargs.get('interpolator', sitk.sitkBSpline)
  padDistance = kwargs.get('padDistance', 5)
  label = kwargs.get('label', 1)

  logger.debug('Resampling image and mask')

  if imageNode is None or maskNode is None:
    raise ValueError('Requires both image and mask to resample')

  maskSpacing = numpy.array(maskNode.GetSpacing())
  imageSpacing = numpy.array(imageNode.GetSpacing())

  Nd_resampled = len(resampledPixelSpacing)
  Nd_mask = len(maskSpacing)
  assert Nd_resampled == Nd_mask, \
    'Wrong dimensionality (%i-D) of resampledPixelSpacing!, %i-D required' % (Nd_resampled, Nd_mask)

  # If spacing for a direction is set to 0, use the original spacing (enables "only in-slice" resampling)
  logger.debug('Where resampled spacing is set to 0, set it to the original spacing (mask)')
  resampledPixelSpacing = numpy.array(resampledPixelSpacing)
  resampledPixelSpacing = numpy.where(resampledPixelSpacing == 0, maskSpacing, resampledPixelSpacing)

  # Check if the maskNode contains a valid ROI. If ROI is valid, the bounding box needed to calculate the resampling
  # grid is returned.
  bb = _checkROI(imageNode, maskNode, **kwargs)

  # Do not resample in those directions where labelmap spans only one slice.
  maskSize = numpy.array(maskNode.GetSize())
  resampledPixelSpacing = numpy.where(bb[Nd_mask:] != 1, resampledPixelSpacing, maskSpacing)

  # If current spacing is equal to resampledPixelSpacing, no interpolation is needed
  # Tolerance = 1e-5 + 1e-8*abs(resampledSpacing)
  # NOTE(review): the log message below is missing a closing parenthesis; left as-is (runtime string)
  logger.debug('Comparing resampled spacing to original spacing (image')
  if numpy.allclose(imageSpacing, resampledPixelSpacing):
    logger.info('New spacing equal to original image spacing, just resampling the mask')

    # Ensure that image and mask geometry match (nearest neighbor keeps label values intact)
    rif = sitk.ResampleImageFilter()
    rif.SetReferenceImage(imageNode)
    rif.SetInterpolator(sitk.sitkNearestNeighbor)
    maskNode = rif.Execute(maskNode)

    # re-calculate the bounding box of the mask
    lssif = sitk.LabelShapeStatisticsImageFilter()
    lssif.Execute(maskNode)
    bb = numpy.array(lssif.GetBoundingBox(label))

    # Convert (LBound, Size) layout to the interleaved (L, U) layout expected by cropToTumorMask
    low_up_bb = numpy.empty(Nd_mask * 2, dtype=int)
    low_up_bb[::2] = bb[:3]
    low_up_bb[1::2] = bb[:3] + bb[3:] - 1
    return cropToTumorMask(imageNode, maskNode, low_up_bb, **kwargs)

  spacingRatio = maskSpacing / resampledPixelSpacing

  # Determine bounds of cropped volume in terms of new Index coordinate space,
  # round down for lowerbound and up for upperbound to ensure entire segmentation is captured (prevent data loss)
  # Pad with an extra .5 to prevent data loss in case of upsampling. For Ubound this is (-1 + 0.5 = -0.5)
  bbNewLBound = numpy.floor((bb[:Nd_mask] - 0.5) * spacingRatio - padDistance)
  bbNewUBound = numpy.ceil((bb[:Nd_mask] + bb[Nd_mask:] - 0.5) * spacingRatio + padDistance)

  # Ensure resampling is not performed outside bounds of original image
  maxUbound = numpy.ceil(maskSize * spacingRatio) - 1
  bbNewLBound = numpy.where(bbNewLBound < 0, 0, bbNewLBound)
  bbNewUBound = numpy.where(bbNewUBound > maxUbound, maxUbound, bbNewUBound)

  # Calculate the new size. Cast to int to prevent error in sitk.
  newSize = numpy.array(bbNewUBound - bbNewLBound + 1, dtype='int').tolist()

  # Determine continuous index of bbNewLBound in terms of the original Index coordinate space
  bbOriginalLBound = bbNewLBound / spacingRatio

  # Origin is located in center of first voxel, e.g. 1/2 of the spacing
  # from Corner, which corresponds to 0 in the original Index coordinate space.
  # The new spacing will be in 0 the new Index coordinate space. Here we use continuous
  # index to calculate where the new 0 of the new Index coordinate space (of the original volume
  # in terms of the original spacing, and add the minimum bounds of the cropped area to
  # get the new Index coordinate space of the cropped volume in terms of the original Index coordinate space.
  # Then use the ITK functionality to bring the continuous index into the physical space (mm)
  newOriginIndex = numpy.array(.5 * (resampledPixelSpacing - maskSpacing) / maskSpacing)
  newCroppedOriginIndex = newOriginIndex + bbOriginalLBound
  newOrigin = maskNode.TransformContinuousIndexToPhysicalPoint(newCroppedOriginIndex)

  imagePixelType = imageNode.GetPixelID()
  maskPixelType = maskNode.GetPixelID()

  direction = numpy.array(maskNode.GetDirection())

  logger.info('Applying resampling from spacing %s and size %s to spacing %s and size %s',
              maskSpacing, maskSize, resampledPixelSpacing, newSize)

  # interpolator may be passed as a string name (e.g. 'sitkBSpline'); resolve it on the sitk module
  try:
    if isinstance(interpolator, six.string_types):
      interpolator = getattr(sitk, interpolator)
  except Exception:
    logger.warning('interpolator "%s" not recognized, using sitkBSpline', interpolator)
    interpolator = sitk.sitkBSpline

  rif = sitk.ResampleImageFilter()

  rif.SetOutputSpacing(resampledPixelSpacing)
  rif.SetOutputDirection(direction)
  rif.SetSize(newSize)
  rif.SetOutputOrigin(newOrigin)

  logger.debug('Resampling image')
  rif.SetOutputPixelType(imagePixelType)
  rif.SetInterpolator(interpolator)
  resampledImageNode = rif.Execute(imageNode)

  logger.debug('Resampling mask')
  # Mask always uses nearest neighbor so label values are preserved exactly
  rif.SetOutputPixelType(maskPixelType)
  rif.SetInterpolator(sitk.sitkNearestNeighbor)
  resampledMaskNode = rif.Execute(maskNode)

  return resampledImageNode, resampledMaskNode
def normalizeImage(image, **kwargs):
  r"""
  Normalizes the image by centering it at the mean with standard deviation. Normalization is based on all gray values in
  the image, not just those inside the segmentation.

  :math:`f(x) = \frac{s(x - \mu_x)}{\sigma_x}`

  Where:

  - :math:`x` and :math:`f(x)` are the original and normalized intensity, respectively.
  - :math:`\mu_x` and :math:`\sigma_x` are the mean and standard deviation of the image intensity values.
  - :math:`s` is an optional scaling defined by ``scale``. By default, it is set to 1.

  Optionally, outliers can be removed, in which case values for which :math:`x > \mu_x + n\sigma_x` or
  :math:`x < \mu_x - n\sigma_x` are set to :math:`\mu_x + n\sigma_x` and :math:`\mu_x - n\sigma_x`, respectively.
  Here, :math:`n>0` and defined by ``outliers``. This, in turn, is controlled by the ``removeOutliers`` parameter.
  Removal of outliers is done after the values of the image are normalized, but before ``scale`` is applied.

  :return: Normalized image (SimpleITK image instance).
  """
  global logger
  scale = kwargs.get('normalizeScale', 1)
  outliers = kwargs.get('removeOutliers')

  # Bug fix: use %g instead of %d; scale may be a float and %d would log a truncated value
  logger.debug('Normalizing image with scale %g', scale)
  image = sitk.Normalize(image)

  if outliers is not None:
    logger.debug('Removing outliers > %g standard deviations', outliers)

    # After sitk.Normalize, intensities are expressed in standard deviations from the mean,
    # so clipping at +/- outliers removes values more than `outliers` sigma from the mean.
    imageArr = sitk.GetArrayFromImage(image)
    imageArr[imageArr > outliers] = outliers
    imageArr[imageArr < -outliers] = -outliers

    newImage = sitk.GetImageFromArray(imageArr)
    newImage.CopyInformation(image)
    image = newImage

  # Scaling is applied last, after normalization and (optional) outlier removal
  image *= scale

  return image
def resegmentMask(imageNode, maskNode, **kwargs):
  r"""
  Resegment the mask to only those voxels whose gray value falls within the range defined by
  ``resegmentRange``. With a single threshold, all voxels with a value equal to or above it are
  retained; with two thresholds, the closed interval :math:`T_{lower} \leq X_{gl} \leq T_{upper}`
  is retained. The resegmented mask is therefore always equal or smaller in size than the original.

  ``resegmentMode`` selects how the thresholds are interpreted:

  1. absolute (default): thresholds are absolute gray values.
  2. relative: thresholds are fractions of the maximum gray value inside the ROI.
  3. sigma: thresholds are numbers of standard deviations from the ROI mean.

  A ValueError is raised for illegal ``resegmentRange`` or ``resegmentMode`` values, or when the
  resegmentation retains 1 or fewer voxels.

  :return: Resegmented mask (SimpleITK image instance).
  """
  global logger
  resegmentRange = kwargs['resegmentRange']
  resegmentMode = kwargs.get('resegmentMode', 'absolute')
  label = kwargs.get('label', 1)

  # Validate the range before touching the image data
  if resegmentRange is None:
    raise ValueError('resegmentRange is None.')
  if len(resegmentRange) == 0 or len(resegmentRange) > 2:
    raise ValueError('Length %i is not allowed for resegmentRange' % len(resegmentRange))

  logger.debug('Resegmenting mask (range %s, mode %s)', resegmentRange, resegmentMode)

  im_arr = sitk.GetArrayFromImage(imageNode)
  ma_arr = (sitk.GetArrayFromImage(maskNode) == label)  # boolean array
  oldSize = numpy.sum(ma_arr)

  sortedRange = sorted(resegmentRange)

  # Translate the (sorted) range into absolute gray-value thresholds according to the mode
  if resegmentMode == 'absolute':
    logger.debug('Resegmenting in absolute mode')
    thresholds = sortedRange
  elif resegmentMode == 'relative':
    max_gl = numpy.max(im_arr[ma_arr])
    logger.debug('Resegmenting in relative mode, max %g', max_gl)
    thresholds = [max_gl * th for th in sortedRange]
  elif resegmentMode == 'sigma':
    mean_gl = numpy.mean(im_arr[ma_arr])
    sd_gl = numpy.std(im_arr[ma_arr])
    logger.debug('Resegmenting in sigma mode, mean %g, std %g', mean_gl, sd_gl)
    thresholds = [mean_gl + sd_gl * th for th in sortedRange]
  else:
    raise ValueError('Resegment mode %s not recognized.' % resegmentMode)

  # Apply lower threshold
  logger.debug('Applying lower threshold (%g)', thresholds[0])
  ma_arr[ma_arr] = im_arr[ma_arr] >= thresholds[0]

  # If 2 thresholds are defined, also apply an upper threshold
  if len(thresholds) == 2:
    logger.debug('Applying upper threshold (%g)', thresholds[1])
    ma_arr[ma_arr] = im_arr[ma_arr] <= thresholds[1]

  roiSize = numpy.sum(ma_arr)
  if roiSize <= 1:
    raise ValueError("Resegmentation excluded too many voxels with label %i (retained %i voxel(s))! "
                     "Cannot extract features" % (label, roiSize))

  # Rebuild a labelmap image from the boolean array, with retained voxels set to the label value
  newMask_arr = numpy.zeros(ma_arr.shape, dtype='int')
  newMask_arr[ma_arr] = label

  newMask = sitk.GetImageFromArray(newMask_arr)
  newMask.CopyInformation(maskNode)
  logger.debug('Resegmentation complete, new size: %d voxels (excluded %d voxels)', roiSize, oldSize - roiSize)

  return newMask
def getOriginalImage(inputImage, inputMask, **kwargs):
  """
  Pass-through filter: yields the unfiltered input image so 'original' can be
  dynamically exposed as a valid image type alongside the derived images.

  :return: Yields original image, 'original' and ``kwargs``
  """
  global logger
  logger.debug('Yielding original image')
  yield inputImage, 'original', kwargs
def getLoGImage(inputImage, inputMask, **kwargs):
  r"""
  Applies a Laplacian of Gaussian filter to the input image and yields a derived image for each sigma value specified.

  A Laplacian of Gaussian image is obtained by convolving the image with the second derivative (Laplacian) of a Gaussian
  kernel.

  The Gaussian kernel is used to smooth the image and is defined as

  .. math::

    G(x, y, z, \sigma) = \frac{1}{(\sigma \sqrt{2 \pi})^3}e^{-\frac{x^2 + y^2 + z^2}{2\sigma^2}}

  The Gaussian kernel is convolved by the laplacian kernel :math:`\nabla^2G(x, y, z)`, which is sensitive to areas with
  rapidly changing intensities, enhancing edges. The width of the filter in the Gaussian kernel is determined by
  :math:`\sigma` and can be used to emphasize more fine (low :math:`\sigma` values) or coarse (high :math:`\sigma`
  values) textures.

  .. warning::
    The LoG filter implemented in PyRadiomics is a 3D LoG filter, and therefore requires 3D input. Features using a
    single slice (2D) segmentation can still be extracted, but the input image *must* be a 3D image, with a minimum size
    in all dimensions :math:`\geq \sigma`. If input image is too small, a warning is logged and :math:`\sigma` value is
    skipped. Moreover, the image size *must* be at least 4 voxels in each dimensions, if this constraint is not met, no
    LoG derived images can be generated.

  Following settings are possible:

  - sigma: List of floats or integers, must be greater than 0. Filter width (mm) to use for the Gaussian kernel
    (determines coarseness). If omitted, no LoG images are yielded.

  Returned filter name reflects LoG settings: log-sigma-<sigmaValue>-3D.

  :return: Yields log filtered image for each specified sigma, corresponding image type name and ``kwargs`` (customized
    settings).
  """
  global logger

  logger.debug('Generating LoG images')

  # Check if size of image is > 4 in all 3D directions (otherwise, LoG filter will fail)
  size = numpy.array(inputImage.GetSize())
  spacing = numpy.array(inputImage.GetSpacing())

  if numpy.min(size) < 4:
    logger.warning('Image too small to apply LoG filter, size: %s', size)
    return

  sigmaValues = kwargs.get('sigma', [])

  for sigma in sigmaValues:
    logger.info('Computing LoG with sigma %g', sigma)

    if sigma > 0.0:
      if numpy.all(size >= numpy.ceil(sigma / spacing) + 1):
        lrgif = sitk.LaplacianRecursiveGaussianImageFilter()
        lrgif.SetNormalizeAcrossScale(True)
        lrgif.SetSigma(sigma)

        # e.g. sigma=3.0 -> 'log-sigma-3-0-mm-3D'
        inputImageName = 'log-sigma-%s-mm-3D' % (str(sigma).replace('.', '-'))
        logger.debug('Yielding %s image', inputImageName)
        yield lrgif.Execute(inputImage), inputImageName, kwargs
      else:
        # Bug fix: the original message stated the constraint inverted. The check above
        # requires size >= ceil(sigma / spacing) + 1 in every dimension.
        logger.warning('applyLoG: the size(%s) of the inputImage must be greater than or equal to '
                       'ceil(sigma(%g)/spacing(%s)) + 1 in all dimensions, skipping this sigma',
                       size,
                       sigma,
                       spacing)
    else:
      logger.warning('applyLoG: sigma must be greater than 0.0: %g', sigma)
def getWaveletImage(inputImage, inputMask, **kwargs):
  """
  Applies wavelet filter to the input image and yields the decompositions and the approximation.

  Following settings are possible:

  - start_level [0]: integer, 0 based level of wavelet which should be used as first set of decompositions
    from which a signature is calculated
  - level [1]: integer, number of levels of wavelet decompositions from which a signature is calculated.
  - wavelet ["coif1"]: string, type of wavelet decomposition. Enumerated value, validated against possible values
    present in the ``pyWavelet.wavelist()``.

  Returned filter name reflects wavelet type: wavelet[level]-<decompositionName>.
  N.B. only levels greater than the first level are entered into the name.

  :return: Yields each wavelet decomposition and final approximation, corresponding image type name and ``kwargs``
    (customized settings).
  """
  global logger
  logger.debug('Generating Wavelet images')

  Nd = inputImage.GetDimension()
  # Axes in reverse (numpy) order; in forced-2D mode, the out-of-plane axis is excluded
  axes = list(range(Nd - 1, -1, -1))
  if kwargs.get('force2D', False):
    axes.remove(kwargs.get('force2Ddimension', 0))

  approx, decompositions = _swt3(inputImage, tuple(axes), **kwargs)

  for levelIdx, levelDecompositions in enumerate(decompositions, start=1):
    for decompositionName, decompositionImage in levelDecompositions.items():
      logger.info('Computing Wavelet %s', decompositionName)

      # Only levels beyond the first carry the level number in the image type name
      if levelIdx == 1:
        inputImageName = 'wavelet-%s' % (decompositionName)
      else:
        inputImageName = 'wavelet%s-%s' % (levelIdx, decompositionName)
      logger.debug('Yielding %s image', inputImageName)
      yield decompositionImage, inputImageName, kwargs

  # Finally, yield the approximation ('L' on every decomposed axis) of the last level
  if len(decompositions) == 1:
    inputImageName = 'wavelet-%s' % ('L' * len(axes))
  else:
    inputImageName = 'wavelet%s-%s' % (len(decompositions), ('L' * len(axes)))
  logger.debug('Yielding approximation (%s) image', inputImageName)
  yield approx, inputImageName, kwargs
def _swt3(inputImage, axes, **kwargs):  # Stationary Wavelet Transform 3D
  """
  Compute a per-level stationary wavelet transform of the image over the given axes.

  Returns a tuple (approximation, ret): `approximation` is a SimpleITK image built from the
  final level's approximation coefficients; `ret` is a list with one dict per computed level,
  mapping decomposition names (with 'a' -> 'L' and 'd' -> 'H') to SimpleITK images of the
  detail coefficients.
  """
  wavelet = kwargs.get('wavelet', 'coif1')
  level = kwargs.get('level', 1)
  start_level = kwargs.get('start_level', 0)

  matrix = sitk.GetArrayFromImage(inputImage)  # This function gets a numpy array from the SimpleITK Image "inputImage"
  matrix = numpy.asarray(matrix)  # The function np.asarray converts "matrix" (which could be also a tuple) into an array.

  original_shape = matrix.shape
  # original_shape becomes a tuple (?,?,?) containing the number of rows, columns, and slices of the image
  # this is of course dependent on the number of dimensions, but the same principle holds
  # Pad odd dimensions by 1 at the high end: pywt.swtn requires even-sized input along each axis
  padding = tuple([(0, 1 if dim % 2 != 0 else 0) for dim in original_shape])
  # padding is necessary because of pywt.swtn (see function Notes)
  data = matrix.copy()  # creates a modifiable copy of "matrix" and we call it "data"
  data = numpy.pad(data, padding, 'wrap')  # padding the tuple "padding" previously computed

  if not isinstance(wavelet, pywt.Wavelet):
    wavelet = pywt.Wavelet(wavelet)

  for i in range(0, start_level):  # if start_level = 0 (default) this for loop never gets executed
    # compute all decompositions and saves them in "dec" dict
    dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
    # copies in "data" just the "aaa" decomposition (i.e. approximation; No of consecutive 'a's = len(axes))
    data = dec['a' * len(axes)].copy()

  ret = []  # initialize empty list; will hold one dict of detail images per level
  for i in range(start_level, start_level + level):
    # compute the n-dimensional stationary wavelet transform (one level at a time)
    dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
    # Copy the approximation into data (approximation in output / input for next levels)
    data = dec['a' * len(axes)].copy()

    dec_im = {}  # initialize empty dict; detail images for this level, keyed by L/H name
    for decName, decImage in six.iteritems(dec):
      # Returning the approximiation is done only for the last loop,
      # and is handled separately below (by building it from `data`)
      # There for, skip it here
      if decName == 'a' * len(axes):
        continue

      decTemp = decImage.copy()
      # Trim off the padding added above, restoring the original shape along odd-sized axes
      decTemp = decTemp[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
      sitkImage = sitk.GetImageFromArray(decTemp)
      sitkImage.CopyInformation(inputImage)

      dec_im[str(decName).replace('a', 'L').replace('d', 'H')] = sitkImage
      # modifies 'a' with 'L' (Low-pass filter) and 'd' with 'H' (High-pass filter)

    ret.append(dec_im)  # appending all the filtered sitk images (stored in "dec_im") to the "ret" list

  # Trim the padding from the final approximation as well before wrapping it as an image
  data = data[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
  approximation = sitk.GetImageFromArray(data)
  approximation.CopyInformation(inputImage)

  return approximation, ret  # returns the approximation and the detail (ret) coefficients of the stationary wavelet decomposition
def getSquareImage(inputImage, inputMask, **kwargs):
  r"""
  Computes the square of the image intensities, rescaled to the range of the original image.

  :math:`f(x) = (cx)^2,\text{ where } c=\displaystyle\frac{1}{\sqrt{\max(|x|)}}`

  Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.

  :return: Yields square filtered image, 'square' and ``kwargs`` (customized settings).
  """
  global logger

  arr = sitk.GetArrayFromImage(inputImage).astype('float64')
  # Scaling coefficient keeps the squared values on the range of the original image
  scale = 1 / numpy.sqrt(numpy.max(numpy.abs(arr)))
  arr = (scale * arr) ** 2

  filteredImage = sitk.GetImageFromArray(arr)
  filteredImage.CopyInformation(inputImage)

  logger.debug('Yielding square image')
  yield filteredImage, 'square', kwargs
def getSquareRootImage(inputImage, inputMask, **kwargs):
  r"""
  Computes the square root of the absolute value of image intensities, rescaled to the range of
  the original image; the sign of each original intensity is preserved in the result.

  :math:`f(x) = \left\{ {\begin{array}{lcl}
  \sqrt{cx} & \mbox{for} & x \ge 0 \\
  -\sqrt{-cx} & \mbox{for} & x < 0\end{array}} \right.,\text{ where } c=\max(|x|)`

  Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.

  :return: Yields square root filtered image, 'squareroot' and ``kwargs`` (customized settings).
  """
  global logger

  arr = sitk.GetArrayFromImage(inputImage).astype('float64')
  coeff = numpy.max(numpy.abs(arr))

  # Handle positive and negative voxels separately so the sign is retained
  positive = arr > 0
  negative = arr < 0
  arr[positive] = numpy.sqrt(arr[positive] * coeff)
  arr[negative] = -numpy.sqrt(-arr[negative] * coeff)

  filteredImage = sitk.GetImageFromArray(arr)
  filteredImage.CopyInformation(inputImage)

  logger.debug('Yielding squareroot image')
  yield filteredImage, 'squareroot', kwargs
def getLogarithmImage(inputImage, inputMask, **kwargs):
  r"""
  Computes the logarithm of the absolute value of the original image + 1, rescaled to the range of
  the original image; negative intensities remain negative in the resultant filtered image.

  :math:`f(x) = \left\{ {\begin{array}{lcl}
  c\log{(x + 1)} & \mbox{for} & x \ge 0 \\
  -c\log{(-x + 1)} & \mbox{for} & x < 0\end{array}} \right. \text{, where } c=\frac{\max(|x|)}{\log(\max(|x|) + 1)}`

  Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.

  :return: Yields logarithm filtered image, 'logarithm' and ``kwargs`` (customized settings)
  """
  global logger

  im = sitk.GetArrayFromImage(inputImage)
  im = im.astype('float64')

  im_max = numpy.max(numpy.abs(im))
  im[im > 0] = numpy.log(im[im > 0] + 1)
  im[im < 0] = - numpy.log(- (im[im < 0] - 1))

  # Rescale to the range of the original image. Robustness fix: for a constant-zero input
  # the filtered maximum is 0 and the original code divided 0/0, producing a NaN image;
  # in that case no rescaling is needed (all filtered values are already 0).
  filtered_max = numpy.max(numpy.abs(im))
  if filtered_max != 0:
    im = im * (im_max / filtered_max)

  im = sitk.GetImageFromArray(im)
  im.CopyInformation(inputImage)

  logger.debug('Yielding logarithm image')
  yield im, 'logarithm', kwargs
def getExponentialImage(inputImage, inputMask, **kwargs):
  r"""
  Computes the exponential of the original image, rescaled to the range of the original image.

  :math:`f(x) = e^{cx},\text{ where } c=\displaystyle\frac{\log(\max(|x|))}{\max(|x|)}`

  Where :math:`x` and :math:`f(x)` are the original and filtered intensity, respectively.

  :return: Yields exponential filtered image, 'exponential' and ``kwargs`` (customized settings)
  """
  global logger

  arr = sitk.GetArrayFromImage(inputImage).astype('float64')

  # Coefficient maps the maximum absolute intensity back onto itself: exp(c * max) == max
  peak = numpy.max(numpy.abs(arr))
  coeff = numpy.log(peak) / peak
  arr = numpy.exp(coeff * arr)

  filteredImage = sitk.GetImageFromArray(arr)
  filteredImage.CopyInformation(inputImage)

  logger.debug('Yielding exponential image')
  yield filteredImage, 'exponential', kwargs
def getGradientImage(inputImage, inputMask, **kwargs):
  r"""
  Compute and return the Gradient Magnitude in the image.

  By default, takes into account the image spacing; this can be switched off by specifying
  ``gradientUseSpacing = False``.

  :return: Yields gradient magnitude image, 'gradient' and ``kwargs`` (customized settings)
  """
  gradientFilter = sitk.GradientMagnitudeImageFilter()
  # Physical spacing is honoured unless explicitly disabled via the setting
  gradientFilter.SetUseImageSpacing(kwargs.get('gradientUseSpacing', True))
  yield gradientFilter.Execute(inputImage), 'gradient', kwargs
def getLBP2DImage(inputImage, inputMask, **kwargs):
  """
  Compute and return the Local Binary Pattern (LBP) in 2D. If ``force2D`` is set to false (= feature extraction in 3D) a
  warning is logged, as this filter processes the image in a by-slice operation. The plane in which the LBP is
  applied can be controlled by the ``force2Ddimension`` parameter (see also :py:func:`generateAngles`).

  Following settings are possible (in addition to ``force2Ddimension``):

    - ``lbp2DRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled
    - ``lbp2DSamples`` [8]: Integer, specifies the number of samples to use
      (NOTE(review): docstring previously stated [9]; the code default below is 8)
    - ``lbp2DMethod`` ['uniform']: String, specifies the method for computing the LBP to use.

  For more information see `scikit documentation
  <http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.local_binary_pattern>`_

  :return: Yields LBP filtered image, 'lbp-2D' and ``kwargs`` (customized settings)

  .. note::
    LBP can often return only a very small number of different gray levels. A customized bin width is often needed.
  .. warning::
    Requires package ``scikit-image`` to function. If not available, this filter logs a warning and does not yield an image.

  References:

  - T. Ojala, M. Pietikainen, and D. Harwood (1994), "Performance evaluation of texture measures with classification
    based on Kullback discrimination of distributions", Proceedings of the 12th IAPR International Conference on Pattern
    Recognition (ICPR 1994), vol. 1, pp. 582 - 585.
  - T. Ojala, M. Pietikainen, and D. Harwood (1996), "A Comparative Study of Texture Measures with Classification Based
    on Feature Distributions", Pattern Recognition, vol. 29, pp. 51-59.
  """
  global logger
  # Optional dependency: yield nothing (rather than crash) when scikit-image is missing
  try:
    from skimage.feature import local_binary_pattern
  except ImportError:
    logger.warning('Could not load required package "skimage", cannot implement filter LBP 2D')
    return

  lbp_radius = kwargs.get('lbp2DRadius', 1)
  lbp_samples = kwargs.get('lbp2DSamples', 8)
  lbp_method = kwargs.get('lbp2DMethod', 'uniform')

  im_arr = sitk.GetArrayFromImage(inputImage)

  Nd = inputImage.GetDimension()
  if Nd == 3:
    # Warn the user if features are extracted in 3D, as this function calculates LBP in 2D
    if not kwargs.get('force2D', False):
      logger.warning('Calculating Local Binary Pattern in 2D, but extracting features in 3D. Use with caution!')
    lbp_axis = kwargs.get('force2Ddimension', 0)

    # Move the out-of-plane axis to the front so each in-plane slice can be filtered, then swap back
    im_arr = im_arr.swapaxes(0, lbp_axis)
    for idx in range(im_arr.shape[0]):
      im_arr[idx, ...] = local_binary_pattern(im_arr[idx, ...], P=lbp_samples, R=lbp_radius, method=lbp_method)
    im_arr = im_arr.swapaxes(0, lbp_axis)
  elif Nd == 2:
    im_arr = local_binary_pattern(im_arr, P=lbp_samples, R=lbp_radius, method=lbp_method)
  else:
    logger.warning('LBP 2D is only available for 2D or 3D with forced 2D extraction')
    return

  im = sitk.GetImageFromArray(im_arr)
  im.CopyInformation(inputImage)

  yield im, 'lbp-2D', kwargs
def getLBP3DImage(inputImage, inputMask, **kwargs):
  """
  Compute and return the Local Binary Pattern (LBP) in 3D using spherical harmonics.
  If ``force2D`` is set to true (= feature extraction in 2D) a warning is logged.

  LBP is only calculated for voxels segmented in the mask

  Following settings are possible:

    - ``lbp3DLevels`` [2]: integer, specifies the the number of levels in spherical harmonics to use.
    - ``lbp3DIcosphereRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled
    - ``lbp3DIcosphereSubdivision`` [1]: Integer, specifies the number of subdivisions to apply in the icosphere

  :return: Yields LBP filtered image for each level, 'lbp-3D-m<level>' and ``kwargs`` (customized settings).
           Additionally yields the kurtosis image, 'lbp-3D-k' and ``kwargs``.

  .. note::
    LBP can often return only a very small number of different gray levels. A customized bin width is often needed.
  .. warning::
    Requires package ``scipy`` and ``trimesh`` to function. If not available, this filter logs a warning and does not
    yield an image.

  References:

  - Banerjee, J, Moelker, A, Niessen, W.J, & van Walsum, T.W. (2013), "3D LBP-based rotationally invariant region
    description." In: Park JI., Kim J. (eds) Computer Vision - ACCV 2012 Workshops. ACCV 2012. Lecture Notes in Computer
    Science, vol 7728. Springer, Berlin, Heidelberg. doi:10.1007/978-3-642-37410-4_3
  """
  global logger
  Nd = inputImage.GetDimension()
  if Nd != 3:
    logger.warning('LBP 3D only available for 3 dimensional images, found %i dimensions', Nd)
    return

  try:
    from scipy.stats import kurtosis
    # FIX: scipy.ndimage.interpolation is deprecated and removed in recent SciPy;
    # map_coordinates has always also been exposed directly on scipy.ndimage.
    from scipy.ndimage import map_coordinates
    from scipy.special import sph_harm
    from trimesh.creation import icosphere
  except ImportError:
    logger.warning('Could not load required package "scipy" or "trimesh", cannot implement filter LBP 3D')
    return

  # Warn the user if features are extracted in 2D, as this function calculates LBP in 3D
  if kwargs.get('force2D', False):
    logger.warning('Calculating Local Binary Pattern in 3D, but extracting features in 2D. Use with caution!')

  label = kwargs.get('label', 1)

  lbp_levels = kwargs.get('lbp3DLevels', 2)
  lbp_icosphereRadius = kwargs.get('lbp3DIcosphereRadius', 1)
  lbp_icosphereSubdivision = kwargs.get('lbp3DIcosphereSubdivision', 1)

  im_arr = sitk.GetArrayFromImage(inputImage)
  ma_arr = sitk.GetArrayFromImage(inputMask)

  # Variables used in the shape comments:
  # Np Number of voxels
  # Nv Number of vertices

  # Vertices icosahedron for spherical sampling
  coords_icosahedron = numpy.array(icosphere(lbp_icosphereSubdivision, lbp_icosphereRadius).vertices)  # shape(Nv, 3)

  # Corresponding polar coordinates
  theta = numpy.arccos(numpy.true_divide(coords_icosahedron[:, 2], lbp_icosphereRadius))
  phi = numpy.arctan2(coords_icosahedron[:, 1], coords_icosahedron[:, 0])

  # Corresponding spherical harmonics coefficients Y_{m, n, theta, phi}
  Y = sph_harm(0, 0, theta, phi)  # shape(Nv,)
  n_ix = numpy.array(0)

  for n in range(1, lbp_levels):
    for m in range(-n, n + 1):
      n_ix = numpy.append(n_ix, n)
      Y = numpy.column_stack((Y, sph_harm(m, n, theta, phi)))
  # shape (Nv, x) where x is the number of iterations in the above loops + 1

  # Get labelled coordinates
  ROI_coords = numpy.where(ma_arr == label)  # shape(3, Np)

  # Interpolate f (samples on the spheres across the entire volume)
  coords = numpy.array(ROI_coords).T[None, :, :] + coords_icosahedron[:, None, :]  # shape(Nv, Np, 3)
  f = map_coordinates(im_arr, coords.T, order=3)  # Shape(Np, Nv)  Note that 'Np' and 'Nv' are swapped due to .T

  # Compute spherical Kurtosis
  k = kurtosis(f, axis=1)  # shape(Np,)

  # Apply sign function
  f_centroids = im_arr[ROI_coords]  # Shape(Np,)
  f = numpy.greater_equal(f, f_centroids[:, None]).astype(int)  # Shape(Np, Nv)

  # Compute c_{m,n} coefficients
  c = numpy.multiply(f[:, :, None], Y[None, :, :])  # Shape(Np, Nv, x)
  c = c.sum(axis=1)  # Shape(Np, x)

  # Integrate over m
  f = numpy.multiply(c[:, None, n_ix == 0], Y[None, :, n_ix == 0])  # Shape (Np, Nv, 1)
  for n in range(1, lbp_levels):
    f = numpy.concatenate((f,
                           numpy.sum(numpy.multiply(c[:, None, n_ix == n], Y[None, :, n_ix == n]),
                                     axis=2, keepdims=True)
                           ),
                          axis=2)
  # Shape f (Np, Nv, levels)

  # Compute L2-Norm
  f = numpy.sqrt(numpy.sum(f ** 2, axis=1))  # shape(Np, levels)

  # Keep only Real Part
  f = numpy.real(f)  # shape(Np, levels)
  k = numpy.real(k)  # shape(Np,)

  # Yield the derived images for each level
  # FIX: use zeros instead of an uninitialized numpy.ndarray, so voxels outside the ROI
  # hold a deterministic value (0) instead of whatever memory happened to contain.
  result = numpy.zeros(im_arr.shape)
  for l_idx in range(lbp_levels):
    result[ROI_coords] = f[:, l_idx]

    # Create a SimpleITK image
    im = sitk.GetImageFromArray(result)
    im.CopyInformation(inputImage)

    yield im, 'lbp-3D-m%d' % (l_idx + 1), kwargs

  # Yield Kurtosis
  result[ROI_coords] = k

  # Create a SimpleITK image
  im = sitk.GetImageFromArray(result)
  im.CopyInformation(inputImage)

  yield im, 'lbp-3D-k', kwargs
| 45.225248 | 130 | 0.719227 | from __future__ import print_function
import logging
import numpy
import pywt
import SimpleITK as sitk
import six
from six.moves import range
logger = logging.getLogger(__name__)
def getMask(mask, **kwargs):
  """
  Ensure ``mask`` is a scalar UInt32 labelmap containing the requested label.

  If the mask is stored as a vector image (segmentation object), the channel
  selected by the ``label_channel`` kwarg [0] is extracted first. The mask is
  then cast to UInt32 and validated: it must contain at least one segmented
  voxel, and the label selected by the ``label`` kwarg [1] must be present.

  :param mask: SimpleITK image (scalar labelmap or vector segmentation object)
  :return: SimpleITK UInt32 labelmap
  :raises ValueError: when nothing is segmented or the requested label is absent
  """
  global logger
  requestedLabel = kwargs.get('label', 1)
  requestedChannel = kwargs.get('label_channel', 0)

  if 'vector' in mask.GetPixelIDTypeAsString().lower():
    logger.debug('Mask appears to be a segmentation object (=stored as vector image).')
    n_components = mask.GetNumberOfComponentsPerPixel()
    assert requestedChannel < n_components, \
      "Mask %i requested, but segmentation object only contains %i objects" % (requestedChannel, n_components)

    logger.info('Extracting mask at index %i', requestedChannel)
    channelSelector = sitk.VectorIndexSelectionCastImageFilter()
    channelSelector.SetIndex(requestedChannel)
    mask = channelSelector.Execute(mask)

  logger.debug('Force casting mask to UInt32 to ensure correct datatype.')
  mask = sitk.Cast(mask, sitk.sitkUInt32)

  presentLabels = numpy.unique(sitk.GetArrayFromImage(mask))
  if len(presentLabels) == 1:
    raise ValueError('No labels found in this mask (i.e. nothing is segmented)!')
  if requestedLabel not in presentLabels:
    raise ValueError('Label (%g) not present in mask. Choose from %s' % (requestedLabel, presentLabels[presentLabels != 0]))

  return mask
def getBinEdges(parameterValues, **kwargs):
  """
  Calculate the bin edges used to discretize ``parameterValues``.

  If kwarg ``binCount`` is set, a fixed number of bins is used (edges from
  ``numpy.histogram``, with the top edge widened so the maximum value falls
  inside the last bin). Otherwise a fixed bin width (kwarg ``binWidth`` [25])
  is used, with the lowest edge aligned on a multiple of the width.

  :return: array (or 2-element list) of bin edges
  """
  global logger
  fixedWidth = kwargs.get('binWidth', 25)
  fixedCount = kwargs.get('binCount')

  if fixedCount is not None:
    # Fixed bin count: numpy.histogram treats the upper edge as inclusive,
    # numpy.digitize does not; widen the top edge so the maximum is binned.
    binEdges = numpy.histogram(parameterValues, fixedCount)[1]
    binEdges[-1] += 1
  else:
    # Fixed bin width: align the first edge on a multiple of the bin width
    lowBound = min(parameterValues)
    lowBound -= lowBound % fixedWidth
    highBound = max(parameterValues) + 2 * fixedWidth
    binEdges = numpy.arange(lowBound, highBound, fixedWidth)

    # A single edge means all values are identical; build one bin around them
    if len(binEdges) == 1:
      binEdges = [binEdges[0] - .5, binEdges[0] + .5]

  logger.debug('Calculated %d bins for bin width %g with edges: %s)', len(binEdges) - 1, fixedWidth, binEdges)
  return binEdges
def binImage(parameterMatrix, parameterMatrixCoordinates=None, **kwargs):
  """
  Discretize the gray levels of ``parameterMatrix``.

  When ``parameterMatrixCoordinates`` is None the whole matrix is discretized;
  otherwise only the voxels at those coordinates are binned (remaining elements
  stay 0). Bin edges are determined by :py:func:`getBinEdges` (kwargs are
  passed through).

  :return: tuple (discretized matrix as int array, bin edges used)
  """
  global logger
  logger.debug('Discretizing gray levels inside ROI')

  discretizedParameterMatrix = numpy.zeros(parameterMatrix.shape, dtype='int')
  if parameterMatrixCoordinates is None:
    binEdges = getBinEdges(parameterMatrix.flatten(), **kwargs)
    discretizedParameterMatrix = numpy.digitize(parameterMatrix, binEdges)
  else:
    roiValues = parameterMatrix[parameterMatrixCoordinates]
    binEdges = getBinEdges(roiValues, **kwargs)
    discretizedParameterMatrix[parameterMatrixCoordinates] = numpy.digitize(roiValues, binEdges)

  return discretizedParameterMatrix, binEdges
def checkMask(imageNode, maskNode, **kwargs):
  """
  Check that the mask matches the image geometry and contains a valid ROI.

  Kwargs used: ``label`` [1], ``minimumROIDimensions`` [2], ``minimumROISize`` [None] and
  ``correctMask`` [False]. When ``correctMask`` is enabled, a geometry/datatype mismatch is
  handled by resampling the mask onto the image grid (via ``_correctMask``) instead of raising.

  :return: tuple of the bounding box of the label (L_X, U_X, L_Y, U_Y, L_Z, U_Z) and the
           corrected mask (None when no correction was applied)
  :raises ValueError: on datatype/geometry mismatch (without ``correctMask``), missing label,
           or an ROI with too few dimensions or voxels
  """
  global logger
  correctedMask = None
  label = kwargs.get('label', 1)
  minDims = kwargs.get('minimumROIDimensions', 2)
  minSize = kwargs.get('minimumROISize', None)

  logger.debug('Checking mask with label %d', label)
  logger.debug('Calculating bounding box')
  lsif = sitk.LabelStatisticsImageFilter()
  try:
    lsif.Execute(imageNode, maskNode)

    if label not in lsif.GetLabels():
      raise ValueError('Label (%g) not present in mask' % label)
  except RuntimeError as e:
    # sitk raises RuntimeError on type/size/geometry mismatch; inspect the message to
    # translate it into an actionable ValueError, unless the user enabled correctMask.
    if not kwargs.get('correctMask', False):
      if "Both images for LabelStatisticsImageFilter don't match type or dimension!" in e.args[0]:
        logger.debug('Additional information on error.', exc_info=True)
        raise ValueError('Image/Mask datatype or size mismatch. Potential fix: enable correctMask, see '
                         'Documentation:Usage:Customizing the Extraction:Settings:correctMask for more information')
      elif "Inputs do not occupy the same physical space!" in e.args[0]:
        logger.debug('Additional information on error.', exc_info=True)
        raise ValueError('Image/Mask geometry mismatch. Potential fix: increase tolerance using geometryTolerance, '
                         'see Documentation:Usage:Customizing the Extraction:Settings:geometryTolerance for more '
                         'information')
      else:
        raise e  # unhandled error

    logger.warning('Image/Mask geometry mismatch, attempting to correct Mask')

    correctedMask = _correctMask(imageNode, maskNode, **kwargs)  # Raises Value error if ROI outside image physical space

    # Resampling successful, try to calculate boundingbox
    try:
      lsif.Execute(imageNode, correctedMask)
    except RuntimeError:
      logger.debug('Bounding box calculation with resampled mask failed', exc_info=True)
      raise ValueError('Calculation of bounding box failed, for more information run with DEBUG logging and check log')

  # LBound and UBound of the bounding box, as (L_X, U_X, L_Y, U_Y, L_Z, U_Z)
  boundingBox = numpy.array(lsif.GetBoundingBox(label))

  logger.debug('Checking minimum number of dimensions requirements (%d)', minDims)
  ndims = numpy.sum((boundingBox[1::2] - boundingBox[0::2] + 1) > 1)  # UBound - LBound + 1 = Size
  if ndims == 0:
    raise ValueError('mask only contains 1 segmented voxel! Cannot extract features for a single voxel.')
  elif ndims < minDims:
    raise ValueError('mask has too few dimensions (number of dimensions %d, minimum required %d)' % (ndims, minDims))

  if minSize is not None:
    logger.debug('Checking minimum size requirements (minimum size: %d)', minSize)
    roiSize = lsif.GetCount(label)
    if roiSize <= minSize:
      raise ValueError('Size of the ROI is too small (minimum size: %g, ROI size: %g' % (minSize, roiSize))

  return boundingBox, correctedMask
def _correctMask(imageNode, maskNode, **kwargs):
  """
  Resample ``maskNode`` onto the geometry of ``imageNode`` (nearest neighbour).

  The ROI is validated first: ``_checkROI`` raises a ValueError if the ROI lies
  (partially) outside the physical space of the image.

  :return: resampled mask matching the geometry of ``imageNode``
  """
  global logger
  logger.debug('Resampling mask to image geometry')

  _checkROI(imageNode, maskNode, **kwargs)  # Raises a value error if ROI is invalid

  resampler = sitk.ResampleImageFilter()
  resampler.SetReferenceImage(imageNode)
  resampler.SetInterpolator(sitk.sitkNearestNeighbor)

  logger.debug('Resampling...')
  return resampler.Execute(maskNode)
def _checkROI(imageNode, maskNode, **kwargs):
  """
  Check that the ROI (kwarg ``label`` [1]) is present in ``maskNode`` and lies
  within the physical space of ``imageNode``.

  :param imageNode: SimpleITK image the ROI must fit inside
  :param maskNode: SimpleITK labelmap containing the ROI
  :return: bounding box of the label, as (L_X, L_Y, [L_Z], S_X, S_Y, [S_Z])
  :raises ValueError: when the label is missing from the mask, or when the ROI
    (partially) lies outside the physical space of the image
  """
  global logger
  label = kwargs.get('label', 1)

  logger.debug('Checking ROI validity')

  # Determine bounds of cropped volume in terms of original Index coordinate space
  lssif = sitk.LabelShapeStatisticsImageFilter()
  lssif.Execute(maskNode)

  logger.debug('Checking if label %d is present in the mask', label)
  if label not in lssif.GetLabels():
    # FIX: the message was previously passed as two arguments to ValueError
    # ('Label (%d) not present in mask', label) and was never %-formatted.
    raise ValueError('Label (%d) not present in mask' % label)

  # LBound and size of the bounding box, as (L_X, L_Y, [L_Z], S_X, S_Y, [S_Z])
  bb = numpy.array(lssif.GetBoundingBox(label))
  Nd = maskNode.GetDimension()

  # Determine if the ROI is within the physical space of the image
  logger.debug('Comparing physical space of bounding box to physical space of image')
  # Step 1: Get the origin and UBound corners of the bounding box in physical space
  # The additional 0.5 represents the difference between the voxel center and the voxel corner
  # Upper bound index of ROI = bb[:Nd] + bb[Nd:] - 1 (LBound + Size - 1), .5 is added to get corner
  ROIBounds = (maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] - .5),  # Origin
               maskNode.TransformContinuousIndexToPhysicalPoint(bb[:Nd] + bb[Nd:] - 0.5))  # UBound
  # Step 2: Translate the ROI physical bounds to the image coordinate space
  ROIBounds = (imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[0]),  # Origin
               imageNode.TransformPhysicalPointToContinuousIndex(ROIBounds[1]))

  logger.debug('ROI bounds (image coordinate space): %s', ROIBounds)

  # Check if any of the ROI bounds are outside the image indices (i.e. -0.5 < ROI < Im.Size -0.5)
  # The additional 0.5 is to allow for different spacings (defines the edges, not the centers of the edge-voxels
  tolerance = 1e-3  # Define a tolerance to correct for machine precision errors
  if numpy.any(numpy.min(ROIBounds, axis=0) < (- .5 - tolerance)) or \
     numpy.any(numpy.max(ROIBounds, axis=0) > (numpy.array(imageNode.GetSize()) - .5 + tolerance)):
    raise ValueError('Bounding box of ROI is larger than image space:\n\t'
                     'ROI bounds (x, y, z image coordinate space) %s\n\tImage Size %s' %
                     (ROIBounds, imageNode.GetSize()))

  logger.debug('ROI valid, calculating resampling grid')

  return bb
def cropToTumorMask(imageNode, maskNode, boundingBox, **kwargs):
  """
  Crop image and mask to the bounding box of the ROI, with optional padding.

  Kwarg ``padDistance`` [0] adds a margin (in voxels) on each side; the crop is
  clamped so it never extends beyond the original volume.

  :param boundingBox: label bounding box as (L_X, U_X, L_Y, U_Y, L_Z, U_Z)
  :return: tuple (cropped image, cropped mask)
  """
  global logger
  padDistance = kwargs.get('padDistance', 0)

  maskSize = numpy.array(maskNode.GetSize())
  lowerCrop = boundingBox[0::2] - padDistance
  upperCrop = maskSize - boundingBox[1::2] - padDistance - 1

  # Clamp to the original image bounds (padding may not extend past the volume)
  lowerCrop = numpy.maximum(lowerCrop, 0)
  upperCrop = numpy.maximum(upperCrop, 0)

  logger.debug('Cropping to size %s', (boundingBox[1::2] - boundingBox[0::2]) + 1)

  cropFilter = sitk.CropImageFilter()
  try:
    cropFilter.SetLowerBoundaryCropSize(lowerCrop)
    cropFilter.SetUpperBoundaryCropSize(upperCrop)
  except TypeError:
    # newer versions of SITK/python want a tuple or list
    cropFilter.SetLowerBoundaryCropSize(lowerCrop.tolist())
    cropFilter.SetUpperBoundaryCropSize(upperCrop.tolist())

  return cropFilter.Execute(imageNode), cropFilter.Execute(maskNode)
def resampleImage(imageNode, maskNode, **kwargs):
  """
  Resample image and mask to the spacing given by kwarg ``resampledPixelSpacing``
  and crop to the (padded) bounding box of the ROI.

  Kwargs used: ``resampledPixelSpacing`` (required; a 0 in a direction keeps the
  original spacing there), ``interpolator`` [sitkBSpline] (enum or name string),
  ``padDistance`` [5] and ``label`` [1]. The mask is always resampled with
  nearest neighbour interpolation.

  :return: tuple (resampled image, resampled mask), cropped to the ROI grid
  :raises ValueError: when image or mask is None, or the ROI is invalid
  """
  global logger
  resampledPixelSpacing = kwargs['resampledPixelSpacing']
  interpolator = kwargs.get('interpolator', sitk.sitkBSpline)
  padDistance = kwargs.get('padDistance', 5)
  label = kwargs.get('label', 1)

  logger.debug('Resampling image and mask')

  if imageNode is None or maskNode is None:
    raise ValueError('Requires both image and mask to resample')

  maskSpacing = numpy.array(maskNode.GetSpacing())
  imageSpacing = numpy.array(imageNode.GetSpacing())

  Nd_resampled = len(resampledPixelSpacing)
  Nd_mask = len(maskSpacing)
  assert Nd_resampled == Nd_mask, \
    'Wrong dimensionality (%i-D) of resampledPixelSpacing!, %i-D required' % (Nd_resampled, Nd_mask)

  # If spacing for a direction is set to 0, use the original spacing (enables "only in-slice" resampling)
  logger.debug('Where resampled spacing is set to 0, set it to the original spacing (mask)')
  resampledPixelSpacing = numpy.array(resampledPixelSpacing)
  resampledPixelSpacing = numpy.where(resampledPixelSpacing == 0, maskSpacing, resampledPixelSpacing)

  # Check if the maskNode contains a valid ROI. If ROI is valid, the bounding box needed to calculate the resampling
  # grid is returned.
  bb = _checkROI(imageNode, maskNode, **kwargs)

  # Do not resample in those directions where labelmap spans only one slice.
  maskSize = numpy.array(maskNode.GetSize())
  resampledPixelSpacing = numpy.where(bb[Nd_mask:] != 1, resampledPixelSpacing, maskSpacing)

  # If current spacing is equal to resampledPixelSpacing, no interpolation is needed
  # Tolerance = 1e-5 + 1e-8*abs(resampledSpacing)
  logger.debug('Comparing resampled spacing to original spacing (image')
  if numpy.allclose(imageSpacing, resampledPixelSpacing):
    logger.info('New spacing equal to original image spacing, just resampling the mask')

    # Ensure that image and mask geometry match
    rif = sitk.ResampleImageFilter()
    rif.SetReferenceImage(imageNode)
    rif.SetInterpolator(sitk.sitkNearestNeighbor)
    maskNode = rif.Execute(maskNode)

    # re-calculate the bounding box of the mask
    lssif = sitk.LabelShapeStatisticsImageFilter()
    lssif.Execute(maskNode)
    bb = numpy.array(lssif.GetBoundingBox(label))

    # Convert (LBound, Size) to the interleaved (LBound, UBound) format cropToTumorMask expects
    low_up_bb = numpy.empty(Nd_mask * 2, dtype=int)
    low_up_bb[::2] = bb[:3]
    low_up_bb[1::2] = bb[:3] + bb[3:] - 1
    return cropToTumorMask(imageNode, maskNode, low_up_bb, **kwargs)

  spacingRatio = maskSpacing / resampledPixelSpacing

  # Determine bounds of cropped volume in terms of new Index coordinate space,
  # round down for lowerbound and up for upperbound to ensure entire segmentation is captured (prevent data loss)
  # Pad with an extra .5 to prevent data loss in case of upsampling. For Ubound this is (-1 + 0.5 = -0.5)
  bbNewLBound = numpy.floor((bb[:Nd_mask] - 0.5) * spacingRatio - padDistance)
  bbNewUBound = numpy.ceil((bb[:Nd_mask] + bb[Nd_mask:] - 0.5) * spacingRatio + padDistance)

  # Ensure resampling is not performed outside bounds of original image
  maxUbound = numpy.ceil(maskSize * spacingRatio) - 1
  bbNewLBound = numpy.where(bbNewLBound < 0, 0, bbNewLBound)
  bbNewUBound = numpy.where(bbNewUBound > maxUbound, maxUbound, bbNewUBound)

  # Calculate the new size. Cast to int to prevent error in sitk.
  newSize = numpy.array(bbNewUBound - bbNewLBound + 1, dtype='int').tolist()

  # Determine continuous index of bbNewLBound in terms of the original Index coordinate space
  bbOriginalLBound = bbNewLBound / spacingRatio

  # Origin is located in center of first voxel, e.g. 1/2 of the spacing
  # from Corner, which corresponds to 0 in the original Index coordinate space.
  # The new spacing will be in 0 the new Index coordinate space. Here we use continuous
  # index to calculate where the new 0 of the new Index coordinate space (of the original volume
  # in terms of the original spacing, and add the minimum bounds of the cropped area to
  # get the new Index coordinate space of the cropped volume in terms of the original Index coordinate space.
  # Then use the ITK functionality to bring the continuous index into the physical space (mm)
  newOriginIndex = numpy.array(.5 * (resampledPixelSpacing - maskSpacing) / maskSpacing)
  newCroppedOriginIndex = newOriginIndex + bbOriginalLBound
  newOrigin = maskNode.TransformContinuousIndexToPhysicalPoint(newCroppedOriginIndex)

  imagePixelType = imageNode.GetPixelID()
  maskPixelType = maskNode.GetPixelID()

  direction = numpy.array(maskNode.GetDirection())

  logger.info('Applying resampling from spacing %s and size %s to spacing %s and size %s',
              maskSpacing, maskSize, resampledPixelSpacing, newSize)

  # Allow the interpolator to be specified by name (e.g. 'sitkBSpline')
  try:
    if isinstance(interpolator, six.string_types):
      interpolator = getattr(sitk, interpolator)
  except Exception:
    logger.warning('interpolator "%s" not recognized, using sitkBSpline', interpolator)
    interpolator = sitk.sitkBSpline

  rif = sitk.ResampleImageFilter()

  rif.SetOutputSpacing(resampledPixelSpacing)
  rif.SetOutputDirection(direction)
  rif.SetSize(newSize)
  rif.SetOutputOrigin(newOrigin)

  logger.debug('Resampling image')
  rif.SetOutputPixelType(imagePixelType)
  rif.SetInterpolator(interpolator)
  resampledImageNode = rif.Execute(imageNode)

  logger.debug('Resampling mask')
  rif.SetOutputPixelType(maskPixelType)
  rif.SetInterpolator(sitk.sitkNearestNeighbor)
  resampledMaskNode = rif.Execute(maskNode)

  return resampledImageNode, resampledMaskNode
def normalizeImage(image, **kwargs):
  """
  Normalize ``image`` to zero mean and unit standard deviation, optionally
  clipping outliers and applying a scale factor.

  Kwargs used:
  - ``normalizeScale`` [1]: multiplier applied after normalization
  - ``removeOutliers`` [None]: when set, normalized intensities beyond this
    number of standard deviations are clipped to that bound

  :return: normalized (and optionally clipped/scaled) SimpleITK image
  """
  global logger
  scale = kwargs.get('normalizeScale', 1)
  outlierSigma = kwargs.get('removeOutliers')

  logger.debug('Normalizing image with scale %d', scale)
  image = sitk.Normalize(image)

  if outlierSigma is not None:
    logger.debug('Removing outliers > %g standard deviations', outlierSigma)
    arr = sitk.GetArrayFromImage(image)
    arr = numpy.clip(arr, -outlierSigma, outlierSigma)
    clipped = sitk.GetImageFromArray(arr)
    clipped.CopyInformation(image)
    image = clipped

  return image * scale
def resegmentMask(imageNode, maskNode, **kwargs):
  """
  Resegment the mask by thresholding the gray values inside the ROI.

  Kwargs used: ``resegmentRange`` (required; 1 element = lower threshold only,
  2 elements = lower and upper threshold), ``resegmentMode`` ['absolute'] and
  ``label`` [1]. Modes: 'absolute' uses the range values directly, 'relative'
  scales them by the maximum gray level in the ROI, 'sigma' interprets them as
  mean +/- N standard deviations of the ROI gray levels.

  :return: new SimpleITK mask containing only the voxels passing the threshold(s)
  :raises ValueError: on an invalid range/mode, or when resegmentation leaves
           at most one voxel
  """
  global logger
  resegmentRange = kwargs['resegmentRange']
  resegmentMode = kwargs.get('resegmentMode', 'absolute')
  label = kwargs.get('label', 1)

  if resegmentRange is None:
    raise ValueError('resegmentRange is None.')
  if len(resegmentRange) == 0 or len(resegmentRange) > 2:
    raise ValueError('Length %i is not allowed for resegmentRange' % len(resegmentRange))

  logger.debug('Resegmenting mask (range %s, mode %s)', resegmentRange, resegmentMode)

  im_arr = sitk.GetArrayFromImage(imageNode)
  ma_arr = (sitk.GetArrayFromImage(maskNode) == label)  # boolean array

  oldSize = numpy.sum(ma_arr)

  if resegmentMode == 'absolute':
    logger.debug('Resegmenting in absolute mode')
    thresholds = sorted(resegmentRange)
  elif resegmentMode == 'relative':
    max_gl = numpy.max(im_arr[ma_arr])
    logger.debug('Resegmenting in relative mode, max %g', max_gl)
    thresholds = [max_gl * th for th in sorted(resegmentRange)]
  elif resegmentMode == 'sigma':
    mean_gl = numpy.mean(im_arr[ma_arr])
    sd_gl = numpy.std(im_arr[ma_arr])
    logger.debug('Resegmenting in sigma mode, mean %g, std %g', mean_gl, sd_gl)
    thresholds = [mean_gl + sd_gl * th for th in sorted(resegmentRange)]
  else:
    raise ValueError('Resegment mode %s not recognized.' % resegmentMode)

  # Apply lower threshold
  # (boolean-indexed assignment shrinks the True entries of ma_arr in place)
  logger.debug('Applying lower threshold (%g)', thresholds[0])
  ma_arr[ma_arr] = im_arr[ma_arr] >= thresholds[0]

  # If 2 thresholds are defined, also apply an upper threshold
  if len(thresholds) == 2:
    logger.debug('Applying upper threshold (%g)', thresholds[1])
    ma_arr[ma_arr] = im_arr[ma_arr] <= thresholds[1]

  roiSize = numpy.sum(ma_arr)

  if roiSize <= 1:
    raise ValueError("Resegmentation excluded too many voxels with label %i (retained %i voxel(s))! "
                     "Cannot extract features" % (label, roiSize))

  # Transform the boolean array back to an image with the correct voxels set to the label value
  newMask_arr = numpy.zeros(ma_arr.shape, dtype='int')
  newMask_arr[ma_arr] = label

  newMask = sitk.GetImageFromArray(newMask_arr)
  newMask.CopyInformation(maskNode)
  logger.debug('Resegmentation complete, new size: %d voxels (excluded %d voxels)', roiSize, oldSize - roiSize)

  return newMask
def getOriginalImage(inputImage, inputMask, **kwargs):
  """
  No filter is applied.

  :return: Yields the unmodified image, 'original' and ``kwargs``
  """
  global logger
  logger.debug('Yielding original image')
  yield inputImage, 'original', kwargs
def getLoGImage(inputImage, inputMask, **kwargs):
  """
  Apply a Laplacian of Gaussian filter for each sigma in kwarg ``sigma`` [[]].

  Sigmas must be greater than 0 and small enough relative to image size and
  spacing; invalid sigmas are skipped with a warning. The image itself must be
  at least 4 voxels in every direction.

  :return: Yields LoG filtered image for each valid sigma,
           'log-sigma-<sigma>-mm-3D' and ``kwargs``
  """
  global logger
  logger.debug('Generating LoG images')

  size = numpy.array(inputImage.GetSize())
  spacing = numpy.array(inputImage.GetSpacing())

  # The LoG filter fails on images smaller than 4 voxels in any direction
  if numpy.min(size) < 4:
    logger.warning('Image too small to apply LoG filter, size: %s', size)
    return

  for sigmaValue in kwargs.get('sigma', []):
    logger.info('Computing LoG with sigma %g', sigmaValue)

    if sigmaValue <= 0.0:
      logger.warning('applyLoG: sigma must be greater than 0.0: %g', sigmaValue)
      continue

    if not numpy.all(size >= numpy.ceil(sigmaValue / spacing) + 1):
      logger.warning('applyLoG: sigma(%g)/spacing(%s) + 1 must be greater than the size(%s) of the inputImage',
                     sigmaValue,
                     spacing,
                     size)
      continue

    logFilter = sitk.LaplacianRecursiveGaussianImageFilter()
    logFilter.SetNormalizeAcrossScale(True)
    logFilter.SetSigma(sigmaValue)

    inputImageName = 'log-sigma-%s-mm-3D' % (str(sigmaValue).replace('.', '-'))
    logger.debug('Yielding %s image', inputImageName)
    yield logFilter.Execute(inputImage), inputImageName, kwargs
def getWaveletImage(inputImage, inputMask, **kwargs):
  """
  Apply a stationary wavelet transform (via ``_swt3``) and yield the detail
  images of every level, followed by the final approximation image.

  When ``force2D`` is set, the axis given by ``force2Ddimension`` [0] is
  excluded from the decomposition. Image names follow the pyradiomics scheme
  ('wavelet-HLH', 'wavelet2-...' for deeper levels, all-'L' for the
  approximation).

  :return: Yields each wavelet decomposition image, its name and ``kwargs``
  """
  global logger
  logger.debug('Generating Wavelet images')

  Nd = inputImage.GetDimension()
  axes = list(range(Nd - 1, -1, -1))
  if kwargs.get('force2D', False):
    axes.remove(kwargs.get('force2Ddimension', 0))

  approx, details = _swt3(inputImage, tuple(axes), **kwargs)

  for level, decompositions in enumerate(details, start=1):
    for decompositionName, decompositionImage in decompositions.items():
      logger.info('Computing Wavelet %s', decompositionName)

      if level == 1:
        inputImageName = 'wavelet-%s' % (decompositionName)
      else:
        inputImageName = 'wavelet%s-%s' % (level, decompositionName)
      logger.debug('Yielding %s image', inputImageName)
      yield decompositionImage, inputImageName, kwargs

  if len(details) == 1:
    inputImageName = 'wavelet-%s' % ('L' * len(axes))
  else:
    inputImageName = 'wavelet%s-%s' % (len(details), ('L' * len(axes)))
  logger.debug('Yielding approximation (%s) image', inputImageName)
  yield approx, inputImageName, kwargs
def _swt3(inputImage, axes, **kwargs):  # Stationary Wavelet Transform 3D
  """
  Apply a (multi-level) stationary wavelet transform along ``axes``.

  Kwargs used: ``wavelet`` ['coif1'], ``level`` [1] and ``start_level`` [0].
  Odd-sized dimensions are padded (mode 'wrap') because pywt.swtn requires even
  sizes; the padding is sliced off again before images are returned.

  :return: tuple (approximation image, list of dicts of detail images per level,
           keys like 'HLH' with 'L' = low-pass and 'H' = high-pass)
  """
  wavelet = kwargs.get('wavelet', 'coif1')
  level = kwargs.get('level', 1)
  start_level = kwargs.get('start_level', 0)

  matrix = sitk.GetArrayFromImage(inputImage)  # This function gets a numpy array from the SimpleITK Image "inputImage"
  matrix = numpy.asarray(matrix)  # The function np.asarray converts "matrix" (which could be also a tuple) into an array.

  original_shape = matrix.shape
  # original_shape becomes a tuple (?,?,?) containing the number of rows, columns, and slices of the image
  # this is of course dependent on the number of dimensions, but the same principle holds
  padding = tuple([(0, 1 if dim % 2 != 0 else 0) for dim in original_shape])
  # padding is necessary because of pywt.swtn (see function Notes)
  data = matrix.copy()  # creates a modifiable copy of "matrix" and we call it "data"
  data = numpy.pad(data, padding, 'wrap')  # padding the tuple "padding" previously computed

  if not isinstance(wavelet, pywt.Wavelet):
    wavelet = pywt.Wavelet(wavelet)

  for i in range(0, start_level):  # if start_level = 0 (default) this for loop never gets executed
    # compute all decompositions and saves them in "dec" dict
    dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
    # copies in "data" just the "aaa" decomposition (i.e. approximation; No of consecutive 'a's = len(axes))
    data = dec['a' * len(axes)].copy()

  ret = []  # initialize empty list
  for i in range(start_level, start_level + level):
    # compute the n-dimensional stationary wavelet transform
    dec = pywt.swtn(data, wavelet, level=1, start_level=0, axes=axes)[0]
    # Copy the approximation into data (approximation in output / input for next levels)
    data = dec['a' * len(axes)].copy()

    dec_im = {}  # initialize empty dict
    for decName, decImage in six.iteritems(dec):
      # Returning the approximiation is done only for the last loop,
      # and is handled separately below (by building it from `data`)
      # There for, skip it here
      if decName == 'a' * len(axes):
        continue
      decTemp = decImage.copy()
      decTemp = decTemp[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
      sitkImage = sitk.GetImageFromArray(decTemp)
      sitkImage.CopyInformation(inputImage)

      dec_im[str(decName).replace('a', 'L').replace('d', 'H')] = sitkImage
      # modifies 'a' with 'L' (Low-pass filter) and 'd' with 'H' (High-pass filter)

    ret.append(dec_im)  # appending all the filtered sitk images (stored in "dec_im") to the "ret" list

  data = data[tuple(slice(None, -1 if dim % 2 != 0 else None) for dim in original_shape)]
  approximation = sitk.GetImageFromArray(data)
  approximation.CopyInformation(inputImage)

  return approximation, ret  # returns the approximation and the detail (ret) coefficients of the stationary wavelet decomposition
def getSquareImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the square of the image intensities, rescaled so the output
  range equals the input range: f(x) = (x / sqrt(max|x|))^2.

  :return: Yields square filtered image, 'square' and ``kwargs``
  """
  global logger
  arr = sitk.GetArrayFromImage(inputImage).astype('float64')

  # NOTE(review): an all-zero image makes this a division by zero — confirm
  # upstream guarantees a non-constant input.
  scale = 1 / numpy.sqrt(numpy.max(numpy.abs(arr)))
  squared = (scale * arr) ** 2

  outImage = sitk.GetImageFromArray(squared)
  outImage.CopyInformation(inputImage)

  logger.debug('Yielding square image')
  yield outImage, 'square', kwargs
def getSquareRootImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the square root of the absolute image intensities, with the
  original sign retained and rescaled to the original range:
  f(x) = sign(x) * sqrt(|x| * max|x|).

  :return: Yields square root filtered image, 'squareroot' and ``kwargs``
  """
  global logger
  arr = sitk.GetArrayFromImage(inputImage).astype('float64')
  peak = numpy.max(numpy.abs(arr))

  # Positive and negative voxels are handled separately so the sign survives the root
  arr[arr > 0] = numpy.sqrt(arr[arr > 0] * peak)
  arr[arr < 0] = - numpy.sqrt(-arr[arr < 0] * peak)

  outImage = sitk.GetImageFromArray(arr)
  outImage.CopyInformation(inputImage)

  logger.debug('Yielding squareroot image')
  yield outImage, 'squareroot', kwargs
def getLogarithmImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the logarithm of the absolute image intensities, with the
  original sign retained and the result rescaled to the original intensity
  range: f(x) = sign(x) * log(|x| + 1), scaled by max|x| / max|f|.

  Fix: the final rescale is guarded against division by zero for a constant
  all-zero image (previously this computed 0/0 and yielded an all-NaN image).

  :return: Yields logarithm filtered image, 'logarithm' and ``kwargs``
  """
  global logger
  im = sitk.GetArrayFromImage(inputImage)
  im = im.astype('float64')
  im_max = numpy.max(numpy.abs(im))

  # log1p-style transform, applied per sign so negative values stay negative
  im[im > 0] = numpy.log(im[im > 0] + 1)
  im[im < 0] = - numpy.log(- (im[im < 0] - 1))

  # Rescale back to the original intensity range; for an all-zero image both
  # maxima are 0, so skip the rescale instead of producing NaN (0 / 0).
  log_max = numpy.max(numpy.abs(im))
  if log_max > 0:
    im = im * (im_max / log_max)

  im = sitk.GetImageFromArray(im)
  im.CopyInformation(inputImage)

  logger.debug('Yielding logarithm image')
  yield im, 'logarithm', kwargs
def getExponentialImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the exponential of the image intensities, rescaled so the
  output maximum equals the input maximum: f(x) = e^(x * log(max|x|) / max|x|).

  :return: Yields exponential filtered image, 'exponential' and ``kwargs``
  """
  global logger
  arr = sitk.GetArrayFromImage(inputImage).astype('float64')

  # NOTE(review): a constant all-zero image makes max|x| = 0 and this
  # coefficient undefined (log(0)/0) — confirm upstream guarantees otherwise.
  peak = numpy.max(numpy.abs(arr))
  exponent = numpy.log(peak) / peak
  filtered = numpy.exp(exponent * arr)

  outImage = sitk.GetImageFromArray(filtered)
  outImage.CopyInformation(inputImage)

  logger.debug('Yielding exponential image')
  yield outImage, 'exponential', kwargs
def getGradientImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the gradient magnitude of the image.

  Kwarg ``gradientUseSpacing`` [True] controls whether the image spacing is
  taken into account when computing the gradient.

  :return: Yields gradient magnitude image, 'gradient' and ``kwargs``
  """
  gradientFilter = sitk.GradientMagnitudeImageFilter()
  gradientFilter.SetUseImageSpacing(kwargs.get('gradientUseSpacing', True))
  yield gradientFilter.Execute(inputImage), 'gradient', kwargs
def getLBP2DImage(inputImage, inputMask, **kwargs):
  """
  Compute and yield the Local Binary Pattern (LBP) image in 2D (by slice for 3D input).

  Kwargs used: ``lbp2DRadius`` [1], ``lbp2DSamples`` [8], ``lbp2DMethod`` ['uniform']
  and ``force2Ddimension`` [0] (the axis along which slices are taken for 3D input).

  Requires package scikit-image; when it cannot be imported, a warning is logged
  and nothing is yielded.

  :return: Yields LBP filtered image, 'lbp-2D' and ``kwargs``
  """
  global logger
  try:
    from skimage.feature import local_binary_pattern
  except ImportError:
    logger.warning('Could not load required package "skimage", cannot implement filter LBP 2D')
    return

  lbp_radius = kwargs.get('lbp2DRadius', 1)
  lbp_samples = kwargs.get('lbp2DSamples', 8)
  lbp_method = kwargs.get('lbp2DMethod', 'uniform')

  im_arr = sitk.GetArrayFromImage(inputImage)

  Nd = inputImage.GetDimension()
  if Nd == 3:
    # Warn the user if features are extracted in 3D, as this function calculates LBP in 2D
    if not kwargs.get('force2D', False):
      logger.warning('Calculating Local Binary Pattern in 2D, but extracting features in 3D. Use with caution!')
    lbp_axis = kwargs.get('force2Ddimension', 0)

    im_arr = im_arr.swapaxes(0, lbp_axis)
    for idx in range(im_arr.shape[0]):
      im_arr[idx, ...] = local_binary_pattern(im_arr[idx, ...], P=lbp_samples, R=lbp_radius, method=lbp_method)
    im_arr = im_arr.swapaxes(0, lbp_axis)
  elif Nd == 2:
    im_arr = local_binary_pattern(im_arr, P=lbp_samples, R=lbp_radius, method=lbp_method)
  else:
    logger.warning('LBP 2D is only available for 2D or 3D with forced 2D extraction')
    return

  im = sitk.GetImageFromArray(im_arr)
  im.CopyInformation(inputImage)

  yield im, 'lbp-2D', kwargs
def getLBP3DImage(inputImage, inputMask, **kwargs):
  """
  Compute 3D Local Binary Patterns via spherical harmonics on icosphere samples.

  Yields one derived image per harmonic level ('lbp-3D-m<level>') followed by a
  spherical-kurtosis image ('lbp-3D-k'). Requires a 3D input image and the
  optional dependencies scipy and trimesh.

  Keyword arguments used: label, lbp3DLevels, lbp3DIcosphereRadius,
  lbp3DIcosphereSubdivision; force2D only triggers a warning.
  """
  global logger
  Nd = inputImage.GetDimension()
  if Nd != 3:
    logger.warning('LBP 3D only available for 3 dimensional images, found %i dimensions', Nd)
    return
  try:
    from scipy.stats import kurtosis
    from scipy.ndimage.interpolation import map_coordinates
    from scipy.special import sph_harm
    from trimesh.creation import icosphere
  except ImportError:
    logger.warning('Could not load required package "scipy" or "trimesh", cannot implement filter LBP 3D')
    return
  # Warn the user if features are extracted in 2D, as this function calculates LBP in 3D
  if kwargs.get('force2D', False):
    logger.warning('Calculating Local Binary Pattern in 3D, but extracting features in 2D. Use with caution!')
  label = kwargs.get('label', 1)
  lbp_levels = kwargs.get('lbp3DLevels', 2)
  lbp_icosphereRadius = kwargs.get('lbp3DIcosphereRadius', 1)
  lbp_icosphereSubdivision = kwargs.get('lbp3DIcosphereSubdivision', 1)
  im_arr = sitk.GetArrayFromImage(inputImage)
  ma_arr = sitk.GetArrayFromImage(inputMask)
  # Variables used in the shape comments:
  # Np Number of voxels
  # Nv Number of vertices
  # Vertices icosahedron for spherical sampling
  coords_icosahedron = numpy.array(icosphere(lbp_icosphereSubdivision, lbp_icosphereRadius).vertices) # shape(Nv, 3)
  # Corresponding polar coordinates
  theta = numpy.arccos(numpy.true_divide(coords_icosahedron[:, 2], lbp_icosphereRadius))
  phi = numpy.arctan2(coords_icosahedron[:, 1], coords_icosahedron[:, 0])
  # Corresponding spherical harmonics coefficients Y_{m, n, theta, phi}
  Y = sph_harm(0, 0, theta, phi) # shape(Nv,)
  n_ix = numpy.array(0)
  # Stack harmonics for all orders m of each degree n < lbp_levels; n_ix records
  # which degree each column of Y belongs to.
  for n in range(1, lbp_levels):
    for m in range(-n, n + 1):
      n_ix = numpy.append(n_ix, n)
      Y = numpy.column_stack((Y, sph_harm(m, n, theta, phi)))
  # shape (Nv, x) where x is the number of iterations in the above loops + 1
  # Get labelled coordinates
  ROI_coords = numpy.where(ma_arr == label) # shape(3, Np)
  # Interpolate f (samples on the spheres across the entire volume)
  coords = numpy.array(ROI_coords).T[None, :, :] + coords_icosahedron[:, None, :] # shape(Nv, Np, 3)
  f = map_coordinates(im_arr, coords.T, order=3) # Shape(Np, Nv) Note that 'Np' and 'Nv' are swapped due to .T
  # Compute spherical Kurtosis
  k = kurtosis(f, axis=1) # shape(Np,)
  # Apply sign function
  f_centroids = im_arr[ROI_coords] # Shape(Np,)
  f = numpy.greater_equal(f, f_centroids[:, None]).astype(int) # Shape(Np, Nv)
  # Compute c_{m,n} coefficients
  c = numpy.multiply(f[:, :, None], Y[None, :, :]) # Shape(Np, Nv, x)
  c = c.sum(axis=1) # Shape(Np, x)
  # Integrate over m
  f = numpy.multiply(c[:, None, n_ix == 0], Y[None, :, n_ix == 0]) # Shape (Np, Nv, 1)
  for n in range(1, lbp_levels):
    f = numpy.concatenate((f,
                           numpy.sum(numpy.multiply(c[:, None, n_ix == n], Y[None, :, n_ix == n]),
                                     axis=2, keepdims=True)
                           ),
                          axis=2)
  # Shape f (Np, Nv, levels)
  # Compute L2-Norm
  f = numpy.sqrt(numpy.sum(f ** 2, axis=1)) # shape(Np, levels)
  # Keep only Real Part
  f = numpy.real(f) # shape(Np, levels)
  k = numpy.real(k) # shape(Np,)
  # Yield the derived images for each level
  # NOTE(review): numpy.ndarray() leaves non-ROI voxels uninitialized; only
  # ROI_coords entries are defined in the yielded images -- confirm intended.
  result = numpy.ndarray(im_arr.shape)
  for l_idx in range(lbp_levels):
    result[ROI_coords] = f[:, l_idx]
    # Create a SimpleITK image
    im = sitk.GetImageFromArray(result)
    im.CopyInformation(inputImage)
    yield im, 'lbp-3D-m%d' % (l_idx + 1), kwargs
  # Yield Kurtosis
  result[ROI_coords] = k
  # Create a SimpleITK image
  im = sitk.GetImageFromArray(result)
  im.CopyInformation(inputImage)
  yield im, 'lbp-3D-k', kwargs
| true | true |
f722c4835b23199b17733664f837a80e2c32f7bf | 1,114 | py | Python | debug/test_tf_funcs.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | debug/test_tf_funcs.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | 1 | 2020-12-09T07:29:00.000Z | 2020-12-09T07:29:00.000Z | debug/test_tf_funcs.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: test_tf_funcs.py
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 13-08-2019
# @last modified: Tue 13 Aug 2019 05:38:05 PM EDT
import tensorflow as tf
import numpy as np
if __name__ == "__main__":
    # Demonstrate tf.gather_nd on a rank-3 tensor:
    # source = [[[ 0  1], [ 2  3], [ 4  5]],
    #           [[ 6  7], [ 8  9], [10 11]]]
    source = np.reshape(np.arange(12), [2, 3, 2])
    x = tf.constant(source)
    # Index tensor of shape (2, 2, 3): the trailing size-3 axis fully indexes a
    # scalar, so the result keeps the leading (2, 2) shape.
    batched_indices = [[[0, 0, 0], [0, 1, 1]], [[1, 0, 1], [1, 1, 0]]]
    gathered_batched = tf.gather_nd(x, batched_indices)
    # Index tensor of shape (4, 3): the result is a flat vector of 4 scalars.
    flat_indices = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
    gathered_flat = tf.gather_nd(x, flat_indices)
    # TF1-style execution: build the graph first, then evaluate it in a session.
    sess = tf.Session()
    x, res1, res2 = sess.run([x, gathered_batched, gathered_flat])
    print ('x = {}'.format(x))
    print ('res1 = {}'.format(res1))
    print ('res2 = {}'.format(res2))
| 26.52381 | 76 | 0.509874 |
import tensorflow as tf
import numpy as np
if __name__ == "__main__":
data = np.reshape(np.arange(12), [2, 3, 2])
x = tf.constant(data)
idx_1 = [[[0, 0, 0], [0, 1, 1]], [[1, 0, 1], [1, 1, 0]]]
result1 = tf.gather_nd(x, idx_1)
idx_2 = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
result2 = tf.gather_nd(x, idx_2)
sess = tf.Session()
x, res1, res2 = sess.run([x, result1, result2])
print ('x = {}'.format(x))
print ('res1 = {}'.format(res1))
print ('res2 = {}'.format(res2))
| true | true |
f722c5c7dd5c858532cb80dbbf2c250515d98113 | 3,353 | py | Python | tests/storages_tests/rdb_tests/test_with_server.py | agarwalrounak/optuna | b5fd0439dc33c94c06251974b8cb023a3f9bccc7 | [
"MIT"
] | 4,950 | 2019-11-15T07:35:51.000Z | 2022-03-31T10:32:42.000Z | tests/storages_tests/rdb_tests/test_with_server.py | SCUTJcfeng/optuna | 9331374a2460da067a6922e4ea09dd4706f3d950 | [
"MIT"
] | 2,490 | 2019-11-15T07:06:20.000Z | 2022-03-31T23:52:45.000Z | tests/storages_tests/rdb_tests/test_with_server.py | SCUTJcfeng/optuna | 9331374a2460da067a6922e4ea09dd4706f3d950 | [
"MIT"
] | 621 | 2019-11-15T11:26:57.000Z | 2022-03-28T11:46:34.000Z | from multiprocessing import Pool
import os
from typing import Sequence
from typing import Tuple
import numpy as np
import pytest
import optuna
_STUDY_NAME = "_test_multiprocess"
def f(x: float, y: float) -> float:
    """Objective surface: parabola in x with its minimum at x == 3, offset by y."""
    shifted = x - 3
    return shifted ** 2 + y
def objective(trial: optuna.Trial) -> float:
    """Sample (x, y), record intermediate values and attrs, and return f(x, y)."""
    sampled_x = trial.suggest_float("x", -10, 10)
    sampled_y = trial.suggest_float("y", -10, 10)
    # Persist per-step values so the storage round-trip can be verified later.
    for step, value in enumerate((sampled_x, sampled_y)):
        trial.report(value, step)
    trial.set_user_attr("x", sampled_x)
    trial.set_system_attr("y", sampled_y)
    return f(sampled_x, sampled_y)
def run_optimize(args: Tuple[str, str]) -> None:
    """Worker entry point: load the shared study and contribute 20 trials."""
    study_name, storage_url = args
    shared_study = optuna.load_study(study_name=study_name, storage=storage_url)
    shared_study.optimize(objective, n_trials=20)
@pytest.fixture
def storage_url() -> str:
    """Return TEST_DB_URL with any leftover study removed, or skip the test."""
    if "TEST_DB_URL" not in os.environ:
        pytest.skip("This test requires TEST_DB_URL.")
    url = os.environ["TEST_DB_URL"]
    # A previous run may have left the study behind; deleting a missing study
    # raises KeyError, which is fine to ignore.
    try:
        optuna.study.delete_study(_STUDY_NAME, url)
    except KeyError:
        pass
    return url
def _check_trials(trials: Sequence[optuna.trial.FrozenTrial]) -> None:
    """Assert every trial completed and its values/params/attrs round-tripped."""
    # Check trial states.
    assert all(trial.state == optuna.trial.TrialState.COMPLETE for trial in trials)
    # Check trial values and params.
    assert all("x" in trial.params for trial in trials)
    assert all("y" in trial.params for trial in trials)
    # Stored objective value must match recomputing f from the stored params.
    assert all(
        np.isclose(
            np.asarray([trial.value for trial in trials]),
            [f(trial.params["x"], trial.params["y"]) for trial in trials],
            atol=1e-4,
        ).tolist()
    )
    # Check intermediate values (objective() reported x at step 0, y at step 1).
    assert all(len(trial.intermediate_values) == 2 for trial in trials)
    assert all(trial.params["x"] == trial.intermediate_values[0] for trial in trials)
    assert all(trial.params["y"] == trial.intermediate_values[1] for trial in trials)
    # Check attrs.
    assert all(
        np.isclose(
            [trial.user_attrs["x"] for trial in trials],
            [trial.params["x"] for trial in trials],
            atol=1e-4,
        ).tolist()
    )
    assert all(
        np.isclose(
            [trial.system_attrs["y"] for trial in trials],
            [trial.params["y"] for trial in trials],
            atol=1e-4,
        ).tolist()
    )
def test_loaded_trials(storage_url: str) -> None:
    """Optimize once, then reload the study and verify trials survive the round-trip."""
    # Please create the tables by placing this function before the multi-process tests.
    N_TRIALS = 20
    study = optuna.create_study(study_name=_STUDY_NAME, storage=storage_url)
    # Run optimization
    study.optimize(objective, n_trials=N_TRIALS)
    trials = study.trials
    assert len(trials) == N_TRIALS
    _check_trials(trials)
    # Create a new study to confirm the study can load trial properly.
    loaded_study = optuna.load_study(study_name=_STUDY_NAME, storage=storage_url)
    _check_trials(loaded_study.trials)
def test_multiprocess(storage_url: str) -> None:
    """Run 8 worker processes against one RDB-backed study; verify all trials stored."""
    n_workers = 8
    study_name = _STUDY_NAME
    optuna.create_study(storage=storage_url, study_name=study_name)
    with Pool(n_workers) as pool:
        # Each worker loads the same study and contributes 20 trials.
        pool.map(run_optimize, [(study_name, storage_url)] * n_workers)
    study = optuna.load_study(study_name=study_name, storage=storage_url)
    trials = study.trials
    assert len(trials) == n_workers * 20
    _check_trials(trials)
| 28.415254 | 87 | 0.664778 | from multiprocessing import Pool
import os
from typing import Sequence
from typing import Tuple
import numpy as np
import pytest
import optuna
_STUDY_NAME = "_test_multiprocess"
def f(x: float, y: float) -> float:
return (x - 3) ** 2 + y
def objective(trial: optuna.Trial) -> float:
x = trial.suggest_float("x", -10, 10)
y = trial.suggest_float("y", -10, 10)
trial.report(x, 0)
trial.report(y, 1)
trial.set_user_attr("x", x)
trial.set_system_attr("y", y)
return f(x, y)
def run_optimize(args: Tuple[str, str]) -> None:
study_name = args[0]
storage_url = args[1]
study = optuna.load_study(study_name=study_name, storage=storage_url)
study.optimize(objective, n_trials=20)
@pytest.fixture
def storage_url() -> str:
if "TEST_DB_URL" not in os.environ:
pytest.skip("This test requires TEST_DB_URL.")
storage_url = os.environ["TEST_DB_URL"]
try:
optuna.study.delete_study(_STUDY_NAME, storage_url)
except KeyError:
pass
return storage_url
def _check_trials(trials: Sequence[optuna.trial.FrozenTrial]) -> None:
assert all(trial.state == optuna.trial.TrialState.COMPLETE for trial in trials)
assert all("x" in trial.params for trial in trials)
assert all("y" in trial.params for trial in trials)
assert all(
np.isclose(
np.asarray([trial.value for trial in trials]),
[f(trial.params["x"], trial.params["y"]) for trial in trials],
atol=1e-4,
).tolist()
)
assert all(len(trial.intermediate_values) == 2 for trial in trials)
assert all(trial.params["x"] == trial.intermediate_values[0] for trial in trials)
assert all(trial.params["y"] == trial.intermediate_values[1] for trial in trials)
assert all(
np.isclose(
[trial.user_attrs["x"] for trial in trials],
[trial.params["x"] for trial in trials],
atol=1e-4,
).tolist()
)
assert all(
np.isclose(
[trial.system_attrs["y"] for trial in trials],
[trial.params["y"] for trial in trials],
atol=1e-4,
).tolist()
)
def test_loaded_trials(storage_url: str) -> None:
N_TRIALS = 20
study = optuna.create_study(study_name=_STUDY_NAME, storage=storage_url)
study.optimize(objective, n_trials=N_TRIALS)
trials = study.trials
assert len(trials) == N_TRIALS
_check_trials(trials)
loaded_study = optuna.load_study(study_name=_STUDY_NAME, storage=storage_url)
_check_trials(loaded_study.trials)
def test_multiprocess(storage_url: str) -> None:
n_workers = 8
study_name = _STUDY_NAME
optuna.create_study(storage=storage_url, study_name=study_name)
with Pool(n_workers) as pool:
pool.map(run_optimize, [(study_name, storage_url)] * n_workers)
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
| true | true |
f722c5d12335461948e2e30b324c028ee6d1ca43 | 611 | py | Python | dynamic_programming/longest common subsequence.py | nilansharora/Python-algorithms | ef38fab1cfa956e00118f44625ba305baa7e1066 | [
"MIT"
] | 1 | 2020-06-23T10:31:01.000Z | 2020-06-23T10:31:01.000Z | dynamic_programming/longest common subsequence.py | nilansharora/Python-algorithms | ef38fab1cfa956e00118f44625ba305baa7e1066 | [
"MIT"
] | null | null | null | dynamic_programming/longest common subsequence.py | nilansharora/Python-algorithms | ef38fab1cfa956e00118f44625ba305baa7e1066 | [
"MIT"
] | 1 | 2019-10-09T11:02:07.000Z | 2019-10-09T11:02:07.000Z | """
LCS Problem Statement: Given two sequences, find the length of the longest subsequence present in both of them.
A subsequence is a sequence that appears in the same relative order, but is not necessarily contiguous.
Example: "abc" and "abg" are subsequences of "abcdefgh".
"""
def LCS(s1, s2):
    """Return the length of the longest common subsequence of s1 and s2.

    Classic O(len(s1) * len(s2)) dynamic-programming table: cell [i][j] holds
    the LCS length of the prefixes s1[:i] and s2[:j].
    """
    rows, cols = len(s1), len(s2)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i, ch1 in enumerate(s1, start=1):
        for j, ch2 in enumerate(s2, start=1):
            if ch1 == ch2:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
| 32.157895 | 107 | 0.567921 | def LCS(s1, s2):
m = len(s1)
n = len(s2)
arr = [[0 for i in range(n+1)]for j in range(m+1)]
for i in range(1,m+1):
for j in range(1,n+1):
if s1[i-1] == s2[j-1]:
arr[i][j] = arr[i-1][j-1]+1
else:
arr[i][j] = max(arr[i-1][j], arr[i][j-1])
return arr[m][n]
| true | true |
f722c6927aad955f35834589666b1bc5a3185125 | 1,726 | py | Python | modules/text/language_model/lda_webpage/vose_alias.py | chunzhang-hub/PaddleHub | c5cfd021f77fd59340fb26e223e09a592e6a345f | [
"Apache-2.0"
] | 8,360 | 2019-01-18T10:46:45.000Z | 2022-03-31T14:50:02.000Z | modules/text/language_model/lda_webpage/vose_alias.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,158 | 2019-04-11T09:22:43.000Z | 2022-03-31T12:12:09.000Z | modules/text/language_model/lda_webpage/vose_alias.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,677 | 2019-04-09T15:07:40.000Z | 2022-03-31T06:41:10.000Z | import os
import numpy as np
from paddlehub.common.logger import logger
from lda_webpage.util import rand, rand_k
class VoseAlias(object):
    """Vose's Alias Method: O(1) sampling from a discrete distribution
    after an O(n) table build.
    """

    def __init__(self):
        self.__alias = None  # np.array of alias (fallback) indices
        self.__prob = None  # np.array of acceptance probabilities in [0, 1]

    def initialize(self, distribution):
        """Initialize the alias table according to the input distribution

        Arg:
            distribution: Numpy array (non-negative weights; need not sum to 1).
        """
        size = distribution.shape[0]
        self.__alias = np.zeros(size, dtype=np.int64)
        self.__prob = np.zeros(size)
        sum_ = np.sum(distribution)
        p = distribution / sum_ * size  # Scale up probability.
        # Partition entries by whether they over- or under-fill their slot.
        large, small = [], []
        for i, p_ in enumerate(p):
            if p_ < 1.0:
                small.append(i)
            else:
                large.append(i)

        # Pair each under-full slot with an over-full donor.
        while large and small:
            l = small.pop(0)
            g = large.pop(0)
            self.__prob[l] = p[l]
            self.__alias[l] = g
            p[g] = p[g] + p[l] - 1  # A more numerically stable option.
            if p[g] < 1.0:
                small.append(g)
            else:
                large.append(g)

        # Remaining slots accept with probability 1 (no alias needed).
        while large:
            self.__prob[large.pop(0)] = 1.0
        while small:
            self.__prob[small.pop(0)] = 1.0

    def generate(self):
        """Generate samples from given distribution.
        """
        dart1 = rand_k(self.size())
        # BUG FIX: the second dart must be a uniform draw compared against the
        # acceptance probability in [0, 1]; the original int(rand()) truncated
        # it to 0, so the alias column was always taken.
        # (Assumes util.rand() returns a float in [0, 1) -- confirm.)
        dart2 = rand()
        return dart1 if dart2 > self.__prob[dart1] else self.__alias[dart1]

    def size(self):
        return self.__prob.shape[0]
| 26.151515 | 75 | 0.509849 | import os
import numpy as np
from paddlehub.common.logger import logger
from lda_webpage.util import rand, rand_k
class VoseAlias(object):
def __init__(self):
self.__alias = None
self.__prob = None
def initialize(self, distribution):
size = distribution.shape[0]
self.__alias = np.zeros(size, dtype=np.int64)
self.__prob = np.zeros(size)
sum_ = np.sum(distribution)
p = distribution / sum_ * size
large, small = [], []
for i, p_ in enumerate(p):
if p_ < 1.0:
small.append(i)
else:
large.append(i)
while large and small:
l = small[0]
g = large[0]
small.pop(0)
large.pop(0)
self.__prob[l] = p[l]
self.__alias[l] = g
p[g] = p[g] + p[l] - 1
if p[g] < 1.0:
small.append(g)
else:
large.append(g)
while large:
g = large[0]
large.pop(0)
self.__prob[g] = 1.0
while small:
l = small[0]
small.pop(0)
self.__prob[l] = 1.0
def generate(self):
dart1 = rand_k(self.size())
dart2 = int(rand())
return dart1 if dart2 > self.__prob[dart1] else self.__alias[dart1]
def size(self):
return self.__prob.shape[0]
| true | true |
f722c89a9cfdd582a9b7ec9d7a05f47e101e904d | 1,958 | py | Python | mods/Lorenz95/core.py | franktoffel/dapper | 373a27273ea109f349e5edcdcef0cfe0b83b925e | [
"MIT"
] | 3 | 2021-07-31T10:13:11.000Z | 2022-01-14T16:52:04.000Z | mods/Lorenz95/core.py | franktoffel/dapper | 373a27273ea109f349e5edcdcef0cfe0b83b925e | [
"MIT"
] | null | null | null | mods/Lorenz95/core.py | franktoffel/dapper | 373a27273ea109f349e5edcdcef0cfe0b83b925e | [
"MIT"
] | null | null | null | # "Lorenz-95" (or 96) model. For a deeper introduction, see
# "DAPPER/tutorials/T4 - Dynamical systems, chaos, Lorenz.ipynb"
#
# Note: implementation is ndim-agnostic.
import numpy as np
from tools.math import rk4, integrate_TLM, is1d
Force = 8.0  # standard Lorenz-96 forcing term used by dxdt

# Note: the model is unstable (blows up) if there are large peaks
# (as may be occasioned by the analysis update, especially with partial obs).
# Example: integrate 4 steps with dt=0.05 from x0 = [0,-30,0,30].
# This is effectively a CFL condition... Can be addressed by:
# - lowering dt
# - using an implicit time stepping scheme instead of rk4
# - stupidly crop amplitudes, as is done here:
prevent_blow_up = False

Tplot = 10  # default liveplot window length

x0 = lambda M: 2.3*np.ones(M)  # default initial state for an M-dim model
def dxdt(x):
  """Lorenz-96 tendency; ndim-agnostic (state lives on the last axis)."""
  last_axis = x.ndim - 1
  def shift(v, n):
    # Circular shift along the state dimension.
    return np.roll(v, -n, axis=last_axis)
  return (shift(x, 1) - shift(x, -2)) * shift(x, -1) - x + Force
def step(x0, t, dt):
  """One RK4 step of length dt from state x0 (mutates x0 when clipping is on)."""
  if prevent_blow_up:
    # Crude amplitude cropping to keep the integration from diverging.
    too_big = abs(x0) > 30
    x0[too_big] *= 0.1
  return rk4(lambda t, x: dxdt(x), x0, np.nan, dt)
################################################
# OPTIONAL (not necessary for EnKF or PartFilt):
################################################
def TLM(x):
  """Tangent linear model: the Jacobian of dxdt evaluated at the 1d state x."""
  assert is1d(x)
  Nx = len(x)
  J = np.zeros((Nx, Nx))
  for i in range(Nx):
    ip1 = (i + 1) % Nx  # only the +1 neighbour needs an explicit wrap-around
    J[i, i] = -1.0
    J[i, i - 2] = -x[i - 1]
    J[i, ip1] = +x[i - 1]
    J[i, i - 1] = x[ip1] - x[i - 2]
  return J
def dfdx(x,t,dt):
  """Integral of TLM. Jacobian of step.

  Returns the resolvent (state-transition matrix) of the tangent linear model
  over one step of length dt; t is accepted for interface uniformity.
  """
  # method='analytic' is a substantial upgrade for Lor95
  return integrate_TLM(TLM(x),dt,method='analytic')
################################################
# Add some non-default liveplotters
################################################
import tools.liveplotting as LP
def LPs(jj=None):
  """Liveplotter setup tuples (fignum, enabled-by-default, plotter) for this model."""
  return [
      (11, 1, LP.spatial1d(jj)),
      (12, 1, LP.correlations),
      (15, 0, LP.spectral_errors),
      (13, 0, LP.phase3d(jj)),
      (11, 0, LP.sliding_marginals(jj)),
  ]
| 27.194444 | 78 | 0.567416 |
import numpy as np
from tools.math import rk4, integrate_TLM, is1d
Force = 8.0
prevent_blow_up = False
Tplot = 10
x0 = lambda M: 2.3*np.ones(M)
def dxdt(x):
a = x.ndim-1
s = lambda x,n: np.roll(x,-n,axis=a)
return (s(x,1)-s(x,-2))*s(x,-1) - x + Force
def step(x0, t, dt):
if prevent_blow_up:
clip = abs(x0)>30
x0[clip] *= 0.1
return rk4(lambda t,x: dxdt(x), x0, np.nan, dt)
| true | true |
f722c9033b578367acf4c87292ac6d21909101b8 | 868 | py | Python | analytics.py | Abhi-Balijepalli/PONDR-dev-api | 9ffe7a031b56241ccbda9281de722adcf9a610ea | [
"Apache-2.0"
] | null | null | null | analytics.py | Abhi-Balijepalli/PONDR-dev-api | 9ffe7a031b56241ccbda9281de722adcf9a610ea | [
"Apache-2.0"
] | null | null | null | analytics.py | Abhi-Balijepalli/PONDR-dev-api | 9ffe7a031b56241ccbda9281de722adcf9a610ea | [
"Apache-2.0"
] | null | null | null | from import_reqs import *
from app import app
@app.route('/enterprise/product=<id>', methods=['GET'])
def get_advanced_analytics(id):
    """
    get_advanced_analytics(id): company-dashboard endpoint returning the
    advanced analytics document for product `id`.

    Requires a Firebase ID token in the Authorization header; the caller must
    carry the `Enterprise` claim and own the product (claims uid matches the
    document's company_id). Returns 200 with the document, 403 otherwise.
    """
    try:
        id_token = request.headers['Authorization']
        claims = auth.verify_id_token(id_token)
        uid = claims['uid']
        if claims['Enterprise'] is True:
            todo = ADVANCED_ANALYTICS.document(id).get().to_dict()
            if todo['company_id'] == uid:
                return jsonify(todo), 200
            # BUG FIX: the original passed a set literal ({"Access Denied"})
            # to jsonify, which is not JSON-serializable and raised TypeError.
            return jsonify({"error": "Access Denied"}), 403
        return (jsonify("You are not authorized to view this specific enterprise analytics page."), 403)
    except Exception as e:
        # Surface failures as a 500 instead of an implicit 200.
        return f"An Error Occured: {e}", 500
from app import app
@app.route('/enterprise/product=<id>', methods=['GET'])
def get_advanced_analytics(id):
try:
id_token = request.headers['Authorization']
claims = auth.verify_id_token(id_token)
uid = claims['uid']
if claims['Enterprise'] is True:
todo = ADVANCED_ANALYTICS.document(id).get().to_dict()
if todo['company_id'] == uid:
return jsonify(todo), 200
else:
return (jsonify({"Access Denied"}), 403)
else:
return (jsonify("You are not authorized to view this specific enterprise analytics page."), 403)
except Exception as e:
return f"An Error Occured: {e}" | true | true |
f722c92a261204b68b931457cc24a81d1adf368d | 393 | py | Python | Ahriknow/wsgi.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | null | null | null | Ahriknow/wsgi.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | 3 | 2021-03-19T01:28:43.000Z | 2021-04-08T19:57:19.000Z | Ahriknow/wsgi.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | null | null | null | """
WSGI config for Ahriknow project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Fall back to the project's settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Ahriknow.settings')

# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Ahriknow.settings')
application = get_wsgi_application()
| true | true |
f722c92c1d8cbce8eebe04231bf78e6a2f03d486 | 4,682 | py | Python | venv/Lib/site-packages/pandas/tests/frame/methods/test_at_time.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/frame/methods/test_at_time.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/frame/methods/test_at_time.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | from datetime import time
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import timezones
from pandas import (
DataFrame,
date_range,
)
import pandas._testing as tm
class TestAtTime:
    """Tests for DataFrame/Series.at_time (row selection by time-of-day)."""

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_localized_at_time(self, tzstr, frame_or_series):
        # at_time on a tz-localized index must match localizing the naive result.
        tz = timezones.maybe_get_tz(tzstr)
        rng = date_range("4/16/2012", "5/1/2012", freq="H")
        ts = frame_or_series(np.random.randn(len(rng)), index=rng)
        ts_local = ts.tz_localize(tzstr)
        result = ts_local.at_time(time(10, 0))
        expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
        tm.assert_equal(result, expected)
        assert timezones.tz_compare(result.index.tz, tz)

    def test_at_time(self, frame_or_series):
        # Both datetime-like and string time arguments select matching rows.
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        if frame_or_series is not DataFrame:
            ts = ts[0]
        rs = ts.at_time(rng[1])
        assert (rs.index.hour == rng[1].hour).all()
        assert (rs.index.minute == rng[1].minute).all()
        assert (rs.index.second == rng[1].second).all()
        result = ts.at_time("9:30")
        expected = ts.at_time(time(9, 30))
        tm.assert_equal(result, expected)

    def test_at_time_midnight(self, frame_or_series):
        # midnight, everything
        rng = date_range("1/1/2000", "1/31/2000")
        ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
        if frame_or_series is not DataFrame:
            ts = ts[0]
        result = ts.at_time(time(0, 0))
        tm.assert_equal(result, ts)

    def test_at_time_nonexistent(self, frame_or_series):
        # time doesn't exist
        rng = date_range("1/1/2012", freq="23Min", periods=384)
        ts = DataFrame(np.random.randn(len(rng)), rng)
        if frame_or_series is not DataFrame:
            ts = ts[0]
        rs = ts.at_time("16:00")
        assert len(rs) == 0

    @pytest.mark.parametrize(
        "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
    )
    def test_at_time_errors(self, hour):
        # GH#24043: tz-aware time against a naive index must raise.
        dti = date_range("2018", periods=3, freq="H")
        df = DataFrame(list(range(len(dti))), index=dti)
        if getattr(hour, "tzinfo", None) is None:
            result = df.at_time(hour)
            expected = df.iloc[1:2]
            tm.assert_frame_equal(result, expected)
        else:
            with pytest.raises(ValueError, match="Index must be timezone"):
                df.at_time(hour)

    def test_at_time_tz(self):
        # GH#24043: tz-aware time is converted to the index's timezone.
        dti = date_range("2018", periods=3, freq="H", tz="US/Pacific")
        df = DataFrame(list(range(len(dti))), index=dti)
        result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
        expected = df.iloc[1:2]
        tm.assert_frame_equal(result, expected)

    def test_at_time_raises(self, frame_or_series):
        # GH#20725
        obj = DataFrame([[1, 2, 3], [4, 5, 6]])
        if frame_or_series is not DataFrame:
            obj = obj[0]
        msg = "Index must be DatetimeIndex"
        with pytest.raises(TypeError, match=msg):  # index is not a DatetimeIndex
            obj.at_time("00:00")

    @pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
    def test_at_time_axis(self, axis):
        # issue 8839: at_time must honor the axis argument.
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), len(rng)))
        ts.index, ts.columns = rng, rng
        indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
        if axis in ["index", 0]:
            expected = ts.loc[indices, :]
        elif axis in ["columns", 1]:
            expected = ts.loc[:, indices]
        result = ts.at_time("9:30", axis=axis)
        # Without clearing freq, result has freq 1440T and expected 5T
        result.index = result.index._with_freq(None)
        expected.index = expected.index._with_freq(None)
        tm.assert_frame_equal(result, expected)

    def test_at_time_datetimeindex(self):
        # at_time, .loc[time] and positional selection must all agree.
        index = date_range("2012-01-01", "2012-01-05", freq="30min")
        df = DataFrame(np.random.randn(len(index), 5), index=index)
        akey = time(12, 0, 0)
        ainds = [24, 72, 120, 168]
        result = df.at_time(akey)
        expected = df.loc[akey]
        expected2 = df.iloc[ainds]
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result, expected2)
        assert len(result) == 4
| 36.294574 | 82 | 0.582443 | from datetime import time
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import timezones
from pandas import (
DataFrame,
date_range,
)
import pandas._testing as tm
class TestAtTime:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_at_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = frame_or_series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(tzstr)
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_at_time(self, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_equal(result, expected)
def test_at_time_midnight(self, frame_or_series):
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
result = ts.at_time(time(0, 0))
tm.assert_equal(result, ts)
def test_at_time_nonexistent(self, frame_or_series):
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng)), rng)
if frame_or_series is not DataFrame:
ts = ts[0]
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH#24043
dti = date_range("2018", periods=3, freq="H")
df = DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH#24043
dti = date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
if frame_or_series is not DataFrame:
obj = obj[0]
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
# Without clearing freq, result has freq 1440T and expected 5T
result.index = result.index._with_freq(None)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
def test_at_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq="30min")
df = DataFrame(np.random.randn(len(index), 5), index=index)
akey = time(12, 0, 0)
ainds = [24, 72, 120, 168]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, expected2)
assert len(result) == 4
| true | true |
f722c98a8d9ab0cee71e36dd63e9ca989995573b | 1,951 | py | Python | casperlabs_client/decorators.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 2 | 2021-05-12T06:43:45.000Z | 2021-10-02T11:45:41.000Z | casperlabs_client/decorators.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 24 | 2020-06-30T14:55:20.000Z | 2021-01-05T18:18:29.000Z | casperlabs_client/decorators.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 1 | 2020-06-22T15:32:38.000Z | 2020-06-22T15:32:38.000Z | # -*- coding: utf-8 -*-
import functools
import logging
import sys
import time
import grpc
from grpc._channel import _Rendezvous
NUMBER_OF_RETRIES = 5
# Initial delay in seconds before an attempt to retry
INITIAL_DELAY = 0.3


def retry_wrapper(function, *args):
    """Call *function*, retrying gRPC UNAVAILABLE errors with a doubling delay."""
    delay = INITIAL_DELAY
    for attempt in range(NUMBER_OF_RETRIES):
        try:
            return function(*args)
        except _Rendezvous as e:
            out_of_retries = attempt >= NUMBER_OF_RETRIES - 1
            if out_of_retries or e.code() != grpc.StatusCode.UNAVAILABLE:
                raise
            # TODO: Look at using backoff PyPi package.
            delay *= 2
            logging.warning(f"Retrying after {e} in {delay} seconds")
            time.sleep(delay)
def retry_unary(function):
    """Decorate a unary (single-response) gRPC call with retry behaviour."""

    @functools.wraps(function)
    def retrying(*args):
        return retry_wrapper(function, *args)

    return retrying
def retry_stream(function):
    """Decorate a streaming gRPC call with retry behaviour (yields through)."""

    @functools.wraps(function)
    def retrying(*args):
        yield from retry_wrapper(function, *args)

    return retrying
def guarded_command(function):
    """
    Decorator of functions that implement CLI commands.

    The node occasionally throws exceptions instead of sending a proper
    response; the gRPC layer re-raises them locally. Catch whatever the
    command raises, print it to stderr, and turn it into a non-zero shell
    return code.
    :param function: function to be decorated
    :return:
    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            rc = function(*args, **kwargs)
        except Exception as e:
            print(str(e), file=sys.stderr)
            return 1
        # Commands usually succeed by simply not raising, but they may also
        # return an explicit positive error code.
        return 0 if rc is None else rc

    return wrapper
| 27.097222 | 95 | 0.633521 |
import functools
import logging
import sys
import time
import grpc
from grpc._channel import _Rendezvous
# Maximum number of attempts for a gRPC call before giving up.
NUMBER_OF_RETRIES = 5
# Initial delay in seconds before an attempt to retry.
INITIAL_DELAY = 0.3
def retry_wrapper(function, *args):
    """Call *function* with *args*, retrying on gRPC UNAVAILABLE errors.

    Any other error, or an UNAVAILABLE on the last attempt, is re-raised.
    """
    delay = INITIAL_DELAY
    for i in range(NUMBER_OF_RETRIES):
        try:
            return function(*args)
        except _Rendezvous as e:
            if e.code() == grpc.StatusCode.UNAVAILABLE and i < NUMBER_OF_RETRIES - 1:
                # NOTE(review): the delay is doubled *before* sleeping, so the
                # first retry waits 2 * INITIAL_DELAY — confirm this is intended.
                delay += delay
                logging.warning(f"Retrying after {e} in {delay} seconds")
                time.sleep(delay)
            else:
                raise
def retry_unary(function):
    """Decorate a unary gRPC call so that it is retried via retry_wrapper()."""
    @functools.wraps(function)
    def wrapper(*args):
        return retry_wrapper(function, *args)
    return wrapper
def retry_stream(function):
    """Decorate a streaming gRPC call so that it is retried via retry_wrapper()."""
    @functools.wraps(function)
    def wrapper(*args):
        yield from retry_wrapper(function, *args)
    return wrapper
def guarded_command(function):
    """Decorate a CLI command: convert any exception into exit code 1."""
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            rc = function(*args, **kwargs)
            # Commands are assumed to succeed if they do not throw,
            # but they can also return a positive error code if they need to.
            if rc is not None:
                return rc
            return 0
        except Exception as e:
            print(str(e), file=sys.stderr)
            return 1
    return wrapper
| true | true |
f722ca4fb202ed3d7a1aa9eea95479918d1dc441 | 31,292 | py | Python | test/test_extractor.py | thombashi/DataProperty | ed894483882ef81bd7796c2de4a966b548167c65 | [
"MIT"
] | 18 | 2016-02-20T04:16:07.000Z | 2022-01-05T20:24:51.000Z | test/test_extractor.py | thombashi/DataProperty | ed894483882ef81bd7796c2de4a966b548167c65 | [
"MIT"
] | 13 | 2016-03-20T12:11:50.000Z | 2020-01-01T06:43:49.000Z | test/test_extractor.py | thombashi/DataProperty | ed894483882ef81bd7796c2de4a966b548167c65 | [
"MIT"
] | 5 | 2016-05-24T21:09:27.000Z | 2019-12-02T21:00:00.000Z | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import datetime
from decimal import Decimal
import pytest
from typepy import DateTime, RealNumber, String, Typecode
from dataproperty import (
Align,
DataPropertyExtractor,
Format,
LineBreakHandling,
MatrixFormatting,
Preprocessor,
)
from .common import get_strict_level_map
DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5)
nan = float("nan")
inf = float("inf")
@pytest.fixture
def dp_extractor():
    # Fresh extractor per test so per-test configuration does not leak.
    return DataPropertyExtractor()
def datetime_formatter_test(value):
    # Compact "YYYYMMDD HHMMSS" form used by the to_dp_matrix tests.
    return value.strftime("%Y%m%d %H%M%S")
def datetime_formatter_tostr_0(value):
    # ISO-like "YYYY-MM-DD HH:MM:SS[+ZZZZ]" form.
    return value.strftime("%Y-%m-%d %H:%M:%S%z")
def datetime_formatter_tostr_1(value):
    # Slash-separated "YYYY/MM/DD HH:MM:SS" form.
    return value.strftime("%Y/%m/%d %H:%M:%S")
def trans_func_1(v):
    """Test transformation: None -> "", False -> "false", 0 -> 123, else unchanged."""
    if v is None:
        return ""
    if v is False:
        return "false"
    return 123 if v == 0 else v
def trans_func_2(v):
    """Test transformation for chained-transformation cases: 123 -> 321."""
    return 321 if v == 123 else v
def nop(v):
    """Identity transformation."""
    return v
class Test_DataPropertyExtractor_to_dp:
    """Tests for DataPropertyExtractor.to_dp(): scalar-to-DataProperty conversion."""
    @pytest.mark.parametrize(
        ["value", "type_value_map", "is_strict", "expected_value", "expected_typecode"],
        [
            [None, {Typecode.NONE: None}, True, None, Typecode.NONE],
            [None, {Typecode.NONE: "null"}, False, "null", Typecode.STRING],
            [None, {Typecode.NONE: ""}, True, "", Typecode.NULL_STRING],
            [None, {Typecode.NONE: 0}, False, 0, Typecode.INTEGER],
            [inf, {Typecode.INFINITY: "INF_1"}, False, "INF_1", Typecode.STRING],
            [inf, {Typecode.INFINITY: "INF_2"}, True, "INF_2", Typecode.STRING],
            [inf, {Typecode.INFINITY: None}, True, None, Typecode.NONE],
            ["inf", {Typecode.INFINITY: "INF_3"}, False, "INF_3", Typecode.STRING],
            ["inf", {Typecode.INFINITY: "INF_4"}, True, "inf", Typecode.STRING],
            ["inf", {Typecode.INFINITY: inf}, False, Decimal("Infinity"), Typecode.INFINITY],
            [nan, {Typecode.NAN: "NAN_1"}, False, "NAN_1", Typecode.STRING],
            [nan, {Typecode.NAN: "NAN_2"}, True, "NAN_2", Typecode.STRING],
            [nan, {Typecode.NAN: None}, True, None, Typecode.NONE],
            ["nan", {Typecode.NAN: "NAN_4"}, False, "NAN_4", Typecode.STRING],
            ["nan", {Typecode.NAN: "NAN_5"}, True, "nan", Typecode.STRING],
        ],
    )
    def test_normal_type_value_map(
        self, dp_extractor, value, type_value_map, is_strict, expected_value, expected_typecode
    ):
        """type_value_map replaces NONE/INFINITY/NAN values, honoring strictness."""
        dp_extractor.type_value_map = type_value_map
        dp_extractor.strict_level_map = get_strict_level_map(is_strict)
        dp = dp_extractor.to_dp(value)
        assert dp.data == expected_value
        assert dp.typecode == expected_typecode
        assert isinstance(dp.to_str(), str)
    @pytest.mark.parametrize(
        ["value", "datetime_formatter", "datetime_format_str", "is_strict", "expected"],
        [
            [DATATIME_DATA, datetime_formatter_tostr_0, "s", False, "2017-01-02 03:04:05"],
            ["2017-01-01 00:00:00", datetime_formatter_tostr_1, "s", False, "2017/01/01 00:00:00"],
            [
                "2017-01-01 00:00:00",
                None,
                "%Y-%m-%dT%H:%M:%S",
                False,
                datetime.datetime(2017, 1, 1, 0, 0, 0),
            ],
            ["2017-01-01 00:00:00", None, "s", True, "2017-01-01 00:00:00"],
        ],
    )
    def test_normal_datetime(
        self, dp_extractor, value, datetime_formatter, datetime_format_str, is_strict, expected
    ):
        """datetime_formatter / datetime_format_str control datetime rendering."""
        dp_extractor.datetime_formatter = datetime_formatter
        dp_extractor.datetime_format_str = datetime_format_str
        dp_extractor.strict_level_map = get_strict_level_map(is_strict)
        dp = dp_extractor.to_dp(value)
        assert dp.data == expected
    @pytest.mark.parametrize(
        ["value", "type_hint", "trans_func", "expected"],
        [
            [1, String, nop, "1"],
            [0, String, nop, "0"],
            [None, String, nop, "None"],
            [0, String, trans_func_1, "123"],
            [False, String, trans_func_1, "false"],
            [None, String, trans_func_1, ""],
        ],
    )
    def test_normal_type_hint(self, dp_extractor, value, type_hint, trans_func, expected):
        """A registered trans_func is applied before the type_hint conversion."""
        dp_extractor.register_trans_func(trans_func)
        # Exercises the private (name-mangled) implementation directly.
        dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
        assert dp.data == expected
    @pytest.mark.parametrize(
        ["value", "type_hint", "trans_funcs", "expected"],
        [
            [0, String, [trans_func_2, trans_func_1], "321"],
            [0, String, [trans_func_1, trans_func_2], "123"],
        ],
    )
    def test_normal_trans_funcs(self, dp_extractor, value, type_hint, trans_funcs, expected):
        """Multiple trans funcs chain; registration order determines the result."""
        for trans_func in trans_funcs:
            dp_extractor.register_trans_func(trans_func)
        dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
        assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_quoting_flags:
    """Tests for DataPropertyExtractor.to_dp() with quoting enabled for every type."""
    # Quote every typecode so to_dp() wraps bare values in double quotes.
    # Bug fix: the original dict listed Typecode.REAL_NUMBER twice; the
    # duplicate key was silently overwritten and has been removed.
    ALWAYS_QUOTE_FLAG_MAP = {
        Typecode.NONE: True,
        Typecode.INTEGER: True,
        Typecode.REAL_NUMBER: True,
        Typecode.STRING: True,
        Typecode.NULL_STRING: True,
        Typecode.DATETIME: True,
        Typecode.NAN: True,
        Typecode.BOOL: True,
    }
    @pytest.mark.parametrize(
        ["value", "quoting_flags", "expected"],
        [
            ["string", ALWAYS_QUOTE_FLAG_MAP, '"string"'],
            ['"string"', ALWAYS_QUOTE_FLAG_MAP, '"string"'],
            [' "123"', ALWAYS_QUOTE_FLAG_MAP, ' "123"'],
            ['"string" ', ALWAYS_QUOTE_FLAG_MAP, '"string" '],
            [' "12 345" ', ALWAYS_QUOTE_FLAG_MAP, ' "12 345" '],
        ],
    )
    def test_normal_always_quote(self, dp_extractor, value, quoting_flags, expected):
        """Already-quoted or padded values are left as-is; bare values get quoted."""
        dp_extractor.quoting_flags = quoting_flags
        dp = dp_extractor.to_dp(value)
        assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_matrix:
    """Tests for DataPropertyExtractor.to_dp_matrix(): matrix conversion."""
    @pytest.mark.parametrize(
        ["value"],
        [
            [
                [
                    ["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
                    ["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
                ]
            ]
        ],
    )
    def test_smoke(self, dp_extractor, value):
        """Multi-byte (CJK) input converts without raising."""
        assert len(list(dp_extractor.to_dp_matrix(value))) > 0
    @pytest.mark.parametrize(
        ["value", "type_value_map", "datetime_formatter"],
        [
            [
                [[None, "1"], [1.1, "a"], [nan, inf], ["false", DATATIME_DATA]],
                {Typecode.NONE: "null", Typecode.INFINITY: "INFINITY", Typecode.NAN: "NAN"},
                datetime_formatter_test,
            ]
        ],
    )
    def test_normal(self, dp_extractor, value, type_value_map, datetime_formatter):
        """Every cell gets the expected data/typecode/align/precision/format.

        Refactored: the original repeated an identical six-assert stanza for
        each of the eight cells; the expectations now live in one table.
        """
        dp_extractor.type_value_map = type_value_map
        dp_extractor.datetime_formatter = datetime_formatter
        dp_matrix = list(dp_extractor.to_dp_matrix(dp_extractor.to_dp_matrix(value)))
        assert len(dp_matrix) == 4
        # (data, typecode, align, decimal_places, format_str) per cell.
        expected_matrix = [
            [
                ("null", Typecode.STRING, Align.LEFT, None, "{:s}"),
                (1, Typecode.INTEGER, Align.RIGHT, 0, "{:d}"),
            ],
            [
                (Decimal("1.1"), Typecode.REAL_NUMBER, Align.RIGHT, 1, "{:.1f}"),
                ("a", Typecode.STRING, Align.LEFT, None, "{:s}"),
            ],
            [
                ("NAN", Typecode.STRING, Align.LEFT, None, "{:s}"),
                ("INFINITY", Typecode.STRING, Align.LEFT, None, "{:s}"),
            ],
            [
                ("false", Typecode.STRING, Align.LEFT, None, "{:s}"),
                ("20170102 030405", Typecode.STRING, Align.LEFT, None, "{:s}"),
            ],
        ]
        for dp_row, expected_row in zip(dp_matrix, expected_matrix):
            for dp, (data, typecode, align, decimal_places, format_str) in zip(
                dp_row, expected_row
            ):
                assert dp.data == data
                assert dp.typecode == typecode
                assert dp.align.align_code == align.align_code
                assert dp.align.align_string == align.align_string
                if decimal_places is None:
                    assert dp.decimal_places is None
                else:
                    assert dp.decimal_places == decimal_places
                assert dp.format_str == format_str
    @pytest.mark.parametrize(["value", "expected"], [[None, []], [[], []], [(), []]])
    def test_empty(self, dp_extractor, value, expected):
        """None and empty sequences convert to an empty matrix."""
        assert dp_extractor.to_dp_matrix(value) == expected
class Test_DataPropertyExtractor_to_dp_list:
    """Tests for DataPropertyExtractor.to_dp_list(): sequence conversion."""
    @pytest.mark.parametrize(
        ["value", "float_type"], [[[0.1, Decimal("1.1")], float], [[0.1, Decimal("1.1")], Decimal]]
    )
    def test_normal_float(self, dp_extractor, value, float_type):
        """float_type controls which concrete type real numbers are stored as."""
        dp_extractor.float_type = float_type
        dp_list = dp_extractor.to_dp_list(value)
        for dp in dp_list:
            assert isinstance(dp.data, float_type)
    @pytest.mark.parametrize(
        ["value", "type_hint", "expected_list"],
        [
            [
                ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
                None,
                [Typecode.STRING, Typecode.DATETIME],
            ],
            [
                ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
                DateTime,
                [Typecode.DATETIME, Typecode.DATETIME],
            ],
        ],
    )
    def test_normal_type_hint(self, dp_extractor, value, type_hint, expected_list):
        """default_type_hint forces datetime parsing of string values."""
        dp_extractor.default_type_hint = type_hint
        dp_list = dp_extractor.to_dp_list(value)
        for dp, expected in zip(dp_list, expected_list):
            assert dp.typecode == expected
    @pytest.mark.parametrize(
        ["value", "strip_str_header", "strip_str_value"],
        [
            [['"1"', '"-1.1"', '"abc"'], "", '"', [1, Decimal("-1.1"), "abc"]],
            [['"1"', '"-1.1"', '"abc"'], '"', "", ['"1"', '"-1.1"', '"abc"']],
            [['"1"', '"-1.1"', '"abc"'], None, None, ['"1"', '"-1.1"', '"abc"']],
        ],
    )
    def test_normal_strip_str(
        self, dp_extractor, value, strip_str_header, strip_str_value, expected
    ):
        """Preprocessor strip_str removes the given characters from values only."""
        dp_extractor.strip_str_header = strip_str_header
        dp_extractor.preprocessor = Preprocessor(strip_str=strip_str_value)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, expected_value in zip(dp_list, expected):
            assert dp.data == expected_value
        # The same stripping also applies when converting via to_dp_matrix.
        dp_matrix = dp_extractor.to_dp_matrix([value])
        for dp, expected_value in zip(dp_matrix[0], expected):
            assert dp.data == expected_value
    @pytest.mark.parametrize(
        ["value", "line_break_handling", "expected"],
        [
            [["a\nb", "a\r\nb"], LineBreakHandling.NOP, ["a\nb", "a\r\nb"]],
            [["a\nb", "a\r\nb"], LineBreakHandling.REPLACE, ["a b", "a b"]],
            [["a\nb", "a\r\nb"], LineBreakHandling.ESCAPE, ["a\\nb", "a\\r\\nb"]],
        ],
    )
    def test_normal_line_break_handling(self, dp_extractor, value, line_break_handling, expected):
        """Line breaks are kept, replaced with a space, or backslash-escaped."""
        dp_extractor.preprocessor = Preprocessor(line_break_handling=line_break_handling)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value
    @pytest.mark.parametrize(
        ["value", "line_break_handling", "line_break_repl", "expected"],
        [
            [["a\nb", "a\r\nb"], LineBreakHandling.NOP, "<br>", ["a\nb", "a\r\nb"]],
            [
                ["a\nb", "a\r\nb", "a\r\n\nb"],
                LineBreakHandling.REPLACE,
                "<br>",
                ["a<br>b", "a<br>b", "a<br><br>b"],
            ],
        ],
    )
    def test_normal_line_break_repl(
        self, dp_extractor, value, line_break_handling, line_break_repl, expected
    ):
        """line_break_repl substitutes each line break when REPLACE is active."""
        dp_extractor.preprocessor = Preprocessor(
            line_break_handling=line_break_handling, line_break_repl=line_break_repl
        )
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
    @pytest.mark.parametrize(
        ["value", "escape_formula_injection", "expected"],
        [
            [
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
                True,
                ["a+b", "'=a+b", "'-a+b", "'+a+b", "'@a+b"],
            ],
            [
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
                False,
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
            ],
        ],
    )
    def test_normal_escape_formula_injection(
        self, dp_extractor, value, escape_formula_injection, expected
    ):
        """Values starting with =,-,+,@ get a leading apostrophe when enabled."""
        dp_extractor.preprocessor = Preprocessor(
            is_escape_formula_injection=escape_formula_injection
        )
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
    @pytest.mark.parametrize(
        ["value", "expected"],
        [[[0, None], [0, None]]],
    )
    def test_exception_escape_formula_injection(self, dp_extractor, value, expected):
        """Non-string values pass through the formula-injection escape unchanged."""
        dp_extractor.preprocessor = Preprocessor(is_escape_formula_injection=True)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
class Test_DataPropertyExtractor_to_column_dp_list:
    """Tests for DataPropertyExtractor.to_column_dp_list(): per-column properties.

    Refactored: the original repeated an identical seven-assert stanza for
    every column; the shared checks now live in the _assert_column_dp helper
    and per-test expectation tables.  Test method names are unchanged.
    """
    TEST_DATA_MATRIX = [
        [1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)],
        [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"],
        [3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"],
    ]
    TEST_DATA_MATRIX_TUPLE = (
        (1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)),
        (2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"),
        (3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"),
    )
    @staticmethod
    def _assert_column_dp(
        dp, col_idx, typecode, align, ascii_char_width, decimal_places, format_str=None
    ):
        """Assert the commonly-checked attributes of one column DataProperty."""
        assert dp.column_index == col_idx
        assert dp.typecode == typecode
        assert dp.align.align_code == align.align_code
        assert dp.align.align_string == align.align_string
        assert dp.ascii_char_width == ascii_char_width
        if decimal_places is None:
            assert dp.decimal_places is None
        else:
            assert dp.decimal_places == decimal_places
        if format_str is not None:
            assert dp.format_str == format_str
    @pytest.mark.parametrize(
        ["max_workers", "headers", "value"],
        [
            [1, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [4, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [None, None, TEST_DATA_MATRIX],
            [None, [], TEST_DATA_MATRIX],
            [
                None,
                ("i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"),
                TEST_DATA_MATRIX_TUPLE,
            ],
        ],
    )
    def test_normal_default(self, dp_extractor, max_workers, headers, value):
        """Default settings: each column gets the expected type/align/width/format."""
        dp_extractor.max_workers = max_workers
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 9
        # (typecode, align, ascii_char_width, decimal_places, format_str)
        expected_list = [
            (Typecode.INTEGER, Align.RIGHT, 1, 0, "{:d}"),
            (Typecode.REAL_NUMBER, Align.RIGHT, 4, 2, "{:.2f}"),
            (Typecode.STRING, Align.LEFT, 4, None, "{:s}"),
            (Typecode.REAL_NUMBER, Align.RIGHT, 4, 1, "{:.1f}"),
            (Typecode.STRING, Align.LEFT, 3, 1, "{:s}"),
            (Typecode.BOOL, Align.LEFT, 5, None, "{}"),
            (Typecode.INFINITY, Align.LEFT, 8, None, "{:f}"),
            (Typecode.NAN, Align.LEFT, 3, None, "{:f}"),
            (Typecode.STRING, Align.LEFT, 24, None, "{:s}"),
        ]
        for col_idx, (dp, expected) in enumerate(zip(col_dp_list, expected_list)):
            self._assert_column_dp(dp, col_idx, *expected)
        assert str(col_dp_list[0]) == (
            "column=0, type=INTEGER, align=right, "
            "ascii_width=1, bit_len=2, int_digits=1, decimal_places=0"
        )
    @pytest.mark.parametrize(
        ["headers", "value"],
        [
            [
                ["i", "f"],
                [
                    [1234, 1234.5],
                    [1234567, 34.5],
                ],
            ],
            [
                [],
                [
                    [1234, 1234.5],
                    [1234567, 34.5],
                ],
            ],
        ],
    )
    def test_normal_format_str(self, dp_extractor, headers, value):
        """THOUSAND_SEPARATOR format flags produce comma-grouped format strings."""
        dp_extractor.format_flags_list = [Format.THOUSAND_SEPARATOR, Format.THOUSAND_SEPARATOR]
        dp_extractor.max_workers = 1
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        # (typecode, format_str, ascii_char_width)
        expected_list = [
            (Typecode.INTEGER, "{:,d}", 9),
            (Typecode.REAL_NUMBER, "{:,.1f}", 7),
        ]
        for col_idx, (dp, (typecode, format_str, width)) in enumerate(
            zip(col_dp_list, expected_list)
        ):
            assert dp.column_index == col_idx
            assert dp.typecode == typecode
            assert dp.format_str == format_str
            assert dp.ascii_char_width == width
    @pytest.mark.parametrize(
        ["headers", "value"],
        [
            [["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [None, TEST_DATA_MATRIX],
            [[], TEST_DATA_MATRIX],
        ],
    )
    def test_normal_not_strict(self, dp_extractor, headers, value):
        """Default (non-strict) typing still resolves the first two columns."""
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 9
        self._assert_column_dp(col_dp_list[0], 0, Typecode.INTEGER, Align.RIGHT, 1, 0, "{:d}")
        self._assert_column_dp(
            col_dp_list[1], 1, Typecode.REAL_NUMBER, Align.RIGHT, 4, 2, "{:.2f}"
        )
    def test_normal_column_type_hints(self, dp_extractor):
        """column_type_hints accepts both type classes and their string names."""
        data_matrix = [
            [1, "1.1", 1, "2017-01-02 03:04:05"],
            [2, "2.2", 0.1, "2017-01-02 03:04:05"],
        ]
        dp_extractor.headers = ["none", "to_float", "to_str", "to_datetime"]
        dp_extractor.column_type_hints = [None, RealNumber, String, DateTime]
        assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
        assert len(col_dp_list) == 4
        assert col_dp_list[0].typecode == Typecode.INTEGER
        assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
        assert col_dp_list[2].typecode == Typecode.STRING
        assert col_dp_list[3].typecode == Typecode.DATETIME
        # String names are normalized to the corresponding type classes.
        dp_extractor.column_type_hints = ["", "float", "str", "datetime"]
        assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
        assert len(col_dp_list) == 4
        assert col_dp_list[0].typecode == Typecode.INTEGER
        assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
        assert col_dp_list[2].typecode == Typecode.STRING
        assert col_dp_list[3].typecode == Typecode.DATETIME
    def test_normal_max_precision(self):
        """max_precision caps decimal_places, via constructor and via setter."""
        extractor = DataPropertyExtractor(max_precision=3)
        extractor.headers = ["i", "f"]
        value = [
            [1234, 0.0000000001],
            [1234567, 34.5],
        ]
        col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        dp = col_dp_list[0]
        assert dp.column_index == 0
        assert dp.typecode == Typecode.INTEGER
        assert dp.decimal_places == 0
        dp = col_dp_list[1]
        assert dp.column_index == 1
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.decimal_places == 3
        # test setter
        extractor.max_precision = 1
        col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        dp = col_dp_list[0]
        assert dp.column_index == 0
        assert dp.typecode == Typecode.INTEGER
        assert dp.decimal_places == 0
        dp = col_dp_list[1]
        assert dp.column_index == 1
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.decimal_places == 1
    def test_normal_nan_inf(self, dp_extractor):
        """Columns of NaN / infinity values resolve to NAN / INFINITY typecodes."""
        dp_extractor.headers = ["n", "i"]
        col_dp_list = dp_extractor.to_column_dp_list(
            dp_extractor.to_dp_matrix([[nan, inf], ["nan", "inf"]])
        )
        assert len(col_dp_list) == 2
        self._assert_column_dp(col_dp_list[0], 0, Typecode.NAN, Align.LEFT, 3, None)
        self._assert_column_dp(col_dp_list[1], 1, Typecode.INFINITY, Align.LEFT, 8, None)
    @pytest.mark.parametrize(["ambiguous_width"], [[2], [1]])
    def test_normal_east_asian_ambiguous_width(self, dp_extractor, ambiguous_width):
        """east_asian_ambiguous_width scales widths of ambiguous-width characters."""
        dp_extractor.headers = ["ascii", "eaa"]
        dp_extractor.east_asian_ambiguous_width = ambiguous_width
        col_dp_list = dp_extractor.to_column_dp_list(
            dp_extractor.to_dp_matrix([["abcdefg", "Øαββ"], ["abcdefghij", "ØØ"]])
        )
        assert len(col_dp_list) == 2
        self._assert_column_dp(col_dp_list[0], 0, Typecode.STRING, Align.LEFT, 10, None)
        self._assert_column_dp(
            col_dp_list[1], 1, Typecode.STRING, Align.LEFT, 4 * ambiguous_width, None
        )
    def test_normal_empty_value(self, dp_extractor):
        """With no data rows, header lengths alone determine the column widths."""
        dp_extractor.headers = ["a", "22", "cccc"]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(None))
        for col_idx, width in enumerate([1, 2, 4]):
            self._assert_column_dp(
                col_dp_list[col_idx], col_idx, Typecode.NONE, Align.LEFT, width, None, "{}"
            )
class Test_DataPropertyExtractor_matrix_formatting:
    """Tests for the matrix_formatting policy on non-uniform row lengths."""
    # Uniform 3-column matrix.
    TEST_DATA_MATRIX_NORMAL_COL3 = [["a", 0, "aa"], ["b", 1, "bb"], ["c", 2, "ccc"]]
    # Rows of differing lengths (2, 3, 4, and 1 columns).
    TEST_DATA_MATRIX_NOUNIFORM_COL1 = [["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1], ["d"]]
    @pytest.mark.parametrize(
        ["headers", "value", "matrix_formatting", "expected"],
        [
            [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.TRIM, 1],
            [["a", "b"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.TRIM, 2],
            [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.FILL_NONE, 4],
            [["a", "b", "c"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.FILL_NONE, 3],
            [["a", "b", "c"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 3],
            [
                ["a", "b", "c", "d", "e"],
                TEST_DATA_MATRIX_NOUNIFORM_COL1,
                MatrixFormatting.HEADER_ALIGNED,
                5,
            ],
        ],
    )
    def test_normal_matrix_formatting(
        self, dp_extractor, headers, value, matrix_formatting, expected
    ):
        """TRIM/FILL_NONE/HEADER_ALIGNED yield the expected number of columns."""
        dp_extractor.headers = headers
        dp_extractor.matrix_formatting = matrix_formatting
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == expected
    @pytest.mark.parametrize(
        ["headers", "value", "matrix_formatting", "expected"],
        [
            [
                ["i", "f", "s", "if", "mix"],
                TEST_DATA_MATRIX_NOUNIFORM_COL1,
                MatrixFormatting.EXCEPTION,
                ValueError,
            ]
        ],
    )
    def test_exception_matrix_formatting(
        self, dp_extractor, headers, value, matrix_formatting, expected
    ):
        """EXCEPTION mode raises ValueError on non-uniform row lengths."""
        dp_extractor.headers = headers
        dp_extractor.matrix_formatting = matrix_formatting
        with pytest.raises(expected):
            dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
class Test_DataPropertyExtractor_update_preprocessor:
    """Tests for DataPropertyExtractor.update_preprocessor()."""
    def test_normal(self, dp_extractor):
        """update_preprocessor() overrides every default Preprocessor setting."""
        # Defaults before the update.
        assert dp_extractor.preprocessor.strip_str is None
        assert dp_extractor.preprocessor.replace_tabs_with_spaces is True
        assert dp_extractor.preprocessor.tab_length == 2
        assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.NOP
        assert dp_extractor.preprocessor.line_break_repl == " "
        assert dp_extractor.preprocessor.is_escape_html_tag is False
        assert dp_extractor.preprocessor.is_escape_formula_injection is False
        dp_extractor.update_preprocessor(
            strip_str='"',
            replace_tabs_with_spaces=False,
            tab_length=4,
            line_break_handling=LineBreakHandling.REPLACE,
            line_break_repl="<br>",
            is_escape_html_tag=True,
            is_escape_formula_injection=True,
        )
        # Every setting reflects the values just passed in.
        assert dp_extractor.preprocessor.strip_str == '"'
        assert dp_extractor.preprocessor.replace_tabs_with_spaces is False
        assert dp_extractor.preprocessor.tab_length == 4
        assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.REPLACE
        assert dp_extractor.preprocessor.line_break_repl == "<br>"
        assert dp_extractor.preprocessor.is_escape_html_tag is True
        assert dp_extractor.preprocessor.is_escape_formula_injection is True
| 37.565426 | 99 | 0.601975 |
import datetime
from decimal import Decimal
import pytest
from typepy import DateTime, RealNumber, String, Typecode
from dataproperty import (
Align,
DataPropertyExtractor,
Format,
LineBreakHandling,
MatrixFormatting,
Preprocessor,
)
from .common import get_strict_level_map
DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5)
nan = float("nan")
inf = float("inf")
@pytest.fixture
def dp_extractor():
return DataPropertyExtractor()
def datetime_formatter_test(value):
return value.strftime("%Y%m%d %H%M%S")
def datetime_formatter_tostr_0(value):
return value.strftime("%Y-%m-%d %H:%M:%S%z")
def datetime_formatter_tostr_1(value):
return value.strftime("%Y/%m/%d %H:%M:%S")
def trans_func_1(v):
if v is None:
return ""
if v is False:
return "false"
if v == 0:
return 123
return v
def trans_func_2(v):
if v == 123:
return 321
return v
def nop(v):
return v
class Test_DataPropertyExtractor_to_dp:
@pytest.mark.parametrize(
["value", "type_value_map", "is_strict", "expected_value", "expected_typecode"],
[
[None, {Typecode.NONE: None}, True, None, Typecode.NONE],
[None, {Typecode.NONE: "null"}, False, "null", Typecode.STRING],
[None, {Typecode.NONE: ""}, True, "", Typecode.NULL_STRING],
[None, {Typecode.NONE: 0}, False, 0, Typecode.INTEGER],
[inf, {Typecode.INFINITY: "INF_1"}, False, "INF_1", Typecode.STRING],
[inf, {Typecode.INFINITY: "INF_2"}, True, "INF_2", Typecode.STRING],
[inf, {Typecode.INFINITY: None}, True, None, Typecode.NONE],
["inf", {Typecode.INFINITY: "INF_3"}, False, "INF_3", Typecode.STRING],
["inf", {Typecode.INFINITY: "INF_4"}, True, "inf", Typecode.STRING],
["inf", {Typecode.INFINITY: inf}, False, Decimal("Infinity"), Typecode.INFINITY],
[nan, {Typecode.NAN: "NAN_1"}, False, "NAN_1", Typecode.STRING],
[nan, {Typecode.NAN: "NAN_2"}, True, "NAN_2", Typecode.STRING],
[nan, {Typecode.NAN: None}, True, None, Typecode.NONE],
["nan", {Typecode.NAN: "NAN_4"}, False, "NAN_4", Typecode.STRING],
["nan", {Typecode.NAN: "NAN_5"}, True, "nan", Typecode.STRING],
],
)
def test_normal_type_value_map(
self, dp_extractor, value, type_value_map, is_strict, expected_value, expected_typecode
):
dp_extractor.type_value_map = type_value_map
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected_value
assert dp.typecode == expected_typecode
assert isinstance(dp.to_str(), str)
@pytest.mark.parametrize(
["value", "datetime_formatter", "datetime_format_str", "is_strict", "expected"],
[
[DATATIME_DATA, datetime_formatter_tostr_0, "s", False, "2017-01-02 03:04:05"],
["2017-01-01 00:00:00", datetime_formatter_tostr_1, "s", False, "2017/01/01 00:00:00"],
[
"2017-01-01 00:00:00",
None,
"%Y-%m-%dT%H:%M:%S",
False,
datetime.datetime(2017, 1, 1, 0, 0, 0),
],
["2017-01-01 00:00:00", None, "s", True, "2017-01-01 00:00:00"],
],
)
def test_normal_datetime(
self, dp_extractor, value, datetime_formatter, datetime_format_str, is_strict, expected
):
dp_extractor.datetime_formatter = datetime_formatter
dp_extractor.datetime_format_str = datetime_format_str
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_func", "expected"],
[
[1, String, nop, "1"],
[0, String, nop, "0"],
[None, String, nop, "None"],
[0, String, trans_func_1, "123"],
[False, String, trans_func_1, "false"],
[None, String, trans_func_1, ""],
],
)
def test_normal_type_hint(self, dp_extractor, value, type_hint, trans_func, expected):
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_funcs", "expected"],
[
[0, String, [trans_func_2, trans_func_1], "321"],
[0, String, [trans_func_1, trans_func_2], "123"],
],
)
def test_normal_trans_funcs(self, dp_extractor, value, type_hint, trans_funcs, expected):
for trans_func in trans_funcs:
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_quoting_flags:
ALWAYS_QUOTE_FLAG_MAP = {
Typecode.NONE: True,
Typecode.INTEGER: True,
Typecode.REAL_NUMBER: True,
Typecode.STRING: True,
Typecode.NULL_STRING: True,
Typecode.DATETIME: True,
Typecode.REAL_NUMBER: True,
Typecode.NAN: True,
Typecode.BOOL: True,
}
@pytest.mark.parametrize(
["value", "quoting_flags", "expected"],
[
["string", ALWAYS_QUOTE_FLAG_MAP, '"string"'],
['"string"', ALWAYS_QUOTE_FLAG_MAP, '"string"'],
[' "123"', ALWAYS_QUOTE_FLAG_MAP, ' "123"'],
['"string" ', ALWAYS_QUOTE_FLAG_MAP, '"string" '],
[' "12 345" ', ALWAYS_QUOTE_FLAG_MAP, ' "12 345" '],
],
)
def test_normal_always_quote(self, dp_extractor, value, quoting_flags, expected):
dp_extractor.quoting_flags = quoting_flags
dp = dp_extractor.to_dp(value)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_matrix:
    """Tests for DataPropertyExtractor.to_dp_matrix()."""

    @pytest.mark.parametrize(
        ["value"],
        [
            [
                [
                    ["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
                    ["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
                ]
            ]
        ],
    )
    def test_smoke(self, dp_extractor, value):
        # Smoke test: multi-byte (CJK) input must yield a non-empty matrix.
        assert len(list(dp_extractor.to_dp_matrix(value))) > 0
    @pytest.mark.parametrize(
        ["value", "type_value_map", "datetime_formatter"],
        [
            [
                [[None, "1"], [1.1, "a"], [nan, inf], ["false", DATATIME_DATA]],
                {Typecode.NONE: "null", Typecode.INFINITY: "INFINITY", Typecode.NAN: "NAN"},
                datetime_formatter_test,
            ]
        ],
    )
    def test_normal(self, dp_extractor, value, type_value_map, datetime_formatter):
        """Verify per-cell typecode/align/format results after value mapping.

        The matrix is fed through to_dp_matrix() twice on purpose: the output
        of the first pass must be accepted as input for a second pass.
        """
        dp_extractor.type_value_map = type_value_map
        dp_extractor.datetime_formatter = datetime_formatter
        dp_matrix = list(dp_extractor.to_dp_matrix(dp_extractor.to_dp_matrix(value)))
        assert len(dp_matrix) == 4
        # [0][0]: None is remapped to the string "null" by type_value_map.
        dp = dp_matrix[0][0]
        assert dp.data == "null"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # [0][1]: "1" is parsed as an integer.
        dp = dp_matrix[0][1]
        assert dp.data == 1
        assert dp.typecode == Typecode.INTEGER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.decimal_places == 0
        assert dp.format_str == "{:d}"
        # [1][0]: float becomes a Decimal real number with one decimal place.
        dp = dp_matrix[1][0]
        assert dp.data == Decimal("1.1")
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.decimal_places == 1
        assert dp.format_str == "{:.1f}"
        # [1][1]: plain string passes through unchanged.
        dp = dp_matrix[1][1]
        assert dp.data == "a"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # [2][0]: nan remapped to "NAN" by type_value_map.
        dp = dp_matrix[2][0]
        assert dp.data == "NAN"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # [2][1]: inf remapped to "INFINITY" by type_value_map.
        dp = dp_matrix[2][1]
        assert dp.data == "INFINITY"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # [3][0]: "false" stays a string (no implicit bool conversion).
        dp = dp_matrix[3][0]
        assert dp.data == "false"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # [3][1]: datetime rendered via the custom datetime_formatter.
        dp = dp_matrix[3][1]
        assert dp.data == "20170102 030405"
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
    @pytest.mark.parametrize(["value", "expected"], [[None, []], [[], []], [(), []]])
    def test_empty(self, dp_extractor, value, expected):
        # Empty/None inputs must yield an empty matrix, not raise.
        assert dp_extractor.to_dp_matrix(value) == expected
class Test_DataPropertyExtractor_to_dp_list:
    """Tests for DataPropertyExtractor.to_dp_list()."""

    @pytest.mark.parametrize(
        ["value", "float_type"], [[[0.1, Decimal("1.1")], float], [[0.1, Decimal("1.1")], Decimal]]
    )
    def test_normal_float(self, dp_extractor, value, float_type):
        # All real-number values must be converted to the configured float_type.
        dp_extractor.float_type = float_type
        dp_list = dp_extractor.to_dp_list(value)
        for dp in dp_list:
            assert isinstance(dp.data, float_type)
    @pytest.mark.parametrize(
        ["value", "type_hint", "expected_list"],
        [
            [
                ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
                None,
                [Typecode.STRING, Typecode.DATETIME],
            ],
            [
                ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
                DateTime,
                [Typecode.DATETIME, Typecode.DATETIME],
            ],
        ],
    )
    def test_normal_type_hint(self, dp_extractor, value, type_hint, expected_list):
        # With a DateTime default hint, date-like strings parse as DATETIME;
        # without a hint they stay STRING.
        dp_extractor.default_type_hint = type_hint
        dp_list = dp_extractor.to_dp_list(value)
        for dp, expected in zip(dp_list, expected_list):
            assert dp.typecode == expected
    @pytest.mark.parametrize(
        ["value", "strip_str_header", "strip_str_value", "expected"],
        [
            [['"1"', '"-1.1"', '"abc"'], "", '"', [1, Decimal("-1.1"), "abc"]],
            [['"1"', '"-1.1"', '"abc"'], '"', "", ['"1"', '"-1.1"', '"abc"']],
            [['"1"', '"-1.1"', '"abc"'], None, None, ['"1"', '"-1.1"', '"abc"']],
        ],
    )
    def test_normal_strip_str(
        self, dp_extractor, value, strip_str_header, strip_str_value, expected
    ):
        """strip_str on the preprocessor must apply to values (list and matrix),
        while strip_str_header only affects headers."""
        dp_extractor.strip_str_header = strip_str_header
        dp_extractor.preprocessor = Preprocessor(strip_str=strip_str_value)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, expected_value in zip(dp_list, expected):
            assert dp.data == expected_value
        # The same stripping must also apply via to_dp_matrix().
        dp_matrix = dp_extractor.to_dp_matrix([value])
        for dp, expected_value in zip(dp_matrix[0], expected):
            assert dp.data == expected_value
    @pytest.mark.parametrize(
        ["value", "line_break_handling", "expected"],
        [
            [["a\nb", "a\r\nb"], LineBreakHandling.NOP, ["a\nb", "a\r\nb"]],
            [["a\nb", "a\r\nb"], LineBreakHandling.REPLACE, ["a b", "a b"]],
            [["a\nb", "a\r\nb"], LineBreakHandling.ESCAPE, ["a\\nb", "a\\r\\nb"]],
        ],
    )
    def test_normal_line_break_handling(self, dp_extractor, value, line_break_handling, expected):
        # NOP keeps line breaks, REPLACE substitutes the default " ",
        # ESCAPE turns them into literal backslash sequences.
        dp_extractor.preprocessor = Preprocessor(line_break_handling=line_break_handling)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value
    @pytest.mark.parametrize(
        ["value", "line_break_handling", "line_break_repl", "expected"],
        [
            [["a\nb", "a\r\nb"], LineBreakHandling.NOP, "<br>", ["a\nb", "a\r\nb"]],
            [
                ["a\nb", "a\r\nb", "a\r\n\nb"],
                LineBreakHandling.REPLACE,
                "<br>",
                ["a<br>b", "a<br>b", "a<br><br>b"],
            ],
        ],
    )
    def test_normal_line_break_repl(
        self, dp_extractor, value, line_break_handling, line_break_repl, expected
    ):
        # A custom replacement string is used only when handling is REPLACE.
        dp_extractor.preprocessor = Preprocessor(
            line_break_handling=line_break_handling, line_break_repl=line_break_repl
        )
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
    @pytest.mark.parametrize(
        ["value", "escape_formula_injection", "expected"],
        [
            [
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
                True,
                ["a+b", "'=a+b", "'-a+b", "'+a+b", "'@a+b"],
            ],
            [
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
                False,
                ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
            ],
        ],
    )
    def test_normal_escape_formula_injection(
        self, dp_extractor, value, escape_formula_injection, expected
    ):
        # Leading =, -, +, @ (spreadsheet formula triggers) get a leading
        # apostrophe when escaping is enabled.
        dp_extractor.preprocessor = Preprocessor(
            is_escape_formula_injection=escape_formula_injection
        )
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
    @pytest.mark.parametrize(
        ["value", "expected"],
        [[[0, None], [0, None]]],
    )
    def test_exception_escape_formula_injection(self, dp_extractor, value, expected):
        # Non-string values must pass through formula-injection escaping
        # unchanged (and without raising).
        dp_extractor.preprocessor = Preprocessor(is_escape_formula_injection=True)
        dp_list = dp_extractor.to_dp_list(value)
        for dp, value in zip(dp_list, expected):
            assert dp.data == value, value
class Test_DataPropertyExtractor_to_column_dp_list:
    """Tests for DataPropertyExtractor.to_column_dp_list()."""

    # Mixed-type matrix: int, float, str, int/float mix, mixed, bool,
    # infinity-like, nan-like, and datetime-like columns.
    TEST_DATA_MATRIX = [
        [1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)],
        [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"],
        [3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"],
    ]
    # Same data as TEST_DATA_MATRIX, but as tuples (alternate container type).
    TEST_DATA_MATRIX_TUPLE = (
        (1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)),
        (2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"),
        (3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"),
    )
    @pytest.mark.parametrize(
        ["max_workers", "headers", "value"],
        [
            [1, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [4, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [None, None, TEST_DATA_MATRIX],
            [None, [], TEST_DATA_MATRIX],
            [
                None,
                ("i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"),
                TEST_DATA_MATRIX_TUPLE,
            ],
        ],
    )
    def test_normal_default(self, dp_extractor, max_workers, headers, value):
        """Column properties must be identical regardless of worker count,
        header presence, or list-vs-tuple input containers."""
        dp_extractor.max_workers = max_workers
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 9
        # column 0 ("i"): pure integers.
        col_idx = 0
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.INTEGER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.ascii_char_width == 1
        assert dp.decimal_places == 0
        assert dp.format_str == "{:d}"
        assert str(dp) == (
            "column=0, type=INTEGER, align=right, "
            "ascii_width=1, bit_len=2, int_digits=1, decimal_places=0"
        )
        # column 1 ("f"): real numbers, widest has 2 decimal places.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.ascii_char_width == 4
        assert dp.decimal_places == 2
        assert dp.format_str == "{:.2f}"
        # column 2 ("s"): strings, width from the longest value "cccc".
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 4
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
        # column 3 ("if"): int/float mix widens to REAL_NUMBER.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.ascii_char_width == 4
        assert dp.decimal_places == 1
        assert dp.format_str == "{:.1f}"
        # column 4 ("mix"): numbers plus a string collapse to STRING,
        # but decimal_places from the numeric values is retained.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 3
        assert dp.decimal_places == 1
        assert dp.format_str == "{:s}"
        # column 5 ("bool"): width 5 comes from rendering "False".
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.BOOL
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 5
        assert dp.decimal_places is None
        assert dp.format_str == "{}"
        # column 6 ("inf"): inf / "inf" / "infinity" all count as INFINITY.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.INFINITY
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 8
        assert dp.decimal_places is None
        assert dp.format_str == "{:f}"
        # column 7 ("nan"): nan / "nan" / "NAN" all count as NAN.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.NAN
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 3
        assert dp.decimal_places is None
        assert dp.format_str == "{:f}"
        # column 8 ("time"): datetime plus date-like strings collapse to STRING.
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 24
        assert dp.decimal_places is None
        assert dp.format_str == "{:s}"
    @pytest.mark.parametrize(
        ["headers", "value"],
        [
            [
                ["i", "f"],
                [
                    [1234, 1234.5],
                    [1234567, 34.5],
                ],
            ],
            [
                [],
                [
                    [1234, 1234.5],
                    [1234567, 34.5],
                ],
            ],
        ],
    )
    def test_normal_format_str(self, dp_extractor, headers, value):
        # THOUSAND_SEPARATOR must show up in format_str and widen the column.
        dp_extractor.format_flags_list = [Format.THOUSAND_SEPARATOR, Format.THOUSAND_SEPARATOR]
        dp_extractor.max_workers = 1
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        col_idx = 0
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.INTEGER
        assert dp.format_str == "{:,d}"
        assert dp.ascii_char_width == 9
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.format_str == "{:,.1f}"
        assert dp.ascii_char_width == 7
    @pytest.mark.parametrize(
        ["headers", "value"],
        [
            [["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
            [None, TEST_DATA_MATRIX],
            [[], TEST_DATA_MATRIX],
        ],
    )
    def test_normal_not_strict(self, dp_extractor, headers, value):
        # Spot-check the first two columns with the default (non-strict) settings.
        dp_extractor.headers = headers
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 9
        dp = col_dp_list[0]
        assert dp.typecode == Typecode.INTEGER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.ascii_char_width == 1
        assert dp.decimal_places == 0
        assert dp.format_str == "{:d}"
        dp = col_dp_list[1]
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.align.align_code == Align.RIGHT.align_code
        assert dp.align.align_string == Align.RIGHT.align_string
        assert dp.ascii_char_width == 4
        assert dp.decimal_places == 2
        assert dp.format_str == "{:.2f}"
    def test_normal_column_type_hints(self, dp_extractor):
        """Type hints may be given as classes or as their string names;
        both must normalize to the same hint list and typecodes."""
        data_matrix = [
            [1, "1.1", 1, "2017-01-02 03:04:05"],
            [2, "2.2", 0.1, "2017-01-02 03:04:05"],
        ]
        dp_extractor.headers = ["none", "to_float", "to_str", "to_datetime"]
        dp_extractor.column_type_hints = [None, RealNumber, String, DateTime]
        assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
        assert len(col_dp_list) == 4
        assert col_dp_list[0].typecode == Typecode.INTEGER
        assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
        assert col_dp_list[2].typecode == Typecode.STRING
        assert col_dp_list[3].typecode == Typecode.DATETIME
        # Same hints expressed as strings ("" meaning no hint).
        dp_extractor.column_type_hints = ["", "float", "str", "datetime"]
        assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
        assert len(col_dp_list) == 4
        assert col_dp_list[0].typecode == Typecode.INTEGER
        assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
        assert col_dp_list[2].typecode == Typecode.STRING
        assert col_dp_list[3].typecode == Typecode.DATETIME
    def test_normal_max_precision(self):
        """max_precision caps decimal_places, both via the constructor
        and when reassigned on an existing extractor."""
        extractor = DataPropertyExtractor(max_precision=3)
        extractor.headers = ["i", "f"]
        value = [
            [1234, 0.0000000001],
            [1234567, 34.5],
        ]
        col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        col_idx = 0
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.INTEGER
        assert dp.decimal_places == 0
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.decimal_places == 3
        # Lowering the cap afterwards must take effect on the next extraction.
        extractor.max_precision = 1
        col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
        assert len(col_dp_list) == 2
        col_idx = 0
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.INTEGER
        assert dp.decimal_places == 0
        col_idx += 1
        dp = col_dp_list[col_idx]
        assert dp.column_index == col_idx
        assert dp.typecode == Typecode.REAL_NUMBER
        assert dp.decimal_places == 1
    def test_normal_nan_inf(self, dp_extractor):
        # Mixed float/str spellings of nan and inf resolve to NAN/INFINITY columns.
        dp_extractor.headers = ["n", "i"]
        col_dp_list = dp_extractor.to_column_dp_list(
            dp_extractor.to_dp_matrix([[nan, inf], ["nan", "inf"]])
        )
        assert len(col_dp_list) == 2
        dp = col_dp_list[0]
        assert dp.typecode == Typecode.NAN
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 3
        assert dp.decimal_places is None
        dp = col_dp_list[1]
        assert dp.typecode == Typecode.INFINITY
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 8
        assert dp.decimal_places is None
    @pytest.mark.parametrize(["ambiguous_width"], [[2], [1]])
    def test_normal_east_asian_ambiguous_width(self, dp_extractor, ambiguous_width):
        # East-Asian-ambiguous characters (e.g. Ø, α, β) count as
        # ambiguous_width columns each; plain ASCII is unaffected.
        dp_extractor.headers = ["ascii", "eaa"]
        dp_extractor.east_asian_ambiguous_width = ambiguous_width
        col_dp_list = dp_extractor.to_column_dp_list(
            dp_extractor.to_dp_matrix([["abcdefg", "Øαββ"], ["abcdefghij", "ØØ"]])
        )
        assert len(col_dp_list) == 2
        dp = col_dp_list[0]
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 10
        assert dp.decimal_places is None
        dp = col_dp_list[1]
        assert dp.typecode == Typecode.STRING
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 4 * ambiguous_width
        assert dp.decimal_places is None
    def test_normal_empty_value(self, dp_extractor):
        # With no data rows, column width falls back to the header width.
        dp_extractor.headers = ["a", "22", "cccc"]
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(None))
        dp = col_dp_list[0]
        assert dp.typecode == Typecode.NONE
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 1
        assert dp.decimal_places is None
        assert dp.format_str == "{}"
        dp = col_dp_list[1]
        assert dp.typecode == Typecode.NONE
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 2
        assert dp.decimal_places is None
        assert dp.format_str == "{}"
        dp = col_dp_list[2]
        assert dp.typecode == Typecode.NONE
        assert dp.align.align_code == Align.LEFT.align_code
        assert dp.align.align_string == Align.LEFT.align_string
        assert dp.ascii_char_width == 4
        assert dp.decimal_places is None
        assert dp.format_str == "{}"
class Test_DataPropertyExtractor_matrix_formatting:
    """Tests for the matrix_formatting policy on non-uniform row lengths."""

    # Uniform 3-column matrix.
    TEST_DATA_MATRIX_NORMAL_COL3 = [["a", 0, "aa"], ["b", 1, "bb"], ["c", 2, "ccc"]]
    # Rows of length 2, 3, 4 and 1 — deliberately non-uniform.
    TEST_DATA_MATRIX_NOUNIFORM_COL1 = [["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1], ["d"]]
    @pytest.mark.parametrize(
        ["headers", "value", "matrix_formatting", "expected"],
        [
            [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.TRIM, 1],
            [["a", "b"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.TRIM, 2],
            [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.FILL_NONE, 4],
            [["a", "b", "c"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.FILL_NONE, 3],
            [["a", "b", "c"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 3],
            [
                ["a", "b", "c", "d", "e"],
                TEST_DATA_MATRIX_NOUNIFORM_COL1,
                MatrixFormatting.HEADER_ALIGNED,
                5,
            ],
        ],
    )
    def test_normal_matrix_formatting(
        self, dp_extractor, headers, value, matrix_formatting, expected
    ):
        # TRIM keeps the shortest row / header length, FILL_NONE pads to the
        # longest row, HEADER_ALIGNED matches the header length exactly.
        dp_extractor.headers = headers
        dp_extractor.matrix_formatting = matrix_formatting
        col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
        assert len(col_dp_list) == expected
    @pytest.mark.parametrize(
        ["headers", "value", "matrix_formatting", "expected"],
        [
            [
                ["i", "f", "s", "if", "mix"],
                TEST_DATA_MATRIX_NOUNIFORM_COL1,
                MatrixFormatting.EXCEPTION,
                ValueError,
            ]
        ],
    )
    def test_exception_matrix_formatting(
        self, dp_extractor, headers, value, matrix_formatting, expected
    ):
        # EXCEPTION mode must raise ValueError on non-uniform row lengths.
        dp_extractor.headers = headers
        dp_extractor.matrix_formatting = matrix_formatting
        with pytest.raises(expected):
            dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
class Test_DataPropertyExtractor_update_preprocessor:
    """Verify update_preprocessor() overrides every preprocessor setting."""

    def test_normal(self, dp_extractor):
        # Expected factory defaults before any update.
        defaults = {
            "strip_str": None,
            "replace_tabs_with_spaces": True,
            "tab_length": 2,
            "line_break_handling": LineBreakHandling.NOP,
            "line_break_repl": " ",
            "is_escape_html_tag": False,
            "is_escape_formula_injection": False,
        }
        for attr, expected in defaults.items():
            assert getattr(dp_extractor.preprocessor, attr) == expected
        # Override every setting in one call and re-check each attribute.
        overrides = {
            "strip_str": '"',
            "replace_tabs_with_spaces": False,
            "tab_length": 4,
            "line_break_handling": LineBreakHandling.REPLACE,
            "line_break_repl": "<br>",
            "is_escape_html_tag": True,
            "is_escape_formula_injection": True,
        }
        dp_extractor.update_preprocessor(**overrides)
        for attr, expected in overrides.items():
            assert getattr(dp_extractor.preprocessor, attr) == expected
| true | true |
f722cad3bd9d74b9d520ccea28c51f10673e99e8 | 669 | py | Python | answers/x_9_9.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | null | null | null | answers/x_9_9.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | 4 | 2021-09-23T03:19:52.000Z | 2021-11-13T10:38:21.000Z | answers/x_9_9.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | null | null | null | # x_9_9
#
# 「prefecture.csv」を利用して都道府県番号を答える機能をチャットボットに追加してください
import csv
chatbot = {
'おはよう': 'おはようございます',
'おやすみ': 'おやすみなさい',
'今日は何日ですか': '2021年11月14日です',
'今日の天気は': '雨です',
'何か歌って': 'もーもたろさんももたろさん',
}
message = input('何か話しかけてください:')
if message == '都道府県番号を教えて':
prefecture = input('何県の都道府県番号ですか?:')
file = open('./files/prefecture.csv', encoding="utf-8")
reader = csv.DictReader(file)
for row in reader:
if row['都道府県名'] == prefecture:
print(prefecture + 'の都道府県番号は' + str(row['都道府県番号']) + 'です')
break
file.close()
elif message in chatbot:
print(chatbot[message])
else:
print('わかりません')
| 20.272727 | 70 | 0.61136 |
import csv
chatbot = {
'おはよう': 'おはようございます',
'おやすみ': 'おやすみなさい',
'今日は何日ですか': '2021年11月14日です',
'今日の天気は': '雨です',
'何か歌って': 'もーもたろさんももたろさん',
}
message = input('何か話しかけてください:')
if message == '都道府県番号を教えて':
prefecture = input('何県の都道府県番号ですか?:')
file = open('./files/prefecture.csv', encoding="utf-8")
reader = csv.DictReader(file)
for row in reader:
if row['都道府県名'] == prefecture:
print(prefecture + 'の都道府県番号は' + str(row['都道府県番号']) + 'です')
break
file.close()
elif message in chatbot:
print(chatbot[message])
else:
print('わかりません')
| true | true |
f722cbb15de2d0bb821479a2b795997c45034f00 | 2,219 | py | Python | openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_TABLE/Opengauss_Function_System_Table_Case0033.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_TABLE/Opengauss_Function_System_Table_Case0033.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_TABLE/Opengauss_Function_System_Table_Case0033.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 系统表
Case Name : 测试系统表PG_FOREIGN_TABLE字段与数据类型
Description :
1.查看系统表PG_FOREIGN_TABLE的表结构
2.该表字段与对应字段数据类型是否正确
Expect :
1.查看系统表PG_FOREIGN_TABLE的表结构成功
2.该表字段与字段数据类型对应正确
History :
"""
import sys
import unittest
from yat.test import Node
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class IndexFileDamaged(unittest.TestCase):
    """Check the column names and types of system table PG_FOREIGN_TABLE."""

    def setUp(self):
        """Prepare the DB node, helpers, and the expected table structure."""
        logger.info('----------------this is setup-----------------------')
        logger.info('--------------Opengauss_Function_System_Table_Case0033开始执行--------------')
        self.userNode = Node('dbuser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH
        self.com = Common()
        self.comsh = CommonSH('dbuser')
        # Expected PG_FOREIGN_TABLE structure: column names and their types.
        self.expect_result_dict = {'Column': ['ftrelid', 'ftserver', 'ftwriteonly', 'ftoptions'],
                                   'Type': ['oid', 'oid', 'boolean', 'text[]']}

    def test_Index_file_damaged(self):
        """Describe the table and compare the parsed structure to the expectation."""
        logger.info('----------------------------查看表结构-----------------------------')
        # Raw string: '\d' is psql's "describe" meta-command, not an escape sequence.
        msg = self.comsh.execut_db_sql(r'\d PG_FOREIGN_TABLE')
        logger.info(msg)
        result_dict = self.com.format_sql_result(msg)
        logger.info(result_dict)
        # Modifiers are irrelevant to this check; compare columns/types only.
        del result_dict['Modifiers']
        self.assertDictEqual(self.expect_result_dict, result_dict)

    def tearDown(self):
        logger.info('----------------this is tearDown-----------------------')
        # No environment cleanup required.
        logger.info('-----------------------Opengauss_Function_System_Table_Case0033执行完成-----------------------------')
| 34.671875 | 119 | 0.626408 |
import sys
import unittest
from yat.test import Node
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class IndexFileDamaged(unittest.TestCase):
def setUp(self):
logger.info('----------------this is setup-----------------------')
logger.info('--------------Opengauss_Function_System_Table_Case0033开始执行--------------')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.com = Common()
self.comsh = CommonSH('dbuser')
self.expect_result_dict = {'Column': ['ftrelid', 'ftserver', 'ftwriteonly', 'ftoptions'],
'Type': ['oid', 'oid', 'boolean', 'text[]']}
def test_Index_file_damaged(self):
logger.info('----------------------------查看表结构-----------------------------')
msg = self.comsh.execut_db_sql('\d PG_FOREIGN_TABLE')
logger.info(msg)
result_dict = self.com.format_sql_result(msg)
logger.info(result_dict)
del result_dict['Modifiers']
self.assertDictEqual(self.expect_result_dict, result_dict)
def tearDown(self):
logger.info('----------------this is tearDown-----------------------')
logger.info('-----------------------Opengauss_Function_System_Table_Case0033执行完成-----------------------------')
| true | true |
f722cd6c856266fba9fb04305214e342cf9e2048 | 2,211 | py | Python | decay/decays/sample/half_sudden.py | pyl1b/decay | 7200516455fc03351ad658af66b5cc39b2b2d50a | [
"MIT"
] | null | null | null | decay/decays/sample/half_sudden.py | pyl1b/decay | 7200516455fc03351ad658af66b5cc39b2b2d50a | [
"MIT"
] | null | null | null | decay/decays/sample/half_sudden.py | pyl1b/decay | 7200516455fc03351ad658af66b5cc39b2b2d50a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Contains the definition of the SuddenDecay class.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import numpy as np
from . import SampleBasedDecay
logger = logging.getLogger('decay.half_sudden')
class HalfSuddenDecay(SampleBasedDecay):
    """
    Class that decays the value following the sigmoid curve.

    Sigmoid is:
                   k
        Y = --------------------- + 1
                     a + bx
                1 + e

    This curve used a=10, b=-10, k=-2

    This intersects the Y axis at
    +1 and the X axis at -1 and +1. We're interested only in the
    positive x.
    """
    def __init__(self, *args, **kwargs):
        """ Constructor. """
        super(HalfSuddenDecay, self).__init__(
            decay_name='.decay.half_sudden.', *args, **kwargs)

    def __str__(self):
        """ Represent this object as a human-readable string. """
        # Fixed: previously returned 'SuddenDecay()' (copy-paste from the
        # SuddenDecay class), which misreported the actual type.
        return 'HalfSuddenDecay()'

    def __repr__(self):
        """ Represent this object as a python constructor. """
        return 'HalfSuddenDecay()'
# Pre-sampled points of the sigmoid described on HalfSuddenDecay:
# decay_x holds 20 evenly spaced samples on [0, 1]; decay_y holds the
# corresponding curve values (monotonically falling from 1 to 0).
decay_x = np.array([
    0.0,
    0.05263157894736842,
    0.10526315789473684,
    0.15789473684210525,
    0.21052631578947367,
    0.2631578947368421,
    0.3157894736842105,
    0.3684210526315789,
    0.42105263157894735,
    0.47368421052631576,
    0.5263157894736842,
    0.5789473684210527,
    0.631578947368421,
    0.6842105263157894,
    0.7368421052631579,
    0.7894736842105263,
    0.8421052631578947,
    0.894736842105263,
    0.9473684210526315,
    1.0,
])
decay_y = np.array([
    1.0,
    0.9998463162863197,
    0.9997398757902081,
    0.9995597314205974,
    0.999254877774581,
    0.9987390684889199,
    0.9978665723466811,
    0.9963914462121438,
    0.9938994809709213,
    0.9896955173948945,
    0.9826197888368629,
    0.9707568136416107,
    0.9509968204584932,
    0.9184373437414545,
    0.8657330022308358,
    0.7828273568190789,
    0.6581107760257361,
    0.4825598285864794,
    0.2572468384313463,
    0.0,
])
from __future__ import unicode_literals
from __future__ import print_function
import logging
import numpy as np
from . import SampleBasedDecay
logger = logging.getLogger('decay.half_sudden')
class HalfSuddenDecay(SampleBasedDecay):
def __init__(self, *args, **kwargs):
super(HalfSuddenDecay, self).__init__(
decay_name='.decay.half_sudden.', *args, **kwargs)
def __str__(self):
return 'SuddenDecay()'
def __repr__(self):
return 'SuddenDecay()'
decay_x = np.array([
0.0,
0.05263157894736842,
0.10526315789473684,
0.15789473684210525,
0.21052631578947367,
0.2631578947368421,
0.3157894736842105,
0.3684210526315789,
0.42105263157894735,
0.47368421052631576,
0.5263157894736842,
0.5789473684210527,
0.631578947368421,
0.6842105263157894,
0.7368421052631579,
0.7894736842105263,
0.8421052631578947,
0.894736842105263,
0.9473684210526315,
1.0,
])
decay_y = np.array([
1.0,
0.9998463162863197,
0.9997398757902081,
0.9995597314205974,
0.999254877774581,
0.9987390684889199,
0.9978665723466811,
0.9963914462121438,
0.9938994809709213,
0.9896955173948945,
0.9826197888368629,
0.9707568136416107,
0.9509968204584932,
0.9184373437414545,
0.8657330022308358,
0.7828273568190789,
0.6581107760257361,
0.4825598285864794,
0.2572468384313463,
0.0,
])
| true | true |
f722cd78a8cac2e41c27ebc3b010dead3c261d46 | 398 | py | Python | test.py | glauberrleite/system-identifier | 982e04b7df84211d5797d259e9cb431d83b00529 | [
"MIT"
] | null | null | null | test.py | glauberrleite/system-identifier | 982e04b7df84211d5797d259e9cb431d83b00529 | [
"MIT"
] | null | null | null | test.py | glauberrleite/system-identifier | 982e04b7df84211d5797d259e9cb431d83b00529 | [
"MIT"
] | null | null | null | from Util import Util
import numpy
import matplotlib.pyplot as pyplot
inputArray = numpy.ones(100)
theta = [ 2.705, -2.448, 0.7408, 0.0523, -0.0855, 0.035 ]
orderOutput = 3
orderInput = 3
sampleRate = 0.1
y = Util.computeOutput(inputArray, theta, orderOutput, orderInput)
t = numpy.arange(0, len(y)*sampleRate, sampleRate)
pyplot.plot(t, y, 'r')
pyplot.plot(t, inputArray, 'b--')
pyplot.show()
| 22.111111 | 66 | 0.713568 | from Util import Util
import numpy
import matplotlib.pyplot as pyplot
inputArray = numpy.ones(100)
theta = [ 2.705, -2.448, 0.7408, 0.0523, -0.0855, 0.035 ]
orderOutput = 3
orderInput = 3
sampleRate = 0.1
y = Util.computeOutput(inputArray, theta, orderOutput, orderInput)
t = numpy.arange(0, len(y)*sampleRate, sampleRate)
pyplot.plot(t, y, 'r')
pyplot.plot(t, inputArray, 'b--')
pyplot.show()
| true | true |
f722ceebadc01cde384986f37c3b059f0718a6e8 | 12,353 | py | Python | osgar/drivers/cortexpilot.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | null | null | null | osgar/drivers/cortexpilot.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | null | null | null | osgar/drivers/cortexpilot.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 1 | 2022-01-02T04:06:01.000Z | 2022-01-02T04:06:01.000Z | """
Driver for robot Robik from cortexpilot.com
"""
import ctypes
import struct
import math
from datetime import timedelta
from osgar.node import Node
from osgar.bus import BusShutdownException
from osgar.lib import quaternion
# CPR = 9958 (ticks per revolution)
# wheel diameter D = 395 mm
# 1 Rev = 1241 mm
ENC_SCALE = 1.241/9958
WHEEL_DISTANCE = 0.88 # meters TODO confirm
RAMP_STEP = 0.1 # fractional number for speed in -1.0 .. 1.0
def sint32_diff(a, b):
    """Return a - b reinterpreted as a signed 32-bit integer (wraps on overflow)."""
    low32 = (a - b) & 0xFFFFFFFF
    # Two's complement: values with the sign bit set map to the negative range.
    return low32 - 0x100000000 if low32 & 0x80000000 else low32
class Cortexpilot(Node):
    def __init__(self, config, bus):
        """Register output channels on the bus and initialize command/status state."""
        super().__init__(config, bus)
        bus.register('raw', 'encoders', 'emergency_stop', 'pose2d',
                     'voltage', 'rotation', 'orientation', 'scan')
        self._buf = b''  # accumulator for partially received serial packets

        # commands
        self.desired_speed = 0.0  # m/s
        self.desired_angular_speed = 0.0
        self.cmd_flags = 0x40 #0x41  # 0 = remote steering, PWM OFF, laser ON, TODO
        self.speeds = self.plain_speeds()  # generator of (speed, steering) pairs

        # status
        self.emergency_stop = None  # unknown state
        self.pose = (0.0, 0.0, 0.0)  # x, y in meters, heading in radians (not corrected to 2PI)
        self.flags = None
        self.last_encoders = None
        self.yaw = None
        self.lidar_valid = False
        self.lidar_timestamp = 0
        self.uptime = None
def send_pose(self):
x, y, heading = self.pose
self.publish('pose2d', [round(x*1000), round(y*1000),
round(math.degrees(heading)*100)])
def query_version(self):
ret = bytes([0, 0, 3, 0x1, 0x01])
checksum = sum(ret) & 0xFF
return ret + bytes([(256-checksum) & 0xFF])
    def oscilate(self):
        """Generator of (speed, steering) pairs alternating the speed sign
        every second, producing a forward/backward oscillation.

        Relies on self.time being advanced externally between yields.
        """
        while True:
            end = self.time + timedelta(seconds=1)
            while self.time < end:
                yield self.desired_speed, -self.desired_angular_speed
            end = self.time + timedelta(seconds=1)
            while self.time < end:
                # Reversed speed for the second half of the cycle.
                yield -self.desired_speed, -self.desired_angular_speed
def plain_speeds(self):
while True:
yield self.desired_speed, -self.desired_angular_speed
def create_packet(self):
if self.yaw is None:
self.yaw = 0.0 # hack!
speed_frac, speed_dir = next(self.speeds)
speed_frac *= 2
speed_dir *= 1.2
if speed_frac < 0:
speed_dir = -speed_dir # Robik V5.1.1 handles backup backwards
if not self.lidar_valid:
speed_frac = 0.0
speed_dir = 0.0
#print(self.time, "{:.4f}, {:.4f} \t {:.4f} {:.4f}".format(speed_frac, speed_dir, self.desired_speed, self.desired_angular_speed))
flags = self.cmd_flags
flags |= (1<<8) # agresive turning
if self.emergency_stop is not None:
if self.emergency_stop:
flags |= (1<<11) # display red LEDs
else:
flags |= (1<<10) # turn on green (9th bit)
packet = struct.pack('<ffI', speed_frac, speed_dir, flags)
assert len(packet) < 256, len(packet) # just to use LSB only
ret = bytes([0, 0, len(packet) + 2 + 1, 0x1, 0x0D]) + packet
# addr=0x1, cmd=0xD, length is given by payload, addr, cmd and checksum
checksum = sum(ret) & 0xFF
return ret + bytes([(256-checksum) & 0xFF])
def get_packet(self):
"""extract packet from internal buffer (if available otherwise return None"""
data = self._buf
if len(data) < 3:
return None
high, mid, low = data[:3] # 24bit packet length (big endian int)
assert high == 0, high # all messages < 65535 bytes
size = 256 * mid + low + 3 # counting also 3 bytes of packet length header
if len(data) < size:
return None
ret, self._buf = data[:size], data[size:]
checksum = sum(ret) & 0xFF
assert checksum == 0, checksum # checksum error
return ret
def parse_packet(self, data):
"""
Parse cortexpilot sensors status message
"""
# expects already validated single sample with 3 bytes length prefix
# and checksum at the end
high, mid, low = data[:3]
assert high == 0, high # fixed packet size 2*256+89 bytes
assert mid == 2, mid
assert low == 89, low
addr, cmd = data[3:5]
assert addr == 1, addr
assert cmd == 0xD, cmd
offset = 5 # payload offset
# 4 byte Flags (unsigned long) 0
# bit 0 -> 1 = BigRedSwitch
# bit 1 -> 1 = MissionButton
# bit 2 -> copy of EnableRun flag (motors enabled)
# bit 3 -> 1 = Payload loaded, 0 = Payload unloaded - payload indicator
# bit 4 -> 1 = LIDAR ScanValid
# bit 5 -> 1 = Manual override
# 4 byte SystemVoltage (float) 4 - battery level for control electronics [V]
# 4 byte PowerVoltage (float) 8 - battery level for motors [V]
self.flags, system_voltage, power_voltage = struct.unpack_from('<Iff', data, offset)
self.lidar_valid = (self.flags & 0x10) == 0x10
self.emergency_stop = (self.flags & 0x01) == 0x01
self.voltage = [system_voltage, power_voltage]
self.bus.publish('voltage', [int(v*100) for v in self.voltage])
# skipped parsing of:
# 4 byte SpeedM1 (float) 12 - normalized motor M1 (R) speed <-1.0 1.0>
# 4 byte SpeedM2 (float) 16 - normalized motor M2 (L) speed <-1.0 1.0>
motors = struct.unpack_from('<ff', data, offset + 12)
# skipped parsing of:
# 4 byte ActualDir (float) 20 - normalized direction for PID controller
# 4 byte EncM1 (signed long) 24 - incremental encoders count for motor M1 (R) since last reset
# 4 byte EncM2 (signed long) 28 - incremental encoders count for motor M2 (L) since last reset
encoders = struct.unpack_from('<II', data, offset + 24)
# skipped parsing of:
# 1 byte GPS_Valid 32 - 1 = valid data from GPS module
# 1 byte GPS_Fix 33 - 0 = no fix, 1 = 2D fix, 2 = 3D fix
# 4 byte GPS_UTCDate (ulong) 34 - GPS date in YYMMDD format
# 4 byte GPS_UTCTime (ulong) 38 - GPS time in HHMMSS format
# 4 byte GPS_Lat (ulong) 42 - format Lat * 1E7
# 4 byte GPS_Lon (ulong) 46 - format Lon * 1E7
# 4 byte GPS_Brg (float) 50 - GPS Bearing <0 .. 359> deg
# 4 byte AHRS_q0 (float) 54 - Orientation Quaternion
# 4 byte AHRS_q1 (float) 58 -
# 4 byte AHRS_q2 (float) 62 -
# 4 byte AHRS_q3 (float) 66 -
qw, qx, qy, qz = struct.unpack_from('<ffff', data, offset+54)
orientation = qx, qy, qz, qw
# identity quat points to north, we need it to point to east
orientation = quaternion.multiply(orientation, [0, 0, 0.7071068, 0.7071068])
# correct roll axis by 1.7 degrees
self.orientation = quaternion.multiply(orientation, [0.0148348, 0, 0, 0.99989])
self.bus.publish('orientation', list(self.orientation))
q1, q2, q3, q0 = self.orientation # quaternion
#print(self.time, "{:.4f} {:.4f} {:.4f} {:.4f}".format(q0, q1, q2, q3))
ax = math.atan2(2*(q0*q1+q2*q3), 1-2*(q1*q1+q2*q2))
ay = math.asin(2*(q0*q2-q3*q1))
az = math.atan2(2*(q0*q3+q1*q2), 1-2*(q2*q2+q3*q3))
# rotation Euler angles are yaw, pitch and roll
#print(self.time, "{:.4f} {:.4f} {:.4f}".format(math.degrees(az), math.degrees(ay), math.degrees(ax)))
self.bus.publish('rotation', [round(math.degrees(angle)*100) for angle in [az, ay, ax]])
# 4 byte Yaw (float) 70 - Heading (Yaw) - machine orientation to magnetic north <0 .. 359> deg
self.yaw = struct.unpack_from('<f', data, offset + 70)[0]
#print(math.degrees(x), math.degrees(y), math.degrees(z), self.yaw)
# 4 byte AccelX (float) 74
# 4 byte AccelY (float) 78
# 4 byte AccelZ (float) 82
# 4 byte GyroX (float) 86
# 4 byte GyroY (float) 90
# 4 byte GyroZ (float) 94
# 4 byte MagX (float) 98
# 4 byte MagY (float) 102
# 4 byte MagZ (float) 106
# 4 byte SystemTick (ulong) 110 - Uptime in milisecond
uptime = struct.unpack_from('<I', data, offset + 110)[0]
if self.uptime is not None:
uptime_diff = uptime - self.uptime
self.uptime = uptime
if self.last_encoders is not None:
step = [sint32_diff(x, prev) for x, prev in zip(encoders, self.last_encoders)]
self.publish('encoders', step)
dist = ENC_SCALE * sum(step)/len(step)
angle = ENC_SCALE * (step[0] - step[1])/WHEEL_DISTANCE
x, y, heading = self.pose
# advance robot by given distance and angle
if abs(angle) < 0.0000001: # EPS
# Straight movement - a special case
x += dist * math.cos(heading)
y += dist * math.sin(heading)
#Not needed: heading += angle
else:
# Arc
r = dist / angle
x += -r * math.sin(heading) + r * math.sin(heading + angle)
y += +r * math.cos(heading) - r * math.cos(heading + angle)
heading += angle # not normalized
self.pose = (x, y, heading)
self.send_pose()
self.last_encoders = encoders
# 4 byte LidarTimestamp (ulong) 114 - Value of SystemTick when lidar scan was received
lidar_timestamp = struct.unpack_from('<I', data, offset + 114)[0]
lidar_diff = lidar_timestamp - self.lidar_timestamp
self.lidar_timestamp = lidar_timestamp
if lidar_diff > 150 and self.lidar_valid:
print(self.time, "lidar invalid:", lidar_diff)
self.lidar_valid = False
if lidar_diff != 0 and self.lidar_valid:
# laser
# 480 byte Lidar_Scan (ushort) 118 - 239 two-bytes distances from Lidar <0 .. 65535> in [cm]
# Scan is whole 360 deg with resolution 1.5 deg
scan = struct.unpack_from('<' + 'H'*239, data, offset + 118) # TODO should be 240
# restrict scan only to 270 degrees - cut 1/8th on both sides
# scan = scan[30:-30]
# zero_sides = 20
scan = [10 * d for d in reversed(scan)] # scale to millimeters
# scan[:zero_sides] = [0]*zero_sides
# scan[-zero_sides:] = [0]*zero_sides
self.publish('scan', scan)
def run(self):
try:
self.publish('raw', self.query_version())
while True:
dt, channel, data = self.listen()
self.time = dt
if channel == 'raw':
self._buf += data
packet = self.get_packet()
if packet is not None:
if len(packet) < 256: # TODO cmd value
print(packet)
else:
prev = self.flags
self.parse_packet(packet)
if prev != self.flags:
print(self.time, 'Flags:', hex(self.flags))
self.publish('raw', self.create_packet())
if channel == 'desired_speed':
self.desired_speed, self.desired_angular_speed = data[0]/1000.0, math.radians(data[1]/100.0)
if abs(self.desired_speed) < 0.2 and abs(self.desired_angular_speed) > 0.2:
if self.speeds.__name__ != "oscilate":
self.speeds = self.oscilate()
else:
if self.speeds.__name__ == "oscilate":
self.speeds = self.plain_speeds()
self.cmd_flags |= 0x02 # PWM ON
# if data == [0, 0]:
# print("TURN OFF")
# self.cmd_flags = 0x00 # turn everything OFF (hack for now)
except BusShutdownException:
pass
# vim: expandtab sw=4 ts=4
| 42.450172 | 138 | 0.548612 |
import ctypes
import struct
import math
from datetime import timedelta
from osgar.node import Node
from osgar.bus import BusShutdownException
from osgar.lib import quaternion
ENC_SCALE = 1.241/9958
WHEEL_DISTANCE = 0.88
RAMP_STEP = 0.1
def sint32_diff(a, b):
    """Difference a - b reinterpreted as a signed 32-bit integer (wraps)."""
    wrapped = ctypes.c_int32(a - b)
    return wrapped.value
class Cortexpilot(Node):
    """OSGAR driver for the Robik robot (cortexpilot.com).

    Binary protocol on the 'raw' channel: 3-byte big-endian length prefix
    and a trailing checksum byte (sum of all bytes % 256 == 0).  Status
    packets are parsed into pose/encoder/IMU/lidar outputs and each one
    is answered with a fresh command packet.
    """

    def __init__(self, config, bus):
        super().__init__(config, bus)
        bus.register('raw', 'encoders', 'emergency_stop', 'pose2d',
                     'voltage', 'rotation', 'orientation', 'scan')
        self._buf = b''  # receive buffer; packets are cut out in get_packet()
        # command state
        self.desired_speed = 0.0  # m/s
        self.desired_angular_speed = 0.0  # rad/s
        self.cmd_flags = 0x40
        self.speeds = self.plain_speeds()  # generator of (speed, direction) pairs
        # status mirrored from the robot
        self.emergency_stop = None  # unknown until the first status packet
        self.pose = (0.0, 0.0, 0.0)  # x [m], y [m], heading [rad], not normalized
        self.flags = None
        self.last_encoders = None  # previous encoder counters for odometry deltas
        self.yaw = None
        self.lidar_valid = False
        self.lidar_timestamp = 0
        self.uptime = None

    def send_pose(self):
        """Publish odometry pose as [x mm, y mm, heading in 1/100 deg]."""
        x, y, heading = self.pose
        self.publish('pose2d', [round(x*1000), round(y*1000),
                                round(math.degrees(heading)*100)])

    def query_version(self):
        """Return the version request packet (addr=0x1, cmd=0x01)."""
        ret = bytes([0, 0, 3, 0x1, 0x01])
        checksum = sum(ret) & 0xFF
        return ret + bytes([(256-checksum) & 0xFF])

    def oscilate(self):
        """Yield commands alternating forward/backward every second."""
        while True:
            end = self.time + timedelta(seconds=1)
            while self.time < end:
                yield self.desired_speed, -self.desired_angular_speed
            end = self.time + timedelta(seconds=1)
            while self.time < end:
                yield -self.desired_speed, -self.desired_angular_speed

    def plain_speeds(self):
        """Yield the currently desired (speed, direction) pair forever."""
        while True:
            yield self.desired_speed, -self.desired_angular_speed

    def create_packet(self):
        """Build the motion command packet (addr=0x1, cmd=0x0D)."""
        if self.yaw is None:
            self.yaw = 0.0  # a command may be needed before the first status
        speed_frac, speed_dir = next(self.speeds)
        speed_frac *= 2  # scaling to firmware units -- TODO confirm factor
        speed_dir *= 1.2
        if speed_frac < 0:
            speed_dir = -speed_dir  # the robot handles backing up mirrored
        if not self.lidar_valid:
            # safety: without a valid lidar scan command a full stop
            speed_frac = 0.0
            speed_dir = 0.0
        flags = self.cmd_flags
        flags |= (1<<8)
        if self.emergency_stop is not None:
            if self.emergency_stop:
                flags |= (1<<11)
            else:
                flags |= (1<<10)
        packet = struct.pack('<ffI', speed_frac, speed_dir, flags)
        assert len(packet) < 256, len(packet)  # only the LSB length byte is used
        ret = bytes([0, 0, len(packet) + 2 + 1, 0x1, 0x0D]) + packet
        checksum = sum(ret) & 0xFF
        return ret + bytes([(256-checksum) & 0xFF])

    def get_packet(self):
        """Extract one complete packet from the buffer or return None."""
        data = self._buf
        if len(data) < 3:
            return None
        high, mid, low = data[:3]  # 24-bit big-endian packet length
        assert high == 0, high
        size = 256 * mid + low + 3  # include the 3-byte length header itself
        if len(data) < size:
            return None
        ret, self._buf = data[:size], data[size:]
        checksum = sum(ret) & 0xFF
        assert checksum == 0, checksum  # checksum error
        return ret

    def parse_packet(self, data):
        """Parse one status packet and publish the derived streams.

        Updates flags/pose/orientation state and publishes voltage,
        orientation, rotation, encoders, pose2d and scan messages.
        """
        high, mid, low = data[:3]
        assert high == 0, high  # fixed status packet size: 2*256+89 bytes
        assert mid == 2, mid
        assert low == 89, low
        addr, cmd = data[3:5]
        assert addr == 1, addr
        assert cmd == 0xD, cmd
        offset = 5  # payload starts after length header, addr and cmd
        self.flags, system_voltage, power_voltage = struct.unpack_from('<Iff', data, offset)
        self.lidar_valid = (self.flags & 0x10) == 0x10
        self.emergency_stop = (self.flags & 0x01) == 0x01
        self.voltage = [system_voltage, power_voltage]
        self.bus.publish('voltage', [int(v*100) for v in self.voltage])
        motors = struct.unpack_from('<ff', data, offset + 12)
        # encoders are unsigned here; sint32_diff() recovers signed deltas
        encoders = struct.unpack_from('<II', data, offset + 24)
        qw, qx, qy, qz = struct.unpack_from('<ffff', data, offset+54)
        orientation = qx, qy, qz, qw
        # identity quat points to north; rotate so it points to east
        orientation = quaternion.multiply(orientation, [0, 0, 0.7071068, 0.7071068])
        # correct roll axis by 1.7 degrees
        self.orientation = quaternion.multiply(orientation, [0.0148348, 0, 0, 0.99989])
        self.bus.publish('orientation', list(self.orientation))
        q1, q2, q3, q0 = self.orientation
        ax = math.atan2(2*(q0*q1+q2*q3), 1-2*(q1*q1+q2*q2))
        ay = math.asin(2*(q0*q2-q3*q1))
        az = math.atan2(2*(q0*q3+q1*q2), 1-2*(q2*q2+q3*q3))
        # Euler angles published as [yaw, pitch, roll] in 1/100 deg
        self.bus.publish('rotation', [round(math.degrees(angle)*100) for angle in [az, ay, ax]])
        self.yaw = struct.unpack_from('<f', data, offset + 70)[0]
        uptime = struct.unpack_from('<I', data, offset + 110)[0]
        if self.uptime is not None:
            uptime_diff = uptime - self.uptime  # computed but not used yet
        self.uptime = uptime
        if self.last_encoders is not None:
            step = [sint32_diff(x, prev) for x, prev in zip(encoders, self.last_encoders)]
            self.publish('encoders', step)
            dist = ENC_SCALE * sum(step)/len(step)
            angle = ENC_SCALE * (step[0] - step[1])/WHEEL_DISTANCE
            x, y, heading = self.pose
            # differential-drive odometry: straight line vs. arc
            if abs(angle) < 0.0000001:
                x += dist * math.cos(heading)
                y += dist * math.sin(heading)
            else:
                r = dist / angle
                x += -r * math.sin(heading) + r * math.sin(heading + angle)
                y += +r * math.cos(heading) - r * math.cos(heading + angle)
                heading += angle  # not normalized
            self.pose = (x, y, heading)
            self.send_pose()
        self.last_encoders = encoders
        lidar_timestamp = struct.unpack_from('<I', data, offset + 114)[0]
        lidar_diff = lidar_timestamp - self.lidar_timestamp
        self.lidar_timestamp = lidar_timestamp
        if lidar_diff > 150 and self.lidar_valid:
            # no fresh scan for more than 150 ms -> consider the lidar invalid
            print(self.time, "lidar invalid:", lidar_diff)
            self.lidar_valid = False
        if lidar_diff != 0 and self.lidar_valid:
            # 239 two-byte distances in [cm]; reversed and scaled to [mm]
            scan = struct.unpack_from('<' + 'H'*239, data, offset + 118)
            scan = [10 * d for d in reversed(scan)]
            self.publish('scan', scan)

    def run(self):
        """Main loop: parse incoming raw data and answer every packet."""
        try:
            self.publish('raw', self.query_version())
            while True:
                dt, channel, data = self.listen()
                self.time = dt
                if channel == 'raw':
                    self._buf += data
                    packet = self.get_packet()
                    if packet is not None:
                        if len(packet) < 256:
                            print(packet)  # short packets (version reply) just logged
                        else:
                            prev = self.flags
                            self.parse_packet(packet)
                            if prev != self.flags:
                                print(self.time, 'Flags:', hex(self.flags))
                        self.publish('raw', self.create_packet())
                if channel == 'desired_speed':
                    self.desired_speed, self.desired_angular_speed = data[0]/1000.0, math.radians(data[1]/100.0)
                    if abs(self.desired_speed) < 0.2 and abs(self.desired_angular_speed) > 0.2:
                        if self.speeds.__name__ != "oscilate":
                            self.speeds = self.oscilate()
                    else:
                        if self.speeds.__name__ == "oscilate":
                            self.speeds = self.plain_speeds()
                    self.cmd_flags |= 0x02  # PWM ON
        # repaired line: the stripped copy was corrupted to a bare 'n:' here
        except BusShutdownException:
            pass
| true | true |
f722cf13a8e1c7f120022a0b0b8571ad1ca0538a | 2,274 | py | Python | Python/py_bites/bite_26.py | J0shu4B0y/Practice | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 1 | 2021-07-18T08:40:05.000Z | 2021-07-18T08:40:05.000Z | Python/py_bites/bite_26.py | J0shu4B0y/Practice | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 3 | 2020-02-21T13:43:44.000Z | 2020-02-21T13:54:57.000Z | Python/py_bites/bite_26.py | J0shu4B0y/Practice-Python | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 2 | 2019-07-19T10:09:08.000Z | 2020-09-20T08:07:36.000Z | # A dictionary comprehension is like a list comprehension, but it constructs
# a dict instead of a list. They are convenient to quickly operate on each
# (key, value) pair of a dict. And often in one line of code, maybe two after
# checking PEP8 😉
#
# We think they are elegant, that's why we want you to know about them!
#
# In this Bite you are given a dict and a set. Write a dictionary comprehension
# that filters out the items in the set and returns the resulting dict, so if
# your dict is {1: 'bob', 2: 'julian', 3: 'tim'} and your set is {2, 3}, the
# resulting dict would be {1: 'bob'}.
#
# Check out the tests for more details. Have fun!
import unittest
from typing import Dict
# PyBites exercises by id; filter_bites() below removes the ones already done.
bites = {
    6: "PyBites Die Hard",
    7: "Parsing dates from logs",
    9: "Palindromes",
    10: "Practice exceptions",
    11: "Enrich a class with dunder methods",
    12: "Write a user validation function",
    13: "Convert dict in namedtuple/json",
    14: "Generate a table of n sequences",
    15: "Enumerate 2 sequences",
    16: "Special PyBites date generator",
    17: "Form teams from a group of friends",
    18: "Find the most common word",
    19: "Write a simple property",
    20: "Write a context manager",
    21: "Query a nested data structure",
}
exclude_bites = {6, 10, 16, 18, 21}


def filter_bites(bites=bites, bites_done=exclude_bites):
    """Return a new dict with every bite whose id is in *bites_done* removed."""
    remaining = {}
    for bite_id, title in bites.items():
        if bite_id in bites_done:
            continue
        remaining[bite_id] = title
    return remaining
class Test(unittest.TestCase):
    """Checks that filter_bites() drops exactly the excluded bite ids."""

    def test_filter(self):
        expected: Dict[int, str] = {
            7: "Parsing dates from logs",
            9: "Palindromes",
            11: "Enrich a class with dunder methods",
            12: "Write a user validation function",
            13: "Convert dict in namedtuple/json",
            14: "Generate a table of n sequences",
            15: "Enumerate 2 sequences",
            17: "Form teams from a group of friends",
            19: "Write a simple property",
            20: "Write a context manager",
        }
        filtered_bites: Dict[int, str] = filter_bites()
        self.assertEqual(filtered_bites, expected)
# Run this bite's unittest suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 33.441176 | 79 | 0.643799 |
#
# In this Bite you are given a dict and a set. Write a dictionary comprehension
# that filters out the items in the set and returns the resulting dict, so if
# your dict is {1: 'bob', 2: 'julian', 3: 'tim'} and your set is {2, 3}, the
# resulting dict would be {1: 'bob'}.
#
# Check out the tests for more details. Have fun!
import unittest
from typing import Dict
# PyBites bite id -> exercise title.
bites = {
    6: "PyBites Die Hard",
    7: "Parsing dates from logs",
    9: "Palindromes",
    10: "Practice exceptions",
    11: "Enrich a class with dunder methods",
    12: "Write a user validation function",
    13: "Convert dict in namedtuple/json",
    14: "Generate a table of n sequences",
    15: "Enumerate 2 sequences",
    16: "Special PyBites date generator",
    17: "Form teams from a group of friends",
    18: "Find the most common word",
    19: "Write a simple property",
    20: "Write a context manager",
    21: "Query a nested data structure",
}
# Ids of bites that are already done and should be filtered out.
exclude_bites = {6, 10, 16, 18, 21}
def filter_bites(bites=bites, bites_done=exclude_bites):
    """Return the bites dict with the entries listed in *bites_done* removed."""
    return {
        bite_id: bite_value
        for bite_id, bite_value in bites.items()
        if bite_id not in bites_done
    }
class Test(unittest.TestCase):
    # Snapshot test: filtering with the module defaults must match exactly.
    def test_filter(self):
        filtered_bites: Dict[int, str] = filter_bites()
        snapshot: Dict[int, str] = {
            7: "Parsing dates from logs",
            9: "Palindromes",
            11: "Enrich a class with dunder methods",
            12: "Write a user validation function",
            13: "Convert dict in namedtuple/json",
            14: "Generate a table of n sequences",
            15: "Enumerate 2 sequences",
            17: "Form teams from a group of friends",
            19: "Write a simple property",
            20: "Write a context manager",
        }
        assert filtered_bites == snapshot
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f722cf2f9b737830daa3176c5c1a1ee9bdfcc8ce | 654 | py | Python | UVa/Competitive_Programming_Exercises/06-String_Processing/01-Ad_Hoc_String_Processing_Problems-Part_1/01-Cipher_Encode_Encrypt_Decode_Decrypt-Easier/11220.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-01-30T13:21:30.000Z | 2018-01-30T13:21:30.000Z | UVa/Competitive_Programming_Exercises/06-String_Processing/01-Ad_Hoc_String_Processing_Problems-Part_1/01-Cipher_Encode_Encrypt_Decode_Decrypt-Easier/11220.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | null | null | null | UVa/Competitive_Programming_Exercises/06-String_Processing/01-Ad_Hoc_String_Processing_Problems-Part_1/01-Cipher_Encode_Encrypt_Decode_Decrypt-Easier/11220.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
def solution():
    """Solve UVa 11220 ("Decode the Message") from stdin to stdout.

    Input: the number of test cases, a blank line, then cases separated
    by blank lines.  For every line of a case the decoded word is built
    from the i-th character of the i-th word (too-short words are skipped).
    """
    first = True
    for tc in range(1, 1 + int(input())):
        if not first:
            print('')  # blank line between consecutive cases
        else:
            input()  # consume the blank line that follows the case count
        first = False
        print('Case #%d:' % (tc))
        while True:
            try:
                words = input().strip().split()
            except EOFError:
                break  # end of input terminates the last case
            if not words:
                break  # a blank line terminates the current case
            print(_decode_line(words))


def _decode_line(words):
    """Return the string made of words[i][i] for every word long enough."""
    letters = []
    for index, word in enumerate(words):
        if index < len(word):
            letters.append(word[index])
    return ''.join(letters)


if __name__ == '__main__':
    solution()
def solution():
    """UVa 11220: print the hidden acrostic message for each test case.

    Reads the case count, then cases separated by blank lines; a line
    decodes to the i-th character of its i-th word when that word is
    long enough.
    """
    case_count = int(input())
    for case_number in range(1, case_count + 1):
        if case_number == 1:
            input()  # swallow the blank line right after the case count
        else:
            print('')  # blank separator line between case outputs
        print('Case #%d:' % (case_number,))
        while True:
            try:
                tokens = input().strip().split()
            except EOFError:
                break
            if not tokens:
                break
            decoded = ''.join(tok[i] for i, tok in enumerate(tokens) if i < len(tok))
            print(decoded)


if __name__ == '__main__':
    solution()
| true | true |
f722d228b7d6bc6a016ef16a30d17f3671381777 | 24,105 | py | Python | CyberTron5000/utils/lists.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 20 | 2020-06-20T20:26:33.000Z | 2021-01-12T20:47:52.000Z | CyberTron5000/utils/lists.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 1,005 | 2020-07-09T18:27:17.000Z | 2020-07-30T20:41:33.000Z | CyberTron5000/utils/lists.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 7 | 2020-07-09T18:23:24.000Z | 2020-11-21T20:47:03.000Z | """
For JSON lists.
"""
# I need to use yaml for all of this
# excuse me
from string import ascii_uppercase
import discord
# Pretty display labels for Pokémon base-stat keys.
STAT_NAMES = {
    "speed": "SPEED",
    "attack": "ATTACK",
    "sp_atk": "SPECIAL ATTACK",
    "sp_def": "SPECIAL DEFENSE",
    "defense": "DEFENSE",
    "hp": "HP",
    "total": "TOTAL"
}
# 'A' -> 1 ... 'Z' -> 26, plus the inverse lookup.
ALPHABET_NUMBER = {letter: index for index, letter in enumerate(ascii_uppercase, start=1)}
NUMBER_ALPHABET = {index: letter for letter, index in ALPHABET_NUMBER.items()}
# Pokémon type name -> custom Discord emoji used when rendering type icons.
TYPES = {
    "normal": "<:normal:715628305541496915>",
    "fighting": "<:fighting:715628306015191220>",
    "fire": "<:fire:715626721402945567>",
    "water": "<:water:715629330621005915>",
    "grass": "<:grass:715629330830721104>",
    "ground": "<:ground:715626721772175472>",
    "rock": "<:rock:715626723126804532>",
    "steel": "<:steel:715629330637520988>",
    "fairy": "<:fairy:715629865071542328>",
    "ghost": "<:ghost:715630366769021038>",
    "dark": "<:dark:715630366651711549>",
    "poison": "<:poison:715628305671389285>",
    "dragon": " <:dragon:715630390597124177>",
    "electric": "<:electric:715626721399013489>",
    "ice": "<:ice:715630367687573774>",
    "flying": "<:flying:715631197140811847>",
    "bug": "<:bug:715627787427381319>",
    "psychic": "<:psychic:715628305763663923>"
}
# Discord voice region id -> human-readable name.
REGIONS = {
    "europe": "Europe",
    "us-east": "US East",
    "india": "India",
    "brazil": "Brazil",
    "japan": "Japan",
    "russia": "Russia",
    "singapore": "Singapore",
    "southafrica": "South Africa",
    "sydney": "Sydney",
    "hongkong": "Hong Kong",
    "us-central": "US Central",
    "us-south": "US South",
    "us-west": "US West",
    'eu-west': "EU West",
    "eu-north": "EU North",
    "eu-south": "EU South",
    "eu-east": "EU East"
}
# Digit character -> keycap emoji.  NOTE(review): despite the name, the keys
# here are digits, not letters.
INDICATOR_LETTERS = {'1': '1️⃣', '2': '2️⃣', '3': '3️⃣', '4': '4️⃣', '5': '5️⃣', '6': '6️⃣', '7': '7️⃣', '8': '8️⃣',
                     '9': '9️⃣', '0': '0️⃣'}
# discord.Status value -> status-dot emoji ('sl' presumably = status lookup).
sl = {discord.Status.online: "<:online:726127263401246832>",
      discord.Status.offline: "<:offline:726127263203983440>",
      discord.Status.idle: "<:idle:726127192165187594>",
      discord.Status.dnd: "<:dnd:726127192001478746>"
      }
# Reddit trophy name -> custom Discord emoji (used when showing user trophies).
REDDIT_EMOJIS = {
    'Alpha Tester': '<:alphauser:742837407623151738>',
    'Bellwether': '<:bellwether:742837613228064912>',
    'Best Comment': '<:bestcomment:742837968179298355>',
    'Best Link': '<:bestlink:742838163990642809>',
    'ComboCommenter': '<:combocommenter:742838391078650017>',
    'ComboLinker': '<:combolinker:742838391229644900>',
    'Inciteful Comment': '<:incitefulcomment:742838606976122891>',
    'Inciteful Link': '<:incitefullink:742838454999711765>',
    'New User': '<:newuser:742838754841985164>',
    'Not Forgotten': '<:notforgotten:742838985251881091>',
    'Reddit Premium': '<:redditpremium:742839302840516860>',
    'RPAN Broadcaster': '<:rpanbroadcaster:742839517689413714>',
    'RPAN Viewer': '<:rpanviewer:742839518108844182>',
    'Sequence | Editor': '<:sequenceeditor:742839825165713468>',
    'Shutterbug': '<:shutterbug:742843728670097559>',
    'Verified Email': '<:verifiedemail:742843907099983873>',
    'Well-rounded': '<:wellrounded:742844034401173505>',
    'Xbox Live': '<:xboxlive:742844235216322651>',
    'Extra Life 2019': '<:extralife:742844662347333633>',
    'Open Sorcerer': '<:opensorcerer:742844988479766609>',
    'One-Year Club': '<:1_oneyear:742846966895083570>',
    'Two-Year Club': '<:1_twoyear:742846967213719592>',
    'Three-Year Club': '<:1_threeyear:742846966769123359>',
    'Four-Year Club': '<:1_fouryear:742846966735831132>',
    'Five-Year Club': '<:1_fiveyear:742846966794289212>',
    'Six-Year Club': '<:1_sixyear:742846966899277860>',
    'Seven-Year Club': '<:1_sevenyear:742846966979100803>',
    'Eight-Year Club': '<:1_eightyear:742846938264764476>',
    'Nine-Year Club': '<:1_nineyear:742846966630842399>',
    'Ten-Year Club': '<:1_tenyear:742846967071375468>',
    'Eleven-Year Club': '<:1_elevenyear:742846937992003616>',
    'Twelve-Year Club': '<:1_twelveyear:742846967033364480>',
    'Thirteen-Year Club': '<:1_thirteenyear:742846966689562625>',
    'Gilding I': '<:gilding1:742851900386443286>',
    'Gilding II': '<:gilding2:742851900545957990>',
    'Gilding III': '<:gilding3:742851900549890088>',
    'Gilding IV': '<:gilding4:742851938370060450>',
    'Gilding V': '<:gilding5:742852020490338355>',
    'Gilding VI': '<:gilding6:742852020808974487>',
    'Gilding VII': '<:gilding7:742852020855111781>',
    # NOTE(review): Gilding VIII reuses the gilding9 emoji -- looks like a
    # copy/paste slip, but the correct id is unknown from here; verify.
    'Gilding VIII': '<:gilding9:742852020980940800>',
    'Gilding IX': '<:gilding9:742852020980940800>',
    'Gilding X': '<:gilding10:742852216963989646>',
    'Gilding XI': '<:gilding11:742852286044438648>'
}
# discord.ActivityType -> verb prefix shown before the activity name.
status_mapping = {
    discord.ActivityType.listening: "Listening to",
    discord.ActivityType.watching: "Watching",
    discord.ActivityType.playing: "Playing",
    discord.ActivityType.streaming: "Streaming",
    discord.ActivityType.custom: "\u200b"  # zero-width space: custom status carries its own text
}
# Discord public-flag (badge) name -> custom emoji.
badge_mapping = {
    "staff": "<:staff:730846674775179394>",
    "partner": "<:partner:730846624275759105>",
    "hypesquad": "<:hypesquad:730846721235746859>",
    "bug_hunter": "<:bug_hunter:730868709274419281>",
    "bug_hunter_level_2": "<:bug_hunter_level_2:730874021721276527>",
    "hypesquad_bravery": "<:hypesquad_bravery:730851606941270147>",
    "hypesquad_brilliance": "<:hypesquad_brilliance:730851606853320765>",
    "hypesquad_balance": "<:hypesquad_balance:730851606832087141>",
    "early_supporter": "<:early_supporter:730869784337580102>",
    "verified_bot_developer": "<:verified_bot_developer:730849897410199572>",
}
# discord.AuditLogAction -> markdown verb phrase used when rendering audit
# log entries (typically followed by the action's target).
audit_actions = {
    discord.AuditLogAction.guild_update: "**updated the guild**",
    discord.AuditLogAction.channel_update: "**updated channel**",
    discord.AuditLogAction.channel_create: "**created channel**",
    discord.AuditLogAction.channel_delete: "**deleted channel**",
    discord.AuditLogAction.overwrite_create: "**created overwrite**",
    discord.AuditLogAction.overwrite_update: "**updated overwrite**",
    discord.AuditLogAction.overwrite_delete: "**deleted overwrite**",
    discord.AuditLogAction.kick: "**kicked**",
    discord.AuditLogAction.ban: "**banned**",
    discord.AuditLogAction.unban: "**unbanned**",
    discord.AuditLogAction.member_role_update: "**updated roles of**",
    discord.AuditLogAction.member_move: "**moved member**",
    discord.AuditLogAction.member_disconnect: "**disconnected member**",
    discord.AuditLogAction.bot_add: "**added bot**",
    discord.AuditLogAction.role_create: "**created role**",
    discord.AuditLogAction.role_update: "**updated role**",
    discord.AuditLogAction.role_delete: "**deleted role**",
    discord.AuditLogAction.invite_create: "**created invite**",
    discord.AuditLogAction.invite_update: "**updated invite**",
    discord.AuditLogAction.invite_delete: "**deleted invite**",
    discord.AuditLogAction.webhook_create: "**created webhook**",
    discord.AuditLogAction.webhook_delete: "**deleted webhook**",
    discord.AuditLogAction.webhook_update: "**updated webhook**",
    discord.AuditLogAction.emoji_create: "**created emoji**",
    discord.AuditLogAction.emoji_update: "**updated emoji**",
    discord.AuditLogAction.emoji_delete: "**deleted emoji**",
    discord.AuditLogAction.message_delete: "**deleted message by**",
    discord.AuditLogAction.message_pin: "**pinned a message by**",
    discord.AuditLogAction.message_unpin: "**unpinned a message by**",
    discord.AuditLogAction.message_bulk_delete: "**bulk deleted messages**",
    discord.AuditLogAction.integration_create: "**created integration**",
    discord.AuditLogAction.integration_delete: "**deleted integration**",
    discord.AuditLogAction.integration_update: "**updated integration**",
    discord.AuditLogAction.member_update: "**updated member**"
}
# Badge tier emojis, smallest to largest tier.
# NOTE(review): 'engineer_bagdes' is a typo for 'badges'; renaming would break
# importers of this module -- fix together with all call sites.
engineer_bagdes = ["<:engineer1:732745844339638334>", "<:engineer2:732745844633370684>",
                   "<:engineer3:732745844716994690>", "<:engineer4:732745844754743306>"]
popular_badges = ['<:popular1:732745781660090459>', '<:popular2:732745781856960634>', '<:popular3:732745782054092840>',
                  '<:popular4:732745781714354198>']
# NOTE(review): the legend3 entry is missing its closing '>' and its id looks
# truncated (17 digits vs 18) -- the real emoji id must be restored.
legend_badges = ['<:legend1:732745816564826212>', '<:legend2:732745816590123041>', '<:legend3:73274581659838',
                 '<:legend4:732745816758026381>']
# Animal command name -> emoji shown in the response.
ANIMALS = {
    'cat': '🐱',
    'dog': '🐶',
    'koala': '🐨',
    'fox': '🦊',
    'bird': '🐦',
    'birb': '🐦',
    'red_panda': '🔴',  # NOTE(review): red circle, not a red panda -- intended?
    'elephant': '🐘',
    'panda': '🐼',
    'racoon': '🦝',
    'kangaroo': '🦘',
    'giraffe': '🦒',
    'whale': '🐋'
}
# Emotion/interaction command name -> emoji.
EMOTIONS = {
    'hug': '<a:hug:748315930685210746>',
    'wink': '😉',
    'face-palm': '🤦♂️',
    'pat': '<:pat:748316152123359322>'
}
# Confirmation text shown before a moderation action; '{0}' is presumably
# filled with the bot's command prefix via str.format -- verify at call sites.
# NOTE(review): 'addding' and 'unumute' typos are inside user-facing strings;
# left untouched here since they are runtime text, not comments.
INFRACTION_DESCRIPTIONS = {
    "mute": "This will indefinitely mute the user by addding to them the `CyberMute` role, which restricts them from seeing any channels. To unmute them, manually remove the role from them or do `{0}unumute <user>`\nI need **Manage Channels** and **Manage Roles** permissions for this.",
    "kick": "This will kick the user from the server.\nI need the **Kick Members** permission for this.",
    "ban": "This will ban the user from the server. To unban a user, do `{0}unban <user id or username#user discriminator>`"
}
# Flavour weapon names for the fun/game commands.
WEAPONS = [
    "Sword of Mega Doom",
    "Epic Gun",
    "Mega Epic Gun",
    "Grenade",
    "Amazing Bruh Machine",
    "Gun Lmao",
    "Hyper Epic Gun",
    "'Not even trying at this point' Rifle",
    "Grand Sword of Chaos",
    "Excalibur",
    "Master Sword",
    "Storm Pegasus",
    "Rock Leone",
    "Lightning L-Drago"
]
QUIPS = ["What two words would passengers never want to hear a pilot say?",
"You would never go on a roller coaster called _____",
"The secret to a happy life",
"If a winning coach gets Gatorade dumped on his head, what should get dumped on the losing coach?",
"You should never give alcohol to ______",
"Everyone knows that monkeys hate ______",
"The biggest downside to living in Hell",
"The worst thing for an evil witch to turn you into",
"The Skittles flavor that just missed the cut",
"On your wedding night, it would be horrible to find out that the person you married is ____",
"A name for a really bad Broadway musical",
"The first thing you would do after winning the lottery",
"Why ducks really fly south in the winter",
"America's energy crisis would be over if we made cars that ran on ______",
"It's incredibly rude to ____ with your mouth open",
"What's actually causing global warming?",
"A name for a brand of designer adult diapers",
"Name a TV drama that's about a vampire doctor",
"Something squirrels probably do when no one is looking",
"The crime you would commit if you could get away with it",
"What's the Mona Lisa smiling about?",
"A terrible name for a cruise ship",
"What FDR meant to say was We have nothing to fear, but _____",
"Come up with a title for an adult version of any classic video game",
"The name of a font nobody would ever use",
"Something you should never put on an open wound",
"Scientists say erosion, but we all know the Grand Canyon was actually made by _____",
"The real reason the dinosaurs died",
"Come up with the name of a country that doesn't exist",
"The best way to keep warm on a cold winter night",
"A college major you don't see at many universities",
"What would make baseball more entertaining to watch?",
"The best thing about going to prison",
"The best title for a new national anthem for the USA",
"Come up with the name of book that would sell a million copies, immediately",
"What would you do if you were left alone in the White House for an hour?",
"Invent a family-friendly replacement word that you could say instead of an actual curse word",
"The name of the reindeer Santa didn't pick to pull his sleigh",
"What's the first thing you would do if you could time travel?",
"The name of a pizza place you should never order from",
"A not-very-scary name for a pirate",
"Come up with a name for a beer made especially for monkeys",
"The best thing about living in an igloo",
"The worst way to be murdered",
"Something you shouldn't get your significant other for Valentine's Day",
"A dangerous thing to do while driving",
"Something you shouldn't wear to a job interview",
"The #1 reason penguins can't fly",
"Using only two words, a new state motto for Texas",
"The hardest thing about being Batman",
"A great way to kill time at work",
"Come up with a really bad TV show that starts with Baby",
"Why does the Tower of Pisa lean?",
"What's wrong with these kids today?",
"A great new invention that starts with Automatic",
"Come up with a really bad football penalty that begins with Intentional",
"A Starbucks coffee that should never exist",
"There's Gryffindor, Ravenclaw, Slytherin, and Hufflepuff, but what's the Hogwarts house few "
"have ever heard of?",
"The worst words to say for the opening of a eulogy at a funeral",
"Something you should never use as a scarf",
"Invent a holiday that you think everyone would enjoy",
"The best news you could get today",
"Usually, it's bacon,lettuce and tomato, but come up with a BLT you wouldn't want to eat",
"The worst thing you could stuff a bed mattress with",
"A great opening line to start a conversation with a stranger at a party",
"Something you would like to fill a swimming pool with",
"If you were allowed to name someone else's baby any weird thing you wanted, "
"what would you name it?",
"You know you're in for a bad taxi ride when _____",
"The terrible fate of the snowman Olaf in a director's cut of 'Frozen'",
"Sometimes, after a long day, you just need to ______",
"The worst way to spell Mississippi",
"Give me one good reason why I shouldn't spank you right now",
"The best pick-up line for an elderly singles mixer",
"A good stage name for a chimpanzee stripper",
"The best place to bury all those bodies",
"One place a finger shouldn't go",
"Come up with a name for the most difficult yoga pose known to mankind",
"What's lurking under your bed when you sleep?",
"The name of a canine comedy club with puppy stand-up comedians",
"A vanity license plate a jerk in an expensive car would get",
"A good fake name to use when checking into a hotel",
"A good catchphrase to yell every time you finish pooping",
"Your personal catchphrase if you were on one of those 'Real Housewives' shows",
"The Katy Perry Super Bowl halftime show would have been better with _____",
"Okay... fine! What do YOU want to talk about then?!!!",
"Miller Lite beer would make a lot of money if they came up with a beer called Miller Lite _____",
"A terrible name for a clown",
"An inappropriate thing to do at a cemetery",
"The last thing you see before blowing up",
"The best way to impress a Martian",
"Your final words before you were burned in Salem as a witch",
"A great president from a cartoon series would be: ____",
"I didn't mean to kill him! I just ___",
"A lesser talked about room in the White House",
"The easiest way to make a gamer mad",
"As Shakespeare said, '_____'",
"The name of the guy who invented Jazz",
"A movie guaranteed to be a hit with necrophiliacs",
"The name of a teacher you know you'd instantly hate",
"You have 6 words to anger an entire group of people",
"Something squirrels probably do when no one is looking",
"The name of a font nobody would ever use",
"Something you should never put on an open wound",
"The best way to keep warm on a cold winter night",
"What would make baseball more entertaining to watch?",
"The best thing about going to prison is ___",
"The best title for a new national anthem for the USA ",
"Come up with the name of book that would sell a million copies, immediately",
"What would you do if you were left alone in the White House for an hour?",
"The name of the reindeer Santa didn't pick to pull his sleigh",
"What's the first thing you would do if you could time travel?",
"The name of a pizza place you should never order from",
"Using only two words, a new state motto for Texas",
"The hardest thing about being Batman",
"Come up with a really bad TV show that starts with Baby",
"A great new invention that starts with Automatic",
"Come up with a really bad football penalty that begins with Intentional",
"A Starbucks coffee that should never exist",
"A terrible name for a clown",
"An inappropriate thing to do at a cemetery",
"Like chicken fingers or chicken poppers, a new appetizer name for your fun, theme restaurant: chicken _____",
"Thing you'd be most surprised to have a dentist to find in your mouth",
"Rename Winnie-the-Pooh to something more appropriate/descriptive",
"Name the sequel to Titanic if there were one.",
"What you'd guess is an unadvertised ingredient in most hot dogs",
"Name your new haircutting establishment",
"Something that would make an awful hat",
"How many monkeys is too many monkeys?",
"Something you'd be surprised to see a donkey do",
"The title you'd come up with if you were writing the Olympics theme song",
"Something you should never say to your mother",
"Where's the best place to hide from the shadow monsters?",
"The three ingredients in the worst smoothie ever",
"The most presidential name you can think of (that isn't already the name of a president)",
"A good way to get fired",
"If we can't afford to bury or cremate you, what should we do with your body?",
"A good place to hide boogers",
"Come up with the name for a new TV show with the word Spanky in it",
"A fun trick to play on the Pope",
"Where do you think the beef really is?",
"Something it'd be fun to throw off the Eiffel Tower",
"Write a newspaper headline that will really catch people's attention",
"The worst job title that starts with Assistant",
"The name of a new perfume by Betty White",
"The worst name for a robot",
"The most embarrassing name for a cat",
"The worst thing you could discover in your burrito",
"One thing never to do on a first date",
"Ozzy Osbourne's Twitter password, probably",
"Who let the dogs out?",
"What do vegans taste like?",
"An item NOT found in Taylor Swift's purse",
"Name a new reggae band made up entirely of chickens",
"Name a children's book by someone who hates children",
"The name of your new plumbing company",
"Make up a word that describes the sound of farting into a bowl of mac & cheese",
"A new ice cream flavor that no one would ever order",
"Name a new movie starring a talking goat who is president of the United States",
"Something that would not work well as a dip for tortilla chips",
"The name of a clothing store for overweight leprechauns",
"Something upsetting you could say to the cable guy as he installs your television service",
"The worst thing that could jump out of a bachelor party cake",
"Come up with a name for a new beer marketed toward babies",
"A terrible theme for a high school prom",
"Something you should not whisper to your grandmother",
"A terrible name for a 1930s gangster",
"Brand name of a bottled water sold in the land of Oz",
"A fun thing to yell as a baby is being born",
"The worst family secret that could come out over Thanksgiving dinner",
"The name of a toilet paper specifically designed for the Queen of England",
"A lawn decoration sure to make the neighbors mad",
"The worst thing to say when trying to adopt a pet",
"A good name for an erotic bakery",
"People wouldn't respect He-Man as much if, to gain his power, he held up his sword and shouted ____________________",
"Fun thing to do if locked in the mall overnight",
"The worst person to receive a sponge bath from",
"Pants would be a whole lot better if they ___",
"The most awesome Guinness World Record to break",
"A little-known way to get gum out of your hair",
"It's bad to be buried alive. It's worse to be buried alive with BLANK."
"Something that would not work as well as skis",
"A rejected title for The Good, The Bad and the Ugly wasThe Good, the Bad and the BLANK",
"A rejected name for a ship in the U.S. Naval Fleet: the USS BLANK",
"What to say to get out of jury duty",
"What the Statue of Liberty is hiding beneath that robe",
"dude just type something funny bro",
"Take any well-known restaurant and slightly change its name to something inappropriate",
"Little-known fact: The government allows peanut butter to contain up to 10% BLANK",
"A good sign that your house is haunted",
"A bad occupation for a robot to have",
"A sequel to the painting Dogs Playing Poker",
"The Tooth Fairy's other job",
"Little-known fact: A secret area in the White House is the BLANK room",
"An invention by Thomas Edison that never caught on",
"A bad place to skinny-dip",
"What time is it?",
"A birthday present you shouldn't get for your grandmother",
"A short motto everyone should live by",
"Invent a Christmas tradition sure to catch on",
"A bad thing to yell during church/friday khutba",
"The unsexiest thought you can have",
"A good improvement to make to Mt. Rushmore",
"The best way to start your day",
"The worst name for a summer camp",
"Something that's made worse by adding cheese",
"Three things are certain in life: Death, Taxes, and BLANK",
"A faster way to get home from the Land of Oz is to click your heels three times and say BLANK",
"Come up with a name for a rock band made up entirely of baby ducks",
"Something that is currently legal that should be banned",
"A word that should never follow Beef",
"The perfect song to hum on the toilet",
"A bad thing to say to a cop as he writes you a speeding ticket"
]
# ASCII-art gallows for the hangman game, keyed by the number of wrong
# guesses so far (0 = empty gallows .. 6 = fully drawn man / game over).
# Each value is wrapped in Discord triple-backtick fences so it renders as a
# code block in chat. NOTE(review): the art's internal alignment appears to
# have been whitespace-mangled upstream - verify rendering in Discord.
HANGMAN_STATES = {
0: '''```
+---+
| |
|
|
|
|
=========```''',
1: '''```
+---+
| |
O |
|
|
|
=========```''',
2: '''```
+---+
| |
O |
| |
|
|
=========```''',
3: '''```
+---+
| |
O |
/| |
|
|
=========```''',
4: '''```
+---+
| |
O |
/|\ |
|
|
=========```''',
5: '''```
+---+
| |
O |
/|\ |
/ |
|
=========```''',
6: '''```
+---+
| |
O |
/|\ |
/ \ |
|
=========```'''
}
IZLAM_QUIPS = [
"The real reason legion will never find love",
"How yahya will find his second wife",
"This is why the new server was created",
"Why there are so many boomers in this server"
] | 46.624758 | 287 | 0.642024 |
from string import ascii_uppercase
import discord
STAT_NAMES = {
"speed": "SPEED",
"attack": "ATTACK",
"sp_atk": "SPECIAL ATTACK",
"sp_def": "SPECIAL DEFENSE",
"defense": "DEFENSE",
"hp": "HP",
"total": "TOTAL"
}
# Letter <-> position lookup tables: 'A'..'Z' map to 1..26 and back.
ALPHABET_NUMBER = {char: pos for pos, char in enumerate(ascii_uppercase, start=1)}
NUMBER_ALPHABET = {pos: char for char, pos in ALPHABET_NUMBER.items()}
TYPES = {
"normal": "<:normal:715628305541496915>",
"fighting": "<:fighting:715628306015191220>",
"fire": "<:fire:715626721402945567>",
"water": "<:water:715629330621005915>",
"grass": "<:grass:715629330830721104>",
"ground": "<:ground:715626721772175472>",
"rock": "<:rock:715626723126804532>",
"steel": "<:steel:715629330637520988>",
"fairy": "<:fairy:715629865071542328>",
"ghost": "<:ghost:715630366769021038>",
"dark": "<:dark:715630366651711549>",
"poison": "<:poison:715628305671389285>",
"dragon": " <:dragon:715630390597124177>",
"electric": "<:electric:715626721399013489>",
"ice": "<:ice:715630367687573774>",
"flying": "<:flying:715631197140811847>",
"bug": "<:bug:715627787427381319>",
"psychic": "<:psychic:715628305763663923>"
}
REGIONS = {
"europe": "Europe",
"us-east": "US East",
"india": "India",
"brazil": "Brazil",
"japan": "Japan",
"russia": "Russia",
"singapore": "Singapore",
"southafrica": "South Africa",
"sydney": "Sydney",
"hongkong": "Hong Kong",
"us-central": "US Central",
"us-south": "US South",
"us-west": "US West",
'eu-west': "EU West",
"eu-north": "EU North",
"eu-south": "EU South",
"eu-east": "EU East"
}
INDICATOR_LETTERS = {'1': '1️⃣', '2': '2️⃣', '3': '3️⃣', '4': '4️⃣', '5': '5️⃣', '6': '6️⃣', '7': '7️⃣', '8': '8️⃣',
'9': '9️⃣', '0': '0️⃣'}
sl = {discord.Status.online: "<:online:726127263401246832>",
discord.Status.offline: "<:offline:726127263203983440>",
discord.Status.idle: "<:idle:726127192165187594>",
discord.Status.dnd: "<:dnd:726127192001478746>"
}
REDDIT_EMOJIS = {
'Alpha Tester': '<:alphauser:742837407623151738>',
'Bellwether': '<:bellwether:742837613228064912>',
'Best Comment': '<:bestcomment:742837968179298355>',
'Best Link': '<:bestlink:742838163990642809>',
'ComboCommenter': '<:combocommenter:742838391078650017>',
'ComboLinker': '<:combolinker:742838391229644900>',
'Inciteful Comment': '<:incitefulcomment:742838606976122891>',
'Inciteful Link': '<:incitefullink:742838454999711765>',
'New User': '<:newuser:742838754841985164>',
'Not Forgotten': '<:notforgotten:742838985251881091>',
'Reddit Premium': '<:redditpremium:742839302840516860>',
'RPAN Broadcaster': '<:rpanbroadcaster:742839517689413714>',
'RPAN Viewer': '<:rpanviewer:742839518108844182>',
'Sequence | Editor': '<:sequenceeditor:742839825165713468>',
'Shutterbug': '<:shutterbug:742843728670097559>',
'Verified Email': '<:verifiedemail:742843907099983873>',
'Well-rounded': '<:wellrounded:742844034401173505>',
'Xbox Live': '<:xboxlive:742844235216322651>',
'Extra Life 2019': '<:extralife:742844662347333633>',
'Open Sorcerer': '<:opensorcerer:742844988479766609>',
'One-Year Club': '<:1_oneyear:742846966895083570>',
'Two-Year Club': '<:1_twoyear:742846967213719592>',
'Three-Year Club': '<:1_threeyear:742846966769123359>',
'Four-Year Club': '<:1_fouryear:742846966735831132>',
'Five-Year Club': '<:1_fiveyear:742846966794289212>',
'Six-Year Club': '<:1_sixyear:742846966899277860>',
'Seven-Year Club': '<:1_sevenyear:742846966979100803>',
'Eight-Year Club': '<:1_eightyear:742846938264764476>',
'Nine-Year Club': '<:1_nineyear:742846966630842399>',
'Ten-Year Club': '<:1_tenyear:742846967071375468>',
'Eleven-Year Club': '<:1_elevenyear:742846937992003616>',
'Twelve-Year Club': '<:1_twelveyear:742846967033364480>',
'Thirteen-Year Club': '<:1_thirteenyear:742846966689562625>',
'Gilding I': '<:gilding1:742851900386443286>',
'Gilding II': '<:gilding2:742851900545957990>',
'Gilding III': '<:gilding3:742851900549890088>',
'Gilding IV': '<:gilding4:742851938370060450>',
'Gilding V': '<:gilding5:742852020490338355>',
'Gilding VI': '<:gilding6:742852020808974487>',
'Gilding VII': '<:gilding7:742852020855111781>',
'Gilding VIII': '<:gilding9:742852020980940800>',
'Gilding IX': '<:gilding9:742852020980940800>',
'Gilding X': '<:gilding10:742852216963989646>',
'Gilding XI': '<:gilding11:742852286044438648>'
}
status_mapping = {
discord.ActivityType.listening: "Listening to",
discord.ActivityType.watching: "Watching",
discord.ActivityType.playing: "Playing",
discord.ActivityType.streaming: "Streaming",
discord.ActivityType.custom: "\u200b"
}
badge_mapping = {
"staff": "<:staff:730846674775179394>",
"partner": "<:partner:730846624275759105>",
"hypesquad": "<:hypesquad:730846721235746859>",
"bug_hunter": "<:bug_hunter:730868709274419281>",
"bug_hunter_level_2": "<:bug_hunter_level_2:730874021721276527>",
"hypesquad_bravery": "<:hypesquad_bravery:730851606941270147>",
"hypesquad_brilliance": "<:hypesquad_brilliance:730851606853320765>",
"hypesquad_balance": "<:hypesquad_balance:730851606832087141>",
"early_supporter": "<:early_supporter:730869784337580102>",
"verified_bot_developer": "<:verified_bot_developer:730849897410199572>",
}
audit_actions = {
discord.AuditLogAction.guild_update: "**updated the guild**",
discord.AuditLogAction.channel_update: "**updated channel**",
discord.AuditLogAction.channel_create: "**created channel**",
discord.AuditLogAction.channel_delete: "**deleted channel**",
discord.AuditLogAction.overwrite_create: "**created overwrite**",
discord.AuditLogAction.overwrite_update: "**updated overwrite**",
discord.AuditLogAction.overwrite_delete: "**deleted overwrite**",
discord.AuditLogAction.kick: "**kicked**",
discord.AuditLogAction.ban: "**banned**",
discord.AuditLogAction.unban: "**unbanned**",
discord.AuditLogAction.member_role_update: "**updated roles of**",
discord.AuditLogAction.member_move: "**moved member**",
discord.AuditLogAction.member_disconnect: "**disconnected member**",
discord.AuditLogAction.bot_add: "**added bot**",
discord.AuditLogAction.role_create: "**created role**",
discord.AuditLogAction.role_update: "**updated role**",
discord.AuditLogAction.role_delete: "**deleted role**",
discord.AuditLogAction.invite_create: "**created invite**",
discord.AuditLogAction.invite_update: "**updated invite**",
discord.AuditLogAction.invite_delete: "**deleted invite**",
discord.AuditLogAction.webhook_create: "**created webhook**",
discord.AuditLogAction.webhook_delete: "**deleted webhook**",
discord.AuditLogAction.webhook_update: "**updated webhook**",
discord.AuditLogAction.emoji_create: "**created emoji**",
discord.AuditLogAction.emoji_update: "**updated emoji**",
discord.AuditLogAction.emoji_delete: "**deleted emoji**",
discord.AuditLogAction.message_delete: "**deleted message by**",
discord.AuditLogAction.message_pin: "**pinned a message by**",
discord.AuditLogAction.message_unpin: "**unpinned a message by**",
discord.AuditLogAction.message_bulk_delete: "**bulk deleted messages**",
discord.AuditLogAction.integration_create: "**created integration**",
discord.AuditLogAction.integration_delete: "**deleted integration**",
discord.AuditLogAction.integration_update: "**updated integration**",
discord.AuditLogAction.member_update: "**updated member**"
}
engineer_bagdes = ["<:engineer1:732745844339638334>", "<:engineer2:732745844633370684>",
"<:engineer3:732745844716994690>", "<:engineer4:732745844754743306>"]
popular_badges = ['<:popular1:732745781660090459>', '<:popular2:732745781856960634>', '<:popular3:732745782054092840>',
'<:popular4:732745781714354198>']
legend_badges = ['<:legend1:732745816564826212>', '<:legend2:732745816590123041>', '<:legend3:73274581659838',
'<:legend4:732745816758026381>']
ANIMALS = {
'cat': '🐱',
'dog': '🐶',
'koala': '🐨',
'fox': '🦊',
'bird': '🐦',
'birb': '🐦',
'red_panda': '🔴',
'elephant': '🐘',
'panda': '🐼',
'racoon': '🦝',
'kangaroo': '🦘',
'giraffe': '🦒',
'whale': '🐋'
}
EMOTIONS = {
'hug': '<a:hug:748315930685210746>',
'wink': '😉',
'face-palm': '🤦♂️',
'pat': '<:pat:748316152123359322>'
}
# Confirmation text shown for each moderation action; "{0}" is filled in with
# the bot's command prefix at display time (via str.format).
# Fixes two user-facing typos from the original: "addding" -> "adding" and
# the unmute command name "unumute" -> "unmute" (matching the "{0}unban"
# pattern used in the ban description).
INFRACTION_DESCRIPTIONS = {
    "mute": "This will indefinitely mute the user by adding to them the `CyberMute` role, which restricts them from seeing any channels. To unmute them, manually remove the role from them or do `{0}unmute <user>`\nI need **Manage Channels** and **Manage Roles** permissions for this.",
    "kick": "This will kick the user from the server.\nI need the **Kick Members** permission for this.",
    "ban": "This will ban the user from the server. To unban a user, do `{0}unban <user id or username#user discriminator>`"
}
WEAPONS = [
"Sword of Mega Doom",
"Epic Gun",
"Mega Epic Gun",
"Grenade",
"Amazing Bruh Machine",
"Gun Lmao",
"Hyper Epic Gun",
"'Not even trying at this point' Rifle",
"Grand Sword of Chaos",
"Excalibur",
"Master Sword",
"Storm Pegasus",
"Rock Leone",
"Lightning L-Drago"
]
# Prompt list for the quip game command. Adjacent string literals with no
# comma are implicitly concatenated by Python: two long prompts below are
# deliberately wrapped that way, but a MISSING comma had also fused the
# "buried alive with BLANK." prompt and the "skis" prompt into one entry.
# Fixed by restoring the comma; all other entries are unchanged.
QUIPS = ["What two words would passengers never want to hear a pilot say?",
    "You would never go on a roller coaster called _____",
    "The secret to a happy life",
    "If a winning coach gets Gatorade dumped on his head, what should get dumped on the losing coach?",
    "You should never give alcohol to ______",
    "Everyone knows that monkeys hate ______",
    "The biggest downside to living in Hell",
    "The worst thing for an evil witch to turn you into",
    "The Skittles flavor that just missed the cut",
    "On your wedding night, it would be horrible to find out that the person you married is ____",
    "A name for a really bad Broadway musical",
    "The first thing you would do after winning the lottery",
    "Why ducks really fly south in the winter",
    "America's energy crisis would be over if we made cars that ran on ______",
    "It's incredibly rude to ____ with your mouth open",
    "What's actually causing global warming?",
    "A name for a brand of designer adult diapers",
    "Name a TV drama that's about a vampire doctor",
    "Something squirrels probably do when no one is looking",
    "The crime you would commit if you could get away with it",
    "What's the Mona Lisa smiling about?",
    "A terrible name for a cruise ship",
    "What FDR meant to say was We have nothing to fear, but _____",
    "Come up with a title for an adult version of any classic video game",
    "The name of a font nobody would ever use",
    "Something you should never put on an open wound",
    "Scientists say erosion, but we all know the Grand Canyon was actually made by _____",
    "The real reason the dinosaurs died",
    "Come up with the name of a country that doesn't exist",
    "The best way to keep warm on a cold winter night",
    "A college major you don't see at many universities",
    "What would make baseball more entertaining to watch?",
    "The best thing about going to prison",
    "The best title for a new national anthem for the USA",
    "Come up with the name of book that would sell a million copies, immediately",
    "What would you do if you were left alone in the White House for an hour?",
    "Invent a family-friendly replacement word that you could say instead of an actual curse word",
    "The name of the reindeer Santa didn't pick to pull his sleigh",
    "What's the first thing you would do if you could time travel?",
    "The name of a pizza place you should never order from",
    "A not-very-scary name for a pirate",
    "Come up with a name for a beer made especially for monkeys",
    "The best thing about living in an igloo",
    "The worst way to be murdered",
    "Something you shouldn't get your significant other for Valentine's Day",
    "A dangerous thing to do while driving",
    "Something you shouldn't wear to a job interview",
    "The #1 reason penguins can't fly",
    "Using only two words, a new state motto for Texas",
    "The hardest thing about being Batman",
    "A great way to kill time at work",
    "Come up with a really bad TV show that starts with Baby",
    "Why does the Tower of Pisa lean?",
    "What's wrong with these kids today?",
    "A great new invention that starts with Automatic",
    "Come up with a really bad football penalty that begins with Intentional",
    "A Starbucks coffee that should never exist",
    # Intentional wrap: the next two literals concatenate into one prompt.
    "There's Gryffindor, Ravenclaw, Slytherin, and Hufflepuff, but what's the Hogwarts house few "
    "have ever heard of?",
    "The worst words to say for the opening of a eulogy at a funeral",
    "Something you should never use as a scarf",
    "Invent a holiday that you think everyone would enjoy",
    "The best news you could get today",
    "Usually, it's bacon,lettuce and tomato, but come up with a BLT you wouldn't want to eat",
    "The worst thing you could stuff a bed mattress with",
    "A great opening line to start a conversation with a stranger at a party",
    "Something you would like to fill a swimming pool with",
    # Intentional wrap: the next two literals concatenate into one prompt.
    "If you were allowed to name someone else's baby any weird thing you wanted, "
    "what would you name it?",
    "You know you're in for a bad taxi ride when _____",
    "The terrible fate of the snowman Olaf in a director's cut of 'Frozen'",
    "Sometimes, after a long day, you just need to ______",
    "The worst way to spell Mississippi",
    "Give me one good reason why I shouldn't spank you right now",
    "The best pick-up line for an elderly singles mixer",
    "A good stage name for a chimpanzee stripper",
    "The best place to bury all those bodies",
    "One place a finger shouldn't go",
    "Come up with a name for the most difficult yoga pose known to mankind",
    "What's lurking under your bed when you sleep?",
    "The name of a canine comedy club with puppy stand-up comedians",
    "A vanity license plate a jerk in an expensive car would get",
    "A good fake name to use when checking into a hotel",
    "A good catchphrase to yell every time you finish pooping",
    "Your personal catchphrase if you were on one of those 'Real Housewives' shows",
    "The Katy Perry Super Bowl halftime show would have been better with _____",
    "Okay... fine! What do YOU want to talk about then?!!!",
    "Miller Lite beer would make a lot of money if they came up with a beer called Miller Lite _____",
    "A terrible name for a clown",
    "An inappropriate thing to do at a cemetery",
    "The last thing you see before blowing up",
    "The best way to impress a Martian",
    "Your final words before you were burned in Salem as a witch",
    "A great president from a cartoon series would be: ____",
    "I didn't mean to kill him! I just ___",
    "A lesser talked about room in the White House",
    "The easiest way to make a gamer mad",
    "As Shakespeare said, '_____'",
    "The name of the guy who invented Jazz",
    "A movie guaranteed to be a hit with necrophiliacs",
    "The name of a teacher you know you'd instantly hate",
    "You have 6 words to anger an entire group of people",
    "Something squirrels probably do when no one is looking",
    "The name of a font nobody would ever use",
    "Something you should never put on an open wound",
    "The best way to keep warm on a cold winter night",
    "What would make baseball more entertaining to watch?",
    "The best thing about going to prison is ___",
    "The best title for a new national anthem for the USA ",
    "Come up with the name of book that would sell a million copies, immediately",
    "What would you do if you were left alone in the White House for an hour?",
    "The name of the reindeer Santa didn't pick to pull his sleigh",
    "What's the first thing you would do if you could time travel?",
    "The name of a pizza place you should never order from",
    "Using only two words, a new state motto for Texas",
    "The hardest thing about being Batman",
    "Come up with a really bad TV show that starts with Baby",
    "A great new invention that starts with Automatic",
    "Come up with a really bad football penalty that begins with Intentional",
    "A Starbucks coffee that should never exist",
    "A terrible name for a clown",
    "An inappropriate thing to do at a cemetery",
    "Like chicken fingers or chicken poppers, a new appetizer name for your fun, theme restaurant: chicken _____",
    "Thing you'd be most surprised to have a dentist to find in your mouth",
    "Rename Winnie-the-Pooh to something more appropriate/descriptive",
    "Name the sequel to Titanic if there were one.",
    "What you'd guess is an unadvertised ingredient in most hot dogs",
    "Name your new haircutting establishment",
    "Something that would make an awful hat",
    "How many monkeys is too many monkeys?",
    "Something you'd be surprised to see a donkey do",
    "The title you'd come up with if you were writing the Olympics theme song",
    "Something you should never say to your mother",
    "Where's the best place to hide from the shadow monsters?",
    "The three ingredients in the worst smoothie ever",
    "The most presidential name you can think of (that isn't already the name of a president)",
    "A good way to get fired",
    "If we can't afford to bury or cremate you, what should we do with your body?",
    "A good place to hide boogers",
    "Come up with the name for a new TV show with the word Spanky in it",
    "A fun trick to play on the Pope",
    "Where do you think the beef really is?",
    "Something it'd be fun to throw off the Eiffel Tower",
    "Write a newspaper headline that will really catch people's attention",
    "The worst job title that starts with Assistant",
    "The name of a new perfume by Betty White",
    "The worst name for a robot",
    "The most embarrassing name for a cat",
    "The worst thing you could discover in your burrito",
    "One thing never to do on a first date",
    "Ozzy Osbourne's Twitter password, probably",
    "Who let the dogs out?",
    "What do vegans taste like?",
    "An item NOT found in Taylor Swift's purse",
    "Name a new reggae band made up entirely of chickens",
    "Name a children's book by someone who hates children",
    "The name of your new plumbing company",
    "Make up a word that describes the sound of farting into a bowl of mac & cheese",
    "A new ice cream flavor that no one would ever order",
    "Name a new movie starring a talking goat who is president of the United States",
    "Something that would not work well as a dip for tortilla chips",
    "The name of a clothing store for overweight leprechauns",
    "Something upsetting you could say to the cable guy as he installs your television service",
    "The worst thing that could jump out of a bachelor party cake",
    "Come up with a name for a new beer marketed toward babies",
    "A terrible theme for a high school prom",
    "Something you should not whisper to your grandmother",
    "A terrible name for a 1930s gangster",
    "Brand name of a bottled water sold in the land of Oz",
    "A fun thing to yell as a baby is being born",
    "The worst family secret that could come out over Thanksgiving dinner",
    "The name of a toilet paper specifically designed for the Queen of England",
    "A lawn decoration sure to make the neighbors mad",
    "The worst thing to say when trying to adopt a pet",
    "A good name for an erotic bakery",
    "People wouldn't respect He-Man as much if, to gain his power, he held up his sword and shouted ____________________",
    "Fun thing to do if locked in the mall overnight",
    "The worst person to receive a sponge bath from",
    "Pants would be a whole lot better if they ___",
    "The most awesome Guinness World Record to break",
    "A little-known way to get gum out of your hair",
    # Comma restored below: without it these two prompts were one fused entry.
    "It's bad to be buried alive. It's worse to be buried alive with BLANK.",
    "Something that would not work as well as skis",
    # NOTE(review): "wasThe" below looks like a missing space in the source
    # prompt text - left as-is pending confirmation of the intended wording.
    "A rejected title for The Good, The Bad and the Ugly wasThe Good, the Bad and the BLANK",
    "A rejected name for a ship in the U.S. Naval Fleet: the USS BLANK",
    "What to say to get out of jury duty",
    "What the Statue of Liberty is hiding beneath that robe",
    "dude just type something funny bro",
    "Take any well-known restaurant and slightly change its name to something inappropriate",
    "Little-known fact: The government allows peanut butter to contain up to 10% BLANK",
    "A good sign that your house is haunted",
    "A bad occupation for a robot to have",
    "A sequel to the painting Dogs Playing Poker",
    "The Tooth Fairy's other job",
    "Little-known fact: A secret area in the White House is the BLANK room",
    "An invention by Thomas Edison that never caught on",
    "A bad place to skinny-dip",
    "What time is it?",
    "A birthday present you shouldn't get for your grandmother",
    "A short motto everyone should live by",
    "Invent a Christmas tradition sure to catch on",
    "A bad thing to yell during church/friday khutba",
    "The unsexiest thought you can have",
    "A good improvement to make to Mt. Rushmore",
    "The best way to start your day",
    "The worst name for a summer camp",
    "Something that's made worse by adding cheese",
    "Three things are certain in life: Death, Taxes, and BLANK",
    "A faster way to get home from the Land of Oz is to click your heels three times and say BLANK",
    "Come up with a name for a rock band made up entirely of baby ducks",
    "Something that is currently legal that should be banned",
    "A word that should never follow Beef",
    "The perfect song to hum on the toilet",
    "A bad thing to say to a cop as he writes you a speeding ticket"
]
HANGMAN_STATES = {
0: '''```
+---+
| |
|
|
|
|
=========```''',
1: '''```
+---+
| |
O |
|
|
|
=========```''',
2: '''```
+---+
| |
O |
| |
|
|
=========```''',
3: '''```
+---+
| |
O |
/| |
|
|
=========```''',
4: '''```
+---+
| |
O |
/|\ |
|
|
=========```''',
5: '''```
+---+
| |
O |
/|\ |
/ |
|
=========```''',
6: '''```
+---+
| |
O |
/|\ |
/ \ |
|
=========```'''
}
# Server-specific in-joke prompts for the izlam quip command; presumably one
# is picked at random by the caller - verify against the command code.
IZLAM_QUIPS = [
"The real reason legion will never find love",
"How yahya will find his second wife",
"This is why the new server was created",
"Why there are so many boomers in this server"
]
f722d23bab0f457ad593aee01f86710874f7cd98 | 2,318 | py | Python | python/oxford_asl/hadamard_decode.py | ibme-qubic/oxford_asl | 1b7e9e8f3e8e82b3eb9fa98b232ef93ea4e2348d | [
"Apache-2.0"
] | 6 | 2017-06-27T18:59:59.000Z | 2019-12-30T08:27:47.000Z | python/oxford_asl/hadamard_decode.py | ibme-qubic/oxford_asl | 1b7e9e8f3e8e82b3eb9fa98b232ef93ea4e2348d | [
"Apache-2.0"
] | 14 | 2017-08-22T12:09:11.000Z | 2020-12-16T22:32:07.000Z | python/oxford_asl/hadamard_decode.py | ibme-qubic/oxford_asl | 1b7e9e8f3e8e82b3eb9fa98b232ef93ea4e2348d | [
"Apache-2.0"
] | 6 | 2018-05-03T14:44:54.000Z | 2020-08-26T10:31:22.000Z | #!/usr/bin/env python
"""
Does naive decoding of Hadamard time-encoded data
Assumes that the Hadamard blocks are in adjacent volumes in the data
and simply decodes each block, outputting the same order
"""
import sys
import argparse
from fsl.data.image import Image
import numpy as np
import scipy.linalg
class ArgumentParser(argparse.ArgumentParser):
    """Command line parser for the Hadamard decoding script.

    All three options are required. ``--hadamard-size`` was previously
    optional, but omitting it made ``main()`` crash later with a TypeError
    when formatting ``None`` with ``%i`` - making it required turns that
    crash into a proper argparse usage error.
    """
    def __init__(self, **kwargs):
        # add_help=False: this parser is embedded in a larger tool that
        # provides its own help handling.
        argparse.ArgumentParser.__init__(self, prog="oxford_asl_hadamard_decode", add_help=False, **kwargs)
        self.add_argument("--input", required=True, help="Input file name")
        self.add_argument("--output", required=True, help="Output file name")
        self.add_argument("--hadamard-size", type=int, required=True, help="Hadamard matrix size")
def main():
    """Entry point: decode a 4D Hadamard time-encoded ASL image.

    Parses the command line, reads the input image, applies the naive
    Hadamard decoding (adjacent volumes form one Hadamard block) and
    writes the decoded image to the output file.  Exits with status 1 on
    missing arguments or inconsistent input data.
    """
    options = ArgumentParser().parse_args()
    if options.input is None:
        sys.stderr.write("Input file name must be specified")
        sys.exit(1)
    if options.output is None:
        sys.stderr.write("Output file name must be specified")
        sys.exit(1)
    if options.hadamard_size is None:
        # Without this guard the %i format below raises TypeError
        sys.stderr.write("Hadamard matrix size must be specified")
        sys.exit(1)

    print("Hadamard decoding\n")
    print(" - Input image: %s (assuming Hadamard cycles are in adjacent volumes)" % options.input)
    print(" - Hadamard matrix size: %i" % options.hadamard_size)
    print(" - Output image: %s" % options.output)

    input_img = Image(options.input)
    # NOTE: scipy.linalg.hadamard raises ValueError unless the size is a
    # power of 2 — the error propagates to the user unchanged.
    had_matrix = scipy.linalg.hadamard(options.hadamard_size)

    shape = input_img.shape
    if len(shape) != 4:
        sys.stderr.write("Input image must be 4D")
        sys.exit(1)
    elif shape[3] % options.hadamard_size != 0:
        sys.stderr.write("Input image has %i volumes, inconsistent with Hadamard matrix size of %i" % (shape[3], options.hadamard_size))
        sys.exit(1)

    nvols = shape[3]
    had_size = options.hadamard_size
    input_data = input_img.data
    # Each block of had_size volumes decodes to had_size-1 sub-bolus volumes
    output_data = np.zeros(list(shape[:3]) + [(had_size-1)*nvols//had_size])
    for vol in range(shape[3]):
        rpt_idx = vol // had_size   # which Hadamard block this volume belongs to
        had_idx = vol % had_size    # row of the Hadamard matrix for this volume
        for sub_bolus in range(had_size-1):
            # Column 0 of the matrix is the constant column, so the
            # sub-boluses use columns 1..had_size-1
            output_data[..., rpt_idx*(had_size-1)+sub_bolus] += had_matrix[had_idx, sub_bolus+1]*input_data[..., vol]

    output_img = Image(output_data, header=input_img.header)
    output_img.save(options.output)
    print("\nDONE - Output in %s" % options.output)
# Allow running the module directly as a script
if __name__ == "__main__":
    main()
| 36.21875 | 136 | 0.680328 |
import sys
import argparse
from fsl.data.image import Image
import numpy as np
import scipy.linalg
class ArgumentParser(argparse.ArgumentParser):
def __init__(self, **kwargs):
argparse.ArgumentParser.__init__(self, prog="oxford_asl_hadamard_decode", add_help=False, **kwargs)
self.add_argument("--input", required=True, help="Input file name")
self.add_argument("--output", required=True, help="Output file name")
self.add_argument("--hadamard-size", type=int, help="Hadamard matrix size")
def main():
options = ArgumentParser().parse_args()
if options.input is None:
sys.stderr.write("Input file name must be specified")
sys.exit(1)
if options.output is None:
sys.stderr.write("Output file name must be specified")
sys.exit(1)
print("Hadamard decoding\n")
print(" - Input image: %s (assuming Hadamard cycles are in adjacent volumes)" % options.input)
print(" - Hadamard matrix size: %i" % options.hadamard_size)
print(" - Output image: %s" % options.output)
input_img = Image(options.input)
had_matrix = scipy.linalg.hadamard(options.hadamard_size)
shape = input_img.shape
if len(shape) != 4:
sys.stderr.write("Input image must be 4D")
sys.exit(1)
elif shape[3] % options.hadamard_size != 0:
sys.stderr.write("Input image has %i volumes, inconsistent with Hadamard matrix size of %i" % (shape[3], options.hadamard_size))
sys.exit(1)
nvols = shape[3]
had_size = options.hadamard_size
input_data = input_img.data
output_data = np.zeros(list(shape[:3]) + [(had_size-1)*nvols//had_size])
for vol in range(shape[3]):
rpt_idx = int(vol / had_size)
had_idx = vol % had_size
for sub_bolus in range(had_size-1):
output_data[..., rpt_idx*(had_size-1)+sub_bolus] += had_matrix[had_idx, sub_bolus+1]*input_data[..., vol]
output_img = Image(output_data, header=input_img.header)
output_img.save(options.output)
print("\nDONE - Output in %s" % options.output)
if __name__ == "__main__":
main()
| true | true |
f722d435b7065b9e0bf3ff9e8d71f8097d38b5b2 | 299 | py | Python | examples_old/case17_jinja2/jinja2_vlans.py | josephwhite13/netmiko | c08c5ebb3484383f034e22b9576f88be07525f72 | [
"MIT"
] | 2,833 | 2015-01-04T20:04:10.000Z | 2022-03-31T13:03:17.000Z | examples_old/case17_jinja2/jinja2_vlans.py | josephwhite13/netmiko | c08c5ebb3484383f034e22b9576f88be07525f72 | [
"MIT"
] | 2,137 | 2015-01-28T17:33:41.000Z | 2022-03-31T18:41:21.000Z | examples_old/case17_jinja2/jinja2_vlans.py | georgesnow/netmiko | 185f51ca5c24ea2977d6ca31db1ae263aa72cc12 | [
"MIT"
] | 1,367 | 2015-01-04T20:04:10.000Z | 2022-03-31T19:13:28.000Z | #!/usr/bin/env python
from __future__ import print_function, unicode_literals
import jinja2
# Render a simple VLAN configuration snippet from a Jinja2 template and
# print the result.
template_vars = {"vlan_id": 400, "vlan_name": "red400"}
vlan_template = "\nvlan {{ vlan_id }}\n  name {{ vlan_name }}\n"
print(jinja2.Template(vlan_template).render(template_vars))
| 19.933333 | 55 | 0.732441 |
from __future__ import print_function, unicode_literals
import jinja2
template_vars = {"vlan_id": 400, "vlan_name": "red400"}
vlan_template = """
vlan {{ vlan_id }}
name {{ vlan_name }}
"""
template = jinja2.Template(vlan_template)
print(template.render(template_vars))
| true | true |
f722d560e21b8fe61a176e40ec865d31c15c9b16 | 1,771 | py | Python | mars/tensor/arithmetic/fix.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/tensor/arithmetic/fix.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/tensor/arithmetic/fix.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode="unary")
class TensorFix(TensorUnaryOp):
    """Tensor operand for element-wise ``np.fix`` (round toward zero)."""
    _op_type_ = OperandDef.FIX
    _func_name = "fix"
@infer_dtype(np.fix)
def fix(x, out=None, **kwargs):
    """
    Round to nearest integer towards zero.

    Rounds a tensor of floats element-wise to the nearest integer toward
    zero; the rounded values are returned as floats.

    Parameters
    ----------
    x : array_like
        Tensor of floats to be rounded.
    out : Tensor, optional
        Tensor into which the result is placed.

    Returns
    -------
    out : Tensor of floats
        The rounded values.

    See Also
    --------
    trunc, floor, ceil
    around : Round to given number of decimals

    Examples
    --------
    >>> import mars.tensor as mt

    >>> mt.fix(3.14).execute()
    3.0
    >>> mt.fix([2.1, 2.9, -2.1, -2.9]).execute()
    array([ 2.,  2., -2., -2.])
    """
    return TensorFix(**kwargs)(x, out=out)
| 25.3 | 74 | 0.659514 |
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode="unary")
class TensorFix(TensorUnaryOp):
    """Tensor operand for element-wise ``np.fix`` (round toward zero)."""
    _op_type_ = OperandDef.FIX
    _func_name = "fix"
@infer_dtype(np.fix)
def fix(x, out=None, **kwargs):
    """Round *x* element-wise to the nearest integer toward zero.

    Parameters
    ----------
    x : array_like
        Tensor of floats to round.
    out : Tensor, optional
        Output tensor.

    Returns
    -------
    Tensor of floats holding the rounded values.
    """
    op = TensorFix(**kwargs)
    return op(x, out=out)
| true | true |
f722d5dae4dfff62be13ac94c398cfdaef8e71f7 | 1,158 | py | Python | Chapter02/1194029 Nur Ikhsani Suwandy Futri 3a D4 TI studi_kasus/Event.py | nurikhsanisf/Python-Pararel_SISTER | b8d424cc12775c101222cdd332a497066a56dbbb | [
"MIT"
] | null | null | null | Chapter02/1194029 Nur Ikhsani Suwandy Futri 3a D4 TI studi_kasus/Event.py | nurikhsanisf/Python-Pararel_SISTER | b8d424cc12775c101222cdd332a497066a56dbbb | [
"MIT"
] | null | null | null | Chapter02/1194029 Nur Ikhsani Suwandy Futri 3a D4 TI studi_kasus/Event.py | nurikhsanisf/Python-Pararel_SISTER | b8d424cc12775c101222cdd332a497066a56dbbb | [
"MIT"
] | null | null | null | import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
items = []
event = threading.Event()
class Pasien(threading.Thread):
    """Consumer thread: takes patients from the shared ``items`` list.

    Waits for ``event`` to be set by the producer (Bidan) before popping
    an item.  The loop is bounded to the 5 items Bidan produces: the
    original ``while True`` blocked forever on ``event.wait()`` after the
    producer finished, so ``t2.join()`` in the main block never returned
    and the script hung.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self):
        for _ in range(5):  # one iteration per item produced by Bidan
            time.sleep(2)
            event.wait()
            item = items.pop()
            logging.info('Pasien notify: {} pasien menuju ruangan bidan {}'\
                .format(item, self.name))
class Bidan(threading.Thread):
    """Producer thread: generates 5 random patient ids for the consumer.

    Every 2 seconds a random patient id is appended to the shared
    ``items`` list, then ``event`` is set (waking any waiting Pasien
    thread) and immediately cleared again.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self):
        for i in range(5):
            time.sleep(2)
            item = random.randint(0, 100)
            items.append(item)
            # The original passed self.name as a second format() argument,
            # but the message has only one placeholder; the extra argument
            # was silently ignored, so dropping it leaves output unchanged.
            logging.info('Bidan notify: Bidan mengecek kandungan pasien {}'\
                .format(item))
            event.set()
            event.clear()
if __name__ == "__main__":
    # Start the producer (Bidan) and consumer (Pasien) threads and block
    # until both finish.
    t1 = Bidan()
    t2 = Pasien()
    t1.start()
    t2.start()
    t1.join()
    t2.join()
| 24.125 | 76 | 0.560449 | import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
items = []
event = threading.Event()
class Pasien(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
while True:
time.sleep(2)
event.wait()
item = items.pop()
logging.info('Pasien notify: {} pasien menuju ruangan bidan {}'\
.format(item, self.name))
class Bidan(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
for i in range(5):
time.sleep(2)
item = random.randint(0, 100)
items.append(item)
logging.info('Bidan notify: Bidan mengecek kandungan pasien {}'\
.format(item, self.name))
event.set()
event.clear()
if __name__ == "__main__":
t1 = Bidan()
t2 = Pasien()
t1.start()
t2.start()
t1.join()
t2.join()
| true | true |
f722d62715166543e56dacac4436571f8c2ee94a | 49 | py | Python | smartlight_datastore/config.py | ande7590/rpi-smart-lights | 599cefb1f3aa63d85b33ab3f8a379faba4eb178c | [
"MIT"
] | null | null | null | smartlight_datastore/config.py | ande7590/rpi-smart-lights | 599cefb1f3aa63d85b33ab3f8a379faba4eb178c | [
"MIT"
] | null | null | null | smartlight_datastore/config.py | ande7590/rpi-smart-lights | 599cefb1f3aa63d85b33ab3f8a379faba4eb178c | [
"MIT"
] | null | null | null | DB_CONN_STR = 'sqlite:///event_repository.sqlite' | 49 | 49 | 0.795918 | DB_CONN_STR = 'sqlite:///event_repository.sqlite' | true | true |
f722d630618d00bd8ba8c171b7e64d05ff0083f7 | 1,884 | py | Python | testproject/shop/models.py | excitedleigh/lorikeet | fe99cf4557736891f2a0e951f0f748074d5e02d4 | [
"MIT"
] | 6 | 2020-02-15T19:22:30.000Z | 2020-12-27T10:58:58.000Z | testproject/shop/models.py | excitedleigh/lorikeet | fe99cf4557736891f2a0e951f0f748074d5e02d4 | [
"MIT"
] | 79 | 2019-12-29T22:13:51.000Z | 2022-03-30T23:12:37.000Z | testproject/shop/models.py | excitedleigh/lorikeet | fe99cf4557736891f2a0e951f0f748074d5e02d4 | [
"MIT"
] | 2 | 2019-12-29T22:06:19.000Z | 2020-08-27T20:30:47.000Z | from decimal import ROUND_DOWN, Decimal
from django.db import models
from lorikeet.exceptions import PaymentError
from lorikeet.models import (
Adjustment,
DeliveryAddress,
LineItem,
Payment,
PaymentMethod,
)
# (code, human-readable name) pairs used as Django CharField choices,
# covering all Australian states and territories.
AUSTRALIAN_STATES = (
    ("NSW", "New South Wales"),
    ("VIC", "Victoria"),
    ("QLD", "Queensland"),
    ("WA", "Western Australia"),
    ("SA", "South Australia"),
    ("TAS", "Tasmania"),
    ("ACT", "Australian Capital Territory"),
    ("NT", "Northern Territory"),
)
class Product(models.Model):
    """A purchasable product with a display name and a unit price."""
    name = models.CharField(max_length=255)
    # Exact currency amount: 2 decimal places, up to 5 integer digits
    unit_price = models.DecimalField(max_digits=7, decimal_places=2)
class MyLineItem(LineItem):
    """Cart line item: a product together with an ordered quantity."""

    product = models.ForeignKey(Product, on_delete=models.PROTECT)
    quantity = models.PositiveSmallIntegerField()

    def get_total(self):
        """Return the line total (unit price times quantity)."""
        return self.product.unit_price * self.quantity
class AustralianDeliveryAddress(DeliveryAddress):
    """Australian-format delivery address (state from AUSTRALIAN_STATES)."""
    addressee = models.CharField(max_length=255)
    address = models.TextField()
    suburb = models.CharField(max_length=255)
    state = models.CharField(max_length=3, choices=AUSTRALIAN_STATES)
    postcode = models.CharField(max_length=4)
class PipeCard(PaymentMethod):
    """Toy payment method identified by an opaque card id.

    Any card whose id ends in "9" always raises PaymentError, giving a
    deterministic failure case.
    """

    card_id = models.CharField(max_length=30)

    def make_payment(self, order, amount):
        """Charge *amount* to this card, returning the created PipePayment."""
        if not self.card_id.endswith("9"):
            return PipePayment.objects.create(method=self, amount=amount)
        raise PaymentError("Insufficient funds")
class PipePayment(Payment):
    """Record of a successful PipeCard charge."""
    amount = models.DecimalField(max_digits=7, decimal_places=2)
class CartDiscount(Adjustment):
    """Percentage discount applied to the cart subtotal."""

    percentage = models.PositiveSmallIntegerField()

    def get_total(self, subtotal):
        """Return the (negative) discount amount for *subtotal*.

        Raises
        ------
        TypeError
            If *subtotal* is not a ``Decimal`` — mixing floats into the
            Decimal arithmetic below would fail later with a far less
            helpful message.
        """
        if not isinstance(subtotal, Decimal):
            # Bug fix: the original raised TypeError(subtotal), whose
            # message is just the offending value; name the problem.
            raise TypeError(
                "subtotal must be a Decimal, got %s" % type(subtotal).__name__)
        discount = -subtotal * self.percentage / 100
        # ROUND_DOWN on a negative value rounds toward zero, so the
        # discount is never larger than advertised.
        return discount.quantize(Decimal(".01"), rounding=ROUND_DOWN)
| 28.119403 | 69 | 0.704352 | from decimal import ROUND_DOWN, Decimal
from django.db import models
from lorikeet.exceptions import PaymentError
from lorikeet.models import (
Adjustment,
DeliveryAddress,
LineItem,
Payment,
PaymentMethod,
)
AUSTRALIAN_STATES = (
("NSW", "New South Wales"),
("VIC", "Victoria"),
("QLD", "Queensland"),
("WA", "Western Australia"),
("SA", "South Australia"),
("TAS", "Tasmania"),
("ACT", "Australian Capital Territory"),
("NT", "Northern Territory"),
)
class Product(models.Model):
name = models.CharField(max_length=255)
unit_price = models.DecimalField(max_digits=7, decimal_places=2)
class MyLineItem(LineItem):
product = models.ForeignKey(Product, on_delete=models.PROTECT)
quantity = models.PositiveSmallIntegerField()
def get_total(self):
return self.quantity * self.product.unit_price
class AustralianDeliveryAddress(DeliveryAddress):
addressee = models.CharField(max_length=255)
address = models.TextField()
suburb = models.CharField(max_length=255)
state = models.CharField(max_length=3, choices=AUSTRALIAN_STATES)
postcode = models.CharField(max_length=4)
class PipeCard(PaymentMethod):
card_id = models.CharField(max_length=30)
def make_payment(self, order, amount):
if self.card_id.endswith("9"):
raise PaymentError("Insufficient funds")
return PipePayment.objects.create(method=self, amount=amount)
class PipePayment(Payment):
amount = models.DecimalField(max_digits=7, decimal_places=2)
class CartDiscount(Adjustment):
percentage = models.PositiveSmallIntegerField()
def get_total(self, subtotal):
if not isinstance(subtotal, Decimal):
raise TypeError(subtotal)
discount = -subtotal * self.percentage / 100
return discount.quantize(Decimal(".01"), rounding=ROUND_DOWN)
| true | true |
f722d6896401e6d535ed8442e5cd591255e59e26 | 1,640 | py | Python | Semester 6/MA 374 (Financial Engg. Lab)/Lab 1/180123062_ABSatyaprakash_q1 1.py | Imperial-lord/IITG | df4233905d2954511d5b16666f0d44cc38b9df90 | [
"MIT"
] | 4 | 2021-03-02T03:58:55.000Z | 2022-03-28T13:38:05.000Z | Semester 6/MA 374 (Financial Engg. Lab)/Lab 1/180123062_ABSatyaprakash_q1 1.py | Imperial-lord/IITG | df4233905d2954511d5b16666f0d44cc38b9df90 | [
"MIT"
] | null | null | null | Semester 6/MA 374 (Financial Engg. Lab)/Lab 1/180123062_ABSatyaprakash_q1 1.py | Imperial-lord/IITG | df4233905d2954511d5b16666f0d44cc38b9df90 | [
"MIT"
] | 4 | 2021-02-04T17:44:23.000Z | 2022-03-28T13:38:09.000Z | # Q.1 Run your program for M = 1, 5, 10, 20, 50, 100, 200, 400
# to get the initial option prices and tabulate them
# Pandas : pip install pandas
# Matplotlib: pip install matplotlib
# Numpy: pip install numpy
# Ipython: pip install ipython
import math
import pandas as pd
from IPython.display import display
# Function to get Option Price for a given M
def getOptionPrice(M, u, d, p, S0=100, K=105, T=5, r=0.05):
    """Price a European call and put on an M-step binomial (CRR) tree.

    Generalised: the market data, previously read from module globals,
    are now keyword parameters defaulting to this script's values, so the
    function can be reused for other contracts without editing globals.

    Parameters
    ----------
    M : int
        Number of time steps.
    u, d : float
        Up/down multiplicative factors per step.
    p : float
        Risk-neutral probability of an up move.
    S0, K, T, r : float
        Spot price, strike, maturity (years) and risk-free rate.

    Returns
    -------
    (call, put) : tuple of float
        Time-0 prices of the European call and put.
    """
    callList = [0.0] * (M + 1)
    putList = [0.0] * (M + 1)
    # Terminal payoffs; index i counts the number of up moves
    for i in range(M + 1):
        terminal = S0 * (u ** i) * (d ** (M - i))
        callList[i] = max(terminal - K, 0)
        putList[i] = max(0, K - terminal)
    # Backward induction; the one-step discount factor is loop-invariant,
    # so compute it once instead of on every node.
    disc = math.exp(-r * T / M)
    for i in range(M):
        for j in range(M - i):
            callList[j] = ((1 - p) * callList[j] + p * callList[j + 1]) * disc
            putList[j] = ((1 - p) * putList[j] + p * putList[j + 1]) * disc
    return callList[0], putList[0]
# Given data
S0 = 100
K = 105
T = 5
r = 0.05
sig = 0.3
MList = [1, 5, 10, 20, 50, 100, 200, 400]

# Lists to store the option prices
callPrices = []
putPrices = []
for M in MList:
    dt = T / M
    u = math.exp(sig * math.sqrt(dt) + (r - sig * sig / 2) * dt)
    d = math.exp(-sig * math.sqrt(dt) + (r - sig * sig / 2) * dt)
    p = (math.exp(r * dt) - d) / (u - d)
    # No-arbitrage requires the risk-neutral probability to lie in [0, 1]
    if p < 0 or p > 1:
        print("No Arbitrage Principle has been Violated")
        # Bug fix: these were misspelled CallPrices/PutPrices, which
        # raised NameError whenever this branch was taken.
        callPrices.append('-')
        putPrices.append('-')
        continue
    call, put = getOptionPrice(M, u, d, p)
    callPrices.append(call)
    putPrices.append(put)

# Display the data using a Pandas DataFrame
df = pd.DataFrame({'Step Size': MList, 'Call Option Price': callPrices, 'Put Option Price': putPrices},)
display(df)
| 26.451613 | 102 | 0.60061 |
import math
import pandas as pd
from IPython.display import display
def getOptionPrice(M, u, d, p):
callList = [0]*(M+1)
putList = [0]*(M+1)
for i in range(M+1):
callList[i] = max(S0*(u**i)*(d**(M-i)) - K, 0)
putList[i] = max(0, K - S0*(u**i)*(d**(M-i)))
for i in range(M):
for j in range(M-i):
callList[j] = ((1-p)*callList[j] + p*callList[j+1])*math.exp(-r*T/M)
putList[j] = ((1-p)*putList[j] + p*putList[j+1])*math.exp(-r*T/M)
return callList[0], putList[0]
S0=100
K=105
T=5
r=0.05
sig=0.3
MList=[1, 5, 10, 20, 50, 100, 200, 400]
callPrices = []
putPrices = []
for M in MList:
dt = T/M
u = math.exp(sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
d = math.exp(-sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
p = (math.exp(r*dt)-d)/(u-d)
if p < 0 or p > 1:
print("No Arbitrage Principle has been Violated")
CallPrices.append('-')
PutPrices.append('-')
continue
call, put = getOptionPrice(M, u, d, p)
callPrices.append(call)
putPrices.append(put)
df = pd.DataFrame({'Step Size':MList,'Call Option Price': callPrices, 'Put Option Price': putPrices},)
display(df)
| true | true |
f722d729adacfea090edf12cc79bafd409f50426 | 204 | py | Python | google_auth/users/tests/test_models.py | data-leon/django-allauth-google-demo | e41de0128217d06853082bbccc23a46529bc4824 | [
"MIT"
] | 1 | 2021-11-18T18:43:05.000Z | 2021-11-18T18:43:05.000Z | google_auth/users/tests/test_models.py | data-leon/django-allauth-google-demo | e41de0128217d06853082bbccc23a46529bc4824 | [
"MIT"
] | null | null | null | google_auth/users/tests/test_models.py | data-leon/django-allauth-google-demo | e41de0128217d06853082bbccc23a46529bc4824 | [
"MIT"
] | null | null | null | import pytest
from google_auth.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
    """The user detail URL is built from the username."""
    expected = "/users/" + user.username + "/"
    assert user.get_absolute_url() == expected
| 20.4 | 64 | 0.77451 | import pytest
from google_auth.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
    """The user's absolute URL is /users/<username>/."""
    assert user.get_absolute_url() == f"/users/{user.username}/"
| true | true |
f722d732521d717904d23e60794bd5ed17668867 | 3,488 | py | Python | projects/anosql/test/python/conftest.py | arrdem/source | df9aae1253ed415ade3a2b59e8a0996ff659543d | [
"MIT"
] | 4 | 2021-08-17T15:47:38.000Z | 2021-10-06T01:59:32.000Z | projects/anosql/test/python/conftest.py | arrdem/source | df9aae1253ed415ade3a2b59e8a0996ff659543d | [
"MIT"
] | 8 | 2021-08-14T17:47:08.000Z | 2021-09-20T20:22:47.000Z | projects/anosql/test/python/conftest.py | arrdem/source | df9aae1253ed415ade3a2b59e8a0996ff659543d | [
"MIT"
] | 1 | 2021-10-09T21:24:35.000Z | 2021-10-09T21:24:35.000Z | import csv
import os
import sqlite3
import pytest
BLOGDB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "blogdb")
USERS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "users_data.csv")
BLOGS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "blogs_data.csv")
def populate_sqlite3_db(db_path):
    """Create the blogdb schema in *db_path* and load the CSV fixtures.

    The connection is now closed in a ``finally`` block, so a failure
    while creating the schema or loading the fixture data no longer
    leaks the connection.
    """
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.cursor()
        # Schema: one user has many blogs
        cur.executescript(
            """
create table users (
    userid integer not null primary key,
    username text not null,
    firstname integer not null,
    lastname text not null
);
create table blogs (
    blogid integer not null primary key,
    userid integer not null,
    title text not null,
    content text not null,
    published date not null default CURRENT_DATE,
    foreign key(userid) references users(userid)
);
"""
        )

        with open(USERS_DATA_PATH) as fp:
            users = list(csv.reader(fp))
        cur.executemany(
            """
insert into users (
    username,
    firstname,
    lastname
) values (?, ?, ?);""",
            users,
        )

        with open(BLOGS_DATA_PATH) as fp:
            blogs = list(csv.reader(fp))
        cur.executemany(
            """
insert into blogs (
    userid,
    title,
    content,
    published
) values (?, ?, ?, ?);""",
            blogs,
        )

        conn.commit()
    finally:
        conn.close()
@pytest.fixture()
def sqlite3_db_path(tmpdir):
    """Create and seed a throwaway SQLite blogdb; return its file path."""
    db_path = os.path.join(tmpdir.strpath, "blogdb.db")
    populate_sqlite3_db(db_path)
    return db_path
@pytest.fixture()
def sqlite3_conn(sqlite3_db_path):
    """Yield an open connection to the seeded SQLite blogdb, closing it
    after the test finishes."""
    conn = sqlite3.connect(sqlite3_db_path)
    yield conn
    conn.close()
@pytest.fixture
def pg_conn(postgresql):
    """Create the blogdb schema in the test PostgreSQL and load fixtures.

    ``postgresql`` is presumably the pytest-postgresql connection fixture
    — TODO confirm.  The ``with postgresql:`` block commits on success.
    """
    with postgresql:
        # Create the schema, then bulk-load the CSV fixture data
        with postgresql.cursor() as cur:
            cur.execute(
                """
                create table users (
                    userid serial not null primary key,
                    username varchar(32) not null,
                    firstname varchar(255) not null,
                    lastname varchar(255) not null
                );"""
            )
            cur.execute(
                """
                create table blogs (
                    blogid serial not null primary key,
                    userid integer not null references users(userid),
                    title varchar(255) not null,
                    content text not null,
                    published date not null default CURRENT_DATE
                );"""
            )
        with postgresql.cursor() as cur:
            # copy_from streams the CSV files directly into the tables
            with open(USERS_DATA_PATH) as fp:
                cur.copy_from(
                    fp, "users", sep=",", columns=["username", "firstname", "lastname"]
                )
            with open(BLOGS_DATA_PATH) as fp:
                cur.copy_from(
                    fp,
                    "blogs",
                    sep=",",
                    columns=["userid", "title", "content", "published"],
                )

    return postgresql
@pytest.fixture()
def pg_dsn(pg_conn):
    """Build a postgres:// DSN string from the test connection's parameters."""
    params = pg_conn.get_dsn_parameters()
    return f"postgres://{params['user']}@{params['host']}:{params['port']}/{params['dbname']}"
| 28.357724 | 87 | 0.497133 | import csv
import os
import sqlite3
import pytest
BLOGDB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "blogdb")
USERS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "users_data.csv")
BLOGS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "blogs_data.csv")
def populate_sqlite3_db(db_path):
conn = sqlite3.connect(db_path)
cur = conn.cursor()
cur.executescript(
"""
create table users (
userid integer not null primary key,
username text not null,
firstname integer not null,
lastname text not null
);
create table blogs (
blogid integer not null primary key,
userid integer not null,
title text not null,
content text not null,
published date not null default CURRENT_DATE,
foreign key(userid) references users(userid)
);
"""
)
with open(USERS_DATA_PATH) as fp:
users = list(csv.reader(fp))
cur.executemany(
"""
insert into users (
username,
firstname,
lastname
) values (?, ?, ?);""",
users,
)
with open(BLOGS_DATA_PATH) as fp:
blogs = list(csv.reader(fp))
cur.executemany(
"""
insert into blogs (
userid,
title,
content,
published
) values (?, ?, ?, ?);""",
blogs,
)
conn.commit()
conn.close()
@pytest.fixture()
def sqlite3_db_path(tmpdir):
db_path = os.path.join(tmpdir.strpath, "blogdb.db")
populate_sqlite3_db(db_path)
return db_path
@pytest.fixture()
def sqlite3_conn(sqlite3_db_path):
conn = sqlite3.connect(sqlite3_db_path)
yield conn
conn.close()
@pytest.fixture
def pg_conn(postgresql):
with postgresql:
with postgresql.cursor() as cur:
cur.execute(
"""
create table users (
userid serial not null primary key,
username varchar(32) not null,
firstname varchar(255) not null,
lastname varchar(255) not null
);"""
)
cur.execute(
"""
create table blogs (
blogid serial not null primary key,
userid integer not null references users(userid),
title varchar(255) not null,
content text not null,
published date not null default CURRENT_DATE
);"""
)
with postgresql.cursor() as cur:
with open(USERS_DATA_PATH) as fp:
cur.copy_from(
fp, "users", sep=",", columns=["username", "firstname", "lastname"]
)
with open(BLOGS_DATA_PATH) as fp:
cur.copy_from(
fp,
"blogs",
sep=",",
columns=["userid", "title", "content", "published"],
)
return postgresql
@pytest.fixture()
def pg_dsn(pg_conn):
p = pg_conn.get_dsn_parameters()
return "postgres://{user}@{host}:{port}/{dbname}".format(**p)
| true | true |
f722d79501f5788bfa9c7c0add43c4a9b6f1df3d | 9,472 | py | Python | wam/logger.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | null | null | null | wam/logger.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | null | null | null | wam/logger.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | 1 | 2021-09-26T14:12:20.000Z | 2021-09-26T14:12:20.000Z | # -*- coding: utf-8 -*-
"""
logger module
============
This module contains the WamLogger class for the pygame Whack a Mole game
Attributes:
na
Todo:
* sort docstrings (e.g. class)
Related projects:
Adapted from initial toy project https://github.com/sonlexqt/whack-a-mole
which is under MIT license
@author: DZLR3
"""
import logging
from time import time
import pygame
import csv
class WamLogger:
    """
    Logs the events within the game to a .log (text) file.

    Each instance owns a ``logging`` logger named after the session
    timestamp, with a DEBUG-level file handler (the .log file) and an
    ERROR-level console handler.  The ``log_*`` helpers format the
    various game events and record them through ``_log_it``; ``log_end``
    closes the log down.
    """

    def __init__(self, usr_timestamp=False,
                 log_file_root='../bdm-whack-a-mole/logs/'):
        """Create (or re-attach to) the session logger.

        Parameters
        ----------
        usr_timestamp : str or False
            Session id embedded in the log file name; defaults to the
            current unix time.
        log_file_root : str
            Directory the .log files are written to.
        """
        if usr_timestamp is False:
            self.usr_timestamp = str(time())
        else:
            self.usr_timestamp = usr_timestamp
        self.logger = logging.getLogger('WAM_Events_' + self.usr_timestamp)
        self.log_file_root = log_file_root
        # getLogger returns a shared instance, so only attach handlers once
        if not len(self.logger.handlers):
            self.create_log_instance()

    def create_log_instance(self):
        '''
        Creates an instance of the log file in the log folder, marked up
        with the session timestamp, and attaches file/console handlers.

        Raises
        ------
        OSError is caught and reported if the file cannot be created.
        NOTE(review): after a caught OSError self.fh is never assigned,
        so the following setLevel call raises AttributeError — confirm
        whether that hard failure is intended.
        '''
        self.logger.setLevel(logging.DEBUG)
        try:
            self.fh = logging.FileHandler(self.log_file_root +
                                          'WAM_Events_' +
                                          self.usr_timestamp + '.log')
        except OSError:
            print('Log file could not be created')
        self.fh.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.ERROR)
        # create formatter and add it to the handlers
        self.formatter = logging.Formatter('%(asctime)s - %(name)s - ' +
                                           '%(levelname)s - %(message)s')
        self.fh.setFormatter(self.formatter)
        self.ch.setFormatter(self.formatter)
        # add the handlers to the logger
        self.logger.addHandler(self.fh)
        self.logger.addHandler(self.ch)

    def log_class_dict(self, class_name, class_dict):
        '''
        Logs the initial conditions within the game classes to a separate
        WAM_Conditions_* file as CSV (appended; class name header, one
        key/value pair per row, then a blank row).
        '''
        with open(self.log_file_root + 'WAM_Conditions_' +
                  self.usr_timestamp + '.log',
                  'a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([class_name])
            for key, value in class_dict.items():
                writer.writerow([key, value])
            writer.writerow([])

    def _log_it(self, event=False):
        '''
        Logs events within the game, either from the event argument or,
        when no event is given, by draining the pygame event queue.
        Logging failures are recorded rather than raised.
        '''
        if event:
            try:
                self.logger.info(event)
            # Bug fix: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                self.logger.info('Event Logging Failure')
        else:
            try:
                self.logger.info(pygame.event.get())
            except Exception:  # was a bare except, see above
                self.logger.info('Event Logging Failure')

    def log_pygame_event(self, event):
        '''
        Logs a generic (pygame native) event in the game.
        '''
        self._log_it(event)

    def log_2x2_rate(self, mouse_pos, TWO_X_TWO_LOC, TWO_X_TWO_LEN,
                     x_dim='skill_vs_luck_rating',
                     y_dim='hit_confidence'):
        '''
        Logs the player's rating from the 2x2 grid widget.

        The click position is normalised against the grid origin and side
        length so both dimensions are logged in grid-relative units.
        '''
        x = (mouse_pos[0] - TWO_X_TWO_LOC[0]) / TWO_X_TWO_LEN
        y = (mouse_pos[1] - TWO_X_TWO_LOC[1]) / TWO_X_TWO_LEN
        self._log_it("<Event(7-Rate {'" +
                     x_dim + "': " + str(x) + ", '" +
                     y_dim + "': " + str(y) + "})>")

    def log_score(self, score_inc, score, skill_status, true_score):
        '''
        Logs the score increment, the running (displayed) score, the
        skill/luck status and the true score.
        '''
        score_str = ("'score_inc': " + str(score_inc) + ", " +
                     "'score': " + str(score) + ", " +
                     "'skill/luck':" + str(skill_status) + "," +
                     "'true_score':" + str(true_score) + "})>")
        self._log_it("<Event(11-Score {" + score_str)

    def log_skill_change(self, skill_luck_ratio):
        '''
        Logs a change to the skill/luck ratio.

        Parameters
        ----------
        skill_luck_ratio : the new skill/luck ratio value
        '''
        self._log_it("<Event(12-Skill_Luck_Ratio {'New': " + str(skill_luck_ratio) + " })>")

    def log_pause(self, pause_reason):
        '''
        Logs a pause request.

        Parameters
        ----------
        pause_reason : string
            The reason for the pause (e.g. demo ending, stage etc.)
        '''
        self._log_it("<Event(8-Pause {'reason': " + str(pause_reason) + " })>")

    def log_event_rate(self, action, event_act):
        '''
        Logs a rating event result.

        Parameters
        ----------
        action : string
            The rating type (partially deprecated)
        event_act : str
            The rating (must already be a string; it is concatenated
            directly into the log message).
        '''
        self._log_it("<Event(7-Rate {'" + action + "': " + event_act + " })>")

    def log_mole_event(self, xy):
        '''
        Logs a mole emerging at the given screen position.

        Parameters
        ----------
        xy : tuple
            The x and y coordinates of the mole; a non-2-tuple is
            reported but logging still proceeds.
        '''
        try:
            assert len(xy) == 2
        except AssertionError:
            print('Mole event xy coorindates did not contain exactly two dims')
        log_string = ("{'loc': (" +
                      str(xy[0]) + "," + str(xy[1]) +
                      ")})>")
        self._log_it("<Event(10-MoleUp) " + log_string)

    def log_hit_result(self, result, xy, distance, relative_loc):
        '''
        Logs the hit result for a given attempt.

        Parameters
        ----------
        result : tuple
            The actual hit, margin hit and reported hit results
        xy : tuple
            The x and y coordinates of the strike
        distance : int
            The distance from the centre of the mole
        relative_loc : 2 int tuple
            The relative location from mole centre for the strike
        '''
        try:
            assert len(xy) == 2
        except AssertionError:
            print('Mole event xy coorindates did not contain exactly two dims')
        log_string = ("{'result': " + str(result) + ', ' +
                      "'pos': (" +
                      str(xy[0]) + ", " +
                      str(xy[1]) + "), " +
                      "'distance': " + str(distance) + ", " +
                      "'relative_loc': " + str(relative_loc) + ", " +
                      "'window': None})>")
        self._log_it("<Event(9-Hit Attempt " + log_string)

    def log_end(self):
        '''
        Writes a closing marker, then shuts the logging system and the
        file/console handlers down.
        '''
        self.logger.info('******* HAPPY ANALYSING...LOG COMPLETE!!! *******')
        logging.shutdown()
        self.fh.close()
        self.ch.close()
| 27.941003 | 92 | 0.50834 |
import logging
from time import time
import pygame
import csv
class WamLogger:
    """Event logger for a Whack-A-Mole (WAM) experiment session.

    Writes timestamped game events to a per-session log file under
    ``log_file_root`` (DEBUG and up) and mirrors errors to the console.
    Each public ``log_*`` method formats one event kind and delegates to
    ``_log_it``.

    Fixes vs. the previous revision: bare ``except:`` clauses narrowed to
    ``except Exception:`` (a bare except also swallows KeyboardInterrupt /
    SystemExit), and ``assert``-based argument checks replaced with plain
    ``if`` tests so they survive ``python -O``.
    """

    def __init__(self, usr_timestamp=False,
                 log_file_root='../bdm-whack-a-mole/logs/'):
        # Reuse a caller-supplied timestamp so several objects can share
        # one session log; otherwise stamp the session with "now".
        if usr_timestamp is False:
            self.usr_timestamp = str(time())
        else:
            self.usr_timestamp = usr_timestamp
        self.logger = logging.getLogger('WAM_Events_' + self.usr_timestamp)
        self.log_file_root = log_file_root
        # getLogger returns a shared instance; only attach handlers once.
        if not self.logger.handlers:
            self.create_log_instance()

    def create_log_instance(self):
        """Attach file (DEBUG) and console (ERROR) handlers to the logger."""
        self.logger.setLevel(logging.DEBUG)
        try:
            self.fh = logging.FileHandler(self.log_file_root +
                                          'WAM_Events_' +
                                          self.usr_timestamp + '.log')
        except OSError:
            # NOTE(review): on failure self.fh stays unset, so the
            # setLevel call below raises AttributeError — confirm whether
            # failing loudly here is the intended behavior.
            print('Log file could not be created')
        self.fh.setLevel(logging.DEBUG)
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.ERROR)
        self.formatter = logging.Formatter('%(asctime)s - %(name)s - ' +
                                           '%(levelname)s - %(message)s')
        self.fh.setFormatter(self.formatter)
        self.ch.setFormatter(self.formatter)
        self.logger.addHandler(self.fh)
        self.logger.addHandler(self.ch)

    def log_class_dict(self, class_name, class_dict):
        """Append a class name and its attribute dict to the conditions CSV."""
        with open(self.log_file_root + 'WAM_Conditions_' +
                  self.usr_timestamp + '.log',
                  'a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([class_name])
            for key, value in class_dict.items():
                writer.writerow([key, value])
            writer.writerow([])

    def _log_it(self, event=False):
        """Log one event; with no event, dump the pygame event queue."""
        if event:
            try:
                self.logger.info(event)
            except Exception:
                self.logger.info('Event Logging Failure')
        else:
            try:
                self.logger.info(pygame.event.get())
            except Exception:
                self.logger.info('Event Logging Failure')

    def log_pygame_event(self, event):
        """Log a raw pygame event object."""
        self._log_it(event)

    def log_2x2_rate(self, mouse_pos, TWO_X_TWO_LOC, TWO_X_TWO_LEN,
                     x_dim='skill_vs_luck_rating',
                     y_dim='hit_confidence'):
        """Log a click on the 2x2 rating grid as normalized (0-1) x/y."""
        x = (mouse_pos[0] - TWO_X_TWO_LOC[0]) / TWO_X_TWO_LEN
        y = (mouse_pos[1] - TWO_X_TWO_LOC[1]) / TWO_X_TWO_LEN
        self._log_it("<Event(7-Rate {'" +
                     x_dim + "': " + str(x) + ", '" +
                     y_dim + "': " + str(y) + "})>")

    def log_score(self, score_inc, score, skill_status, true_score):
        """Log a score update (increment, running total, skill, truth)."""
        score_str = ("'score_inc': " + str(score_inc) + ", " +
                     "'score': " + str(score) + ", " +
                     "'skill/luck':" + str(skill_status) + "," +
                     "'true_score':" + str(true_score) + "})>")
        self._log_it("<Event(11-Score {" + score_str)

    def log_skill_change(self, skill_luck_ratio):
        """Log a change to the skill/luck ratio."""
        self._log_it("<Event(12-Skill_Luck_Ratio {'New': " +
                     str(skill_luck_ratio) + " })>")

    def log_pause(self, pause_reason):
        """Log a game pause and the reason for it."""
        self._log_it("<Event(8-Pause {'reason': " + str(pause_reason) + " })>")

    def log_event_rate(self, action, event_act):
        """Log a generic rating event (action name and its value)."""
        self._log_it("<Event(7-Rate {'" + action + "': " + event_act + " })>")

    def log_mole_event(self, xy):
        """Log a mole emerging at coordinate pair ``xy``."""
        if len(xy) != 2:
            print('Mole event xy coorindates did not contain exactly two dims')
        log_string = ("{'loc': (" +
                      str(xy[0]) + "," + str(xy[1]) +
                      ")})>")
        self._log_it("<Event(10-MoleUp) " + log_string)

    def log_hit_result(self, result, xy, distance, relative_loc):
        """Log a strike attempt: result, position, distance, relative loc."""
        if len(xy) != 2:
            print('Mole event xy coorindates did not contain exactly two dims')
        log_string = ("{'result': " + str(result) + ', ' +
                      "'pos': (" +
                      str(xy[0]) + ", " +
                      str(xy[1]) + "), " +
                      "'distance': " + str(distance) + ", " +
                      "'relative_loc': " + str(relative_loc) + ", " +
                      "'window': None})>")
        self._log_it("<Event(9-Hit Attempt " + log_string)

    def log_end(self):
        """Emit the completion banner and shut the logger/file down."""
        self.logger.info('******* HAPPY ANALYSING...LOG COMPLETE!!! *******')
        logging.shutdown()
        self.fh.close()
        self.ch.close()
| true | true |
f722d8e0a9abffec1f363842baede8d6a727f7b7 | 608 | py | Python | src/example-codes/kick-bot-example-01.py | xChivalrouSx/discord-kick-bot | 5f72cd810fdd6b31fe3cffec99bfd992394804ac | [
"MIT"
] | 2 | 2022-01-20T14:47:57.000Z | 2022-01-22T11:24:47.000Z | src/example-codes/kick-bot-example-01.py | xChivalrouSx/discord-kick-bot | 5f72cd810fdd6b31fe3cffec99bfd992394804ac | [
"MIT"
] | null | null | null | src/example-codes/kick-bot-example-01.py | xChivalrouSx/discord-kick-bot | 5f72cd810fdd6b31fe3cffec99bfd992394804ac | [
"MIT"
] | 1 | 2022-01-20T14:48:02.000Z | 2022-01-20T14:48:02.000Z | import os
import random
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='.')
@bot.command(name='99', help='Responds with a random quote from Brooklyn 99')
async def nine_nine(ctx):
brooklyn_99_quotes = [
'I\'m the human form of the 💯 emoji.',
'Bingpot!',
(
'Cool. Cool cool cool cool cool cool cool, '
'no doubt no doubt no doubt no doubt.'
),
]
response = random.choice(brooklyn_99_quotes)
await ctx.send(response)
bot.run(TOKEN)
| 22.518519 | 77 | 0.651316 | import os
import random
from discord.ext import commands
from dotenv import load_dotenv

# Pull DISCORD_TOKEN from a local .env file into the process environment.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')

# Commands are invoked with a leading '.' prefix (e.g. ".99").
bot = commands.Bot(command_prefix='.')


@bot.command(name='99', help='Responds with a random quote from Brooklyn 99')
async def nine_nine(ctx):
    """Reply in-channel with a uniformly random Brooklyn 99 quote."""
    brooklyn_99_quotes = [
        'I\'m the human form of the 💯 emoji.',
        'Bingpot!',
        (
            'Cool. Cool cool cool cool cool cool cool, '
            'no doubt no doubt no doubt no doubt.'
        ),
    ]

    response = random.choice(brooklyn_99_quotes)
    await ctx.send(response)

# Blocks until the bot disconnects.
bot.run(TOKEN)
| true | true |
f722d8fc8584b5895e0f0fb0d406828a771457b0 | 42,655 | py | Python | quark/ipam.py | insequent/quark | 0ce9e49c6cbef4e6a910066a0617c6f997626928 | [
"Apache-2.0"
] | null | null | null | quark/ipam.py | insequent/quark | 0ce9e49c6cbef4e6a910066a0617c6f997626928 | [
"Apache-2.0"
] | null | null | null | quark/ipam.py | insequent/quark | 0ce9e49c6cbef4e6a910066a0617c6f997626928 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Quark Pluggable IPAM
"""
import functools
import itertools
import random
import time
import uuid
import netaddr
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from oslo.config import cfg
from oslo.db import exception as db_exception
from oslo.utils import timeutils
from oslo_concurrency import lockutils
from oslo_log import log as logging
from quark.db import api as db_api
from quark.db import ip_types
from quark.db import models
from quark import exceptions as q_exc
from quark import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Tunables for IPAM retry counts and locking behavior, registered under
# the [QUARK] config group.
quark_opts = [
    cfg.IntOpt('v6_allocation_attempts',
               default=10,
               help=_('Number of times to retry generating v6 addresses'
                      ' before failure. Also implicitly controls how many'
                      ' v6 addresses we assign to any port, as the random'
                      ' values generated will be the same every time.')),
    cfg.IntOpt("mac_address_retry_max",
               default=20,
               help=_("Number of times to attempt to allocate a new MAC"
                      " address before giving up.")),
    cfg.IntOpt("ip_address_retry_max",
               default=20,
               help=_("Number of times to attempt to allocate a new IP"
                      " address before giving up.")),
    cfg.BoolOpt("ipam_use_synchronization",
                default=False,
                help=_("Configures whether or not to use the experimental"
                       " semaphore logic around IPAM")),
    cfg.BoolOpt("ipam_select_subnet_v6_locking",
                default=True,
                help=_("Controls whether or not SELECT ... FOR UPDATE is used"
                       " when retrieving v6 subnets explicitly."))
]
CONF.register_opts(quark_opts, "QUARK")

# NOTE(mdietz): equivalent to the following line, but converting
#               v6 addresses in netaddr is very slow.
#               netaddr.IPAddress("::0200:0:0:0").value
MAGIC_INT = 144115188075855872
def no_synchronization(*args, **kwargs):
    """Drop-in stand-in for ``lockutils.synchronized``.

    Accepts (and ignores) the same arguments but performs no locking:
    the returned decorator simply forwards the call to the wrapped
    function, preserving its metadata.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*call_args, **call_kwargs):
            return func(*call_args, **call_kwargs)
        return wrapper
    return decorator
def named(sema):
    """Return the module-qualified lock name for semaphore ``sema``."""
    return "{0}.{1}".format(__name__, sema)
# When IPAM synchronization is disabled, swap in the no-op decorator so
# call sites can apply @synchronized unconditionally.
if CONF.QUARK.ipam_use_synchronization:
    synchronized = lockutils.synchronized
else:
    synchronized = no_synchronization
def rfc2462_ip(mac, cidr):
    """Derive a deterministic v6 address (as an int) from a MAC.

    Per RFC 2462, the address is the subnet base plus the MAC's EUI-64
    value, with the universal/local bit flipped via MAGIC_INT.
    """
    base = netaddr.IPNetwork(cidr).value
    eui = netaddr.EUI(mac)
    base += eui.eui64().value
    return base ^ MAGIC_INT
def rfc3041_ip(port_id, cidr):
    """Yield pseudo-random v6 addresses (as ints) for ``port_id``.

    Per RFC 3041 (privacy addresses): each candidate is the subnet base
    plus 64 random bits, with MAGIC_INT xor'd in. The generator is
    seeded from the port UUID so the sequence is reproducible for a
    given port.
    """
    # Use a private Random instance instead of random.seed() so we do
    # not clobber the global random module state for other consumers.
    # Seeded identically, the Mersenne Twister yields the exact same
    # sequence the module-level functions would have produced.
    rng = random.Random(int(uuid.UUID(port_id)))
    base = netaddr.IPNetwork(cidr).value
    while True:
        candidate = base + rng.getrandbits(64)
        yield candidate ^ MAGIC_INT
def generate_v6(mac, port_id, cidr):
    """Yield candidate v6 addresses (as ints) for a port.

    The first candidate is the deterministic RFC 2462 (EUI-64) address
    when a MAC is available; all subsequent candidates come from the
    pseudo-random RFC 3041 generator seeded by ``port_id``.
    """
    # NOTE(mdietz): RM10879 - if we don't have a MAC, don't panic, defer to
    #               our magic rfc3041_ip method instead. If an IP is created
    #               by the ip_addresses controller, we wouldn't necessarily
    #               have a MAC to base our generator on in that case for
    #               example.
    if mac is not None:
        yield rfc2462_ip(mac, cidr)

    for addr in rfc3041_ip(port_id, cidr):
        yield addr
def ipam_logged(fx):
    """Decorator: wrap an IPAM entry point with a QuarkIPAMLog.

    Injects a fresh log as the ``ipam_log`` keyword argument and
    guarantees ``end()`` (which emits the timing summary line) runs even
    when the wrapped call raises.
    """
    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original version lost.
    @functools.wraps(fx)
    def wrap(self, *args, **kwargs):
        log = QuarkIPAMLog()
        kwargs['ipam_log'] = log
        try:
            return fx(self, *args, **kwargs)
        finally:
            log.end()
    return wrap
class QuarkIPAMLog(object):
    """Aggregates per-attempt timing entries for one IPAM operation.

    Entries are grouped by the name of the function attempted; ``end()``
    sums their durations and emits a single summary debug line.
    """

    def __init__(self):
        self.entries = {}
        self.success = True

    def make_entry(self, fx_name):
        """Create, register and return a timing entry for ``fx_name``."""
        if fx_name not in self.entries:
            self.entries[fx_name] = []
        entry = QuarkIPAMLogEntry(self, fx_name)
        self.entries[fx_name].append(entry)
        return entry

    def _output(self, status, time_total, fails, successes):
        # FIX: the ``status`` argument used to be ignored and immediately
        # overwritten from self.success; since the only caller passes
        # self.success, honoring the parameter preserves behavior.
        label = "SUCCESS" if status else "FAILED"
        LOG.debug("STATUS:%s TIME:%f ATTEMPTS:%d PASS:%d FAIL:%d" %
                  (label, time_total, fails + successes, successes, fails))

    def end(self):
        """Total up all entries and emit the summary line."""
        total = 0
        fails = 0
        successes = 0
        for fx, entries in self.entries.items():
            for entry in entries:
                total += entry.get_time()
                if entry.success:
                    successes += 1
                else:
                    fails += 1
        self._output(self.success, total, fails, successes)

    def failed(self):
        """Mark the whole operation as failed."""
        self.success = False
class QuarkIPAMLogEntry(object):
    """One timed attempt within a QuarkIPAMLog.

    Records its start time on construction; callers invoke ``end()``
    when the attempt finishes and ``failed()`` if it did not succeed.
    """

    def __init__(self, log, name):
        self.name = name
        self.log = log
        self.start_time = time.time()
        self.success = True

    def failed(self):
        """Flag this attempt as unsuccessful."""
        self.success = False

    def end(self):
        """Record the attempt's completion time."""
        self.end_time = time.time()

    def get_time(self):
        """Return the attempt's duration, or 0 if ``end()`` never ran."""
        try:
            return self.end_time - self.start_time
        except AttributeError:
            return 0
class QuarkIpam(object):
    @synchronized(named("allocate_mac_address"))
    def allocate_mac_address(self, context, net_id, port_id, reuse_after,
                             mac_address=None,
                             use_forbidden_mac_range=False):
        """Allocate a MAC address for a port, in three steps.

        1. Try to atomically reclaim a previously deallocated MAC
           (optionally the specific ``mac_address`` requested).
        2. Find a MAC range with free space and reserve the next value.
        3. Insert the new MAC row; a duplicate loops back for a retry.

        :param context: neutron request context
        :param net_id: network ID (used in error reporting)
        :param port_id: port the MAC is assigned to (logging only)
        :param reuse_after: seconds a MAC must have been deallocated
            before it may be reused
        :param mac_address: optional explicit MAC to allocate
        :param use_forbidden_mac_range: when True, "do_not_use" ranges
            may also be considered by the range query
        :returns: the allocated MAC address model
        :raises exceptions.MacAddressGenerationFailure: when every retry
            is exhausted without an allocation
        """
        if mac_address:
            # Normalize to the integer form stored in the DB.
            mac_address = netaddr.EUI(mac_address).value

        kwargs = {"network_id": net_id, "port_id": port_id,
                  "mac_address": mac_address,
                  "use_forbidden_mac_range": use_forbidden_mac_range}
        LOG.info(("Attempting to allocate a new MAC address "
                  "[{0}]").format(utils.pretty_kwargs(**kwargs)))

        for retry in xrange(CONF.QUARK.mac_address_retry_max):
            LOG.info("Attemping to reallocate deallocated MAC (step 1 of 3),"
                     " attempt {0} of {1}".format(
                         retry + 1, CONF.QUARK.mac_address_retry_max))
            try:
                with context.session.begin():
                    # Claim a deallocated MAC by stamping it with a fresh
                    # transaction id, then read back the row we claimed.
                    transaction = db_api.transaction_create(context)
                    update_kwargs = {
                        "deallocated": False,
                        "deallocated_at": None,
                        "transaction_id": transaction.id
                    }
                    filter_kwargs = {
                        "reuse_after": reuse_after,
                        "deallocated": True,
                        "address": mac_address
                    }
                    elevated = context.elevated()
                    result = db_api.mac_address_reallocate(
                        elevated, update_kwargs, **filter_kwargs)
                    if not result:
                        # Nothing reallocatable; fall through to creation.
                        break

                    reallocated_mac = db_api.mac_address_reallocate_find(
                        elevated, transaction.id)
                    if reallocated_mac:
                        dealloc = netaddr.EUI(reallocated_mac["address"])
                        LOG.info("Found a suitable deallocated MAC {0}".format(
                            str(dealloc)))
                        LOG.info("MAC assignment for port ID {0} completed "
                                 "with address {1}".format(port_id, dealloc))
                        return reallocated_mac
            except Exception:
                LOG.exception("Error in mac reallocate...")
                continue

        LOG.info("Couldn't find a suitable deallocated MAC, attempting "
                 "to create a new one")

        # This could fail if a large chunk of MACs were chosen explicitly,
        # but under concurrent load enough MAC creates should iterate without
        # any given thread exhausting its retry count.
        for retry in xrange(CONF.QUARK.mac_address_retry_max):
            LOG.info("Attemping to find a range to create a new MAC in "
                     "(step 2 of 3), attempt {0} of {1}".format(
                         retry + 1, CONF.QUARK.mac_address_retry_max))
            next_address = None
            with context.session.begin():
                try:
                    fn = db_api.mac_address_range_find_allocation_counts
                    mac_range = \
                        fn(context, address=mac_address,
                           use_forbidden_mac_range=use_forbidden_mac_range)

                    if not mac_range:
                        LOG.info("No MAC ranges could be found given "
                                 "the criteria")
                        break

                    rng, addr_count = mac_range
                    LOG.info("Found a MAC range {0}".format(rng["cidr"]))

                    last = rng["last_address"]
                    first = rng["first_address"]
                    if (last - first + 1) <= addr_count:
                        # Somehow, the range got filled up without us
                        # knowing, so set the next_auto_assign to be -1
                        # so we never try to create new ones
                        # in this range
                        db_api.mac_range_update_set_full(context, rng)
                        LOG.info("MAC range {0} is full".format(rng["cidr"]))
                        continue

                    if mac_address:
                        next_address = mac_address
                    else:
                        # Reserve the next auto-assign value, marking the
                        # range full when this allocation consumes it.
                        next_address = rng["next_auto_assign_mac"]
                        if next_address + 1 > rng["last_address"]:
                            db_api.mac_range_update_set_full(context, rng)
                        else:
                            db_api.mac_range_update_next_auto_assign_mac(
                                context, rng)
                        context.session.refresh(rng)
                except Exception:
                    LOG.exception("Error in updating mac range")
                    continue

            # Based on the above, this should only fail if a MAC was
            # was explicitly chosen at some point. As such, fall through
            # here and get in line for a new MAC address to try
            try:
                mac_readable = str(netaddr.EUI(next_address))
                LOG.info("Attempting to create new MAC {0} "
                         "(step 3 of 3)".format(mac_readable))
                with context.session.begin():
                    address = db_api.mac_address_create(
                        context, address=next_address,
                        mac_address_range_id=rng["id"])
                    LOG.info("MAC assignment for port ID {0} completed with "
                             "address {1}".format(port_id, mac_readable))
                    return address
            except Exception:
                LOG.info("Failed to create new MAC {0}".format(mac_readable))
                LOG.exception("Error in creating mac. MAC possibly duplicate")
                continue

        raise exceptions.MacAddressGenerationFailure(net_id=net_id)
    @synchronized(named("reallocate_ip"))
    def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
                                 version=None, ip_address=None,
                                 segment_id=None, subnets=None, **kwargs):
        """Try to reclaim a previously deallocated IP for this port.

        v6 requests are deliberately deferred to the create path (see the
        inline NOTE). For v4, a deallocated row matching the filters is
        atomically claimed via a transaction id and returned.

        :returns: a list with the reallocated address model, or [] when
            nothing could be reclaimed
        :raises exceptions.IpAddressGenerationFailure: when a segment_id
            was given but matches no subnets
        """
        version = version or [4, 6]
        elevated = context.elevated()

        LOG.info("Attempting to reallocate an IP (step 1 of 3) - [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                version=version, segment_id=segment_id,
                                subnets=subnets)))

        if version == 6:
            # Defers to the create case. The reason why is we'd have to look
            # up subnets here to correctly generate the v6. If we split them
            # up into reallocate and create, we'd be looking up the same
            # subnets twice, which is a waste of time.

            # TODO(mdietz): after reviewing this code, this block annoyingly
            #               doesn't trigger in the ANY case, since we end up
            #               using a list of [4, 6]. It works as expected most
            #               of the time, but we can anticipate that isolated
            #               networks will end up using sequential assignment.
            #               Probably want to rework this logic to compensate
            #               at some point. Considering they all come from the
            #               same MAC address pool, nothing bad will happen,
            #               just worth noticing and fixing.
            LOG.info("Identified as v6 case, deferring to IP create path")
            return []

        sub_ids = []
        if subnets:
            sub_ids = subnets
        elif segment_id:
            subnets = db_api.subnet_find(elevated,
                                         network_id=net_id,
                                         segment_id=segment_id)
            sub_ids = [s["id"] for s in subnets]
            if not sub_ids:
                LOG.info("No subnets matching segment_id {0} could be "
                         "found".format(segment_id))
                raise exceptions.IpAddressGenerationFailure(
                    net_id=net_id)

        ip_kwargs = {
            "network_id": net_id,
            "reuse_after": reuse_after,
            "deallocated": True,
            "ip_address": ip_address,
            "version": version,
        }
        if ip_address:
            # An explicitly requested IP may be claimed even if it was
            # never deallocated (e.g. a brand new row).
            del ip_kwargs["deallocated"]

        if sub_ids:
            ip_kwargs["subnet_id"] = sub_ids

        ipam_log = kwargs.get('ipam_log', None)

        for retry in xrange(CONF.QUARK.ip_address_retry_max):
            attempt = None
            if ipam_log:
                attempt = ipam_log.make_entry("attempt_to_reallocate_ip")
            LOG.info("Attempt {0} of {1}".format(
                retry + 1, CONF.QUARK.ip_address_retry_max))
            try:
                with context.session.begin():
                    # Stamp a matching deallocated row with a fresh
                    # transaction id, then read back the row we claimed.
                    transaction = db_api.transaction_create(context)
                    m = models.IPAddress
                    update_kwargs = {
                        m.transaction_id: transaction.id,
                        m.address_type: kwargs.get("address_type",
                                                   ip_types.FIXED),
                        m.deallocated: False,
                        m.deallocated_at: None,
                        m.used_by_tenant_id: context.tenant_id,
                        m.allocated_at: timeutils.utcnow(),
                    }
                    result = db_api.ip_address_reallocate(
                        elevated, update_kwargs, **ip_kwargs)
                    if not result:
                        LOG.info("Couldn't update any reallocatable addresses "
                                 "given the criteria")
                        if attempt:
                            attempt.failed()
                        break

                    updated_address = db_api.ip_address_reallocate_find(
                        elevated, transaction.id)
                    if not updated_address:
                        if attempt:
                            attempt.failed()
                        continue

                    LOG.info("Address {0} is reallocated".format(
                        updated_address["address_readable"]))
                    return [updated_address]
            except Exception:
                if attempt:
                    attempt.failed()
                LOG.exception("Error in reallocate ip...")
            finally:
                if attempt:
                    attempt.end()
        return []
    def is_strategy_satisfied(self, ip_addresses, allocate_complete=False):
        """Return a truthy value once this strategy's demands are met.

        The base strategy is satisfied by any non-empty address list;
        subclasses override this to require specific IP versions.
        ``allocate_complete`` is ignored here but honored by subclasses
        once the creation (as opposed to reallocation) phase has run.
        """
        return ip_addresses
    def _allocate_from_subnet(self, context, net_id, subnet,
                              port_id, reuse_after, ip_address=None, **kwargs):
        """Create a brand new IP row in ``subnet``.

        Uses the explicit ``ip_address`` when given, otherwise the
        subnet's pre-reserved ``next_auto_assign_ip`` (already advanced
        by select_subnet, hence the ``- 1``). Auto-assigned candidates
        that fall in an IP policy exclusion raise a retryable failure so
        the caller can pick again.

        :raises q_exc.IPAddressPolicyRetryableFailure: candidate violates
            the subnet's IP policy
        :raises exceptions.IpAddressInUse: explicit IP already exists
        :raises q_exc.IPAddressRetryableFailure: auto-assigned candidate
            conflicted on insert
        """
        LOG.info("Creating a new address in subnet {0} - [{1}]".format(
            subnet["_cidr"], utils.pretty_kwargs(network_id=net_id,
                                                 subnet=subnet,
                                                 port_id=port_id,
                                                 ip_address=ip_address)))

        ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
        next_ip = ip_address
        if not next_ip:
            if subnet["next_auto_assign_ip"] != -1:
                # next_auto_assign_ip was already incremented for us, so
                # the value we were handed is one past our candidate.
                next_ip = netaddr.IPAddress(subnet["next_auto_assign_ip"] - 1)
            else:
                next_ip = netaddr.IPAddress(subnet["last_ip"])
            if subnet["ip_version"] == 4:
                next_ip = next_ip.ipv4()

        LOG.info("Next IP is {0}".format(str(next_ip)))
        if ip_policy_cidrs and next_ip in ip_policy_cidrs and not ip_address:
            LOG.info("Next IP {0} violates policy".format(str(next_ip)))
            raise q_exc.IPAddressPolicyRetryableFailure(ip_addr=next_ip,
                                                        net_id=net_id)
        try:
            with context.session.begin():
                address = db_api.ip_address_create(
                    context, address=next_ip, subnet_id=subnet["id"],
                    deallocated=0, version=subnet["ip_version"],
                    network_id=net_id,
                    port_id=port_id,
                    address_type=kwargs.get('address_type', ip_types.FIXED))
                address["deallocated"] = 0
        except Exception:
            # NOTE(mdietz): Our version of sqlalchemy incorrectly raises None
            #               here when there's an IP conflict
            if ip_address:
                raise exceptions.IpAddressInUse(ip_address=next_ip,
                                                net_id=net_id)
            raise q_exc.IPAddressRetryableFailure(ip_addr=next_ip,
                                                  net_id=net_id)

        return address
    def _allocate_from_v6_subnet(self, context, net_id, subnet,
                                 port_id, reuse_after, ip_address=None,
                                 **kwargs):
        """Allocate a v6 address as per RFC 2462 and RFC 3041.

        To accomodate this, we effectively treat all v6 assignment as a
        first time allocation utilizing the MAC address of the VIF. Because
        we recycle MACs, we will eventually attempt to recreate a previously
        generated v6 address. Instead of failing, we've opted to handle
        reallocating that address in this method.

        This should provide a performance boost over attempting to check
        each and every subnet in the existing reallocate logic, as we'd
        have to iterate over each and every subnet returned.

        Explicit ``ip_address`` requests are delegated to the standard
        (v4-style) allocation path.

        :raises exceptions.IpAddressGenerationFailure: generation attempts
            (CONF.QUARK.v6_allocation_attempts) were exhausted
        """
        LOG.info("Attempting to allocate a v6 address - [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, subnet=subnet,
                                port_id=port_id, ip_address=ip_address)))

        if ip_address:
            LOG.info("IP %s explicitly requested, deferring to standard "
                     "allocation" % ip_address)
            return self._allocate_from_subnet(context, net_id=net_id,
                                              subnet=subnet, port_id=port_id,
                                              reuse_after=reuse_after,
                                              ip_address=ip_address, **kwargs)
        else:
            mac = kwargs.get("mac_address")
            if mac:
                mac = kwargs["mac_address"].get("address")

            ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
            for tries, ip_address in enumerate(
                    generate_v6(mac, port_id, subnet["cidr"])):

                LOG.info("Attempt {0} of {1}".format(
                    tries + 1, CONF.QUARK.v6_allocation_attempts))

                if tries > CONF.QUARK.v6_allocation_attempts - 1:
                    LOG.info("Exceeded v6 allocation attempts, bailing")
                    raise exceptions.IpAddressGenerationFailure(
                        net_id=net_id)

                ip_address = netaddr.IPAddress(ip_address).ipv6()
                LOG.info("Generated a new v6 address {0}".format(
                    str(ip_address)))

                # NOTE(mdietz): treating the IPSet as a boolean caused netaddr
                #               to attempt to enumerate the entire set!
                if (ip_policy_cidrs is not None and
                        ip_address in ip_policy_cidrs):
                    LOG.info("Address {0} excluded by policy".format(
                        str(ip_address)))
                    continue

                # TODO(mdietz): replace this with a compare-and-swap loop
                with context.session.begin():
                    address = db_api.ip_address_find(
                        context, network_id=net_id, ip_address=ip_address,
                        scope=db_api.ONE, reuse_after=reuse_after,
                        deallocated=True, subnet_id=subnet["id"],
                        lock_mode=True)

                    if address:
                        LOG.info("Address {0} exists, claiming".format(
                            str(ip_address)))
                        return db_api.ip_address_update(
                            context, address, deallocated=False,
                            deallocated_at=None,
                            used_by_tenant_id=context.tenant_id,
                            allocated_at=timeutils.utcnow(),
                            address_type=kwargs.get('address_type',
                                                    ip_types.FIXED))

                # This triggers when the IP is allocated to another tenant,
                # either because we missed it due to our filters above, or
                # in an extremely unlikely race between the find and here.
                try:
                    with context.session.begin():
                        return db_api.ip_address_create(
                            context, address=ip_address,
                            subnet_id=subnet["id"],
                            version=subnet["ip_version"], network_id=net_id,
                            address_type=kwargs.get('address_type',
                                                    ip_types.FIXED))
                except db_exception.DBDuplicateEntry:
                    LOG.info("{0} exists but was already "
                             "allocated".format(str(ip_address)))
                    LOG.debug("Duplicate entry found when inserting subnet_id"
                              " %s ip_address %s", subnet["id"], ip_address)
def _allocate_ips_from_subnets(self, context, new_addresses, net_id,
subnets, port_id, reuse_after,
ip_address=None, **kwargs):
LOG.info("Allocating IP(s) from chosen subnet(s) (step 3 of 3) - "
"[{0}]".format(utils.pretty_kwargs(
network_id=net_id, port_id=port_id,
new_addresses=new_addresses, ip_address=ip_address)))
subnets = subnets or []
for subnet in subnets:
if not subnet:
continue
LOG.info("Attempting to allocate from {0} - {1}".format(
subnet["id"], subnet["_cidr"]))
address = None
if int(subnet["ip_version"]) == 4:
address = self._allocate_from_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
else:
address = self._allocate_from_v6_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
if address:
LOG.info("Created IP {0}".format(
address["address_readable"]))
new_addresses.append(address)
return new_addresses
def _notify_new_addresses(self, context, new_addresses):
for addr in new_addresses:
payload = dict(used_by_tenant_id=addr["used_by_tenant_id"],
ip_block_id=addr["subnet_id"],
ip_address=addr["address_readable"],
device_ids=[p["device_id"] for p in addr["ports"]],
created_at=addr["created_at"])
n_rpc.get_notifier("network").info(context,
"ip_block.address.create",
payload)
    @ipam_logged
    def allocate_ip_address(self, context, new_addresses, net_id, port_id,
                            reuse_after, segment_id=None, version=None,
                            ip_addresses=None, subnets=None, **kwargs):
        """Top-level IP allocation entry point.

        First attempts to reallocate deallocated IPs; if the strategy is
        still unsatisfied, falls back to selecting subnets and creating
        new addresses. Results accumulate into the caller-supplied
        ``new_addresses`` list. ``ipam_log`` is injected by the
        @ipam_logged decorator.

        :raises exceptions.IpAddressGenerationFailure: the strategy could
            not be satisfied after all retries
        """
        elevated = context.elevated()
        subnets = subnets or []
        ip_addresses = ip_addresses or []

        ipam_log = kwargs.get('ipam_log', None)
        LOG.info("Starting a new IP address(es) allocation. Strategy "
                 "is {0} - [{1}]".format(
                     self.get_name(),
                     utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                         new_addresses=new_addresses,
                                         ip_addresses=ip_addresses,
                                         subnets=subnets,
                                         segment_id=segment_id,
                                         version=version)))

        def _try_reallocate_ip_address(ipam_log, ip_addr=None):
            # Appends any reclaimed addresses onto the shared list.
            new_addresses.extend(self.attempt_to_reallocate_ip(
                context, net_id, port_id, reuse_after, version=None,
                ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
                **kwargs))

        def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
            # Select subnet(s) then create; retryable failures loop.
            for retry in xrange(CONF.QUARK.ip_address_retry_max):
                attempt = None
                if ipam_log:
                    attempt = ipam_log.make_entry("_try_allocate_ip_address")
                LOG.info("Allocating new IP attempt {0} of {1}".format(
                    retry + 1, CONF.QUARK.ip_address_retry_max))
                if not sub:
                    subnets = self._choose_available_subnet(
                        elevated, net_id, version, segment_id=segment_id,
                        ip_address=ip_addr, reallocated_ips=new_addresses)
                else:
                    subnets = [self.select_subnet(context, net_id,
                                                  ip_addr, segment_id,
                                                  subnet_ids=[sub])]
                LOG.info("Subnet selection returned {0} viable subnet(s) - "
                         "IDs: {1}".format(len(subnets),
                                           ", ".join([str(s["id"])
                                                      for s in subnets if s])))
                try:
                    self._allocate_ips_from_subnets(context, new_addresses,
                                                    net_id, subnets,
                                                    port_id, reuse_after,
                                                    ip_addr, **kwargs)
                except q_exc.IPAddressRetryableFailure:
                    LOG.exception("Error in allocating IP")
                    if attempt:
                        LOG.debug("ATTEMPT FAILED")
                        attempt.failed()
                    remaining = CONF.QUARK.ip_address_retry_max - retry - 1
                    if remaining > 0:
                        LOG.info("{0} retries remain, retrying...".format(
                            remaining))
                    else:
                        LOG.info("No retries remaing, bailing")
                    continue
                finally:
                    if attempt:
                        attempt.end()
                break

        ip_addresses = [netaddr.IPAddress(ip_address)
                        for ip_address in ip_addresses]

        if ip_addresses:
            for ip_address in ip_addresses:
                _try_reallocate_ip_address(ipam_log, ip_address)
        else:
            _try_reallocate_ip_address(ipam_log)

        if self.is_strategy_satisfied(new_addresses):
            return
        else:
            LOG.info("Reallocated addresses {0} but still need more addresses "
                     "to satisfy strategy {1}. Falling back to creating "
                     "IPs".format(new_addresses, self.get_name()))

        if ip_addresses or subnets:
            for ip_address, subnet in itertools.izip_longest(ip_addresses,
                                                             subnets):
                _try_allocate_ip_address(ipam_log, ip_address, subnet)
        else:
            _try_allocate_ip_address(ipam_log)

        if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
            self._notify_new_addresses(context, new_addresses)
            LOG.info("IPAM for port ID {0} completed with addresses "
                     "{1}".format(port_id,
                                  [a["address_readable"]
                                   for a in new_addresses]))
            return
        ipam_log.failed()

        raise exceptions.IpAddressGenerationFailure(net_id=net_id)
def deallocate_ip_address(self, context, address):
address["deallocated"] = 1
address["address_type"] = None
payload = dict(used_by_tenant_id=address["used_by_tenant_id"],
ip_block_id=address["subnet_id"],
ip_address=address["address_readable"],
device_ids=[p["device_id"] for p in address["ports"]],
created_at=address["created_at"],
deleted_at=timeutils.utcnow())
n_rpc.get_notifier("network").info(context,
"ip_block.address.delete",
payload)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
ips_removed = []
for addr in port["ip_addresses"]:
if "ip_address" in kwargs:
ip = kwargs["ip_address"]
if ip != netaddr.IPAddress(int(addr["address"])):
continue
# Note: only deallocate ip if this is the
# only port mapped
if len(addr["ports"]) == 1:
self.deallocate_ip_address(context, addr)
ips_removed.append(addr)
port["ip_addresses"] = list(
set(port["ip_addresses"]) - set(ips_removed))
    # NCP-1509(roaet):
    # - started using admin_context due to tenant not claiming when realloc
    def deallocate_mac_address(self, context, address):
        """Deallocate (or hard-delete) the MAC address ``address``.

        Uses an elevated context so the lookup succeeds even when the
        current tenant never claimed the MAC (see NCP-1509). MACs from
        "do_not_use" ranges are deleted outright; all others are flagged
        deallocated with a timestamp for later reuse.

        :raises exceptions.NotFound: no such MAC exists
        """
        admin_context = context.elevated()
        mac = db_api.mac_address_find(admin_context, address=address,
                                      scope=db_api.ONE)
        if not mac:
            raise exceptions.NotFound(
                message="No MAC address %s found" % netaddr.EUI(address))

        if mac["mac_address_range"]["do_not_use"]:
            db_api.mac_address_delete(admin_context, mac)
        else:
            db_api.mac_address_update(admin_context, mac, deallocated=True,
                                      deallocated_at=timeutils.utcnow())
    def _select_subnet(self, context, net_id, ip_address, segment_id,
                       subnet_ids, **filters):
        """Generator yielding (subnet, ips_in_subnet) candidate pairs.

        Subnets are fetched most-full-first. V4 candidates are fetched
        with SELECT ... FOR UPDATE; v6 may skip locking per config (see
        NCP-1480 note below). Yields nothing when no subnet matches.
        """
        # NCP-1480: Don't need to lock V6 subnets, since we don't use
        # next_auto_assign_ip for them. We already uniquely identified
        # the V6 we're going to get by generating a MAC in a previous step.
        # Also note that this only works under BOTH or BOTH_REQUIRED. ANY
        # does not pass an ip_version
        lock_subnets = True
        if (not CONF.QUARK.ipam_select_subnet_v6_locking and
                "ip_version" in filters and
                int(filters["ip_version"]) == 6):
            lock_subnets = False
        select_api = db_api.subnet_find_ordered_by_most_full
        # TODO(mdietz): Add configurable, alternate subnet selection here
        subnets = select_api(context, net_id, lock_subnets=lock_subnets,
                             segment_id=segment_id, scope=db_api.ALL,
                             subnet_id=subnet_ids, **filters)

        if not subnets:
            LOG.info("No subnets found given the search criteria!")
            return

        # TODO(mdietz): Making this into an iterator because we want to move
        #               to selecting 1 subnet at a time and paginating rather
        #               than the bulk fetch. Without locks, we need to
        #               minimize looking at stale data to save ourselves
        #               some retries. Getting then 1 at a time will
        #               facilitate this.
        for subnet, ips_in_subnet in subnets:
            yield subnet, ips_in_subnet
    def _should_mark_subnet_full(self, context, subnet, ipnet, ip_address,
                                 ips_in_subnet):
        """Return True when ``subnet`` has no more assignable addresses.

        A subnet is full when its next_auto_assign_ip is out of bounds,
        or when the existing allocations plus the IP-policy exclusions
        cover the whole CIDR.
        """
        ip = subnet["next_auto_assign_ip"]
        # NOTE(mdietz): When atomically updated, this probably
        #               doesn't need the lower bounds check but
        #               I'm not comfortable removing it yet.
        # NOTE(review): by operator precedence this reads as
        # (v4 and ip < first_ip) or (ip > last_ip), i.e. the upper-bound
        # check also applies to v6 — confirm that is intended.
        if (subnet["ip_version"] == 4 and ip < subnet["first_ip"] or
                ip > subnet["last_ip"]):
            return True

        ip_policy = None
        if not ip_address:
            # Policies don't prevent explicit assignment, so we only
            # need to check if we're allocating a new IP
            ip_policy = subnet.get("ip_policy")
        policy_size = ip_policy["size"] if ip_policy else 0
        if ipnet.size > (ips_in_subnet + policy_size - 1):
            return False
        return True
def _ip_in_subnet(self, subnet, subnet_ids, ipnet, ip_address):
if ip_address:
requested_ip = netaddr.IPAddress(ip_address)
if ipnet.version == 4 and requested_ip.version != 4:
requested_ip = requested_ip.ipv4()
if requested_ip not in ipnet:
if subnet_ids is not None:
LOG.info("Requested IP {0} not in subnet {1}, "
"retrying".format(str(requested_ip),
str(ipnet)))
raise q_exc.IPAddressNotInSubnet(
ip_addr=ip_address, subnet_id=subnet["id"])
return False
return True
    # RM6180(roaet):
    # - removed session.begin due to deadlocks
    # - fix off-by-one error and overflow
    @synchronized(named("select_subnet"))
    def select_subnet(self, context, net_id, ip_address, segment_id,
                      subnet_ids=None, **filters):
        """Pick one viable subnet for allocation (step 2 of 3).

        Iterates candidates most-full-first, skipping subnets that don't
        contain a requested ``ip_address``, marking exhausted subnets
        full, and pre-reserving next_auto_assign_ip for v4 auto-assign.
        Returns the chosen subnet model, or None when nothing is viable.
        """
        LOG.info("Selecting subnet(s) - (Step 2 of 3) [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, ip_address=ip_address,
                                segment_id=segment_id, subnet_ids=subnet_ids,
                                ip_version=filters.get("ip_version"))))

        # TODO(mdietz): Invert the iterator and the session, should only be
        #               one subnet per attempt. We should also only be fetching
        #               the subnet and usage when we need to. Otherwise
        #               we're locking every subnet for a segment, and once
        #               we stop locking, we're looking at stale data.
        with context.session.begin():
            for subnet, ips_in_subnet in self._select_subnet(context, net_id,
                                                             ip_address,
                                                             segment_id,
                                                             subnet_ids,
                                                             **filters):
                ipnet = netaddr.IPNetwork(subnet["cidr"])
                LOG.info("Trying subnet ID: {0} - CIDR: {1}".format(
                    subnet["id"], subnet["_cidr"]))

                if not self._ip_in_subnet(subnet, subnet_ids, ipnet,
                                          ip_address):
                    continue

                if self._should_mark_subnet_full(context, subnet, ipnet,
                                                 ip_address, ips_in_subnet):
                    LOG.info("Marking subnet {0} as full".format(subnet["id"]))
                    updated = db_api.subnet_update_set_full(context, subnet)

                    # Ensure the session is aware of the changes to the subnet
                    if updated:
                        context.session.refresh(subnet)
                    continue

                if not ip_address and subnet["ip_version"] == 4:
                    # Reserve the next auto-assign value for the caller.
                    auto_inc = db_api.subnet_update_next_auto_assign_ip
                    updated = auto_inc(context, subnet)
                    if updated:
                        context.session.refresh(subnet)
                    else:
                        # This means the subnet was marked full
                        # while we were checking out policies.
                        # Fall out and go back to the outer retry
                        # loop.
                        return

                LOG.info("Subnet {0} - {1} {2} looks viable, "
                         "returning".format(subnet["id"], subnet["_cidr"],
                                            subnet["next_auto_assign_ip"]))
                return subnet
class QuarkIpamANY(QuarkIpam):
    """IPAM strategy that is satisfied by one address of any IP version."""

    @classmethod
    def get_name(cls):
        """Return the registry name of this strategy."""
        # Fix: the classmethod's first parameter was named ``self``;
        # by convention (PEP 8) it is ``cls``.
        return "ANY"

    def _choose_available_subnet(self, context, net_id, version=None,
                                 segment_id=None, ip_address=None,
                                 reallocated_ips=None):
        """Pick a single candidate subnet matching the optional version.

        :raises exceptions.IpAddressGenerationFailure: when no viable
            subnet could be selected
        """
        filters = {}
        if version:
            filters["ip_version"] = version
        subnet = self.select_subnet(context, net_id, ip_address, segment_id,
                                    **filters)
        if subnet:
            return [subnet]
        raise exceptions.IpAddressGenerationFailure(net_id=net_id)
class QuarkIpamBOTH(QuarkIpam):
    """Strategy that tries to hand out one v4 AND one v6 address.

    Both versions are attempted; once the creation phase has completed,
    a single version is accepted as sufficient.
    """

    @classmethod
    def get_name(cls):
        """Return the registry name of this strategy."""
        # Fix: classmethod first parameter renamed self -> cls (PEP 8).
        return "BOTH"

    def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
        """Satisfied with both versions, or just one post-allocation."""
        required = [4, 6]
        for ip in reallocated_ips:
            if ip is not None:
                required.remove(ip["version"])
        still_needed = len(required)
        if still_needed == 0:
            return True
        elif still_needed == 1 and allocate_complete:
            return True
        return False

    def attempt_to_reallocate_ip(self, context, net_id, port_id,
                                 reuse_after, version=None,
                                 ip_address=None, segment_id=None,
                                 subnets=None, **kwargs):
        """Attempt reallocation for v4 and v6, returning all successes."""
        both_versions = []
        for ver in (4, 6):
            address = super(QuarkIpamBOTH, self).attempt_to_reallocate_ip(
                context, net_id, port_id, reuse_after, ver, ip_address,
                segment_id, subnets=subnets, **kwargs)
            both_versions.extend(address)
        return both_versions

    def _choose_available_subnet(self, context, net_id, version=None,
                                 segment_id=None, ip_address=None,
                                 reallocated_ips=None):
        """Select one subnet per still-missing IP version.

        :raises exceptions.IpAddressGenerationFailure: nothing was
            reallocated and no subnet was found for either version
        """
        both_subnet_versions = []
        need_versions = [4, 6]
        for i in reallocated_ips:
            if i["version"] in need_versions:
                need_versions.remove(i["version"])
        filters = {}
        for ver in need_versions:
            filters["ip_version"] = ver
            sub = self.select_subnet(context, net_id, ip_address, segment_id,
                                     **filters)
            if sub:
                both_subnet_versions.append(sub)
        if not reallocated_ips and not both_subnet_versions:
            raise exceptions.IpAddressGenerationFailure(net_id=net_id)

        return both_subnet_versions
class QuarkIpamBOTHREQ(QuarkIpamBOTH):
@classmethod
def get_name(self):
return "BOTH_REQUIRED"
def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
ips_allocated = len(req)
if ips_allocated == 0:
return True
return False
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
subnets = super(QuarkIpamBOTHREQ, self)._choose_available_subnet(
context, net_id, version, segment_id, ip_address, reallocated_ips)
if len(reallocated_ips) + len(subnets) < 2:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
return subnets
class IpamRegistry(object):
def __init__(self):
self.strategies = {
QuarkIpamANY.get_name(): QuarkIpamANY(),
QuarkIpamBOTH.get_name(): QuarkIpamBOTH(),
QuarkIpamBOTHREQ.get_name(): QuarkIpamBOTHREQ()}
def is_valid_strategy(self, strategy_name):
if strategy_name in self.strategies:
return True
return False
def get_strategy(self, strategy_name):
if self.is_valid_strategy(strategy_name):
return self.strategies[strategy_name]
fallback = CONF.QUARK.default_ipam_strategy
LOG.warn("IPAM strategy %s not found, "
"using default %s" % (strategy_name, fallback))
return self.strategies[fallback]
IPAM_REGISTRY = IpamRegistry()
| 42.740481 | 79 | 0.530559 |
import functools
import itertools
import random
import time
import uuid
import netaddr
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from oslo.config import cfg
from oslo.db import exception as db_exception
from oslo.utils import timeutils
from oslo_concurrency import lockutils
from oslo_log import log as logging
from quark.db import api as db_api
from quark.db import ip_types
from quark.db import models
from quark import exceptions as q_exc
from quark import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
quark_opts = [
cfg.IntOpt('v6_allocation_attempts',
default=10,
help=_('Number of times to retry generating v6 addresses'
' before failure. Also implicitly controls how many'
' v6 addresses we assign to any port, as the random'
' values generated will be the same every time.')),
cfg.IntOpt("mac_address_retry_max",
default=20,
help=_("Number of times to attempt to allocate a new MAC"
" address before giving up.")),
cfg.IntOpt("ip_address_retry_max",
default=20,
help=_("Number of times to attempt to allocate a new IP"
" address before giving up.")),
cfg.BoolOpt("ipam_use_synchronization",
default=False,
help=_("Configures whether or not to use the experimental"
" semaphore logic around IPAM")),
cfg.BoolOpt("ipam_select_subnet_v6_locking",
default=True,
help=_("Controls whether or not SELECT ... FOR UPDATE is used"
" when retrieving v6 subnets explicitly."))
]
CONF.register_opts(quark_opts, "QUARK")
MAGIC_INT = 144115188075855872
def no_synchronization(*args, **kwargs):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
return f(*args, **kwargs)
return inner
return wrap
def named(sema):
return "%s.%s" % (__name__, sema)
if CONF.QUARK.ipam_use_synchronization:
synchronized = lockutils.synchronized
else:
synchronized = no_synchronization
def rfc2462_ip(mac, cidr):
int_val = netaddr.IPNetwork(cidr).value
mac = netaddr.EUI(mac)
int_val += mac.eui64().value
int_val ^= MAGIC_INT
return int_val
def rfc3041_ip(port_id, cidr):
random.seed(int(uuid.UUID(port_id)))
int_val = netaddr.IPNetwork(cidr).value
while True:
val = int_val + random.getrandbits(64)
val ^= MAGIC_INT
yield val
def generate_v6(mac, port_id, cidr):
# have a MAC to base our generator on in that case for
# example.
if mac is not None:
yield rfc2462_ip(mac, cidr)
for addr in rfc3041_ip(port_id, cidr):
yield addr
def ipam_logged(fx):
def wrap(self, *args, **kwargs):
log = QuarkIPAMLog()
kwargs['ipam_log'] = log
try:
return fx(self, *args, **kwargs)
finally:
log.end()
return wrap
class QuarkIPAMLog(object):
def __init__(self):
self.entries = {}
self.success = True
def make_entry(self, fx_name):
if fx_name not in self.entries:
self.entries[fx_name] = []
entry = QuarkIPAMLogEntry(self, fx_name)
self.entries[fx_name].append(entry)
return entry
def _output(self, status, time_total, fails, successes):
status = "SUCCESS"
if not self.success:
status = "FAILED"
LOG.debug("STATUS:%s TIME:%f ATTEMPTS:%d PASS:%d FAIL:%d" %
(status, time_total, fails + successes, successes, fails))
def end(self):
total = 0
fails = 0
successes = 0
for fx, entries in self.entries.items():
for entry in entries:
total += entry.get_time()
if entry.success:
successes += 1
else:
fails += 1
self._output(self.success, total, fails, successes)
def failed(self):
self.success = False
class QuarkIPAMLogEntry(object):
def __init__(self, log, name):
self.name = name
self.log = log
self.start_time = time.time()
self.success = True
def failed(self):
self.success = False
def end(self):
self.end_time = time.time()
def get_time(self):
if not hasattr(self, 'end_time'):
return 0
return self.end_time - self.start_time
class QuarkIpam(object):
@synchronized(named("allocate_mac_address"))
def allocate_mac_address(self, context, net_id, port_id, reuse_after,
mac_address=None,
use_forbidden_mac_range=False):
if mac_address:
mac_address = netaddr.EUI(mac_address).value
kwargs = {"network_id": net_id, "port_id": port_id,
"mac_address": mac_address,
"use_forbidden_mac_range": use_forbidden_mac_range}
LOG.info(("Attempting to allocate a new MAC address "
"[{0}]").format(utils.pretty_kwargs(**kwargs)))
for retry in xrange(CONF.QUARK.mac_address_retry_max):
LOG.info("Attemping to reallocate deallocated MAC (step 1 of 3),"
" attempt {0} of {1}".format(
retry + 1, CONF.QUARK.mac_address_retry_max))
try:
with context.session.begin():
transaction = db_api.transaction_create(context)
update_kwargs = {
"deallocated": False,
"deallocated_at": None,
"transaction_id": transaction.id
}
filter_kwargs = {
"reuse_after": reuse_after,
"deallocated": True,
"address": mac_address
}
elevated = context.elevated()
result = db_api.mac_address_reallocate(
elevated, update_kwargs, **filter_kwargs)
if not result:
break
reallocated_mac = db_api.mac_address_reallocate_find(
elevated, transaction.id)
if reallocated_mac:
dealloc = netaddr.EUI(reallocated_mac["address"])
LOG.info("Found a suitable deallocated MAC {0}".format(
str(dealloc)))
LOG.info("MAC assignment for port ID {0} completed "
"with address {1}".format(port_id, dealloc))
return reallocated_mac
except Exception:
LOG.exception("Error in mac reallocate...")
continue
LOG.info("Couldn't find a suitable deallocated MAC, attempting "
"to create a new one")
for retry in xrange(CONF.QUARK.mac_address_retry_max):
LOG.info("Attemping to find a range to create a new MAC in "
"(step 2 of 3), attempt {0} of {1}".format(
retry + 1, CONF.QUARK.mac_address_retry_max))
next_address = None
with context.session.begin():
try:
fn = db_api.mac_address_range_find_allocation_counts
mac_range = \
fn(context, address=mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
if not mac_range:
LOG.info("No MAC ranges could be found given "
"the criteria")
break
rng, addr_count = mac_range
LOG.info("Found a MAC range {0}".format(rng["cidr"]))
last = rng["last_address"]
first = rng["first_address"]
if (last - first + 1) <= addr_count:
db_api.mac_range_update_set_full(context, rng)
LOG.info("MAC range {0} is full".format(rng["cidr"]))
continue
if mac_address:
next_address = mac_address
else:
next_address = rng["next_auto_assign_mac"]
if next_address + 1 > rng["last_address"]:
db_api.mac_range_update_set_full(context, rng)
else:
db_api.mac_range_update_next_auto_assign_mac(
context, rng)
context.session.refresh(rng)
except Exception:
LOG.exception("Error in updating mac range")
continue
try:
mac_readable = str(netaddr.EUI(next_address))
LOG.info("Attempting to create new MAC {0} "
"(step 3 of 3)".format(mac_readable))
with context.session.begin():
address = db_api.mac_address_create(
context, address=next_address,
mac_address_range_id=rng["id"])
LOG.info("MAC assignment for port ID {0} completed with "
"address {1}".format(port_id, mac_readable))
return address
except Exception:
LOG.info("Failed to create new MAC {0}".format(mac_readable))
LOG.exception("Error in creating mac. MAC possibly duplicate")
continue
raise exceptions.MacAddressGenerationFailure(net_id=net_id)
@synchronized(named("reallocate_ip"))
def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
version=None, ip_address=None,
segment_id=None, subnets=None, **kwargs):
version = version or [4, 6]
elevated = context.elevated()
LOG.info("Attempting to reallocate an IP (step 1 of 3) - [{0}]".format(
utils.pretty_kwargs(network_id=net_id, port_id=port_id,
version=version, segment_id=segment_id,
subnets=subnets)))
if version == 6:
# up subnets here to correctly generate the v6. If we split them
# up into reallocate and create, we'd be looking up the same
# using a list of [4, 6]. It works as expected most
# of the time, but we can anticipate that isolated
# networks will end up using sequential assignment.
# Probably want to rework this logic to compensate
# at some point. Considering they all come from the
# same MAC address pool, nothing bad will happen,
# just worth noticing and fixing.
LOG.info("Identified as v6 case, deferring to IP create path")
return []
sub_ids = []
if subnets:
sub_ids = subnets
elif segment_id:
subnets = db_api.subnet_find(elevated,
network_id=net_id,
segment_id=segment_id)
sub_ids = [s["id"] for s in subnets]
if not sub_ids:
LOG.info("No subnets matching segment_id {0} could be "
"found".format(segment_id))
raise exceptions.IpAddressGenerationFailure(
net_id=net_id)
ip_kwargs = {
"network_id": net_id,
"reuse_after": reuse_after,
"deallocated": True,
"ip_address": ip_address,
"version": version,
}
if ip_address:
del ip_kwargs["deallocated"]
if sub_ids:
ip_kwargs["subnet_id"] = sub_ids
ipam_log = kwargs.get('ipam_log', None)
for retry in xrange(CONF.QUARK.ip_address_retry_max):
attempt = None
if ipam_log:
attempt = ipam_log.make_entry("attempt_to_reallocate_ip")
LOG.info("Attempt {0} of {1}".format(
retry + 1, CONF.QUARK.ip_address_retry_max))
try:
with context.session.begin():
transaction = db_api.transaction_create(context)
m = models.IPAddress
update_kwargs = {
m.transaction_id: transaction.id,
m.address_type: kwargs.get("address_type", ip_types.FIXED),
m.deallocated: False,
m.deallocated_at: None,
m.used_by_tenant_id: context.tenant_id,
m.allocated_at: timeutils.utcnow(),
}
result = db_api.ip_address_reallocate(
elevated, update_kwargs, **ip_kwargs)
if not result:
LOG.info("Couldn't update any reallocatable addresses "
"given the criteria")
if attempt:
attempt.failed()
break
updated_address = db_api.ip_address_reallocate_find(
elevated, transaction.id)
if not updated_address:
if attempt:
attempt.failed()
continue
LOG.info("Address {0} is reallocated".format(
updated_address["address_readable"]))
return [updated_address]
except Exception:
if attempt:
attempt.failed()
LOG.exception("Error in reallocate ip...")
finally:
if attempt:
attempt.end()
return []
def is_strategy_satisfied(self, ip_addresses, allocate_complete=False):
return ip_addresses
def _allocate_from_subnet(self, context, net_id, subnet,
port_id, reuse_after, ip_address=None, **kwargs):
LOG.info("Creating a new address in subnet {0} - [{1}]".format(
subnet["_cidr"], utils.pretty_kwargs(network_id=net_id,
subnet=subnet,
port_id=port_id,
ip_address=ip_address)))
ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
next_ip = ip_address
if not next_ip:
if subnet["next_auto_assign_ip"] != -1:
next_ip = netaddr.IPAddress(subnet["next_auto_assign_ip"] - 1)
else:
next_ip = netaddr.IPAddress(subnet["last_ip"])
if subnet["ip_version"] == 4:
next_ip = next_ip.ipv4()
LOG.info("Next IP is {0}".format(str(next_ip)))
if ip_policy_cidrs and next_ip in ip_policy_cidrs and not ip_address:
LOG.info("Next IP {0} violates policy".format(str(next_ip)))
raise q_exc.IPAddressPolicyRetryableFailure(ip_addr=next_ip,
net_id=net_id)
try:
with context.session.begin():
address = db_api.ip_address_create(
context, address=next_ip, subnet_id=subnet["id"],
deallocated=0, version=subnet["ip_version"],
network_id=net_id,
port_id=port_id,
address_type=kwargs.get('address_type', ip_types.FIXED))
address["deallocated"] = 0
except Exception:
if ip_address:
raise exceptions.IpAddressInUse(ip_address=next_ip,
net_id=net_id)
raise q_exc.IPAddressRetryableFailure(ip_addr=next_ip,
net_id=net_id)
return address
def _allocate_from_v6_subnet(self, context, net_id, subnet,
port_id, reuse_after, ip_address=None,
**kwargs):
LOG.info("Attempting to allocate a v6 address - [{0}]".format(
utils.pretty_kwargs(network_id=net_id, subnet=subnet,
port_id=port_id, ip_address=ip_address)))
if ip_address:
LOG.info("IP %s explicitly requested, deferring to standard "
"allocation" % ip_address)
return self._allocate_from_subnet(context, net_id=net_id,
subnet=subnet, port_id=port_id,
reuse_after=reuse_after,
ip_address=ip_address, **kwargs)
else:
mac = kwargs.get("mac_address")
if mac:
mac = kwargs["mac_address"].get("address")
ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
for tries, ip_address in enumerate(
generate_v6(mac, port_id, subnet["cidr"])):
LOG.info("Attempt {0} of {1}".format(
tries + 1, CONF.QUARK.v6_allocation_attempts))
if tries > CONF.QUARK.v6_allocation_attempts - 1:
LOG.info("Exceeded v6 allocation attempts, bailing")
raise exceptions.IpAddressGenerationFailure(
net_id=net_id)
ip_address = netaddr.IPAddress(ip_address).ipv6()
LOG.info("Generated a new v6 address {0}".format(
str(ip_address)))
# NOTE(mdietz): treating the IPSet as a boolean caused netaddr
# to attempt to enumerate the entire set!
if (ip_policy_cidrs is not None and
ip_address in ip_policy_cidrs):
LOG.info("Address {0} excluded by policy".format(
str(ip_address)))
continue
# TODO(mdietz): replace this with a compare-and-swap loop
with context.session.begin():
address = db_api.ip_address_find(
context, network_id=net_id, ip_address=ip_address,
scope=db_api.ONE, reuse_after=reuse_after,
deallocated=True, subnet_id=subnet["id"],
lock_mode=True)
if address:
LOG.info("Address {0} exists, claiming".format(
str(ip_address)))
return db_api.ip_address_update(
context, address, deallocated=False,
deallocated_at=None,
used_by_tenant_id=context.tenant_id,
allocated_at=timeutils.utcnow(),
address_type=kwargs.get('address_type',
ip_types.FIXED))
# This triggers when the IP is allocated to another tenant,
# either because we missed it due to our filters above, or
# in an extremely unlikely race between the find and here.
try:
with context.session.begin():
return db_api.ip_address_create(
context, address=ip_address,
subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id,
address_type=kwargs.get('address_type',
ip_types.FIXED))
except db_exception.DBDuplicateEntry:
LOG.info("{0} exists but was already "
"allocated".format(str(ip_address)))
LOG.debug("Duplicate entry found when inserting subnet_id"
" %s ip_address %s", subnet["id"], ip_address)
def _allocate_ips_from_subnets(self, context, new_addresses, net_id,
subnets, port_id, reuse_after,
ip_address=None, **kwargs):
LOG.info("Allocating IP(s) from chosen subnet(s) (step 3 of 3) - "
"[{0}]".format(utils.pretty_kwargs(
network_id=net_id, port_id=port_id,
new_addresses=new_addresses, ip_address=ip_address)))
subnets = subnets or []
for subnet in subnets:
if not subnet:
continue
LOG.info("Attempting to allocate from {0} - {1}".format(
subnet["id"], subnet["_cidr"]))
address = None
if int(subnet["ip_version"]) == 4:
address = self._allocate_from_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
else:
address = self._allocate_from_v6_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
if address:
LOG.info("Created IP {0}".format(
address["address_readable"]))
new_addresses.append(address)
return new_addresses
def _notify_new_addresses(self, context, new_addresses):
for addr in new_addresses:
payload = dict(used_by_tenant_id=addr["used_by_tenant_id"],
ip_block_id=addr["subnet_id"],
ip_address=addr["address_readable"],
device_ids=[p["device_id"] for p in addr["ports"]],
created_at=addr["created_at"])
n_rpc.get_notifier("network").info(context,
"ip_block.address.create",
payload)
@ipam_logged
def allocate_ip_address(self, context, new_addresses, net_id, port_id,
reuse_after, segment_id=None, version=None,
ip_addresses=None, subnets=None, **kwargs):
elevated = context.elevated()
subnets = subnets or []
ip_addresses = ip_addresses or []
ipam_log = kwargs.get('ipam_log', None)
LOG.info("Starting a new IP address(es) allocation. Strategy "
"is {0} - [{1}]".format(
self.get_name(),
utils.pretty_kwargs(network_id=net_id, port_id=port_id,
new_addresses=new_addresses,
ip_addresses=ip_addresses,
subnets=subnets,
segment_id=segment_id,
version=version)))
def _try_reallocate_ip_address(ipam_log, ip_addr=None):
new_addresses.extend(self.attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, version=None,
ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
**kwargs))
def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
for retry in xrange(CONF.QUARK.ip_address_retry_max):
attempt = None
if ipam_log:
attempt = ipam_log.make_entry("_try_allocate_ip_address")
LOG.info("Allocating new IP attempt {0} of {1}".format(
retry + 1, CONF.QUARK.ip_address_retry_max))
if not sub:
subnets = self._choose_available_subnet(
elevated, net_id, version, segment_id=segment_id,
ip_address=ip_addr, reallocated_ips=new_addresses)
else:
subnets = [self.select_subnet(context, net_id,
ip_addr, segment_id,
subnet_ids=[sub])]
LOG.info("Subnet selection returned {0} viable subnet(s) - "
"IDs: {1}".format(len(subnets),
", ".join([str(s["id"])
for s in subnets if s])))
try:
self._allocate_ips_from_subnets(context, new_addresses,
net_id, subnets,
port_id, reuse_after,
ip_addr, **kwargs)
except q_exc.IPAddressRetryableFailure:
LOG.exception("Error in allocating IP")
if attempt:
LOG.debug("ATTEMPT FAILED")
attempt.failed()
remaining = CONF.QUARK.ip_address_retry_max - retry - 1
if remaining > 0:
LOG.info("{0} retries remain, retrying...".format(
remaining))
else:
LOG.info("No retries remaing, bailing")
continue
finally:
if attempt:
attempt.end()
break
ip_addresses = [netaddr.IPAddress(ip_address)
for ip_address in ip_addresses]
if ip_addresses:
for ip_address in ip_addresses:
_try_reallocate_ip_address(ipam_log, ip_address)
else:
_try_reallocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses):
return
else:
LOG.info("Reallocated addresses {0} but still need more addresses "
"to satisfy strategy {1}. Falling back to creating "
"IPs".format(new_addresses, self.get_name()))
if ip_addresses or subnets:
for ip_address, subnet in itertools.izip_longest(ip_addresses,
subnets):
_try_allocate_ip_address(ipam_log, ip_address, subnet)
else:
_try_allocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
self._notify_new_addresses(context, new_addresses)
LOG.info("IPAM for port ID {0} completed with addresses "
"{1}".format(port_id,
[a["address_readable"]
for a in new_addresses]))
return
ipam_log.failed()
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
def deallocate_ip_address(self, context, address):
address["deallocated"] = 1
address["address_type"] = None
payload = dict(used_by_tenant_id=address["used_by_tenant_id"],
ip_block_id=address["subnet_id"],
ip_address=address["address_readable"],
device_ids=[p["device_id"] for p in address["ports"]],
created_at=address["created_at"],
deleted_at=timeutils.utcnow())
n_rpc.get_notifier("network").info(context,
"ip_block.address.delete",
payload)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
ips_removed = []
for addr in port["ip_addresses"]:
if "ip_address" in kwargs:
ip = kwargs["ip_address"]
if ip != netaddr.IPAddress(int(addr["address"])):
continue
# Note: only deallocate ip if this is the
# only port mapped
if len(addr["ports"]) == 1:
self.deallocate_ip_address(context, addr)
ips_removed.append(addr)
port["ip_addresses"] = list(
set(port["ip_addresses"]) - set(ips_removed))
# NCP-1509(roaet):
# - started using admin_context due to tenant not claiming when realloc
def deallocate_mac_address(self, context, address):
admin_context = context.elevated()
mac = db_api.mac_address_find(admin_context, address=address,
scope=db_api.ONE)
if not mac:
raise exceptions.NotFound(
message="No MAC address %s found" % netaddr.EUI(address))
if mac["mac_address_range"]["do_not_use"]:
db_api.mac_address_delete(admin_context, mac)
else:
db_api.mac_address_update(admin_context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())
def _select_subnet(self, context, net_id, ip_address, segment_id,
subnet_ids, **filters):
# NCP-1480: Don't need to lock V6 subnets, since we don't use
# next_auto_assign_ip for them. We already uniquely identified
# the V6 we're going to get by generating a MAC in a previous step.
lock_subnets = True
if (not CONF.QUARK.ipam_select_subnet_v6_locking and
"ip_version" in filters and
int(filters["ip_version"]) == 6):
lock_subnets = False
select_api = db_api.subnet_find_ordered_by_most_full
subnets = select_api(context, net_id, lock_subnets=lock_subnets,
segment_id=segment_id, scope=db_api.ALL,
subnet_id=subnet_ids, **filters)
if not subnets:
LOG.info("No subnets found given the search criteria!")
return
for subnet, ips_in_subnet in subnets:
yield subnet, ips_in_subnet
def _should_mark_subnet_full(self, context, subnet, ipnet, ip_address,
ips_in_subnet):
ip = subnet["next_auto_assign_ip"]
# I'm not comfortable removing it yet.
if (subnet["ip_version"] == 4 and ip < subnet["first_ip"] or
ip > subnet["last_ip"]):
return True
ip_policy = None
if not ip_address:
# need to check if we're allocating a new IP
ip_policy = subnet.get("ip_policy")
policy_size = ip_policy["size"] if ip_policy else 0
if ipnet.size > (ips_in_subnet + policy_size - 1):
return False
return True
def _ip_in_subnet(self, subnet, subnet_ids, ipnet, ip_address):
if ip_address:
requested_ip = netaddr.IPAddress(ip_address)
if ipnet.version == 4 and requested_ip.version != 4:
requested_ip = requested_ip.ipv4()
if requested_ip not in ipnet:
if subnet_ids is not None:
LOG.info("Requested IP {0} not in subnet {1}, "
"retrying".format(str(requested_ip),
str(ipnet)))
raise q_exc.IPAddressNotInSubnet(
ip_addr=ip_address, subnet_id=subnet["id"])
return False
return True
@synchronized(named("select_subnet"))
def select_subnet(self, context, net_id, ip_address, segment_id,
subnet_ids=None, **filters):
LOG.info("Selecting subnet(s) - (Step 2 of 3) [{0}]".format(
utils.pretty_kwargs(network_id=net_id, ip_address=ip_address,
segment_id=segment_id, subnet_ids=subnet_ids,
ip_version=filters.get("ip_version"))))
# we stop locking, we're looking at stale data.
with context.session.begin():
for subnet, ips_in_subnet in self._select_subnet(context, net_id,
ip_address,
segment_id,
subnet_ids,
**filters):
ipnet = netaddr.IPNetwork(subnet["cidr"])
LOG.info("Trying subnet ID: {0} - CIDR: {1}".format(
subnet["id"], subnet["_cidr"]))
if not self._ip_in_subnet(subnet, subnet_ids, ipnet,
ip_address):
continue
if self._should_mark_subnet_full(context, subnet, ipnet,
ip_address, ips_in_subnet):
LOG.info("Marking subnet {0} as full".format(subnet["id"]))
updated = db_api.subnet_update_set_full(context, subnet)
if updated:
context.session.refresh(subnet)
continue
if not ip_address and subnet["ip_version"] == 4:
auto_inc = db_api.subnet_update_next_auto_assign_ip
updated = auto_inc(context, subnet)
if updated:
context.session.refresh(subnet)
else:
return
LOG.info("Subnet {0} - {1} {2} looks viable, "
"returning".format(subnet["id"], subnet["_cidr"],
subnet["next_auto_assign_ip"]))
return subnet
class QuarkIpamANY(QuarkIpam):
@classmethod
def get_name(self):
return "ANY"
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
filters = {}
if version:
filters["ip_version"] = version
subnet = self.select_subnet(context, net_id, ip_address, segment_id,
**filters)
if subnet:
return [subnet]
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
class QuarkIpamBOTH(QuarkIpam):
@classmethod
def get_name(self):
return "BOTH"
def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
ips_allocated = len(req)
if ips_allocated == 0:
return True
elif ips_allocated == 1 and allocate_complete:
return True
return False
def attempt_to_reallocate_ip(self, context, net_id, port_id,
reuse_after, version=None,
ip_address=None, segment_id=None,
subnets=None, **kwargs):
both_versions = []
for ver in (4, 6):
address = super(QuarkIpamBOTH, self).attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, ver, ip_address,
segment_id, subnets=subnets, **kwargs)
both_versions.extend(address)
return both_versions
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
both_subnet_versions = []
need_versions = [4, 6]
for i in reallocated_ips:
if i["version"] in need_versions:
need_versions.remove(i["version"])
filters = {}
for ver in need_versions:
filters["ip_version"] = ver
sub = self.select_subnet(context, net_id, ip_address, segment_id,
**filters)
if sub:
both_subnet_versions.append(sub)
if not reallocated_ips and not both_subnet_versions:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
return both_subnet_versions
class QuarkIpamBOTHREQ(QuarkIpamBOTH):
@classmethod
def get_name(self):
return "BOTH_REQUIRED"
def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
ips_allocated = len(req)
if ips_allocated == 0:
return True
return False
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
subnets = super(QuarkIpamBOTHREQ, self)._choose_available_subnet(
context, net_id, version, segment_id, ip_address, reallocated_ips)
if len(reallocated_ips) + len(subnets) < 2:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
return subnets
class IpamRegistry(object):
def __init__(self):
self.strategies = {
QuarkIpamANY.get_name(): QuarkIpamANY(),
QuarkIpamBOTH.get_name(): QuarkIpamBOTH(),
QuarkIpamBOTHREQ.get_name(): QuarkIpamBOTHREQ()}
def is_valid_strategy(self, strategy_name):
if strategy_name in self.strategies:
return True
return False
def get_strategy(self, strategy_name):
if self.is_valid_strategy(strategy_name):
return self.strategies[strategy_name]
fallback = CONF.QUARK.default_ipam_strategy
LOG.warn("IPAM strategy %s not found, "
"using default %s" % (strategy_name, fallback))
return self.strategies[fallback]
IPAM_REGISTRY = IpamRegistry()
| true | true |
f722d9f8f126934689d84b4892161ba981c1213a | 631 | py | Python | cunyfirstapi/helper.py | ericshermancs/CunyFirstAPI | e6dc30cb01d575fe2fa67db611f84bede1b2c976 | [
"MIT"
] | 10 | 2019-01-14T12:07:55.000Z | 2019-12-03T13:39:01.000Z | cunyfirstapi/helper.py | ericshermancs/CunyFirstAPI | e6dc30cb01d575fe2fa67db611f84bede1b2c976 | [
"MIT"
] | 35 | 2019-01-13T21:48:39.000Z | 2019-04-22T22:32:13.000Z | cunyfirstapi/helper.py | ericshermancs/CunyFirstAPI | e6dc30cb01d575fe2fa67db611f84bede1b2c976 | [
"MIT"
] | 8 | 2019-01-13T20:08:50.000Z | 2021-02-25T03:50:03.000Z | ###***********************************###
'''
CUNYFirstAPI
File: helper.py
Core Maintainers: Ehud Adler, Akiva Sherman,
Yehuda Moskovits
Copyright: Copyright 2019, Ehud Adler
License: MIT
'''
###***********************************###
import datetime
def get_semester():
now = datetime.datetime.now()
today = (now.month, now.day)
if (1, 15) <= today < (6, 15):
return f'{now.year} Spring Term'
elif (6, 15) <= today < (9, 15):
return f'{now.year} Summer Term'
else:
if now.month == 1:
return f'{now.year-1} Fall Term'
else:
return f'{now.year} Fall Term' | 25.24 | 44 | 0.51664 | Spring Term'
elif (6, 15) <= today < (9, 15):
return f'{now.year} Summer Term'
else:
if now.month == 1:
return f'{now.year-1} Fall Term'
else:
return f'{now.year} Fall Term' | true | true |
f722da438f0cbb249d70631a10fd188b0e013e1d | 12,846 | py | Python | tests/test_multi_put.py | BBooijLiewes/django-binder | b5bf0aad14657fd57d575f9a0ef21468533f64a7 | [
"MIT"
] | null | null | null | tests/test_multi_put.py | BBooijLiewes/django-binder | b5bf0aad14657fd57d575f9a0ef21468533f64a7 | [
"MIT"
] | null | null | null | tests/test_multi_put.py | BBooijLiewes/django-binder | b5bf0aad14657fd57d575f9a0ef21468533f64a7 | [
"MIT"
] | null | null | null | import json
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder.json import jsonloads
from .testapp.models import Animal, Zoo, ZooEmployee, ContactPerson
from .compare import assert_json, MAYBE, ANY
class MultiPutTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
def test_put_several_simple_items(self):
model_data = {
'data': [{
'id': -1,
'name': 'Scooby Doo',
}, {
'id': -2,
'name': 'Scrappy Doo',
}]
}
response = self.client.put('/animal/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertEqual(2, len(returned_data['idmap']['animal']))
self.assertIsNotNone(returned_data['idmap']['animal'][0])
self.assertIsNotNone(returned_data['idmap']['animal'][1])
idmap = dict(returned_data['idmap']['animal'])
scooby = Animal.objects.get(pk=idmap[-1])
self.assertEqual(scooby.name, 'Scooby Doo')
scrappy = Animal.objects.get(pk=idmap[-2])
self.assertEqual(scrappy.name, 'Scrappy Doo')
def test_put_with_mixed_ids_updates_existing_items(self):
scooby = Animal(name='Scoooooby Dooooo')
scooby.save()
model_data = {
'data': [{
'id': scooby.pk,
'name': 'Scooby Doo',
}, {
'id': -1,
'name': 'Scrappy Doo',
}]
}
response = self.client.put('/animal/', data=json.dumps(model_data), content_type='application/json')
returned_data = jsonloads(response.content)
self.assertEqual(1, len(returned_data['idmap']['animal']))
self.assertIsNotNone(returned_data['idmap']['animal'][0])
self.assertNotEqual(scooby.pk, returned_data['idmap']['animal'][0][1])
idmap = dict(returned_data['idmap']['animal'])
scooby = Animal.objects.get(pk=scooby.pk)
self.assertEqual(scooby.name, 'Scooby Doo')
scrappy = Animal.objects.get(pk=idmap[-1])
self.assertEqual(scrappy.name, 'Scrappy Doo')
def test_put_relations_from_referencing_side(self):
with_model_data = {
'data': [{
'id': -2,
'zoo': -1,
'name': 'Daffy Duck',
}, {
'id': -3,
'zoo': -2,
'name': 'Pluto',
}, {
# Mix up the order, this should not matter
'id': -1,
'zoo': -1,
'name': 'Scooby Doo',
}, {
'id': -4,
'zoo': -2,
'name': 'Stimpson J Cat',
}],
'with': {
'zoo': [{
'id': -1,
'name': 'Slagharen',
}, {
# Unreferenced from main entity, but should still be created
'id': -3,
'name': 'Apenheul',
}, {
'id': -2,
'name': 'Burgers\' Zoo',
}],
},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
animal_idmap = dict(returned_data['idmap']['animal'])
zoo_idmap = dict(returned_data['idmap']['zoo'])
self.assertEqual(4, len(animal_idmap))
self.assertEqual(3, len(zoo_idmap))
# Check zoos
slagharen = Zoo.objects.get(pk=zoo_idmap[-1])
self.assertEqual('Slagharen', slagharen.name)
burgers = Zoo.objects.get(pk=zoo_idmap[-2])
self.assertEqual("Burgers' Zoo", burgers.name)
apenheul = Zoo.objects.get(pk=zoo_idmap[-3])
self.assertEqual('Apenheul', apenheul.name)
# Check animals
scooby = Animal.objects.get(pk=animal_idmap[-1])
self.assertEqual('Scooby Doo', scooby.name)
self.assertEqual(slagharen, scooby.zoo)
daffy = Animal.objects.get(pk=animal_idmap[-2])
self.assertEqual('Daffy Duck', daffy.name)
self.assertEqual(slagharen, daffy.zoo)
pluto = Animal.objects.get(pk=animal_idmap[-3])
self.assertEqual('Pluto', pluto.name)
self.assertEqual(burgers, pluto.zoo)
stimpy = Animal.objects.get(pk=animal_idmap[-4])
self.assertEqual('Stimpson J Cat', stimpy.name)
self.assertEqual(burgers, stimpy.zoo)
def test_put_relations_from_referenced_side(self):
with_model_data = {
'data': [{
'id': -1,
'name': 'Central Park Zoo',
# TODO
#'animals': [-1, -2],
}, {
# A gap in IDs, should not matter either
'id': -3,
'name': 'Artis',
#'animals': [-4],
}],
'with': {
'animal': [{
'id': -1,
'name': 'Alex the lion',
'zoo': -1,
}, {
'id': -2,
'name': 'Ren Höek',
'zoo': -1,
}, {
'id': -3,
'name': 'Tom',
}, {
'id': -4,
'name': 'Jerry',
'zoo': -3,
}],
},
}
response = self.client.put('/zoo/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_idmap = dict(returned_data['idmap']['zoo'])
animal_idmap = dict(returned_data['idmap']['animal'])
self.assertEqual(2, len(zoo_idmap))
self.assertEqual(4, len(animal_idmap))
# Check zoos
central_park = Zoo.objects.get(pk=zoo_idmap[-1])
self.assertEqual('Central Park Zoo', central_park.name)
artis = Zoo.objects.get(pk=zoo_idmap[-3])
self.assertEqual('Artis', artis.name)
# Check animals
alex = Animal.objects.get(pk=animal_idmap[-1])
self.assertEqual('Alex the lion', alex.name)
self.assertEqual(central_park, alex.zoo)
ren = Animal.objects.get(pk=animal_idmap[-2])
self.assertEqual('Ren Höek', ren.name)
self.assertEqual(central_park, ren.zoo)
tom = Animal.objects.get(pk=animal_idmap[-3])
self.assertEqual('Tom', tom.name)
self.assertIsNone(tom.zoo)
jerry = Animal.objects.get(pk=animal_idmap[-4])
self.assertEqual('Jerry', jerry.name)
self.assertEqual(artis, jerry.zoo)
def test_put_remove_item(self):
with_model_data = {
'data': [{
'id': -1,
'name': 'Scooby Doo',
'zoo': -1,
}],
'with': {
'zoo': [{
'id': -1,
'name': 'Artis',
}],
},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
animal_idmap = dict(returned_data['idmap']['animal'])
scooby_pk = animal_idmap[-1]
scooby = Animal.objects.get(pk=scooby_pk)
self.assertEqual('Artis', scooby.zoo.name)
with_model_data = {
'data': [{
'id': scooby_pk,
'zoo': None,
}],
'with': {},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
scooby = Animal.objects.get(pk=scooby_pk)
self.assertEqual(None, scooby.zoo)
def test_put_reverse_ref(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'zoo_employees': [-2, -3]
}],
'with': {
'zoo_employee': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_ids = {i for i, _ in returned_data['idmap']['zoo']}
zoo_employee_ids = {i for i, _ in returned_data['idmap']['zoo_employee']}
self.assertEqual(zoo_ids, {-1})
self.assertEqual(zoo_employee_ids, {-2, -3})
def test_remove_relation_through_backref(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'animals': [-2, -3]
}],
'with': {
'animal': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_id = returned_data['idmap']['zoo'][0][1]
update_data = {'animals': []}
response = self.client.put('/zoo/{}/?with=animals'.format(zoo_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertEqual(returned_data['animals'], [])
def test_remove_relation_through_backref_non_nullable_soft_deletable(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'zoo_employees': [-2, -3]
}],
'with': {
'zoo_employee': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_id = returned_data['idmap']['zoo'][0][1]
harambe_id = returned_data['idmap']['zoo_employee'][0][1]
bokito_id = returned_data['idmap']['zoo_employee'][1][1]
# Fixup if needed (gosh this format is FUBAR)
if returned_data['idmap']['zoo_employee'][0][0] == -3:
harambe_id, bokito_id = bokito_id, harambe_id
update_data = {'zoo_employees': [bokito_id]}
response = self.client.put('/zoo/{}/'.format(zoo_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertSetEqual(set(returned_data['zoo_employees']), set([bokito_id, harambe_id]))
bokito = ZooEmployee.objects.get(id=bokito_id)
harambe = ZooEmployee.objects.get(id=harambe_id)
self.assertFalse(bokito.deleted)
self.assertTrue(harambe.deleted)
def test_remove_relation_through_backref_with_custom_unsetter(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'animals': [-2, -3]
}],
'with': {
'animal': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/caretaker/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
caretaker_id = returned_data['idmap']['caretaker'][0][1]
animal_ids = [new for old, new in returned_data['idmap']['animal']]
update_data = {'animals': []}
response = self.client.put('/caretaker/{}/'.format(caretaker_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'errors': {
'animal': {
str(animal_id): {
'caretaker': [
{'code': 'cant_unset', MAYBE('message'): ANY(str)}
]
}
for animal_id in animal_ids
}
},
'code': 'ValidationError',
MAYBE('debug'): ANY(),
})
# This is a regression test for a deprecation issue that was
# removed in Django 2.0: now you need to use .set on m2m
# relations when updating the reference list.
# This apparently only happened in the multi-put, but still....
def test_update_model_with_m2m_field_causes_no_error(self):
artis = Zoo(name='Artis')
artis.full_clean()
artis.save()
contact1 = ContactPerson(name='cp1')
contact1.full_clean()
contact1.save()
contact1.zoos.add(artis)
contact2 = ContactPerson(name='cp2')
contact2.full_clean()
contact2.save()
contact2.zoos.add(artis)
model_data = {
'data': [{
'id': artis.id,
'contacts': [contact1.pk],
}]
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
artis.refresh_from_db()
self.assertSetEqual({contact1.pk}, {c.pk for c in artis.contacts.all()})
# Now from the other end
model_data = {
'data': [{
'id': contact1.id,
'zoos': [],
}]
}
response = self.client.put('/contact_person/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
contact1.refresh_from_db()
self.assertSetEqual(set(), {c.pk for c in contact1.zoos.all()})
# Regression test: When multi-putting a one to one field from the
# other end it would fail hard.
def test_non_nullable_relation_does_not_cause_error(self):
model_data = {
'data': [{
'id': -1,
'name': 'Gaia',
'animals': [-2],
}],
'with': {
'animal': [{
'id': -2,
'name': 'Stimpson J. Cat',
'nickname': -3,
}],
'nickname': [{
'id': -3,
'nickname': 'Stimpy',
}],
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
nickname_id = returned_data['idmap']['nickname'][0][1]
animal_id = returned_data['idmap']['animal'][0][1]
stimpy = Animal.objects.get(pk=animal_id)
self.assertEqual(stimpy.nickname.id, nickname_id)
| 26.651452 | 131 | 0.64666 | import json
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder.json import jsonloads
from .testapp.models import Animal, Zoo, ZooEmployee, ContactPerson
from .compare import assert_json, MAYBE, ANY
class MultiPutTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
def test_put_several_simple_items(self):
model_data = {
'data': [{
'id': -1,
'name': 'Scooby Doo',
}, {
'id': -2,
'name': 'Scrappy Doo',
}]
}
response = self.client.put('/animal/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertEqual(2, len(returned_data['idmap']['animal']))
self.assertIsNotNone(returned_data['idmap']['animal'][0])
self.assertIsNotNone(returned_data['idmap']['animal'][1])
idmap = dict(returned_data['idmap']['animal'])
scooby = Animal.objects.get(pk=idmap[-1])
self.assertEqual(scooby.name, 'Scooby Doo')
scrappy = Animal.objects.get(pk=idmap[-2])
self.assertEqual(scrappy.name, 'Scrappy Doo')
def test_put_with_mixed_ids_updates_existing_items(self):
scooby = Animal(name='Scoooooby Dooooo')
scooby.save()
model_data = {
'data': [{
'id': scooby.pk,
'name': 'Scooby Doo',
}, {
'id': -1,
'name': 'Scrappy Doo',
}]
}
response = self.client.put('/animal/', data=json.dumps(model_data), content_type='application/json')
returned_data = jsonloads(response.content)
self.assertEqual(1, len(returned_data['idmap']['animal']))
self.assertIsNotNone(returned_data['idmap']['animal'][0])
self.assertNotEqual(scooby.pk, returned_data['idmap']['animal'][0][1])
idmap = dict(returned_data['idmap']['animal'])
scooby = Animal.objects.get(pk=scooby.pk)
self.assertEqual(scooby.name, 'Scooby Doo')
scrappy = Animal.objects.get(pk=idmap[-1])
self.assertEqual(scrappy.name, 'Scrappy Doo')
def test_put_relations_from_referencing_side(self):
with_model_data = {
'data': [{
'id': -2,
'zoo': -1,
'name': 'Daffy Duck',
}, {
'id': -3,
'zoo': -2,
'name': 'Pluto',
}, {
'id': -1,
'zoo': -1,
'name': 'Scooby Doo',
}, {
'id': -4,
'zoo': -2,
'name': 'Stimpson J Cat',
}],
'with': {
'zoo': [{
'id': -1,
'name': 'Slagharen',
}, {
'id': -3,
'name': 'Apenheul',
}, {
'id': -2,
'name': 'Burgers\' Zoo',
}],
},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
animal_idmap = dict(returned_data['idmap']['animal'])
zoo_idmap = dict(returned_data['idmap']['zoo'])
self.assertEqual(4, len(animal_idmap))
self.assertEqual(3, len(zoo_idmap))
# Check zoos
slagharen = Zoo.objects.get(pk=zoo_idmap[-1])
self.assertEqual('Slagharen', slagharen.name)
burgers = Zoo.objects.get(pk=zoo_idmap[-2])
self.assertEqual("Burgers' Zoo", burgers.name)
apenheul = Zoo.objects.get(pk=zoo_idmap[-3])
self.assertEqual('Apenheul', apenheul.name)
scooby = Animal.objects.get(pk=animal_idmap[-1])
self.assertEqual('Scooby Doo', scooby.name)
self.assertEqual(slagharen, scooby.zoo)
daffy = Animal.objects.get(pk=animal_idmap[-2])
self.assertEqual('Daffy Duck', daffy.name)
self.assertEqual(slagharen, daffy.zoo)
pluto = Animal.objects.get(pk=animal_idmap[-3])
self.assertEqual('Pluto', pluto.name)
self.assertEqual(burgers, pluto.zoo)
stimpy = Animal.objects.get(pk=animal_idmap[-4])
self.assertEqual('Stimpson J Cat', stimpy.name)
self.assertEqual(burgers, stimpy.zoo)
def test_put_relations_from_referenced_side(self):
with_model_data = {
'data': [{
'id': -1,
'name': 'Central Park Zoo',
}, {
'id': -3,
'name': 'Artis',
}],
'with': {
'animal': [{
'id': -1,
'name': 'Alex the lion',
'zoo': -1,
}, {
'id': -2,
'name': 'Ren Höek',
'zoo': -1,
}, {
'id': -3,
'name': 'Tom',
}, {
'id': -4,
'name': 'Jerry',
'zoo': -3,
}],
},
}
response = self.client.put('/zoo/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_idmap = dict(returned_data['idmap']['zoo'])
animal_idmap = dict(returned_data['idmap']['animal'])
self.assertEqual(2, len(zoo_idmap))
self.assertEqual(4, len(animal_idmap))
central_park = Zoo.objects.get(pk=zoo_idmap[-1])
self.assertEqual('Central Park Zoo', central_park.name)
artis = Zoo.objects.get(pk=zoo_idmap[-3])
self.assertEqual('Artis', artis.name)
alex = Animal.objects.get(pk=animal_idmap[-1])
self.assertEqual('Alex the lion', alex.name)
self.assertEqual(central_park, alex.zoo)
ren = Animal.objects.get(pk=animal_idmap[-2])
self.assertEqual('Ren Höek', ren.name)
self.assertEqual(central_park, ren.zoo)
tom = Animal.objects.get(pk=animal_idmap[-3])
self.assertEqual('Tom', tom.name)
self.assertIsNone(tom.zoo)
jerry = Animal.objects.get(pk=animal_idmap[-4])
self.assertEqual('Jerry', jerry.name)
self.assertEqual(artis, jerry.zoo)
def test_put_remove_item(self):
with_model_data = {
'data': [{
'id': -1,
'name': 'Scooby Doo',
'zoo': -1,
}],
'with': {
'zoo': [{
'id': -1,
'name': 'Artis',
}],
},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
animal_idmap = dict(returned_data['idmap']['animal'])
scooby_pk = animal_idmap[-1]
scooby = Animal.objects.get(pk=scooby_pk)
self.assertEqual('Artis', scooby.zoo.name)
with_model_data = {
'data': [{
'id': scooby_pk,
'zoo': None,
}],
'with': {},
}
response = self.client.put('/animal/', data=json.dumps(with_model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
scooby = Animal.objects.get(pk=scooby_pk)
self.assertEqual(None, scooby.zoo)
def test_put_reverse_ref(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'zoo_employees': [-2, -3]
}],
'with': {
'zoo_employee': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_ids = {i for i, _ in returned_data['idmap']['zoo']}
zoo_employee_ids = {i for i, _ in returned_data['idmap']['zoo_employee']}
self.assertEqual(zoo_ids, {-1})
self.assertEqual(zoo_employee_ids, {-2, -3})
def test_remove_relation_through_backref(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'animals': [-2, -3]
}],
'with': {
'animal': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_id = returned_data['idmap']['zoo'][0][1]
update_data = {'animals': []}
response = self.client.put('/zoo/{}/?with=animals'.format(zoo_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertEqual(returned_data['animals'], [])
def test_remove_relation_through_backref_non_nullable_soft_deletable(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'zoo_employees': [-2, -3]
}],
'with': {
'zoo_employee': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
zoo_id = returned_data['idmap']['zoo'][0][1]
harambe_id = returned_data['idmap']['zoo_employee'][0][1]
bokito_id = returned_data['idmap']['zoo_employee'][1][1]
if returned_data['idmap']['zoo_employee'][0][0] == -3:
harambe_id, bokito_id = bokito_id, harambe_id
update_data = {'zoo_employees': [bokito_id]}
response = self.client.put('/zoo/{}/'.format(zoo_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
self.assertSetEqual(set(returned_data['zoo_employees']), set([bokito_id, harambe_id]))
bokito = ZooEmployee.objects.get(id=bokito_id)
harambe = ZooEmployee.objects.get(id=harambe_id)
self.assertFalse(bokito.deleted)
self.assertTrue(harambe.deleted)
def test_remove_relation_through_backref_with_custom_unsetter(self):
model_data = {
'data': [{
'id': -1,
'name': 'Apenheul',
'animals': [-2, -3]
}],
'with': {
'animal': [{
'id': -2,
'name': 'Harambe',
}, {
'id': -3,
'name': 'Bokito',
}]
}
}
response = self.client.put('/caretaker/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
caretaker_id = returned_data['idmap']['caretaker'][0][1]
animal_ids = [new for old, new in returned_data['idmap']['animal']]
update_data = {'animals': []}
response = self.client.put('/caretaker/{}/'.format(caretaker_id), data=json.dumps(update_data), content_type='application/json')
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'errors': {
'animal': {
str(animal_id): {
'caretaker': [
{'code': 'cant_unset', MAYBE('message'): ANY(str)}
]
}
for animal_id in animal_ids
}
},
'code': 'ValidationError',
MAYBE('debug'): ANY(),
})
def test_update_model_with_m2m_field_causes_no_error(self):
artis = Zoo(name='Artis')
artis.full_clean()
artis.save()
contact1 = ContactPerson(name='cp1')
contact1.full_clean()
contact1.save()
contact1.zoos.add(artis)
contact2 = ContactPerson(name='cp2')
contact2.full_clean()
contact2.save()
contact2.zoos.add(artis)
model_data = {
'data': [{
'id': artis.id,
'contacts': [contact1.pk],
}]
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
artis.refresh_from_db()
self.assertSetEqual({contact1.pk}, {c.pk for c in artis.contacts.all()})
model_data = {
'data': [{
'id': contact1.id,
'zoos': [],
}]
}
response = self.client.put('/contact_person/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
contact1.refresh_from_db()
self.assertSetEqual(set(), {c.pk for c in contact1.zoos.all()})
def test_non_nullable_relation_does_not_cause_error(self):
model_data = {
'data': [{
'id': -1,
'name': 'Gaia',
'animals': [-2],
}],
'with': {
'animal': [{
'id': -2,
'name': 'Stimpson J. Cat',
'nickname': -3,
}],
'nickname': [{
'id': -3,
'nickname': 'Stimpy',
}],
}
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
nickname_id = returned_data['idmap']['nickname'][0][1]
animal_id = returned_data['idmap']['animal'][0][1]
stimpy = Animal.objects.get(pk=animal_id)
self.assertEqual(stimpy.nickname.id, nickname_id)
| true | true |
f722db006999ad8a5629b23732e08caa3e6e44b7 | 4,499 | py | Python | Stain_seperation/stain_Norm_Vahadane.py | Pathology-Consistent-Stain-Transfer/Unpaired-Stain-Transfer-using-Pathology-Consistent-Constrained-Generative-Adversarial-Networks | b57c56b314e65a0f31d9e44f57174108599c8b14 | [
"MIT"
] | 2 | 2021-11-10T12:28:37.000Z | 2021-11-15T10:12:31.000Z | Stain_seperation/stain_Norm_Vahadane.py | Pathology-Consistent-Stain-Transfer/Unpaired-Stain-Transfer-using-Pathology-Consistent-Constrained-Generative-Adversarial-Networks | b57c56b314e65a0f31d9e44f57174108599c8b14 | [
"MIT"
] | null | null | null | Stain_seperation/stain_Norm_Vahadane.py | Pathology-Consistent-Stain-Transfer/Unpaired-Stain-Transfer-using-Pathology-Consistent-Constrained-Generative-Adversarial-Networks | b57c56b314e65a0f31d9e44f57174108599c8b14 | [
"MIT"
] | 2 | 2021-11-10T12:28:42.000Z | 2021-11-10T12:38:45.000Z | """
Stain normalization inspired by method of:
A. Vahadane et al., ‘Structure-Preserving Color Normalization and Sparse Stain Separation for Histological Images’, IEEE Transactions on Medical Imaging, vol. 35, no. 8, pp. 1962–1971, Aug. 2016.
Uses the spams package:
http://spams-devel.gforge.inria.fr/index.html
Use with python via e.g https://anaconda.org/conda-forge/python-spams
"""
# windows: pip install spams-bin
# linux:pip install python-spams
import spams
import numpy as np
import Stain_seperation.stain_utils as ut
def get_stain_matrix(I, threshold=0.8, lamda=0.1):
"""
Get 2x3 stain matrix. First row H and second row E
:param I:
:param threshold:
:param lamda:
:return:
"""
mask = ut.notwhite_mask(I, thresh=threshold).reshape((-1,))
OD = ut.RGB_to_OD(I).reshape((-1, 3))
OD = OD[mask]
dictionary = spams.trainDL(OD.T, K=2, lambda1=lamda, mode=2, modeD=0, posAlpha=True, posD=True, verbose=False).T
if dictionary[0, 0] < dictionary[1, 0]:
dictionary = dictionary[[1, 0], :]
dictionary = ut.normalize_rows(dictionary)
return dictionary
class normalizer(object):
"""
A stain normalization object
"""
def __init__(self):
self.stain_matrix_target = np.array([[0.62600721, 0.62330743, 0.46861798],
[0.3203682, 0.5473311, 0.77317067]])
# Ki67 Normalization initial matirx obtained from "Sample_target"
# [[0.58594418, 0.68469766, 0.43342651]
# [0.3203682, 0.5473311, 0.77317067]]
# [[0.62600721,0.62330743,0.46861798],
# [0.35395456,0.58236586,0.73182387]]
# [[0.58583788, 0.66078505, 0.46920901],
# [0.3536072, 0.56354522, 0.74657801]]
# HE Normalization initial matirx obtained from "Sample_target"
# self.stain_matrix_target = np.array([[0.60559458, 0.69559906, 0.38651928],
# [0.1100605, 0.94701408, 0.30174662]])
# [[0.59958405,0.70248408,0.38342546]
# [0.06893222,0.95236792,0.2970584]]
# [[0.60559458 0.69559906 0.38651928]
# [0.1100605 0.94701408 0.30174662]]
# [[0.60715608 0.72015621 0.3357626]
# [0.21154943 0.9271104 0.30937542]]
def fit(self, target_list):
if target_list.__len__() > 1:
Ws = []
for f_id in range(target_list.__len__()):
target = ut.read_image(target_list[f_id])
target = ut.standardize_brightness(target)
stain_matrix_target = get_stain_matrix(target)
Ws.append(stain_matrix_target)
Ws = np.asarray(Ws)
Median_W = np.median(Ws, axis=0)
self.stain_matrix_target = ut.normalize_rows(Median_W)
print('WSI target stain matrix: ', self.stain_matrix_target)
else:
target = ut.read_image(target_list[0])
target = ut.standardize_brightness(target)
self.stain_matrix_target = get_stain_matrix(target)
print('Single target image stain matrix: ', self.stain_matrix_target)
def stains_Vec_RGB(self, stain_matrix_target):
return ut.OD_to_RGB(stain_matrix_target)
def transform(self, I):
I = ut.standardize_brightness(I)
stain_matrix_source = get_stain_matrix(I)
source_concentrations = ut.get_concentrations(I, stain_matrix_source)
return (255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target).reshape(I.shape))).astype(
np.uint8)
def hematoxylin_eosin(self, I):
I = ut.standardize_brightness(I)
h, w, _ = I.shape
stain_matrix_source = get_stain_matrix(I)
source_concentrations = ut.get_concentrations(I, stain_matrix_source)
H = source_concentrations[:, 0].reshape(h, w)
H = np.exp(-1 * H)
E = source_concentrations[:, 1].reshape(h, w)
E = np.exp(-1 * E)
# H = np.reshape(source_concentrations[:, 0], newshape=(h*w, 1))
# H = (255 * np.exp(-1 * np.dot(H, np.reshape(stain_matrix_source[0],
# newshape=(1, 3))).reshape(I.shape))).astype(np.uint8)
# E = np.reshape(source_concentrations[:, 1], newshape=(h*w, 1))
# E = (255 * np.exp(-1 * np.dot(E, np.reshape(stain_matrix_source[1],
# newshape=(1, 3))).reshape(I.shape))).astype(np.uint8)
return H, E
| 39.121739 | 195 | 0.61058 |
import spams
import numpy as np
import Stain_seperation.stain_utils as ut
def get_stain_matrix(I, threshold=0.8, lamda=0.1):
mask = ut.notwhite_mask(I, thresh=threshold).reshape((-1,))
OD = ut.RGB_to_OD(I).reshape((-1, 3))
OD = OD[mask]
dictionary = spams.trainDL(OD.T, K=2, lambda1=lamda, mode=2, modeD=0, posAlpha=True, posD=True, verbose=False).T
if dictionary[0, 0] < dictionary[1, 0]:
dictionary = dictionary[[1, 0], :]
dictionary = ut.normalize_rows(dictionary)
return dictionary
class normalizer(object):
def __init__(self):
self.stain_matrix_target = np.array([[0.62600721, 0.62330743, 0.46861798],
[0.3203682, 0.5473311, 0.77317067]])
def fit(self, target_list):
if target_list.__len__() > 1:
Ws = []
for f_id in range(target_list.__len__()):
target = ut.read_image(target_list[f_id])
target = ut.standardize_brightness(target)
stain_matrix_target = get_stain_matrix(target)
Ws.append(stain_matrix_target)
Ws = np.asarray(Ws)
Median_W = np.median(Ws, axis=0)
self.stain_matrix_target = ut.normalize_rows(Median_W)
print('WSI target stain matrix: ', self.stain_matrix_target)
else:
target = ut.read_image(target_list[0])
target = ut.standardize_brightness(target)
self.stain_matrix_target = get_stain_matrix(target)
print('Single target image stain matrix: ', self.stain_matrix_target)
def stains_Vec_RGB(self, stain_matrix_target):
return ut.OD_to_RGB(stain_matrix_target)
def transform(self, I):
I = ut.standardize_brightness(I)
stain_matrix_source = get_stain_matrix(I)
source_concentrations = ut.get_concentrations(I, stain_matrix_source)
return (255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target).reshape(I.shape))).astype(
np.uint8)
def hematoxylin_eosin(self, I):
I = ut.standardize_brightness(I)
h, w, _ = I.shape
stain_matrix_source = get_stain_matrix(I)
source_concentrations = ut.get_concentrations(I, stain_matrix_source)
H = source_concentrations[:, 0].reshape(h, w)
H = np.exp(-1 * H)
E = source_concentrations[:, 1].reshape(h, w)
E = np.exp(-1 * E)
return H, E
| true | true |
f722db16de4f68a88069a8853bd00208bf0c7609 | 8,257 | py | Python | example_zoo/tensorflow/models/mnist/official/mnist/mnist.py | Meghanath-Data/ml-on-gcp | bfd96dce610e26236c4448ba0d4eb430ca2817ff | [
"Apache-2.0"
] | 465 | 2017-08-28T13:21:25.000Z | 2022-03-31T03:35:30.000Z | example_zoo/tensorflow/models/mnist/official/mnist/mnist.py | Meghanath-Data/ml-on-gcp | bfd96dce610e26236c4448ba0d4eb430ca2817ff | [
"Apache-2.0"
] | 74 | 2017-09-08T23:05:01.000Z | 2022-03-12T00:54:37.000Z | example_zoo/tensorflow/models/mnist/official/mnist/mnist.py | Meghanath-Data/ml-on-gcp | bfd96dce610e26236c4448ba0d4eb430ca2817ff | [
"Apache-2.0"
] | 187 | 2017-08-28T13:21:28.000Z | 2022-02-22T01:47:28.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app as absl_app
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.mnist import dataset
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
LEARNING_RATE = 1e-4
def create_model(data_format):
"""Model to recognize digits in the MNIST dataset.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model.
"""
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
l = tf.keras.layers
max_pool = l.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential(
[
l.Reshape(
target_shape=input_shape,
input_shape=(28 * 28,)),
l.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Flatten(),
l.Dense(1024, activation=tf.nn.relu),
l.Dropout(0.4),
l.Dense(10)
])
def define_mnist_flags():
flags_core.define_base()
flags_core.define_performance(num_parallel_calls=False)
flags_core.define_image()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(data_dir='/tmp/mnist_data',
model_dir='/tmp/mnist_model',
batch_size=100,
train_epochs=40)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = create_model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
logits = model(image, training=True)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(LEARNING_RATE, 'learning_rate')
tf.identity(loss, 'cross_entropy')
tf.identity(accuracy[1], name='train_accuracy')
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1)),
})
def run_mnist(flags_obj):
    """Run the MNIST train/evaluate loop and optionally export a SavedModel.

    Args:
        flags_obj: Parsed absl flags controlling data/model dirs, batch size,
            epoch counts, threading, distribution and export behaviour.
    """
    model_helpers.apply_clean(flags_obj)

    # Session threading configuration plus optional multi-GPU distribution.
    session_config = tf.ConfigProto(
        inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
        intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
        allow_soft_placement=True)
    strategy = distribution_utils.get_distribution_strategy(
        flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
    run_config = tf.estimator.RunConfig(
        train_distribute=strategy, session_config=session_config)

    # Prefer NCHW on CUDA builds, NHWC otherwise.
    data_format = flags_obj.data_format
    if data_format is None:
        data_format = ('channels_first'
                       if tf.test.is_built_with_cuda() else 'channels_last')

    classifier = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=flags_obj.model_dir,
        config=run_config,
        params={
            'data_format': data_format,
        })

    def train_input_fn():
        """Cached, fully shuffled training pipeline for one eval period."""
        # MNIST is small enough to shuffle an entire epoch in memory.
        ds = dataset.train(flags_obj.data_dir)
        return (ds.cache()
                .shuffle(buffer_size=50000)
                .batch(flags_obj.batch_size)
                .repeat(flags_obj.epochs_between_evals))

    def eval_input_fn():
        """Single pass over the test set."""
        return dataset.test(flags_obj.data_dir).batch(
            flags_obj.batch_size).make_one_shot_iterator().get_next()

    # Logging hooks (LoggingTensorHook every 100 steps by default).
    train_hooks = hooks_helper.get_train_hooks(
        flags_obj.hooks, model_dir=flags_obj.model_dir,
        batch_size=flags_obj.batch_size)

    # Alternate training and evaluation until the epoch budget is spent or
    # the stop-threshold accuracy is reached.
    for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
        classifier.train(input_fn=train_input_fn, hooks=train_hooks)
        eval_results = classifier.evaluate(input_fn=eval_input_fn)
        print('\nEvaluation results:\n\t%s\n' % eval_results)
        if model_helpers.past_stop_threshold(flags_obj.stop_threshold,
                                             eval_results['accuracy']):
            break

    # Optionally export for serving; input is a raw 28x28 float image batch.
    if flags_obj.export_dir is not None:
        image = tf.placeholder(tf.float32, [None, 28, 28])
        input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
            'image': image,
        })
        classifier.export_savedmodel(flags_obj.export_dir, input_fn,
                                     strip_default_attrs=True)
def main(_):
    """absl entry point; the positional argument is the unused argv list."""
    run_mnist(flags.FLAGS)


if __name__ == '__main__':
    # Configure logging, register flags, then hand control to absl.
    tf.logging.set_verbosity(tf.logging.INFO)
    define_mnist_flags()
    absl_app.run(main)
| 34.693277 | 116 | 0.696258 |
# Python 2/3 compatibility shims.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app as absl_app
from absl import flags

# AI Platform Training always passes --job-dir; define it so absl accepts it.
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")

import tensorflow as tf

from official.mnist import dataset
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers

# Fixed Adam learning rate used by model_fn.
LEARNING_RATE = 1e-4
def create_model(data_format):
    """Build the MNIST convnet as an (uncompiled) tf.keras.Sequential model.

    Args:
        data_format: 'channels_first' (NCHW) or 'channels_last' (NHWC).

    Returns:
        A Sequential model mapping flat 784-vectors to 10 class logits.
    """
    if data_format == 'channels_first':
        input_shape = [1, 28, 28]
    else:
        assert data_format == 'channels_last'
        input_shape = [28, 28, 1]

    layers = tf.keras.layers
    # A single pooling layer instance is shared by both pooling stages.
    max_pool = layers.MaxPooling2D(
        (2, 2), (2, 2), padding='same', data_format=data_format)

    def conv(filters):
        """5x5 same-padded ReLU convolution with the chosen data format."""
        return layers.Conv2D(
            filters,
            5,
            padding='same',
            data_format=data_format,
            activation=tf.nn.relu)

    return tf.keras.Sequential([
        layers.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
        conv(32),
        max_pool,
        conv(64),
        max_pool,
        layers.Flatten(),
        layers.Dense(1024, activation=tf.nn.relu),
        layers.Dropout(0.4),
        layers.Dense(10),
    ])
def define_mnist_flags():
    """Register the command-line flags used by this script."""
    flags_core.define_base()
    # num_parallel_calls is irrelevant for this tiny input pipeline.
    flags_core.define_performance(num_parallel_calls=False)
    flags_core.define_image()
    flags.adopt_module_key_flags(flags_core)
    flags_core.set_defaults(data_dir='/tmp/mnist_data',
                            model_dir='/tmp/mnist_model',
                            batch_size=100,
                            train_epochs=40)
def model_fn(features, labels, mode, params):
    """The model_fn argument for creating an Estimator.

    Args:
        features: batch of flat 28*28 images, or a dict with key 'image'.
        labels: integer class labels (unused for PREDICT).
        mode: a tf.estimator.ModeKeys value.
        params: dict with key 'data_format'.

    Returns:
        A tf.estimator.EstimatorSpec for the requested mode.
    """
    model = create_model(params['data_format'])
    image = features
    if isinstance(image, dict):
        image = features['image']

    if mode == tf.estimator.ModeKeys.PREDICT:
        logits = model(image, training=False)
        predictions = {
            'classes': tf.argmax(logits, axis=1),
            'probabilities': tf.nn.softmax(logits),
        }
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                'classify': tf.estimator.export.PredictOutput(predictions)
            })

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
        logits = model(image, training=True)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        accuracy = tf.metrics.accuracy(
            labels=labels, predictions=tf.argmax(logits, axis=1))
        # Name tensors so LoggingTensorHook can find them.
        tf.identity(LEARNING_RATE, 'learning_rate')
        tf.identity(loss, 'cross_entropy')
        tf.identity(accuracy[1], name='train_accuracy')
        # Save accuracy scalar to TensorBoard output.
        tf.summary.scalar('train_accuracy', accuracy[1])
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))

    if mode == tf.estimator.ModeKeys.EVAL:
        logits = model(image, training=False)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                'accuracy':
                    tf.metrics.accuracy(
                        labels=labels, predictions=tf.argmax(logits, axis=1)),
            })
def run_mnist(flags_obj):
    """Run MNIST training and eval loop.

    Args:
        flags_obj: An object containing parsed flag values.
    """
    model_helpers.apply_clean(flags_obj)
    model_function = model_fn
    session_config = tf.ConfigProto(
        inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
        intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
        allow_soft_placement=True)
    distribution_strategy = distribution_utils.get_distribution_strategy(
        flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
    run_config = tf.estimator.RunConfig(
        train_distribute=distribution_strategy, session_config=session_config)
    # Default to NCHW on CUDA builds, NHWC otherwise.
    data_format = flags_obj.data_format
    if data_format is None:
        data_format = ('channels_first'
                       if tf.test.is_built_with_cuda() else 'channels_last')
    mnist_classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=flags_obj.model_dir,
        config=run_config,
        params={
            'data_format': data_format,
        })

    # Set up training and evaluation input functions.
    def train_input_fn():
        """Prepare data for training."""
        # MNIST is small enough that a full epoch fits in the shuffle buffer,
        # giving maximal shuffling randomness at modest memory cost.
        ds = dataset.train(flags_obj.data_dir)
        ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)
        # Iterate `epochs_between_evals` epochs per training session.
        ds = ds.repeat(flags_obj.epochs_between_evals)
        return ds

    def eval_input_fn():
        """Single pass over the test set."""
        return dataset.test(flags_obj.data_dir).batch(
            flags_obj.batch_size).make_one_shot_iterator().get_next()

    # Hook that outputs training logs every 100 steps.
    train_hooks = hooks_helper.get_train_hooks(
        flags_obj.hooks, model_dir=flags_obj.model_dir,
        batch_size=flags_obj.batch_size)
    # Train and evaluate model until the epoch budget is spent or the
    # stop-threshold accuracy is reached.
    for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
        mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)
        eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
        print('\nEvaluation results:\n\t%s\n' % eval_results)
        if model_helpers.past_stop_threshold(flags_obj.stop_threshold,
                                             eval_results['accuracy']):
            break
    # Export the model for serving (raw 28x28 float images as input).
    if flags_obj.export_dir is not None:
        image = tf.placeholder(tf.float32, [None, 28, 28])
        input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
            'image': image,
        })
        mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn,
                                           strip_default_attrs=True)
def main(_):
    """absl entry point; the positional argument is the unused argv list."""
    run_mnist(flags.FLAGS)


if __name__ == '__main__':
    # Configure logging, register flags, then hand control to absl.
    tf.logging.set_verbosity(tf.logging.INFO)
    define_mnist_flags()
    absl_app.run(main)
| true | true |
f722db1ad969e16207fbc173be16db45732e51aa | 383 | py | Python | problems/0033/p33.py | xztor/EulerProject | 091e345336d0766505c88737adef82cc5ab4ac93 | [
"MIT"
] | null | null | null | problems/0033/p33.py | xztor/EulerProject | 091e345336d0766505c88737adef82cc5ab4ac93 | [
"MIT"
] | null | null | null | problems/0033/p33.py | xztor/EulerProject | 091e345336d0766505c88737adef82cc5ab4ac93 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(0, '../common/')
import utils
def same():
    """Yield the 'curious' two-digit fractions n/d with n < d.

    A fraction is curious when incorrectly cancelling a digit shared by
    numerator and denominator leaves its value unchanged (e.g. 49/98 = 4/8).
    Trivial zero-cancellations never match: a leading digit cannot be '0'.
    """
    for numer in range(10, 100):
        tens_n, units_n = divmod(numer, 10)
        for denom in range(numer + 1, 100):
            tens_d, units_d = divmod(denom, 10)
            # Shared digit: tens of numerator == units of denominator;
            # cancelling it must preserve the value (cross-multiplied check).
            if tens_n == units_d and units_n * denom == numer * tens_d:
                yield numer, denom
            # Shared digit: units of numerator == tens of denominator.
            if units_n == tens_d and tens_n * denom == numer * units_d:
                yield numer, denom
# Python 3 fix: `reduce` moved to functools, tuple-parameter lambdas and the
# `print` statement are no longer valid syntax, and `/` is true division.
from functools import reduce

# Multiply the curious fractions together: numerator and denominator products
# accumulate separately, starting from 1/1.
(n, d) = reduce(lambda acc, frac: (acc[0] * frac[0], acc[1] * frac[1]), same(), (1, 1))
# Print the denominator of the product in lowest terms (Project Euler 33).
print(d // utils.gcd(n, d))
| 23.9375 | 58 | 0.519582 | import sys
sys.path.insert(0, '../common/')
import utils
def same():
for n in range(10,100):
for d in range(n+1, 100):
nd = str(n)
dd = str(d)
if nd[0] == dd[1] and int(nd[1]) * d == n * int(dd[0]):
yield n,d
if nd[1] == dd[0] and int(nd[0]) * d == n * int(dd[1]):
yield n,d
(n,d)=reduce(lambda (a,b),(d,c): (a*d,b*c), same(), (1,1))
print d/utils.gcd(n,d)
| false | true |
f722dca5b75dfc9a9c2c40ddd526b7c412549462 | 2,597 | py | Python | tmp.py | zdadadaz/jcc_dfdc | 672b61771e22b369c7950c89299b0a7a2f7586ad | [
"Apache-2.0"
] | 1 | 2021-01-31T04:39:11.000Z | 2021-01-31T04:39:11.000Z | tmp.py | zdadadaz/jcc_dfdc | 672b61771e22b369c7950c89299b0a7a2f7586ad | [
"Apache-2.0"
] | null | null | null | tmp.py | zdadadaz/jcc_dfdc | 672b61771e22b369c7950c89299b0a7a2f7586ad | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 19:28:12 2020
@author: zdadadaz
"""
import json
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
# dir_json = './../fb_whole/metadata_21.json'
# train_list =[]
# with open(dir_json) as json_file:
# data = json.load(json_file)
# train_list = pd.DataFrame.from_dict(data, orient='index')
# train_list.reset_index(level=0, inplace=True)
# train_list[train_list['label']=='REAL'].iloc[1]
# Predicted FAKE-probabilities from two detectors plus ground-truth metadata.
base = pd.read_csv('submission_base.csv')
mtcn = pd.read_csv('submission_mtcn.csv')
whole = pd.read_csv('metadata_small.csv')
sLength = len(base['label'])
# Pre-allocate bookkeeping columns; the randn values are throwaway
# placeholders that get overwritten row by row in the loop below.
base['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)
base['original'] = pd.Series(np.random.randn(sLength), index=base.index)
base['folder'] = pd.Series(np.random.randn(sLength), index=base.index)
base['res'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['original'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['folder'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['res'] = pd.Series(np.random.randn(sLength), index=base.index)
# Fill the bookkeeping columns row by row from the ground-truth metadata.
# Fix: the original used chained assignment (df['col'][i] = ...), which pandas
# does not guarantee to write back to the frame (SettingWithCopy); .at is the
# documented scalar setter. The repeated metadata lookups are also hoisted.
for i in range(len(base)):
    print(str(i))
    fn = base.iloc[i][0]
    meta = whole[whole['filename'] == fn]   # metadata row(s) for this clip
    label = list(meta['label'])[0]
    folder = list(meta['folder'])[0]
    score = 0      # ground truth as a number: 1 = FAKE, 0 = REAL
    origin = "n"   # source clip; only FAKE clips have one
    if label == "FAKE":
        score = 1
        origin = list(meta['original'])[0]
    # A prediction is "wrong" when it lands on the wrong side of 0.5.
    base.at[i, 'wrong'] = abs(score - base.iloc[i][1]) > 0.5
    base.at[i, 'original'] = origin
    base.at[i, 'folder'] = folder
    base.at[i, 'res'] = label
    mtcn.at[i, 'wrong'] = abs(score - mtcn.iloc[i][1]) > 0.5
    mtcn.at[i, 'original'] = origin
    mtcn.at[i, 'folder'] = folder
    mtcn.at[i, 'res'] = label
# Histogram of predicted FAKE-probability, one series per true class.
# Fix: the originals plotted the FULL 'label' column once per group (the group
# variable was unused), so both legend entries showed identical data; plot each
# group's own predictions, and group the MTCNN frame for the MTCNN figure.
for res, grp in base.groupby('res'):
    grp['label'].plot(kind='hist', figsize=(15, 5), bins=20, alpha=0.8, title='base')
plt.legend(['FAKE','REAL'])
plt.show()
for res, grp in mtcn.groupby('res'):
    grp['label'].plot(kind='hist', figsize=(15, 5), bins=20, title='MTCNN', alpha=0.8)
plt.legend(['FAKE','REAL'])
plt.show()
# Confusion counts at threshold 0.5 ('label' holds the predicted FAKE prob).
TP = sum(np.array(base['label']>0.5) & np.array(base['res']=="FAKE"))
FP = sum(np.array(base['label']>0.5) & np.array(base['res']=="REAL"))
# NOTE(review): the next two names look swapped -- predicted-REAL & actually
# FAKE is a false negative, predicted-REAL & actually REAL a true negative;
# confirm before reusing these quantities.
TN = sum(np.array(base['label']<=0.5) & np.array(base['res']=="FAKE"))
FN = sum(np.array(base['label']<=0.5) & np.array(base['res']=="REAL"))
# NOTE(review): TP/len(base)*2 equals accuracy only under a 50/50 class split;
# presumably a quick approximation -- verify the intent.
precision = TP/len(base)*2
recall = TP/(TP+FP)
Fake_precision = TP/(TP+TN)
Real_precision = FN/(FP+FN)
import json
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
# Predicted FAKE-probabilities from two detectors plus ground-truth metadata.
base = pd.read_csv('submission_base.csv')
mtcn = pd.read_csv('submission_mtcn.csv')
whole = pd.read_csv('metadata_small.csv')
sLength = len(base['label'])
# Pre-allocate bookkeeping columns; the randn values are throwaway
# placeholders that get overwritten row by row in the loop below.
base['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)
base['original'] = pd.Series(np.random.randn(sLength), index=base.index)
base['folder'] = pd.Series(np.random.randn(sLength), index=base.index)
base['res'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['original'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['folder'] = pd.Series(np.random.randn(sLength), index=base.index)
mtcn['res'] = pd.Series(np.random.randn(sLength), index=base.index)
# Fill the bookkeeping columns row by row from the ground-truth metadata.
# Fix: the original used chained assignment (df['col'][i] = ...), which pandas
# does not guarantee to write back to the frame (SettingWithCopy); .at is the
# documented scalar setter. The repeated metadata lookups are also hoisted.
for i in range(len(base)):
    print(str(i))
    fn = base.iloc[i][0]
    meta = whole[whole['filename'] == fn]   # metadata row(s) for this clip
    label = list(meta['label'])[0]
    folder = list(meta['folder'])[0]
    score = 0      # ground truth as a number: 1 = FAKE, 0 = REAL
    origin = "n"   # source clip; only FAKE clips have one
    if label == "FAKE":
        score = 1
        origin = list(meta['original'])[0]
    # A prediction is "wrong" when it lands on the wrong side of 0.5.
    base.at[i, 'wrong'] = abs(score - base.iloc[i][1]) > 0.5
    base.at[i, 'original'] = origin
    base.at[i, 'folder'] = folder
    base.at[i, 'res'] = label
    mtcn.at[i, 'wrong'] = abs(score - mtcn.iloc[i][1]) > 0.5
    mtcn.at[i, 'original'] = origin
    mtcn.at[i, 'folder'] = folder
    mtcn.at[i, 'res'] = label
# Histogram of predicted FAKE-probability, one series per true class.
# Fix: the originals plotted the FULL 'label' column once per group (the group
# variable was unused), so both legend entries showed identical data; plot each
# group's own predictions, and group the MTCNN frame for the MTCNN figure.
for res, grp in base.groupby('res'):
    grp['label'].plot(kind='hist', figsize=(15, 5), bins=20, alpha=0.8, title='base')
plt.legend(['FAKE','REAL'])
plt.show()
for res, grp in mtcn.groupby('res'):
    grp['label'].plot(kind='hist', figsize=(15, 5), bins=20, title='MTCNN', alpha=0.8)
plt.legend(['FAKE','REAL'])
plt.show()
# Confusion counts at threshold 0.5 ('label' holds the predicted FAKE prob).
TP = sum(np.array(base['label']>0.5) & np.array(base['res']=="FAKE"))
FP = sum(np.array(base['label']>0.5) & np.array(base['res']=="REAL"))
# NOTE(review): the next two names look swapped -- predicted-REAL & actually
# FAKE is a false negative, predicted-REAL & actually REAL a true negative;
# confirm before reusing these quantities.
TN = sum(np.array(base['label']<=0.5) & np.array(base['res']=="FAKE"))
FN = sum(np.array(base['label']<=0.5) & np.array(base['res']=="REAL"))
# NOTE(review): TP/len(base)*2 equals accuracy only under a 50/50 class split;
# presumably a quick approximation -- verify the intent.
precision = TP/len(base)*2
recall = TP/(TP+FP)
Fake_precision = TP/(TP+TN)
Real_precision = FN/(FP+FN)
f722dca9b0422776482ea7f888de525b779028d1 | 19,420 | py | Python | data_analysis/calculate_field_data.py | antonvs88/crowddynamics-research | 61260aa26a6d5bc213252bf96eaa472a551918e3 | [
"MIT"
] | null | null | null | data_analysis/calculate_field_data.py | antonvs88/crowddynamics-research | 61260aa26a6d5bc213252bf96eaa472a551918e3 | [
"MIT"
] | null | null | null | data_analysis/calculate_field_data.py | antonvs88/crowddynamics-research | 61260aa26a6d5bc213252bf96eaa472a551918e3 | [
"MIT"
] | 1 | 2019-08-27T16:34:50.000Z | 2019-08-27T16:34:50.000Z | from scipy.spatial import Voronoi, voronoi_plot_2d
import h5py
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from shapely.geometry import Polygon, MultiLineString, Point
from shapely.ops import polygonize
from descartes import PolygonPatch
from voronoi_finite_polygons_2d import voronoi_finite_polygons_2d
from recursive_mean import recursive_mean
# Bounding box of the room; later used to clip the Voronoi tessellation.
width = 20
height = 20
boundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])
# Grid over the room; cell_size sets the resolution of the micro->macro data.
cell_size = 0.1
m = np.round(width / cell_size)
n = np.round(height / cell_size)
m = m.astype(int)
n = n.astype(int)
X = np.linspace(0, width, m + 1)
Y = np.linspace(0, height, n + 1)
# NOTE(review): both line sets zip an X slice with a Y slice; this is only
# safe while the room is square (width == height) -- confirm before reusing
# with a rectangular room.
hlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], Y[1:]) for yi in Y]
vlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]
grids = list(polygonize(MultiLineString(hlines + vlines)))
# The data is split into four occupancy intervals (pedestrian counts in the
# room). The data while the first 10 and last 10 pedestrians leave is dropped
# to remove transient behaviour of the crowd system.
interval1_start = 190
interval2_start = 145
interval3_start = 100
interval4_start = 55
interval4_end = 10
# Midpoints of the grid cells.
mid_x, mid_y = np.meshgrid(np.arange(cell_size/2, width, cell_size), np.arange(cell_size/2, height, cell_size))
# Per-cell vector used later for the radial speed. NOTE(review): the original
# comment said "from the cell midpoint to the exit", but the code computes
# cell - exit(20, 10), i.e. the vector points AWAY from the exit, so a positive
# projection means moving away from it -- confirm the intended sign downstream.
# Fix: allocate with the full meshgrid shape; the original used
# (shape[0], shape[0], 2), which is correct only because width == height.
direction = np.zeros(mid_x.shape + (2,))
direction[:, :, 0] = mid_x - 20
direction[:, :, 1] = mid_y - 10
d_norm = np.hypot(direction[:, :, 0], direction[:, :, 1])
# Mean values over parts of the data are computed recursively in chunks.
chunk = 1000  # chunk size
# The outer loop goes through the folders; simulation data is stored there
# in .npy.gz format.
mylist = ['taset0']  # name of the folder, where the data is; can be an array of folders
for i in range(0, len(mylist)):
    # The inner loop goes through the simulations; here a single simulation
    # index is taken from the first command-line argument.
    for j in range(int(sys.argv[1]), int(sys.argv[1]) + 1):
        # Pedestrian presence over time (0 = "not in room", 1 = "in room").
        # NOTE(review): every np.loadtxt below is guarded by os.path.exists, but
        # the rest of the loop assumes the file was found; a missing file
        # raises NameError further down.
        if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz')):
            in_room = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz'))
        # Number of pedestrians in the room at each time step.
        sum_in_room = np.sum(in_room, axis=1)
        # First instant with 190 pedestrians; if two pedestrians left at once
        # (191 -> 189), fall back to the first instant with 189.
        time_interval1_start = np.where(sum_in_room == interval1_start)
        if np.size(time_interval1_start) == 0:
            time_interval1_start = np.where(sum_in_room == (interval1_start - 1))[0][0]
        else:
            time_interval1_start = np.where(sum_in_room == interval1_start)[0][0]
        # First instant with 145 pedestrians (or 144 on a simultaneous exit).
        time_interval2_start = np.where(sum_in_room == interval2_start)
        if np.size(time_interval2_start) == 0:
            time_interval2_start = np.where(sum_in_room == (interval2_start - 1))[0][0]
        else:
            time_interval2_start = np.where(sum_in_room == interval2_start)[0][0]
        # First instant with 100 pedestrians (or 99 on a simultaneous exit).
        time_interval3_start = np.where(sum_in_room == interval3_start)
        if np.size(time_interval3_start) == 0:
            time_interval3_start = np.where(sum_in_room == (interval3_start - 1))[0][0]
        else:
            time_interval3_start = np.where(sum_in_room == interval3_start)[0][0]
        # First instant with 55 pedestrians (or 54 on a simultaneous exit).
        time_interval4_start = np.where(sum_in_room == interval4_start)
        if np.size(time_interval4_start) == 0:
            time_interval4_start = np.where(sum_in_room == (interval4_start - 1))[0][0]
        else:
            time_interval4_start = np.where(sum_in_room == interval4_start)[0][0]
        # First instant with 10 pedestrians (or 9 on a simultaneous exit).
        time_interval4_end = np.where(sum_in_room == interval4_end)
        if np.size(time_interval4_end) == 0:
            time_interval4_end = np.where(sum_in_room == (interval4_end - 1))[0][0]
        else:
            time_interval4_end = np.where(sum_in_room == interval4_end)[0][0]
        # x-positions over time. NOTE: sampled at double resolution, so keep
        # every second sample to match the velocity data.
        if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz')):
            positions_x = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz'))
            positions_x = positions_x[0::2]  # take every second element
        # y-positions over time (same double-resolution sampling).
        if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz')):
            positions_y = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz'))
            positions_y = positions_y[0::2]  # take every second element
        # Velocity x-components over time.
        if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz')):
            velocities_x = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz'))
        # Velocity y-components over time.
        if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz')):
            velocities_y = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz'))
        # Arrays for the micro-macro converted fields.
        # NOTE(review): float16 keeps only ~3 significant digits; confirm that
        # is acceptable before quantitative use.
        velocity_x = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)  # velocity x-component
        velocity_y = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)  # velocity y-component
        speed = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)  # speed
        density = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)  # density
        projection = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)  # radial speed
        # Loop over the time steps where occupancy drops from 190 to 10 and
        # derive the macroscopic quantities with the Voronoi method.
        for t in range(time_interval1_start, time_interval4_end):
            # Positions of pedestrians currently inside the room.
            agents_in_room = np.where(in_room[t, :] == 1)[0]  # which pedestrians are in the room
            n_agents_in_room = len(agents_in_room)  # number of pedestrians in the room
            points = np.concatenate((np.reshape(positions_x[t, agents_in_room], (n_agents_in_room, 1)),
                                     np.reshape(positions_y[t, agents_in_room], (n_agents_in_room, 1))), axis=1)
            # x- and y-components of velocities of pedestrians in the room.
            x_component = velocities_x[t, agents_in_room]
            y_component = velocities_y[t, agents_in_room]
            # Voronoi tessellation from pedestrian center points.
            vor = Voronoi(points)
            # Close the unbounded rim regions as well; new_vertices holds all
            # vertices, new_regions the vertex indices of each Voronoi cell.
            # (See https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram
            # and https://gist.github.com/pv/8036995.)
            new_regions, new_vertices = voronoi_finite_polygons_2d(vor)
            # Distribute each pedestrian's Voronoi cell over the grid
            # (Steffen & Seyfried 2010, Physica A 389(9):1902-1910).
            for r in range(0, len(new_regions)):
                region = new_regions[r]
                # Shapely polygon of the Voronoi cell, clipped to the room.
                voronoi_cell = Polygon(shell=new_vertices[region]) & boundbox
                # Area of the Voronoi cell.
                vor_area = voronoi_cell.area
                # Bounding box of the cell, snapped to grid indices.
                minx, miny, maxx, maxy = voronoi_cell.bounds
                minx, miny, maxx, maxy = np.round(
                    (minx / cell_size, miny / cell_size, maxx / cell_size, maxy / cell_size)).astype(int)
                # Keep the index window inside the grid.
                minx = np.maximum(0, minx - 1)
                miny = np.maximum(0, miny - 1)
                maxx = np.minimum(m, maxx + 1)
                maxy = np.minimum(n, maxy + 1)
                # Loop over grid cells intersecting the Voronoi cell.
                for x in range(minx, maxx):
                    for y in range(miny, maxy):
                        # NOTE(review): grids[x * n + y] assumes polygonize
                        # returned the cells in x-major order; confirm if the
                        # grid construction changes.
                        intersect_area = grids[x * n + y].intersection(voronoi_cell).area  # intersecting area
                        # Pedestrian's contribution to density/velocity here.
                        density[t - time_interval1_start, y, x] += intersect_area / vor_area
                        velocity_x[t - time_interval1_start, y, x] += intersect_area * x_component[r]
                        velocity_y[t - time_interval1_start, y, x] += intersect_area * y_component[r]
            # Normalize the area-weighted sums by the grid cell area.
            density[t - time_interval1_start, :, :] /= cell_size * cell_size
            velocity_x[t - time_interval1_start, :, :] /= cell_size * cell_size
            velocity_y[t - time_interval1_start, :, :] /= cell_size * cell_size
            # Flip the matrices upside down because of python's row indexing.
            density[t - time_interval1_start, :, :] = np.flipud(density[t - time_interval1_start, :, :])
            velocity_x[t - time_interval1_start, :, :] = np.flipud(velocity_x[t - time_interval1_start, :, :])
            velocity_y[t - time_interval1_start, :, :] = np.flipud(velocity_y[t - time_interval1_start, :, :])
            # Speed in cells from the resultant velocity vectors.
            speed[t - time_interval1_start, :, :] = np.hypot(velocity_x[t - time_interval1_start, :, :],
                                                             velocity_y[t - time_interval1_start, :, :])
            # Radial speed: projection of the velocity onto the cell-exit axis.
            projection[t - time_interval1_start, :, :] = (velocity_x[t - time_interval1_start, :, :] *
                                                          direction[:, :, 0] + velocity_y[t -
                                                          time_interval1_start, :, :] *
                                                          direction[:, :, 1]) / d_norm
        # Save the lengths of the four occupancy intervals.
        intervals = np.array((time_interval2_start - time_interval1_start, time_interval3_start - time_interval2_start,
                              time_interval4_start - time_interval3_start, time_interval4_end - time_interval4_start))
        np.save("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'intervals', j, '.npy'), intervals)
        # Save speed/density/radial-speed fields per interval as HDF5. The data
        # is NOT time-averaged here; averaging happens in "average_fields.py".
        # NOTE(review): only hf1/hf4/hf7/hf10 pass mode 'w'; the others rely on
        # h5py's default mode -- confirm the asymmetry is intended.
        # First interval (190...145 agents in the room)
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval1', j, '.hdf5'), 'w') as hf1:
            hf1.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval1', j, '.npy.gz'),
                               data=speed[time_interval1_start - time_interval1_start:
                                          time_interval2_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval1', j, '.hdf5')) as hf2:
            hf2.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval1', j, '.npy.gz'),
                               data=density[time_interval1_start - time_interval1_start:
                                            time_interval2_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval1', j, '.hdf5')) as hf3:
            hf3.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval1', j, '.npy.gz'),
                               data=projection[time_interval1_start - time_interval1_start:
                                               time_interval2_start - time_interval1_start, :, :])
        # Second interval (145...100 agents in the room)
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval2', j, '.hdf5'), 'w') as hf4:
            hf4.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval2', j, '.npy.gz'),
                               data=speed[time_interval2_start - time_interval1_start:
                                          time_interval3_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval2', j, '.hdf5')) as hf5:
            hf5.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval2', j, '.npy.gz'),
                               data=density[time_interval2_start - time_interval1_start:
                                            time_interval3_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval2', j, '.hdf5')) as hf6:
            hf6.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval2', j, '.npy.gz'),
                               data=projection[time_interval2_start - time_interval1_start:
                                               time_interval3_start - time_interval1_start, :, :])
        # Third interval (100...55 agents in the room)
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval3', j, '.hdf5'), 'w') as hf7:
            hf7.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval3', j, '.npy.gz'),
                               data=speed[time_interval3_start - time_interval1_start:
                                          time_interval4_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval3', j, '.hdf5')) as hf8:
            hf8.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval3', j, '.npy.gz'),
                               data=density[time_interval3_start - time_interval1_start:
                                            time_interval4_start - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval3', j, '.hdf5')) as hf9:
            hf9.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval3', j, '.npy.gz'),
                               data=projection[time_interval3_start - time_interval1_start:
                                               time_interval4_start - time_interval1_start, :, :])
        # Fourth interval (55...10 agents in the room)
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval4', j, '.hdf5'), 'w') as hf10:
            hf10.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval4', j, '.npy.gz'),
                                data=speed[time_interval4_start - time_interval1_start:
                                           time_interval4_end - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval4', j, '.hdf5')) as hf11:
            hf11.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval4', j, '.npy.gz'),
                                data=density[time_interval4_start - time_interval1_start:
                                             time_interval4_end - time_interval1_start, :, :])
        with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval4', j, '.hdf5')) as hf12:
            hf12.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval4', j, '.npy.gz'),
                                data=projection[time_interval4_start - time_interval1_start:
                                                time_interval4_end - time_interval1_start, :, :])
| 63.881579 | 126 | 0.612564 | from scipy.spatial import Voronoi, voronoi_plot_2d
import h5py
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from shapely.geometry import Polygon, MultiLineString, Point
from shapely.ops import polygonize
from descartes import PolygonPatch
from voronoi_finite_polygons_2d import voronoi_finite_polygons_2d
from recursive_mean import recursive_mean
width = 20
height = 20
boundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])
cell_size = 0.1
m = np.round(width / cell_size)
n = np.round(height / cell_size)
m = m.astype(int)
n = n.astype(int)
X = np.linspace(0, width, m + 1)
Y = np.linspace(0, height, n + 1)
hlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], Y[1:]) for yi in Y]
vlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]
grids = list(polygonize(MultiLineString(hlines + vlines)))
interval1_start = 190
interval2_start = 145
interval3_start = 100
interval4_start = 55
interval4_end = 10
mid_x, mid_y = np.meshgrid(np.arange(cell_size/2, width, cell_size), np.arange(cell_size/2, height, cell_size))
direction = np.zeros((mid_x.shape[0],mid_x.shape[0],2))
direction[:, :, 0] = mid_x - 20
direction[:, :, 1] = mid_y - 10
d_norm = np.sqrt(direction[:,:,0] * direction[:,:,0] + direction[:,:,1] * direction[:,:,1])
chunk = 1000
mylist = ['taset0']
for i in range(0, len(mylist)):
for j in range(int(sys.argv[1]), int(sys.argv[1]) + 1):
if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz')):
in_room = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz'))
sum_in_room = np.sum(in_room, axis=1)
time_interval1_start = np.where(sum_in_room == interval1_start)
if np.size(time_interval1_start) == 0:
time_interval1_start = np.where(sum_in_room == (interval1_start - 1))[0][0]
else:
time_interval1_start = np.where(sum_in_room == interval1_start)[0][0]
time_interval2_start = np.where(sum_in_room == interval2_start)
if np.size(time_interval2_start) == 0:
time_interval2_start = np.where(sum_in_room == (interval2_start - 1))[0][0]
else:
time_interval2_start = np.where(sum_in_room == interval2_start)[0][0]
time_interval3_start = np.where(sum_in_room == interval3_start)
if np.size(time_interval3_start) == 0:
time_interval3_start = np.where(sum_in_room == (interval3_start - 1))[0][0]
else:
time_interval3_start = np.where(sum_in_room == interval3_start)[0][0]
time_interval4_start = np.where(sum_in_room == interval4_start)
if np.size(time_interval4_start) == 0:
time_interval4_start = np.where(sum_in_room == (interval4_start - 1))[0][0]
else:
time_interval4_start = np.where(sum_in_room == interval4_start)[0][0]
time_interval4_end = np.where(sum_in_room == interval4_end)
if np.size(time_interval4_end) == 0:
time_interval4_end = np.where(sum_in_room == (interval4_end - 1))[0][0]
else:
time_interval4_end = np.where(sum_in_room == interval4_end)[0][0]
if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz')):
positions_x = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz'))
positions_x = positions_x[0::2]
if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz')):
positions_y = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz'))
positions_y = positions_y[0::2]
if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz')):
velocities_x = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz'))
# Data of pedestrians' velocities y-component at different times.
if os.path.exists("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz')):
velocities_y = np.loadtxt("{}{}{}{}{}{}".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz'))
velocity_x = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)
velocity_y = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)
speed = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)
density = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)
projection = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16)
for t in range(time_interval1_start, time_interval4_end):
agents_in_room = np.where(in_room[t, :] == 1)[0]
n_agents_in_room = len(agents_in_room)
points = np.concatenate((np.reshape(positions_x[t, agents_in_room], (n_agents_in_room, 1)),
np.reshape(positions_y[t, agents_in_room], (n_agents_in_room, 1))), axis=1)
x_component = velocities_x[t, agents_in_room]
y_component = velocities_y[t, agents_in_room]
vor = Voronoi(points)
new_regions, new_vertices = voronoi_finite_polygons_2d(vor)
for r in range(0, len(new_regions)):
region = new_regions[r]
voronoi_cell = Polygon(shell=new_vertices[region]) & boundbox
vor_area = voronoi_cell.area
minx, miny, maxx, maxy = voronoi_cell.bounds
minx, miny, maxx, maxy = np.round(
(minx / cell_size, miny / cell_size, maxx / cell_size, maxy / cell_size)).astype(int)
minx = np.maximum(0, minx - 1)
miny = np.maximum(0, miny - 1)
maxx = np.minimum(m, maxx + 1)
maxy = np.minimum(n, maxy + 1)
# Loop over cells in the grid intersecting with the Voronoi cell.
for x in range(minx, maxx):
for y in range(miny, maxy):
intersect_area = grids[x * n + y].intersection(voronoi_cell).area # intersecting area
# Calculate the contribution of the pedestrian to the density and velocity in the grid cell.
density[t - time_interval1_start, y, x] += intersect_area / vor_area
velocity_x[t - time_interval1_start, y, x] += intersect_area * x_component[r]
velocity_y[t - time_interval1_start, y, x] += intersect_area * y_component[r]
# Finalize calculating the weighted density and velocity in the cell, by dividing it by the cell area
density[t - time_interval1_start, :, :] /= cell_size * cell_size
velocity_x[t - time_interval1_start, :, :] /= cell_size * cell_size
velocity_y[t - time_interval1_start, :, :] /= cell_size * cell_size
# Flip the density matrix upside down because of peculiar indexing in python
density[t - time_interval1_start, :, :] = np.flipud(density[t - time_interval1_start, :, :])
velocity_x[t - time_interval1_start, :, :] = np.flipud(velocity_x[t - time_interval1_start, :, :])
velocity_y[t - time_interval1_start, :, :] = np.flipud(velocity_y[t - time_interval1_start, :, :])
# Calculate speed in cells from the resultant velocity vectors
speed[t - time_interval1_start, :, :] = np.hypot(velocity_x[t - time_interval1_start, :, :],
velocity_y[t - time_interval1_start, :, :])
# Radial speed (calculate projections of actualized velocities on desired velocities)
projection[t - time_interval1_start, :, :] = (velocity_x[t - time_interval1_start, :, :] *
direction[:, :, 0] + velocity_y[t -
time_interval1_start, :, :] *
direction[:, :, 1]) / d_norm
# Save the length of the time intervals
intervals = np.array((time_interval2_start - time_interval1_start, time_interval3_start - time_interval2_start,
time_interval4_start - time_interval3_start, time_interval4_end - time_interval4_start))
np.save("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'intervals', j, '.npy'), intervals)
# Save the macroscopic data of speed, density and radial speed in .hdf5 format for each time interval
# NOTE: The data is not averaged over time. The averaging is done in "average_fields.py". If one wants
# to save space the averaging should be performed already in this code.
# First interval (190...145 agents in the room)
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval1', j, '.hdf5'), 'w') as hf1:
hf1.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval1', j, '.npy.gz'),
data=speed[time_interval1_start - time_interval1_start:
time_interval2_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval1', j, '.hdf5')) as hf2:
hf2.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval1', j, '.npy.gz'),
data=density[time_interval1_start - time_interval1_start:
time_interval2_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval1', j, '.hdf5')) as hf3:
hf3.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval1', j, '.npy.gz'),
data=projection[time_interval1_start - time_interval1_start:
time_interval2_start - time_interval1_start, :, :])
# Second interval (145...100 agents in the room)
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval2', j, '.hdf5'), 'w') as hf4:
hf4.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval2', j, '.npy.gz'),
data=speed[time_interval2_start - time_interval1_start:
time_interval3_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval2', j, '.hdf5')) as hf5:
hf5.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval2', j, '.npy.gz'),
data=density[time_interval2_start - time_interval1_start:
time_interval3_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval2', j, '.hdf5')) as hf6:
hf6.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval2', j, '.npy.gz'),
data=projection[time_interval2_start - time_interval1_start:
time_interval3_start - time_interval1_start, :, :])
# First interval (100...55 agents in the room)
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval3', j, '.hdf5'), 'w') as hf7:
hf7.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval3', j, '.npy.gz'),
data=speed[time_interval3_start - time_interval1_start:
time_interval4_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval3', j, '.hdf5')) as hf8:
hf8.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval3', j, '.npy.gz'),
data=density[time_interval3_start - time_interval1_start:
time_interval4_start - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval3', j, '.hdf5')) as hf9:
hf9.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval3', j, '.npy.gz'),
data=projection[time_interval3_start - time_interval1_start:
time_interval4_start - time_interval1_start, :, :])
# First interval (190...145 agents in the room)
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval4', j, '.hdf5'), 'w') as hf10:
hf10.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'speed_interval4', j, '.npy.gz'),
data=speed[time_interval4_start - time_interval1_start:
time_interval4_end - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval4', j, '.hdf5')) as hf11:
hf11.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'density_interval4', j, '.npy.gz'),
data=density[time_interval4_start - time_interval1_start:
time_interval4_end - time_interval1_start, :, :])
with h5py.File("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval4', j, '.hdf5')) as hf12:
hf12.create_dataset("{}{}{}{}{}{}".format('fields/', mylist[i], '/', 'projection_interval4', j, '.npy.gz'),
data=projection[time_interval4_start - time_interval1_start:
time_interval4_end - time_interval1_start, :, :])
| true | true |
f722dd0436ffb7377a886d3f98bee18ab155b9c8 | 1,933 | py | Python | beluga/numeric/compilation/component_compilation.py | doublefloyd/beluga | 740bda376634945ef51bf1cf946fcbe002e9bc7f | [
"MIT"
] | 20 | 2017-10-02T13:09:58.000Z | 2022-03-28T20:50:35.000Z | beluga/numeric/compilation/component_compilation.py | doublefloyd/beluga | 740bda376634945ef51bf1cf946fcbe002e9bc7f | [
"MIT"
] | 187 | 2018-02-04T20:35:03.000Z | 2021-01-27T15:04:18.000Z | beluga/numeric/compilation/component_compilation.py | doublefloyd/beluga | 740bda376634945ef51bf1cf946fcbe002e9bc7f | [
"MIT"
] | 12 | 2018-01-19T04:00:09.000Z | 2022-03-28T16:44:17.000Z | import numpy as np
from scipy.integrate import simps
from beluga.numeric.compilation import jit_lambdify, jit_compile_func
from beluga.symbolic.data_classes.components_structures import CostStruct
def compile_control(control_options, args, ham_func, lambdify_func=jit_lambdify):
num_options = len(control_options)
if num_options == 0:
return None
elif num_options == 1:
compiled_option = lambdify_func(args, control_options[0])
def calc_u(_y, _p, _k):
return np.array(compiled_option(_y, _p, _k))
else:
compiled_options = lambdify_func(args, control_options)
def calc_u(_y, _p, _k):
u_set = np.array(compiled_options(_y, _p, _k))
u = u_set[0, :]
ham = ham_func(_y, u, _p, _k)
for n in range(1, num_options):
ham_i = ham_func(_y, u_set[n, :], _p, _k)
if ham_i < ham:
u = u_set[n, :]
ham = ham_i
return u
return jit_compile_func(calc_u, args, func_name='control_function')
def compile_cost(symbolic_cost: CostStruct, dynamic_args, bc_args, lambdify_func=jit_lambdify):
compute_initial_cost = lambdify_func(bc_args, symbolic_cost.initial)
compute_terminal_cost = lambdify_func(bc_args, symbolic_cost.terminal)
compute_path_cost = lambdify_func(dynamic_args, symbolic_cost.path)
def compute_cost(_t, _y, _q, _u, _p, _k):
if len(_q) > 0:
cost = compute_initial_cost(_y[0, :], _q[0, :], _p, _k) \
+ compute_terminal_cost(_y[-1, :], _q[-1, :], _p, _k)
else:
cost = compute_initial_cost(_y[0, :], _q, _p, _k) + compute_terminal_cost(_y[-1, :], _q, _p, _k)
path_cost = np.array([compute_path_cost(yi, ui, _p, _k) for yi, ui in zip(_y, _u)])
cost += simps(path_cost, _t, even='last')
return cost
return compute_cost
| 32.216667 | 108 | 0.628557 | import numpy as np
from scipy.integrate import simps
from beluga.numeric.compilation import jit_lambdify, jit_compile_func
from beluga.symbolic.data_classes.components_structures import CostStruct
def compile_control(control_options, args, ham_func, lambdify_func=jit_lambdify):
num_options = len(control_options)
if num_options == 0:
return None
elif num_options == 1:
compiled_option = lambdify_func(args, control_options[0])
def calc_u(_y, _p, _k):
return np.array(compiled_option(_y, _p, _k))
else:
compiled_options = lambdify_func(args, control_options)
def calc_u(_y, _p, _k):
u_set = np.array(compiled_options(_y, _p, _k))
u = u_set[0, :]
ham = ham_func(_y, u, _p, _k)
for n in range(1, num_options):
ham_i = ham_func(_y, u_set[n, :], _p, _k)
if ham_i < ham:
u = u_set[n, :]
ham = ham_i
return u
return jit_compile_func(calc_u, args, func_name='control_function')
def compile_cost(symbolic_cost: CostStruct, dynamic_args, bc_args, lambdify_func=jit_lambdify):
compute_initial_cost = lambdify_func(bc_args, symbolic_cost.initial)
compute_terminal_cost = lambdify_func(bc_args, symbolic_cost.terminal)
compute_path_cost = lambdify_func(dynamic_args, symbolic_cost.path)
def compute_cost(_t, _y, _q, _u, _p, _k):
if len(_q) > 0:
cost = compute_initial_cost(_y[0, :], _q[0, :], _p, _k) \
+ compute_terminal_cost(_y[-1, :], _q[-1, :], _p, _k)
else:
cost = compute_initial_cost(_y[0, :], _q, _p, _k) + compute_terminal_cost(_y[-1, :], _q, _p, _k)
path_cost = np.array([compute_path_cost(yi, ui, _p, _k) for yi, ui in zip(_y, _u)])
cost += simps(path_cost, _t, even='last')
return cost
return compute_cost
| true | true |
f722dd71125e52999eda3863c5f0bc63d59fab14 | 7,541 | py | Python | allennlp/data/iterators/bucket_iterator.py | sandeep82945/allennlp | d5c64b530b821c59079b72f12e82cb9ce86facf9 | [
"Apache-2.0"
] | null | null | null | allennlp/data/iterators/bucket_iterator.py | sandeep82945/allennlp | d5c64b530b821c59079b72f12e82cb9ce86facf9 | [
"Apache-2.0"
] | null | null | null | allennlp/data/iterators/bucket_iterator.py | sandeep82945/allennlp | d5c64b530b821c59079b72f12e82cb9ce86facf9 | [
"Apache-2.0"
] | null | null | null | import logging
import random
from collections import deque
from typing import List, Tuple, Iterable, cast, Dict, Deque
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import lazy_groups_of, add_noise_to_dict_values
from allennlp.data.dataset import Batch
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def sort_by_padding(instances: List[Instance],
sorting_keys: List[Tuple[str, str]], # pylint: disable=invalid-sequence-index
vocab: Vocabulary,
padding_noise: float = 0.0) -> List[Instance]:
"""
Sorts the instances by their padding lengths, using the keys in
``sorting_keys`` (in the order in which they are provided). ``sorting_keys`` is a list of
``(field_name, padding_key)`` tuples.
"""
instances_with_lengths = []
for instance in instances:
# Make sure instance is indexed before calling .get_padding
instance.index_fields(vocab)
padding_lengths = cast(Dict[str, Dict[str, float]], instance.get_padding_lengths())
if padding_noise > 0.0:
noisy_lengths = {}
for field_name, field_lengths in padding_lengths.items():
noisy_lengths[field_name] = add_noise_to_dict_values(field_lengths, padding_noise)
padding_lengths = noisy_lengths
instance_with_lengths = ([padding_lengths[field_name][padding_key]
for (field_name, padding_key) in sorting_keys],
instance)
instances_with_lengths.append(instance_with_lengths)
instances_with_lengths.sort(key=lambda x: x[0])
return [instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths]
@DataIterator.register("bucket")
class BucketIterator(DataIterator):
"""
An iterator which by default, pads batches with respect to the maximum input lengths `per
batch`. Additionally, you can provide a list of field names and padding keys which the dataset
will be sorted by before doing this batching, causing inputs with similar length to be batched
together, making computation more efficient (as less time is wasted on padded elements of the
batch).
Parameters
----------
sorting_keys : List[Tuple[str, str]]
To bucket inputs into batches, we want to group the instances by padding length, so that we
minimize the amount of padding necessary per batch. In order to do this, we need to know
which fields need what type of padding, and in what order.
For example, ``[("sentence1", "num_tokens"), ("sentence2", "num_tokens"), ("sentence1",
"num_token_characters")]`` would sort a dataset first by the "num_tokens" of the
"sentence1" field, then by the "num_tokens" of the "sentence2" field, and finally by the
"num_token_characters" of the "sentence1" field. TODO(mattg): we should have some
documentation somewhere that gives the standard padding keys used by different fields.
padding_noise : float, optional (default=.1)
When sorting by padding length, we add a bit of noise to the lengths, so that the sorting
isn't deterministic. This parameter determines how much noise we add, as a percentage of
the actual padding value for each instance.
biggest_batch_first : bool, optional (default=False)
This is largely for testing, to see how large of a batch you can safely use with your GPU.
This will let you try out the largest batch that you have in the data `first`, so that if
you're going to run out of memory, you know it early, instead of waiting through the whole
epoch to find out at the end that you're going to crash.
Note that if you specify ``max_instances_in_memory``, the first batch will only be the
biggest from among the first "max instances in memory" instances.
batch_size : int, optional, (default = 32)
The size of each batch of instances yielded when calling the iterator.
instances_per_epoch : int, optional, (default = None)
See :class:`BasicIterator`.
max_instances_in_memory : int, optional, (default = None)
See :class:`BasicIterator`.
maximum_samples_per_batch : ``Tuple[str, int]``, (default = None)
See :class:`BasicIterator`.
skip_smaller_batches : bool, optional, (default = False)
When the number of data samples is not dividable by `batch_size`,
some batches might be smaller than `batch_size`.
If set to `True`, those smaller batches will be discarded.
"""
def __init__(self,
sorting_keys: List[Tuple[str, str]],
padding_noise: float = 0.1,
biggest_batch_first: bool = False,
batch_size: int = 32,
instances_per_epoch: int = None,
max_instances_in_memory: int = None,
cache_instances: bool = False,
track_epoch: bool = False,
maximum_samples_per_batch: Tuple[str, int] = None,
skip_smaller_batches: bool = False) -> None:
if not sorting_keys:
raise ConfigurationError("BucketIterator requires sorting_keys to be specified")
super().__init__(cache_instances=cache_instances,
track_epoch=track_epoch,
batch_size=batch_size,
instances_per_epoch=instances_per_epoch,
max_instances_in_memory=max_instances_in_memory,
maximum_samples_per_batch=maximum_samples_per_batch)
self._sorting_keys = sorting_keys
self._padding_noise = padding_noise
self._biggest_batch_first = biggest_batch_first
self._skip_smaller_batches = skip_smaller_batches
@overrides
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
for instance_list in self._memory_sized_lists(instances):
batches = []
excess: Deque[Instance] = deque()
for batch_instances in lazy_groups_of(iter(instance_list), self._batch_size):
for possibly_smaller_batches in self._ensure_batch_is_sufficiently_small(batch_instances, excess):
if self._skip_smaller_batches and len(possibly_smaller_batches) < self._batch_size:
continue
batches.append(Batch(possibly_smaller_batches))
if excess and (not self._skip_smaller_batches or len(excess) == self._batch_size):
batches.append(Batch(excess))
# TODO(brendanr): Add multi-GPU friendly grouping, i.e. group
# num_gpu batches together, shuffle and then expand the groups.
# This guards against imbalanced batches across GPUs.
move_to_front = self._biggest_batch_first and len(batches) > 1
if move_to_front:
# We'll actually pop the last _two_ batches, because the last one might not be full.
last_batch = batches.pop()
penultimate_batch = batches.pop()
if move_to_front:
batches.insert(0, penultimate_batch)
batches.insert(0, last_batch)
yield from batches
| 52.006897 | 114 | 0.670203 | import logging
import random
from collections import deque
from typing import List, Tuple, Iterable, cast, Dict, Deque
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import lazy_groups_of, add_noise_to_dict_values
from allennlp.data.dataset import Batch
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
def sort_by_padding(instances: List[Instance],
sorting_keys: List[Tuple[str, str]],
vocab: Vocabulary,
padding_noise: float = 0.0) -> List[Instance]:
instances_with_lengths = []
for instance in instances:
instance.index_fields(vocab)
padding_lengths = cast(Dict[str, Dict[str, float]], instance.get_padding_lengths())
if padding_noise > 0.0:
noisy_lengths = {}
for field_name, field_lengths in padding_lengths.items():
noisy_lengths[field_name] = add_noise_to_dict_values(field_lengths, padding_noise)
padding_lengths = noisy_lengths
instance_with_lengths = ([padding_lengths[field_name][padding_key]
for (field_name, padding_key) in sorting_keys],
instance)
instances_with_lengths.append(instance_with_lengths)
instances_with_lengths.sort(key=lambda x: x[0])
return [instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths]
@DataIterator.register("bucket")
class BucketIterator(DataIterator):
def __init__(self,
sorting_keys: List[Tuple[str, str]],
padding_noise: float = 0.1,
biggest_batch_first: bool = False,
batch_size: int = 32,
instances_per_epoch: int = None,
max_instances_in_memory: int = None,
cache_instances: bool = False,
track_epoch: bool = False,
maximum_samples_per_batch: Tuple[str, int] = None,
skip_smaller_batches: bool = False) -> None:
if not sorting_keys:
raise ConfigurationError("BucketIterator requires sorting_keys to be specified")
super().__init__(cache_instances=cache_instances,
track_epoch=track_epoch,
batch_size=batch_size,
instances_per_epoch=instances_per_epoch,
max_instances_in_memory=max_instances_in_memory,
maximum_samples_per_batch=maximum_samples_per_batch)
self._sorting_keys = sorting_keys
self._padding_noise = padding_noise
self._biggest_batch_first = biggest_batch_first
self._skip_smaller_batches = skip_smaller_batches
@overrides
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
for instance_list in self._memory_sized_lists(instances):
batches = []
excess: Deque[Instance] = deque()
for batch_instances in lazy_groups_of(iter(instance_list), self._batch_size):
for possibly_smaller_batches in self._ensure_batch_is_sufficiently_small(batch_instances, excess):
if self._skip_smaller_batches and len(possibly_smaller_batches) < self._batch_size:
continue
batches.append(Batch(possibly_smaller_batches))
if excess and (not self._skip_smaller_batches or len(excess) == self._batch_size):
batches.append(Batch(excess))
move_to_front = self._biggest_batch_first and len(batches) > 1
if move_to_front:
last_batch = batches.pop()
penultimate_batch = batches.pop()
if move_to_front:
batches.insert(0, penultimate_batch)
batches.insert(0, last_batch)
yield from batches
| true | true |
f722ddc0e88f53d99b4c5a095c72b24f73c7f856 | 1,854 | py | Python | provisioner/sources/test.py | srlehn/opennms-provisioner | aec001a24ed71ff54a9ff6d05c178c84d4d2c71d | [
"MIT"
] | 1 | 2019-04-23T11:57:11.000Z | 2019-04-23T11:57:11.000Z | provisioner/sources/test.py | srlehn/opennms-provisioner | aec001a24ed71ff54a9ff6d05c178c84d4d2c71d | [
"MIT"
] | 1 | 2019-04-23T14:01:39.000Z | 2019-04-23T14:01:39.000Z | provisioner/sources/test.py | srlehn/opennms-provisioner | aec001a24ed71ff54a9ff6d05c178c84d4d2c71d | [
"MIT"
] | 2 | 2018-11-17T16:10:59.000Z | 2019-04-23T11:57:49.000Z | """
opennms-provisioner test source module
This module is the provides test sources for opennms-provisioner.
:license: MIT, see LICENSE for more details
:copyright: (c) 2018 by Michael Batz, see AUTHORS for more details
"""
import provisioner.source
import provisioner.opennms
class DummySource(provisioner.source.Source):
""" Dummy source.
This is source is a test and demonstrates the implementation
of an own source. It exports two test nodes.
Attributes:
name: name of the source
parameters: dictionary with parameters for this source
"""
def __init__(self, name, parameters):
provisioner.source.Source.__init__(self, name, parameters)
def get_nodes(self):
# create nodelist
nodelist = []
# get parameters from config
cat1 = self.get_parameter("cat1", None)
cat2 = self.get_parameter("cat2", None)
# create testnode 1
node_1 = provisioner.opennms.Node("testnode1", "1")
node_1.add_interface("127.0.0.1")
node_1.add_service("127.0.0.1", "ICMP")
node_1.add_service("127.0.0.1", "SNMP")
node_1.add_asset("city", "Fulda")
node_1.add_asset("zip", "36041")
node_1.add_category("Test")
if cat1:
node_1.add_category(cat1)
if cat2:
node_1.add_category(cat2)
# create testnode2
node_2 = provisioner.opennms.Node("testnode2", "2")
node_2.add_interface("127.0.0.1")
node_2.add_asset("city", "Fulda")
node_2.add_asset("zip", "36041")
node_2.add_category("Test")
if cat1:
node_2.add_category(cat1)
if cat2:
node_2.add_category(cat2)
# add nodes to list and return nodelist
nodelist.append(node_1)
nodelist.append(node_2)
return nodelist
| 29.903226 | 66 | 0.633225 | import provisioner.source
import provisioner.opennms
class DummySource(provisioner.source.Source):
def __init__(self, name, parameters):
provisioner.source.Source.__init__(self, name, parameters)
def get_nodes(self):
nodelist = []
cat1 = self.get_parameter("cat1", None)
cat2 = self.get_parameter("cat2", None)
node_1 = provisioner.opennms.Node("testnode1", "1")
node_1.add_interface("127.0.0.1")
node_1.add_service("127.0.0.1", "ICMP")
node_1.add_service("127.0.0.1", "SNMP")
node_1.add_asset("city", "Fulda")
node_1.add_asset("zip", "36041")
node_1.add_category("Test")
if cat1:
node_1.add_category(cat1)
if cat2:
node_1.add_category(cat2)
node_2 = provisioner.opennms.Node("testnode2", "2")
node_2.add_interface("127.0.0.1")
node_2.add_asset("city", "Fulda")
node_2.add_asset("zip", "36041")
node_2.add_category("Test")
if cat1:
node_2.add_category(cat1)
if cat2:
node_2.add_category(cat2)
nodelist.append(node_1)
nodelist.append(node_2)
return nodelist
| true | true |
f722de16a1236eef65e4b072cfd145721bdf797a | 15,252 | py | Python | AI/node.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | 1 | 2021-12-19T01:09:12.000Z | 2021-12-19T01:09:12.000Z | AI/node.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | null | null | null | AI/node.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | null | null | null | """
node
"""
import socket
import random
import pickle
import time
import ast
import concurrent.futures
from ecdsa import SigningKey, VerifyingKey, SECP112r2
# receive messages from peer nodes
def receive(local_ip):
    """Accept one TCP connection on port 1379 and return its message.

    Blocks until a peer connects and sends data.  The payload is decoded
    as UTF-8 and split on spaces: the first element is the message type
    and the remaining elements are the message body.

    Parameters:
        local_ip: local interface address to bind the listening socket to.

    Returns:
        (message, address): the space-split message list and the peer's
        (host, port) tuple as reported by ``accept()``.
    """
    # Context manager guarantees the listening socket is closed even if
    # bind()/accept()/recv() raises (the original only closed it on the
    # success path).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        # SO_REUSEADDR lets the node rebind quickly after a restart.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((local_ip, 1379))
        server.listen()
        while True:
            try:
                client, address = server.accept()
                # Close the accepted socket promptly; 2048 bytes is the
                # protocol's per-message cap.
                with client:
                    message = client.recv(2048).decode("utf-8").split(" ")
                return message, address
            except Exception as e:  # best effort: log and keep listening
                print(e)
# send to node
def send(host, message, port=1379, send_all=False):
    """Send a UTF-8 message to ``host`` over TCP.

    First tries the given ``port`` (default 1379).  If the connection is
    refused and ``send_all`` is False, the registry in
    ``info/Nodes.pickle`` is searched for a non-default port recorded
    for that host and the send is retried there.  The fallback lookup is
    skipped for broadcast sends (``send_all=True``) for speed.

    Returns:
        None on success (or on a non-refused failure), the string
        ``"node offline"`` when the fallback lookup itself fails.
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect((host, port))
        client.send(message.encode("utf-8"))
        print(f"Message to {host} {message}\n")
        return
    except Exception as e:
        if send_all or not isinstance(e, ConnectionRefusedError):
            return
        try:
            with open("info/Nodes.pickle", "rb") as file:
                nodes = pickle.load(file)
            for node in nodes:
                # Nodes are dicts keyed "ip"/"port"/"pub_key" (see
                # rand_act_node); the original indexed node[1], which
                # raises KeyError on a dict.
                if node["ip"] == host and int(node["port"]) != 1379:
                    # A socket whose connect() failed cannot be reused;
                    # open a fresh one for the retry.
                    retry = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    try:
                        retry.connect((host, int(node["port"])))
                        retry.send(message.encode("utf-8"))
                        print(f"Message to {host} {message}\n")
                        return
                    finally:
                        retry.close()
        except Exception:
            return "node offline"
    finally:
        # Always release the primary socket (previously leaked).
        client.close()
# check if nodes online
def online(address):
    """Ask the node at ``address`` whether it is online.

    Sends an "ONLINE?" probe, waits five seconds for the reply to be
    recorded in recent_messages.txt by the receive loop, then checks for
    a "yh" acknowledgement from that peer.

    Returns:
        True if a "yh" reply was recorded, False otherwise (including
        when the probe could not be sent or no reply arrived).
    """
    print(address)
    try:
        send(address, "ONLINE?")
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        return False
    # Give the peer time to answer and the listener time to log the reply.
    time.sleep(5)
    replies = request_reader("YH", ip=address)
    if not replies:
        # Previously fell through returning None; False keeps the return
        # type consistently boolean for callers.
        return False
    # Recorded line format: "<ip> yh ..." — field 1 is the message type.
    return replies[0].split(" ")[1] == "yh"
def rand_act_node(num_nodes=1):
    """Draw ``num_nodes`` random currently-online peer nodes.

    A node is skipped when its public key matches our own (never select
    ourselves) or when it does not answer the liveness probe.  Draws are
    with replacement, so duplicates are possible; the call loops forever
    if fewer than ``num_nodes`` peers are reachable.

    Parameters:
        num_nodes: how many online nodes to return.

    Returns:
        A single node dict when exactly one node was requested,
        otherwise a list of node dicts.
    """
    with open("./info/Public_key.txt", "r") as file:
        key = file.read()
    # Load the registry once instead of re-reading the pickle on every
    # loop iteration as before.
    with open("info/Nodes.pickle", "rb") as file:
        all_nodes = pickle.load(file)
    nodes = []
    while len(nodes) != num_nodes:
        node = random.choice(all_nodes)
        print(node)
        if node["pub_key"] == key:
            continue
        if online(node["ip"]):
            nodes.append(node)
    if len(nodes) == 1:
        return nodes[0]
    return nodes
def _remove_from_messages(needle):
    """Rewrite recent_messages.txt keeping every non-blank line that does
    not contain *needle* (i.e. consume the matched message)."""
    with open("recent_messages.txt", "r") as file:
        file_lines = file.readlines()
    kept = [l for l in file_lines
            if needle not in l and l.strip("\n") != ""]
    with open("recent_messages.txt", "w") as file:
        for l in kept:
            file.write(l)


def request_reader(type, script_identity=0.0, ip="192.168.68.1"):
    """Scan recent_messages.txt for messages of protocol *type*.

    Returns the list of matching lines (and consumes the first match from
    the file), or None when nothing matched.  ``type`` is one of
    "YH", "NODE", "NREQ" or "DEP".

    NOTE(review): ``ip`` is accepted but unused, and the DEP branch compares
    a string field against ``script_identity`` (default float 0.0) -- so DEP
    only matches when callers pass a string identity; confirm intent.
    """
    with open("recent_messages.txt", "r") as file:
        lines = file.read().splitlines()
    NREQ_Lines = []
    DEP_Lines = []
    yh_Lines = []
    NODE_Lines = []  # nothing ever appends here; kept for the NODE branch
    if lines:
        for line in lines:
            parts = line.split(" ")
            if parts[0] == "" or parts[0] == "\n":
                continue  # skip blank lines
            elif parts[1] == "NREQ":  # node request
                NREQ_Lines.append(" ".join(parts))
            elif parts[1] == "DEP" and parts[2] == script_identity:
                DEP_Lines.append(" ".join(parts))
            elif parts[1] == "yh":
                yh_Lines.append(" ".join(parts))
    # the original repeated the same read-filter-rewrite sequence four
    # times, once per protocol; the helper above factors that out
    buckets = {"YH": yh_Lines, "NODE": NODE_Lines,
               "NREQ": NREQ_Lines, "DEP": DEP_Lines}
    selected = buckets.get(type)
    if selected:
        _remove_from_messages(selected[0])
        return selected
def send_to_all(message):
    """Broadcast *message* to every node recorded in info/Nodes.pickle."""
    with open("./info/Nodes.pickle", "rb") as file:
        known = pickle.load(file)
    for entry in known:
        print(message)
        print(entry["ip"], entry["port"])
        send(entry["ip"], message, port=entry["port"], send_all=True)
def announce(pub_key, port, version, num_gpus, benchmark, priv_key):
    """Broadcast a signed HELLO message announcing this node.

    Returns the announced timestamp (string) so callers can reuse it.
    """
    announcement_time = str(time.time())
    if not isinstance(priv_key, bytes):
        priv_key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
    sig = str(priv_key.sign(announcement_time.encode()).hex())
    # ``sig`` is already a hex *string*: the original called sig.hex()
    # (AttributeError) and embedded a second time.time() that would not
    # match the signed timestamp, so receivers could never verify it.
    send_to_all(f'HELLO {announcement_time} {pub_key} {port} {version} {num_gpus} {benchmark} {sig}')
    print("Announcement sent")
    return announcement_time
def update(pub_key, port, version, num_gpus, benchmark, priv_key):
    """Broadcast a signed UPDATE message with this node's current details."""
    update_time = str(time.time())
    if isinstance(priv_key, bytes):
        key = priv_key
    else:
        key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
    sig = str(key.sign(update_time.encode()).hex())
    send_to_all(f"UPDATE {update_time} {pub_key} {str(port)} {version} {num_gpus} {benchmark} {sig}")
def delete(pub_key, priv_key):
    """Broadcast a signed DELETE message asking peers to drop this node."""
    update_time = str(time.time())
    if isinstance(priv_key, bytes):
        key = priv_key
    else:
        key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
    sig = str(key.sign(update_time.encode()).hex())
    send_to_all(f"DELETE {update_time} {pub_key} {sig}")
def new_node(initiation_time, ip, pub_key, port, node_version, num_gpus, benchmark, sig):
    """Verify *sig* against *initiation_time* and register a node dict.

    Skips registration when the public key or IP is already known.
    Returns "node invalid" on any verification/IO failure, else None.

    NOTE(review): a second ``def new_node(...)`` later in this module
    shadows this definition -- confirm which variant callers expect.
    """
    with open("info/Nodes.pickle", "rb") as file:
        nodes = pickle.load(file)
    public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
    try:
        # verify() raises on a bad signature; the assert guards the
        # (truthy) success value as a second check
        assert public_key.verify(bytes.fromhex(sig), str(initiation_time).encode())
        new_node = {"time": initiation_time, "ip": ip, "pub_key": pub_key, "port": port, "version": node_version,
                    "num_gpus": num_gpus, "benchmark": benchmark}
        for node in nodes:
            if node["pub_key"] == pub_key:
                return
            if node["ip"] == ip:
                return
        nodes.append(new_node)
        with open("info/Nodes.pickle", "wb") as file:
            pickle.dump(nodes, file)
    except Exception as e:
        print(e)
        return "node invalid"
def update_node(ip, update_time, pub_key, port, node_version, num_gpus, benchmark, sig):
    """Verify *sig* and refresh the stored record of the node at *ip*.

    Matching is by IP only; every field of the matching record except
    "time" and "ip" is overwritten.  Returns "update invalid" on failure.
    """
    with open("info/Nodes.pickle", "rb") as file:
        nodes = pickle.load(file)
    public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
    try:
        assert public_key.verify(bytes.fromhex(sig), str(update_time).encode())
        for node in nodes:
            if node["ip"] == ip:
                node["pub_key"] = pub_key
                node["port"] = port
                node["version"] = node_version
                node["num_gpus"] = num_gpus
                node["benchmark"] = benchmark
        # persist the mutated table back to disk
        with open("info/Nodes.pickle", "wb") as file:
            pickle.dump(nodes, file)
        print("NODE UPDATED")
    except:
        # NOTE(review): the bare except also hides IO errors, not just
        # signature failures -- consider narrowing.
        return "update invalid"
def delete_node(deletion_time, ip, pub_key, sig):
    """Verify *sig* and remove the matching node from info/Nodes.pickle.

    Returns "cancel invalid" when verification or IO fails, else None.
    """
    with open("info/Nodes.pickle", "rb") as file:
        nodes = pickle.load(file)
    public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
    try:
        assert public_key.verify(bytes.fromhex(sig), str(deletion_time).encode())
        # the original did ``del node`` inside the loop, which only unbinds
        # the loop variable -- the record was never removed from the list
        nodes = [node for node in nodes
                 if not (node["ip"] == ip and node["pub_key"] == pub_key)]
        with open("info/Nodes.pickle", "wb") as file:
            pickle.dump(nodes, file)
    except:
        return "cancel invalid"
def get_nodes():
    """Ask a random active node for its node table and store it locally.

    Blocks (polling once per second) until a NREQ reply arrives, then
    writes the received table to ./info/Nodes.pickle and returns.
    """
    node = rand_act_node()
    # rand_act_node returns a node dict; the original indexed node[1]
    send(node["ip"], "GET_NODES")
    while True:
        time.sleep(1)
        replies = request_reader("NREQ")
        if not replies:
            # request_reader returns None until a NREQ message appears
            continue
        # the original called .split() on the returned *list*; take the
        # first matching line instead
        parts = replies[0].split(" ")
        nodes = ast.literal_eval(parts[2])
        with open("./info/Nodes.pickle", "wb") as file:
            pickle.dump(nodes, file)
        return
def send_node(host):
    """Reply to a GET_NODES request by sending our node table to *host*.

    The table is serialised with str() and stripped of spaces so it fits
    the space-delimited wire format.
    """
    with open("info/Nodes.pickle", "rb") as file:
        table = pickle.load(file)
    payload = str(table).replace(" ", "")
    send(host, "NREQ " + payload)
def new_node(time, ip, pub_key, port, version, node_type):
    """Register a node as a plain list record.

    NOTE(review): this redefines (shadows) the dict-based ``new_node``
    declared earlier in this module, and its ``time`` parameter shadows
    the ``time`` module -- confirm which variant is intended.
    """
    with open("info/Nodes.pickle", "rb") as file:
        nodes = pickle.load(file)
    new_node = [time, ip, pub_key, port, version, node_type]
    # skip registration if the public key is already known
    for node in nodes:
        if node[2] == pub_key:
            return
    nodes.append(new_node)
    with open("info/Nodes.pickle","wb") as file:
        pickle.dump(nodes, file)
def version(ver):
    """Broadcast this node's software version to all peers."""
    send_to_all("VERSION {0}".format(ver))
def version_update(ip, ver):
    """Record *ver* as the version of the node at *ip* in info/Nodes.pickle.

    Node records here are list-shaped ([time, ip, pub_key, port, version,
    node_type]); index 1 is the IP and index 4 the version.
    """
    with open("./info/Nodes.pickle", "rb") as file:
        nodes = pickle.load(file)
    for entry in nodes:
        if entry[1] == ip:
            entry[4] = ver
            break
    # the original mutated only the in-memory copy and never wrote it
    # back, so the update was silently lost
    with open("./info/Nodes.pickle", "wb") as file:
        pickle.dump(nodes, file)
class NodeError(Exception):
    """Base class for protocol-validation errors."""
    pass


class UnrecognisedCommand(NodeError):
    """Raised when the protocol word is unknown."""
    pass


class ValueTypeError(NodeError):
    """Raised when a field has the wrong type or is out of range."""
    pass


class UnrecognisedArg(NodeError):
    """Raised when a message has a bad or missing argument."""
    pass


def _require_float(field, label):
    """Raise ValueTypeError(*label*) unless *field* parses as a float
    containing a decimal point."""
    try:
        float(field)
        if "." not in field:
            # the original built Exception() without raising it, so
            # dot-less values such as "2" slipped through validation
            raise Exception()
    except ValueTypeError:
        raise
    except Exception:
        raise ValueTypeError(label)


def _require_port(field):
    """Parse and range-check a TCP port, raising ValueTypeError on failure."""
    try:
        port = int(field)
    except Exception:
        raise ValueTypeError("port not given as int")
    # the original wrote ``not port > 0 and port < 65535`` which, by
    # operator precedence, is ``(not port > 0) and ...`` and therefore
    # never rejected out-of-range ports
    if not (0 < port < 65535):
        raise ValueTypeError("TCP port out of range")


def message_handler(message):
    """Validate an incoming protocol message (list of fields, host first).

    Raises a NodeError subclass describing the first problem found and
    returns None when the message is well formed.
    """
    try:
        protocol = message[1]
    except Exception:
        raise UnrecognisedArg("No Protocol Found")
    node_types = ["Lite", "Blockchain", "AI"]
    if protocol == "GET_NODES":
        # host, GET_NODES
        if len(message) != 2:
            raise UnrecognisedArg("number of args given incorrect")
    elif protocol == "HELLO":
        # host, HELLO, announcement_time, public key, port, version, node type, sig
        if len(message) != 8:
            raise UnrecognisedArg("number of args given incorrect")
        _require_float(message[2], "time not given as float")
        if len(message[3]) != 56:
            raise UnrecognisedArg("Public Key is the wrong size")
        _require_port(message[4])
        _require_float(message[5], "version not given as float")
        if message[6] not in node_types:
            raise UnrecognisedArg("Node Type Unknown")
    elif protocol == "ONLINE?":
        # host, ONLINE?
        if len(message) != 2:
            raise UnrecognisedArg("number of args given incorrect")
    elif protocol == "UPDATE":
        # host, UPDATE, update time, public key, port, version, sig
        if len(message) != 7:
            raise UnrecognisedArg("number of args given incorrect")
        _require_float(message[2], "time not given as float")
        if len(message[3]) != 56:
            raise UnrecognisedArg("Public Key is the wrong size")
        _require_port(message[4])
        _require_float(message[5], "version not given as float")
    elif protocol == "DELETE":
        # host, DELETE, public key, sig
        if len(message) != 4:
            raise UnrecognisedArg("number of args given incorrect")
        if len(message[2]) != 56:
            raise UnrecognisedArg("Public Key is the wrong size")
    elif protocol == "NREQ":
        # host, NREQ, Nodes
        try:
            ast.literal_eval(message[2])
        except Exception:
            raise ValueTypeError("Blockchain not given as Node List")
    elif protocol == "ERROR":
        pass
    elif protocol == "DIST":
        if len(message) != 4:
            raise UnrecognisedArg("number of args given ")
    elif protocol == "DEP":
        if len(message) != 4:
            raise UnrecognisedArg("number of args given ")
    elif protocol == "AI":
        if len(message) != 6:
            raise UnrecognisedArg("number of args given")
        if len(ast.literal_eval(message[4])) > 10:
            raise ValueTypeError("Too many nodes given")
    else:
        raise UnrecognisedCommand("protocol unrecognised")
if __name__ == "__main__":
    # NOTE(review): receive() takes a required ``local_ip`` argument
    # (see its definition), so this bare call raises TypeError -- confirm
    # the intended bind address and pass it here.
    receive()
| 32.109474 | 113 | 0.54819 |
import socket
import random
import pickle
import time
import ast
import concurrent.futures
from ecdsa import SigningKey, VerifyingKey, SECP112r2
def receive(local_ip):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((local_ip, 1379))
server.listen()
while True:
try:
client, address = server.accept()
message = client.recv(2048).decode("utf-8").split(" ")
server.close()
return message, address
except Exception as e:
print(e)
def send(host, message, port=1379, send_all=False):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect((host, port))
client.send(message.encode("utf-8"))
print(f"Message to {host} {message}\n")
return
except Exception as e:
if not send_all:
if isinstance(e, ConnectionRefusedError):
try:
with open("info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
for node in nodes:
if node[1] == host:
if not int(node["port"]) == 1379:
client.connect((host, int(node["port"])))
client.send(message.encode("utf-8"))
print(f"Message to {host} {message}\n")
return
except Exception as e:
return "node offline"
def online(address):
print(address)
try:
send(address, "ONLINE?")
except:
return False
time.sleep(5)
message = request_reader("YH", ip=address)
if message:
message = message[0].split(" ")
if message[1] == "yh":
return True
else:
return False
def rand_act_node(num_nodes=1):
with open("./info/Public_key.txt", "r") as file:
key = file.read()
nodes = []
i = 0
while i != num_nodes:
with open("info/Nodes.pickle", "rb") as file:
all_nodes = pickle.load(file)
node_index = random.randint(0, len(all_nodes) - 1)
node = all_nodes[node_index]
print(node)
if node["pub_key"] == key:
continue
alive = online(node["ip"])
if alive:
nodes.append(node)
i += 1
if len(nodes) == 1:
return nodes[0]
else:
return nodes
def request_reader(type, script_identity = 0.0, ip="192.168.68.1"):
with open("recent_messages.txt", "r") as file:
lines = file.read().splitlines()
NREQ_protocol = ["NREQ"]
DEP_protocol = ["DEP"]
yh_protocol = ["yh"]
NODE_Lines = []
NREQ_Lines = []
DEP_Lines = []
yh_Lines = []
if str(lines) != "[]":
for line in lines:
line = line.split(" ")
if line[0] == "" or line[0] == "\n":
del line
elif line[1] in NREQ_protocol:
NREQ_Lines.append(" ".join(line))
elif line[1] in DEP_protocol and line[2] == script_identity:
DEP_Lines.append(" ".join(line))
elif line[1] in yh_protocol:
yh_Lines.append(" ".join(line))
if type == "YH":
if len(yh_Lines) != 0:
new_lines = []
with open("recent_messages.txt", "r") as file:
file_lines = file.readlines()
for f_line in file_lines:
f_line.split(" ")
if not yh_Lines[0] in f_line:
if not f_line.strip("\n") == "":
new_lines.append(f_line)
open("recent_messages.txt", "w").close()
with open("recent_messages.txt", "a") as file:
for n_line in new_lines:
file.write(n_line)
return yh_Lines
if type == "NODE":
if len(NODE_Lines) != 0:
new_lines = []
with open("recent_messages.txt", "r") as file:
file_lines = file.readlines()
for f_line in file_lines:
f_line.split(" ")
if not NODE_Lines[0] in f_line:
if not f_line.strip("\n") == "":
new_lines.append(f_line)
open("recent_messages.txt", "w").close()
with open("recent_messages.txt", "a") as file:
for n_line in new_lines:
file.write(n_line)
return NODE_Lines
if type == "NREQ":
if len(NREQ_Lines) != 0:
new_lines = []
with open("recent_messages.txt", "r+") as file:
file_lines = file.readlines()
for f_line in file_lines:
f_line.split(" ")
if not NREQ_Lines[0] in f_line:
if not f_line.strip("\n") == "":
new_lines.append(f_line)
open("recent_messages.txt", "w").close()
with open("recent_messages.txt", "a") as file:
for n_line in new_lines:
file.write(n_line)
return NREQ_Lines
if type == "DEP":
if len(DEP_Lines) != 0:
new_lines = []
with open("recent_messages.txt", "r") as file:
file_lines = file.readlines()
for f_line in file_lines:
f_line.split(" ")
if not DEP_Lines[0] in f_line:
if not f_line.strip("\n") == "":
new_lines.append(f_line)
open("recent_messages.txt", "w").close()
with open("recent_messages.txt", "a") as file:
for n_line in new_lines:
file.write(n_line)
return DEP_Lines
def send_to_all(message):
with open("./info/Nodes.pickle", "rb") as file:
all_nodes = pickle.load(file)
for node in all_nodes:
print(message)
print(node["ip"], node["port"])
send(node["ip"], message, port=node["port"], send_all=True)
def announce(pub_key, port, version, num_gpus, benchmark, priv_key):
announcement_time = str(time.time())
if not isinstance(priv_key, bytes):
priv_key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
sig = str(priv_key.sign(announcement_time.encode()).hex())
send_to_all(f'HELLO {str(time.time())} {pub_key} {port} {version} {num_gpus} {benchmark} {sig.hex()}')
print("Announcement sent")
return announcement_time
def update(pub_key, port, version, num_gpus, benchmark, priv_key):
update_time = str(time.time())
if not isinstance(priv_key, bytes):
priv_key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
sig = str(priv_key.sign(update_time.encode()).hex())
send_to_all(f"UPDATE {update_time} {pub_key} {str(port)} {version} {num_gpus} {benchmark} {sig}")
def delete(pub_key, priv_key):
update_time = str(time.time())
if not isinstance(priv_key, bytes):
priv_key = SigningKey.from_string(bytes.fromhex(priv_key), curve=SECP112r2)
sig = str(priv_key.sign(update_time.encode()).hex())
send_to_all(f"DELETE {update_time} {pub_key} {sig}")
def new_node(initiation_time, ip, pub_key, port, node_version, num_gpus, benchmark, sig):
with open("info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
try:
assert public_key.verify(bytes.fromhex(sig), str(initiation_time).encode())
new_node = {"time": initiation_time, "ip": ip, "pub_key": pub_key, "port": port, "version": node_version,
"num_gpus": num_gpus, "benchmark": benchmark}
for node in nodes:
if node["pub_key"] == pub_key:
return
if node["ip"] == ip:
return
nodes.append(new_node)
with open("info/Nodes.pickle", "wb") as file:
pickle.dump(nodes, file)
except Exception as e:
print(e)
return "node invalid"
def update_node(ip, update_time, pub_key, port, node_version,num_gpus,benchmark ,sig):
with open("info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
try:
assert public_key.verify(bytes.fromhex(sig), str(update_time).encode())
for node in nodes:
if node["ip"] == ip:
node["pub_key"] = pub_key
node["port"] = port
node["version"] = node_version
node["num_gpus"] = num_gpus
node["benchmark"] = benchmark
with open("info/Nodes.pickle", "wb") as file:
pickle.dump(nodes, file)
print("NODE UPDATED")
except:
return "update invalid"
def delete_node(deletion_time, ip, pub_key, sig):
with open("info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
public_key = VerifyingKey.from_string(bytes.fromhex(pub_key), curve=SECP112r2)
try:
assert public_key.verify(bytes.fromhex(sig), str(deletion_time).encode())
for node in nodes:
if node["ip"] == ip and node["pub_key"] == pub_key:
del node
with open("info/Nodes.pickle", "wb") as file:
pickle.dump(nodes, file)
except:
return "cancel invalid"
def get_nodes():
node = rand_act_node()
send(node[1],"GET_NODES")
while True:
time.sleep(1)
line = request_reader("NREQ")
line = line.split(" ")
nodes = line[2]
nodes = ast.literal_eval(nodes)
with open("./info/Nodes.pickle", "wb") as file:
pickle.dump(nodes, file)
def send_node(host):
with open("info/Nodes.pickle", "rb") as file:
Nodes = pickle.load(file)
str_node = str(Nodes)
str_node = str_node.replace(" ", "")
send(host, "NREQ " + str_node)
def new_node(time, ip, pub_key, port, version, node_type):
with open("info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
new_node = [time, ip, pub_key, port, version, node_type]
for node in nodes:
if node[2] == pub_key:
return
nodes.append(new_node)
with open("info/Nodes.pickle","wb") as file:
pickle.dump(nodes, file)
def version(ver):
send_to_all(f"VERSION {ver}")
def version_update(ip, ver):
with open("./info/Nodes.pickle", "rb") as file:
nodes = pickle.load(file)
for nod in nodes:
if nod[1] == ip:
nod[4] = ver
break
class NodeError(Exception):
pass
class UnrecognisedCommand(NodeError):
pass
class ValueTypeError(NodeError):
pass
class UnrecognisedArg(NodeError):
pass
def message_handler(message):
try:
protocol = message[1]
except:
raise UnrecognisedArg("No Protocol Found")
node_types = ["Lite", "Blockchain", "AI"]
if protocol == "GET_NODES":
if len(message) != 2:
raise UnrecognisedArg("number of args given incorrect")
elif protocol == "HELLO":
if len(message) != 8:
raise UnrecognisedArg("number of args given incorrect")
try:
float(message[2])
if "." not in message[2]:
Exception()
except:
raise ValueTypeError("time not given as float")
if len(message[3]) != 56:
raise UnrecognisedArg("Public Key is the wrong size")
try:
port = int(message[4])
except:
raise ValueTypeError("port not given as int")
if not port > 0 and port < 65535:
raise ValueTypeError("TCP port out of range")
try:
float(message[5])
if "." not in message[5]:
Exception()
except:
raise ValueTypeError("version not given as float")
if message[6] not in node_types:
raise UnrecognisedArg("Node Type Unknown")
elif protocol == "ONLINE?":
if len(message) != 2:
raise UnrecognisedArg("number of args given incorrect")
elif protocol == "UPDATE":
if len(message) != 7:
raise UnrecognisedArg("number of args given incorrect")
try:
float(message[2])
if "." not in message[2]:
Exception()
except:
raise ValueTypeError("time not given as float")
if len(message[3]) != 56:
raise UnrecognisedArg("Public Key is the wrong size")
try:
port = int(message[4])
except:
raise ValueTypeError("port not given as int")
if not port >= 0 and port < 65535:
raise ValueTypeError("TCP port out of range")
try:
float(message[5])
if "." not in message[5]:
Exception()
except:
raise ValueTypeError("version not given as float")
elif protocol == "DELETE":
if len(message) != 4:
raise UnrecognisedArg("number of args given incorrect")
if len(message[2]) != 56:
raise UnrecognisedArg("Public Key is the wrong size")
elif protocol == "NREQ":
try:
ast.literal_eval(message[2])
except:
raise ValueTypeError("Blockchain not given as Node List")
elif protocol == "ERROR":
pass
elif protocol == "DIST":
if len(message) != 4:
raise UnrecognisedArg("number of args given ")
elif protocol == "DEP":
if len(message) != 4:
raise UnrecognisedArg("number of args given ")
elif protocol == "AI":
if len(message) != 6:
raise UnrecognisedArg("number of args given")
if len(ast.literal_eval(message[4])) > 10:
raise ValueTypeError("Too many nodes given")
else:
raise UnrecognisedCommand("protocol unrecognised")
if __name__ == "__main__":
receive()
| true | true |
f722df9dfce13247463d0feb247befdc7e936950 | 688 | py | Python | constants/classes.py | zsxoff/neural-orthodontic-cephalometry | 0b683343bdfb36491aa880def5ff4c9de78030d9 | [
"MIT"
] | 4 | 2020-05-24T18:18:05.000Z | 2021-02-19T01:35:14.000Z | constants/classes.py | zsxoff/neural-orthodontic-cephalometry | 0b683343bdfb36491aa880def5ff4c9de78030d9 | [
"MIT"
] | 3 | 2021-01-02T18:32:52.000Z | 2021-09-25T13:25:11.000Z | constants/classes.py | zsxoff/neural-orthodontic-cephalometry | 0b683343bdfb36491aa880def5ff4c9de78030d9 | [
"MIT"
] | 1 | 2021-09-18T01:15:29.000Z | 2021-09-18T01:15:29.000Z | """List of classes in lexical and NPZ order."""
# NOTE: the position of each label below is its NPZ class index;
# downstream code depends on this exact ordering.
# PLEASE DO NOT MODIFY, REORDER, OR EXTEND THIS LIST.
CLASSES = [
    "A",  # 0
    "Ar",  # 1
    "B",  # 2
    "Ba",  # 3
    "C",  # 4
    "DT pog",  # 5
    "EN pn",  # 6
    "Gn",  # 7
    "Go",  # 8
    "LL",  # 9
    "Me",  # 10
    "N",  # 11
    "Or",  # 12
    "Po",  # 13
    "Pog",  # 14
    "Pt",  # 15
    "S",  # 16
    "SNA",  # 17
    "SNP pm",  # 18
    "Se",  # 19
    "Sn",  # 20
    "UL",  # 21
    "aii",  # 22
    "ais",  # 23
    "ii",  # 24
    "is",  # 25
    "n_",  # 26
]
| 18.594595 | 47 | 0.305233 |
CLASSES = [
"A",
"Ar",
"B",
"Ba",
"C",
"DT pog",
"EN pn",
"Gn",
"Go",
"LL",
"Me",
"N",
"Or",
"Po",
"Pog",
"Pt",
"S",
"SNA",
"SNP pm",
"Se",
"Sn",
"UL",
"aii",
"ais",
"ii",
"is",
"n_",
]
| true | true |
f722e16e7e62695ee78cb9df7316e785fa093630 | 6,285 | py | Python | remote/lib/instruments.py | dominoe/emumem | a1babfd93e533e20eb4561e2cc749cbb40d5d999 | [
"MIT"
] | null | null | null | remote/lib/instruments.py | dominoe/emumem | a1babfd93e533e20eb4561e2cc749cbb40d5d999 | [
"MIT"
] | null | null | null | remote/lib/instruments.py | dominoe/emumem | a1babfd93e533e20eb4561e2cc749cbb40d5d999 | [
"MIT"
] | null | null | null | ######################################################
### LAST MODIFIED: 2019-06-27
######################################################
import visa, paramiko, numpy
from time import sleep
######################################################
class RASPBERRY_PI2_SSH:
    """Thin SSH wrapper around a Raspberry Pi, built on paramiko."""
    hostname = None
    port = None
    username = None
    password = None
    ssh_client = None
    command = None

    def connect(self, hostname, port, username, password):
        """Open an SSH session, auto-accepting unknown host keys."""
        self.ssh_client = paramiko.SSHClient()
        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_client.connect(hostname, port=port, username=username, password=password)

    def execute(self, command):
        """Run *command* remotely, blocking, and return its stdout as one string."""
        _, stdout, _ = self.ssh_client.exec_command(command)
        return ''.join(stdout.readlines())

    def execute_background(self, command):
        """Run *command* remotely without waiting for its output."""
        self.ssh_client.exec_command(command)

    def get_pids(self, name):
        """Return the PIDs (as strings) of remote processes matching *name*."""
        return self.execute('sudo ps -x | grep -e "{}"'.format(name) + ' | awk \'{print $1}\'').splitlines()

    def kill_instances(self, name):
        """Kill every remote process whose command line matches *name*."""
        self.execute_background('sudo kill $(sudo ps -x | grep -e "{}"'.format(name) + ' | awk \'{print $1}\')')

    def wait(self, time):
        """Sleep for *time* seconds (simple pacing helper)."""
        sleep(time)
######################################################
class RIGOL_DG4162:
    """SCPI driver for a Rigol DG4162 function generator (via pyvisa)."""
    name = None
    resource = None
    instrument = None

    # wave: CUST, HARM, NOI, PULS, RAMP, SIN, SQU, USER
    def open_instrument(self, name):
        """Open the VISA resource identified by *name*."""
        self.name = name
        self.resource = visa.ResourceManager()
        self.instrument = self.resource.open_resource(self.name)

    def set_instrument(self, wave, frequency, amplitude=0.0, offset=0.0, phase=0.0, channel=1):
        """Program *channel* with a built-in waveform (see ``wave`` list above)."""
        self.instrument.write(":SOUR%d:APPL:%s %.1f,%.1f,%.1f,%.1f" % (channel, wave, frequency, amplitude, offset, phase))

    def set_signal(self, signal, channel=1):
        """Upload an arbitrary waveform (first row of *signal*) to volatile memory."""
        self.instrument.write("SOUR{0}:TRAC:DATA VOLATILE ".format(channel) + ',' + ','.join(map(str, signal[0, :])))
        self.instrument.write("SOUR{0}:TRAC:DATA:POINts:INTerpolate OFF".format(channel))

    def burst_signal(self, channel, cycles):
        """Enable burst mode on *channel* with *cycles* cycles per trigger."""
        self.instrument.write(":SOUR{0}:BURS:NCYC {1}".format(channel, cycles))
        self.instrument.write(":SOUR{0}:BURS ON".format(channel))
        print("BURST")

    def burst_off(self, channel):
        """Disable burst mode on *channel*."""
        self.instrument.write(":SOUR{0}:BURS OFF".format(channel))

    def trig_source(self, channel, src):
        """Select the burst trigger source for *channel*."""
        self.instrument.write(":SOUR{0}:BURS:TRIG:SOUR {1}".format(channel, src))

    def trig_slope(self, channel, slope):
        """Select the burst trigger slope for *channel*."""
        self.instrument.write(":SOUR{0}:BURS:TRIG:SLOP {1}".format(channel, slope))

    def trigger(self):
        """Fire a software trigger."""
        print("Gen triggered")
        self.instrument.write("*TRG")

    def set_output(self, status, channel=1):
        """Switch *channel*'s output on (truthy status) or off (falsy).

        The original only assigned ``str_status`` for status in {0, 1} and
        raised UnboundLocalError for any other value.
        """
        str_status = "ON" if status else "OFF"
        self.instrument.write(":OUTP{0} {1}".format(channel, str_status))
######################################################
class RIGOL_DS1204B:
    """SCPI driver for a Rigol DS1204B oscilloscope (via pyvisa)."""
    name = None
    resource = None
    instrument = None
    v_scale = None
    v_offset = None
    t_scale = None
    t_offset = None
    s_rate = None
    y_ref = None
    y_inc = None
    # valid timebase scales: 1-2-5 steps over 10**-9 .. 10**1
    t_scale_list = [list(numpy.array([1,2,5])*10**(i)) for i in range(-9,2)]

    def open_instrument(self, name):
        """Open the VISA resource identified by *name*."""
        self.name = name
        self.resource = visa.ResourceManager()
        self.instrument = self.resource.open_resource(self.name)

    def run(self):
        """Start continuous acquisition."""
        self.instrument.query(':RUN')

    def stop(self):
        """Stop acquisition."""
        self.instrument.query(':STOP')

    def single(self):
        """Arm a single acquisition."""
        self.instrument.query(':SINGLE')

    def set_value(self, s):
        """Send SCPI command *s* (any reply is discarded)."""
        self.instrument.query(s)

    def get_value(self, s):
        """Send SCPI query *s* and return its reply as float."""
        return float(self.instrument.query(s))

    def set_t_scale(self, SCALE):
        """Set the timebase scale (seconds/div)."""
        self.set_value(':TIM:SCAL {0}'.format(SCALE))

    def set_v_scale(self, CH, SCALE):
        """Set the vertical scale of channel *CH* (volts/div)."""
        self.set_value(':CHAN{0}:SCAL {1}'.format(CH, SCALE))

    def set_t_offset(self, OFFSET):
        """Set the horizontal (time) offset."""
        self.set_value(':TIM:OFFS {0}'.format(OFFSET))

    def set_v_offset(self, CH, OFFSET):
        """Set the vertical offset of channel *CH*."""
        self.set_value('CHAN{0}:OFFS {1}'.format(CH, OFFSET))

    def set_trig_lvl(self, LEVEL):
        """Set the edge-trigger level."""
        self.set_value(':TRIG:EDGE:LEV {0}'.format(LEVEL))

    def get_v_scale(self, CH):
        """Return the vertical scale of channel *CH*."""
        return self.get_value(':CHAN{0}:SCAL?'.format(CH))

    def get_v_offset(self, CH):
        """Return the vertical offset of channel *CH*."""
        return self.get_value(':CHAN{0}:OFFS?'.format(CH))

    def get_t_scale(self):
        """Return the timebase scale."""
        return self.get_value(':TIM:SCAL?')

    def get_t_offset(self):
        """Return the horizontal (time) offset."""
        return self.get_value(':TIM:OFFS?')

    def get_s_rate(self):
        """Return the sample rate."""
        return self.get_value(':ACQ:SRAT?')

    def get_y_ref(self):
        """Return the Y reference level from the waveform preamble (field 9)."""
        tmp = self.instrument.query(':WAV:PRE?')
        tmp = tmp.split(',')
        return float(tmp[9])

    def get_y_inc(self, CH):
        """Return the Y increment (volts per raw count) for channel *CH*."""
        return self.get_value(':WAV:YINC? CHAN{0}'.format(CH))

    def get_vrms(self, CH):
        """Measure and return the RMS voltage of channel *CH*."""
        return self.get_value(':MEAS:VRMS? CHAN{0}'.format(CH))

    def get_vmax(self, CH):
        """Measure and return the maximum voltage of channel *CH*."""
        return self.get_value(':MEAS:VMAX? CHAN{0}'.format(CH))

    def get_vmin(self, CH):
        """Measure and return the minimum voltage of channel *CH*."""
        return self.get_value(':MEAS:VMIN? CHAN{0}'.format(CH))

    def get_vmean(self, CH):
        """Measure and return the mean voltage of channel *CH*."""
        return self.get_value(':MEAS:VAV? CHAN{0}'.format(CH))

    def get_sample_rate(self, CH):
        """Return the acquisition sample rate for channel *CH*."""
        return self.get_value(':ACQ:SRAT? CHAN{0}'.format(CH))

    def get_channel(self, CH):
        """Download the waveform of channel *CH*; returns (time, volts) arrays."""
        self.instrument.write(':WAV:POIN:MODE MAX')
        data = self.instrument.query_binary_values(':WAV:DATA? CHAN{0}'.format(CH), datatype='B', container=numpy.array)
        time = numpy.arange(len(data)) / self.get_s_rate() - self.get_t_offset()
        # raw byte values -> volts: invert, remove reference, scale, de-offset
        data = -(data - self.get_y_ref()) * self.get_y_inc(CH) - self.get_v_offset(CH)
        return time, data

    def get_trig_lvl(self):
        """Return the edge-trigger level.

        The original queried the value but dropped it (missing ``return``).
        """
        return self.get_value(':TRIG:EDGE:LEV?')
###################################################### | 33.078947 | 121 | 0.571519 | | false | true |
f722e281c5085cd6d3f518b1eadcf92e9e58e061 | 733 | py | Python | profiles_api/permissions.py | oscarperezt1991/profiles-rest-api | 3b222e1e3c3c404ad84386c4f2111edfc4a49247 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | oscarperezt1991/profiles-rest-api | 3b222e1e3c3c404ad84386c4f2111edfc4a49247 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | oscarperezt1991/profiles-rest-api | 3b222e1e3c3c404ad84386c4f2111edfc4a49247 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Allow users to edit only their own profile."""

    def has_object_permission(self, request, view, obj):
        """Grant read-only access to anyone; writes only to the owner."""
        if request.method in permissions.SAFE_METHODS:
            return True
        return request.user.id == obj.id
class UpdateOwnStatus(permissions.BasePermission):
    """Allow users to update only their own status items."""

    def has_object_permission(self, request, view, obj):
        """Grant read-only access to anyone; writes only to the item's owner."""
        if request.method in permissions.SAFE_METHODS:
            return True
        return request.user.id == obj.user_profile.id
| 31.869565 | 60 | 0.691678 | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
| true | true |
f722e3518ac0822ccb1a513b5b56bb885f11d5f4 | 90,504 | py | Python | bin/Python27/Lib/site-packages/scipy/linalg/tests/test_decomp.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/scipy/linalg/tests/test_decomp.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/scipy/linalg/tests/test_decomp.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | 1 | 2020-08-08T12:44:48.000Z | 2020-08-08T12:44:48.000Z | """ Test functions for linalg.decomp module
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_decomp.py
"""
import numpy as np
from numpy.testing import (TestCase, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_raises, assert_, assert_allclose,
run_module_suite, dec)
from scipy._lib.six import xrange
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,
eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz)
from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \
dsbev, dsbevd, dsbevx, zhbevd, zhbevx
from scipy.linalg.misc import norm
from numpy import array, transpose, sometrue, diag, ones, linalg, \
argsort, zeros, arange, float32, complex64, dot, conj, identity, \
ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \
asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\
triu, tril
from numpy.random import normal, seed, random
from scipy.linalg._testutils import assert_no_overwrite
# digit precision to use in asserts for different types
DIGITS = {'d':11, 'D':11, 'f':4, 'F':4}
# XXX: This function should be available through numpy.testing
def assert_dtype_equal(act, des):
    """Fail unless *act* and *des* describe the same dtype.

    Either argument may be an ndarray (its dtype is used) or anything
    ``numpy.dtype`` accepts.
    """
    act = act.dtype if isinstance(act, ndarray) else dtype(act)
    des = des.dtype if isinstance(des, ndarray) else dtype(des)
    assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des))
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def symrand(dim_or_eigv):
    """Return a random symmetric (Hermitian) matrix.

    If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
    uniformly distributed on (-1,1).

    If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
    eigenvalues are 'a'.
    """
    if isinstance(dim_or_eigv, int):
        n = dim_or_eigv
        eigs = random(n)*2 - 1
    elif isinstance(dim_or_eigv, ndarray) and len(dim_or_eigv.shape) == 1:
        n = dim_or_eigv.shape[0]
        eigs = dim_or_eigv
    else:
        raise TypeError("input type not supported.")

    v = random_rot(n)
    h = dot(dot(v.T.conj(), diag(eigs)), v)
    # symmetrize once more to suppress roundoff asymmetry
    h = 0.5*(h.T + h)
    return h
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def random_rot(dim):
    """Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).

    The algorithm is described in the paper
    Stewart, G.W., 'The efficient generation of random orthogonal
    matrices with an application to condition estimators', SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
    H = eye(dim)
    D = ones((dim,))
    for k in range(1, dim):
        x = normal(size=(dim-k+1,))
        D[k-1] = sign(x[0])
        x[0] -= D[k-1]*sqrt((x*x).sum())
        # Householder transformation
        Hx = eye(dim-k+1) - 2.*outer(x, x)/(x*x).sum()
        step = eye(dim)
        step[k-1:, k-1:] = Hx
        H = dot(H, step)
    # Fix the last sign such that the determinant is 1
    D[-1] = -D.prod()
    H = (D*H.T).T
    return H
class TestEigVals(TestCase):
    """Tests for scipy.linalg.eigvals on small dense matrices with
    analytically known spectra."""

    def test_simple(self):
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        computed = eigvals(mat)
        # Closed-form eigenvalues of the 3x3 matrix above.
        expected = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)

    def test_simple_tr(self):
        # A double transpose (with an intermediate copy) must leave the
        # spectrum unchanged.
        mat = array([[1,2,3],[1,2,3],[2,5,6]],'d')
        mat = transpose(mat).copy()
        mat = transpose(mat)
        computed = eigvals(mat)
        expected = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)

    def test_simple_complex(self):
        mat = [[1,2,3],[1,2,3],[2,5,6+1j]]
        computed = eigvals(mat)
        expected = [(9+1j+sqrt(92+6j))/2,
                    0,
                    (9+1j-sqrt(92+6j))/2]
        assert_array_almost_equal(computed, expected)

    def test_check_finite(self):
        # Skipping the finiteness check must not change the result.
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        computed = eigvals(mat, check_finite=False)
        expected = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)
class TestEig(object):
    """Tests for scipy.linalg.eig / eigvals, including the generalized
    eigenvalue problem eig(A, B) and its degenerate/singular cases."""

    def test_simple(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a)
        # Closed-form eigenvalues and (unnormalized) eigenvectors.
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        # Normalize the exact eigenvectors for comparison.
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        # sign(v[0,i]) fixes the arbitrary sign LAPACK may choose.
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        # Defining property of right eigenpairs: A v = w v.
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
        # Left eigenvectors satisfy A^T v = w v (real A).
        w,v = eig(a,left=1,right=0)
        for i in range(3):
            assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])

    def test_simple_complex_eig(self):
        # Real matrix with a complex-conjugate eigenvalue pair.
        a = [[1,2],[-2,1]]
        w,vl,vr = eig(a,left=1,right=1)
        assert_array_almost_equal(w, array([1+2j, 1-2j]))
        for i in range(2):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        # Left eigenvectors: A^H vl = conj(w) vl.
        for i in range(2):
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def test_simple_complex(self):
        a = [[1,2,3],[1,2,3],[2,5,6+1j]]
        w,vl,vr = eig(a,left=1,right=1)
        for i in range(3):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        for i in range(3):
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def _check_gen_eig(self, A, B):
        # Helper: verify eig(A, B) residuals and agreement with eigvals(A, B).
        A, B = asarray(A), asarray(B)
        msg = "\n%r\n%r" % (A, B)
        w, vr = eig(A,B)
        wt = eigvals(A,B)
        # Generalized eigenpair residual: A vr - w B vr should vanish
        # (only checked where the eigenvalue is finite).
        val1 = dot(A, vr)
        val2 = dot(B, vr) * w
        res = val1 - val2
        for i in range(res.shape[1]):
            if all(isfinite(res[:, i])):
                assert_array_almost_equal(res[:, i], 0, err_msg=msg)
        assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),
                                  err_msg=msg)
        # Eigenvectors should come back normalized to unit length.
        length = np.empty(len(vr))
        for i in xrange(len(vr)):
            length[i] = norm(vr[:, i])
        assert_array_almost_equal(length, np.ones(length.size), err_msg=msg)

    @dec.knownfailureif(True, "See gh-2254.")
    def test_singular(self):
        # Example taken from
        # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
        A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],
                   [27,31,26,21,15], [38,44,44,24,30]))
        B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],
                   [16,25,27,14,23], [24,35,18,21,22]))
        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_falker(self):
        """Test matrices giving some Nan generalized eigen values."""
        M = diag(array(([1,0,3])))
        K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
        D = array(([1,-1,0],[-1,1,0],[0,0,0]))
        Z = zeros((3,3))
        I = identity(3)
        # Linearized quadratic eigenvalue problem (M, D, K blocks).
        A = bmat([[I,Z],[Z,-K]])
        B = bmat([[Z,I],[M,D]])
        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_bad_geneig(self):
        # Ticket #709 (strange return values from DGGEV)
        def matrices(omega):
            c1 = -9 + omega**2
            c2 = 2*omega
            A = [[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, c1, 0],
                 [0, 0, 0, c1]]
            B = [[0, 0, 1, 0],
                 [0, 0, 0, 1],
                 [1, 0, 0, -c2],
                 [0, 1, c2, 0]]
            return A, B
        # With a buggy LAPACK, this can fail for different omega on different
        # machines -- so we need to test several values
        olderr = np.seterr(all='ignore')
        try:
            for k in xrange(100):
                A, B = matrices(omega=k*5./100)
                self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_check_finite(self):
        # Same as test_simple but with the finiteness check disabled.
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a, check_finite=False)
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])

    def test_not_square_error(self):
        """Check that passing a non-square array raises a ValueError."""
        A = np.arange(6).reshape(3,2)
        assert_raises(ValueError, eig, A)

    def test_shape_mismatch(self):
        """Check that passing arrays of with different shapes raises a ValueError."""
        A = identity(2)
        B = np.arange(9.0).reshape(3,3)
        assert_raises(ValueError, eig, A, B)
        assert_raises(ValueError, eig, B, A)
class TestEigBanded(TestCase):
    """Tests for the banded eigensolvers (eig_banded, eigvals_banded) and the
    low-level LAPACK band routines (dsbev/dsbevd/dsbevx, zhbevd/zhbevx,
    dgbtrf/dgbtrs, zgbtrf/zgbtrs), all checked against dense linalg results.
    Band storage follows the LAPACK convention (diagonals stored as rows)."""

    def __init__(self, *args):
        TestCase.__init__(self, *args)
        # Build all band-matrix fixtures once per test instance.
        self.create_bandmat()

    def create_bandmat(self):
        """Create the full matrix `self.fullmat` and
        the corresponding band matrix `self.bandmat`."""
        N = 10
        self.KL = 2   # number of subdiagonals (below the diagonal)
        self.KU = 2   # number of superdiagonals (above the diagonal)
        # symmetric band matrix
        self.sym_mat = (diag(1.0*ones(N))
                        + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)
                        + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # hermitian band matrix
        self.herm_mat = (diag(-1.0*ones(N))
                         + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)
                         + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # general real band matrix
        self.real_mat = (diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # general complex band matrix
        self.comp_mat = (1j*diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # Eigenvalues and -vectors from linalg.eig
        # (sorted ascending; used as the reference in all tests below)
        ew, ev = linalg.eig(self.sym_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_sym_lin = ew[args]
        self.evec_sym_lin = ev[:,args]
        ew, ev = linalg.eig(self.herm_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_herm_lin = ew[args]
        self.evec_herm_lin = ev[:,args]
        # Extract upper bands from symmetric and hermitian band matrices
        # (for use in dsbevd, dsbevx, zhbevd, zhbevx
        #  and their single precision versions)
        LDAB = self.KU + 1
        self.bandmat_sym = zeros((LDAB, N), dtype=float)
        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
        for i in xrange(LDAB):
            # Row LDAB-i-1 holds the i-th superdiagonal (LAPACK 'SB' layout).
            self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)
            self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)
        # Extract bands from general real and complex band matrix
        # (for use in dgbtrf, dgbtrs and their single precision versions)
        # LDAB includes KL extra rows of workspace for the LU fill-in.
        LDAB = 2*self.KL + self.KU + 1
        self.bandmat_real = zeros((LDAB, N), dtype=float)
        self.bandmat_real[2*self.KL,:] = diag(self.real_mat)     # diagonal
        for i in xrange(self.KL):
            # superdiagonals
            self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)
            # subdiagonals
            self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)
        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
        self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat)     # diagonal
        for i in xrange(self.KL):
            # superdiagonals
            self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)
            # subdiagonals
            self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)
        # right-hand side for the linear equation system A*x = b
        self.b = 1.0*arange(N)
        self.bc = self.b * (1 + 1j)

    #####################################################################

    def test_dsbev(self):
        """Compare dsbev eigenvalues and eigenvectors with
           the result of linalg.eig."""
        w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        # Compare magnitudes only: eigenvector signs are arbitrary.
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevd(self):
        """Compare dsbevd eigenvalues and eigenvectors with
           the result of linalg.eig."""
        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevx(self):
        """Compare dsbevx eigenvalues and eigenvectors
           with the result of linalg.eig."""
        N,N = shape(self.sym_mat)
        ## NOTE(review): check the 0.0, 0.0 (vl, vu) and range arguments --
        ## range=2 selects eigenvalues by index 1..N here.
        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_zhbevd(self):
        """Compare zhbevd eigenvalues and eigenvectors
           with the result of linalg.eig."""
        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_zhbevx(self):
        """Compare zhbevx eigenvalues and eigenvectors
           with the result of linalg.eig."""
        N,N = shape(self.herm_mat)
        ## NOTE(review): check the 0.0, 0.0 (vl, vu) and range arguments --
        ## range=2 selects eigenvalues by index 1..N here.
        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_eigvals_banded(self):
        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        # extracting eigenvalues with respect to a value range
        # (the +/- 1e-5 margin keeps the boundary eigenvalues inside)
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        # check_finite=False must not change the result
        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

    def test_eig_banded(self):
        """Compare eigenvalues and eigenvectors of eig_banded
           with those of linalg.eig. """
        w_sym, evec_sym = eig_banded(self.bandmat_sym)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
        w_herm, evec_herm = eig_banded(self.bandmat_herm)
        evec_herm_ = evec_herm[:,argsort(w_herm.real)]
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
                                             select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_ind),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))
        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
                                               select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_ind),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))
        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
                                             select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_val),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
                                               select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_val),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))
        # check_finite=False must not change the result
        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

    def test_dgbtrf(self):
        """Compare dgbtrf LU factorisation with the LU factorisation result
           of linalg.lu."""
        M,N = shape(self.real_mat)
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        # extract matrix u from lu_symm_band
        # (U occupies the diagonal plus KL+KU superdiagonal rows)
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_zgbtrf(self):
        """Compare zgbtrf LU factorisation with the LU factorisation result
           of linalg.lu."""
        M,N = shape(self.comp_mat)
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_dgbtrs(self):
        """Compare dgbtrs solutions for linear equation system  A*x = b
           with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
        y_lin = linalg.solve(self.real_mat, self.b)
        assert_array_almost_equal(y, y_lin)

    def test_zgbtrs(self):
        """Compare zgbtrs solutions for linear equation system  A*x = b
           with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
        y_lin = linalg.solve(self.comp_mat, self.bc)
        assert_array_almost_equal(y, y_lin)
def test_eigh():
    """Nose-style generator: yield one standard and one generalized eigh
    test case for every combination of dtype, overwrite, lower, turbo and
    eigvals-subset parameters."""
    DIM = 6
    dims = (DIM,)
    dtypes = ('f','d','F','D')
    overwrites = (True, False)
    lowers = (True, False)
    turbos = (True, False)
    eigvals_options = (None, (2, DIM-2))
    for dim in dims:
        for typ in dtypes:
            for overwrite in overwrites:
                for turbo in turbos:
                    for eigenvalues in eigvals_options:
                        for lower in lowers:
                            yield (eigenhproblem_standard,
                                   'ordinary',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvalues)
                            yield (eigenhproblem_general,
                                   'general ',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvalues)
def test_eigh_of_sparse():
    # This tests the rejection of inputs that eigh cannot currently handle.
    import scipy.sparse
    sparse_mat = scipy.sparse.identity(2).tocsc()
    wrapped = np.atleast_2d(sparse_mat)
    # Both the raw sparse matrix and its 2-D object wrapper must be rejected.
    assert_raises(ValueError, eigh, sparse_mat)
    assert_raises(ValueError, eigh, wrapped)
def _complex_symrand(dim, dtype):
    """Return a random Hermitian matrix of the given size and dtype:
    a random symmetric real part plus an antisymmetric imaginary part."""
    real_part = symrand(dim)
    skew_source = symrand(dim)
    # triu - tril of a symmetric matrix is antisymmetric, so adding it as
    # the imaginary part keeps the result Hermitian.
    hermitian = real_part + 1j*(triu(skew_source) - tril(skew_source))
    return hermitian.astype(dtype)
def eigenhproblem_standard(desc, dim, dtype,
                           overwrite, lower, turbo,
                           eigenvalues):
    """Solve a standard eigenvalue problem.

    `desc` is a label supplied by the test_eigh generator (unused here);
    `eigenvalues` is the optional (lo, hi) index subset passed to eigh's
    `eigvals` argument. Checks that diag(Z^H A Z) reproduces the
    eigenvalues to the precision appropriate for `dtype`.
    """
    # Build a random Hermitian (complex dtype) or symmetric (real) matrix.
    if iscomplex(empty(1, dtype=dtype)):
        a = _complex_symrand(dim, dtype)
    else:
        a = symrand(dim).astype(dtype)
    # Keep an untouched copy when eigh is allowed to overwrite its input.
    if overwrite:
        a_c = a.copy()
    else:
        a_c = a
    w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)
    assert_dtype_equal(z.dtype, dtype)
    w = w.astype(dtype)
    # Eigenvector property: Z^H A Z is diagonal with the eigenvalues.
    diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real
    assert_array_almost_equal(diag_, w, DIGITS[dtype])
def eigenhproblem_general(desc, dim, dtype,
                          overwrite, lower, turbo,
                          eigenvalues):
    """Solve a generalized eigenvalue problem A z = w B z.

    `desc` is a label supplied by the test_eigh generator (unused here).
    B is made positive definite by adding 2.1 on the diagonal. Checks the
    B-orthonormality of the eigenvectors as well as the eigenvalues.
    """
    if iscomplex(empty(1, dtype=dtype)):
        a = _complex_symrand(dim, dtype)
        b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
    else:
        a = symrand(dim).astype(dtype)
        b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)
    # Keep untouched copies when eigh is allowed to overwrite its inputs.
    if overwrite:
        a_c, b_c = a.copy(), b.copy()
    else:
        a_c, b_c = a, b
    w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
                overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)
    assert_dtype_equal(z.dtype, dtype)
    w = w.astype(dtype)
    # Z^H A Z should be diagonal with the eigenvalues...
    diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real
    assert_array_almost_equal(diag1_, w, DIGITS[dtype])
    # ...and Z^H B Z should be the identity (B-orthonormal eigenvectors).
    diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real
    assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])
def test_eigh_integer():
    """Smoke test: eigh must accept integer matrices for both the standard
    and the generalized eigenvalue problem."""
    mat_a = array([[1,2],[2,7]])
    mat_b = array([[3,1],[1,5]])
    w, z = eigh(mat_a)
    w, z = eigh(mat_a, mat_b)
class TestLU(TestCase):
    """Tests for scipy.linalg.lu / lu_factor on square, rectangular and
    medium-size random matrices (double precision; see TestLUSingle for
    the single-precision variants)."""

    def __init__(self, *args, **kw):
        TestCase.__init__(self, *args, **kw)
        self.a = array([[1,2,3],[1,2,3],[2,5,6]])
        self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
        # Those matrices are more robust to detect problems in permutation
        # matrices than the ones above
        self.b = array([[1,2,3],[4,5,6],[7,8,9]])
        self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])
        # Rectangular matrices
        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        # Medium sizes matrices
        self.med = random((30, 40))
        self.cmed = random((30, 40)) + 1.j * random((30, 40))

    def _test_common(self, data):
        # Both the (p, l, u) and the (pl, u) forms must reconstruct data.
        p,l,u = lu(data)
        assert_array_almost_equal(dot(dot(p,l),u),data)
        pl,u = lu(data,permute_l=1)
        assert_array_almost_equal(dot(pl,u),data)

    # Simple tests
    def test_simple(self):
        self._test_common(self.a)

    def test_simple_complex(self):
        self._test_common(self.ca)

    def test_simple2(self):
        self._test_common(self.b)

    def test_simple2_complex(self):
        self._test_common(self.cb)

    # rectangular matrices tests
    def test_hrectangular(self):
        self._test_common(self.hrect)

    def test_vrectangular(self):
        self._test_common(self.vrect)

    def test_hrectangular_complex(self):
        self._test_common(self.chrect)

    def test_vrectangular_complex(self):
        self._test_common(self.cvrect)

    # Bigger matrices
    def test_medium1(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.med)

    def test_medium1_complex(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.cmed)

    def test_check_finite(self):
        p, l, u = lu(self.a, check_finite=False)
        assert_array_almost_equal(dot(dot(p,l),u), self.a)

    def test_simple_known(self):
        # Ticket #1458
        for order in ['C', 'F']:
            A = np.array([[2, 1],[0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))
class TestLUSingle(TestLU):
    """LU testers for single precision, real and double"""
    def __init__(self, *args, **kw):
        TestLU.__init__(self, *args, **kw)
        # Downcast every fixture built by TestLU from its OWN
        # double-precision counterpart.  The previous code cast chrect,
        # cvrect, med and cmed from the wrong sources (hrect/vrect), which
        # silently replaced the complex rectangular fixtures with real-valued
        # data and the 30x40 medium fixtures with 4x3 ones.
        self.a = self.a.astype(float32)
        self.ca = self.ca.astype(complex64)
        self.b = self.b.astype(float32)
        self.cb = self.cb.astype(complex64)
        self.hrect = self.hrect.astype(float32)
        self.chrect = self.chrect.astype(complex64)
        self.vrect = self.vrect.astype(float32)
        self.cvrect = self.cvrect.astype(complex64)
        self.med = self.med.astype(float32)
        self.cmed = self.cmed.astype(complex64)
class TestLUSolve(TestCase):
    """Check that lu_factor + lu_solve agrees with a direct solve."""

    def setUp(self):
        seed(1234)

    def test_lu(self):
        base = random((10,10))
        rhs = random((10,))
        for order in ['C', 'F']:
            # C- and Fortran-ordered inputs must give identical answers.
            mat = np.array(base, order=order)
            direct = solve(mat, rhs)
            factorization = lu_factor(mat)
            via_lu = lu_solve(factorization, rhs)
            assert_array_almost_equal(direct, via_lu)

    def test_check_finite(self):
        mat = random((10,10))
        rhs = random((10,))
        direct = solve(mat, rhs)
        # Disabling the finiteness check must not change the solution.
        factorization = lu_factor(mat, check_finite=False)
        via_lu = lu_solve(factorization, rhs, check_finite=False)
        assert_array_almost_equal(direct, via_lu)
class TestSVD_GESDD(TestCase):
    """SVD tests driven by the 'gesdd' LAPACK routine; TestSVD_GESVD
    re-runs the same suite with 'gesvd' by overriding self.lapack_driver."""

    def setUp(self):
        self.lapack_driver = 'gesdd'
        seed(1234)

    def test_degenerate(self):
        # Non-string and unknown driver names must be rejected.
        assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
        assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')

    def test_simple(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            # u and vh orthogonal, and u * diag(s) * vh reconstructs a.
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_singular(self):
        # Rank-deficient matrix (rows 0 and 1 are equal).
        a = [[1,2,3],[1,2,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_underdet(self):
        # More columns than rows.
        a = [[1,2,3],[4,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_overdet(self):
        # More rows than columns.
        a = [[1,2],[4,5],[3,4]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(2))
            sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_random(self):
        n = 20
        m = 15
        for i in range(3):
            for a in [random([n,m]),random([m,n])]:
                for full_matrices in (True, False):
                    u,s,vh = svd(a, full_matrices=full_matrices,
                                 lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))
                    assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_complex(self):
        a = [[1,2,3],[1,2j,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            # Complex case: use the conjugate transpose for unitarity.
            assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
            assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_random_complex(self):
        n = 20
        m = 15
        for i in range(3):
            for full_matrices in (True, False):
                for a in [random([n,m]),random([m,n])]:
                    a = a + 1j*random(list(a.shape))
                    u,s,vh = svd(a, full_matrices=full_matrices,
                                 lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
                    # This fails when [m,n]
                    # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_crash_1580(self):
        # Regression test: these sizes/dtypes used to crash LAPACK (gh-1580).
        sizes = [(13, 23), (30, 50), (60, 100)]
        np.random.seed(1234)
        for sz in sizes:
            for dt in [np.float32, np.float64, np.complex64, np.complex128]:
                a = np.random.rand(*sz).astype(dt)
                # should not crash
                svd(a, lapack_driver=self.lapack_driver)

    def test_check_finite(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
        assert_array_almost_equal(dot(transpose(u),u),identity(3))
        assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
        sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
        for i in range(len(s)):
            sigma[i,i] = s[i]
        assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_gh_5039(self):
        # This is a smoke test for https://github.com/scipy/scipy/issues/5039
        #
        # The following is reported to raise "ValueError: On entry to DGESDD
        # parameter number 12 had an illegal value".
        # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
        # This is reported to only show up on LAPACK 3.0.3.
        #
        # The matrix below is taken from the call to
        # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
        b = np.array(
            [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
             [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
             [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
             [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
        svd(b, lapack_driver=self.lapack_driver)
class TestSVD_GESVD(TestSVD_GESDD):
    """Re-run the entire GESDD suite with the 'gesvd' LAPACK driver."""
    def setUp(self):
        seed(1234)
        self.lapack_driver = 'gesvd'
class TestSVDVals(TestCase):
    """Tests for scipy.linalg.svdvals (singular values only, no vectors)."""

    def test_empty(self):
        # Matrices with a zero dimension yield an empty value array.
        for a in [[]], np.empty((2, 0)), np.ones((0, 3)):
            s = svdvals(a)
            assert_equal(s, np.empty(0))

    def test_simple(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        s = svdvals(a)
        assert_(len(s) == 3)
        # Singular values come back sorted in descending order.
        assert_(s[0] >= s[1] >= s[2])

    def test_simple_underdet(self):
        a = [[1,2,3],[4,5,6]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_overdet(self):
        a = [[1,2],[4,5],[3,4]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_complex(self):
        a = [[1,2,3],[1,20,3j],[2,5,6]]
        s = svdvals(a)
        assert_(len(s) == 3)
        assert_(s[0] >= s[1] >= s[2])

    def test_simple_underdet_complex(self):
        a = [[1,2,3],[4,5j,6]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_overdet_complex(self):
        a = [[1,2],[4,5],[3j,4]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_check_finite(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        s = svdvals(a, check_finite=False)
        assert_(len(s) == 3)
        assert_(s[0] >= s[1] >= s[2])

    @dec.slow
    def test_crash_2609(self):
        # Regression test for gh-2609: large input used to crash svdvals.
        np.random.seed(1234)
        a = np.random.rand(1500, 2800)
        # Shouldn't crash:
        svdvals(a)
class TestDiagSVD(TestCase):
    """Tests for scipy.linalg.diagsvd."""
    def test_simple(self):
        # Singular values [1, 0, 0] expanded into a 3x3 sigma matrix.
        expected = [[1,0,0],[0,0,0],[0,0,0]]
        assert_array_almost_equal(diagsvd([1,0,0],3,3), expected)
class TestQR(TestCase):
    """Tests for scipy.linalg.qr and qr_multiply, covering square, trapezoid,
    tall and fat shapes, with and without column pivoting.  (Class continues
    beyond this section.)"""

    def setUp(self):
        seed(1234)

    def test_simple(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        # q orthogonal and q*r reconstructs a.
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_left(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        c = [1, 2, 3]
        # qr_multiply(..., "left") returns Q @ c without forming Q.
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        qc,r2 = qr_multiply(a, identity(3), "left")
        assert_array_almost_equal(q, qc)

    def test_simple_right(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        c = [1, 2, 3]
        # Default mode multiplies from the right: c @ Q.
        qc,r2 = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), qc)
        assert_array_almost_equal(r, r2)
        qc,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(q, qc)

    def test_simple_pivoting(self):
        a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])
        q,r,p = qr(a, pivoting=True)
        # Pivoting orders the diagonal of r by decreasing magnitude.
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        # Pivoted QR of a must match plain QR of the permuted a.
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_left_pivoting(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3]
        qc,r,jpvt = qr_multiply(a, c, "left", True)
        assert_array_almost_equal(dot(q, c), qc)

    def test_simple_right_pivoting(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3]
        qc,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), qc)
    def test_simple_trap(self):
        # Trapezoidal (wide) matrix, full mode.
        a = [[8,2,3],[2,9,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_trap_pivoting(self):
        a = np.asarray([[8,2,3],[2,9,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_tall(self):
        # full version
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_tall_pivoting(self):
        # full version pivoting
        a = np.asarray([[8,2],[2,9],[5,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_tall_e(self):
        # economy version
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode='economic')
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)
        # Economic mode truncates q/r to the smaller dimension.
        assert_equal(q.shape, (3,2))
        assert_equal(r.shape, (2,2))

    def test_simple_tall_e_pivoting(self):
        # economy version pivoting
        a = np.asarray([[8,2],[2,9],[5,3]])
        q,r,p = qr(a, pivoting=True, mode='economic')
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p], mode='economic')
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)
    def test_simple_tall_left(self):
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        # With overwrite_c=True only the first two entries of c are used.
        c = array([1,2,0])
        qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
        assert_array_almost_equal(dot(q, c[:2]), qc)
        qc,r = qr_multiply(a, identity(2), "left")
        assert_array_almost_equal(qc, q)

    def test_simple_tall_left_pivoting(self):
        a = [[8,2],[2,9],[5,3]]
        q,r,jpvt = qr(a, mode="economic", pivoting=True)
        c = [1, 2]
        qc,r,kpvt = qr_multiply(a, c, "left", True)
        # qr_multiply must report the same pivot order as qr.
        assert_array_equal(jpvt, kpvt)
        assert_array_almost_equal(dot(q, c), qc)
        qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
        assert_array_almost_equal(qc, q)

    def test_simple_tall_right(self):
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2, 3]
        cq,r2 = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), cq)
        assert_array_almost_equal(r, r2)
        cq,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(cq, q)

    def test_simple_tall_right_pivoting(self):
        a = [[8,2],[2,9],[5,3]]
        q,r,jpvt = qr(a, pivoting=True, mode="economic")
        c = [1, 2, 3]
        cq,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), cq)
        cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)
        assert_array_almost_equal(cq, q)

    def test_simple_fat(self):
        # full version
        a = [[8,2,5],[2,9,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))

    def test_simple_fat_pivoting(self):
        # full version pivoting
        a = np.asarray([[8,2,5],[2,9,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)
def test_simple_fat_e(self):
# economy version
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2,3],[2,9,5]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_left(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)
assert_array_almost_equal(cq, q)
    def test_simple_complex(self):
        # Complex input: Q must be unitary (conjugate transpose, not plain
        # transpose) and Q @ R must reconstruct a.
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_complex_left(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        qc,r = qr_multiply(a, identity(3), "left")
        assert_array_almost_equal(q, qc)

    def test_simple_complex_right(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), qc)
        qc,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(q, qc)

    def test_simple_tall_complex_left(self):
        a = [[8,2+3j],[2,9],[5+7j,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2+2j]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        # overwrite_c=True: result is written into c's buffer, only the
        # first 2 entries of the length-3 c are meaningful inputs.
        c = array([1,2,0])
        qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
        assert_array_almost_equal(dot(q, c[:2]), qc)
        qc,r = qr_multiply(a, identity(2), "left")
        assert_array_almost_equal(qc, q)

    def test_simple_complex_left_conjugate(self):
        # conjugate=True multiplies with conj(Q) instead of Q.
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, "left", conjugate=True)
        assert_array_almost_equal(dot(q.conjugate(), c), qc)

    def test_simple_complex_tall_left_conjugate(self):
        a = [[3,3+4j],[5,2+2j],[3,2]]
        q,r = qr(a, mode='economic')
        c = [1, 3+4j]
        qc,r = qr_multiply(a, c, "left", conjugate=True)
        assert_array_almost_equal(dot(q.conjugate(), c), qc)

    def test_simple_complex_right_conjugate(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, conjugate=True)
        assert_array_almost_equal(dot(c, q.conjugate()), qc)

    def test_simple_complex_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_complex_left_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3+4j]
        qc,r,jpvt = qr_multiply(a, c, "left", True)
        assert_array_almost_equal(dot(q, c), qc)

    def test_simple_complex_right_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3+4j]
        qc,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), qc)
    def test_random(self):
        # Randomized checks; seeded in setUp so the draws are reproducible
        # and the statement order (RNG consumption) must not change.
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_left(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            c = random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(q, qc)

    def test_random_right(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            c = random([n])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(n))
            assert_array_almost_equal(q, cq)

    def test_random_pivoting(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_tall(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_tall_left(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode="economic")
            c = random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(qc, q)

    def test_random_tall_right(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode="economic")
            c = random([m])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(m))
            assert_array_almost_equal(cq, q)

    def test_random_tall_pivoting(self):
        # full version pivoting
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_tall_e(self):
        # economy version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode='economic')
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)
            assert_equal(q.shape, (m,n))
            assert_equal(r.shape, (n,n))

    def test_random_tall_e_pivoting(self):
        # economy version pivoting
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True, mode='economic')
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            assert_equal(q.shape, (m,n))
            assert_equal(r.shape, (n,n))
            q2,r2 = qr(a[:,p], mode='economic')
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_trap(self):
        # Trapezoidal (wide) random matrix.
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_trap_pivoting(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_complex_left(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            c = random([n])+1j*random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(q, qc)

    def test_random_complex_right(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            c = random([n])+1j*random([n])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(n))
            assert_array_almost_equal(q, cq)

    def test_random_complex_pivoting(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)
    def test_check_finite(self):
        # check_finite=False skips the NaN/Inf input validation; the
        # decomposition of a finite matrix must be unaffected.
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a, check_finite=False)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)
def test_lwork(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
# Get comparison values
q,r = qr(a, lwork=None)
# Test against minimum valid lwork
q2,r2 = qr(a, lwork=3)
assert_array_almost_equal(q2,q)
assert_array_almost_equal(r2,r)
# Test against larger lwork
q3,r3 = qr(a, lwork=10)
assert_array_almost_equal(q3,q)
assert_array_almost_equal(r3,r)
# Test against explicit lwork=-1
q4,r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4,q)
assert_array_almost_equal(r4,r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork':0})
assert_raises(Exception, qr, (a,), {'lwork':2})
class TestRQ(TestCase):
    """Tests for scipy.linalg.rq: A = R @ Q with Q orthogonal/unitary and
    R upper triangular (RQ decomposition)."""

    def setUp(self):
        # Fixed seed so the random-matrix tests are reproducible.
        seed(1234)

    def test_simple(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a)
        assert_array_almost_equal(dot(q, transpose(q)),identity(3))
        assert_array_almost_equal(dot(r,q),a)

    def test_r(self):
        # mode='r' returns only R; it must match the full decomposition.
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a)
        r2 = rq(a, mode='r')
        assert_array_almost_equal(r, r2)

    def test_random(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)

    def test_simple_trap(self):
        # Wide (trapezoidal) input.
        a = [[8,2,3],[2,9,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(r,q),a)

    def test_simple_tall(self):
        a = [[8,2],[2,9],[5,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(r,q),a)

    def test_simple_fat(self):
        a = [[8,2,5],[2,9,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(r,q),a)

    def test_simple_complex(self):
        # Complex input: Q must be unitary (conjugate transpose).
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        r,q = rq(a)
        assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))
        assert_array_almost_equal(dot(r,q),a)

    def test_random_tall(self):
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)

    def test_random_trap(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)

    def test_random_trap_economic(self):
        # Economy mode: q is (m, n), r is (m, m) for a wide input.
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            r,q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q,transpose(q)),identity(m))
            assert_array_almost_equal(dot(r,q),a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))

    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))
            assert_array_almost_equal(dot(r,q),a)

    def test_random_complex_economic(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])+1j*random([m,n])
            r,q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))
            assert_array_almost_equal(dot(r,q),a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))

    def test_check_finite(self):
        # check_finite=False skips input validation for finite data.
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a, check_finite=False)
        assert_array_almost_equal(dot(q, transpose(q)),identity(3))
        assert_array_almost_equal(dot(r,q),a)
# Aliases used by the Schur/Hessenberg tests below.  NOTE: ``any``
# deliberately shadows the builtin with NumPy's element-wise reduction.
# ``sometrue`` was a deprecated alias of ``np.any`` (removed in NumPy 1.25),
# so bind ``np.any`` directly -- behavior is identical.
transp = transpose
any = np.any
class TestSchur(TestCase):
    """Tests for scipy.linalg.schur / rsf2csf.

    Reference matrices below are hard-coded to 3-4 decimals, and the sign
    conventions depend on the underlying LAPACK, so assertions compare with
    limited precision.  ``transp`` and ``any`` are the module-level aliases
    defined just above this class.
    """

    def test_simple(self):
        a = [[8,12,3],[2,9,3],[10,3,6]]
        t,z = schur(a)
        # Z @ T @ Z^H must reconstruct a.
        assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
        tc,zc = schur(a,'complex')
        # output='complex' must actually produce complex factors here
        # (a has complex eigenvalues).
        assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
        assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)
        # rsf2csf converts the real Schur form to the complex one.
        tc2,zc2 = rsf2csf(tc,zc)
        assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)

    def test_sort(self):
        # sort= reorders the Schur form so selected eigenvalues come first;
        # sdim reports how many were selected.
        a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
        s,u,sdim = schur(a,sort='lhp')
        assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],
                                   [-0.1134,-0.8245,0.5544,0.],
                                   [-0.8213,0.1308,0.0265,-0.5547],
                                   [-0.5475,0.0872,0.0177,0.8321]],
                                  u,3)
        assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],
                                   [0.,-0.5000,9.4472,-0.7184],
                                   [0.,0.,1.4142,-0.1456],
                                   [0.,0.,0.,0.5]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='rhp')
        assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                   [-0.4862,0.4930,-0.1434,-0.7071],
                                   [0.6042,0.3944,-0.6924,0.],
                                   [0.4028,0.5986,0.6924,0.]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                   [0.,0.5,6.5809,-3.1870],
                                   [0.,0.,-1.4142,0.9270],
                                   [0.,0.,0.,-0.5]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='iuc')
        assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],
                                   [-0.8321,0.,-0.3814,-0.4028],
                                   [0.,0.7071,-0.5134,0.4862],
                                   [0.,0.7071,0.5134,-0.4862]],
                                  u,3)
        assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],
                                   [0.,0.5000,-3.3191,-14.4130],
                                   [0.,0.,1.4142,2.1573],
                                   [0.,0.,0.,-1.4142]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='ouc')
        assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],
                                   [-0.4862,0.5134,0.7071,0.],
                                   [0.6042,0.5721,0.,-0.5547],
                                   [0.4028,0.3814,0.,0.8321]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],
                                   [0.,-1.4142,3.3191,6.5809],
                                   [0.,0.,-0.5000,0.],
                                   [0.,0.,0.,0.5000]],
                                  s,3)
        assert_equal(2,sdim)
        # A callable sort criterion must behave like the equivalent keyword.
        rhp_function = lambda x: x >= 0.0
        s,u,sdim = schur(a,sort=rhp_function)
        assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                   [-0.4862,0.4930,-0.1434,-0.7071],
                                   [0.6042,0.3944,-0.6924,0.],
                                   [0.4028,0.5986,0.6924,0.]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                   [0.,0.5,6.5809,-3.1870],
                                   [0.,0.,-1.4142,0.9270],
                                   [0.,0.,0.,-0.5]],
                                  s,3)
        assert_equal(2,sdim)

    def test_sort_errors(self):
        # Unknown sort keywords / non-callables must raise ValueError.
        a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
        assert_raises(ValueError, schur, a, sort='unsupported')
        assert_raises(ValueError, schur, a, sort=1)

    def test_check_finite(self):
        a = [[8,12,3],[2,9,3],[10,3,6]]
        t,z = schur(a, check_finite=False)
        assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
class TestHessenberg(TestCase):
    """Tests for scipy.linalg.hessenberg: A = Q @ H @ Q^H with H upper
    Hessenberg (zero below the first subdiagonal)."""

    def test_simple(self):
        a = [[-149, -50,-154],
             [537, 180, 546],
             [-27, -9, -25]]
        # Known reference H (4 decimals) for this classic example.
        h1 = [[-149.0000,42.2037,-156.3165],
              [-537.6783,152.5511,-554.9272],
              [0,0.0728, 2.4489]]
        h,q = hessenberg(a,calc_q=1)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
        assert_array_almost_equal(h,h1,decimal=4)

    def test_simple_complex(self):
        a = [[-149, -50,-154],
             [537, 180j, 546],
             [-27j, -9, -25]]
        h,q = hessenberg(a,calc_q=1)
        # Verify the similarity transform Q^H A Q == H.
        h1 = dot(transp(conj(q)),dot(a,q))
        assert_array_almost_equal(h1,h)

    def test_simple2(self):
        a = [[1,2,3,4,5,6,7],
             [0,2,3,4,6,7,2],
             [0,2,2,3,0,3,2],
             [0,0,2,8,0,0,2],
             [0,3,1,2,0,1,2],
             [0,1,2,3,0,1,0],
             [0,0,0,0,0,1,2]]
        h,q = hessenberg(a,calc_q=1)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)

    def test_simple3(self):
        # Identity with a single off-pattern entry in the last row.
        a = np.eye(3)
        a[-1, 0] = 2
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)

    def test_random(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            h,q = hessenberg(a,calc_q=1)
            assert_array_almost_equal(dot(transp(q),dot(a,q)),h)

    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            h,q = hessenberg(a,calc_q=1)
            h1 = dot(transp(conj(q)),dot(a,q))
            assert_array_almost_equal(h1,h)

    def test_check_finite(self):
        a = [[-149, -50,-154],
             [537, 180, 546],
             [-27, -9, -25]]
        h1 = [[-149.0000,42.2037,-156.3165],
              [-537.6783,152.5511,-554.9272],
              [0,0.0728, 2.4489]]
        h,q = hessenberg(a,calc_q=1, check_finite=False)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
        assert_array_almost_equal(h,h1,decimal=4)

    def test_2x2(self):
        # A 2x2 matrix is already Hessenberg: Q must be the identity and
        # H must equal the input, for both real and complex dtypes.
        a = [[2, 1], [7, 12]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q, np.eye(2))
        assert_array_almost_equal(h, a)
        b = [[2-7j, 1+2j], [7+3j, 12-2j]]
        h2, q2 = hessenberg(b, calc_q=1)
        assert_array_almost_equal(q2, np.eye(2))
        assert_array_almost_equal(h2, b)
class TestQZ(TestCase):
    """Tests for scipy.linalg.qz (generalized Schur decomposition):
    A = Q @ AA @ Z^H, B = Q @ BB @ Z^H with Q, Z orthogonal/unitary."""

    def setUp(self):
        # Fixed seed so the random draws are reproducible.
        seed(12345)

    def test_qz_single(self):
        n = 5
        A = random([n,n]).astype(float32)
        B = random([n,n]).astype(float32)
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        # LAPACK convention: diag(BB) is normalized non-negative.
        assert_(all(diag(BB) >= 0))

    def test_qz_double(self):
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        assert_(all(diag(BB) >= 0))

    def test_qz_complex(self):
        n = 5
        A = random([n,n]) + 1j*random([n,n])
        B = random([n,n]) + 1j*random([n,n])
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))
        # For complex input diag(BB) is normalized real non-negative.
        assert_(all(diag(BB).imag == 0))

    def test_qz_complex64(self):
        # Single-precision complex: looser tolerance (decimal=5).
        n = 5
        A = (random([n,n]) + 1j*random([n,n])).astype(complex64)
        B = (random([n,n]) + 1j*random([n,n])).astype(complex64)
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)
        assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)
        assert_(all(diag(BB) >= 0))
        assert_(all(diag(BB).imag == 0))

    def test_qz_double_complex(self):
        # Real input with output='complex': the reconstruction must be
        # (numerically) real even though the factors are complex.
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B, output='complex')
        aa = dot(dot(Q,AA),Z.conjugate().T)
        assert_array_almost_equal(aa.real, A)
        assert_array_almost_equal(aa.imag, 0)
        bb = dot(dot(Q,BB),Z.conjugate().T)
        assert_array_almost_equal(bb.real, B)
        assert_array_almost_equal(bb.imag, 0)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))

    def test_qz_double_sort(self):
        # qz() itself does not support sorting (that is ordqz's job) and
        # must raise; the ``if False:`` block below preserves the original
        # expected values for if/when sorting is ever (re)enabled.
        # from http://www.nag.com/lapack-ex/node119.html
        # NOTE: These matrices may be ill-conditioned and lead to a
        # seg fault on certain python versions when compiled with
        # sse2 or sse3 older ATLAS/LAPACK binaries for windows
        # A = np.array([[3.9, 12.5, -34.5, -0.5],
        # [ 4.3, 21.5, -47.5, 7.5],
        # [ 4.3, 21.5, -43.5, 3.5],
        # [ 4.4, 26.0, -46.0, 6.0 ]])
        # B = np.array([[ 1.0, 2.0, -3.0, 1.0],
        # [1.0, 3.0, -5.0, 4.0],
        # [1.0, 3.0, -4.0, 3.0],
        # [1.0, 3.0, -4.0, 4.0]])
        A = np.array([[3.9, 12.5, -34.5, 2.5],
                      [4.3, 21.5, -47.5, 7.5],
                      [4.3, 1.5, -43.5, 3.5],
                      [4.4, 6.0, -46.0, 6.0]])
        B = np.array([[1.0, 1.0, -3.0, 1.0],
                      [1.0, 3.0, -5.0, 4.4],
                      [1.0, 2.0, -4.0, 1.0],
                      [1.2, 3.0, -4.0, 4.0]])
        sort = lambda ar,ai,beta: ai == 0
        assert_raises(ValueError, qz, A, B, sort=sort)
        if False:
            AA,BB,Q,Z,sdim = qz(A,B,sort=sort)
            # assert_(sdim == 2)
            assert_(sdim == 4)
            assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
            assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
            # test absolute values bc the sign is ambiguous and might be platform
            # dependent
            assert_array_almost_equal(np.abs(AA), np.abs(np.array(
                [[35.7864, -80.9061, -12.0629, -9.498],
                 [0., 2.7638, -2.3505, 7.3256],
                 [0., 0., 0.6258, -0.0398],
                 [0., 0., 0., -12.8217]])), 4)
            assert_array_almost_equal(np.abs(BB), np.abs(np.array(
                [[4.5324, -8.7878, 3.2357, -3.5526],
                 [0., 1.4314, -2.1894, 0.9709],
                 [0., 0., 1.3126, -0.3468],
                 [0., 0., 0., 0.559]])), 4)
            assert_array_almost_equal(np.abs(Q), np.abs(np.array(
                [[-0.4193, -0.605, -0.1894, -0.6498],
                 [-0.5495, 0.6987, 0.2654, -0.3734],
                 [-0.4973, -0.3682, 0.6194, 0.4832],
                 [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
            assert_array_almost_equal(np.abs(Z), np.abs(np.array(
                [[-0.9471, -0.2971, -0.1217, 0.0055],
                 [-0.0367, 0.1209, 0.0358, 0.9913],
                 [0.3171, -0.9041, -0.2547, 0.1312],
                 [0.0346, 0.2824, -0.9587, 0.0014]])), 4)
        # test absolute values bc the sign is ambiguous and might be platform
        # dependent
        # assert_array_almost_equal(abs(AA), abs(np.array([
        # [3.8009, -69.4505, 50.3135, -43.2884],
        # [0.0000, 9.2033, -0.2001, 5.9881],
        # [0.0000, 0.0000, 1.4279, 4.4453],
        # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
        # assert_array_almost_equal(abs(BB), abs(np.array([
        # [1.9005, -10.2285, 0.8658, -5.2134],
        # [0.0000, 2.3008, 0.7915, 0.4262],
        # [0.0000, 0.0000, 0.8101, 0.0000],
        # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
        # assert_array_almost_equal(abs(Q), abs(np.array([
        # [0.4642, 0.7886, 0.2915, -0.2786],
        # [0.5002, -0.5986, 0.5638, -0.2713],
        # [0.5002, 0.0154, -0.0107, 0.8657],
        # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
        # assert_array_almost_equal(dot(Q,Q.T), eye(4))
        # assert_array_almost_equal(abs(Z), abs(np.array([
        # [0.9961, -0.0014, 0.0887, -0.0026],
        # [0.0057, -0.0404, -0.0938, -0.9948],
        # [0.0626, 0.7194, -0.6908, 0.0363],
        # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
        # assert_array_almost_equal(dot(Z,Z.T), eye(4))

    # def test_qz_complex_sort(self):
    # cA = np.array([
    # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
    # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
    # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
    # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
    # cB = np.array([
    # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
    # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
    # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
    # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
    # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
    # eigenvalues = diag(AAS)/diag(BBS)
    # assert_(all(np.real(eigenvalues[:sdim] < 0)))
    # assert_(all(np.real(eigenvalues[sdim:] > 0)))

    def test_check_finite(self):
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B,check_finite=False)
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        assert_(all(diag(BB) >= 0))
def _make_pos(X):
# the decompositions can have different signs than verified results
return np.sign(X)*X
class TestOrdQZ(TestCase):
    """Tests for scipy.linalg.ordqz (QZ with eigenvalue reordering)."""

    @classmethod
    def setupClass(cls):
        # NOTE(review): nose-style name; unittest spells it setUpClass --
        # confirm which test runner collects this.
        # http://www.nag.com/lapack-ex/node119.html
        cls.A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
                            7.5 + 0.5j],
                           [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
                            -10.5 - 1.5j],
                           [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
                            -7.5 - 3.5j],
                           [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
                            -19.0 - 32.5j]])
        cls.B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
                           [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
                           [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
                           [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
        # http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
        cls.A2 = np.array([[3.9, 12.5, -34.5, -0.5],
                           [4.3, 21.5, -47.5, 7.5],
                           [4.3, 21.5, -43.5, 3.5],
                           [4.4, 26.0, -46.0, 6.0]])
        cls.B2 = np.array([[1, 2, -3, 1],
                           [1, 3, -5, 4],
                           [1, 3, -4, 3],
                           [1, 3, -4, 4]])
        # example with the eigenvalues
        # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
        # 0.61244091
        # thus featuring:
        # * one complex conjugate eigenvalue pair,
        # * one eigenvalue in the lhp
        # * 2 eigenvalues in the unit circle
        # * 2 non-real eigenvalues
        cls.A3 = np.array([[5., 1., 3., 3.],
                           [4., 4., 2., 7.],
                           [7., 4., 1., 3.],
                           [0., 4., 8., 7.]])
        cls.B3 = np.array([[8., 10., 6., 10.],
                           [7., 7., 2., 9.],
                           [9., 1., 6., 6.],
                           [5., 1., 4., 7.]])

    def qz_decomp(self, sort):
        # Decompose all three fixture pairs with the given sort criterion.
        retc = ordqz(self.A1, self.B1, sort=sort)
        ret1 = ordqz(self.A2, self.B2, sort=sort)
        ret2 = ordqz(self.A3, self.B3, sort=sort)
        return retc, ret1, ret2

    def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
        I = np.eye(*A.shape)
        # make sure Q and Z are orthogonal
        assert_array_almost_equal(Q.dot(Q.T.conj()), I)
        assert_array_almost_equal(Z.dot(Z.T.conj()), I)
        # check factorization
        assert_array_almost_equal(Q.dot(AA), A.dot(Z))
        assert_array_almost_equal(Q.dot(BB), B.dot(Z))
        # check shape of AA and BB
        # (AA quasi-upper-triangular: zero below the first subdiagonal;
        #  BB strictly upper triangular)
        assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
        assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
        # check eigenvalues
        for i in range(A.shape[0]):
            # does the current diagonal element belong to a 2-by-2 block
            # that was already checked?
            if i > 0 and A[i, i - 1] != 0:
                continue
            # take care of 2-by-2 blocks
            if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
                evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
                # make sure the pair of complex conjugate eigenvalues
                # is ordered consistently (positive imaginary part first)
                if evals[0].imag < 0:
                    evals = evals[[1, 0]]
                tmp = alpha[i:i + 2]/beta[i:i + 2]
                if tmp[0].imag < 0:
                    tmp = tmp[[1, 0]]
                assert_array_almost_equal(evals, tmp)
            else:
                assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
        # Translate keyword criteria into the equivalent predicate.
        sortfun = sort
        if sortfun == 'lhp':
            sortfun = lambda x, y: (x/y).real < 0
        if sortfun == 'rhp':
            sortfun = lambda x, y: (x/y).real > 0
        if sortfun == 'iuc':
            sortfun = lambda x, y: np.abs(x/y) < 1
        if sortfun == 'ouc':
            sortfun = lambda x, y: np.abs(x/y) > 1
        lastsort = True
        for i in range(A.shape[0]):
            cursort = sortfun(alpha[i], beta[i])
            # once the sorting criterion was not matched all subsequent
            # eigenvalues also shouldn't match
            if not lastsort:
                assert(not cursort)
            lastsort = cursort

    def test_lhp(self):
        retc, ret1, ret2 = self.qz_decomp('lhp')
        self.check(self.A1, self.B1, 'lhp', *retc)
        self.check(self.A2, self.B2, 'lhp', *ret1)
        self.check(self.A3, self.B3, 'lhp', *ret2)

    def test_rhp(self):
        retc, ret1, ret2 = self.qz_decomp('rhp')
        self.check(self.A1, self.B1, 'rhp', *retc)
        self.check(self.A2, self.B2, 'rhp', *ret1)
        self.check(self.A3, self.B3, 'rhp', *ret2)

    def test_iuc(self):
        retc, ret1, ret2 = self.qz_decomp('iuc')
        self.check(self.A1, self.B1, 'iuc', *retc)
        self.check(self.A2, self.B2, 'iuc', *ret1)
        self.check(self.A3, self.B3, 'iuc', *ret2)

    def test_ouc(self):
        retc, ret1, ret2 = self.qz_decomp('ouc')
        self.check(self.A1, self.B1, 'ouc', *retc)
        self.check(self.A2, self.B2, 'ouc', *ret1)
        self.check(self.A3, self.B3, 'ouc', *ret2)

    def test_ref(self):
        # real eigenvalues first (top-left corner)
        sort = lambda x, y: (x/y).imag == 0
        retc, ret1, ret2 = self.qz_decomp(sort)
        self.check(self.A1, self.B1, sort, *retc)
        self.check(self.A2, self.B2, sort, *ret1)
        self.check(self.A3, self.B3, sort, *ret2)

    def test_cef(self):
        # complex eigenvalues first (top-left corner)
        sort = lambda x, y: (x/y).imag != 0
        retc, ret1, ret2 = self.qz_decomp(sort)
        self.check(self.A1, self.B1, sort, *retc)
        self.check(self.A2, self.B2, sort, *ret1)
        self.check(self.A3, self.B3, sort, *ret2)

    def test_diff_input_types(self):
        # Mixed complex/real input pairs must work in either order.
        ret = ordqz(self.A1, self.B2, sort='lhp')
        self.check(self.A1, self.B2, 'lhp', *ret)
        ret = ordqz(self.B2, self.A1, sort='lhp')
        self.check(self.B2, self.A1, 'lhp', *ret)
class TestOrdQZWorkspaceSize(TestCase):
    """Regression tests: ordqz must request a sufficiently large LAPACK
    workspace for *trsen (too-small lwork raised errors / segfaulted)."""

    def setUp(self):
        seed(12345)

    def test_decompose(self):
        # N chosen large enough that an undersized lwork is detected.
        N = 202
        # raises error if lwork parameter to dtrsen is too small
        for ddtype in [np.float32, np.float64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2 < beta**2
            sort = lambda alpha, beta: alpha < beta
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort=sort, output='real')
        # BUG FIX: ``np.complex`` was a deprecated alias of the builtin
        # ``complex`` and was removed in NumPy 1.24; use the explicit
        # fixed-width dtype np.complex128 (the same type) instead.
        for ddtype in [np.complex128, np.complex64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            sort = lambda alpha, beta: alpha < beta
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort=sort, output='complex')

    @dec.slow
    def test_decompose_ouc(self):
        N = 202
        # segfaults if lwork parameter to dtrsen is too small
        for ddtype in [np.float32, np.float64, np.complex128, np.complex64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort='ouc')
class TestDatacopied(TestCase):
    """Tests for the private helper _datacopied, which reports whether
    asarray() had to copy its input (and hence overwrite_a is safe)."""

    def test_datacopied(self):
        # NOTE(review): private import path; in newer scipy this module
        # moved to scipy.linalg._decomp -- confirm against the scipy version
        # this file targets.
        from scipy.linalg.decomp import _datacopied

        # np.matrix is deprecated in modern NumPy but is what this test
        # exercises: asarray(matrix) shares memory (no copy).
        M = matrix([[0,1],[2,3]])
        A = asarray(M)
        L = M.tolist()
        M2 = M.copy()

        class Fake1:
            # Provides data via __array__: asarray() copies.
            def __array__(self):
                return A

        class Fake2:
            # Shares A's buffer via the array interface: no copy.
            __array_interface__ = A.__array_interface__

        F1 = Fake1()
        F2 = Fake2()

        for item, status in [(M, False), (A, False), (L, True),
                             (M2, False), (F1, False), (F2, False)]:
            arr = asarray(item)
            assert_equal(_datacopied(arr, item), status,
                         err_msg=repr(item))
def test_aligned_mem_float():
    """Check linalg works with non-aligned memory"""
    # Allocate 402 bytes of memory (allocated on boundary)
    a = arange(402, dtype=np.uint8)

    # Create an array with boundary offset 2, i.e. deliberately misaligned
    # for the 4-byte float32 dtype.  (The original comment said "offset 4",
    # which did not match the code.)
    z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem():
    """Check linalg works with non-aligned memory"""
    # Back the data with a uint8 buffer so we control the byte offset.
    backing = arange(804, dtype=np.uint8)

    # Reinterpret 100 float64 values starting 4 bytes into the buffer --
    # a deliberate 4-byte misalignment for the 8-byte dtype.
    misaligned = np.frombuffer(backing.data, offset=4, count=100, dtype=float)
    misaligned.shape = (10, 10)

    # eig must handle both orientations of the misaligned array.
    eig(misaligned, overwrite_a=True)
    eig(misaligned.T, overwrite_a=True)
def test_aligned_mem_complex():
    """Check that complex objects don't need to be completely aligned"""
    # Allocate 1608 bytes of memory (allocated on boundary)
    a = zeros(1608, dtype=np.uint8)

    # Create an array with boundary offset 8: aligned for the 8-byte real
    # component but not for the full 16-byte complex128 element.
    z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    # This does not need special handling
    eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
    """Call ``func`` repeatedly, each time with one ndarray argument
    replaced by a misaligned (4-byte-offset) copy, to check that the
    LAPACK wrappers tolerate misaligned input."""
    args = list(args)
    for i in range(len(args)):
        # Work on a fresh copy of the argument list each iteration.
        a = args[:]
        if isinstance(a[i],np.ndarray):
            # Try misaligning a[i]
            aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
            aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)
            aa.shape = a[i].shape
            aa[...] = a[i]
            a[i] = aa
            func(*a,**kwargs)
            # Also exercise the transposed (non-contiguous) layout.
            if len(a[i].shape) > 1:
                a[i] = a[i].T
                func(*a,**kwargs)
@dec.knownfailureif(True, "Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
    # Nose-style generator test: yields one check per (func, args, kwargs)
    # combination.  Marked known-failing -- some entries can segfault.
    M = np.eye(10,dtype=float)
    R = np.arange(100)
    R.shape = 10,10
    S = np.arange(20000,dtype=np.uint8)
    # S is a deliberately misaligned float array (4-byte offset).
    S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
    S.shape = 10, 10
    b = np.ones(10)
    LU, piv = lu_factor(S)
    for (func, args, kwargs) in [
            (eig,(S,),dict(overwrite_a=True)),  # crash
            (eigvals,(S,),dict(overwrite_a=True)),  # no crash
            (lu,(S,),dict(overwrite_a=True)),  # no crash
            (lu_factor,(S,),dict(overwrite_a=True)),  # no crash
            (lu_solve,((LU,piv),b),dict(overwrite_b=True)),
            (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),
            (svd,(M,),dict(overwrite_a=True)),  # no crash
            (svd,(R,),dict(overwrite_a=True)),  # no crash
            (svd,(S,),dict(overwrite_a=True)),  # crash
            (svdvals,(S,),dict()),  # no crash
            (svdvals,(S,),dict(overwrite_a=True)),  # crash
            (cholesky,(M,),dict(overwrite_a=True)),  # no crash
            (qr,(S,),dict(overwrite_a=True)),  # crash
            (rq,(S,),dict(overwrite_a=True)),  # crash
            (hessenberg,(S,),dict(overwrite_a=True)),  # crash
            (schur,(S,),dict(overwrite_a=True)),  # crash
            ]:
        yield check_lapack_misaligned, func, args, kwargs
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
class TestOverwrite(object):
    """Check that the decomposition routines do not overwrite their input
    unless explicitly asked to (via the assert_no_overwrite helper, which
    calls each routine with arrays of the given shapes)."""

    def test_eig(self):
        assert_no_overwrite(eig, [(3,3)])
        assert_no_overwrite(eig, [(3,3), (3,3)])

    def test_eigh(self):
        assert_no_overwrite(eigh, [(3,3)])
        assert_no_overwrite(eigh, [(3,3), (3,3)])

    def test_eig_banded(self):
        assert_no_overwrite(eig_banded, [(3,2)])

    def test_eigvals(self):
        assert_no_overwrite(eigvals, [(3,3)])

    def test_eigvalsh(self):
        assert_no_overwrite(eigvalsh, [(3,3)])

    def test_eigvals_banded(self):
        assert_no_overwrite(eigvals_banded, [(3,2)])

    def test_hessenberg(self):
        assert_no_overwrite(hessenberg, [(3,3)])

    def test_lu_factor(self):
        assert_no_overwrite(lu_factor, [(3,3)])

    def test_lu_solve(self):
        # Factor once, then check the solve step alone.
        x = np.array([[1,2,3], [4,5,6], [7,8,8]])
        xlu = lu_factor(x)
        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])

    def test_lu(self):
        assert_no_overwrite(lu, [(3,3)])

    def test_qr(self):
        assert_no_overwrite(qr, [(3,3)])

    def test_rq(self):
        assert_no_overwrite(rq, [(3,3)])

    def test_schur(self):
        assert_no_overwrite(schur, [(3,3)])

    def test_schur_complex(self):
        # Real input promoted to complex output; restrict input dtypes.
        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],
                            dtypes=[np.float32, np.float64])

    def test_svd(self):
        assert_no_overwrite(svd, [(3,3)])
        assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)])

    def test_svdvals(self):
        assert_no_overwrite(svdvals, [(3,3)])
def _check_orth(n):
X = np.ones((n, 2), dtype=float)
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=1e-10)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean())
@dec.slow
@dec.skipif(np.dtype(np.intp).itemsize < 8, "test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
    """Regression check: orth() must work on a very tall matrix without
    allocating an n-by-n intermediate."""
    # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
    # Keep in mind that @dec.slow tests are likely to be running
    # under configurations that support 4Gb+ memory for tests related to
    # 32 bit overflow.
    n = 10*1000*1000
    try:
        _check_orth(n)
    except MemoryError:
        # Convert to a test failure so the regression is reported, not erred.
        raise AssertionError('memory error perhaps caused by orth regression')
def test_orth():
    """Run the orth() sanity check over a range of row counts."""
    sizes = (1, 2, 3, 10, 100)
    for num_rows in sizes:
        _check_orth(num_rows)
# Allow running this test module directly via the nose-based runner.
if __name__ == "__main__":
    run_module_suite()
| 37.899497 | 115 | 0.520132 | from __future__ import division, print_function, absolute_import
# Usage notes shown by the historical standalone test driver.  The
# triple-quoted text is runtime data and is left byte-identical.
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_decomp.py
"""
import numpy as np
from numpy.testing import (TestCase, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_raises, assert_, assert_allclose,
run_module_suite, dec)
from scipy._lib.six import xrange
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,
eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz)
from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \
dsbev, dsbevd, dsbevx, zhbevd, zhbevx
from scipy.linalg.misc import norm
from numpy import array, transpose, sometrue, diag, ones, linalg, \
argsort, zeros, arange, float32, complex64, dot, conj, identity, \
ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \
asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\
triu, tril
from numpy.random import normal, seed, random
from scipy.linalg._testutils import assert_no_overwrite
DIGITS = {'d':11, 'D':11, 'f':4, 'F':4}
def assert_dtype_equal(act, des):
    """Assert that two dtype-like objects agree.

    Either argument may be an ndarray (its own dtype is used) or anything
    accepted by numpy.dtype().
    """
    def _as_dtype(obj):
        # Arrays contribute their dtype directly; everything else is coerced.
        return obj.dtype if isinstance(obj, ndarray) else dtype(obj)
    act = _as_dtype(act)
    des = _as_dtype(des)
    assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des))
def symrand(dim_or_eigv):
    """Return a random symmetric matrix with a controlled spectrum.

    If dim_or_eigv is an int it gives the dimension and the eigenvalues
    are drawn uniformly from (-1, 1); if it is a 1-D ndarray, it supplies
    the eigenvalues directly.  Raises TypeError otherwise.
    """
    if isinstance(dim_or_eigv, int):
        dim = dim_or_eigv
        eigs = random(dim)*2 - 1
    elif isinstance(dim_or_eigv, ndarray) and len(dim_or_eigv.shape) == 1:
        dim = dim_or_eigv.shape[0]
        eigs = dim_or_eigv
    else:
        raise TypeError("input type not supported.")
    rot = random_rot(dim)
    sym = dot(dot(rot.T.conj(), diag(eigs)), rot)
    # Symmetrize explicitly to remove round-off asymmetry.
    return 0.5*(sym.T + sym)
def random_rot(dim):
    """Return a random dim-by-dim orthogonal matrix with determinant +1,
    built by composing Householder reflections and then fixing the sign
    pattern so the result is a proper rotation."""
    rot = eye(dim)
    signs = ones((dim,))
    for k in range(1, dim):
        # Random reflector acting on the trailing (dim-k+1)-block.
        vec = normal(size=(dim-k+1,))
        signs[k-1] = sign(vec[0])
        vec[0] -= signs[k-1]*sqrt((vec*vec).sum())
        refl = eye(dim-k+1) - 2.*outer(vec, vec)/(vec*vec).sum()
        step = eye(dim)
        step[k-1:, k-1:] = refl
        rot = dot(rot, step)
    # Force det(rot) = +1 by absorbing the accumulated signs.
    signs[-1] = -signs.prod()
    return (signs*rot.T).T
class TestEigVals(TestCase):
    """Tests for eigvals() against analytically known spectra."""

    def test_simple(self):
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        computed = eigvals(mat)
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)

    def test_simple_tr(self):
        # Same matrix, but handed in as a transposed (non-contiguous) view.
        mat = array([[1,2,3],[1,2,3],[2,5,6]],'d')
        mat = transpose(mat).copy()
        mat = transpose(mat)
        computed = eigvals(mat)
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)

    def test_simple_complex(self):
        mat = [[1,2,3],[1,2,3],[2,5,6+1j]]
        computed = eigvals(mat)
        expected = [(9+1j+sqrt(92+6j))/2,
                    0,
                    (9+1j-sqrt(92+6j))/2]
        assert_array_almost_equal(computed, expected)

    def test_check_finite(self):
        # check_finite=False skips the NaN/Inf scan but must give the same
        # answer on clean input.
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        computed = eigvals(mat, check_finite=False)
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(computed, expected)
class TestEig(object):
    """Tests for eig()/eigvals() on standard and generalized problems."""

    def test_simple(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a)
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
        # Left eigenvectors of a real matrix satisfy A^T vl = w vl.
        w,v = eig(a,left=1,right=0)
        for i in range(3):
            assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])

    def test_simple_complex_eig(self):
        a = [[1,2],[-2,1]]
        w,vl,vr = eig(a,left=1,right=1)
        assert_array_almost_equal(w, array([1+2j, 1-2j]))
        for i in range(2):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        for i in range(2):
            # Left eigenvectors satisfy A^H vl = conj(w) vl.
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def test_simple_complex(self):
        a = [[1,2,3],[1,2,3],[2,5,6+1j]]
        w,vl,vr = eig(a,left=1,right=1)
        for i in range(3):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        for i in range(3):
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def _check_gen_eig(self, A, B):
        """Verify generalized eig(A, B): residuals, eigenvalue agreement
        with eigvals(), and unit-norm right eigenvectors."""
        A, B = asarray(A), asarray(B)
        msg = "\n%r\n%r" % (A, B)
        w, vr = eig(A,B)
        wt = eigvals(A,B)
        val1 = dot(A, vr)
        val2 = dot(B, vr) * w
        res = val1 - val2
        for i in range(res.shape[1]):
            # Skip columns whose residual is non-finite (infinite eigenvalues).
            if all(isfinite(res[:, i])):
                assert_array_almost_equal(res[:, i], 0, err_msg=msg)
        assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),
                                  err_msg=msg)
        length = np.empty(len(vr))
        for i in xrange(len(vr)):
            length[i] = norm(vr[:, i])
        assert_array_almost_equal(length, np.ones(length.size), err_msg=msg)

    @dec.knownfailureif(True, "See gh-2254.")
    def test_singular(self):
        # A singular generalized pencil; see gh-2254.
        A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],
                   [27,31,26,21,15], [38,44,44,24,30]))
        B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],
                   [16,25,27,14,23], [24,35,18,21,22]))
        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_falker(self):
        # Quadratic eigenvalue problem linearized to a generalized pencil.
        M = diag(array(([1,0,3])))
        K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
        D = array(([1,-1,0],[-1,1,0],[0,0,0]))
        Z = zeros((3,3))
        I = identity(3)
        A = bmat([[I,Z],[Z,-K]])
        B = bmat([[Z,I],[M,D]])
        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_bad_geneig(self):
        # Ticket #709 (strange return values from DGGEV).
        # BUG fix: the "def matrices(omega):" line of this nested helper had
        # been lost, which left its body (and the name "omega") dangling
        # directly in the method scope and made the loop below a NameError.
        def matrices(omega):
            c1 = -9 + omega**2
            c2 = 2*omega
            A = [[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, c1, 0],
                 [0, 0, 0, c1]]
            B = [[0, 0, 1, 0],
                 [0, 0, 0, 1],
                 [1, 0, 0, -c2],
                 [0, 1, c2, 0]]
            return A, B
        # With a buggy LAPACK, this can fail for different omega on
        # different machines, so several values are tried.
        olderr = np.seterr(all='ignore')
        try:
            for k in xrange(100):
                A, B = matrices(omega=k*5./100)
                self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_check_finite(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a, check_finite=False)
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])

    def test_not_square_error(self):
        """Check that passing a non-square array raises a ValueError."""
        A = np.arange(6).reshape(3,2)
        assert_raises(ValueError, eig, A)

    def test_shape_mismatch(self):
        """Check that passing arrays of with different shapes raises a ValueError."""
        A = identity(2)
        B = np.arange(9.0).reshape(3,3)
        assert_raises(ValueError, eig, A, B)
        assert_raises(ValueError, eig, B, A)
class TestEigBanded(TestCase):
    """Tests for banded eigensolvers (eig_banded / eigvals_banded) and
    the banded LU routines (dgbtrf/zgbtrf, dgbtrs/zgbtrs)."""

    def __init__(self, *args):
        TestCase.__init__(self, *args)
        self.create_bandmat()

    def create_bandmat(self):
        """Build the dense test matrices and their LAPACK band-storage forms."""
        N = 10
        self.KL = 2   # number of subdiagonals
        self.KU = 2   # number of superdiagonals
        # symmetric band matrix
        self.sym_mat = (diag(1.0*ones(N))
                        + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)
                        + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # hermitian band matrix
        self.herm_mat = (diag(-1.0*ones(N))
                         + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)
                         + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # general real band matrix
        self.real_mat = (diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # general complex band matrix
        self.comp_mat = (1j*diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
        # Reference eigenvalues/-vectors from the dense solver, sorted.
        ew, ev = linalg.eig(self.sym_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_sym_lin = ew[args]
        self.evec_sym_lin = ev[:,args]
        ew, ev = linalg.eig(self.herm_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_herm_lin = ew[args]
        self.evec_herm_lin = ev[:,args]
        # Upper-band storage (LAPACK *sb/*hb convention).
        LDAB = self.KU + 1
        self.bandmat_sym = zeros((LDAB, N), dtype=float)
        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
        for i in xrange(LDAB):
            self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)
            self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)
        # General band storage (LAPACK *gb convention; extra rows for fill-in).
        LDAB = 2*self.KL + self.KU + 1
        self.bandmat_real = zeros((LDAB, N), dtype=float)
        self.bandmat_real[2*self.KL,:] = diag(self.real_mat)
        for i in xrange(self.KL):
            self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)
            self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)
        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
        self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat)
        for i in xrange(self.KL):
            self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)
            self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)
        # right-hand sides for the gbtrs solves
        self.b = 1.0*arange(N)
        self.bc = self.b * (1 + 1j)

    def test_eigvals_banded(self):
        """Compare eigvals_banded() against the sorted dense eigenvalues."""
        # BUG fix: the "def test_eigvals_banded(self):" header and the first
        # half of this method had been lost, leaving an orphaned body with
        # undefined ind1/ind2/v_lower directly inside the class.  Restored
        # following the upstream version of this test file.
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

    def test_eig_banded(self):
        """Compare eig_banded() eigenvalues and vectors with the dense solver."""
        w_sym, evec_sym = eig_banded(self.bandmat_sym)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
        w_herm, evec_herm = eig_banded(self.bandmat_herm)
        evec_herm_ = evec_herm[:,argsort(w_herm.real)]
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
                                select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_ind),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))
        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
                                select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_ind),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))
        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
                                select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_val),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
                                select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_val),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))
        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

    def test_dgbtrf(self):
        """Banded real LU: the U factor must match dense lu()."""
        M,N = shape(self.real_mat)
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        # Extract the upper band (including fill-in) from the factorization.
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_zgbtrf(self):
        """Banded complex LU: the U factor must match dense lu()."""
        M,N = shape(self.comp_mat)
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_dgbtrs(self):
        """Banded real solve must agree with a dense solve."""
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
        y_lin = linalg.solve(self.real_mat, self.b)
        assert_array_almost_equal(y, y_lin)

    def test_zgbtrs(self):
        """Banded complex solve must agree with a dense solve."""
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
        y_lin = linalg.solve(self.comp_mat, self.bc)
        assert_array_almost_equal(y, y_lin)
def test_eigh():
    """Nose generator: exercise eigh() over dtypes, overwrite/lower/turbo
    flags and eigenvalue sub-ranges, for both the standard and the
    generalized eigenvalue problem."""
    DIM = 6
    # Parameter grid; 'eigvals' of None means "all eigenvalues", otherwise
    # an inclusive (lo, hi) index sub-range is requested.
    v = {'dim': (DIM,),
         'dtype': ('f','d','F','D'),
         'overwrite': (True, False),
         'lower': (True, False),
         'turbo': (True, False),
         'eigvals': (None, (2, DIM-2))}
    for dim in v['dim']:
        for typ in v['dtype']:
            for overwrite in v['overwrite']:
                for turbo in v['turbo']:
                    for eigenvalues in v['eigvals']:
                        for lower in v['lower']:
                            yield (eigenhproblem_standard,
                                   'ordinary',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvalues)
                            yield (eigenhproblem_general,
                                   'general ',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvalues)
def test_eigh_of_sparse():
    """eigh() must reject sparse-matrix input with a ValueError."""
    import scipy.sparse
    sparse_eye = scipy.sparse.identity(2).tocsc()
    wrapped = np.atleast_2d(sparse_eye)
    assert_raises(ValueError, eigh, sparse_eye)
    assert_raises(ValueError, eigh, wrapped)
def _complex_symrand(dim, dtype):
    """Return a random complex Hermitian-like matrix of the given dtype:
    a random symmetric real part plus an antisymmetric imaginary part."""
    real_part, imag_source = symrand(dim), symrand(dim)
    # Upper triangle minus lower triangle gives the antisymmetric part.
    mat = real_part + 1j*(triu(imag_source) - tril(imag_source))
    return mat.astype(dtype)
def eigenhproblem_standard(desc, dim, dtype,
                           overwrite, lower, turbo,
                           eigenvalues):
    """Solve a standard eigenproblem with eigh() and verify that the
    returned vectors diagonalize the input matrix to the returned values."""
    if iscomplex(empty(1, dtype=dtype)):
        a = _complex_symrand(dim, dtype)
    else:
        a = symrand(dim).astype(dtype)
    # Keep a pristine copy when eigh() is allowed to clobber its input.
    a_ref = a.copy() if overwrite else a
    w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)
    assert_dtype_equal(z.dtype, dtype)
    w = w.astype(dtype)
    spectrum = diag(dot(z.T.conj(), dot(a_ref, z))).real
    assert_array_almost_equal(spectrum, w, DIGITS[dtype])
def eigenhproblem_general(desc, dim, dtype,
                          overwrite, lower, turbo,
                          eigenvalues):
    """Solve a generalized eigenproblem A z = w B z with eigh() and verify
    the vectors diagonalize A (to w) and B (to the identity)."""
    if iscomplex(empty(1, dtype=dtype)):
        a = _complex_symrand(dim, dtype)
        b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
    else:
        a = symrand(dim).astype(dtype)
        b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)
    # Keep pristine copies when eigh() may clobber its inputs.
    if overwrite:
        a_ref, b_ref = a.copy(), b.copy()
    else:
        a_ref, b_ref = a, b
    w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
                overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)
    assert_dtype_equal(z.dtype, dtype)
    w = w.astype(dtype)
    spectrum_a = diag(dot(z.T.conj(), dot(a_ref, z))).real
    assert_array_almost_equal(spectrum_a, w, DIGITS[dtype])
    # B-orthonormality: z^H B z should be the identity.
    spectrum_b = diag(dot(z.T.conj(), dot(b_ref, z))).real
    assert_array_almost_equal(spectrum_b, ones(spectrum_b.shape[0]),
                              DIGITS[dtype])
def test_eigh_integer():
    """Smoke test: eigh() accepts integer matrices, both for the standard
    and the generalized problem."""
    sym = array([[1,2],[2,7]])
    pos_def = array([[3,1],[1,5]])
    vals, vecs = eigh(sym)
    vals, vecs = eigh(sym, pos_def)
class TestLU(TestCase):
    """Tests for lu()/lu_factor() on square, rectangular, real and complex
    matrices.  The data attributes are reused by TestLUSingle, and the
    medium-size matrices draw from the (unseeded) global RNG stream, so
    the construction order matters."""
    def __init__(self, *args, **kw):
        TestCase.__init__(self, *args, **kw)
        self.a = array([[1,2,3],[1,2,3],[2,5,6]])
        self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
        self.b = array([[1,2,3],[4,5,6],[7,8,9]])
        self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])
        # Rectangular matrices (horizontal/vertical) and complex variants.
        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        # Medium-size random matrices.
        self.med = random((30, 40))
        self.cmed = random((30, 40)) + 1.j * random((30, 40))
    def _test_common(self, data):
        # Reconstruction check for both output conventions of lu().
        p,l,u = lu(data)
        assert_array_almost_equal(dot(dot(p,l),u),data)
        pl,u = lu(data,permute_l=1)
        assert_array_almost_equal(dot(pl,u),data)
    def test_simple(self):
        self._test_common(self.a)
    def test_simple_complex(self):
        self._test_common(self.ca)
    def test_simple2(self):
        self._test_common(self.b)
    def test_simple2_complex(self):
        self._test_common(self.cb)
    def test_hrectangular(self):
        self._test_common(self.hrect)
    def test_vrectangular(self):
        self._test_common(self.vrect)
    def test_hrectangular_complex(self):
        self._test_common(self.chrect)
    def test_vrectangular_complex(self):
        self._test_common(self.cvrect)
    def test_medium1(self):
        self._test_common(self.med)
    def test_medium1_complex(self):
        self._test_common(self.cmed)
    def test_check_finite(self):
        p, l, u = lu(self.a, check_finite=False)
        assert_array_almost_equal(dot(dot(p,l),u), self.a)
    def test_simple_known(self):
        # Exact LU factors of a fixed matrix, in both memory orders.
        for order in ['C', 'F']:
            A = np.array([[2, 1],[0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))
class TestLUSingle(TestLU):
    """Repeat the TestLU suite with single-precision data."""
    def __init__(self, *args, **kw):
        TestLU.__init__(self, *args, **kw)
        self.a = self.a.astype(float32)
        self.ca = self.ca.astype(complex64)
        self.b = self.b.astype(float32)
        self.cb = self.cb.astype(complex64)
        self.hrect = self.hrect.astype(float32)
        # BUG fix: the following four attributes were previously down-cast
        # from the WRONG source (self.hrect / self.vrect instead of
        # themselves), so the complex-rectangular cases lost their imaginary
        # parts and the "medium" cases silently tested a tiny 4x3 matrix.
        self.chrect = self.chrect.astype(complex64)
        self.vrect = self.vrect.astype(float32)
        self.cvrect = self.cvrect.astype(complex64)
        self.med = self.med.astype(float32)
        self.cmed = self.cmed.astype(complex64)
class TestLUSolve(TestCase):
    """Check that lu_factor()/lu_solve() agree with a direct solve()."""

    def setUp(self):
        # Deterministic test data.
        seed(1234)

    def test_lu(self):
        base = random((10,10))
        rhs = random((10,))
        # Exercise both C- and Fortran-ordered input.
        for order in ['C', 'F']:
            mat = np.array(base, order=order)
            direct = solve(mat, rhs)
            factored = lu_factor(mat)
            via_lu = lu_solve(factored, rhs)
            assert_array_almost_equal(direct, via_lu)

    def test_check_finite(self):
        mat = random((10,10))
        rhs = random((10,))
        direct = solve(mat, rhs)
        factored = lu_factor(mat, check_finite=False)
        via_lu = lu_solve(factored, rhs, check_finite=False)
        assert_array_almost_equal(direct, via_lu)
class TestSVD_GESDD(TestCase):
    """SVD tests run through the 'gesdd' LAPACK driver.  TestSVD_GESVD
    subclasses this and re-runs everything with the 'gesvd' driver."""
    def setUp(self):
        self.lapack_driver = 'gesdd'
        seed(1234)
    def test_degenerate(self):
        # Invalid lapack_driver arguments must be rejected up front.
        assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
        assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
    def test_simple(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            # Rebuild the full sigma matrix and check reconstruction.
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_simple_singular(self):
        # Rank-deficient input (two identical rows).
        a = [[1,2,3],[1,2,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_simple_underdet(self):
        # More columns than rows.
        a = [[1,2,3],[4,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_simple_overdet(self):
        # More rows than columns.
        a = [[1,2],[4,5],[3,4]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(2))
            sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_random(self):
        n = 20
        m = 15
        for i in range(3):
            for a in [random([n,m]),random([m,n])]:
                for full_matrices in (True, False):
                    u,s,vh = svd(a, full_matrices=full_matrices,
                                 lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))
                    assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    # NOTE: this loop variable shadows the outer "i"; harmless
                    # here because the outer index is not used afterwards.
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_simple_complex(self):
        a = [[1,2,3],[1,2j,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices,
                         lapack_driver=self.lapack_driver)
            assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
            assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_random_complex(self):
        n = 20
        m = 15
        for i in range(3):
            for full_matrices in (True, False):
                for a in [random([n,m]),random([m,n])]:
                    a = a + 1j*random(list(a.shape))
                    u,s,vh = svd(a, full_matrices=full_matrices,
                                 lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_crash_1580(self):
        # Regression smoke test (gh/Ticket 1580): assorted sizes and dtypes.
        sizes = [(13, 23), (30, 50), (60, 100)]
        np.random.seed(1234)
        for sz in sizes:
            for dt in [np.float32, np.float64, np.complex64, np.complex128]:
                a = np.random.rand(*sz).astype(dt)
                svd(a, lapack_driver=self.lapack_driver)
    def test_check_finite(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
        assert_array_almost_equal(dot(transpose(u),u),identity(3))
        assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
        sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
        for i in range(len(s)):
            sigma[i,i] = s[i]
        assert_array_almost_equal(dot(dot(u,sigma),vh),a)
    def test_gh_5039(self):
        # Smoke test for gh-5039: LAPACK reported "On entry to DGESDD
        # parameter number 12 had an illegal value".
        b = np.array(
            [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
             [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
             [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
             [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
        svd(b, lapack_driver=self.lapack_driver)
class TestSVD_GESVD(TestSVD_GESDD):
    """Re-run the entire SVD test suite with the 'gesvd' LAPACK driver."""
    def setUp(self):
        # Same deterministic seed as the parent; only the driver differs.
        seed(1234)
        self.lapack_driver = 'gesvd'
class TestSVDVals(TestCase):
    """Tests for svdvals(): output length, descending order, and
    degenerate (empty / large) inputs."""

    def test_empty(self):
        # Empty matrices must produce an empty singular-value array.
        for mat in [[]], np.empty((2, 0)), np.ones((0, 3)):
            vals = svdvals(mat)
            assert_equal(vals, np.empty(0))

    def test_simple(self):
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        vals = svdvals(mat)
        assert_(len(vals) == 3)
        assert_(vals[0] >= vals[1] >= vals[2])

    def test_simple_underdet(self):
        mat = [[1,2,3],[4,5,6]]
        vals = svdvals(mat)
        assert_(len(vals) == 2)
        assert_(vals[0] >= vals[1])

    def test_simple_overdet(self):
        mat = [[1,2],[4,5],[3,4]]
        vals = svdvals(mat)
        assert_(len(vals) == 2)
        assert_(vals[0] >= vals[1])

    def test_simple_complex(self):
        mat = [[1,2,3],[1,20,3j],[2,5,6]]
        vals = svdvals(mat)
        assert_(len(vals) == 3)
        assert_(vals[0] >= vals[1] >= vals[2])

    def test_simple_underdet_complex(self):
        mat = [[1,2,3],[4,5j,6]]
        vals = svdvals(mat)
        assert_(len(vals) == 2)
        assert_(vals[0] >= vals[1])

    def test_simple_overdet_complex(self):
        mat = [[1,2],[4,5],[3j,4]]
        vals = svdvals(mat)
        assert_(len(vals) == 2)
        assert_(vals[0] >= vals[1])

    def test_check_finite(self):
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        vals = svdvals(mat, check_finite=False)
        assert_(len(vals) == 3)
        assert_(vals[0] >= vals[1] >= vals[2])

    @dec.slow
    def test_crash_2609(self):
        # Smoke test for gh-2609: large-matrix svdvals crash.
        np.random.seed(1234)
        big = np.random.rand(1500, 2800)
        svdvals(big)
class TestDiagSVD(TestCase):
    """Check diagsvd() expands singular values into a rectangular Sigma."""
    def test_simple(self):
        expected = [[1,0,0],[0,0,0],[0,0,0]]
        assert_array_almost_equal(diagsvd([1,0,0],3,3), expected)
class TestQR(TestCase):
def setUp(self):
seed(1234)
def test_simple(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_left(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r2 = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_right(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_left_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_right_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_simple_trap(self):
a = [[8,2,3],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
def test_simple_trap_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall(self):
# full version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_tall_pivoting(self):
# full version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_e(self):
# economy version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (3,2))
assert_equal(r.shape, (2,2))
def test_simple_tall_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_left(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_tall_left_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,kpvt = qr_multiply(a, c, "left", True)
assert_array_equal(jpvt, kpvt)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_tall_right(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2, 3]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(3))
assert_array_almost_equal(cq, q)
def test_simple_tall_right_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2, 3]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_fat(self):
# full version
a = [[8,2,5],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_pivoting(self):
# full version pivoting
a = np.asarray([[8,2,5],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_e(self):
# economy version
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2,3],[2,9,5]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_left(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_complex(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_complex_left(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_complex_right(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_tall_complex_left(self):
a = [[8,2+3j],[2,9],[5+7j,3]]
q,r = qr(a, mode="economic")
c = [1, 2+2j]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_complex_left_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_tall_left_conjugate(self):
a = [[3,3+4j],[5,2+2j],[3,2]]
q,r = qr(a, mode='economic')
c = [1, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_right_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, conjugate=True)
assert_array_almost_equal(dot(c, q.conjugate()), qc)
def test_simple_complex_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_complex_left_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_complex_right_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_left(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_right(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_tall_left(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(qc, q)
def test_random_tall_right(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([m])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(m))
assert_array_almost_equal(cq, q)
def test_random_tall_pivoting(self):
# full version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall_e(self):
# economy version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
def test_random_tall_e_pivoting(self):
# economy version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_trap(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_trap_pivoting(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_complex_left(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_complex_right(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_complex_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a, check_finite=False)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_lwork(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
# Get comparison values
q,r = qr(a, lwork=None)
# Test against minimum valid lwork
q2,r2 = qr(a, lwork=3)
assert_array_almost_equal(q2,q)
assert_array_almost_equal(r2,r)
# Test against larger lwork
q3,r3 = qr(a, lwork=10)
assert_array_almost_equal(q3,q)
assert_array_almost_equal(r3,r)
# Test against explicit lwork=-1
q4,r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4,q)
assert_array_almost_equal(r4,r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork':0})
assert_raises(Exception, qr, (a,), {'lwork':2})
class TestRQ(TestCase):
    """Tests for scipy.linalg.rq: A = R @ Q with Q orthogonal/unitary."""

    def setUp(self):
        seed(1234)

    def test_simple(self):
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a)
        assert_array_almost_equal(dot(q, transpose(q)), identity(3))
        assert_array_almost_equal(dot(r, q), a)

    def test_r(self):
        # mode='r' returns only the R factor; it must match the full call.
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a)
        r2 = rq(a, mode='r')
        assert_array_almost_equal(r, r2)

    def test_random(self):
        n = 20
        for _ in range(2):
            a = random([n, n])
            r, q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)), identity(n))
            assert_array_almost_equal(dot(r, q), a)

    def test_simple_trap(self):
        a = [[8, 2, 3], [2, 9, 3]]
        r, q = rq(a)
        assert_array_almost_equal(dot(transpose(q), q), identity(3))
        assert_array_almost_equal(dot(r, q), a)

    def test_simple_tall(self):
        a = [[8, 2], [2, 9], [5, 3]]
        r, q = rq(a)
        assert_array_almost_equal(dot(transpose(q), q), identity(2))
        assert_array_almost_equal(dot(r, q), a)

    def test_simple_fat(self):
        a = [[8, 2, 5], [2, 9, 3]]
        r, q = rq(a)
        assert_array_almost_equal(dot(transpose(q), q), identity(3))
        assert_array_almost_equal(dot(r, q), a)

    def test_simple_complex(self):
        a = [[3, 3 + 4j, 5], [5, 2, 2 + 7j], [3, 2, 7]]
        r, q = rq(a)
        assert_array_almost_equal(dot(q, conj(transpose(q))), identity(3))
        assert_array_almost_equal(dot(r, q), a)

    def test_random_tall(self):
        m, n = 200, 100
        for _ in range(2):
            a = random([m, n])
            r, q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)), identity(n))
            assert_array_almost_equal(dot(r, q), a)

    def test_random_trap(self):
        m, n = 100, 200
        for _ in range(2):
            a = random([m, n])
            r, q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)), identity(n))
            assert_array_almost_equal(dot(r, q), a)

    def test_random_trap_economic(self):
        m, n = 100, 200
        for _ in range(2):
            a = random([m, n])
            r, q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q, transpose(q)), identity(m))
            assert_array_almost_equal(dot(r, q), a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))

    def test_random_complex(self):
        n = 20
        for _ in range(2):
            a = random([n, n]) + 1j * random([n, n])
            r, q = rq(a)
            assert_array_almost_equal(dot(q, conj(transpose(q))), identity(n))
            assert_array_almost_equal(dot(r, q), a)

    def test_random_complex_economic(self):
        m, n = 100, 200
        for _ in range(2):
            a = random([m, n]) + 1j * random([m, n])
            r, q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q, conj(transpose(q))), identity(m))
            assert_array_almost_equal(dot(r, q), a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))

    def test_check_finite(self):
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a, check_finite=False)
        assert_array_almost_equal(dot(q, transpose(q)), identity(3))
        assert_array_almost_equal(dot(r, q), a)
# Short aliases used by the Schur/Hessenberg tests below.  `sometrue` was a
# long-deprecated alias of `numpy.any` and was removed in NumPy 2.0, so bind
# the canonical function directly (behavior is identical); the module-level
# shadowing of the builtin `any` is intentional and preserved for the tests.
transp = transpose
any = np.any
class TestSchur(TestCase):
    """Tests for scipy.linalg.schur and rsf2csf."""

    def test_simple(self):
        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
        t, z = schur(a)
        assert_array_almost_equal(dot(dot(z, t), transp(conj(z))), a)
        # Complex Schur form must actually be complex and still reconstruct a.
        tc, zc = schur(a, 'complex')
        assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
        assert_array_almost_equal(dot(dot(zc, tc), transp(conj(zc))), a)
        # Converting the real form to complex form must also reconstruct a.
        tc2, zc2 = rsf2csf(tc, zc)
        assert_array_almost_equal(dot(dot(zc2, tc2), transp(conj(zc2))), a)

    def test_sort(self):
        a = [[4., 3., 1., -1.], [-4.5, -3.5, -1., 1.],
             [9., 6., -4., 4.5], [6., 4., -3., 3.5]]

        # left-half-plane eigenvalues first
        s, u, sdim = schur(a, sort='lhp')
        assert_array_almost_equal([[0.1134, 0.5436, 0.8316, 0.],
                                   [-0.1134, -0.8245, 0.5544, 0.],
                                   [-0.8213, 0.1308, 0.0265, -0.5547],
                                   [-0.5475, 0.0872, 0.0177, 0.8321]],
                                  u, 3)
        assert_array_almost_equal([[-1.4142, 0.1456, -11.5816, -7.7174],
                                   [0., -0.5000, 9.4472, -0.7184],
                                   [0., 0., 1.4142, -0.1456],
                                   [0., 0., 0., 0.5]],
                                  s, 3)
        assert_equal(2, sdim)

        # right-half-plane eigenvalues first
        s, u, sdim = schur(a, sort='rhp')
        assert_array_almost_equal([[0.4862, -0.4930, 0.1434, -0.7071],
                                   [-0.4862, 0.4930, -0.1434, -0.7071],
                                   [0.6042, 0.3944, -0.6924, 0.],
                                   [0.4028, 0.5986, 0.6924, 0.]],
                                  u, 3)
        assert_array_almost_equal([[1.4142, -0.9270, 4.5368, -14.4130],
                                   [0., 0.5, 6.5809, -3.1870],
                                   [0., 0., -1.4142, 0.9270],
                                   [0., 0., 0., -0.5]],
                                  s, 3)
        assert_equal(2, sdim)

        # inside-unit-circle eigenvalues first
        s, u, sdim = schur(a, sort='iuc')
        assert_array_almost_equal([[0.5547, 0., -0.5721, -0.6042],
                                   [-0.8321, 0., -0.3814, -0.4028],
                                   [0., 0.7071, -0.5134, 0.4862],
                                   [0., 0.7071, 0.5134, -0.4862]],
                                  u, 3)
        assert_array_almost_equal([[-0.5000, 0.0000, -6.5809, -4.0974],
                                   [0., 0.5000, -3.3191, -14.4130],
                                   [0., 0., 1.4142, 2.1573],
                                   [0., 0., 0., -1.4142]],
                                  s, 3)
        assert_equal(2, sdim)

        # outside-unit-circle eigenvalues first
        s, u, sdim = schur(a, sort='ouc')
        assert_array_almost_equal([[0.4862, -0.5134, 0.7071, 0.],
                                   [-0.4862, 0.5134, 0.7071, 0.],
                                   [0.6042, 0.5721, 0., -0.5547],
                                   [0.4028, 0.3814, 0., 0.8321]],
                                  u, 3)
        assert_array_almost_equal([[1.4142, -2.1573, 14.4130, 4.0974],
                                   [0., -1.4142, 3.3191, 6.5809],
                                   [0., 0., -0.5000, 0.],
                                   [0., 0., 0., 0.5000]],
                                  s, 3)
        assert_equal(2, sdim)

        # a user-supplied callable must behave like the 'rhp' keyword
        rhp_function = lambda x: x >= 0.0
        s, u, sdim = schur(a, sort=rhp_function)
        assert_array_almost_equal([[0.4862, -0.4930, 0.1434, -0.7071],
                                   [-0.4862, 0.4930, -0.1434, -0.7071],
                                   [0.6042, 0.3944, -0.6924, 0.],
                                   [0.4028, 0.5986, 0.6924, 0.]],
                                  u, 3)
        assert_array_almost_equal([[1.4142, -0.9270, 4.5368, -14.4130],
                                   [0., 0.5, 6.5809, -3.1870],
                                   [0., 0., -1.4142, 0.9270],
                                   [0., 0., 0., -0.5]],
                                  s, 3)
        assert_equal(2, sdim)

    def test_sort_errors(self):
        a = [[4., 3., 1., -1.], [-4.5, -3.5, -1., 1.],
             [9., 6., -4., 4.5], [6., 4., -3., 3.5]]
        assert_raises(ValueError, schur, a, sort='unsupported')
        assert_raises(ValueError, schur, a, sort=1)

    def test_check_finite(self):
        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
        t, z = schur(a, check_finite=False)
        assert_array_almost_equal(dot(dot(z, t), transp(conj(z))), a)
class TestHessenberg(TestCase):
    """Tests for scipy.linalg.hessenberg: H = Q^H A Q, H upper Hessenberg."""

    def test_simple(self):
        a = [[-149, -50, -154],
             [537, 180, 546],
             [-27, -9, -25]]
        # reference values (4 decimals)
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)
        assert_array_almost_equal(h, h1, decimal=4)

    def test_simple_complex(self):
        a = [[-149, -50, -154],
             [537, 180j, 546],
             [-27j, -9, -25]]
        h, q = hessenberg(a, calc_q=1)
        h1 = dot(transp(conj(q)), dot(a, q))
        assert_array_almost_equal(h1, h)

    def test_simple2(self):
        a = [[1, 2, 3, 4, 5, 6, 7],
             [0, 2, 3, 4, 6, 7, 2],
             [0, 2, 2, 3, 0, 3, 2],
             [0, 0, 2, 8, 0, 0, 2],
             [0, 3, 1, 2, 0, 1, 2],
             [0, 1, 2, 3, 0, 1, 0],
             [0, 0, 0, 0, 0, 1, 2]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)

    def test_simple3(self):
        # near-identity matrix with a single subdiagonal disturbance
        a = np.eye(3)
        a[-1, 0] = 2
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)

    def test_random(self):
        n = 20
        for _ in range(2):
            a = random([n, n])
            h, q = hessenberg(a, calc_q=1)
            assert_array_almost_equal(dot(transp(q), dot(a, q)), h)

    def test_random_complex(self):
        n = 20
        for _ in range(2):
            a = random([n, n]) + 1j * random([n, n])
            h, q = hessenberg(a, calc_q=1)
            h1 = dot(transp(conj(q)), dot(a, q))
            assert_array_almost_equal(h1, h)

    def test_check_finite(self):
        a = [[-149, -50, -154],
             [537, 180, 546],
             [-27, -9, -25]]
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1, check_finite=False)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)
        assert_array_almost_equal(h, h1, decimal=4)

    def test_2x2(self):
        # A 2x2 matrix is already Hessenberg: expect identity Q, unchanged H.
        a = [[2, 1], [7, 12]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q, np.eye(2))
        assert_array_almost_equal(h, a)

        b = [[2 - 7j, 1 + 2j], [7 + 3j, 12 - 2j]]
        h2, q2 = hessenberg(b, calc_q=1)
        assert_array_almost_equal(q2, np.eye(2))
        assert_array_almost_equal(h2, b)
class TestQZ(TestCase):
    """Tests for scipy.linalg.qz (generalized Schur decomposition)."""

    def setUp(self):
        seed(12345)

    def test_qz_single(self):
        n = 5
        A = random([n, n]).astype(float32)
        B = random([n, n]).astype(float32)
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(dot(dot(Q, AA), Z.T), A)
        assert_array_almost_equal(dot(dot(Q, BB), Z.T), B)
        assert_array_almost_equal(dot(Q, Q.T), eye(n))
        assert_array_almost_equal(dot(Z, Z.T), eye(n))
        assert_(all(diag(BB) >= 0))

    def test_qz_double(self):
        n = 5
        A = random([n, n])
        B = random([n, n])
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(dot(dot(Q, AA), Z.T), A)
        assert_array_almost_equal(dot(dot(Q, BB), Z.T), B)
        assert_array_almost_equal(dot(Q, Q.T), eye(n))
        assert_array_almost_equal(dot(Z, Z.T), eye(n))
        assert_(all(diag(BB) >= 0))

    def test_qz_complex(self):
        n = 5
        A = random([n, n]) + 1j * random([n, n])
        B = random([n, n]) + 1j * random([n, n])
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(dot(dot(Q, AA), Z.conjugate().T), A)
        assert_array_almost_equal(dot(dot(Q, BB), Z.conjugate().T), B)
        assert_array_almost_equal(dot(Q, Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z, Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))
        assert_(all(diag(BB).imag == 0))

    def test_qz_complex64(self):
        n = 5
        A = (random([n, n]) + 1j * random([n, n])).astype(complex64)
        B = (random([n, n]) + 1j * random([n, n])).astype(complex64)
        AA, BB, Q, Z = qz(A, B)
        # single precision: relax the tolerance to 5 decimals
        assert_array_almost_equal(dot(dot(Q, AA), Z.conjugate().T), A, decimal=5)
        assert_array_almost_equal(dot(dot(Q, BB), Z.conjugate().T), B, decimal=5)
        assert_array_almost_equal(dot(Q, Q.conjugate().T), eye(n), decimal=5)
        assert_array_almost_equal(dot(Z, Z.conjugate().T), eye(n), decimal=5)
        assert_(all(diag(BB) >= 0))
        assert_(all(diag(BB).imag == 0))

    def test_qz_double_complex(self):
        # real input, complex output: imaginary parts must vanish
        n = 5
        A = random([n, n])
        B = random([n, n])
        AA, BB, Q, Z = qz(A, B, output='complex')
        aa = dot(dot(Q, AA), Z.conjugate().T)
        assert_array_almost_equal(aa.real, A)
        assert_array_almost_equal(aa.imag, 0)
        bb = dot(dot(Q, BB), Z.conjugate().T)
        assert_array_almost_equal(bb.real, B)
        assert_array_almost_equal(bb.imag, 0)
        assert_array_almost_equal(dot(Q, Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z, Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))

    def test_qz_double_sort(self):
        # from http://www.nag.com/lapack-ex/node119.html
        # NOTE: These matrices may be ill-conditioned and lead to a
        # seg fault on certain python versions when compiled with
        # sse2 or sse3 older ATLAS/LAPACK binaries for windows
        # A = np.array([[3.9, 12.5, -34.5, -0.5],
        #               [ 4.3, 21.5, -47.5, 7.5],
        #               [ 4.3, 21.5, -43.5, 3.5],
        #               [ 4.4, 26.0, -46.0, 6.0 ]])
        # B = np.array([[ 1.0, 2.0, -3.0, 1.0],
        #               [1.0, 3.0, -5.0, 4.0],
        #               [1.0, 3.0, -4.0, 3.0],
        #               [1.0, 3.0, -4.0, 4.0]])
        A = np.array([[3.9, 12.5, -34.5, 2.5],
                      [4.3, 21.5, -47.5, 7.5],
                      [4.3, 1.5, -43.5, 3.5],
                      [4.4, 6.0, -46.0, 6.0]])
        B = np.array([[1.0, 1.0, -3.0, 1.0],
                      [1.0, 3.0, -5.0, 4.4],
                      [1.0, 2.0, -4.0, 1.0],
                      [1.2, 3.0, -4.0, 4.0]])
        sort = lambda ar, ai, beta: ai == 0
        # sorting is not implemented in qz(); it must refuse the argument
        assert_raises(ValueError, qz, A, B, sort=sort)
        if False:
            # disabled until qz() grows sort support; kept for reference
            AA, BB, Q, Z, sdim = qz(A, B, sort=sort)
            # assert_(sdim == 2)
            assert_(sdim == 4)
            assert_array_almost_equal(dot(dot(Q, AA), Z.T), A)
            assert_array_almost_equal(dot(dot(Q, BB), Z.T), B)

            # test absolute values bc the sign is ambiguous and
            # might be platform dependent
            assert_array_almost_equal(np.abs(AA), np.abs(np.array(
                [[35.7864, -80.9061, -12.0629, -9.498],
                 [0., 2.7638, -2.3505, 7.3256],
                 [0., 0., 0.6258, -0.0398],
                 [0., 0., 0., -12.8217]])), 4)
            assert_array_almost_equal(np.abs(BB), np.abs(np.array(
                [[4.5324, -8.7878, 3.2357, -3.5526],
                 [0., 1.4314, -2.1894, 0.9709],
                 [0., 0., 1.3126, -0.3468],
                 [0., 0., 0., 0.559]])), 4)
            assert_array_almost_equal(np.abs(Q), np.abs(np.array(
                [[-0.4193, -0.605, -0.1894, -0.6498],
                 [-0.5495, 0.6987, 0.2654, -0.3734],
                 [-0.4973, -0.3682, 0.6194, 0.4832],
                 [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
            assert_array_almost_equal(np.abs(Z), np.abs(np.array(
                [[-0.9471, -0.2971, -0.1217, 0.0055],
                 [-0.0367, 0.1209, 0.0358, 0.9913],
                 [0.3171, -0.9041, -0.2547, 0.1312],
                 [0.0346, 0.2824, -0.9587, 0.0014]])), 4)

        # test absolute values bc the sign is ambiguous and might be platform
        # dependent
        # assert_array_almost_equal(abs(AA), abs(np.array([
        #                [3.8009, -69.4505, 50.3135, -43.2884],
        #                [0.0000, 9.2033, -0.2001, 5.9881],
        #                [0.0000, 0.0000, 1.4279, 4.4453],
        #                [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
        # assert_array_almost_equal(abs(BB), abs(np.array([
        #                [1.9005, -10.2285, 0.8658, -5.2134],
        #                [0.0000, 2.3008, 0.7915, 0.4262],
        #                [0.0000, 0.0000, 0.8101, 0.0000],
        #                [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
        # assert_array_almost_equal(abs(Q), abs(np.array([
        #                [0.4642, 0.7886, 0.2915, -0.2786],
        #                [0.5002, -0.5986, 0.5638, -0.2713],
        #                [0.5002, 0.0154, -0.0107, 0.8657],
        #                [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
        # assert_array_almost_equal(dot(Q,Q.T), eye(4))
        # assert_array_almost_equal(abs(Z), abs(np.array([
        #                [0.9961, -0.0014, 0.0887, -0.0026],
        #                [0.0057, -0.0404, -0.0938, -0.9948],
        #                [0.0626, 0.7194, -0.6908, 0.0363],
        #                [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
        # assert_array_almost_equal(dot(Z,Z.T), eye(4))

    # def test_qz_complex_sort(self):
    #    cA = np.array([
    #      [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
    #      [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
    #      [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
    #      [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
    #    cB = np.array([
    #      [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
    #      [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
    #      [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
    #      [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
    #    AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
    #    eigenvalues = diag(AAS)/diag(BBS)
    #    assert_(all(np.real(eigenvalues[:sdim] < 0)))
    #    assert_(all(np.real(eigenvalues[sdim:] > 0)))

    def test_check_finite(self):
        n = 5
        A = random([n, n])
        B = random([n, n])
        AA, BB, Q, Z = qz(A, B, check_finite=False)
        assert_array_almost_equal(dot(dot(Q, AA), Z.T), A)
        assert_array_almost_equal(dot(dot(Q, BB), Z.T), B)
        assert_array_almost_equal(dot(Q, Q.T), eye(n))
        assert_array_almost_equal(dot(Z, Z.T), eye(n))
        assert_(all(diag(BB) >= 0))
def _make_pos(X):
# the decompositions can have different signs than verified results
return np.sign(X)*X
class TestOrdQZ(TestCase):
@classmethod
def setupClass(cls):
# http://www.nag.com/lapack-ex/node119.html
cls.A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
7.5 + 0.5j],
[-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
-10.5 - 1.5j],
[4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
-7.5 - 3.5j],
[5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
-19.0 - 32.5j]])
cls.B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
[0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
[1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
[0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
# http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
cls.A2 = np.array([[3.9, 12.5, -34.5, -0.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 21.5, -43.5, 3.5],
[4.4, 26.0, -46.0, 6.0]])
cls.B2 = np.array([[1, 2, -3, 1],
[1, 3, -5, 4],
[1, 3, -4, 3],
[1, 3, -4, 4]])
# example with the eigenvalues
# -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
# 0.61244091
# thus featuring:
# * one complex conjugate eigenvalue pair,
# * one eigenvalue in the lhp
# * 2 eigenvalues in the unit circle
# * 2 non-real eigenvalues
cls.A3 = np.array([[5., 1., 3., 3.],
[4., 4., 2., 7.],
[7., 4., 1., 3.],
[0., 4., 8., 7.]])
cls.B3 = np.array([[8., 10., 6., 10.],
[7., 7., 2., 9.],
[9., 1., 6., 6.],
[5., 1., 4., 7.]])
def qz_decomp(self, sort):
retc = ordqz(self.A1, self.B1, sort=sort)
ret1 = ordqz(self.A2, self.B2, sort=sort)
ret2 = ordqz(self.A3, self.B3, sort=sort)
return retc, ret1, ret2
def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
I = np.eye(*A.shape)
# make sure Q and Z are orthogonal
assert_array_almost_equal(Q.dot(Q.T.conj()), I)
assert_array_almost_equal(Z.dot(Z.T.conj()), I)
# check factorization
assert_array_almost_equal(Q.dot(AA), A.dot(Z))
assert_array_almost_equal(Q.dot(BB), B.dot(Z))
# check shape of AA and BB
assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
# check eigenvalues
for i in range(A.shape[0]):
# does the current diagonal element belong to a 2-by-2 block
# that was already checked?
if i > 0 and A[i, i - 1] != 0:
continue
# take care of 2-by-2 blocks
if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
# make sure the pair of complex conjugate eigenvalues
# is ordered consistently (positive imaginary part first)
if evals[0].imag < 0:
evals = evals[[1, 0]]
tmp = alpha[i:i + 2]/beta[i:i + 2]
if tmp[0].imag < 0:
tmp = tmp[[1, 0]]
assert_array_almost_equal(evals, tmp)
else:
assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
sortfun = sort
if sortfun == 'lhp':
sortfun = lambda x, y: (x/y).real < 0
if sortfun == 'rhp':
sortfun = lambda x, y: (x/y).real > 0
if sortfun == 'iuc':
sortfun = lambda x, y: np.abs(x/y) < 1
if sortfun == 'ouc':
sortfun = lambda x, y: np.abs(x/y) > 1
lastsort = True
for i in range(A.shape[0]):
cursort = sortfun(alpha[i], beta[i])
# once the sorting criterion was not matched all subsequent
# eigenvalues also shouldn't match
if not lastsort:
assert(not cursort)
lastsort = cursort
def test_lhp(self):
retc, ret1, ret2 = self.qz_decomp('lhp')
self.check(self.A1, self.B1, 'lhp', *retc)
self.check(self.A2, self.B2, 'lhp', *ret1)
self.check(self.A3, self.B3, 'lhp', *ret2)
def test_rhp(self):
retc, ret1, ret2 = self.qz_decomp('rhp')
self.check(self.A1, self.B1, 'rhp', *retc)
self.check(self.A2, self.B2, 'rhp', *ret1)
self.check(self.A3, self.B3, 'rhp', *ret2)
def test_iuc(self):
retc, ret1, ret2 = self.qz_decomp('iuc')
self.check(self.A1, self.B1, 'iuc', *retc)
self.check(self.A2, self.B2, 'iuc', *ret1)
self.check(self.A3, self.B3, 'iuc', *ret2)
def test_ouc(self):
retc, ret1, ret2 = self.qz_decomp('ouc')
self.check(self.A1, self.B1, 'ouc', *retc)
self.check(self.A2, self.B2, 'ouc', *ret1)
self.check(self.A3, self.B3, 'ouc', *ret2)
def test_ref(self):
sort = lambda x, y: (x/y).imag == 0
retc, ret1, ret2 = self.qz_decomp(sort)
self.check(self.A1, self.B1, sort, *retc)
self.check(self.A2, self.B2, sort, *ret1)
self.check(self.A3, self.B3, sort, *ret2)
def test_cef(self):
sort = lambda x, y: (x/y).imag != 0
retc, ret1, ret2 = self.qz_decomp(sort)
self.check(self.A1, self.B1, sort, *retc)
self.check(self.A2, self.B2, sort, *ret1)
self.check(self.A3, self.B3, sort, *ret2)
def test_diff_input_types(self):
ret = ordqz(self.A1, self.B2, sort='lhp')
self.check(self.A1, self.B2, 'lhp', *ret)
ret = ordqz(self.B2, self.A1, sort='lhp')
self.check(self.B2, self.A1, 'lhp', *ret)
class TestOrdQZWorkspaceSize(TestCase):
    """Regression tests: ordqz must size its LAPACK workspace correctly for
    problems large enough to exceed the minimal workspace estimate."""

    def setUp(self):
        seed(12345)

    def test_decompose(self):
        N = 202
        # raises error if lwork parameter to dtrsen is too small
        for ddtype in [np.float32, np.float64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            sort = lambda alpha, beta: alpha < beta
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort=sort, output='real')
        # `np.complex` (a plain alias of the builtin `complex`, i.e.
        # complex128) was removed in NumPy 1.24; use the builtin instead.
        for ddtype in [complex, np.complex64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            sort = lambda alpha, beta: alpha < beta
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort=sort,
                                              output='complex')

    @dec.slow
    def test_decompose_ouc(self):
        N = 202
        # casting all dtypes through the 'ouc' sort criterion
        for ddtype in [np.float32, np.float64, complex, np.complex64]:
            A = random((N, N)).astype(ddtype)
            B = random((N, N)).astype(ddtype)
            [S, T, alpha, beta, U, V] = ordqz(A, B, sort='ouc')
class TestDatacopied(TestCase):
    """_datacopied must report True only when asarray() made a fresh copy."""

    def test_datacopied(self):
        from scipy.linalg.decomp import _datacopied

        M = matrix([[0, 1], [2, 3]])
        A = asarray(M)
        L = M.tolist()
        M2 = M.copy()

        class Fake1:
            # asarray() goes through __array__ and reuses A's buffer
            def __array__(self):
                return A

        class Fake2:
            # shares A's memory through the array interface
            __array_interface__ = A.__array_interface__

        F1 = Fake1()
        F2 = Fake2()

        for item, status in [(M, False), (A, False), (L, True),
                             (M2, False), (F1, False), (F2, False)]:
            arr = asarray(item)
            assert_equal(_datacopied(arr, item), status,
                         err_msg=repr(item))
def test_aligned_mem_float():
    """eig() must cope with float32 data whose pointer is misaligned.

    A byte buffer viewed at offset 2 guarantees the 4-byte float32
    elements do not start on an aligned address.
    """
    buf = arange(402, dtype=np.uint8)
    z = np.frombuffer(buf.data, offset=2, count=100, dtype=float32)
    z.shape = 10, 10
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem():
    """eig() must cope with float64 data offset 4 bytes from alignment."""
    buf = arange(804, dtype=np.uint8)
    z = np.frombuffer(buf.data, offset=4, count=100, dtype=float)
    z.shape = 10, 10
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
    """eig() must cope with complex128 data offset 8 bytes from alignment."""
    buf = zeros(1608, dtype=np.uint8)
    z = np.frombuffer(buf.data, offset=8, count=100, dtype=complex)
    z.shape = 10, 10
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
    """Call `func` repeatedly, replacing each ndarray argument in turn with a
    deliberately misaligned copy (and, for 2-D arrays, its transpose)."""
    args = list(args)
    for i in range(len(args)):
        a = args[:]
        if isinstance(a[i], np.ndarray):
            # Rebuild the argument inside a byte buffer shifted by 4 bytes so
            # its data pointer is misaligned for the dtype.
            aa = np.zeros(a[i].size * a[i].dtype.itemsize + 8, dtype=np.uint8)
            aa = np.frombuffer(aa.data, offset=4, count=a[i].size,
                               dtype=a[i].dtype)
            aa.shape = a[i].shape
            aa[...] = a[i]
            a[i] = aa
            func(*a, **kwargs)
            if len(a[i].shape) > 1:
                a[i] = a[i].T
                func(*a, **kwargs)
@dec.knownfailureif(True, "Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
    """nose-style generator: run each LAPACK wrapper on misaligned inputs."""
    M = np.eye(10, dtype=float)
    R = np.arange(100)
    R.shape = 10, 10
    # S shares a uint8 buffer at offset 4, so its doubles are misaligned.
    S = np.arange(20000, dtype=np.uint8)
    S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
    S.shape = 10, 10
    b = np.ones(10)
    LU, piv = lu_factor(S)
    for func, args, kwargs in [
            (eig, (S,), dict(overwrite_a=True)),
            (eigvals, (S,), dict(overwrite_a=True)),
            (lu, (S,), dict(overwrite_a=True)),
            (lu_factor, (S,), dict(overwrite_a=True)),
            (lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
            (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
            (svd, (M,), dict(overwrite_a=True)),
            (svd, (R,), dict(overwrite_a=True)),
            (svd, (S,), dict(overwrite_a=True)),
            (svdvals, (S,), dict()),
            (svdvals, (S,), dict(overwrite_a=True)),
            (cholesky, (M,), dict(overwrite_a=True)),
            (qr, (S,), dict(overwrite_a=True)),
            (rq, (S,), dict(overwrite_a=True)),
            (hessenberg, (S,), dict(overwrite_a=True)),
            (schur, (S,), dict(overwrite_a=True)),
    ]:
        yield check_lapack_misaligned, func, args, kwargs
class TestOverwrite(object):
    """Each decomposition must leave its inputs untouched unless an
    overwrite_* flag is explicitly set."""

    def test_eig(self):
        assert_no_overwrite(eig, [(3, 3)])
        assert_no_overwrite(eig, [(3, 3), (3, 3)])

    def test_eigh(self):
        assert_no_overwrite(eigh, [(3, 3)])
        assert_no_overwrite(eigh, [(3, 3), (3, 3)])

    def test_eig_banded(self):
        assert_no_overwrite(eig_banded, [(3, 2)])

    def test_eigvals(self):
        assert_no_overwrite(eigvals, [(3, 3)])

    def test_eigvalsh(self):
        assert_no_overwrite(eigvalsh, [(3, 3)])

    def test_eigvals_banded(self):
        assert_no_overwrite(eigvals_banded, [(3, 2)])

    def test_hessenberg(self):
        assert_no_overwrite(hessenberg, [(3, 3)])

    def test_lu_factor(self):
        assert_no_overwrite(lu_factor, [(3, 3)])

    def test_lu_solve(self):
        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]])
        xlu = lu_factor(x)
        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])

    def test_lu(self):
        assert_no_overwrite(lu, [(3, 3)])

    def test_qr(self):
        assert_no_overwrite(qr, [(3, 3)])

    def test_rq(self):
        assert_no_overwrite(rq, [(3, 3)])

    def test_schur(self):
        assert_no_overwrite(schur, [(3, 3)])

    def test_schur_complex(self):
        # complex output from real input: restrict to the real dtypes
        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)],
                            dtypes=[np.float32, np.float64])

    def test_svd(self):
        assert_no_overwrite(svd, [(3, 3)])
        assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)])

    def test_svdvals(self):
        assert_no_overwrite(svdvals, [(3, 3)])
def _check_orth(n):
X = np.ones((n, 2), dtype=float)
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=1e-10)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean())
@dec.slow
@dec.skipif(np.dtype(np.intp).itemsize < 8, "test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
    """orth() on a 10-million-row matrix must not exhaust memory
    (regression guard)."""
    n = 10 * 1000 * 1000
    try:
        _check_orth(n)
    except MemoryError:
        raise AssertionError('memory error perhaps caused by orth regression')
def test_orth():
    # Exercise orth() across a range of sizes, including the trivial ones.
    for n in (1, 2, 3, 10, 100):
        _check_orth(n)
if __name__ == "__main__":
    # Allow running this test module as a script (legacy numpy.testing runner).
    run_module_suite()
| true | true |
f722e64671d91a1f0426d6380585220c6acfa0e5 | 326 | py | Python | tests/forms.py | tooreht/django-jsonsuit | 5b47c1f3ce55f0b60b6e3ae2568cc098feb1564f | [
"MIT"
] | 5 | 2017-06-02T16:26:16.000Z | 2018-12-28T14:55:59.000Z | tests/forms.py | tooreht/django-jsonsuit | 5b47c1f3ce55f0b60b6e3ae2568cc098feb1564f | [
"MIT"
] | 6 | 2017-07-08T18:19:54.000Z | 2022-03-23T06:15:05.000Z | tests/forms.py | tooreht/django-jsonsuit | 5b47c1f3ce55f0b60b6e3ae2568cc098feb1564f | [
"MIT"
] | 5 | 2017-12-19T17:31:08.000Z | 2021-04-19T11:14:16.000Z | # -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
from django import forms
from jsonsuit.widgets import JSONSuit, ReadonlyJSONSuit
class TestForm(forms.Form):
    """Form whose JSON payload field is edited through the JSONSuit widget."""

    stats = forms.CharField(widget=JSONSuit)
class ReadonlyTestForm(forms.Form):
    """Form whose JSON payload field is rendered read-only via
    ReadonlyJSONSuit."""

    stats = forms.CharField(widget=ReadonlyJSONSuit)
| 21.733333 | 56 | 0.788344 |
from __future__ import unicode_literals, absolute_import
from django import forms
from jsonsuit.widgets import JSONSuit, ReadonlyJSONSuit
class TestForm(forms.Form):
    """Form whose JSON payload field is edited through the JSONSuit widget."""

    stats = forms.CharField(widget=JSONSuit)
class ReadonlyTestForm(forms.Form):
    """Form whose JSON payload field is rendered read-only via
    ReadonlyJSONSuit."""

    stats = forms.CharField(widget=ReadonlyJSONSuit)
| true | true |
f722e6787c4846eb96a6e33a04187732b2fa50a3 | 6,441 | py | Python | examples/python/native/inception.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | examples/python/native/inception.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | examples/python/native/inception.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | from flexflow.core import *
from flexflow.keras.datasets import cifar10
from accuracy import ModelAccuracy
from PIL import Image
def InceptionA(ffmodel, input, pool_features):
    """Inception-v3 "A" block: four parallel branches concatenated along the
    channel dimension (axis 1).

    Branches: 1x1 conv; 1x1 -> 5x5 conv; 1x1 -> 3x3 -> 3x3 conv; and
    3x3 average pool -> 1x1 conv producing `pool_features` channels.
    conv2d/pool2d arguments are presumed to be (tensor, out_channels,
    kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w[, pool type]) --
    confirm against the FlexFlow API.
    """
    branch1 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)

    branch2 = ffmodel.conv2d(input, 48, 1, 1, 1, 1, 0, 0)
    branch2 = ffmodel.conv2d(branch2, 64, 5, 5, 1, 1, 2, 2)

    branch3 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)
    branch3 = ffmodel.conv2d(branch3, 96, 3, 3, 1, 1, 1, 1)
    branch3 = ffmodel.conv2d(branch3, 96, 3, 3, 1, 1, 1, 1)

    branch4 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
    branch4 = ffmodel.conv2d(branch4, pool_features, 1, 1, 1, 1, 0, 0)

    return ffmodel.concat([branch1, branch2, branch3, branch4], 1)
def InceptionB(ffmodel, input):
    """Inception-v3 "B" block (spatial reduction, stride-2 branches).

    Branches: 3x3 stride-2 conv; 1x1 -> 3x3 -> 3x3 stride-2 convs; and a
    3x3 stride-2 max-style pool2d (default pool type); concatenated on the
    channel axis.
    """
    branch1 = ffmodel.conv2d(input, 384, 3, 3, 2, 2, 0, 0)

    branch2 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)
    branch2 = ffmodel.conv2d(branch2, 96, 3, 3, 1, 1, 1, 1)
    branch2 = ffmodel.conv2d(branch2, 96, 3, 3, 2, 2, 0, 0)

    branch3 = ffmodel.pool2d(input, 3, 3, 2, 2, 0, 0)

    return ffmodel.concat([branch1, branch2, branch3], 1)
def InceptionC(ffmodel, input, channels):
    """Inception 'C' module with factorized 7x7 branches.

    Builds four parallel branches from *input* and concatenates them along
    dimension 1.  `channels` sets the width of the intermediate factorized
    convolutions (128/160/192 in the standard Inception-v3 recipe).
    """
    t1 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
    # 1x1 reduction then the 1x7 / 7x1 factorized pair.
    t2 = ffmodel.conv2d(input, channels, 1, 1, 1, 1, 0, 0)
    t2 = ffmodel.conv2d(t2, channels, 1, 7, 1, 1, 0, 3)
    t2 = ffmodel.conv2d(t2, 192, 7, 1, 1, 1, 3, 0)
    # Deeper factorized stack of five convolutions.
    t3 = ffmodel.conv2d(input, channels, 1, 1, 1, 1, 0, 0)
    t3 = ffmodel.conv2d(t3, channels, 7, 1, 1, 1, 3, 0)
    t3 = ffmodel.conv2d(t3, channels, 1, 7, 1, 1, 0, 3)
    t3 = ffmodel.conv2d(t3, channels, 7, 1, 1, 1, 3, 0)
    t3 = ffmodel.conv2d(t3, 192, 1, 7, 1, 1, 0, 3)
    # Pooling branch.
    t4 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
    t4 = ffmodel.conv2d(t4, 192, 1, 1, 1, 1, 0, 0)
    output = ffmodel.concat([t1, t2, t3, t4], 1)
    return output
def InceptionD(ffmodel, input):
    """Inception 'D' (grid reduction) module: three parallel branches
    concatenated along dimension 1."""
    t1 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
    t1 = ffmodel.conv2d(t1, 320, 3, 3, 2, 2, 0, 0)
    # Factorized 7x7 pair ahead of the reducing 3x3 convolution.
    t2 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
    t2 = ffmodel.conv2d(t2, 192, 1, 7, 1, 1, 0, 3)
    t2 = ffmodel.conv2d(t2, 192, 7, 1, 1, 1, 3, 0)
    t2 = ffmodel.conv2d(t2, 192, 3, 3, 2, 2, 0, 0)
    # Pooling branch.
    t3 = ffmodel.pool2d(input, 3, 3, 2, 2, 0, 0)
    output = ffmodel.concat([t1, t2, t3], 1)
    return output
def InceptionE(ffmodel, input):
    """Inception 'E' module: six branch outputs concatenated along dim 1.

    Two of the branches (from t2i and t3i) fan out into parallel 1x3 / 3x1
    convolutions, which is why six tensors are concatenated.
    """
    t1 = ffmodel.conv2d(input, 320, 1, 1, 1, 1, 0, 0)
    # Shared 1x1 reduction with two parallel factorized heads.
    t2i = ffmodel.conv2d(input, 384, 1, 1, 1, 1, 0, 0)
    t2 = ffmodel.conv2d(t2i, 384, 1, 3, 1, 1, 0, 1)
    t3 = ffmodel.conv2d(t2i, 384, 3, 1, 1, 1, 1, 0)
    # Deeper shared stem, again with two parallel factorized heads.
    t3i = ffmodel.conv2d(input, 448, 1, 1, 1, 1, 0, 0)
    t3i = ffmodel.conv2d(t3i, 384, 3, 3, 1, 1, 1, 1)
    t4 = ffmodel.conv2d(t3i, 384, 1, 3, 1, 1, 0, 1)
    t5 = ffmodel.conv2d(t3i, 384, 3, 1, 1, 1, 1, 0)
    # Pooling branch.
    t6 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
    t6 = ffmodel.conv2d(t6, 192, 1, 1, 1, 1, 0, 0)
    output = ffmodel.concat([t1, t2, t3, t4, t5, t6], 1)
    return output
def inception():
    """Build Inception-v3 with the FlexFlow Python API, train it on CIFAR-10
    images resized to 299x299, and report epoch timing/throughput."""
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    # Input tensor: NCHW, 3-channel 299x299 images (Inception-v3 input size).
    dims_input = [ffconfig.batch_size, 3, 299, 299]
    input = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)

    # Stem convolutions and pooling ahead of the inception modules.
    t = ffmodel.conv2d(input, 32, 3, 3, 2, 2, 0, 0)
    t = ffmodel.conv2d(t, 32, 3, 3, 1, 1, 0, 0)
    t = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1)
    t = ffmodel.pool2d(t, 3, 3, 2, 2, 0, 0)
    t = ffmodel.conv2d(t, 80, 1, 1, 1, 1, 0, 0)
    t = ffmodel.conv2d(t, 192, 3, 3, 1, 1, 1, 1)
    t = ffmodel.pool2d(t, 3, 3, 2, 2, 0, 0)
    # Standard Inception-v3 module stack.
    t = InceptionA(ffmodel, t, 32)
    t = InceptionA(ffmodel, t, 64)
    t = InceptionA(ffmodel, t, 64)
    t = InceptionB(ffmodel, t)
    t = InceptionC(ffmodel, t, 128)
    t = InceptionC(ffmodel, t, 160)
    t = InceptionC(ffmodel, t, 160)
    t = InceptionC(ffmodel, t, 192)
    t = InceptionD(ffmodel, t)
    t = InceptionE(ffmodel, t)
    t = InceptionE(ffmodel, t)
    # Global average pooling + classifier head (10 CIFAR-10 classes).
    t = ffmodel.pool2d(t, 8, 8, 1, 1, 0, 0, PoolType.POOL_AVG)
    t = ffmodel.flat(t)
    t = ffmodel.dense(t, 10)
    t = ffmodel.softmax(t)

    ffoptimizer = SGDOptimizer(ffmodel, 0.001)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
    label = ffmodel.label_tensor

    # Load CIFAR-10 and upsample each 32x32 image to 299x299 (nearest
    # neighbor) so it matches the network's expected input resolution.
    num_samples = 10000
    (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
    full_input_np = np.zeros((num_samples, 3, 299, 299), dtype=np.float32)
    for i in range(0, num_samples):
        image = x_train[i, :, :, :]
        image = image.transpose(1, 2, 0)
        pil_image = Image.fromarray(image)
        pil_image = pil_image.resize((299,299), Image.NEAREST)
        image = np.array(pil_image, dtype=np.float32)
        image = image.transpose(2, 0, 1)
        full_input_np[i, :, :, :] = image
    # Normalize pixel values to [0, 1].
    full_input_np /= 255
    print(full_input_np.shape)
    print(full_input_np.__array_interface__["strides"])
    print(full_input_np[0,:, :, :])

    y_train = y_train.astype('int32')
    full_label_np = y_train

    # Stage the full dataset in FlexFlow tensors and hand it to data loaders.
    dims_full_input = [num_samples, 3, 299, 299]
    full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)
    dims_full_label = [num_samples, 1]
    full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)
    full_input.attach_numpy_array(ffconfig, full_input_np)
    full_label.attach_numpy_array(ffconfig, full_label_np)
    dataloader_input = SingleDataLoader(ffmodel, input, full_input, num_samples, DataType.DT_FLOAT)
    dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)
    full_input.detach_numpy_array(ffconfig)
    full_label.detach_numpy_array(ffconfig)

    num_samples = dataloader_input.get_num_samples()
    assert dataloader_input.get_num_samples() == dataloader_label.get_num_samples()

    # Train and report wall-clock throughput.
    ffmodel.init_layers()
    epochs = ffconfig.epochs
    ts_start = ffconfig.get_current_time()
    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, 8192 * epochs / run_time))
# Run the example only when executed as a script (not on import).
if __name__ == "__main__":
    print("inception")
    inception()
| 37.447674 | 167 | 0.661854 | from flexflow.core import *
from flexflow.keras.datasets import cifar10
from accuracy import ModelAccuracy
from PIL import Image
def InceptionA(ffmodel, input, pool_features):
t1 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(input, 48, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(t2, 64, 5, 5, 1, 1, 2, 2)
t3 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)
t3 = ffmodel.conv2d(t3, 96, 3, 3, 1, 1, 1, 1)
t3 = ffmodel.conv2d(t3, 96, 3, 3, 1, 1, 1, 1)
t4 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
t4 = ffmodel.conv2d(t4, pool_features, 1, 1, 1, 1, 0, 0)
output = ffmodel.concat([t1, t2, t3, t4], 1)
return output
def InceptionB(ffmodel, input):
t1 = ffmodel.conv2d(input, 384, 3, 3, 2, 2, 0, 0)
t2 = ffmodel.conv2d(input, 64, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(t2, 96, 3, 3, 1, 1, 1, 1)
t2 = ffmodel.conv2d(t2, 96, 3, 3, 2, 2, 0, 0)
t3 = ffmodel.pool2d(input, 3, 3, 2, 2, 0, 0)
output = ffmodel.concat([t1, t2, t3], 1)
return output
def InceptionC(ffmodel, input, channels):
t1 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(input, channels, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(t2, channels, 1, 7, 1, 1, 0, 3)
t2 = ffmodel.conv2d(t2, 192, 7, 1, 1, 1, 3, 0)
t3 = ffmodel.conv2d(input, channels, 1, 1, 1, 1, 0, 0)
t3 = ffmodel.conv2d(t3, channels, 7, 1, 1, 1, 3, 0)
t3 = ffmodel.conv2d(t3, channels, 1, 7, 1, 1, 0, 3)
t3 = ffmodel.conv2d(t3, channels, 7, 1, 1, 1, 3, 0)
t3 = ffmodel.conv2d(t3, 192, 1, 7, 1, 1, 0, 3)
t4 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
t4 = ffmodel.conv2d(t4, 192, 1, 1, 1, 1, 0, 0)
output = ffmodel.concat([t1, t2, t3, t4], 1)
return output;
def InceptionD(ffmodel, input):
t1 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
t1 = ffmodel.conv2d(t1, 320, 3, 3, 2, 2, 0, 0)
t2 = ffmodel.conv2d(input, 192, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(t2, 192, 1, 7, 1, 1, 0, 3)
t2 = ffmodel.conv2d(t2, 192, 7, 1, 1, 1, 3, 0)
t2 = ffmodel.conv2d(t2, 192, 3, 3, 2, 2, 0, 0)
t3 = ffmodel.pool2d(input, 3, 3, 2, 2, 0, 0)
output = ffmodel.concat([t1, t2, t3], 1)
return output;
def InceptionE(ffmodel, input):
t1 = ffmodel.conv2d(input, 320, 1, 1, 1, 1, 0, 0)
t2i = ffmodel.conv2d(input, 384, 1, 1, 1, 1, 0, 0)
t2 = ffmodel.conv2d(t2i, 384, 1, 3, 1, 1, 0, 1)
t3 = ffmodel.conv2d(t2i, 384, 3, 1, 1, 1, 1, 0)
t3i = ffmodel.conv2d(input, 448, 1, 1, 1, 1, 0, 0)
t3i = ffmodel.conv2d(t3i, 384, 3, 3, 1, 1, 1, 1)
t4 = ffmodel.conv2d(t3i, 384, 1, 3, 1, 1, 0, 1)
t5 = ffmodel.conv2d(t3i, 384, 3, 1, 1, 1, 1, 0)
t6 = ffmodel.pool2d(input, 3, 3, 1, 1, 1, 1, PoolType.POOL_AVG)
t6 = ffmodel.conv2d(t6, 192, 1, 1, 1, 1, 0, 0)
output = ffmodel.concat([t1, t2, t3, t4, t5, t6], 1)
return output;
def inception():
ffconfig = FFConfig()
print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
ffmodel = FFModel(ffconfig)
dims_input = [ffconfig.batch_size, 3, 299, 299]
input = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)
t = ffmodel.conv2d(input, 32, 3, 3, 2, 2, 0, 0)
t = ffmodel.conv2d(t, 32, 3, 3, 1, 1, 0, 0)
t = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1)
t = ffmodel.pool2d(t, 3, 3, 2, 2, 0, 0)
t = ffmodel.conv2d(t, 80, 1, 1, 1, 1, 0, 0)
t = ffmodel.conv2d(t, 192, 3, 3, 1, 1, 1, 1)
t = ffmodel.pool2d(t, 3, 3, 2, 2, 0, 0)
t = InceptionA(ffmodel, t, 32)
t = InceptionA(ffmodel, t, 64)
t = InceptionA(ffmodel, t, 64)
t = InceptionB(ffmodel, t)
t = InceptionC(ffmodel, t, 128)
t = InceptionC(ffmodel, t, 160)
t = InceptionC(ffmodel, t, 160)
t = InceptionC(ffmodel, t, 192)
t = InceptionD(ffmodel, t)
t = InceptionE(ffmodel, t)
t = InceptionE(ffmodel, t)
t = ffmodel.pool2d(t, 8, 8, 1, 1, 0, 0, PoolType.POOL_AVG)
t = ffmodel.flat(t)
t = ffmodel.dense(t, 10)
t = ffmodel.softmax(t)
ffoptimizer = SGDOptimizer(ffmodel, 0.001)
ffmodel.optimizer = ffoptimizer
ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
label = ffmodel.label_tensor
num_samples = 10000
(x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
full_input_np = np.zeros((num_samples, 3, 299, 299), dtype=np.float32)
for i in range(0, num_samples):
image = x_train[i, :, :, :]
image = image.transpose(1, 2, 0)
pil_image = Image.fromarray(image)
pil_image = pil_image.resize((299,299), Image.NEAREST)
image = np.array(pil_image, dtype=np.float32)
image = image.transpose(2, 0, 1)
full_input_np[i, :, :, :] = image
full_input_np /= 255
print(full_input_np.shape)
print(full_input_np.__array_interface__["strides"])
print(full_input_np[0,:, :, :])
y_train = y_train.astype('int32')
full_label_np = y_train
dims_full_input = [num_samples, 3, 299, 299]
full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)
dims_full_label = [num_samples, 1]
full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)
full_input.attach_numpy_array(ffconfig, full_input_np)
full_label.attach_numpy_array(ffconfig, full_label_np)
dataloader_input = SingleDataLoader(ffmodel, input, full_input, num_samples, DataType.DT_FLOAT)
dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)
full_input.detach_numpy_array(ffconfig)
full_label.detach_numpy_array(ffconfig)
num_samples = dataloader_input.get_num_samples()
assert dataloader_input.get_num_samples() == dataloader_label.get_num_samples()
ffmodel.init_layers()
epochs = ffconfig.epochs
ts_start = ffconfig.get_current_time()
ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
ts_end = ffconfig.get_current_time()
run_time = 1e-6 * (ts_end - ts_start);
print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, 8192 * epochs / run_time));
ption")
inception()
| true | true |
f722e68100c41dd81d229f2886754f6a2f83dd54 | 353 | py | Python | src/utils.py | VVCepheiA/Prototypical-Networks-for-Few-shot-Learning-PyTorch | fe58b789f3511bfab6ae9ea0dd4b521aca777b52 | [
"MIT"
] | null | null | null | src/utils.py | VVCepheiA/Prototypical-Networks-for-Few-shot-Learning-PyTorch | fe58b789f3511bfab6ae9ea0dd4b521aca777b52 | [
"MIT"
] | null | null | null | src/utils.py | VVCepheiA/Prototypical-Networks-for-Few-shot-Learning-PyTorch | fe58b789f3511bfab6ae9ea0dd4b521aca777b52 | [
"MIT"
] | null | null | null | """
Utility for model
"""
import pathlib
import os
import json
def save_list_to_file(path, thelist):
    """Write every element of *thelist* to *path*, one str-formatted item per line."""
    with open(path, 'w') as f:
        f.writelines("%s\n" % item for item in thelist)
def mkdir_p(full_dir):
    """Simulate ``mkdir -p``: create *full_dir* and any missing parents.

    ``exist_ok=True`` already makes this a no-op when the directory exists,
    so the previous ``os.path.exists`` pre-check was redundant and introduced
    a check-then-create race; it has been removed.
    """
    pathlib.Path(full_dir).mkdir(parents=True, exist_ok=True)
| 19.611111 | 65 | 0.637394 | import pathlib
import os
import json
def save_list_to_file(path, thelist):
    """Persist *thelist* to *path*, writing one str-formatted element per line."""
    lines = ["%s\n" % item for item in thelist]
    with open(path, 'w') as f:
        f.write("".join(lines))
def mkdir_p(full_dir):
    """Simulate ``mkdir -p``: create *full_dir* and any missing parents.

    ``exist_ok=True`` already tolerates an existing directory, so the former
    ``os.path.exists`` guard was redundant and check-then-create racy.
    """
    pathlib.Path(full_dir).mkdir(parents=True, exist_ok=True)
| true | true |
f722e7a2cae5cb0bbfe35a1951adbcd802f8d3ca | 1,640 | py | Python | samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteConversationDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
from google.cloud import dialogflow_v2
def sample_delete_conversation_dataset():
    """Delete a Dialogflow conversation dataset and wait for the long-running
    operation to finish (generated sample; name is a placeholder)."""
    # Create a client
    client = dialogflow_v2.ConversationDatasetsClient()

    # Initialize request argument(s)
    # NOTE: "name_value" is a placeholder — presumably the full dataset
    # resource name goes here; confirm against the API reference.
    request = dialogflow_v2.DeleteConversationDatasetRequest(
        name="name_value",
    )

    # Make the request
    operation = client.delete_conversation_dataset(request=request)

    print("Waiting for operation to complete...")

    # result() blocks until the operation completes (or raises on failure).
    response = operation.result()

    # Handle the response
    print(response)
# [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
| 32.8 | 85 | 0.769512 |
from google.cloud import dialogflow_v2
def sample_delete_conversation_dataset():
    """Delete a Dialogflow conversation dataset and block until done."""
    # Create the API client.
    client = dialogflow_v2.ConversationDatasetsClient()
    # Build the request; "name_value" is a placeholder — presumably the full
    # dataset resource name belongs here (confirm against the API reference).
    request = dialogflow_v2.DeleteConversationDatasetRequest(
        name="name_value",
    )
    # Start the long-running delete operation.
    operation = client.delete_conversation_dataset(request=request)
    print("Waiting for operation to complete...")
    # result() blocks until the operation completes (or raises on failure).
    response = operation.result()
    print(response)
| true | true |
f722e8300d29dab1dd6dbd5d3e148f7b855d9ce0 | 12,339 | py | Python | wisdem/drivetrainse/rna.py | ptrbortolotti/WISDEM | 2b7e44716d022e2f62140073dd078c5deeb8bf0a | [
"Apache-2.0"
] | 1 | 2020-06-02T14:58:28.000Z | 2020-06-02T14:58:28.000Z | wisdem/drivetrainse/rna.py | ptrbortolotti/WISDEM | 2b7e44716d022e2f62140073dd078c5deeb8bf0a | [
"Apache-2.0"
] | null | null | null | wisdem/drivetrainse/rna.py | ptrbortolotti/WISDEM | 2b7e44716d022e2f62140073dd078c5deeb8bf0a | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent, Group, IndepVarComp
from wisdem.commonse.utilities import hstack, vstack
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse import gravity
# This is an extremely simple RNA mass calculator that should be used when DriveSE otherwise seems too complicated
class RNAMass(ExplicitComponent):
    """Simple rotor-nacelle-assembly (RNA) mass aggregator.

    Lumps blade, hub, and nacelle mass properties into a total RNA mass,
    center of mass, and moment-of-inertia 6-vector about the tower top, all
    expressed in the yaw-aligned coordinate system.  Intended as a
    lightweight alternative when a full DriveSE model is overkill.
    """

    def setup(self):
        # variables
        self.add_input('blades_mass', 0.0, units='kg', desc='mass of all blade')
        self.add_input('hub_mass', 0.0, units='kg', desc='mass of hub')
        self.add_input('nac_mass', 0.0, units='kg', desc='mass of nacelle')

        self.add_input('hub_cm', np.zeros(3), units='m', desc='location of hub center of mass relative to tower top in yaw-aligned c.s.')
        self.add_input('nac_cm', np.zeros(3), units='m', desc='location of nacelle center of mass relative to tower top in yaw-aligned c.s.')

        # order for all moments of inertia is (xx, yy, zz, xy, xz, yz) in the yaw-aligned coorinate system
        self.add_input('blades_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of all blades about hub center')
        self.add_input('hub_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of hub about its center of mass')
        self.add_input('nac_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of nacelle about its center of mass')

        # outputs
        self.add_output('rotor_mass', 0.0, units='kg', desc='mass of blades and hub')
        self.add_output('rna_mass', 0.0, units='kg', desc='total mass of RNA')
        self.add_output('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')
        self.add_output('rna_I_TT', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of RNA about tower top in yaw-aligned coordinate system')

        self.declare_partials('*','*')

    def _assembleI(self, I):
        """Assemble the (xx, yy, zz, xy, xz, yz) 6-vector into a 3x3 inertia matrix."""
        Ixx, Iyy, Izz, Ixy, Ixz, Iyz = I[0], I[1], I[2], I[3], I[4], I[5]
        return np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])

    def _unassembleI(self, I):
        """Flatten a 3x3 inertia matrix back into the (xx, yy, zz, xy, xz, yz) 6-vector."""
        return np.array([I[0, 0], I[1, 1], I[2, 2], I[0, 1], I[0, 2], I[1, 2]])

    def compute(self, inputs, outputs):
        rotor_mass = inputs['blades_mass'] + inputs['hub_mass']
        nac_mass = inputs['nac_mass']

        # rna mass
        outputs['rotor_mass'] = rotor_mass
        outputs['rna_mass'] = rotor_mass + nac_mass

        # rna cm: mass-weighted average of the hub and nacelle cm locations
        outputs['rna_cm'] = (rotor_mass*inputs['hub_cm'] + nac_mass*inputs['nac_cm'])/outputs['rna_mass']

        # rna I: translate each inertia to the tower top via the
        # parallel-axis theorem, then sum
        blades_I = self._assembleI(inputs['blades_I'])
        hub_I = self._assembleI(inputs['hub_I'])
        nac_I = self._assembleI(inputs['nac_I'])
        rotor_I = blades_I + hub_I

        R = inputs['hub_cm']
        rotor_I_TT = rotor_I + rotor_mass*(np.dot(R, R)*np.eye(3) - np.outer(R, R))

        R = inputs['nac_cm']
        nac_I_TT = nac_I + inputs['nac_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))

        outputs['rna_I_TT'] = self._unassembleI(rotor_I_TT + nac_I_TT)

    def compute_partials(self, inputs, J):
        """Analytic Jacobian of all outputs w.r.t. all inputs."""
        blades_mass = inputs['blades_mass']
        hub_mass = inputs['hub_mass']
        nac_mass = inputs['nac_mass']
        hub_cm = inputs['hub_cm']
        nac_cm = inputs['nac_cm']
        # NOTE: the inertia inputs enter the Jacobian only through the
        # identity blocks at the bottom, so they are not unpacked here
        # (the previously unpacked hub_I/nac_I locals were unused).

        rotor_mass = blades_mass+hub_mass
        rna_mass = rotor_mass + nac_mass

        # mass
        J['rotor_mass', 'blades_mass'] = 1.0
        J['rotor_mass', 'hub_mass'] = 1.0
        J['rotor_mass', 'nac_mass'] = 0.0
        J['rotor_mass', 'hub_cm'] = np.zeros(3)
        J['rotor_mass', 'nac_cm'] = np.zeros(3)
        J['rotor_mass', 'blades_I'] = np.zeros(6)
        J['rotor_mass', 'hub_I'] = np.zeros(6)
        J['rotor_mass', 'nac_I'] = np.zeros(6)

        J['rna_mass', 'blades_mass'] = 1.0
        J['rna_mass', 'hub_mass'] = 1.0
        J['rna_mass', 'nac_mass'] = 1.0
        J['rna_mass', 'hub_cm'] = np.zeros(3)
        J['rna_mass', 'nac_cm'] = np.zeros(3)
        J['rna_mass', 'blades_I'] = np.zeros(6)
        J['rna_mass', 'hub_I'] = np.zeros(6)
        J['rna_mass', 'nac_I'] = np.zeros(6)

        # cm: quotient rule on the mass-weighted average
        numerator = (blades_mass+hub_mass)*hub_cm+nac_mass*nac_cm

        J['rna_cm', 'blades_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2
        J['rna_cm', 'hub_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2
        J['rna_cm', 'nac_mass'] = (rna_mass*nac_cm-numerator)/rna_mass**2
        J['rna_cm', 'hub_cm'] = rotor_mass/rna_mass*np.eye(3)
        J['rna_cm', 'nac_cm'] = nac_mass/rna_mass*np.eye(3)
        J['rna_cm', 'blades_I'] = np.zeros((3, 6))
        J['rna_cm', 'hub_I'] = np.zeros((3, 6))
        J['rna_cm', 'nac_I'] = np.zeros((3, 6))

        # I: derivatives of the parallel-axis terms
        R = hub_cm
        const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))
        J['rna_I_TT', 'blades_mass'] = const
        J['rna_I_TT', 'hub_mass'] = const
        dI_drx = rotor_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))
        dI_dry = rotor_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))
        dI_drz = rotor_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))
        J['rna_I_TT', 'hub_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T

        R = nac_cm
        const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))
        J['rna_I_TT', 'nac_mass'] = const
        dI_drx = nac_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))
        dI_dry = nac_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))
        dI_drz = nac_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))
        J['rna_I_TT', 'nac_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T
        J['rna_I_TT', 'blades_I'] = np.eye(6)
        J['rna_I_TT', 'hub_I'] = np.eye(6)
        J['rna_I_TT', 'nac_I'] = np.eye(6)
class RotorLoads(ExplicitComponent):
    """Transfer hub-aligned rotor loads to the tower top.

    Rotates the aerodynamic force/moment from the hub-aligned to the
    yaw-aligned coordinate system and adds the moment arm from the hub
    offset.  RNA weight loading is intentionally NOT applied here: it was
    removed so that downstream tools (TowerSE/Frame3DD) handle added-mass
    weight consistently, so the outputs are aerodynamic loads only.
    """

    def setup(self):
        # variables
        self.add_input('F', np.zeros(3), units='N', desc='forces in hub-aligned coordinate system')
        self.add_input('M', np.zeros(3), units='N*m', desc='moments in hub-aligned coordinate system')
        self.add_input('hub_cm', np.zeros(3), units='m', desc='position of rotor hub relative to tower top in yaw-aligned c.s.')
        self.add_input('rna_mass', 0.0, units='kg', desc='mass of rotor nacelle assembly')
        self.add_input('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')

        # parameters
        self.add_discrete_input('downwind', False)
        self.add_input('tilt', 0.0, units='deg')

        # out (yaw-aligned coordinate system)
        self.add_output('top_F', np.zeros(3), units='N')
        self.add_output('top_M', np.zeros(3), units='N*m')

        self.declare_partials('top_F', ['F','M','hub_cm','rna_mass','rna_cm'])
        self.declare_partials('top_M', ['F','M','hub_cm','rna_mass','rna_cm'])

    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        F = inputs['F']
        M = inputs['M']
        tilt = float(inputs['tilt'])

        # Rotate loads from the hub-aligned to the yaw-aligned c.s.
        F = DirectionVector.fromArray(F).hubToYaw(tilt)
        M = DirectionVector.fromArray(M).hubToYaw(tilt)

        # Flip the x-direction of the offsets for downwind machines.
        hub_cm = np.copy(inputs['hub_cm'])
        rna_cm = np.copy(inputs['rna_cm'])
        if discrete_inputs['downwind']:
            hub_cm[0] *= -1
            rna_cm[0] *= -1
        hub_cm = DirectionVector.fromArray(hub_cm)
        rna_cm = DirectionVector.fromArray(rna_cm)
        # Cache for reuse in compute_partials.
        self.save_rhub = hub_cm
        self.save_rcm = rna_cm

        # Aerodynamic moments: add the moment arm of the rotated force.
        M = M + hub_cm.cross(F)
        self.saveF = F

        # put back in array form (weight loads deliberately excluded; see class docstring)
        outputs['top_F'] = np.array([F.x, F.y, F.z])
        outputs['top_M'] = np.array([M.x, M.y, M.z])

    def compute_partials(self, inputs, J, discrete_inputs):
        """Analytic Jacobian of the tower-top loads.

        Relies on self.save_rhub / self.saveF cached by the preceding
        compute() call, as in the original implementation.
        """
        dF = DirectionVector.fromArray(inputs['F']).hubToYaw(inputs['tilt'])
        dFx, dFy, dFz = dF.dx, dF.dy, dF.dz
        dtopF_dFx = np.array([dFx['dx'], dFy['dx'], dFz['dx']])
        dtopF_dFy = np.array([dFx['dy'], dFy['dy'], dFz['dy']])
        dtopF_dFz = np.array([dFx['dz'], dFy['dz'], dFz['dz']])
        dtopF_dF = hstack([dtopF_dFx, dtopF_dFy, dtopF_dFz])

        dM = DirectionVector.fromArray(inputs['M']).hubToYaw(inputs['tilt'])
        dMx, dMy, dMz = dM.dx, dM.dy, dM.dz
        dMxcross, dMycross, dMzcross = self.save_rhub.cross_deriv(self.saveF, 'dr', 'dF')

        dtopM_dMx = np.array([dMx['dx'], dMy['dx'], dMz['dx']])
        dtopM_dMy = np.array([dMx['dy'], dMy['dy'], dMz['dy']])
        dtopM_dMz = np.array([dMx['dz'], dMy['dz'], dMz['dz']])
        dtopM_dM = hstack([dtopM_dMx, dtopM_dMy, dtopM_dMz])

        dM_dF = np.array([dMxcross['dF'], dMycross['dF'], dMzcross['dF']])
        dtopM_dFx = np.dot(dM_dF, dtopF_dFx)
        dtopM_dFy = np.dot(dM_dF, dtopF_dFy)
        dtopM_dFz = np.dot(dM_dF, dtopF_dFz)
        dtopM_dF = hstack([dtopM_dFx, dtopM_dFy, dtopM_dFz])

        dtopM_dr = np.array([dMxcross['dr'], dMycross['dr'], dMzcross['dr']])
        if discrete_inputs['downwind']:
            dtopM_dr[:, 0] *= -1

        J['top_F', 'F'] = dtopF_dF
        J['top_F', 'M'] = np.zeros((3, 3))
        J['top_F', 'hub_cm'] = np.zeros((3, 3))
        # Weight loading was removed from compute(), so top_F no longer
        # depends on rna_mass.  The former [0, 0, -gravity] entry here was a
        # stale leftover of the deleted weight term and made the analytic
        # derivative inconsistent with compute().
        J['top_F', 'rna_mass'] = np.zeros(3)
        J['top_F', 'rna_cm'] = np.zeros((3, 3))

        J['top_M', 'F'] = dtopM_dF
        J['top_M', 'M'] = dtopM_dM
        J['top_M', 'hub_cm'] = dtopM_dr
        # With the weight moment removed these are exactly zero (previously
        # computed as products of zero matrices).
        J['top_M', 'rna_mass'] = np.zeros(3)
        J['top_M', 'rna_cm'] = np.zeros((3, 3))
class RNA(Group):
    """Group bundling one RNAMass with a RotorLoads component per load case.

    Option ``nLC`` is the number of load cases; subsystems are named
    ``loads`` (single case) or ``loads1``, ``loads2``, ... (multiple cases).
    """

    def initialize(self):
        self.options.declare('nLC')

    def setup(self):
        n_cases = self.options['nLC']

        self.add_subsystem('mass', RNAMass(), promotes=['*'])
        for case in range(n_cases):
            suffix = str(case + 1) if n_cases > 1 else ''
            self.add_subsystem('loads' + suffix, RotorLoads(),
                               promotes=['rna_mass', 'rna_cm', 'hub_cm', 'downwind', 'tilt'])
| 41.40604 | 153 | 0.57922 | from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent, Group, IndepVarComp
from wisdem.commonse.utilities import hstack, vstack
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse import gravity
class RNAMass(ExplicitComponent):
def setup(self):
self.add_input('blades_mass', 0.0, units='kg', desc='mass of all blade')
self.add_input('hub_mass', 0.0, units='kg', desc='mass of hub')
self.add_input('nac_mass', 0.0, units='kg', desc='mass of nacelle')
self.add_input('hub_cm', np.zeros(3), units='m', desc='location of hub center of mass relative to tower top in yaw-aligned c.s.')
self.add_input('nac_cm', np.zeros(3), units='m', desc='location of nacelle center of mass relative to tower top in yaw-aligned c.s.')
self.add_input('blades_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of all blades about hub center')
self.add_input('hub_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of hub about its center of mass')
self.add_input('nac_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of nacelle about its center of mass')
self.add_output('rotor_mass', 0.0, units='kg', desc='mass of blades and hub')
self.add_output('rna_mass', 0.0, units='kg', desc='total mass of RNA')
self.add_output('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')
self.add_output('rna_I_TT', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of RNA about tower top in yaw-aligned coordinate system')
self.declare_partials('*','*')
def _assembleI(self, I):
Ixx, Iyy, Izz, Ixy, Ixz, Iyz = I[0], I[1], I[2], I[3], I[4], I[5]
return np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])
def _unassembleI(self, I):
return np.array([I[0, 0], I[1, 1], I[2, 2], I[0, 1], I[0, 2], I[1, 2]])
def compute(self, inputs, outputs):
rotor_mass = inputs['blades_mass'] + inputs['hub_mass']
nac_mass = inputs['nac_mass']
outputs['rotor_mass'] = rotor_mass
outputs['rna_mass'] = rotor_mass + nac_mass
outputs['rna_cm'] = (rotor_mass*inputs['hub_cm'] + nac_mass*inputs['nac_cm'])/outputs['rna_mass']
blades_I = self._assembleI(inputs['blades_I'])
hub_I = self._assembleI(inputs['hub_I'])
nac_I = self._assembleI(inputs['nac_I'])
rotor_I = blades_I + hub_I
R = inputs['hub_cm']
rotor_I_TT = rotor_I + rotor_mass*(np.dot(R, R)*np.eye(3) - np.outer(R, R))
R = inputs['nac_cm']
nac_I_TT = nac_I + inputs['nac_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))
outputs['rna_I_TT'] = self._unassembleI(rotor_I_TT + nac_I_TT)
def compute_partials(self, inputs, J):
blades_mass = inputs['blades_mass']
hub_mass = inputs['hub_mass']
nac_mass = inputs['nac_mass']
hub_cm = inputs['hub_cm']
nac_cm = inputs['nac_cm']
hub_I = inputs['hub_I']
nac_I = inputs['nac_I']
rotor_mass = blades_mass+hub_mass
rna_mass = rotor_mass + nac_mass
J['rotor_mass', 'blades_mass'] = 1.0
J['rotor_mass', 'hub_mass'] = 1.0
J['rotor_mass', 'nac_mass'] = 0.0
J['rotor_mass', 'hub_cm'] = np.zeros(3)
J['rotor_mass', 'nac_cm'] = np.zeros(3)
J['rotor_mass', 'blades_I'] = np.zeros(6)
J['rotor_mass', 'hub_I'] = np.zeros(6)
J['rotor_mass', 'nac_I'] = np.zeros(6)
J['rna_mass', 'blades_mass'] = 1.0
J['rna_mass', 'hub_mass'] = 1.0
J['rna_mass', 'nac_mass'] = 1.0
J['rna_mass', 'hub_cm'] = np.zeros(3)
J['rna_mass', 'nac_cm'] = np.zeros(3)
J['rna_mass', 'blades_I'] = np.zeros(6)
J['rna_mass', 'hub_I'] = np.zeros(6)
J['rna_mass', 'nac_I'] = np.zeros(6)
numerator = (blades_mass+hub_mass)*hub_cm+nac_mass*nac_cm
J['rna_cm', 'blades_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2
J['rna_cm', 'hub_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2
J['rna_cm', 'nac_mass'] = (rna_mass*nac_cm-numerator)/rna_mass**2
J['rna_cm', 'hub_cm'] = rotor_mass/rna_mass*np.eye(3)
J['rna_cm', 'nac_cm'] = nac_mass/rna_mass*np.eye(3)
J['rna_cm', 'blades_I'] = np.zeros((3, 6))
J['rna_cm', 'hub_I'] = np.zeros((3, 6))
J['rna_cm', 'nac_I'] = np.zeros((3, 6))
R = hub_cm
const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))
J['rna_I_TT', 'blades_mass'] = const
J['rna_I_TT', 'hub_mass'] = const
dI_drx = rotor_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))
dI_dry = rotor_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))
dI_drz = rotor_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))
J['rna_I_TT', 'hub_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T
R = nac_cm
const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))
J['rna_I_TT', 'nac_mass'] = const
dI_drx = nac_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))
dI_dry = nac_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))
dI_drz = nac_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))
J['rna_I_TT', 'nac_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T
J['rna_I_TT', 'blades_I'] = np.eye(6)
J['rna_I_TT', 'hub_I'] = np.eye(6)
J['rna_I_TT', 'nac_I'] = np.eye(6)
class RotorLoads(ExplicitComponent):
    """Transfer rotor hub loads to the tower top.

    Rotates the hub-aligned force/moment vectors into the yaw-aligned
    coordinate system (accounting for shaft tilt) and translates the
    moment from the hub to the tower top.
    """

    def setup(self):
        # Applied loads in the hub-aligned coordinate system.
        self.add_input('F', np.zeros(3), units='N', desc='forces in hub-aligned coordinate system')
        self.add_input('M', np.zeros(3), units='N*m', desc='moments in hub-aligned coordinate system')
        self.add_input('hub_cm', np.zeros(3), units='m', desc='position of rotor hub relative to tower top in yaw-aligned c.s.')
        self.add_input('rna_mass', 0.0, units='kg', desc='mass of rotor nacelle assembly')
        self.add_input('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')
        # Restored: this declaration was truncated to "t('tilt', ...)",
        # which is a SyntaxError; compute() reads inputs['tilt'].
        self.add_input('tilt', 0.0, units='deg')
        # compute()/compute_partials() read this flag and the RNA group
        # promotes 'downwind' on this component, so it must be declared.
        self.add_discrete_input('downwind', False)
        self.add_output('top_F', np.zeros(3), units='N')
        self.add_output('top_M', np.zeros(3), units='N*m')
        self.declare_partials('top_F', ['F','M','hub_cm','rna_mass','rna_cm'])
        self.declare_partials('top_M', ['F','M','hub_cm','rna_mass','rna_cm'])

    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        F = inputs['F']
        M = inputs['M']
        tilt = float(inputs['tilt'])
        # Rotate loads from the hub-aligned into the yaw-aligned frame.
        F = DirectionVector.fromArray(F).hubToYaw(tilt)
        M = DirectionVector.fromArray(M).hubToYaw(tilt)
        hub_cm = np.copy(inputs['hub_cm'])
        rna_cm = np.copy(inputs['rna_cm'])
        if discrete_inputs['downwind']:
            # Downwind machines mirror the x-offsets of hub and RNA c.m.
            hub_cm[0] *= -1
            rna_cm[0] *= -1
        hub_cm = DirectionVector.fromArray(hub_cm)
        rna_cm = DirectionVector.fromArray(rna_cm)
        # Cached for compute_partials (assumes compute ran first).
        self.save_rhub = hub_cm
        self.save_rcm = rna_cm
        # Translate the moment from the hub to the tower top: M += r x F.
        M = M + hub_cm.cross(F)
        self.saveF = F
        Fout = F
        Mout = M
        outputs['top_F'] = np.array([Fout.x, Fout.y, Fout.z])
        outputs['top_M'] = np.array([Mout.x, Mout.y, Mout.z])

    def compute_partials(self, inputs, J, discrete_inputs):
        # Derivatives of the rotated force w.r.t. the hub-aligned force.
        dF = DirectionVector.fromArray(inputs['F']).hubToYaw(inputs['tilt'])
        dFx, dFy, dFz = dF.dx, dF.dy, dF.dz
        dtopF_dFx = np.array([dFx['dx'], dFy['dx'], dFz['dx']])
        dtopF_dFy = np.array([dFx['dy'], dFy['dy'], dFz['dy']])
        dtopF_dFz = np.array([dFx['dz'], dFy['dz'], dFz['dz']])
        # NOTE(review): hstack appears to be a project helper (not np.hstack)
        # that assembles a Jacobian from column vectors -- confirm.
        dtopF_dF = hstack([dtopF_dFx, dtopF_dFy, dtopF_dFz])
        # Sensitivity of a weight force to the RNA mass (weight term itself
        # is not applied in compute(); presumably stripped upstream).
        dtopF_w_dm = np.array([0.0, 0.0, -gravity])
        # Derivatives of the rotated moment w.r.t. the hub-aligned moment.
        dM = DirectionVector.fromArray(inputs['M']).hubToYaw(inputs['tilt'])
        dMx, dMy, dMz = dM.dx, dM.dy, dM.dz
        # Derivatives of the hub_cm x F cross-product term (uses values
        # cached by compute()).
        dMxcross, dMycross, dMzcross = self.save_rhub.cross_deriv(self.saveF, 'dr', 'dF')
        dtopM_dMx = np.array([dMx['dx'], dMy['dx'], dMz['dx']])
        dtopM_dMy = np.array([dMx['dy'], dMy['dy'], dMz['dy']])
        dtopM_dMz = np.array([dMx['dz'], dMy['dz'], dMz['dz']])
        dtopM_dM = hstack([dtopM_dMx, dtopM_dMy, dtopM_dMz])
        dM_dF = np.array([dMxcross['dF'], dMycross['dF'], dMzcross['dF']])
        # Chain rule: moment depends on F both directly and via the cross term.
        dtopM_dFx = np.dot(dM_dF, dtopF_dFx)
        dtopM_dFy = np.dot(dM_dF, dtopF_dFy)
        dtopM_dFz = np.dot(dM_dF, dtopF_dFz)
        dtopM_dF = hstack([dtopM_dFx, dtopM_dFy, dtopM_dFz])
        dtopM_dr = np.array([dMxcross['dr'], dMycross['dr'], dMzcross['dr']])
        # rna_cm / weight contributions are zero because compute() applies
        # no weight moment.
        dtopM_drnacm = np.zeros((3, 3))
        dtopM_dF_w = np.zeros((3, 3))
        dtopM_dm = np.dot(dtopM_dF_w, dtopF_w_dm)
        if discrete_inputs['downwind']:
            # Mirror the x-column, matching the sign flip in compute().
            dtopM_dr[:, 0] *= -1
            dtopM_drnacm[:, 0] *= -1
        J['top_F', 'F'] = dtopF_dF
        J['top_F', 'M'] = np.zeros((3, 3))
        J['top_F', 'hub_cm'] = np.zeros((3, 3))
        J['top_F', 'rna_mass'] = dtopF_w_dm
        J['top_F', 'rna_cm'] = np.zeros((3, 3))
        J['top_M', 'F'] = dtopM_dF
        J['top_M', 'M'] = dtopM_dM
        J['top_M', 'hub_cm'] = dtopM_dr
        J['top_M', 'rna_mass'] = dtopM_dm
        J['top_M', 'rna_cm'] = dtopM_drnacm
class RNA(Group):
    """Group wiring the RNA mass rollup with one RotorLoads per load case."""

    def initialize(self):
        # Number of load cases to instantiate.
        self.options.declare('nLC')

    def setup(self):
        n_cases = self.options['nLC']
        self.add_subsystem('mass', RNAMass(), promotes=['*'])
        for case_num in range(1, n_cases + 1):
            # Single load case keeps the bare name 'loads'; otherwise
            # subsystems are numbered loads1, loads2, ...
            suffix = '' if n_cases == 1 else str(case_num)
            self.add_subsystem('loads' + suffix, RotorLoads(),
                               promotes=['rna_mass','rna_cm','hub_cm','downwind','tilt'])
| true | true |
f722e882215e54f911ad2db42fa10900a5abcc26 | 334 | py | Python | setup.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | setup.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | setup.py | zanardo/clog | 390e56dd8bab472532d5ff77d1dfb0acfdb72518 | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup
from clogd import __VERSION__
# Package metadata; the version is sourced from the clogd package itself.
setup(
    name='clog',
    version=__VERSION__,
    packages=['clogd'],
    # Ship the bundled static web assets and view templates.
    package_data={
        '': ['static/*.*', 'views/*.*'],
    },
    # Runtime dependencies, pinned to exact versions.
    install_requires=[
        'zpgdb==0.4.2',
        'Bottle==0.12.13',
        'waitress==1.1.0',
        'PyYAML==3.12',
    ],
)
| 18.555556 | 40 | 0.517964 | from setuptools import setup
from clogd import __VERSION__
setup(
name='clog',
version=__VERSION__,
packages=['clogd'],
package_data={
'': ['static/*.*', 'views/*.*'],
},
install_requires=[
'zpgdb==0.4.2',
'Bottle==0.12.13',
'waitress==1.1.0',
'PyYAML==3.12',
],
)
| true | true |
f722e9cc17f1f3bc8a478de8d0aeecf08798d105 | 572 | py | Python | sdk/python/pulumi_azure_native/notificationhubs/v20140901/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/notificationhubs/v20140901/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/notificationhubs/v20140901/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_namespace import *
from .get_notification_hub import *
from .get_notification_hub_pns_credentials import *
from .list_namespace_keys import *
from .list_notification_hub_keys import *
from .namespace import *
from .notification_hub import *
from ._inputs import *
from . import outputs
| 31.777778 | 80 | 0.769231 |
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_namespace import *
from .get_notification_hub import *
from .get_notification_hub_pns_credentials import *
from .list_namespace_keys import *
from .list_notification_hub_keys import *
from .namespace import *
from .notification_hub import *
from ._inputs import *
from . import outputs
| true | true |
f722ea35e9284460af48ce2dd1bc2535916a6cb0 | 3,704 | py | Python | kws_streaming/layers/stream_test.py | baajur/google-research | fd74519da7a0976b9b372f1ad8e091d1b3ee2212 | [
"Apache-2.0"
] | 3 | 2020-08-17T01:50:27.000Z | 2020-10-29T01:51:16.000Z | kws_streaming/layers/stream_test.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | kws_streaming/layers/stream_test.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 2 | 2020-08-29T08:58:30.000Z | 2021-08-29T09:59:34.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.stream."""
import numpy as np
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.layers.modes import Modes
from kws_streaming.models import utils
tf1.disable_eager_execution()
# Toy example which require signal processing in time
class Sum(tf.keras.layers.Layer):
  """Toy cell that collapses its input by summing over the time axis."""

  def __init__(self, time_dim=1, **kwargs):
    super(Sum, self).__init__(**kwargs)
    # Axis to reduce over; axis 1 is the time dimension by convention here.
    self.time_dim = time_dim

  def call(self, inputs):
    # E.g. (batch, time, feature) -> (batch, feature) for time_dim=1.
    return tf.keras.backend.sum(inputs, axis=self.time_dim)

  def get_config(self):
    # Extend the base config so the layer round-trips through serialization.
    cfg = super(Sum, self).get_config()
    cfg.update({"time_dim": self.time_dim})
    return cfg
class StreamTest(tf.test.TestCase):
  def test_streaming_with_effective_tdim(self):
    """Streaming inference with a ring buffer matches a sliding-window sum."""
    time_size = 10
    feature_size = 3
    batch_size = 1
    time_dim = 1  # index of time dimensions
    ring_buffer_size_in_time_dim = 3  # effective size of aperture in time dim
    inputs = tf.keras.layers.Input(
        shape=(time_size, feature_size),
        batch_size=batch_size,
        name="inp_sequence")
    mode = Modes.TRAINING
    # in streaming mode it will create a
    # ring buffer with time dim size ring_buffer_size_in_time_dim
    outputs = stream.Stream(
        cell=Sum(time_dim=time_dim),
        mode=mode,
        ring_buffer_size_in_time_dim=ring_buffer_size_in_time_dim)(inputs)
    # Non-streaming (training) model over the full sequence.
    model_train = tf.keras.Model(inputs, outputs)
    model_train.summary()
    mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
    input_tensors = [
        tf.keras.layers.Input(
            shape=(
                1,  # time dim is size 1 in streaming mode
                feature_size,
            ), batch_size=batch_size, name="inp_stream")
    ]
    # convert non streaming model to streaming one
    model_stream = utils.convert_to_inference_model(model_train,
                                                    input_tensors, mode)
    model_stream.summary()
    # second input to stream model is a state, so we can use its shape
    input_state_np = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)
    # input test data
    non_stream_input = np.random.randint(
        1, 10, size=(batch_size, time_size, feature_size))
    # run streaming inference
    # iterate over time dim sample by sample
    # NOTE(review): loop bound is the state's time dim (the ring buffer
    # length), not the full input length -- confirm this is intended.
    for i in range(input_state_np.shape[1]):
      # Feed one time step, shaped (batch=1, time=1, feature).
      input_stream_np = np.expand_dims(non_stream_input[0][i], 0)
      input_stream_np = np.expand_dims(input_stream_np, 1)
      input_stream_np = input_stream_np.astype(np.float32)
      output_stream_np, output_state_np = model_stream.predict(
          [input_stream_np, input_state_np])
      input_state_np = output_state_np  # update input state
      # emulate sliding window summation
      target = np.sum(
          non_stream_input[:, max(0, i - ring_buffer_size_in_time_dim):i + 1],
          axis=time_dim)
      self.assertAllEqual(target, output_stream_np)
if __name__ == "__main__":
tf.test.main()
| 33.981651 | 78 | 0.702214 |
import numpy as np
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.layers.modes import Modes
from kws_streaming.models import utils
tf1.disable_eager_execution()
class Sum(tf.keras.layers.Layer):
def __init__(self, time_dim=1, **kwargs):
super(Sum, self).__init__(**kwargs)
self.time_dim = time_dim
def call(self, inputs):
return tf.keras.backend.sum(inputs, axis=self.time_dim)
def get_config(self):
config = {"time_dim": self.time_dim}
base_config = super(Sum, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class StreamTest(tf.test.TestCase):
def test_streaming_with_effective_tdim(self):
time_size = 10
feature_size = 3
batch_size = 1
time_dim = 1
ring_buffer_size_in_time_dim = 3
inputs = tf.keras.layers.Input(
shape=(time_size, feature_size),
batch_size=batch_size,
name="inp_sequence")
mode = Modes.TRAINING
outputs = stream.Stream(
cell=Sum(time_dim=time_dim),
mode=mode,
ring_buffer_size_in_time_dim=ring_buffer_size_in_time_dim)(inputs)
model_train = tf.keras.Model(inputs, outputs)
model_train.summary()
mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
input_tensors = [
tf.keras.layers.Input(
shape=(
1,
feature_size,
), batch_size=batch_size, name="inp_stream")
]
model_stream = utils.convert_to_inference_model(model_train,
input_tensors, mode)
model_stream.summary()
input_state_np = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)
non_stream_input = np.random.randint(
1, 10, size=(batch_size, time_size, feature_size))
for i in range(input_state_np.shape[1]):
input_stream_np = np.expand_dims(non_stream_input[0][i], 0)
input_stream_np = np.expand_dims(input_stream_np, 1)
input_stream_np = input_stream_np.astype(np.float32)
output_stream_np, output_state_np = model_stream.predict(
[input_stream_np, input_state_np])
input_state_np = output_state_np
target = np.sum(
non_stream_input[:, max(0, i - ring_buffer_size_in_time_dim):i + 1],
axis=time_dim)
self.assertAllEqual(target, output_stream_np)
if __name__ == "__main__":
tf.test.main()
| true | true |
f722eb594e8e73ff236ef7f232c800cd1fe2c0bd | 2,865 | py | Python | www/page.py | linhyee/py-blog | c6ed92ebe93d6ec7cb7eff1e2690e97e5074b310 | [
"Apache-2.0"
] | null | null | null | www/page.py | linhyee/py-blog | c6ed92ebe93d6ec7cb7eff1e2690e97e5074b310 | [
"Apache-2.0"
] | null | null | null | www/page.py | linhyee/py-blog | c6ed92ebe93d6ec7cb7eff1e2690e97e5074b310 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
JSON API definition.
"""
import math
from urllib import parse
class Page(object):
    '''
    Pagination helper: derives page_count, offset and limit for a result set.
    '''
    def __init__(self, item_count, page_index=1, page_size=10):
        '''
        Init Pagination by item count, page_index and page_size

        >>> p1 =Page(100, 1)
        >>> p1.page_count
        10
        >>> p1.offset
        0
        >>> p1.limit
        10
        >>> p2 = Page(90, 9, 10)
        >>> p2.page_count
        9
        >>> p2.offset
        80
        >>> p2.limit
        10
        >>> p3 = Page(91,10, 10)
        >>> p3.page_count
        10
        >>> p3.offset
        90
        >>> p3.limit
        10
        '''
        self.item_count = item_count
        self.page_size = page_size
        # A partially filled last page still counts as a page.
        full_pages, leftover = divmod(item_count, page_size)
        self.page_count = full_pages + (1 if leftover else 0)
        if item_count == 0 or page_index > self.page_count:
            # No items, or the requested page is out of range: empty window.
            self.page_index = 0
            self.offset = 0
            self.limit = 0
        else:
            self.page_index = page_index
            self.offset = (page_index - 1) * self.page_size
            self.limit = self.page_size
        self.has_next = self.page_index < self.page_count
        self.has_previous = self.page_index > 1

    def __str__(self):
        fields = (self.item_count, self.page_count, self.page_index,
                  self.page_size, self.offset, self.limit)
        return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % fields

    __repr__ = __str__
def pagination(page: 'Page', base_url, query=None):
    """Render a UIkit pagination widget for *page* as an HTML string.

    Args:
        page: Page-like object; only page_count, page_index, has_previous
            and has_next are read.
        base_url: URL that every page link points at.
        query: optional extra query parameters carried through each link.
            The caller's dict is never modified.

    Returns:
        HTML for a complete <ul class="uk-pagination"> element, or '' when
        there are no pages.
    """
    if page.page_count == 0:
        return ''
    # Work on a copy: the original used a shared mutable default argument
    # and wrote query['page'] into it, leaking state into the caller's dict
    # and across calls.
    query = dict(query) if query else {}
    s = '<ul class="uk-pagination uk-flex-center" uk-margin>'
    if page.has_previous:
        query['page'] = page.page_index - 1 if page.page_index > 1 else 0
        s += '<li><a href="%s?%s"><span uk-pagination-previous></span></a></li>' % (base_url, parse.urlencode(query))
    # Show at most 5 numbered links, aligned to blocks of 5.
    count = 5
    if count > page.page_count:
        count = page.page_count
    page_start = (math.ceil(page.page_index / count) - 1) * count + 1
    for i in range(page_start, page_start + count):
        if i > page.page_count:
            break
        if i == page.page_index:
            s += '<li class="uk-active"><span>%d</span></li>' % page.page_index
        else:
            query['page'] = i
            # The closing </li> was missing here.
            s += '<li><a href="%s?%s">%d</a></li>' % (base_url, parse.urlencode(query), i)
    if page.has_next:
        next_page = page.page_index + 1  # renamed: 'next' shadows the builtin
        if next_page > page.page_count:
            next_page = page.page_count
        query['page'] = next_page
        s += '<li><a href="%s?%s"><span uk-pagination-next></span></a></li>' % (base_url, parse.urlencode(query))
    # The list was never closed before, producing broken markup.
    return s + '</ul>'
if __name__ == '__main__':
import doctest
doctest.testmod() | 29.84375 | 117 | 0.543106 |
import math
from urllib import parse
class Page(object):
def __init__(self, item_count, page_index=1,page_size=10):
self.item_count = item_count
self.page_size = page_size
self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 0
else:
self.page_index = page_index
self.offset = self.page_size * (page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (
self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
def pagination(page : Page, base_url, query={}):
if page.page_count == 0:
return ''
s = '<ul class="uk-pagination uk-flex-center" uk-margin>'
if page.has_previous:
previous = page.page_index - 1
if previous <= 0:
previous = 1
query['page'] = page.page_index-1 if page.page_index > 1 else 0
s += '<li><a href="%s?%s"><span uk-pagination-previous></span></a></li>' % (base_url, parse.urlencode(query))
count = 5
if count > page.page_count:
count = page.page_count
page_start = (math.ceil(page.page_index / count) - 1) * count + 1
for i in range(page_start, page_start +count):
if i > page.page_count:
break
if i == page.page_index:
s += '<li class="uk-active"><span>%d</span></li>' % page.page_index
else:
query['page'] = i
s += '<li><a href="%s?%s">%d</a>' % (base_url, parse.urlencode(query), i)
if page.has_next:
next = page.page_index + 1
if next > page.page_count:
next =page.page_count
query['page'] = next
s += '<li><a href="%s?%s"><span uk-pagination-next></span></a></li>' % (base_url, parse.urlencode(query))
return s
if __name__ == '__main__':
import doctest
doctest.testmod() | true | true |
f722eb5f39b52b59d529aed21853ccfff82baebb | 4,845 | py | Python | skbio/stats/distance/tests/test_anosim.py | michaelsilverstein/scikit-bio | 876efcf688a8f15e89bb70fa835a2f2a84b534c1 | [
"BSD-3-Clause"
] | 1 | 2020-03-26T00:23:16.000Z | 2020-03-26T00:23:16.000Z | skbio/stats/distance/tests/test_anosim.py | michaelsilverstein/scikit-bio | 876efcf688a8f15e89bb70fa835a2f2a84b534c1 | [
"BSD-3-Clause"
] | 1 | 2020-05-03T15:13:07.000Z | 2020-05-04T03:01:59.000Z | skbio/stats/distance/tests/test_anosim.py | michaelsilverstein/scikit-bio | 876efcf688a8f15e89bb70fa835a2f2a84b534c1 | [
"BSD-3-Clause"
] | 3 | 2020-02-22T12:31:59.000Z | 2020-09-20T19:21:44.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import anosim
class TestANOSIM(TestCase):
    """All results were verified with R (vegan::anosim)."""
    def setUp(self):
        """Build the distance matrices, groupings and helpers shared by tests."""
        # Distance matrices with and without ties in the ranks, with 2 groups
        # of equal size.
        dm_ids = ['s1', 's2', 's3', 's4']
        self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
        # Grouping supplied as a data frame; includes an extra ID (s5) that is
        # not in the distance matrix and rows in shuffled order.
        self.df = pd.read_csv(
            io.StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
                        's1,Control'), index_col=0)
        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                       [1, 0, 3, 2],
                                       [1, 3, 0, 3],
                                       [4, 2, 3, 0]], dm_ids)
        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
                                          [1, 0, 3, 2],
                                          [5, 3, 0, 3],
                                          [4, 2, 3, 0]], dm_ids)
        # Test with 3 groups of unequal size. This data also generates a
        # negative R statistic.
        self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
                                 'Treatment1', 'Control', 'Control']
        # Equivalent grouping but with different labels -- groups should be
        # assigned different integer labels but results should be the same.
        self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
        self.dm_unequal = DistanceMatrix(
            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
            ['s1', 's2', 's3', 's4', 's5', 's6'])
        # Expected series index is the same across all tests.
        self.exp_index = ['method name', 'test statistic name', 'sample size',
                          'number of groups', 'test statistic', 'p-value',
                          'number of permutations']
        # Stricter series equality testing than the default.
        # NOTE(review): pandas.util.testing is deprecated; newer pandas
        # exposes this as pandas.testing.assert_series_equal.
        self.assert_series_equal = partial(assert_series_equal,
                                           check_index_type=True,
                                           check_series_type=True)
    def test_ties(self):
        """Tied ranks: results are reproducible and grouping forms agree."""
        # Ensure we get the same results if we rerun the method using the same
        # inputs. Also ensure we get the same results if we run the method
        # using a grouping vector or a data frame with equivalent groupings.
        exp = pd.Series(index=self.exp_index,
                        data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],
                        name='ANOSIM results')
        for _ in range(2):
            np.random.seed(0)  # fixed seed so the permutation p-value is exact
            obs = anosim(self.dm_ties, self.grouping_equal)
            self.assert_series_equal(obs, exp)
        for _ in range(2):
            np.random.seed(0)
            obs = anosim(self.dm_ties, self.df, column='Group')
            self.assert_series_equal(obs, exp)
    def test_no_ties(self):
        """Distance matrix without tied ranks yields the R-verified statistic."""
        exp = pd.Series(index=self.exp_index,
                        data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],
                        name='ANOSIM results')
        np.random.seed(0)
        obs = anosim(self.dm_no_ties, self.grouping_equal)
        self.assert_series_equal(obs, exp)
    def test_no_permutations(self):
        """With zero permutations the p-value is NaN."""
        exp = pd.Series(index=self.exp_index,
                        data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],
                        name='ANOSIM results')
        obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)
        self.assert_series_equal(obs, exp)
    def test_unequal_group_sizes(self):
        """Unequal groups (negative R); relabeling groups changes nothing."""
        exp = pd.Series(index=self.exp_index,
                        data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],
                        name='ANOSIM results')
        np.random.seed(0)
        obs = anosim(self.dm_unequal, self.grouping_unequal)
        self.assert_series_equal(obs, exp)
        np.random.seed(0)
        obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)
        self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| 40.375 | 78 | 0.519711 |
import io
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import anosim
class TestANOSIM(TestCase):
def setUp(self):
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
io.StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_ties(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],
name='ANOSIM results')
for _ in range(2):
np.random.seed(0)
obs = anosim(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = anosim(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],
name='ANOSIM results')
np.random.seed(0)
obs = anosim(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],
name='ANOSIM results')
obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_unequal_group_sizes(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],
name='ANOSIM results')
np.random.seed(0)
obs = anosim(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.