hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0d5628c5a4c4846ae8b26ecda9cd0dc9fdd16b
| 384
|
py
|
Python
|
beit/semantic_segmentation/configs/_base_/schedules/schedule_deepfashion2.py
|
mrlzla/unilm
|
004083a77d7f1a39d52d3aa97a12ee5e537c7ded
|
[
"MIT"
] | 1
|
2022-01-07T21:46:58.000Z
|
2022-01-07T21:46:58.000Z
|
beit/semantic_segmentation/configs/_base_/schedules/schedule_deepfashion2.py
|
mrlzla/unilm
|
004083a77d7f1a39d52d3aa97a12ee5e537c7ded
|
[
"MIT"
] | null | null | null |
beit/semantic_segmentation/configs/_base_/schedules/schedule_deepfashion2.py
|
mrlzla/unilm
|
004083a77d7f1a39d52d3aa97a12ee5e537c7ded
|
[
"MIT"
] | null | null | null |
# Optimizer: plain SGD with momentum and a small weight decay.
optimizer = {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.0005}
optimizer_config = {}
# Learning-rate policy: polynomial decay per iteration down to min_lr.
lr_config = {'policy': 'poly', 'power': 0.9, 'min_lr': 1e-4, 'by_epoch': False}
# Runtime: iteration-based training with periodic checkpointing/evaluation.
runner = {'type': 'IterBasedRunner', 'max_iters': 760000}
checkpoint_config = {'by_epoch': False, 'interval': 190000}
evaluation = {'interval': 190000, 'metric': 'mIoU'}
| 38.4
| 72
| 0.760417
|
4a0d56540e2edf1d8aab1bc5b4f38441e4a1cc46
| 6,980
|
py
|
Python
|
train_models/train_tiny_patch.py
|
ypxie/SuperRes
|
1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1
|
[
"MIT"
] | 5
|
2019-01-04T06:50:18.000Z
|
2019-02-13T12:02:24.000Z
|
train_models/train_tiny_patch.py
|
ypxie/SuperRes
|
1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1
|
[
"MIT"
] | null | null | null |
train_models/train_tiny_patch.py
|
ypxie/SuperRes
|
1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1
|
[
"MIT"
] | 1
|
2019-08-24T20:56:53.000Z
|
2019-08-24T20:56:53.000Z
|
import argparse, os, sys
# Make the project root importable so the srdense package resolves.
sys.path.insert(0, '..')
proj_root = os.path.join('..')
import pdb
import torch
import random
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from srdense.thinnet import tinynet as srnet
from srdense.thinnet import L1_Charbonnier_loss
from srdense.ssim import SSIM
from srdense.dataset import DatasetSmall as DataSet
from srdense.proj_utils.plot_utils import *
# Checkpoints for this run live under <proj_root>/model_adam/<model_name>.
model_name = 'tiny_patch_model'
model_folder = os.path.join(proj_root, 'model_adam', model_name)
if not os.path.exists(model_folder):
    os.mkdir(model_folder)
# Scalar plots for the two loss terms (rate presumably controls plot
# frequency -- confirm against proj_utils.plot_utils.plot_scalar).
l1_plot = plot_scalar(name = "l1_loss_sr", env= model_name, rate = 1000)
ssim_plot = plot_scalar(name = "ssim_loss_sr", env= model_name, rate = 1000)
# Training settings
parser = argparse.ArgumentParser(description="PyTorch DenseNet")
parser.add_argument("--batch_size", type=int, default=32, help="training batch size")
parser.add_argument("--nEpochs", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=1e-2, help="Learning Rate. Default=1e-4")
parser.add_argument("--step", type=int, default=100, help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10")
parser.add_argument("--cuda", default=True, action="store_false", help="Use cuda?")
parser.add_argument("--resume", default=True, help="Path to checkpoint (default: none)")
parser.add_argument("--reload_epoch", default=125, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--start_epoch", default=1, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--threads", type=int, default=1, help="Number of threads for data loader to use, Default: 1")
parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
parser.add_argument("--weight-decay", "--wd", default=1e-4, type=float, help="weight decay, Default: 1e-4")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained model (default: none)")
parser.add_argument("--save_freq", default=1, type=int, help="save frequency")
def main():
    """Train the tiny patch super-resolution network.

    Parses the module-level CLI arguments, optionally resumes from the
    checkpoint of epoch ``--reload_epoch``, then trains for ``--nEpochs``
    epochs with an L1-Charbonnier + negative-SSIM loss, plotting both loss
    terms and saving weights every ``--save_freq`` epochs.
    """
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    # Seed CPU (and GPU) RNGs so a run can be reproduced from its printed seed.
    opt.seed = random.randint(1, 10000)
    print(("Random Seed: ", opt.seed))
    torch.manual_seed(opt.seed)
    if cuda:
        # The redundant in-function `import torch.backends.cudnn` was removed;
        # the module-level import already provides `cudnn`.
        torch.cuda.manual_seed(opt.seed)
        cudnn.benchmark = True

    print("===> Building model")
    model = srnet()
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
        # NOTE(review): SSIM is only ever constructed with .cuda(), so the
        # CPU-only path would not work regardless of where this line sits.
        ssim_sim = SSIM().cuda()

    # optionally resume from a checkpoint saved by a previous run
    if opt.resume is True:
        model_path = os.path.join(model_folder, 'model_epoch_{}.pth'.format(opt.reload_epoch))
        if os.path.isfile(model_path):
            print(("=> loading checkpoint '{}'".format(model_path)))
            model_state = torch.load(model_path)
            model.load_state_dict(model_state)
            opt.start_epoch = opt.reload_epoch + 1
        else:
            # BUG FIX: report the path that was actually probed; the old
            # message formatted `opt.resume`, which is just the boolean flag.
            print(("=> no checkpoint found at '{}'".format(model_path)))

    # optionally copy weights from a pretrained checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print(("=> loading model '{}'".format(opt.pretrained)))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print(("=> no model found at '{}'".format(opt.pretrained)))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    model.train()

    print("===> Loading datasets")
    hd_folder = os.path.join('..', 'data', 'LR_HD_Match', 'train_HR')
    reg_folder = os.path.join('..', 'data', 'LR_HD_Match', 'train_LR')
    training_data_loader = DataSet(hd_folder, reg_folder=reg_folder, batch_size = opt.batch_size, img_size = 256)

    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        # Step-decay schedule: halve the learning rate every `opt.step` epochs.
        lr = opt.lr * (0.5 ** ((epoch - 1) // opt.step))
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        print("epoch =", epoch, "lr =", optimizer.param_groups[0]["lr"])

        for iteration in range(training_data_loader.epoch_iteration):
            batch_data = training_data_loader.get_next()
            inputs = Variable(torch.from_numpy(batch_data['low']), requires_grad=False)
            label = Variable(torch.from_numpy(batch_data['high']), requires_grad=False)
            if opt.cuda:
                inputs = inputs.cuda()
                label = label.cuda()

            out = model(inputs)
            # Maximizing SSIM == minimizing its negative; the total loss
            # trades per-pixel error against structural similarity.
            l1_loss = criterion(out, label)
            ssim_loss = - ssim_sim(out, label)
            loss = l1_loss + ssim_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            l1_plot.plot(l1_loss.cpu().data.numpy()[0])
            ssim_plot.plot(ssim_loss.cpu().data.numpy()[0])

            if iteration % 100 == 0:
                print(("===> Epoch[{}]({}/{}): Loss: {:.10f}".format(epoch, iteration, training_data_loader.epoch_iteration, loss.data[0])))
                # Push an (LR, HR, reconstruction, overlay) panel to the
                # plotting server for visual inspection of the first sample.
                reg_img_np = batch_data['low'][0:1]
                hd_img_np = batch_data['high'][0:1]
                recoverd_img_np = out.data.cpu().numpy()[0:1]
                overlaid_img = 0.5 * reg_img_np + 0.5 * hd_img_np
                img_disply = [reg_img_np, hd_img_np, recoverd_img_np, overlaid_img]
                returned_img = save_images(img_disply, save_path=None, save=False, dim_ordering='th')
                plot_img(X=returned_img, win='reg_hd_recovered', env=model_name)

        # save the checkpoints every `save_freq` epochs
        if epoch > 0 and epoch % opt.save_freq == 0:
            torch.save(model.state_dict(), os.path.join(model_folder, 'model_epoch_{}.pth'.format(epoch)))
            print('save weights at {}'.format(model_folder))
#def train(training_data_loader, optimizer, model, criterion, epoch):
# Entry point: only train when executed directly, not when imported.
if __name__ == "__main__":
    main()
| 39.213483
| 151
| 0.640115
|
4a0d569a0bdc5f937770c7b312a3223ff3cba5bc
| 4,847
|
py
|
Python
|
src/m1r_console_input_examples.py
|
macaker/14-WaitUntilEvent_WhileLoops
|
df5929bce19dd65dba75e6a44226d49b8a6e8702
|
[
"MIT"
] | null | null | null |
src/m1r_console_input_examples.py
|
macaker/14-WaitUntilEvent_WhileLoops
|
df5929bce19dd65dba75e6a44226d49b8a6e8702
|
[
"MIT"
] | null | null | null |
src/m1r_console_input_examples.py
|
macaker/14-WaitUntilEvent_WhileLoops
|
df5929bce19dd65dba75e6a44226d49b8a6e8702
|
[
"MIT"
] | null | null | null |
"""
This module demonstrates how to INPUT from the CONSOLE:
-- ints (integers)
-- floats (floating point numbers)
-- strings.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Emily Macak.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# DONE: 2. Read and run this program. Then do the following problems,
# putting your answers RIGHT HERE IN THIS DOCUMENT.
#
# 1. Write a line of code that would input an INTEGER from the
# console, storing the integer in a variable called 'x'.
# Write your line here: x = int(input('Input an Integer: '))
#
# 2. Write a line of code that would input a FLOAT from the console,
# storing the float in a variable called 'x'.
# Write your line here: x = float(input('Input a Float: '))
#
# 3. Write a line of code that would input a STRING from the console,
# storing the string in a variable called 'x'.
# Write your line here: x = str(input('Input a String: '))
#
# 4. What happens if you (the user) enter something OTHER than a
# single integer (e.g., you enter
# five
# or
# 4.5
# or
# 1 1 1
# or
# nothing at all (just press the Enter key)
# -- try them!) when running the input_an_integer example?
# Put your answer here:
#
# After you have PUT YOUR ANSWERS IN THIS COMMENT as described above,
# a. Find someone who has had HER answer checked.
# Ask her to check YOUR answers to the above.
# b. Change the above TO DO to DONE.
#
# As always, ask questions as needed!
###############################################################################
def main():
    """Demonstrate console input by running the string, integer and float demos."""
    for demo in (input_a_string, input_an_integer, input_a_float):
        demo()
###############################################################################
# Example: how to INPUT a STRING from the Console.
###############################################################################
def input_a_string():
    """Prompt for the user's name on the console and echo it back."""
    banner = '--------------------------------------------------'
    print()
    print(banner)
    print('Demonstrating CONSOLE INPUT of a STRING:')
    print(banner)
    # input() always returns a str, so no conversion is needed here.
    name = input('Enter your name: ')
    print('Hi, ' + name + '! ', name, '!. ', name)
    print(' Sorry, I have the hiccups...')
###############################################################################
# Example: how to INPUT an INTEGER from the Console.
###############################################################################
def input_an_integer():
    """Prompt for an integer age and comment on voting eligibility."""
    banner = '--------------------------------------------------'
    print()
    print(banner)
    print('Demonstrating CONSOLE INPUT of an INTEGER:')
    print(banner)
    # int() raises ValueError if the typed text is not a whole number.
    age = int(input('How old are you? '))
    print('That is ' + str(age * 12) + ' months!')
    if age < 18:
        print('You will be able to vote in ' + str(18 - age) + ' years.')
    else:
        print('You are old enough to vote, nice!')
###############################################################################
# Example: how to INPUT a FLOAT (floating point number) from the Console
###############################################################################
def input_a_float():
    """Prompt for an amount of money and convert it to pounds of potatoes."""
    banner = '--------------------------------------------------'
    print()
    print(banner)
    print('Demonstrating CONSOLE INPUT of a FLOATING POINT number:')
    print(banner)
    # float() raises ValueError if the typed text is not numeric.
    money = float(input('How much money do you have? '))
    # Divisors are the price of 10 pounds of potatoes: $6.46 (2015), $0.14 (1900).
    potatoes_today = round((money / 6.46) * 10)
    potatoes_1900 = round((money / 0.140) * 10)
    print('According to Infoplease')
    print('at http://www.infoplease.com/ipa/A0873707.html')
    print(' -- That would buy you {} pounds of potatoes in 2015.'.format(potatoes_today))
    print(' -- That would buy you {} pounds of potatoes in 1900!'.format(potatoes_1900))
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# NOTE: this runs at import time too; there is no __main__ guard in this file.
# -----------------------------------------------------------------------------
main()
| 40.391667
| 79
| 0.443986
|
4a0d58069730f79bf6e7b56dc51babb6f83c81b2
| 4,977
|
py
|
Python
|
src/arc_utilities/path_utils.py
|
UM-ARM-Lab/arc_utilities
|
e21bd5062983b25e61e33f832ec66b937540ba10
|
[
"BSD-2-Clause"
] | 10
|
2017-01-09T14:37:14.000Z
|
2022-03-16T08:02:08.000Z
|
src/arc_utilities/path_utils.py
|
UM-ARM-Lab/arc_utilities
|
e21bd5062983b25e61e33f832ec66b937540ba10
|
[
"BSD-2-Clause"
] | 62
|
2017-05-25T16:52:38.000Z
|
2022-03-08T20:05:09.000Z
|
src/arc_utilities/path_utils.py
|
UM-ARM-Lab/arc_utilities
|
e21bd5062983b25e61e33f832ec66b937540ba10
|
[
"BSD-2-Clause"
] | 7
|
2017-08-04T13:06:17.000Z
|
2022-03-16T08:02:11.000Z
|
#! /usr/bin/env python
"""
Useful functions for dealing with paths
Unless otherwise noted, paths are a list of waypoints. Often it is useful to store these in a numpy array
"""
import pathlib
import sys
from copy import deepcopy
import numpy as np
from more_itertools import pairwise
import rospy
from trajectory_msgs.msg import JointTrajectory
def clamp(num, min_val, max_val):
    """Limit num to [min_val, max_val] (upper bound applied last, as before)."""
    floored = num if num > min_val else min_val
    return floored if floored < max_val else max_val
def dist(p1, p2):
    """Euclidean distance between two points given as array-likes."""
    delta = np.asarray(p1) - np.asarray(p2)
    return np.linalg.norm(delta)
def closest_point_to_line_segment(line, point):
    """
    Returns:
        point, alpha
        alpha: (0 to 1) fraction along the line segment to the closest point
    """
    start = np.array(line[0])
    seg = np.array(line[1]) - start
    seg_len = np.linalg.norm(seg)
    # Degenerate segment: both endpoints coincide (within float noise).
    if seg_len < 10 * sys.float_info.epsilon:
        return line[0], 0
    # Project the point onto the segment direction; express the projection as
    # a fraction of the segment length, clamped to stay on the segment.
    offset = np.array(point) - start
    alpha = np.dot(offset, seg / seg_len) / seg_len
    alpha = min(max(0, alpha), 1)
    return start + alpha * seg, alpha
def closest_point(path, query_point):
    """
    Computes the closest point on the path to the query point

    Returns:
        point, ind, alpha
        point: closest point on path to query point
        ind: index of the path waypoint preceding point
        alpha: fraction from path[ind] to path[ind + 1] at which point lies
    """
    # Start with the first waypoint as the best candidate seen so far.
    best_point = path[0]
    best_ind = 0
    best_alpha = 0
    best_dist = dist(path[0], query_point)
    for seg_ind in range(len(path) - 1):
        candidate, alpha = closest_point_to_line_segment(
            [path[seg_ind], path[seg_ind + 1]], query_point)
        candidate_dist = dist(candidate, query_point)
        if candidate_dist < best_dist:
            best_dist = candidate_dist
            best_point = candidate
            best_ind = seg_ind
            best_alpha = alpha
    # Normalize "end of segment" to "start of the following segment".
    if best_alpha == 1:
        best_alpha = 0
        best_ind += 1
    return best_point, best_ind, best_alpha
def densify_line(start_point, end_point, max_dist):
    """
    Return a straight-line path from start_point (exclusive) to end_point
    (inclusive) with at most max_dist between consecutive points.
    """
    start = np.array(start_point)
    end = np.array(end_point)
    length = np.linalg.norm(end - start)
    num_points = int(np.ceil(length / max_dist))
    direction = end - start
    # Evenly spaced fractions 1/n, 2/n, ..., n/n along the segment.
    return [start + direction * step / num_points for step in range(1, num_points + 1)]
def densify(path, max_dist):
    """
    Return a path through the same waypoints as `path`, with intermediate
    points inserted so consecutive points are at most max_dist apart.
    """
    if not len(path):
        return path
    dense = [np.array(path[0])]
    for waypoint in path[1:]:
        dense += densify_line(dense[-1], waypoint, max_dist)
    return dense
def travel_along(path, distance, starting_point=None):
    """
    Travels along the path from the starting point for distance
    Parameters:
        path: path
        distance: total euclidean distance to travel. Negative follows backwards
        starting_point: path traversal starts at the closest point on the path to this point
    Returns:
        new_path: subpath which lies completely on original path while following inputs as best as possible
    """
    if starting_point is None:
        starting_point = path[0]
    # +1 walks forward through the waypoints, -1 walks backward.
    direction = int(np.sign(distance))
    dist_to_go = abs(distance)
    q, ind, alpha = closest_point(path, starting_point)
    newpath = [q]
    path = np.array(path)
    if alpha != 0:
        # Splice the projected start point q into the path so traversal can
        # begin exactly at q rather than at an original waypoint.
        ind += 1
        path = np.concatenate((path[0:ind], [q], path[ind:]))
    while dist_to_go > 0:
        # Stop early if we have reached either end of the path.
        if direction == 1 and ind == len(path) - 1:
            return newpath
        if direction == -1 and ind == 0:
            return newpath
        ind = ind + direction
        dist_to_next = dist(q, path[ind])
        if dist_to_next > dist_to_go:
            # The next waypoint would overshoot the requested distance:
            # stop partway along this segment, exactly dist_to_go from q.
            motion = path[ind] - q
            motion = motion / np.linalg.norm(motion)
            newpath.append(motion * dist_to_go + q)
            break
        q = path[ind]
        newpath.append(q)
        dist_to_go -= dist_to_next
    return newpath
def path_length(path):
    """Return the total euclidean length of the path (0 for empty paths)."""
    if len(path) == 0:
        return 0
    pts = np.array(path)
    # Sum the lengths of every consecutive waypoint pair.
    return sum(np.linalg.norm(b - a) for a, b in zip(pts[:-1], pts[1:]))
def reverse_trajectory(trajectory: JointTrajectory):
    """Return a copy of `trajectory` with its points in reverse order.

    time_from_start is rebuilt so the reversed trajectory starts at t=0 and
    each step keeps the duration of the corresponding original segment.
    """
    reversed_trajectory = deepcopy(trajectory)
    reversed_trajectory.points = deepcopy(trajectory.points[::-1])
    reversed_trajectory.points[0].time_from_start = rospy.Duration(0)
    time_from_start = rospy.Duration(0)
    # Each consecutive pair of the reversed sequence contributes the positive
    # time delta between the two original points; accumulate it forward.
    for (pt_next, pt), r_pt in zip(pairwise(trajectory.points[::-1]), reversed_trajectory.points[1:]):
        time_from_start += pt_next.time_from_start - pt.time_from_start
        r_pt.time_from_start = time_from_start
    return reversed_trajectory
def rm_tree(path):
    """Recursively delete the directory `path` and everything under it.

    BUG FIX: the original tested only `is_file()`, so a symlink pointing at a
    directory fell into the recursive branch -- deleting the *target's*
    contents and then failing on rmdir. Symlinks are now unlinked, never
    followed.
    """
    path = pathlib.Path(path)
    for child in path.glob('*'):
        if child.is_dir() and not child.is_symlink():
            rm_tree(child)
        else:
            # Regular file, or a symlink (possibly to a directory):
            # remove the directory entry itself without following it.
            child.unlink()
    path.rmdir()
| 27.048913
| 105
| 0.642757
|
4a0d58897a43ab90c3327dc048f4335b93f2a89d
| 8,814
|
py
|
Python
|
updates_to_drs/faster_clean_hotpix/clean_hotpix.py
|
clairem789/apero-utils
|
68ed0136a36b6badeaf15eb20d673052ad79a949
|
[
"MIT"
] | 2
|
2020-10-08T17:03:45.000Z
|
2021-03-09T17:49:44.000Z
|
updates_to_drs/faster_clean_hotpix/clean_hotpix.py
|
clairem789/apero-utils
|
68ed0136a36b6badeaf15eb20d673052ad79a949
|
[
"MIT"
] | 43
|
2020-10-06T18:42:24.000Z
|
2022-03-28T21:23:10.000Z
|
misc/updates_to_drs/faster_clean_hotpix/clean_hotpix.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | 5
|
2020-04-10T06:41:00.000Z
|
2020-12-16T21:09:14.000Z
|
from astropy.io import fits
from scipy import signal
import bottleneck as mp
import warnings as warnings
import numpy as np
import time
from lin_mini import lin_mini
def fit2dpoly(x, y, z):
    """Least-squares fit of a 2nd-order 2D polynomial z ~ f(x, y).

    Returns the six coefficients of the basis 1, x, y, x^2, y^2, x*y.
    """
    # Design matrix: one column per basis term, one row per sample.
    design = np.column_stack([np.ones_like(x), x, y, x ** 2, y ** 2, x * y])
    solution, _residuals, _rank, _singular = np.linalg.lstsq(design, z.flatten(), rcond=None)
    return solution
# Load the raw dark frame used by the benchmarking below.
image = fits.getdata('dark_dark_02_001d.fits')
# Flag non-finite pixels (NaN/inf) as bad. `~np.isfinite(...)` is the
# idiomatic form of the original `np.isfinite(...) == False` comparison.
badpix = ~np.isfinite(image)
def clean_hotpix1(image, badpix):
    """Fast, vectorized bad/hot-pixel cleaning of a 2D image.

    Parameters:
        image: 2D numpy array of pixel values.
        badpix: 2D boolean array of known-bad pixels (same shape as image).
    Returns:
        Cleaned copy of `image` with interpolated values at bad pixels.
    """
    #
    # Cleans an image by finding pixels that are high-sigma (positive or negative)
    # outliers compared to their immediate neighbours. Bad pixels are
    # interpolated with a 2D surface fit by using valid pixels within the
    # 3x3 pixel box centered on the bad pix.
    #
    # Pixels in big clusters of bad pix (more than 3 bad neighbours)
    # are left as they are.
    #
    image_rms_measurement = np.array(image)
    #
    # First we construct a 'flattened' image
    # We perform a low-pass filter along the x axis
    # filtering the image so that only pixel-to-pixel structures
    # remain. This is use to find big outliers in RMS.
    # First we apply a median filtering, which removes big outliers
    # and then we smooth the image to avoid big regions filled with zeros.
    # Regions filled with zeros in the low-pass image happen when the local
    # median is equal to the pixel value in the input image.
    #
    # We apply a 5-pix median boxcar in X and a 5-pix boxcar smoothing
    # in x. This blurs along the dispersion over a scale of ~7 pixels.
    # perform a [1,5] median filtering by rolling axis of a 2D image
    # and constructing a 5*N*M cube, then taking a big median along axis=0
    # analoguous to, but faster than :
    # low_pass = signal.medfilt(image_rms_measurement, [1, 5])
    tmp = []
    for d in range(-2,3):
        tmp.append(np.roll(image,d))
    tmp = np.array(tmp)
    tmp = mp.nanmedian(tmp,axis = 0)
    # same trick but for convolution with a [1,5] boxcar
    low_pass = np.zeros_like(tmp)
    for d in range(-2,3):
        low_pass += np.roll(tmp,d)
    low_pass /= 5
    # residual image showing pixel-to-pixel noise
    # the image is now centered on zero, so we can
    # determine the RMS around a given pixel
    image_rms_measurement -= low_pass
    abs_image_rms_measurement = np.abs(image_rms_measurement)
    # same as a [3,3] median filtering with signal.medfilt but faster
    tmp = []
    for dx in range(-1,2):
        for dy in range(-1,2):
            tmp.append(np.roll(abs_image_rms_measurement,[dx,dy],
                               axis = [0,1]))
    tmp = np.array(tmp)
    rms = mp.nanmedian(tmp,axis = 0)
    # the RMS cannot be arbitrarily small, so we set
    # a lower limit to the local RMS at 0.5x the median
    # rms
    with warnings.catch_warnings(record=True) as _:
        rms[rms < (0.5 * mp.nanmedian(rms))] = 0.5 * mp.nanmedian(rms)
    # determining a proxy of N sigma
    nsig = image_rms_measurement / rms
    bad = np.array((np.abs(nsig) > 10), dtype=bool)
    # known bad pixels are also considered bad even if they are
    # within the +-N sigma rejection
    badpix = badpix | bad | ~np.isfinite(image)
    # we remove bad pixels at the periphery of the image
    # (their 3x3 neighbourhood would wrap or fall off the array)
    badpix[0,:] = False
    badpix[-1,:] = False
    badpix[:,0] = False
    badpix[:,-1] = False
    # find the pixel locations where we have bad pixels
    x, y = np.where(badpix)
    # gather, for every bad pixel at once, its 3x3 neighbourhood of values
    # (box3d) and a matching mask of which neighbours are valid (keep3d)
    box3d = np.zeros([len(x),3,3])
    keep3d = np.zeros([len(x),3,3],dtype = bool)
    # centering on zero
    yy, xx = np.indices([3, 3]) - 1
    for ix in range(-1,2):
        for iy in range(-1,2):
            box3d[:,ix+1,iy+1] = image[x+ix,y+iy]
            keep3d[:,ix+1,iy+1] = ~badpix[x+ix,y+iy]
    nvalid = np.sum(np.sum(keep3d,axis=1),axis=1)
    # keep only points with >5 valid neighbours
    box3d = box3d[nvalid>5]
    keep3d = keep3d[nvalid>5]
    x = x[nvalid>5]
    y = y[nvalid>5]
    nvalid = nvalid[nvalid>5]
    # copy the original image
    image1 = np.array(image)
    # correcting bad pixels with a 2D fit to valid neighbours
    # pre-computing some values that are needed below
    xx2 = xx**2
    yy2 = yy**2
    xy = xx*yy
    ones = np.ones_like(xx)
    for i in range(len(x)):
        keep = keep3d[i]
        box = box3d[i]
        if nvalid[i] ==8:
            # we fall in a special case where there is only a central pixel
            # that is bad surrounded by valid pixel. The central value is
            # straightfward to compute by using the means of 4 immediate
            # neighbours and the 4 corner neighbours.
            m1 = np.mean(box[[0,1,1,2],[1,0,2,1]])
            m2 = np.mean(box[[0,0,2,2],[2,0,2,0]])
            image1[x[i], y[i]] = 2*m1-m2
        else:
            # fitting a 2D 2nd order polynomial surface. As the xx=0, yy=0
            # corresponds to the bad pixel, then the first coefficient
            # of the fit (its zero point) corresponds to the value that
            # must be given to the pixel
            a = np.array([ones[keep], xx[keep], yy[keep], xx2[keep], yy2[keep], xy[keep]])
            b = box[keep]
            # perform a least squares fit on a and b
            coeff,_ = lin_mini(b,a, no_recon = True)
            # this is equivalent to the slower command :
            #coeff = fit2dpoly(xx[keep], yy[keep], box[keep])
            image1[x[i], y[i]] = coeff[0]
    # return the cleaned image
    return image1
# STAND-ALONE OLD FUNCTION to compare speed
def clean_hotpix2(image, badpix):
    """Reference (slower) bad-pixel cleaning using scipy.signal filters.

    Kept only for speed comparison against clean_hotpix1; both implement
    the same median-filter / sigma-clip / local-surface-fit algorithm.
    """
    # Cleans an image by finding pixels that are high-sigma (positive or negative)
    # outliers compared to their immediate neighbours. Bad pixels are
    # interpolated with a 2D surface fit by using valid pixels within the
    # 3x3 pixel box centered on the bad pix.
    #
    # Pixels in big clusters of bad pix (more than 3 bad neighbours)
    # are left as is
    image_rms_measurement = np.array(image)
    # First we construct a 'flattened' image
    # We perform a low-pass filter along the x axis
    # filtering the image so that only pixel-to-pixel structures
    # remain. This is use to find big outliers in RMS.
    # First we apply a median filtering, which removes big outliers
    # and then we smooth the image to avoid big regions filled with zeros.
    # Regions filled with zeros in the low-pass image happen when the local
    # median is equal to the pixel value in the input image.
    #
    # We apply a 5-pix median boxcar in X and a 5-pix boxcar smoothing
    # in x. This blurs along the dispersion over a scale of ~7 pixels.
    box = np.ones([1, 5])
    box /= mp.nansum(box)
    low_pass = signal.medfilt(image_rms_measurement, [1, 5])
    low_pass = signal.convolve2d(low_pass, box, mode='same')
    # residual image showing pixel-to-pixel noise
    # the image is now centered on zero, so we can
    # determine the RMS around a given pixel
    image_rms_measurement -= low_pass
    # smooth the abs(image) with a 3x3 kernel
    rms = signal.medfilt(np.abs(image_rms_measurement), [3, 3])
    #fits.writeto('med2.fits',rms, overwrite = True)
    # the RMS cannot be arbitrarily small, so we set
    # a lower limit to the local RMS at 0.5x the median
    # rms
    with warnings.catch_warnings(record=True) as _:
        rms[rms < (0.5 * mp.nanmedian(rms))] = 0.5 * mp.nanmedian(rms)
    # determining a proxy of N sigma
    nsig = image_rms_measurement / rms
    bad = np.array((np.abs(nsig) > 10), dtype=bool)
    # known bad pixels are also considered bad even if they are
    # within the +-N sigma rejection
    badpix = badpix | bad | ~np.isfinite(image)
    # find the pixel locations where we have bad pixels
    x, y = np.where(badpix)
    # centering on zero
    yy, xx = np.indices([3, 3]) - 1
    # copy the original image
    image1 = np.array(image)
    # correcting bad pixels with a 2D fit to valid neighbours
    for i in range(len(x)):
        keep = ~badpix[x[i] - 1:x[i] + 2, y[i] - 1:y[i] + 2]
        # skip pixels in large bad clusters (fewer than 6 valid neighbours)
        if mp.nansum(keep*1.0) < 6:
            continue
        box = image[x[i] - 1:x[i] + 2, y[i] - 1:y[i] + 2]
        # fitting a 2D 2nd order polynomial surface. As the xx=0, yy=0
        # corresponds to the bad pixel, then the first coefficient
        # of the fit (its zero point) corresponds to the value that
        # must be given to the pixel
        coeff = fit2dpoly(xx[keep], yy[keep], box[keep])
        image1[x[i], y[i]] = coeff[0]
    # return the cleaned image
    return image1
| 36.572614
| 90
| 0.622532
|
4a0d5a02cdeb70d460b0f32d4cd2adfa1970c198
| 1,527
|
py
|
Python
|
preprocess_data.py
|
sebbycake/criticize-ML
|
5422b2811077bf4a0bf8c0f226b77d4380767f82
|
[
"MIT"
] | null | null | null |
preprocess_data.py
|
sebbycake/criticize-ML
|
5422b2811077bf4a0bf8c0f226b77d4380767f82
|
[
"MIT"
] | null | null | null |
preprocess_data.py
|
sebbycake/criticize-ML
|
5422b2811077bf4a0bf8c0f226b77d4380767f82
|
[
"MIT"
] | null | null | null |
import pandas as pd
import gzip
from sklearn.model_selection import train_test_split
import os
from tqdm.auto import tqdm
def parse(path):
    """Yield one deserialized record per line of the gzipped file at `path`.

    Each line is expected to be a Python-literal dict (the Amazon QA dump
    format this script processes).
    """
    # FIX: the original left the gzip handle open forever; `with` guarantees
    # it is closed even if the caller stops iterating or a line fails.
    with gzip.open(path, 'rb') as g:
        for line in g:
            # SECURITY: eval() executes arbitrary code from the data file --
            # only run on trusted dumps. ast.literal_eval is the safe
            # drop-in if the data is guaranteed to be Python literals.
            yield eval(line)
def getDF(path):
    """Load every record of the gzipped file at `path` into a DataFrame."""
    # Index records by their position in the file.
    records = {idx: record for idx, record in enumerate(parse(path))}
    return pd.DataFrame.from_dict(records, orient='index')
# Category names are derived from the qa_*.json.gz archives present in data/;
# stripping the "qa_" prefix leaves the matching metadata archive name.
categories = [category[3:] for category in os.listdir("data") if category.endswith(".gz") and category.startswith("qa")]
for category in tqdm(categories):
    # if the category not processed to tsv yet
    if not os.path.isfile(f"data/{category.split('.')[0]}.tsv"):
        try:
            df1 = getDF(f'data/qa_{category}')
            df2 = getDF(f'data/{category}')
            # Join QA pairs with product metadata on the product id (asin).
            df = pd.merge(df1, df2, on="asin", how="left")
            df = df[["question", "answer", "description"]]
            df = df.dropna()
            df = df.drop_duplicates(subset="answer")
            print(df.head())
            df.to_csv(f"data/{category.split('.')[0]}.tsv", "\t")
        except:
            # NOTE(review): bare except silently skips any category that fails
            # to parse/merge -- consider logging the error and category name.
            pass
# Combine every per-category TSV into one table.
df = pd.concat((pd.read_csv(f"data/{f}", sep="\t") for f in os.listdir("data") if f.endswith(".tsv")))
df = df[["question", "description"]]
# Strip two leading/trailing characters from each description -- presumably
# the "['...']" wrapper of a stringified list; confirm against the raw data.
df["description"] = df["description"].apply(lambda x: x[2:-2])
# Rename to the target/input layout consumed downstream, with a task prefix.
df.columns = ["target_text", "input_text"]
df["prefix"] = "ask_question"
df.to_csv(f"data/data_all.tsv", "\t")
# 95/5 train/eval split.
train_df, eval_df = train_test_split(df, test_size=0.05)
train_df.to_csv("data/train_df.tsv", "\t")
eval_df.to_csv("data/eval_df.tsv", "\t")
| 27.763636
| 120
| 0.598559
|
4a0d5a81628f5e67af4663205e2c0ed1c8a0f2b0
| 1,988
|
py
|
Python
|
src/tests/component/engine_fixtures/mock_engine.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 8
|
2020-05-12T18:08:52.000Z
|
2021-12-27T06:11:00.000Z
|
src/tests/component/engine_fixtures/mock_engine.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 4
|
2020-05-13T16:07:49.000Z
|
2020-06-30T18:47:14.000Z
|
src/tests/component/engine_fixtures/mock_engine.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 3
|
2020-05-16T19:57:57.000Z
|
2020-11-01T08:43:31.000Z
|
# -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Mock engine"""
import copy
from cbc_binary_toolkit.engine import LocalEngineFactory
class MockLocalEngine():
    """Mock test engine"""

    def __init__(self, config):
        """Check the test config is wired through, then set up canned data."""
        assert config.string_default("Test") == "TestPassed"
        self.name = "MockEngine"
        self.config = config
        # Maps binary hash -> list of IOCs that analyze() should report.
        self.mock_return_data = {}

    def analyze(self, test_data):
        """Return an analysis report for test_data.

        Non-dict input yields a failure report; otherwise the IOCs previously
        registered via mock_engine_output for the sha256 hash are returned.
        """
        if isinstance(test_data, dict):
            binary_hash = test_data.get("sha256", None)
            iocs = [] if binary_hash is None else self.mock_return_data.get(binary_hash, [])
            return {
                "iocs": iocs,
                "engine_name": self.name,
                "binary_hash": binary_hash,
                "success": True
            }
        return {
            "iocs": [],
            "engine_name": self.name,
            "binary_hash": None,
            "success": False
        }

    def mock_engine_output(self, input_hash, return_iocs):
        """Register a deep copy of return_iocs to be reported for input_hash."""
        self.mock_return_data[input_hash] = copy.deepcopy(return_iocs)
class MockLocalEngineFactory(LocalEngineFactory):
    """Mock Factory for testing LocalEngineManager"""

    def create_engine(self, config):
        """Create a MockLocalEngine wired to the given config."""
        return MockLocalEngine(config)
| 32.590164
| 70
| 0.586519
|
4a0d5adb8709021d218a6c6db416f38b406a6d6c
| 2,099
|
py
|
Python
|
cpdb/social_graph/tests/queries/test_geographic_data_query.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 25
|
2018-07-20T22:31:40.000Z
|
2021-07-15T16:58:41.000Z
|
cpdb/social_graph/tests/queries/test_geographic_data_query.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 13
|
2018-06-18T23:08:47.000Z
|
2022-02-10T07:38:25.000Z
|
cpdb/social_graph/tests/queries/test_geographic_data_query.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 6
|
2018-05-17T21:59:43.000Z
|
2020-11-17T00:30:26.000Z
|
from django.test import TestCase
from robber import expect
from data.factories import AllegationFactory, OfficerFactory, OfficerAllegationFactory
from social_graph.queries.geographic_data_query import GeographyCrsDataQuery, GeographyTrrsDataQuery
from trr.factories import TRRFactory
class GeographyCrsDataQueryTestCase(TestCase):
    def test_data(self):
        """GeographyCrsDataQuery should return the CRs linked to the given
        officers plus any explicitly requested crids, excluding unrelated
        allegations."""
        officer_1 = OfficerFactory(id=1)
        officer_2 = OfficerFactory(id=2)
        officer_3 = OfficerFactory(id=3)
        officer_4 = OfficerFactory(id=4)
        officers = [officer_1, officer_2, officer_3, officer_4]
        allegation_1 = AllegationFactory(crid='123')
        allegation_2 = AllegationFactory(crid='456')
        allegation_3 = AllegationFactory(crid='789')
        # This allegation has no officer link and is not requested, so it
        # must not appear in the results.
        AllegationFactory(crid='987')
        OfficerAllegationFactory(
            officer=officer_1,
            allegation=allegation_1
        )
        OfficerAllegationFactory(
            officer=officer_1,
            allegation=allegation_2
        )
        OfficerAllegationFactory(
            officer=officer_2,
            allegation=allegation_2
        )
        # allegation_3 is pulled in via the explicit crid list, not via an
        # officer allegation.
        expected_data = [allegation_1.crid, allegation_2.crid, allegation_3.crid]
        results = [item.crid for item in list(GeographyCrsDataQuery([allegation_3.crid], officers).data())]
        expect(results).to.eq(expected_data)
class GeographyTrrsDataQueryTestCase(TestCase):
    def test_data(self):
        """GeographyTrrsDataQuery should return TRRs of the given officers
        plus explicitly requested TRR ids, excluding TRRs of other officers."""
        officer_1 = OfficerFactory(id=1)
        officer_2 = OfficerFactory(id=2)
        officer_3 = OfficerFactory(id=3)
        officer_4 = OfficerFactory(id=4)
        officer_5 = OfficerFactory(id=5)
        # officer_5 is deliberately left out of the query's officer list.
        officers = [officer_1, officer_2, officer_3, officer_4]
        trr_1 = TRRFactory(id=1, officer=officer_3)
        trr_2 = TRRFactory(id=2, officer=officer_4)
        trr_3 = TRRFactory(id=3, officer=officer_4)
        # Belongs to the excluded officer_5, so it must not be returned.
        TRRFactory(id=4, officer=officer_5)
        expected_data = [trr_1.id, trr_2.id, trr_3.id]
        results = [item.id for item in list(GeographyTrrsDataQuery([trr_3.id], officers).data())]
        expect(results).to.eq(expected_data)
| 36.824561
| 107
| 0.686041
|
4a0d5b39cb337d8b7a372f4809fab7c2269fb478
| 66
|
py
|
Python
|
Chapter 02/ch2_43.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 02/ch2_43.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 02/ch2_43.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
# Assign a string literal and report its runtime type with print().
x = "Shivay"
print(type(x))
| 16.5
| 36
| 0.651515
|
4a0d5b5d8c5af781db3e2882a0c5a7e44de8bc37
| 2,543
|
py
|
Python
|
airflow/providers/oracle/operators/oracle.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 8
|
2017-04-20T16:15:44.000Z
|
2020-10-11T13:44:10.000Z
|
airflow/providers/oracle/operators/oracle.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 219
|
2017-03-15T18:40:16.000Z
|
2022-02-28T22:52:43.000Z
|
airflow/providers/oracle/operators/oracle.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 3
|
2016-07-14T21:51:10.000Z
|
2020-10-12T13:26:36.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable, Mapping, Optional, Union
from airflow.models import BaseOperator
from airflow.providers.oracle.hooks.oracle import OracleHook
from airflow.utils.decorators import apply_defaults
class OracleOperator(BaseOperator):
    """
    Executes sql code in a specific Oracle database

    :param sql: the sql code to be executed. Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'
        (templated)
    :type sql: str or list[str]
    :param oracle_conn_id: reference to a specific Oracle database
    :type oracle_conn_id: str
    :param parameters: (optional) the parameters to render the SQL query with.
    :type parameters: mapping or iterable
    :param autocommit: if True, each command is automatically committed.
        (default value: False)
    :type autocommit: bool
    """
    # 'sql' is rendered through Jinja templating; '.sql' files are loaded.
    template_fields = ('sql',)
    template_ext = ('.sql',)
    ui_color = '#ededed'
    # NOTE: apply_defaults inspects this signature, so keep it explicit.
    @apply_defaults
    def __init__(
            self,
            sql: str,
            oracle_conn_id: str = 'oracle_default',
            parameters: Optional[Union[Mapping, Iterable]] = None,
            autocommit: bool = False,
            *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.oracle_conn_id = oracle_conn_id
        self.sql = sql
        self.autocommit = autocommit
        self.parameters = parameters
    def execute(self, context):
        """Run the (rendered) SQL against the configured Oracle connection."""
        self.log.info('Executing: %s', self.sql)
        hook = OracleHook(oracle_conn_id=self.oracle_conn_id)
        hook.run(
            self.sql,
            autocommit=self.autocommit,
            parameters=self.parameters)
| 37.397059
| 92
| 0.692489
|
4a0d5b8953354e4bebaf1c94e9a06cc0b7b21cf7
| 23,204
|
py
|
Python
|
pbl/spotify_plugs.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 41
|
2015-06-20T10:47:57.000Z
|
2022-01-31T16:55:37.000Z
|
pbl/spotify_plugs.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 5
|
2015-10-06T22:08:59.000Z
|
2016-05-21T04:47:38.000Z
|
pbl/spotify_plugs.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 13
|
2015-07-23T14:35:28.000Z
|
2020-07-04T05:27:33.000Z
|
'''
A set of sources and annotators for Spotify. An Spotify track has
the following attributes::
{
"src": "Teen Party",
"artist": "Various Artists",
"title": "Walk The Moon - Teen Party Intro",
"spotify": {
"album": {
"album_type": "album",
"name": "Walk The Moon - Playlist Intros",
"external_urls": {
"spotify": "https://open.spotify.com/album/6ZQf8UHq907D9hu5amANXX"
},
"uri": "spotify:album:6ZQf8UHq907D9hu5amANXX",
"href": "https://api.spotify.com/v1/albums/6ZQf8UHq907D9hu5amANXX",
"images": [
{
"url": "https://i.scdn.co/image/1d06a0a9262a6634ca3a1cf9a9a0855b2245ba81",
"width": 640,
"height": 640
},
{
"url": "https://i.scdn.co/image/2d2dff2f132443083b4368ebead2c71d4dcd7eb7",
"width": 300,
"height": 300
},
{
"url": "https://i.scdn.co/image/c7aa8589b67593d3117020a5a0080598a5997785",
"width": 64,
"height": 64
}
],
"type": "album",
"id": "6ZQf8UHq907D9hu5amANXX",
"available_markets": [ "AD", "...", ]
},
"name": "Walk The Moon - Teen Party Intro",
"uri": "spotify:track:5oPzMRHjORXQlLemgpfacm",
"external_urls": {
"spotify": "https://open.spotify.com/track/5oPzMRHjORXQlLemgpfacm"
},
"popularity": 5,
"explicit": false,
"preview_url": "https://p.scdn.co/mp3-preview/5e14b8b02dae9adf80f41fd0d4c03ca17002b939",
"track_number": 2,
"disc_number": 1,
"href": "https://api.spotify.com/v1/tracks/5oPzMRHjORXQlLemgpfacm",
"artists": [
{
"name": "Various Artists",
"external_urls": {
"spotify": "https://open.spotify.com/artist/0LyfQWJT6nXafLPZqxe9Of"
},
"uri": "spotify:artist:0LyfQWJT6nXafLPZqxe9Of",
"href": "https://api.spotify.com/v1/artists/0LyfQWJT6nXafLPZqxe9Of",
"type": "artist",
"id": "0LyfQWJT6nXafLPZqxe9Of"
}
],
"duration_ms": 7500,
"external_ids": {},
"type": "track",
"id": "5oPzMRHjORXQlLemgpfacm",
"available_markets": [ "AD", "AR", "...", ]
},
"duration": 7,
"id": "5oPzMRHjORXQlLemgpfacm"
}
New spotify track:
{
"src": "Teen Party",
"artist": "Various Artists",
"artist_id": "1234123412342134",
"title": "Walk The Moon - Teen Party Intro",
"duration": 7,
"id": "5oPzMRHjORXQlLemgpfacm"
"audio" : {
name: ""
},
artists: [
]
"primary_artist" : {
"name" : "artist name",
"id:" : "1234",
"popularity": 33,
"followers": 33
"genres" : [],
}
}
'''
from track_manager import tlib
import engine
import spotipy
import spotipy.util
import pprint
import simplejson as json
import cache_manager
from spotipy.oauth2 import SpotifyClientCredentials
cache = cache_manager.get_cache()
class PlaylistSource(object):
    '''
    A PBL source that generates a stream of tracks from the given Spotify
    playlist. If only a name is provided, the playlist will be searched for.
    Search success can be improved if the owner of the playlist is also
    provided.

    :param name: the name of the playlist
    :param uri: the uri of the playlist
    :param user: the owner of the playlist
    '''
    def __init__(self, name, uri=None, user=None):
        self.name = name
        self.uri = normalize_uri(uri)
        self.user = user
        self.next_offset = 0  # offset of the next page of playlist tracks
        self.limit = 100  # Spotify page size per request
        self.tracks = []  # track ids fetched so far
        self.total = 1  # playlist length; updated on the first fetch
        self.cur_index = 0  # index of the next track to hand out
    def _get_uri_from_name(self, name):
        '''Search Spotify and return the first playlist hit's URI, or None.'''
        results = _get_spotify().search(q=name, type='playlist')
        if len(results['playlists']['items']) > 0:
            return results['playlists']['items'][0]['uri']
        else:
            return None
    def _get_uri_from_name_and_user(self, name, user):
        '''Page through *user*'s playlists for a case-insensitive name match.'''
        results = _get_spotify().user_playlists(user)
        while results:
            for playlist in results['items']:
                if playlist['name'].lower() == name.lower():
                    return playlist['uri']
            if results['next']:
                results = _get_spotify().next(results)
            else:
                results = None
        return None
    def _get_more_tracks(self):
        '''Fetch the next page of tracks into self.tracks.'''
        # URI is either spotify:user:<user>:playlist:<id> (5 fields)
        # or spotify:playlist:<id> (3 fields).
        fields = self.uri.split(':')
        if len(fields) == 5:
            _,_,user,_,playlist_id = fields
        else:
            _,_,playlist_id = fields
        try:
            results = _get_spotify().playlist_tracks(playlist_id, limit=self.limit, offset=self.next_offset)
        except spotipy.SpotifyException as e:
            raise engine.PBLException(self, e.msg)
        self.total = results['total']
        for item in results['items']:
            track = item['track']
            if track and 'id' in track:
                self.tracks.append(track['id'])
                _add_track(self.name, track)
        self.next_offset += self.limit
    def next_track(self):
        '''Return the next track id, resolving and paging lazily; None at end.'''
        # Resolve the playlist URI on the first call.
        if self.uri == None:
            if self.user:
                self.uri = self._get_uri_from_name_and_user(self.name, self.user)
            else:
                self.uri = self._get_uri_from_name(self.name)
            if not self.uri:
                msg = "Can't find playlist named " + self.name
                if self.user:
                    msg += ' for user ' + self.user
                raise engine.PBLException(self, msg)
        # Fetch another page when the buffer is exhausted but more remain.
        if self.uri and self.cur_index >= len(self.tracks) \
                and len(self.tracks) < self.total:
            self._get_more_tracks()
        if self.cur_index < len(self.tracks):
            track = self.tracks[self.cur_index]
            self.cur_index += 1
            return track
        else:
            return None
class TrackSource(object):
    ''' A PBL Source that generates a stream of tracks from the given list of
    URIs

    :param uris: a list of spotify track uris
    '''
    def __init__(self, uris=None):
        # BUG FIX: the original default was a mutable list (uris=[]),
        # which is shared across all instances; default to None instead.
        self.name = 'Tracks '
        self.uris = [normalize_uri(uri) for uri in (uris or [])]
        self.buffer = None  # lazily filled with track ids on first call
    def next_track(self):
        '''Return the next track id, fetching all tracks on the first call.

        Returns None once the list is exhausted; raises PBLException on
        API errors or malformed tracks.
        '''
        if self.buffer == None:
            self.buffer = []
            try:
                results = _get_spotify().tracks(self.uris)
            except spotipy.SpotifyException as e:
                raise engine.PBLException(self, e.msg)
            for track in results['tracks']:
                if track and 'id' in track:
                    self.buffer.append(track['id'])
                    _add_track(self.name, track)
                else:
                    raise engine.PBLException(self, 'bad track')
        if len(self.buffer) > 0:
            return self.buffer.pop(0)
        else:
            return None
class TrackSourceByName(object):
    ''' A PBL Source that generates a track given its artist and title

    :param title: the title and/or artist of the track
    '''
    def __init__(self, title):
        self.name = title
        self.title = title
        self.uri = None  # set after the single track has been emitted
    def next_track(self):
        '''Emit exactly one track (the best search match), then None.'''
        if self.uri == None:
            try:
                track = _find_track_by_name(_get_spotify(), self.title)
                if track and 'id' in track:
                    _add_track(self.name, track)
                    self.uri = track['id']
                    return self.uri
                else:
                    raise engine.PBLException(self, "Can't find that track")
            except spotipy.SpotifyException as e:
                raise engine.PBLException(self, e.msg)
        else:
            return None
class AlbumSource(object):
    '''
    A PBL Source that generates a series of tracks given an album

    :param title: the title of the album
    :param artist: the artist of the album
    :param uri: the URI of the album
    '''
    def __init__(self, title=None, artist=None, uri=None):
        self.uri = normalize_uri(uri)
        self.title = title
        self.artist = artist
        self.name = 'album ' + title if title != None else uri
        self.buffer = None  # lazily filled with the album's track ids
    def _get_uri_from_artist_title(self, artist, title):
        '''Search Spotify for the album; return the first hit's URI or None.'''
        results = _get_spotify().search(q=title + ' ' + (artist if artist else ''), type='album')
        if len(results['albums']['items']) > 0:
            return results['albums']['items'][0]['uri']
        else:
            return None
    def next_track(self):
        '''Return the next track id of the album; None once exhausted.'''
        if self.buffer == None:
            # Resolve the album URI by search if only title/artist given.
            if self.title != None and self.uri == None:
                self.uri = self._get_uri_from_artist_title(self.artist, self.title)
            self.buffer = []
            if self.uri:
                _,_,id = self.uri.split(':')
                try:
                    results = _get_spotify().album_tracks(id)
                except spotipy.SpotifyException as e:
                    raise engine.PBLException(self, e.msg)
                for track in results['items']:
                    if track and 'id' in track:
                        self.buffer.append(track['id'])
                        _add_track(self.name, track)
            else:
                raise engine.PBLException(self, "Can't find that album");
        if len(self.buffer) > 0:
            return self.buffer.pop(0)
        else:
            return None
class ArtistTopTracks(object):
    ''' A PBL Source that generates a series of top tracks by the given artist

    :param name: the name of the artist
    :param uri: the uri of the artist
    '''
    def __init__(self, name=None, uri=None):
        self.uri = normalize_uri(uri)
        self.name = 'Top tracks by ' + name
        self.artist_name = name
        self.buffer = None  # lazily filled with the artist's top track ids
    def next_track(self):
        '''Return the next top track id; None once exhausted.'''
        if self.buffer == None:
            self.buffer = []
            # Resolve the artist URI by search if only a name was given.
            if self.uri == None:
                self.uri = _find_artist_by_name(_get_spotify(), self.artist_name)
            if self.uri != None:
                _,_,id = self.uri.split(':')
                try:
                    results = _get_spotify().artist_top_tracks(id)
                except spotipy.SpotifyException as e:
                    raise engine.PBLException(self, e.msg)
                for track in results['tracks']:
                    if track and 'id' in track:
                        self.buffer.append(track['id'])
                        _add_track(self.name, track)
            else:
                raise engine.PBLException(self, "Can't find that artist")
        if len(self.buffer) > 0:
            return self.buffer.pop(0)
        else:
            return None
class PlaylistSave(object):
    ''' A PBL Sink that saves the source stream of tracks to the given playlist

    :param source: the source of tracks to be saved
    :param playlist_name: the name of the playlist
    :param user: the owner of the playlist
    :param uri: the uri of the playlist
    '''
    def __init__(self, source, playlist_name= None, user=None, uri=None, \
            create=False, append=False, max_size=100):
        self.source = source
        self.uri = normalize_uri(uri)
        self.user = user
        self.name = 'Spotify Save'
        self.playlist_name = playlist_name
        self.max_size = max_size  # at most this many tracks are saved
        self.append = append  # append to the playlist instead of replacing
        self.create = create  # always create a new playlist if True
        self.buffer = []  # tracks accumulated from the source
        self.saved = False
    def next_track(self):
        '''Pass tracks through, buffering them; save once the stream ends
        or max_size is reached.'''
        track = self.source.next_track()
        if track and len(self.buffer) < self.max_size:
            self.buffer.append(track)
        elif not self.saved:
            self._save_playlist()
        return track
    def _save_playlist(self):
        '''Resolve/create the target playlist and write the buffered tracks.

        NOTE: this module is Python 2 (print statements, xrange below).
        '''
        self.saved = True
        if self.uri:
            # spotify:user:<user>:playlist:<pid>
            _, _, user, _, pid = self.uri.split(':')
        else:
            user = self.user
            pid = None
        sp = _get_spotify()
        if sp:
            if not pid:
                if self.playlist_name:
                    if self.create:
                        uri = None
                    else:
                        uri = _find_playlist_by_name(sp, self.user, self.playlist_name)
                    if uri:
                        print 'found', self.playlist_name, uri
                    else:
                        print 'creating new', self.playlist_name, 'playlist'
                        response = sp.user_playlist_create(self.user, self.playlist_name)
                        uri = response['uri']
                    pid = uri.split(':')[4]
            if pid:
                # Write in API-sized batches; the first batch replaces the
                # playlist contents unless append was requested.
                batch_size = 100
                uris = [ 'spotify:track:' + id for id in self.buffer]
                for start in xrange(0, len(uris), batch_size):
                    turis = uris[start:start+batch_size]
                    if start == 0 and not self.append:
                        print 'replace', start
                        sp.user_playlist_replace_tracks(user, pid, turis)
                    else:
                        print 'add', start
                        sp.user_playlist_add_tracks(user, pid, turis)
        else:
            print "Can't get authenticated access to spotify"
def _get_spotify():
    '''Return the shared spotipy client, creating and caching it on first use.

    Prefers an auth token from the engine environment (user-authorized
    client); otherwise falls back to app-only client-credentials auth.
    '''
    spotify = engine.getEnv('spotify')
    if spotify == None:
        auth_token = engine.getEnv('spotify_auth_token')
        if auth_token:
            spotify = spotipy.Spotify(auth=auth_token)
        else:
            spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
        spotify.trace_out = True
        engine.setEnv('spotify', spotify)
    return spotify
def _get_auth_spotify(user):
    # deprecated
    '''Prompt for a user token and return an authorized client (deprecated).'''
    # NOTE(review): ``auth_sp`` is never initialised at module level in this
    # file, so the first call would raise NameError on the comparison below.
    # Presumably dead code -- confirm before reviving.
    global auth_sp
    if auth_sp == None:
        scope = 'playlist-modify-public playlist-modify-private'
        token = spotipy.util.prompt_for_user_token(user, scope)
        if token:
            auth_sp = spotipy.Spotify(auth=token)
    return auth_sp
def _find_playlist_by_name(sp, user, name):
    '''Scan up to 1000 of *user*'s playlists for an exact name match.

    Returns the playlist URI, or None if not found in the first 1000.
    '''
    batch_size = 50
    for start in xrange(0, 1000, batch_size):  # xrange: Python 2 module
        playlists = sp.user_playlists(user, limit=batch_size, offset=start)
        for playlist in playlists['items']:
            if playlist['name'] == name:
                return playlist['uri']
    return None
def _find_artist_by_name(sp, name):
    '''Return the URI of the top artist search hit for *name*, or None.

    NOTE: the *sp* argument is currently unused; the shared client from
    _get_spotify() is used instead.
    '''
    hits = _get_spotify().search(q=name, type='artist')['artists']['items']
    return hits[0]['uri'] if hits else None
def _find_track_by_name(sp, name):
    '''Return the full track object of the top search hit for *name*, or None.

    NOTE: the *sp* argument is currently unused; the shared client from
    _get_spotify() is used instead.
    '''
    hits = _get_spotify().search(q=name, type='track')['tracks']['items']
    return hits[0] if hits else None
def _annotate_tracks_with_spotify_data_old(tids):
    '''Annotate tracks with raw Spotify track objects (superseded version).

    Tracks already annotated in the cache are skipped.
    '''
    tids = tlib.annotate_tracks_from_cache('spotify', tids)
    if len(tids) > 0:
        results = _get_spotify().tracks(tids)
        for track in results['tracks']:
            if track and 'id' in track:
                tlib.annotate_track(track['id'], 'spotify', track)
def _annotate_tracks_with_spotify_data_full(tids):
    '''Attach a flattened 'spotify' annotation combining track, album and
    primary-artist fields. (Python 2 print statements below.)'''
    # full annotation
    print "spotify full annotate", len(tids)
    tids = tlib.annotate_tracks_from_cache('spotify', tids)
    if len(tids) > 0:
        results = _get_spotify().tracks(tids)
        # Collect the album and artist ids so they can be fetched in bulk.
        album_ids = set()
        artist_ids = set()
        for track in results['tracks']:
            album_ids.add(track['album']['id'])
            for artist in track['artists']:
                artist_ids.add(artist['id'])
        print " spotify artist annotate", len(artist_ids)
        print " spotify album annotate", len(album_ids)
        albums = get_albums(album_ids)
        artists = get_artists(artist_ids)
        for track in results['tracks']:
            ntrack = {}
            primary_artist = artists[track['artists'][0]['id']]
            album = albums[track['album']['id']]
            ntrack['duration_ms'] = track['duration_ms']
            ntrack['explicit'] = track['explicit']
            ntrack['popularity'] = track['popularity']
            ntrack['track_number'] = track['track_number']
            ntrack['disc_number'] = track['disc_number']
            ntrack['primary_artist_genres'] = album['genres']
            ntrack['primary_artist_popularity'] = primary_artist['popularity']
            ntrack['primary_artist_followers'] = primary_artist['followers']
            ntrack['album_name'] = album['name']
            ntrack['album_id'] = album['id']
            ntrack['album_genres'] = album['genres']
            ntrack['album_release_date'] = album['release_date']
            ntrack['album_popularity'] = album['popularity']
            ntrack['album_type'] = album['album_type']
            # Disabled: embedding the full artist objects in the annotation.
            if False:
                ntrack['primary_artist'] = primary_artist
                full_artists = []
                for artist in track['artists']:
                    full_artists.append(artists[artist['id']])
                track['artists'] = full_artists
            tlib.annotate_track(track['id'], 'spotify', ntrack)
def get_albums(aids):
    '''Fetch flattened albums by id, consulting the item cache first.

    Returns a dict mapping album id to the flattened album.
    '''
    album_map, naids = get_items_from_cache(aids)
    max_per_batch = 20  # Spotify albums endpoint batch limit
    start = 0
    while start < len(naids):
        batch = naids[start:start + max_per_batch]
        results = _get_spotify().albums(batch)
        for album in results['albums']:
            falbum = flatten_album(album)
            album_map[ falbum['id']] = falbum
            put_item_in_cache(falbum)
        start += len(results['albums'])
    return album_map
def get_artists(aids):
    '''Fetch flattened artists by id, consulting the item cache first.

    Returns a dict mapping artist id to the flattened artist.
    '''
    max_per_batch = 50  # Spotify artists endpoint batch limit
    artist_map, naids = get_items_from_cache(aids)
    start = 0
    while start < len(naids):
        batch = naids[start:start + max_per_batch]
        results = _get_spotify().artists(batch)
        for artist in results['artists']:
            fartist = flatten_artist(artist)
            artist_map[ fartist['id'] ] = fartist
            put_item_in_cache(fartist)
        start += len(results['artists'])
    return artist_map
def get_items_from_cache(aids):
    '''Split *aids* into (items found in the cache keyed by id, ids missed).'''
    cached = {}
    missing = []
    for aid in aids:
        hit = cache.get('item', aid)
        if hit:
            cached[aid] = hit
        else:
            missing.append(aid)
    return cached, missing
def put_item_in_cache(item):
    '''Store a flattened album/artist in the shared item cache, keyed by id.'''
    cache.put('item', item['id'], item)
def flatten_album(album):
    '''Project a full Spotify album object down to the fields PBL uses.'''
    wanted = ('name', 'id', 'album_type', 'release_date', 'popularity', 'genres')
    return {key: album[key] for key in wanted}
def flatten_artist(artist):
    '''Project a full Spotify artist object down to the fields PBL uses.

    The nested followers object is collapsed to its total count.
    '''
    flat = {key: artist[key] for key in ('name', 'id', 'popularity')}
    flat['followers'] = artist['followers']['total']
    #flat['large_image'] = artist['images'][0]['url']
    return flat
def flatten_audio(audio):
    '''Audio-feature objects are already flat; return them unchanged.'''
    return audio
def _annotate_tracks_with_audio_features(tids):
    '''Annotate the given track ids with Spotify audio features.

    Tracks already annotated in the cache are skipped. 4xx errors (e.g.
    a 404 when a single requested track is missing) are deliberately
    ignored; other API errors are re-raised as PBLException.
    '''
    otids = tlib.annotate_tracks_from_cache('audio', tids)
    if len(otids) > 0:
        try:
            results = _get_spotify().audio_features(otids)
            for track in results:
                if track and 'id' in track:
                    tlib.annotate_track(track['id'], 'audio', track)
        except spotipy.SpotifyException as e:
            # we may get a 404 if we request features for a single
            # track and the track is missing. In this case we can
            # ignore the error
            if e.http_status >= 400 and e.http_status < 500:
                pass
            else:
                # BUG FIX: this is a module-level function, so the original
                # `raise engine.PBLException(self, e.msg)` hit a NameError
                # on `self`; pass None as the source component instead.
                raise engine.PBLException(None, e.msg)
def _add_track(source, track):
    '''Register a raw Spotify track object with the track library.'''
    dur = int(track['duration_ms'] / 1000.)  # milliseconds -> whole seconds
    tlib.make_track(track['id'], track['name'],
        track['artists'][0]['name'], dur, source)
def _flatten_track(track):
return track
def check_uri(uri):
    '''Raise ValueError if a non-empty *uri* is not a spotify: URI.'''
    if uri and not uri.startswith('spotify:'):
        raise ValueError('bad uri: ' + uri)
def normalize_uri(uri):
    '''Normalize an open.spotify.com URL into a spotify: URI.

    Converts URLs like:
        https://open.spotify.com/user/plamere/playlist/3F1VlEt8oRsKOk9hlp5JDF
        https://open.spotify.com/track/0v2Ad5NPKP8LKv48m0pVHx
    into URIs like:
        spotify:user:plamere:playlist:3F1VlEt8oRsKOk9hlp5JDF
        spotify:track:0v2Ad5NPKP8LKv48m0pVHx

    Falsy input is passed through unchanged; anything else that does not
    end up in spotify: form raises ValueError (validation inlined here,
    identical to check_uri).
    '''
    if uri:
        uri = uri.strip()
        if uri.startswith('https://open.spotify.com'):
            uri = uri.replace("https://open.spotify.com", "spotify")
            uri = uri.replace("/", ":")
        if not uri.startswith('spotify:'):
            raise ValueError('bad uri: ' + uri)
    return uri
# Register the two annotators with the track library so tracks can be
# enriched lazily, in batches of 50 ids per API call.
_spotify_annotator = {
    'name': 'spotify',
    'annotator': _annotate_tracks_with_spotify_data_full,
    'batch_size': 50
}
tlib.add_annotator(_spotify_annotator)
_audio_annotator = {
    'name': 'audio',
    'annotator': _annotate_tracks_with_audio_features,
    'batch_size': 50
}
tlib.add_annotator(_audio_annotator)
def test_urls():
    '''Smoke-test normalize_uri on known URL/URI forms (Python 2 prints).'''
    def test(uri):
        print uri, '-->', normalize_uri(uri)
    test("spotify:user:plamere:playlist:3F1VlEt8oRsKOk9hlp5JDF")
    test("https://open.spotify.com/user/plamere/playlist/3F1VlEt8oRsKOk9hlp5JDF")
    test("https://open.spotify.com/track/0v2Ad5NPKP8LKv48m0pVHx")
    test("https://open.spotify.com/album/0Gr8tHhOH8vzBTFqnf0YjT")
    test("https://open.spotify.com/artist/6ISyfZw4EVt16zhmH2lvxp")
def test_full_annotation():
    '''Annotate two known track ids and dump the results (Python 2 prints).'''
    tids = ["09CtPGIpYB4BrO8qb1RGsF", "4phICvcdAfp3eMhVHDls6m"]
    for tid in tids:
        tlib.make_track(tid, 'fake_name', 'fake_artist', 1, 'test')
    _annotate_tracks_with_spotify_data_full(tids)
    _annotate_tracks_with_audio_features(tids)
    for tid in tids:
        track = tlib.get_track(tid)
        print json.dumps(track, indent=4)
        print
if __name__ == '__main__':
    # Swap in the nocache module (presumably a no-op cache backend --
    # confirm) so the demo run does not touch the persistent cache.
    import nocache
    cache = nocache
    test_full_annotation()
| 34.073421
| 108
| 0.548224
|
4a0d5b9adee76cfde38633c5290e87999616d93b
| 123
|
py
|
Python
|
jsonalyzer/defaults.py
|
saurabh-hirani/jsonalyzer
|
51da350d72d8f3613595fbd03fd1dccdfde68a73
|
[
"0BSD"
] | null | null | null |
jsonalyzer/defaults.py
|
saurabh-hirani/jsonalyzer
|
51da350d72d8f3613595fbd03fd1dccdfde68a73
|
[
"0BSD"
] | null | null | null |
jsonalyzer/defaults.py
|
saurabh-hirani/jsonalyzer
|
51da350d72d8f3613595fbd03fd1dccdfde68a73
|
[
"0BSD"
] | null | null | null |
# Default settings used when the caller does not override them.
URI = '/'  # default endpoint path
CONN_TIMEOUT = 60  # connection timeout -- presumably seconds; confirm
HOST = 'localhost'  # default target host
CALLBACK = 'jsonalyzer.callbacks:no_op'  # default callback entry point
VALID_PROTOCOLS = ['http', 'https']  # protocols accepted for target URLs
| 20.5
| 39
| 0.699187
|
4a0d5bb3a8952451fb750d977a1efe3138ee81fa
| 270
|
py
|
Python
|
frappe/core/doctype/navbar_item/navbar_item.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/navbar_item/navbar_item.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/navbar_item/navbar_item.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class NavbarItem(Document):
    """Controller for the Navbar Item DocType; no custom behavior needed."""
    pass
| 20.769231
| 58
| 0.774074
|
4a0d5be60e82d9e1e59617fac27186eb2da7151a
| 2,176
|
py
|
Python
|
classify-pet-images/get_input_args.py
|
wesleyjacoby/dog_image_classifier
|
cb7032d8843c10955bc52652da4d6e8c019820af
|
[
"MIT"
] | null | null | null |
classify-pet-images/get_input_args.py
|
wesleyjacoby/dog_image_classifier
|
cb7032d8843c10955bc52652da4d6e8c019820af
|
[
"MIT"
] | 3
|
2022-01-13T03:05:14.000Z
|
2022-03-12T00:42:55.000Z
|
classify-pet-images/get_input_args.py
|
wesleyjacoby/dog_image_classifier
|
cb7032d8843c10955bc52652da4d6e8c019820af
|
[
"MIT"
] | null | null | null |
# PROGRAMMER: Wesley Jacoby
# DATE CREATED: 20 July 2020
# REVISED DATE:
# PURPOSE: Create a function that retrieves the following 3 command line inputs
# from the user using the Argparse Python module. If the user fails to
# provide some or all of the 3 inputs, then the default values are
# used for the missing inputs. Command Line Arguments:
# 1. Image Folder as --dir with default value 'pet_images'
# 2. CNN Model Architecture as --arch with default value 'vgg'
# 3. Text File with Dog Names as --dogfile with default value 'dognames.txt'
#
##
# Imports python modules
import argparse
def get_input_args():
    """Parse the program's three optional command line arguments.

    Recognised options (all optional; defaults used when omitted):
      --dir      path to the folder of images       (default 'pet_images/')
      --arch     type of CNN Model Architecture     (default 'vgg')
      --dogfile  text file of names of dog breeds   (default 'dognames.txt')

    Returns:
        argparse.Namespace with attributes dir, arch and dogfile.
    """
    parser = argparse.ArgumentParser()
    # (flag, default, help) specs for the three supported options.
    specs = (
        ('--dir', 'pet_images/', 'path to the folder of images.'),
        ('--arch', 'vgg', 'type of CNN Model Architecture.'),
        ('--dogfile', 'dognames.txt', 'text file of names of dog breeds.'),
    )
    for flag, default, help_text in specs:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    return parser.parse_args()
| 46.297872
| 112
| 0.701287
|
4a0d5c77811f80de54c920815555288e9dde8a44
| 3,247
|
py
|
Python
|
image_blurring_and_augmentation/randomBlurPG.py
|
cardyfib/image_processing_projects
|
e3a5ea7b3a7eb71bedc2d6f5612a3e8e89c36df4
|
[
"Apache-2.0"
] | 1
|
2021-05-12T04:43:18.000Z
|
2021-05-12T04:43:18.000Z
|
image_blurring_and_augmentation/randomBlurPG.py
|
cardyfib/image_processing_projects
|
e3a5ea7b3a7eb71bedc2d6f5612a3e8e89c36df4
|
[
"Apache-2.0"
] | null | null | null |
image_blurring_and_augmentation/randomBlurPG.py
|
cardyfib/image_processing_projects
|
e3a5ea7b3a7eb71bedc2d6f5612a3e8e89c36df4
|
[
"Apache-2.0"
] | 1
|
2021-04-25T10:46:52.000Z
|
2021-04-25T10:46:52.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 15:18:51 2018
Copyright (c) Prasanth "Prash" Ganesan
Author email: <prasganesan.pg@gmail.com>
Description:
This program creates a blurry version of an input image. The type of blur
is chosen randomly between Gaussian and average blur. The radius of the
blur is also random but the range is hardcoded and can be changed in the
program if the user wants to.
Inputs:
input_dir = path of the directory where the image files are present
out_dir = path of directory where the output images have to be saved
Outputs:
The blurred images are automatically saved in the destination folder along
with a log file which mentions the random blur type and the blur radius
applied to each image file.
Literature used:
[1] https://github.com/RaphaelMeudec/deblur-gan
-------------------------------------------------------------------------------
"""
# Program starts here
from PIL import ImageFilter, Image
import os
import random
import matplotlib.pyplot as plt
# Secondary Functions
def load_imgRGB(img_path):
    '''Open and return the image at *img_path* as a PIL Image object.'''
    return Image.open(img_path)
def is_an_image(filename):
    '''Return True if *filename* has a recognised image extension.

    BUG FIX: the original used a substring test (``ext in filename``),
    which wrongly accepted names like 'x.png.txt', and was case-sensitive,
    rejecting 'photo.JPG'. Match case-insensitively at the end of the name.
    '''
    return filename.lower().endswith(('.png', '.jpg', '.jpeg'))
def list_img_files(directory):
    '''Return full paths of the image files directly inside *directory*.'''
    return [
        os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if is_an_image(entry)
    ]
def randomBlurPG(img_path,out_dir,random_radius):
    '''Blur one image with a randomly chosen filter (box or Gaussian).

    A coin flip picks the filter type; the chosen type and radius are
    appended to ``log.txt`` inside *out_dir*. Returns the (sharp, blurred)
    pair of PIL images.
    '''
    sharpimg = load_imgRGB(img_path)
    if bool(random.randint(0,1)):
        blurredimg = sharpimg.filter(ImageFilter.BoxBlur(radius=random_radius))
        with open(out_dir+'log.txt', 'a') as f:
            f.write('{} {} {}\n'.format(os.path.basename(img_path), "BoxBlurRadius =", random_radius))
    else:
        blurredimg = sharpimg.filter(ImageFilter.GaussianBlur(radius=random_radius))
        with open(out_dir+'log.txt', 'a') as f:
            f.write('{} {} {}\n'.format(os.path.basename(img_path), "GaussianBlurRadius =", random_radius))
    return sharpimg,blurredimg
def save_image(img, path):
    '''Write a PIL image to *path* (format inferred from the extension).'''
    img.save(path)
def createBlurBatchPG(input_dir,out_dir):
    '''Blur every image in *input_dir* with a random radius and save the
    results (plus a log.txt of applied blurs) into *out_dir*.'''
    listimgs = list_img_files(input_dir)
    min_blur_radius = 2 #Pixel units
    max_blur_radius = 5 #Pixel units
    for img_path in listimgs:
        # random.sample returns a one-element list; [0] extracts the radius.
        rand_num = random.sample(range(min_blur_radius, max_blur_radius+1), 1)
        sharpimg,blurredimg = randomBlurPG(img_path,out_dir,rand_num[0])
        out = os.path.join(out_dir,os.path.basename(img_path))
        save_image(blurredimg, out)
        print(os.path.basename(img_path)+" Done")
# Main function
if __name__ == "__main__":
    # Replace these placeholders with real input/output directories
    # before running.
    input_dir = 'Z:/<path goes in here>'
    out_dir = 'Z:/<path goes in here>'
    createBlurBatchPG(input_dir,out_dir)
    print("Blurring complete")
| 35.681319
| 108
| 0.63782
|
4a0d5d47c8ebddc2f6f065a8a45f876af8a088e7
| 619
|
py
|
Python
|
exercise_11.py
|
pavlovcoder/python-programs-set-1
|
c582955930e88e4e7d59d35f7351037a4bfa2254
|
[
"MIT"
] | 1
|
2021-01-27T09:01:33.000Z
|
2021-01-27T09:01:33.000Z
|
exercise_11.py
|
pavlovcoder/python-programs-set-1
|
c582955930e88e4e7d59d35f7351037a4bfa2254
|
[
"MIT"
] | null | null | null |
exercise_11.py
|
pavlovcoder/python-programs-set-1
|
c582955930e88e4e7d59d35f7351037a4bfa2254
|
[
"MIT"
] | 1
|
2021-01-26T13:22:21.000Z
|
2021-01-26T13:22:21.000Z
|
# Exercise 11: print the built-in documentation (``__doc__``) of a
# Python built-in function, framed by banner text.
print(
    '-----------------------------------------\n'\
    'Practical python education || Exercise-11:\n'\
    '-----------------------------------------\n'
)
print(
    'Task:\n'\
    '-----------------------------------------\n'\
    'Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s).\n'
)
print(
    'Solution:\n'\
    '-----------------------------------------'\
)
# abs.__doc__ holds the interpreter-provided docstring for abs().
print("abs() - ", abs.__doc__)
print(
    '\n-----------------------------------------\n'\
    'Copyright 2018 Vladimir Pavlov. All Rights Reserved.\n'\
    '-----------------------------------------'
)
| 25.791667
| 110
| 0.355412
|
4a0d5d7173e279b7a608f9a7bb9a6d4a52d5b65f
| 827
|
py
|
Python
|
example/Blinker_Number/Number_WiFi/Number_WiFi.py
|
Victor-He/blinker-py
|
52eefadd382cd82124883d58e032b7cf7298264f
|
[
"MIT"
] | 1,267
|
2018-04-12T07:07:04.000Z
|
2022-03-31T06:30:32.000Z
|
example/Blinker_Number/Number_WiFi/Number_WiFi.py
|
Victor-He/blinker-py
|
52eefadd382cd82124883d58e032b7cf7298264f
|
[
"MIT"
] | 16
|
2020-01-31T00:44:29.000Z
|
2022-03-19T12:05:03.000Z
|
example/Blinker_Number/Number_WiFi/Number_WiFi.py
|
Victor-He/blinker-py
|
52eefadd382cd82124883d58e032b7cf7298264f
|
[
"MIT"
] | 25
|
2018-07-26T02:39:31.000Z
|
2022-03-24T08:29:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Blinker WiFi example: one button widget and one number widget.

A press of the app button is logged and echoed back with refreshed widget
attributes; every raw data message increments a counter that is pushed to
the number widget.
"""
from Blinker import Blinker, BlinkerButton, BlinkerNumber
from Blinker.BlinkerDebug import *
# Enable full debug logging for the Blinker library.
BLINKER_DEBUG.debugAll()
# Device secret key issued by the Blinker service; replace before running.
auth = 'Your Device Secret Key'
Blinker.mode("BLINKER_WIFI")
Blinker.begin(auth)
# Widgets are bound to the data keys configured in the Blinker app.
button1 = BlinkerButton("btn-abc")
number1 = BlinkerNumber("num-abc")
counter = 0  # number of raw data messages received so far
def button1_callback(state):
    """Log the button state and echo it back with updated icon/color/text."""
    BLINKER_LOG('get button state: ', state)
    button1.icon('icon_1')
    button1.color('#FFFFFF')
    button1.text('Your button name or describe')
    button1.print(state)
def data_callback(data):
    """Count incoming raw data messages and display the running total."""
    global counter
    BLINKER_LOG("Blinker readString: ", data)
    counter += 1
    number1.print(counter)
# Register the callbacks before entering the run loop.
button1.attach(button1_callback)
Blinker.attachData(data_callback)
if __name__ == '__main__':
    while True:
        Blinker.run()
| 19.232558
| 57
| 0.696493
|
4a0d5f3b988f77fa896e3347b81f85863cbaff86
| 836
|
py
|
Python
|
pypy/module/__builtin__/test/test_apply.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/module/__builtin__/test/test_apply.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/module/__builtin__/test/test_apply.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import autopath
# This is a very trivial series of tests. If apply is subtlely broken,
# we will have to find out some other way.
class AppTestApply:
    """App-level tests for the ``apply()`` builtin.

    NOTE(review): this is Python 2 code run app-level under the PyPy
    interpreter being tested -- ``apply`` does not exist in Python 3, and
    ``kwargs.values()`` returning a list (so that ``+`` concatenates) is
    also Python 2 behaviour.
    """
    def test_trivial_listonly(self):
        # apply(f, args) is equivalent to f(*args): positional args only.
        def mymin(*args):
            return min(list(args))
        assert apply(mymin, [-1,-2,-3,-4]) == -4
    def test_trivial_dictonly(self):
        # Empty positional tuple, keyword arguments only.
        def mymin(*arr, **kwargs):
            return min(list(arr) + kwargs.values())
        assert apply(mymin,
                     [], {'null' : 0, 'one': 1, 'two' : 2}) == (
            0)
    def test_trivial(self):
        # Positional and keyword arguments combined; minimum is positional -4.
        def mymin(*arr, **kwargs):
            return min(list(arr) + kwargs.values())
        assert apply(mymin,
                     [-1,-2,-3,-4],
                     {'null' : 0, 'one': 1, 'two' : 2}) == (
            (-4))
| 29.857143
| 72
| 0.472488
|
4a0d5f3c052ef17fa8812a223ec7aeaaab8d25c8
| 275
|
py
|
Python
|
synapse/tests/test_lookup_phonenum.py
|
larrycameron80/synapse
|
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
|
[
"Apache-2.0"
] | null | null | null |
synapse/tests/test_lookup_phonenum.py
|
larrycameron80/synapse
|
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
|
[
"Apache-2.0"
] | 4
|
2017-10-03T21:50:40.000Z
|
2017-11-20T15:49:38.000Z
|
synapse/tests/test_lookup_phonenum.py
|
larrycameron80/synapse
|
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
|
[
"Apache-2.0"
] | null | null | null |
from synapse.tests.common import *
import synapse.lookup.phonenum as s_l_phone
class PhLookTest(SynTest):
    """Sanity checks for country-code lookup of phone numbers."""

    def test_lookup_phonenum(self):
        # NANP numbers: area code 807 is Canadian, 703 is in the US.
        expected = (
            (18075551212, 'ca'),
            (17035551212, 'us'),
        )
        for number, country in expected:
            self.eq(s_l_phone.getPhoneInfo(number)['cc'], country)
| 27.5
| 64
| 0.723636
|
4a0d5ff0f79c04e1e9f9448fa2ddddfad6604da6
| 4,643
|
py
|
Python
|
models/LinearityIQA.py
|
guanghaoyin/CVRKD-IQA
|
b596a53c064d5472feb63fc61abe0b100e40606f
|
[
"MIT"
] | 25
|
2021-12-09T10:01:16.000Z
|
2022-03-25T03:10:27.000Z
|
models/LinearityIQA.py
|
guanghaoyin/CVRKD-IQA
|
b596a53c064d5472feb63fc61abe0b100e40606f
|
[
"MIT"
] | 1
|
2022-03-07T08:33:20.000Z
|
2022-03-08T08:44:38.000Z
|
models/LinearityIQA.py
|
guanghaoyin/CVRKD-IQA
|
b596a53c064d5472feb63fc61abe0b100e40606f
|
[
"MIT"
] | 5
|
2022-03-02T08:12:29.000Z
|
2022-03-17T05:22:19.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import numpy as np
def SPSP(x, P=1, method='avg'):
    """Spatial pyramid pooling over a batch of feature maps.

    For each pyramid level ``p`` in ``1..P`` the map is pooled with kernel
    size ``(H // p, W // p)`` and flattened, so level ``p`` contributes
    roughly ``p * p`` spatial cells per channel; all levels are concatenated.

    Args:
        x: 4-D feature tensor of shape (batch, channels, H, W).
        P: number of pyramid levels (>= 1).
        method: pooling scheme -- 'max', 'min', 'avg', 'std',
            'maxmin' (max and min concatenated), or any other value for
            statistical pooling (mean and std concatenated).

    Returns:
        2-D tensor of shape (batch, num_features) with pooled features.
    """
    batch_size = x.size(0)
    map_size = x.size()[-2:]
    pool_features = []
    for p in range(1, P + 1):
        # BUGFIX: np.int is a removed deprecated alias (NumPy >= 1.24);
        # the builtin int gives the identical truncating conversion here.
        pool_size = [int(d // p) for d in map_size]
        if method == 'maxmin':
            M = F.max_pool2d(x, pool_size)
            m = -F.max_pool2d(-x, pool_size)  # min pool via negated max pool
            pool_features.append(torch.cat((M, m), 1).view(batch_size, -1))  # max & min pooling
        elif method == 'max':
            M = F.max_pool2d(x, pool_size)
            pool_features.append(M.view(batch_size, -1))  # max pooling
        elif method == 'min':
            m = -F.max_pool2d(-x, pool_size)
            pool_features.append(m.view(batch_size, -1))  # min pooling
        elif method == 'avg':
            a = F.avg_pool2d(x, pool_size)
            pool_features.append(a.view(batch_size, -1))  # average pooling
        else:
            # std = sqrt(E[x^2] - E[x]^2); relu clamps tiny negative values
            # caused by floating-point round-off.
            m1 = F.avg_pool2d(x, pool_size)
            rm2 = torch.sqrt(F.relu(F.avg_pool2d(torch.pow(x, 2), pool_size) - torch.pow(m1, 2)))
            if method == 'std':
                pool_features.append(rm2.view(batch_size, -1))  # std pooling
            else:
                pool_features.append(torch.cat((m1, rm2), 1).view(batch_size, -1))  # statistical pooling: mean & std
    return torch.cat(pool_features, dim=1)
class LinearityIQA(nn.Module):
    """Image quality assessment network over a pretrained backbone.

    Features are tapped from two intermediate stages of an ImageNet-pretrained
    torchvision backbone, pooled with SPSP, reduced by two FC heads, and
    combined by a final regression layer into one quality score.
    """
    def __init__(self, arch='resnext101_32x8d', pool='avg', use_bn_end=False, P6=1, P7=1):
        """Build the model.

        Args:
            arch: torchvision model name ('alexnet', 'vgg16', or a ResNet/
                ResNeXt variant).
            pool: SPSP pooling method (see SPSP).
            use_bn_end: if True, append BatchNorm1d to each regression output.
            P6: pyramid levels for the first tapped feature map.
            P7: pyramid levels for the second tapped feature map.
        """
        super(LinearityIQA, self).__init__()
        self.pool = pool
        self.use_bn_end = use_bn_end
        # 'maxmin' and mean&std pooling emit two values per cell, doubling
        # the pooled feature width (see SPSP).
        if pool in ['max', 'min', 'avg', 'std']:
            c = 1
        else:
            c = 2
        self.P6 = P6  # pyramid levels for the id1 feature map
        self.P7 = P7  # pyramid levels for the id2 feature map
        # Drop the backbone's final pooling/classifier layers.
        features = list(models.__dict__[arch](pretrained=True).children())[:-2]
        # id1/id2 are indices into the backbone's child modules at which
        # intermediate features are tapped; they depend on the torchvision
        # module layout for each architecture.
        if arch == 'alexnet':
            in_features = [256, 256]
            self.id1 = 9
            self.id2 = 12
            features = features[0]
        elif arch == 'vgg16':
            in_features = [512, 512]
            self.id1 = 23
            self.id2 = 30
            features = features[0]
        elif 'res' in arch:
            self.id1 = 6
            self.id2 = 7
            if arch == 'resnet18' or arch == 'resnet34':
                in_features = [256, 512]
            else:
                in_features = [1024, 2048]
        else:
            print('The arch is not implemented!')
        self.features = nn.Sequential(*features)
        # Dimension-reduction heads: the input width accounts for the channel
        # count, the pooling multiplier c, and sum(p^2) cells over all levels.
        self.dr6 = nn.Sequential(nn.Linear(in_features[0] * c * sum([p * p for p in range(1, self.P6+1)]), 1024),
                                 nn.BatchNorm1d(1024),
                                 nn.Linear(1024, 256),
                                 nn.BatchNorm1d(256),
                                 nn.Linear(256, 64),
                                 nn.BatchNorm1d(64), nn.ReLU())
        self.dr7 = nn.Sequential(nn.Linear(in_features[1] * c * sum([p * p for p in range(1, self.P7+1)]), 1024),
                                 nn.BatchNorm1d(1024),
                                 nn.Linear(1024, 256),
                                 nn.BatchNorm1d(256),
                                 nn.Linear(256, 64),
                                 nn.BatchNorm1d(64), nn.ReLU())
        # Per-stage quality regressors plus a final regressor over the
        # concatenated stage features.
        if self.use_bn_end:
            self.regr6 = nn.Sequential(nn.Linear(64, 1), nn.BatchNorm1d(1))
            self.regr7 = nn.Sequential(nn.Linear(64, 1), nn.BatchNorm1d(1))
            self.regression = nn.Sequential(nn.Linear(64 * 2, 1), nn.BatchNorm1d(1))
        else:
            self.regr6 = nn.Linear(64, 1)
            self.regr7 = nn.Linear(64, 1)
            self.regression = nn.Linear(64 * 2, 1)
    def extract_features(self, x):
        """Run the backbone; return concatenated stage features and the
        per-stage quality predictions ([q6, q7])."""
        f, pq = [], []
        for ii, model in enumerate(self.features):
            x = model(x)
            if ii == self.id1:
                x6 = SPSP(x, P=self.P6, method=self.pool)
                x6 = self.dr6(x6)
                f.append(x6)
                pq.append(self.regr6(x6))
            if ii == self.id2:
                x7 = SPSP(x, P=self.P7, method=self.pool)
                x7 = self.dr7(x7)
                f.append(x7)
                pq.append(self.regr7(x7))
        f = torch.cat(f, dim=1)
        return f, pq
    def forward(self, x):
        """Return ([q6, q7, q], q): the per-stage scores plus the fused
        score q, and the fused score itself."""
        f, pq = self.extract_features(x)
        s = self.regression(f)
        pq.append(s)
        return pq, s
if __name__ == "__main__":
    # Smoke test: score one random image with the default backbone
    # (downloads pretrained weights on first run).
    x = torch.rand((1,3,224,224))
    net = LinearityIQA()
    net.train(False)  # eval mode so BatchNorm uses running statistics
    # print(net.dr6)
    # print(net.dr7)
    y, pred = net(x)
    print(pred)
| 37.144
| 117
| 0.496662
|
4a0d6005d1afad42746dca80d7fdbed4bad27eee
| 2,436
|
py
|
Python
|
snr/prelude/abstract_loop.py
|
sfshaw/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | 1
|
2021-03-09T21:54:56.000Z
|
2021-03-09T21:54:56.000Z
|
snr/prelude/abstract_loop.py
|
sfshaw-calpoly/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | null | null | null |
snr/prelude/abstract_loop.py
|
sfshaw-calpoly/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | 1
|
2021-12-04T19:51:18.000Z
|
2021-12-04T19:51:18.000Z
|
from abc import ABC, abstractmethod
from typing import Any
from .abstract_component import AbstractComponent
from .abstract_factory import AbstractFactory
from .abstract_node import AbstractNode
from .page import DataKey
from .task import SomeTasks, TaskHandlerMap
class AbstractLoop(AbstractComponent, ABC):
    """A Node component that runs outside the main thread event loop.

    Base loop implementation may use any concurrency style, ThreadLoop is
    provided. User implemented loops will inherit from base loops such as
    ThreadLoop.

    The loop lifecycle:
    Main thread                     Loop context
    1. Loop()              - Constructed
    2. begin()             - Started by Node
                                    3. setup()  - Runs user setup
                                    4. loop_handler()  - Runs its loop
    4. join()              - Called from outside the loop to end it
    5. set_terminate_flag()          - Signals loop to end
    5. halt()              - Cleans up after the running loop, should
                             be reloadable now
    6. terminate()         - Called by the Node to clean the entire
                             module up

    The endpoint has its loop handler function run according to its
    tick_rate (Hz).
    """
    # Factory that produces this loop component (see AbstractFactory).
    factory: AbstractFactory['AbstractLoop']
    # Owning node; target of schedule() and store_data() delegation below.
    parent: AbstractNode
    # Handlers for tasks, keyed as defined by TaskHandlerMap.
    task_handlers: TaskHandlerMap
    # Delay between loop iterations in seconds (presumably 1 / tick_rate
    # from the class docstring -- TODO confirm against base loop impls).
    delay_s: float
    @abstractmethod
    def setup(self) -> None:
        '''User implemented method run at the beginning of the Loop's loop
        '''
        ...
    @abstractmethod
    def loop(self) -> None:
        '''User implemented method run per loop iteration
        '''
        ...
    @abstractmethod
    def set_terminate_flag(self) -> None:
        '''Base loop function to signal termination, used by join()
        '''
        ...
    @abstractmethod
    def is_terminated(self) -> bool:
        '''Base loop function to indicate whether loop execution has finished
        '''
        ...
    def schedule(self, t: SomeTasks) -> None:
        '''Delegate task scheduling to the parent node.
        '''
        self.parent.schedule(t)
    def store_data(self,
                   key: DataKey,
                   data: Any,
                   process: bool = True,
                   ) -> None:
        '''Delegate data storage to the parent node.
        '''
        self.parent.store_data(key, data, process)
| 32.052632
| 77
| 0.55624
|
4a0d604fd8c2d3a3a96cf509502dfc28d6da46d0
| 3,166
|
py
|
Python
|
samples/cli/accelbyte_py_sdk_cli/social/_public_create_user_namespace_slot.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
samples/cli/accelbyte_py_sdk_cli/social/_public_create_user_namespace_slot.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
samples/cli/accelbyte_py_sdk_cli/social/_public_create_user_namespace_slot.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-social-service (1.29.2)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.social import public_create_user_namespace_slot as public_create_user_namespace_slot_internal
from accelbyte_py_sdk.api.social.models import ErrorEntity
# NOTE: generated code (see file header) -- comments only, no logic edits.
# No docstring is added to the command function: click would surface it as
# the CLI help text, changing runtime behavior.
@click.command()
@click.argument("user_id", type=str)
@click.option("--checksum", "checksum", type=str)
@click.option("--custom_attribute", "custom_attribute", type=str)
@click.option("--file", "file", type=str)
@click.option("--label", "label", type=str)
@click.option("--tags", "tags", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def public_create_user_namespace_slot(
    user_id: str,
    checksum: Optional[str] = None,
    custom_attribute: Optional[str] = None,
    file: Optional[str] = None,
    label: Optional[str] = None,
    tags: Optional[str] = None,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # --doc: print the wrapped SDK operation's docstring and exit.
    if doc:
        click.echo(public_create_user_namespace_slot_internal.__doc__)
        return
    # Either pass an explicit Authorization header or log in via the helper.
    x_additional_headers = None
    if login_with_auth:
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        login_as_internal(login_as)
    # --tags arrives as a JSON array string; decode it to a list of strings.
    if tags is not None:
        try:
            tags_json = json.loads(tags)
            tags = [str(i0) for i0 in tags_json]
        except ValueError as e:
            raise Exception(f"Invalid JSON for 'tags'. {str(e)}") from e
    result, error = public_create_user_namespace_slot_internal(
        user_id=user_id,
        checksum=checksum,
        custom_attribute=custom_attribute,
        file=file,
        label=label,
        tags=tags,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"publicCreateUserNamespaceSlot failed: {str(error)}")
    # Emit the result as YAML, preserving the SDK's field order.
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
# Metadata attributes consumed by the CLI framework/generator.
public_create_user_namespace_slot.operation_id = "publicCreateUserNamespaceSlot"
public_create_user_namespace_slot.is_deprecated = False
| 34.043011
| 119
| 0.710676
|
4a0d60a57ece8a1340dcb4cd9c742a8fc07e6364
| 732
|
py
|
Python
|
tests/sql_engine_utils_test.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 3
|
2018-04-28T13:06:14.000Z
|
2020-06-09T02:39:44.000Z
|
tests/sql_engine_utils_test.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 1
|
2021-09-09T07:43:25.000Z
|
2021-09-09T10:47:56.000Z
|
tests/sql_engine_utils_test.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 6
|
2019-06-11T18:59:57.000Z
|
2021-03-02T19:14:42.000Z
|
"""Tests for sql_engine_util."""
import unittest
from perfkitbenchmarker import sql_engine_utils
from tests import pkb_common_test_case
class SqlEngineUtilTest(pkb_common_test_case.PkbCommonTestCase):

  def testGetDbEngineType(self):
    """Known engine strings map to their base engine type."""
    cases = (
        ('aurora-postgresql', 'postgres'),
        ('aurora-mysql', 'mysql'),
        ('sqlserver-ex', 'sqlserver'),
        ('mysql', 'mysql'),
    )
    for engine, expected in cases:
      self.assertEqual(
          sql_engine_utils.GetDbEngineType(engine), expected)
    # An unrecognized engine string raises TypeError.
    with self.assertRaises(TypeError):
      sql_engine_utils.GetDbEngineType('abc')
if __name__ == '__main__':
unittest.main()
| 28.153846
| 74
| 0.748634
|
4a0d62a06844110b48b4352360606adb0c39be0d
| 34,930
|
py
|
Python
|
alphafold/common/residue_constants.py
|
konstin/alphafold
|
e2147eb25c4bd4150d11d63033fe3647c7a61500
|
[
"Apache-2.0"
] | null | null | null |
alphafold/common/residue_constants.py
|
konstin/alphafold
|
e2147eb25c4bd4150d11d63033fe3647c7a61500
|
[
"Apache-2.0"
] | null | null | null |
alphafold/common/residue_constants.py
|
konstin/alphafold
|
e2147eb25c4bd4150d11d63033fe3647c7a61500
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in AlphaFold."""
import collections
import functools
import os
from typing import List, Mapping, Tuple
import numpy as np
import tree
# Internal import (35fd).
stereo_chemical_props_path = 'alphafold/common/stereo_chemical_props.txt'
# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096
# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
chi_angles_atoms = {
'ALA': [],
# Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
'ARG': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'NE'], ['CG', 'CD', 'NE', 'CZ']],
'ASN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
'ASP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
'CYS': [['N', 'CA', 'CB', 'SG']],
'GLN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'OE1']],
'GLU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'OE1']],
'GLY': [],
'HIS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'ND1']],
'ILE': [['N', 'CA', 'CB', 'CG1'], ['CA', 'CB', 'CG1', 'CD1']],
'LEU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'LYS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'CE'], ['CG', 'CD', 'CE', 'NZ']],
'MET': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'SD'],
['CB', 'CG', 'SD', 'CE']],
'PHE': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'PRO': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD']],
'SER': [['N', 'CA', 'CB', 'OG']],
'THR': [['N', 'CA', 'CB', 'OG1']],
'TRP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'TYR': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'VAL': [['N', 'CA', 'CB', 'CG1']],
}
# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
chi_angles_mask = [
[0.0, 0.0, 0.0, 0.0], # ALA
[1.0, 1.0, 1.0, 1.0], # ARG
[1.0, 1.0, 0.0, 0.0], # ASN
[1.0, 1.0, 0.0, 0.0], # ASP
[1.0, 0.0, 0.0, 0.0], # CYS
[1.0, 1.0, 1.0, 0.0], # GLN
[1.0, 1.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[1.0, 1.0, 0.0, 0.0], # HIS
[1.0, 1.0, 0.0, 0.0], # ILE
[1.0, 1.0, 0.0, 0.0], # LEU
[1.0, 1.0, 1.0, 1.0], # LYS
[1.0, 1.0, 1.0, 0.0], # MET
[1.0, 1.0, 0.0, 0.0], # PHE
[1.0, 1.0, 0.0, 0.0], # PRO
[1.0, 0.0, 0.0, 0.0], # SER
[1.0, 0.0, 0.0, 0.0], # THR
[1.0, 1.0, 0.0, 0.0], # TRP
[1.0, 1.0, 0.0, 0.0], # TYR
[1.0, 0.0, 0.0, 0.0], # VAL
]
# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure.
chi_pi_periodic = [
[0.0, 0.0, 0.0, 0.0], # ALA
[0.0, 0.0, 0.0, 0.0], # ARG
[0.0, 0.0, 0.0, 0.0], # ASN
[0.0, 1.0, 0.0, 0.0], # ASP
[0.0, 0.0, 0.0, 0.0], # CYS
[0.0, 0.0, 0.0, 0.0], # GLN
[0.0, 0.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[0.0, 0.0, 0.0, 0.0], # HIS
[0.0, 0.0, 0.0, 0.0], # ILE
[0.0, 0.0, 0.0, 0.0], # LEU
[0.0, 0.0, 0.0, 0.0], # LYS
[0.0, 0.0, 0.0, 0.0], # MET
[0.0, 1.0, 0.0, 0.0], # PHE
[0.0, 0.0, 0.0, 0.0], # PRO
[0.0, 0.0, 0.0, 0.0], # SER
[0.0, 0.0, 0.0, 0.0], # THR
[0.0, 0.0, 0.0, 0.0], # TRP
[0.0, 1.0, 0.0, 0.0], # TYR
[0.0, 0.0, 0.0, 0.0], # VAL
[0.0, 0.0, 0.0, 0.0], # UNK
]
# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
# psi and chi angles:
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
# The atom positions are relative to the axis-end-atom of the corresponding
# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
# is defined such that the dihedral-angle-definiting atom (the last entry in
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
# format: [atomname, group_idx, rel_position]
rigid_group_atom_positions = {
'ALA': [
['N', 0, (-0.525, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.529, -0.774, -1.205)],
['O', 3, (0.627, 1.062, 0.000)],
],
'ARG': [
['N', 0, (-0.524, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.524, -0.778, -1.209)],
['O', 3, (0.626, 1.062, 0.000)],
['CG', 4, (0.616, 1.390, -0.000)],
['CD', 5, (0.564, 1.414, 0.000)],
['NE', 6, (0.539, 1.357, -0.000)],
['NH1', 7, (0.206, 2.301, 0.000)],
['NH2', 7, (2.078, 0.978, -0.000)],
['CZ', 7, (0.758, 1.093, -0.000)],
],
'ASN': [
['N', 0, (-0.536, 1.357, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.531, -0.787, -1.200)],
['O', 3, (0.625, 1.062, 0.000)],
['CG', 4, (0.584, 1.399, 0.000)],
['ND2', 5, (0.593, -1.188, 0.001)],
['OD1', 5, (0.633, 1.059, 0.000)],
],
'ASP': [
['N', 0, (-0.525, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, 0.000, -0.000)],
['CB', 0, (-0.526, -0.778, -1.208)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.593, 1.398, -0.000)],
['OD1', 5, (0.610, 1.091, 0.000)],
['OD2', 5, (0.592, -1.101, -0.003)],
],
'CYS': [
['N', 0, (-0.522, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, 0.000, 0.000)],
['CB', 0, (-0.519, -0.773, -1.212)],
['O', 3, (0.625, 1.062, -0.000)],
['SG', 4, (0.728, 1.653, 0.000)],
],
'GLN': [
['N', 0, (-0.526, 1.361, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, 0.000)],
['CB', 0, (-0.525, -0.779, -1.207)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.615, 1.393, 0.000)],
['CD', 5, (0.587, 1.399, -0.000)],
['NE2', 6, (0.593, -1.189, -0.001)],
['OE1', 6, (0.634, 1.060, 0.000)],
],
'GLU': [
['N', 0, (-0.528, 1.361, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.526, -0.781, -1.207)],
['O', 3, (0.626, 1.062, 0.000)],
['CG', 4, (0.615, 1.392, 0.000)],
['CD', 5, (0.600, 1.397, 0.000)],
['OE1', 6, (0.607, 1.095, -0.000)],
['OE2', 6, (0.589, -1.104, -0.001)],
],
'GLY': [
['N', 0, (-0.572, 1.337, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.517, -0.000, -0.000)],
['O', 3, (0.626, 1.062, -0.000)],
],
'HIS': [
['N', 0, (-0.527, 1.360, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, 0.000, 0.000)],
['CB', 0, (-0.525, -0.778, -1.208)],
['O', 3, (0.625, 1.063, 0.000)],
['CG', 4, (0.600, 1.370, -0.000)],
['CD2', 5, (0.889, -1.021, 0.003)],
['ND1', 5, (0.744, 1.160, -0.000)],
['CE1', 5, (2.030, 0.851, 0.002)],
['NE2', 5, (2.145, -0.466, 0.004)],
],
'ILE': [
['N', 0, (-0.493, 1.373, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, -0.000)],
['CB', 0, (-0.536, -0.793, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG1', 4, (0.534, 1.437, -0.000)],
['CG2', 4, (0.540, -0.785, -1.199)],
['CD1', 5, (0.619, 1.391, 0.000)],
],
'LEU': [
['N', 0, (-0.520, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.522, -0.773, -1.214)],
['O', 3, (0.625, 1.063, -0.000)],
['CG', 4, (0.678, 1.371, 0.000)],
['CD1', 5, (0.530, 1.430, -0.000)],
['CD2', 5, (0.535, -0.774, 1.200)],
],
'LYS': [
['N', 0, (-0.526, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, 0.000)],
['CB', 0, (-0.524, -0.778, -1.208)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.619, 1.390, 0.000)],
['CD', 5, (0.559, 1.417, 0.000)],
['CE', 6, (0.560, 1.416, 0.000)],
['NZ', 7, (0.554, 1.387, 0.000)],
],
'MET': [
['N', 0, (-0.521, 1.364, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, 0.000, 0.000)],
['CB', 0, (-0.523, -0.776, -1.210)],
['O', 3, (0.625, 1.062, -0.000)],
['CG', 4, (0.613, 1.391, -0.000)],
['SD', 5, (0.703, 1.695, 0.000)],
['CE', 6, (0.320, 1.786, -0.000)],
],
'PHE': [
['N', 0, (-0.518, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, 0.000, -0.000)],
['CB', 0, (-0.525, -0.776, -1.212)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.607, 1.377, 0.000)],
['CD1', 5, (0.709, 1.195, -0.000)],
['CD2', 5, (0.706, -1.196, 0.000)],
['CE1', 5, (2.102, 1.198, -0.000)],
['CE2', 5, (2.098, -1.201, -0.000)],
['CZ', 5, (2.794, -0.003, -0.001)],
],
'PRO': [
['N', 0, (-0.566, 1.351, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, 0.000)],
['CB', 0, (-0.546, -0.611, -1.293)],
['O', 3, (0.621, 1.066, 0.000)],
['CG', 4, (0.382, 1.445, 0.0)],
# ['CD', 5, (0.427, 1.440, 0.0)],
['CD', 5, (0.477, 1.424, 0.0)], # manually made angle 2 degrees larger
],
'SER': [
['N', 0, (-0.529, 1.360, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.518, -0.777, -1.211)],
['O', 3, (0.626, 1.062, -0.000)],
['OG', 4, (0.503, 1.325, 0.000)],
],
'THR': [
['N', 0, (-0.517, 1.364, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, -0.000)],
['CB', 0, (-0.516, -0.793, -1.215)],
['O', 3, (0.626, 1.062, 0.000)],
['CG2', 4, (0.550, -0.718, -1.228)],
['OG1', 4, (0.472, 1.353, 0.000)],
],
'TRP': [
['N', 0, (-0.521, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, 0.000)],
['CB', 0, (-0.523, -0.776, -1.212)],
['O', 3, (0.627, 1.062, 0.000)],
['CG', 4, (0.609, 1.370, -0.000)],
['CD1', 5, (0.824, 1.091, 0.000)],
['CD2', 5, (0.854, -1.148, -0.005)],
['CE2', 5, (2.186, -0.678, -0.007)],
['CE3', 5, (0.622, -2.530, -0.007)],
['NE1', 5, (2.140, 0.690, -0.004)],
['CH2', 5, (3.028, -2.890, -0.013)],
['CZ2', 5, (3.283, -1.543, -0.011)],
['CZ3', 5, (1.715, -3.389, -0.011)],
],
'TYR': [
['N', 0, (-0.522, 1.362, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, -0.000, -0.000)],
['CB', 0, (-0.522, -0.776, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG', 4, (0.607, 1.382, -0.000)],
['CD1', 5, (0.716, 1.195, -0.000)],
['CD2', 5, (0.713, -1.194, -0.001)],
['CE1', 5, (2.107, 1.200, -0.002)],
['CE2', 5, (2.104, -1.201, -0.003)],
['OH', 5, (4.168, -0.002, -0.005)],
['CZ', 5, (2.791, -0.001, -0.003)],
],
'VAL': [
['N', 0, (-0.494, 1.373, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, -0.000)],
['CB', 0, (-0.533, -0.795, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG1', 4, (0.540, 1.429, -0.000)],
['CG2', 4, (0.533, -0.776, 1.203)],
],
}
# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms = {
'ALA': ['C', 'CA', 'CB', 'N', 'O'],
'ARG': ['C', 'CA', 'CB', 'CG', 'CD', 'CZ', 'N', 'NE', 'O', 'NH1', 'NH2'],
'ASP': ['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
'ASN': ['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
'CYS': ['C', 'CA', 'CB', 'N', 'O', 'SG'],
'GLU': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O', 'OE1', 'OE2'],
'GLN': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'NE2', 'O', 'OE1'],
'GLY': ['C', 'CA', 'N', 'O'],
'HIS': ['C', 'CA', 'CB', 'CG', 'CD2', 'CE1', 'N', 'ND1', 'NE2', 'O'],
'ILE': ['C', 'CA', 'CB', 'CG1', 'CG2', 'CD1', 'N', 'O'],
'LEU': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'N', 'O'],
'LYS': ['C', 'CA', 'CB', 'CG', 'CD', 'CE', 'N', 'NZ', 'O'],
'MET': ['C', 'CA', 'CB', 'CG', 'CE', 'N', 'O', 'SD'],
'PHE': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O'],
'PRO': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O'],
'SER': ['C', 'CA', 'CB', 'N', 'O', 'OG'],
'THR': ['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
'TRP': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'CZ2', 'CZ3',
'CH2', 'N', 'NE1', 'O'],
'TYR': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O',
'OH'],
'VAL': ['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O']
}
# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
residue_atom_renaming_swaps = {
'ASP': {'OD1': 'OD2'},
'GLU': {'OE1': 'OE2'},
'PHE': {'CD1': 'CD2', 'CE1': 'CE2'},
'TYR': {'CD1': 'CD2', 'CE1': 'CE2'},
}
# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
van_der_waals_radius = {
'C': 1.7,
'N': 1.55,
'O': 1.52,
'S': 1.8,
}
Bond = collections.namedtuple(
'Bond', ['atom1_name', 'atom2_name', 'length', 'stddev'])
BondAngle = collections.namedtuple(
'BondAngle',
['atom1_name', 'atom2_name', 'atom3name', 'angle_rad', 'stddev'])
@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],
                                          Mapping[str, List[Bond]],
                                          Mapping[str, List[BondAngle]]]:
  """Load stereo_chemical_props.txt into a nice structure.

  Load literature values for bond lengths and bond angles and translate
  bond angles into the length of the opposite edge of the triangle
  ("residue_virtual_bonds").

  The parse is cached (lru_cache), so the file is read at most once per
  process.

  Returns:
    residue_bonds: Dict that maps resname -> list of Bond tuples.
    residue_virtual_bonds: Dict that maps resname -> list of Bond tuples.
    residue_bond_angles: Dict that maps resname -> list of BondAngle tuples.
  """
  with open(stereo_chemical_props_path, 'rt') as f:
    stereo_chemical_props = f.read()
  lines_iter = iter(stereo_chemical_props.splitlines())
  # Load bond lengths. Rows look like "<atom1>-<atom2> <resname> <len> <std>"
  # and the section is terminated by a lone '-' line.
  residue_bonds = {}
  next(lines_iter)  # Skip header line.
  for line in lines_iter:
    if line.strip() == '-':
      break
    bond, resname, length, stddev = line.split()
    atom1, atom2 = bond.split('-')
    if resname not in residue_bonds:
      residue_bonds[resname] = []
    residue_bonds[resname].append(
        Bond(atom1, atom2, float(length), float(stddev)))
  residue_bonds['UNK'] = []  # UNK has no reference geometry.
  # Load bond angles (same layout, angles given in degrees).
  residue_bond_angles = {}
  next(lines_iter)  # Skip empty line.
  next(lines_iter)  # Skip header line.
  for line in lines_iter:
    if line.strip() == '-':
      break
    bond, resname, angle_degree, stddev_degree = line.split()
    atom1, atom2, atom3 = bond.split('-')
    if resname not in residue_bond_angles:
      residue_bond_angles[resname] = []
    residue_bond_angles[resname].append(
        BondAngle(atom1, atom2, atom3,
                  float(angle_degree) / 180. * np.pi,
                  float(stddev_degree) / 180. * np.pi))
  residue_bond_angles['UNK'] = []
  def make_bond_key(atom1_name, atom2_name):
    """Unique key to lookup bonds."""
    return '-'.join(sorted([atom1_name, atom2_name]))
  # Translate bond angles into distances ("virtual bonds").
  residue_virtual_bonds = {}
  for resname, bond_angles in residue_bond_angles.items():
    # Create a fast lookup dict for bond lengths.
    bond_cache = {}
    for b in residue_bonds[resname]:
      bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
    residue_virtual_bonds[resname] = []
    for ba in bond_angles:
      bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
      bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
      # Compute distance between atom1 and atom3 using the law of cosines
      # c^2 = a^2 + b^2 - 2ab*cos(gamma).
      gamma = ba.angle_rad
      length = np.sqrt(bond1.length**2 + bond2.length**2
                       - 2 * bond1.length * bond2.length * np.cos(gamma))
      # Propagation of uncertainty assuming uncorrelated errors.
      dl_outer = 0.5 / length
      dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
      dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
      dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
      stddev = np.sqrt((dl_dgamma * ba.stddev)**2 +
                       (dl_db1 * bond1.stddev)**2 +
                       (dl_db2 * bond2.stddev)**2)
      residue_virtual_bonds[resname].append(
          Bond(ba.atom1_name, ba.atom3name, length, stddev))
  return (residue_bonds,
          residue_virtual_bonds,
          residue_bond_angles)
# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n = [1.329, 1.341]
between_res_bond_length_stddev_c_n = [0.014, 0.016]
# Between-residue cos_angles.
between_res_cos_angles_c_n_ca = [-0.5203, 0.0353] # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n = [-0.4473, 0.0311] # degrees: 116.568 +- 1.995
# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types = [
'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD',
'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3',
'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2',
'CZ3', 'NZ', 'OXT'
]
atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types) # := 37.
# A compact atom encoding with 14 columns
# pylint: disable=line-too-long
# pylint: disable=bad-whitespace
restype_name_to_atom14_names = {
'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''],
'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2', '', '', ''],
'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '', ''],
'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '', ''],
'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''],
'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '', '', ''],
'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '', '', ''],
'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''],
'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '', '', '', ''],
'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '', ''],
'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '', ''],
'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '', ''],
'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '', ''],
'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', '', '', ''],
'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''],
'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''],
'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '', ''],
'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2'],
'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH', '', ''],
'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '', ''],
'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', ''],
}
# pylint: enable=line-too-long
# pylint: enable=bad-whitespace
# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes = [
'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
'S', 'T', 'W', 'Y', 'V'
]
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes) # := 20.
unk_restype_index = restype_num # Catch-all index for unknown restypes.
restypes_with_x = restypes + ['X']
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
def sequence_to_onehot(
    sequence: str,
    mapping: Mapping[str, int],
    map_unknown_to_x: bool = False) -> np.ndarray:
  """Maps the given sequence into a one-hot encoded matrix.

  Args:
    sequence: An amino acid sequence.
    mapping: A dictionary mapping amino acids to integers.
    map_unknown_to_x: If True, any uppercase letter missing from the mapping is
      encoded as the unknown amino acid 'X' (the mapping must then contain
      'X'). If False, any amino acid not in the mapping raises an error.

  Returns:
    A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of
    the sequence.

  Raises:
    ValueError: If the mapping doesn't contain values from 0 to
      num_unique_aas - 1 without any gaps, or (when map_unknown_to_x is set)
      if the sequence contains a character that is not an uppercase letter.
  """
  num_entries = max(mapping.values()) + 1
  # The columns of the one-hot matrix must be a dense 0..N-1 range.
  if sorted(set(mapping.values())) != list(range(num_entries)):
    raise ValueError('The mapping must have values from 0 to num_unique_aas-1 '
                     'without any gaps. Got: %s' % sorted(mapping.values()))
  one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)
  for position, letter in enumerate(sequence):
    if not map_unknown_to_x:
      column = mapping[letter]
    elif letter.isalpha() and letter.isupper():
      # Unknown-but-valid residue letters fall back to the 'X' column.
      column = mapping.get(letter, mapping['X'])
    else:
      raise ValueError(f'Invalid character in the sequence: {letter}')
    one_hot_arr[position, column] = 1
  return one_hot_arr
# One-letter residue code -> canonical PDB three-letter residue name.
restype_1to3 = {
    'A': 'ALA',
    'R': 'ARG',
    'N': 'ASN',
    'D': 'ASP',
    'C': 'CYS',
    'Q': 'GLN',
    'E': 'GLU',
    'G': 'GLY',
    'H': 'HIS',
    'I': 'ILE',
    'L': 'LEU',
    'K': 'LYS',
    'M': 'MET',
    'F': 'PHE',
    'P': 'PRO',
    'S': 'SER',
    'T': 'THR',
    'W': 'TRP',
    'Y': 'TYR',
    'V': 'VAL',
}
# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1 = {v: k for k, v in restype_1to3.items()}
# Define a restype name for all unknown residues.
unk_restype = 'UNK'
# Three-letter names in the same order as `restypes`, with 'UNK' appended.
resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx = {resname: i for i, resname in enumerate(resnames)}
# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID = {
    'A': 0,
    'B': 2,
    'C': 1,
    'D': 2,
    'E': 3,
    'F': 4,
    'G': 5,
    'H': 6,
    'I': 7,
    'J': 20,
    'K': 8,
    'L': 9,
    'M': 10,
    'N': 11,
    'O': 20,
    'P': 12,
    'Q': 13,
    'R': 14,
    'S': 15,
    'T': 16,
    'U': 1,
    'V': 17,
    'W': 18,
    'X': 20,
    'Y': 19,
    'Z': 3,
    '-': 21,
}
# Partial inversion of HHBLITS_AA_TO_ID.
# (Partial because several letters above share an id; the canonical letter
# for each id is listed, with the merged alternatives noted inline.)
ID_TO_HHBLITS_AA = {
    0: 'A',
    1: 'C',  # Also U.
    2: 'D',  # Also B.
    3: 'E',  # Also Z.
    4: 'F',
    5: 'G',
    6: 'H',
    7: 'I',
    8: 'K',
    9: 'L',
    10: 'M',
    11: 'N',
    12: 'P',
    13: 'Q',
    14: 'R',
    15: 'S',
    16: 'T',
    17: 'V',
    18: 'W',
    19: 'Y',
    20: 'X',  # Includes J and O.
    21: '-',
}
restypes_with_x_and_gap = restypes + ['X', '-']
# Permutation mapping an hhblits aatype id to this module's aatype id.
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
    restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])
    for i in range(len(restypes_with_x_and_gap)))
def _make_standard_atom_mask() -> np.ndarray:
  """Returns [num_res_types, num_atom_types] mask array."""
  # +1 to account for unknown (all 0s).
  mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
  for restype, restype_letter in enumerate(restypes):
    restype_name = restype_1to3[restype_letter]
    atom_names = residue_atoms[restype_name]
    # Mark only the atom types that this residue type actually contains.
    for atom_name in atom_names:
      atom_type = atom_order[atom_name]
      mask[restype, atom_type] = 1
  return mask
# Precomputed once at import time; row 20 (UNK) stays all zeros.
STANDARD_ATOM_MASK = _make_standard_atom_mask()
# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
def chi_angle_atom(atom_index: int) -> np.ndarray:
  """Define chi-angle rigid groups via one-hot representations."""
  chi_angles_index = {}
  one_hots = []
  # For each residue name, collect the atom-type index of the `atom_index`-th
  # atom of every chi angle, padded with -1 up to 4 chi angles.
  for k, v in chi_angles_atoms.items():
    indices = [atom_types.index(s[atom_index]) for s in v]
    indices.extend([-1]*(4-len(indices)))
    chi_angles_index[k] = indices
  for r in restypes:
    res3 = restype_1to3[r]
    # Index -1 selects the last row of the identity, a valid (if arbitrary)
    # one-hot used only as padding for absent chi angles.
    one_hot = np.eye(atom_type_num)[chi_angles_index[res3]]
    one_hots.append(one_hot)
  one_hots.append(np.zeros([4, atom_type_num]))  # Add zeros for residue `X`.
  one_hot = np.stack(one_hots, axis=0)
  one_hot = np.transpose(one_hot, [0, 2, 1])
  return one_hot
chi_atom_1_one_hot = chi_angle_atom(1)
chi_atom_2_one_hot = chi_angle_atom(2)
# An array like chi_angles_atoms but using indices rather than names.
chi_angles_atom_indices = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
chi_angles_atom_indices = tree.map_structure(
    lambda atom_name: atom_order[atom_name], chi_angles_atom_indices)
# Pad every residue out to 4 chi angles with dummy all-zero atom quadruples
# so the result is a rectangular [num_restypes, 4, 4] array.
chi_angles_atom_indices = np.array([
    chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms)))
    for chi_atoms in chi_angles_atom_indices])
# Mapping from (res_name, atom_name) pairs to the atom's chi group index
# and atom index within that group.
chi_groups_for_atom = collections.defaultdict(list)
for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
  for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
    for atom_i, atom in enumerate(chi_group):
      chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
# Freeze to a plain dict so missing keys raise instead of silently inserting.
chi_groups_for_atom = dict(chi_groups_for_atom)
def _make_rigid_transformation_4x4(ex, ey, translation):
"""Create a rigid 4x4 transformation matrix from two axes and transl."""
# Normalize ex.
ex_normalized = ex / np.linalg.norm(ex)
# make ey perpendicular to ex
ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
ey_normalized /= np.linalg.norm(ey_normalized)
# compute ez as cross product
eznorm = np.cross(ex_normalized, ey_normalized)
m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
m = np.concatenate([m, [[0., 0., 0., 1.]]], axis=0)
return m
# create an array with (restype, atomtype) --> rigid_group_idx
# and an array with (restype, atomtype, coord) for the atom positions
# and compute affine transformation matrices (4,4) from one rigid group to the
# previous group
# NOTE: `np.int` was a deprecated alias of the builtin `int` and was removed
# in NumPy 1.24; using `int` directly yields the same platform-default
# integer dtype and keeps this module importable on modern NumPy.
restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
def _make_rigid_group_constants():
  """Fill the arrays above."""
  # First pass: per-atom rigid-group membership, masks, and local positions,
  # in both atom37 and atom14 layouts.
  for restype, restype_letter in enumerate(restypes):
    resname = restype_1to3[restype_letter]
    for atomname, group_idx, atom_position in rigid_group_atom_positions[
        resname]:
      atomtype = atom_order[atomname]
      restype_atom37_to_rigid_group[restype, atomtype] = group_idx
      restype_atom37_mask[restype, atomtype] = 1
      restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position
      atom14idx = restype_name_to_atom14_names[resname].index(atomname)
      restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
      restype_atom14_mask[restype, atom14idx] = 1
      restype_atom14_rigid_group_positions[restype,
                                           atom14idx, :] = atom_position
  # Second pass: default 4x4 frames for each of the 8 rigid groups.
  for restype, restype_letter in enumerate(restypes):
    resname = restype_1to3[restype_letter]
    atom_positions = {name: np.array(pos) for name, _, pos
                      in rigid_group_atom_positions[resname]}
    # backbone to backbone is the identity transform
    restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
    # pre-omega-frame to backbone (currently dummy identity matrix)
    restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)
    # phi-frame to backbone
    mat = _make_rigid_transformation_4x4(
        ex=atom_positions['N'] - atom_positions['CA'],
        ey=np.array([1., 0., 0.]),
        translation=atom_positions['N'])
    restype_rigid_group_default_frame[restype, 2, :, :] = mat
    # psi-frame to backbone
    mat = _make_rigid_transformation_4x4(
        ex=atom_positions['C'] - atom_positions['CA'],
        ey=atom_positions['CA'] - atom_positions['N'],
        translation=atom_positions['C'])
    restype_rigid_group_default_frame[restype, 3, :, :] = mat
    # chi1-frame to backbone
    if chi_angles_mask[restype][0]:
      base_atom_names = chi_angles_atoms[resname][0]
      base_atom_positions = [atom_positions[name] for name in base_atom_names]
      mat = _make_rigid_transformation_4x4(
          ex=base_atom_positions[2] - base_atom_positions[1],
          ey=base_atom_positions[0] - base_atom_positions[1],
          translation=base_atom_positions[2])
      restype_rigid_group_default_frame[restype, 4, :, :] = mat
    # chi2-frame to chi1-frame
    # chi3-frame to chi2-frame
    # chi4-frame to chi3-frame
    # luckily all rotation axes for the next frame start at (0,0,0) of the
    # previous frame
    for chi_idx in range(1, 4):
      if chi_angles_mask[restype][chi_idx]:
        axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]
        axis_end_atom_position = atom_positions[axis_end_atom_name]
        mat = _make_rigid_transformation_4x4(
            ex=axis_end_atom_position,
            ey=np.array([-1., 0., 0.]),
            translation=axis_end_atom_position)
        restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat
# Populate the module-level constant arrays once at import time.
_make_rigid_group_constants()
def make_atom14_dists_bounds(overlap_tolerance=1.5,
                             bond_length_tolerance_factor=15):
  """compute upper and lower bounds for bonds to assess violations.

  Args:
    overlap_tolerance: Allowed van-der-Waals overlap (Angstrom) before two
      non-bonded atoms count as clashing.
    bond_length_tolerance_factor: Number of standard deviations around the
      literature bond length that is still considered valid.

  Returns:
    Dict with 'lower_bound', 'upper_bound' and 'stddev' arrays, each of
    shape (21, 14, 14) indexed by (restype, atom14_idx, atom14_idx).
  """
  restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
  restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
  restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
  residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
  for restype, restype_letter in enumerate(restypes):
    resname = restype_1to3[restype_letter]
    atom_list = restype_name_to_atom14_names[resname]
    # create lower and upper bounds for clashes
    for atom1_idx, atom1_name in enumerate(atom_list):
      if not atom1_name:
        continue
      atom1_radius = van_der_waals_radius[atom1_name[0]]
      for atom2_idx, atom2_name in enumerate(atom_list):
        if (not atom2_name) or atom1_idx == atom2_idx:
          continue
        atom2_radius = van_der_waals_radius[atom2_name[0]]
        lower = atom1_radius + atom2_radius - overlap_tolerance
        upper = 1e10
        restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
        restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
        restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
        restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
    # overwrite lower and upper bounds for bonds and angles
    for b in residue_bonds[resname] + residue_virtual_bonds[resname]:
      atom1_idx = atom_list.index(b.atom1_name)
      atom2_idx = atom_list.index(b.atom2_name)
      lower = b.length - bond_length_tolerance_factor * b.stddev
      upper = b.length + bond_length_tolerance_factor * b.stddev
      restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
      restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
      restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
      restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
      restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev
      restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev
  return {'lower_bound': restype_atom14_bond_lower_bound,  # shape (21,14,14)
          'upper_bound': restype_atom14_bond_upper_bound,  # shape (21,14,14)
          'stddev': restype_atom14_bond_stddev,  # shape (21,14,14)
         }
| 38.940914
| 102
| 0.531749
|
4a0d632380ee7432e0dcf06539e82bea31d41f00
| 496
|
py
|
Python
|
src/dag_pipe/helpers/kernel_meta.py
|
yitistica/dag-pipe
|
ee2f68f4d3db6ba9c9835bddaa1695b0753877ea
|
[
"MIT"
] | null | null | null |
src/dag_pipe/helpers/kernel_meta.py
|
yitistica/dag-pipe
|
ee2f68f4d3db6ba9c9835bddaa1695b0753877ea
|
[
"MIT"
] | null | null | null |
src/dag_pipe/helpers/kernel_meta.py
|
yitistica/dag-pipe
|
ee2f68f4d3db6ba9c9835bddaa1695b0753877ea
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from dag_pipe.utils.types import locate_object
def build_function_meta(function_):
    """Build an ordered metadata record locating a plain function."""
    return OrderedDict([('location', locate_object(function_))])
def build_method_meta(class_, method_name):
    """Build an ordered metadata record locating a method on a class."""
    location = locate_object(class_)
    # Tag the class location with the specific method's name.
    location['method'] = method_name
    return OrderedDict([('location', location)])
def serialize_kernel_meta(meta):
    """Serialize a kernel metadata mapping to its string representation."""
    return str(meta)
| 19.076923
| 48
| 0.729839
|
4a0d648aa67e5282c0944f5ed1002f0c89c1dbc6
| 1,806
|
py
|
Python
|
polyaxon/event_manager/events/experiment_job.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/event_manager/events/experiment_job.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/event_manager/events/experiment_job.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
from event_manager import event_actions, event_subjects
from event_manager.event import Attribute, Event
# Fully-qualified event-type names, composed as '<subject>.<action>'.
EXPERIMENT_JOB_VIEWED = '{}.{}'.format(event_subjects.EXPERIMENT_JOB,
                                       event_actions.VIEWED)
EXPERIMENT_JOB_RESOURCES_VIEWED = '{}.{}'.format(event_subjects.EXPERIMENT_JOB,
                                                 event_actions.RESOURCES_VIEWED)
EXPERIMENT_JOB_LOGS_VIEWED = '{}.{}'.format(event_subjects.EXPERIMENT_JOB,
                                            event_actions.LOGS_VIEWED)
EXPERIMENT_JOB_STATUSES_VIEWED = '{}.{}'.format(event_subjects.EXPERIMENT_JOB,
                                                event_actions.STATUSES_VIEWED)
class ExperimentJobViewedEvent(Event):
    """Tracking event emitted when an actor views an experiment job."""
    event_type = EXPERIMENT_JOB_VIEWED
    actor = True
    attributes = (
        Attribute('id'),
        Attribute('role'),
        Attribute('experiment.id'),
        Attribute('experiment.user.id'),
        Attribute('last_status'),
    )
class ExperimentJobResourcesViewedEvent(Event):
    """Tracking event emitted when an actor views a job's resources."""
    event_type = EXPERIMENT_JOB_RESOURCES_VIEWED
    actor = True
    attributes = (
        Attribute('id'),
        Attribute('experiment.id'),
        Attribute('experiment.user.id'),
        Attribute('last_status'),
    )
class ExperimentJobLogsViewedEvent(Event):
    """Tracking event emitted when an actor views a job's logs."""
    event_type = EXPERIMENT_JOB_LOGS_VIEWED
    actor = True
    attributes = (
        Attribute('id'),
        Attribute('experiment.id'),
        Attribute('experiment.user.id'),
        Attribute('last_status'),
    )
class ExperimentJobStatusesViewedEvent(Event):
    """Tracking event emitted when an actor views a job's status history."""
    event_type = EXPERIMENT_JOB_STATUSES_VIEWED
    actor = True
    attributes = (
        Attribute('id'),
        Attribute('experiment.id'),
        Attribute('experiment.user.id'),
        Attribute('last_status'),
    )
| 31.684211
| 80
| 0.630122
|
4a0d64fe1c1d29edf5ba3870708d10b9bd58f8e3
| 2,921
|
py
|
Python
|
memote/support/essentiality.py
|
siddC/memote
|
326ec54481b1d52d28ffb54fe30baa460ce6c433
|
[
"Apache-2.0"
] | null | null | null |
memote/support/essentiality.py
|
siddC/memote
|
326ec54481b1d52d28ffb54fe30baa460ce6c433
|
[
"Apache-2.0"
] | null | null | null |
memote/support/essentiality.py
|
siddC/memote
|
326ec54481b1d52d28ffb54fe30baa460ce6c433
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting functions for stoichiometric consistency checks."""
from __future__ import absolute_import, division
import logging
from numpy import sqrt
LOGGER = logging.getLogger(__name__)
def confusion_matrix(predicted_essential, expected_essential,
                     predicted_nonessential, expected_nonessential):
    """
    Compute a representation of the confusion matrix.

    Parameters
    ----------
    predicted_essential : set
    expected_essential : set
    predicted_nonessential : set
    expected_nonessential : set

    Returns
    -------
    dict
        Confusion matrix as different keys of a dictionary. The abbreviated
        keys correspond to the ones used in [1]_. Derived rates are ``None``
        when their denominator is zero (undefined).

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
       <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    """
    true_positive = predicted_essential & expected_essential
    tp = len(true_positive)
    true_negative = predicted_nonessential & expected_nonessential
    tn = len(true_negative)
    false_positive = predicted_essential - expected_essential
    fp = len(false_positive)
    false_negative = predicted_nonessential - expected_nonessential
    fn = len(false_negative)
    # Guard each denominator explicitly instead of catching
    # ZeroDivisionError: dividing by a numpy float zero (as produced by
    # numpy's `sqrt`) yields inf/nan with a warning and never raises.
    # sensitivity or true positive rate
    tpr = tp / (tp + fn) if (tp + fn) > 0 else None
    # specificity or true negative rate
    tnr = tn / (tn + fp) if (tn + fp) > 0 else None
    # precision or positive predictive value
    ppv = tp / (tp + fp) if (tp + fp) > 0 else None
    # False discovery rate; undefined whenever the precision is undefined
    # (the previous unconditional `1 - ppv` raised TypeError on None).
    fdr = None if ppv is None else 1 - ppv
    # accuracy
    total = tp + tn + fp + fn
    acc = (tp + tn) / total if total > 0 else None
    # Compute Matthews correlation coefficient. The full denominator is
    # sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn)); the previous version omitted
    # the (tn+fp) factor.
    mcc_denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    mcc = (tp * tn - fp * fn) / mcc_denominator if mcc_denominator > 0 else None
    return {
        "TP": list(true_positive),
        "TN": list(true_negative),
        "FP": list(false_positive),
        "FN": list(false_negative),
        "TPR": tpr,
        "TNR": tnr,
        "PPV": ppv,
        "FDR": fdr,
        "ACC": acc,
        "MCC": mcc
    }
| 29.21
| 75
| 0.651832
|
4a0d658180e51492185475ad5b33041a7918ccea
| 58,544
|
py
|
Python
|
python/ccxt/delta.py
|
RusEu/ccxt
|
d6d2b3e2f54a59d102102ee2858eca4d6702fecc
|
[
"MIT"
] | 3
|
2021-06-29T16:27:19.000Z
|
2021-07-18T08:36:07.000Z
|
python/ccxt/delta.py
|
Bytedex/ccxt
|
3863b5e1d6c77d719ac102b0243964c4946e7abb
|
[
"MIT"
] | null | null | null |
python/ccxt/delta.py
|
Bytedex/ccxt
|
3863b5e1d6c77d719ac102b0243964c4946e7abb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class delta(Exchange):
    def describe(self):
        """Return the static exchange description (identity, endpoints, fee
        schedule, timeframes and error-code mappings) deep-merged over the
        base Exchange defaults.
        """
        return self.deep_extend(super(delta, self).describe(), {
            'id': 'delta',
            'name': 'Delta Exchange',
            'countries': ['VC'],  # Saint Vincent and the Grenadines
            'rateLimit': 300,
            'version': 'v2',
            # new metainfo interface
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchDepositAddress': True,
                'fetchCurrencies': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchStatus': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': True,
                'fetchTrades': True,
            },
            'timeframes': {
                '1m': '1m',
                '3m': '3m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '2h': '2h',
                '4h': '4h',
                '6h': '6h',
                '1d': '1d',
                '7d': '7d',
                '1w': '1w',
                '2w': '2w',
                '1M': '30d',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/99450025-3be60a00-2931-11eb-9302-f4fd8d8589aa.jpg',
                'test': {
                    'public': 'https://testnet-api.delta.exchange',
                    'private': 'https://testnet-api.delta.exchange',
                },
                'api': {
                    'public': 'https://api.delta.exchange',
                    'private': 'https://api.delta.exchange',
                },
                'www': 'https://www.delta.exchange',
                'doc': [
                    'https://docs.delta.exchange',
                ],
                'fees': 'https://www.delta.exchange/fees',
                'referral': 'https://www.delta.exchange/app/signup/?code=IULYNB',
            },
            'api': {
                'public': {
                    'get': [
                        'assets',
                        'settings',
                        'indices',
                        'products',
                        'tickers',
                        'tickers/{symbol}',
                        'l2orderbook/{symbol}',
                        'trades/{symbol}',
                        'history/candles',
                        'history/sparklines',
                    ],
                },
                'private': {
                    'get': [
                        'orders',
                        'orders/leverage',
                        'positions',
                        'positions/margined',
                        'orders/history',
                        'fills',
                        'fills/history/download/csv',
                        'wallet/balances',
                        'wallet/transactions',
                        'wallet/transactions/download',
                        'deposits/address',
                    ],
                    'post': [
                        'orders',
                        'orders/batch',
                        'orders/leverage',
                        'positions/change_margin',
                    ],
                    'put': [
                        'orders',
                        'orders/batch',
                    ],
                    'delete': [
                        'orders',
                        'orders/all',
                        'orders/batch',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'taker': 0.15 / 100,
                    'maker': 0.10 / 100,
                    'tiers': {
                        'taker': [
                            [0, 0.15 / 100],
                            [100, 0.13 / 100],
                            [250, 0.13 / 100],
                            [1000, 0.1 / 100],
                            [5000, 0.09 / 100],
                            [10000, 0.075 / 100],
                            [20000, 0.065 / 100],
                        ],
                        'maker': [
                            [0, 0.1 / 100],
                            [100, 0.1 / 100],
                            [250, 0.09 / 100],
                            [1000, 0.075 / 100],
                            [5000, 0.06 / 100],
                            [10000, 0.05 / 100],
                            [20000, 0.05 / 100],
                        ],
                    },
                },
            },
            'precisionMode': TICK_SIZE,
            'requiredCredentials': {
                'apiKey': True,
                'secret': False,
            },
            'exceptions': {
                'exact': {
                    # Margin required to place order with selected leverage and quantity is insufficient.
                    'insufficient_margin': InsufficientFunds,  # {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
                    'order_size_exceed_available': InvalidOrder,  # The order book doesn't have sufficient liquidity, hence the order couldnt be filled, for example, ioc orders
                    'risk_limits_breached': BadRequest,  # orders couldn't be placed as it will breach allowed risk limits.
                    'invalid_contract': BadSymbol,  # The contract/product is either doesn't exist or has already expired.
                    'immediate_liquidation': InvalidOrder,  # Order will cause immediate liquidation.
                    'out_of_bankruptcy': InvalidOrder,  # Order prices are out of position bankruptcy limits.
                    'self_matching_disrupted_post_only': InvalidOrder,  # Self matching is not allowed during auction.
                    'immediate_execution_post_only': InvalidOrder,  # orders couldn't be placed as it includes post only orders which will be immediately executed
                    'bad_schema': BadRequest,  # {"error":{"code":"bad_schema","context":{"schema_errors":[{"code":"validation_error","message":"id is required","param":""}]}},"success":false}
                    'invalid_api_key': AuthenticationError,  # {"success":false,"error":{"code":"invalid_api_key"}}
                    'invalid_signature': AuthenticationError,  # {"success":false,"error":{"code":"invalid_signature"}}
                    'open_order_not_found': OrderNotFound,  # {"error":{"code":"open_order_not_found"},"success":false}
                    'unavailable': ExchangeNotAvailable,  # {"error":{"code":"unavailable"},"success":false}
                },
                'broad': {
                },
            },
        })
def fetch_time(self, params={}):
response = self.publicGetSettings(params)
#
# {
# "result":{
# "server_time":1605472733766141,
# "deto_referral_mining_daily_reward":"25000",
# "deto_total_reward_pool":"100000000",
# "deto_trade_mining_daily_reward":"75000",
# "kyc_deposit_limit":"20",
# "kyc_withdrawal_limit":"2",
# "under_maintenance":"false"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.safe_integer_product(result, 'server_time', 0.001)
def fetch_status(self, params={}):
response = self.publicGetSettings(params)
result = self.safe_value(response, 'result', {})
underMaintenance = self.safe_value(result, 'under_maintenance')
status = 'maintenance' if (underMaintenance == 'true') else 'ok'
updated = self.safe_integer_product(result, 'server_time', 0.001)
self.status = self.extend(self.status, {
'status': status,
'updated': updated,
})
return self.status
    def fetch_currencies(self, params={}):
        """Fetch all assets from GET /assets and map them to unified
        ccxt currency structures keyed by currency code.
        """
        response = self.publicGetAssets(params)
        #
        # {
        #     "result":[
        #         {
        #             "base_withdrawal_fee":"0.0005",
        #             "deposit_status":"enabled",
        #             "id":2,
        #             "interest_credit":true,
        #             "interest_slabs":[
        #                 {"limit":"0.1","rate":"0"},
        #                 {"limit":"1","rate":"0.05"},
        #                 {"limit":"5","rate":"0.075"},
        #                 {"limit":"10","rate":"0.1"},
        #                 {"limit":"9999999999999999","rate":"0"}
        #             ],
        #             "kyc_deposit_limit":"10",
        #             "kyc_withdrawal_limit":"2",
        #             "min_withdrawal_amount":"0.001",
        #             "minimum_precision":4,
        #             "name":"Bitcoin",
        #             "precision":8,
        #             "sort_priority":1,
        #             "symbol":"BTC",
        #             "variable_withdrawal_fee":"0",
        #             "withdrawal_status":"enabled"
        #         },
        #     ],
        #     "success":true
        # }
        #
        currencies = self.safe_value(response, 'result', [])
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = self.safe_string(currency, 'symbol')
            numericId = self.safe_integer(currency, 'id')
            code = self.safe_currency_code(id)
            depositStatus = self.safe_string(currency, 'deposit_status')
            withdrawalStatus = self.safe_string(currency, 'withdrawal_status')
            depositsEnabled = (depositStatus == 'enabled')
            withdrawalsEnabled = (withdrawalStatus == 'enabled')
            # a currency is active only when both deposits and withdrawals are enabled
            active = depositsEnabled and withdrawalsEnabled
            precision = self.safe_integer(currency, 'precision')
            result[code] = {
                'id': id,
                'numericId': numericId,
                'code': code,
                'name': self.safe_string(currency, 'name'),
                'info': currency,  # the original payload
                'active': active,
                'fee': self.safe_number(currency, 'base_withdrawal_fee'),
                # convert the decimal-places count into a tick size
                'precision': 1 / math.pow(10, precision),
                'limits': {
                    'amount': {'min': None, 'max': None},
                    'withdraw': {
                        'min': self.safe_number(currency, 'min_withdrawal_amount'),
                        'max': None,
                    },
                },
            }
        return result
def load_markets(self, reload=False, params={}):
markets = super(delta, self).load_markets(reload, params)
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
if (currenciesByNumericId is None) or reload:
self.options['currenciesByNumericId'] = self.index_by(self.currencies, 'numericId')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId')
if (marketsByNumericId is None) or reload:
self.options['marketsByNumericId'] = self.index_by(self.markets, 'numericId')
return markets
    def fetch_markets(self, params={}):
        """Fetch all products from GET /products and map them to unified
        ccxt market structures (swap / future / option classification).
        """
        response = self.publicGetProducts(params)
        #
        # {
        #     "meta":{
        #         "after":null,
        #         "before":null,
        #         "limit":100,
        #         "total_count":81
        #     },
        #     "result":[
        #         {
        #             "annualized_funding":"5.475000000000000000",
        #             "is_quanto":false,
        #             "ui_config":{
        #                 "default_trading_view_candle":"15",
        #                 "leverage_slider_values":[1,3,5,10,25,50],
        #                 "price_clubbing_values":[0.001,0.005,0.05,0.1,0.5,1,5],
        #                 "show_bracket_orders":false,
        #                 "sort_priority":29,
        #                 "tags":[]
        #             },
        #             "basis_factor_max_limit":"0.15",
        #             "symbol":"P-LINK-D-151120",
        #             "id":1584,
        #             "default_leverage":"5.000000000000000000",
        #             "maker_commission_rate":"0.0005",
        #             "contract_unit_currency":"LINK",
        #             "strike_price":"12.507948",
        #             "settling_asset":{
        #                 # asset structure
        #             },
        #             "auction_start_time":null,
        #             "auction_finish_time":null,
        #             "settlement_time":"2020-11-15T12:00:00Z",
        #             "launch_time":"2020-11-14T11:55:05Z",
        #             "spot_index":{
        #                 # index structure
        #             },
        #             "trading_status":"operational",
        #             "tick_size":"0.001",
        #             "position_size_limit":100000,
        #             "notional_type":"vanilla",  # vanilla, inverse
        #             "price_band":"0.4",
        #             "barrier_price":null,
        #             "description":"Daily LINK PUT options quoted in USDT and settled in USDT",
        #             "insurance_fund_margin_contribution":"1",
        #             "quoting_asset":{
        #                 # asset structure
        #             },
        #             "liquidation_penalty_factor":"0.2",
        #             "product_specs":{"max_volatility":3,"min_volatility":0.3,"spot_price_band":"0.40"},
        #             "initial_margin_scaling_factor":"0.0001",
        #             "underlying_asset":{
        #                 # asset structure
        #             },
        #             "state":"live",
        #             "contract_value":"1",
        #             "initial_margin":"2",
        #             "impact_size":5000,
        #             "settlement_price":null,
        #             "contract_type":"put_options",  # put_options, call_options, move_options, perpetual_futures, interest_rate_swaps, futures, spreads
        #             "taker_commission_rate":"0.0005",
        #             "maintenance_margin":"1",
        #             "short_description":"LINK Daily PUT Options",
        #             "maintenance_margin_scaling_factor":"0.00005",
        #             "funding_method":"mark_price",
        #             "max_leverage_notional":"20000"
        #         },
        #     ],
        #     "success":true
        # }
        #
        markets = self.safe_value(response, 'result', [])
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            type = self.safe_string(market, 'contract_type')
            # settlingAsset = self.safe_value(market, 'settling_asset', {})
            quotingAsset = self.safe_value(market, 'quoting_asset', {})
            underlyingAsset = self.safe_value(market, 'underlying_asset', {})
            baseId = self.safe_string(underlyingAsset, 'symbol')
            quoteId = self.safe_string(quotingAsset, 'symbol')
            id = self.safe_string(market, 'symbol')
            numericId = self.safe_integer(market, 'id')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = id
            swap = False
            future = False
            option = False
            # classify the product into a unified market type
            if type == 'perpetual_futures':
                type = 'swap'
                swap = True
                future = False
                option = False
                # only plain perpetuals (no underscore in id) get a BASE/QUOTE symbol
                if id.find('_') < 0:
                    symbol = base + '/' + quote
            elif (type == 'call_options') or (type == 'put_options') or (type == 'move_options'):
                type = 'option'
                swap = False
                option = True
                future = False
            elif type == 'futures':
                type = 'future'
                swap = False
                option = False
                future = True
            precision = {
                'amount': 1.0,  # number of contracts
                'price': self.safe_number(market, 'tick_size'),
            }
            limits = {
                'amount': {
                    'min': 1.0,
                    'max': self.safe_number(market, 'position_size_limit'),
                },
                'price': {
                    'min': precision['price'],
                    'max': None,
                },
                'cost': {
                    'min': self.safe_number(market, 'min_size'),
                    'max': None,
                },
            }
            state = self.safe_string(market, 'state')
            active = (state == 'live')
            maker = self.safe_number(market, 'maker_commission_rate')
            taker = self.safe_number(market, 'taker_commission_rate')
            result.append({
                'id': id,
                'numericId': numericId,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'type': type,
                'option': option,
                'swap': swap,
                'future': future,
                'maker': maker,
                'taker': taker,
                'precision': precision,
                'limits': limits,
                'info': market,
                'active': active,
            })
        return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw exchange ticker payload into a unified ccxt
        ticker structure; derives change/average/percentage from open/close.
        """
        #
        # fetchTicker, fetchTickers
        #
        #     {
        #         "close":15837.5,
        #         "high":16354,
        #         "low":15751.5,
        #         "mark_price":"15820.100867",
        #         "open":16140.5,
        #         "product_id":139,
        #         "size":640552,
        #         "spot_price":"15827.050000000001",
        #         "symbol":"BTCUSDT",
        #         "timestamp":1605373550208262,
        #         "turnover":10298630.3735,
        #         "turnover_symbol":"USDT",
        #         "turnover_usd":10298630.3735,
        #         "volume":640.5520000000001
        #     }
        #
        # raw timestamp is in microseconds; scale to milliseconds
        timestamp = self.safe_integer_product(ticker, 'timestamp', 0.001)
        marketId = self.safe_string(ticker, 'symbol')
        symbol = self.safe_symbol(marketId, market)
        last = self.safe_number(ticker, 'close')
        open = self.safe_number(ticker, 'open')
        change = None
        average = None
        percentage = None
        if (open is not None) and (last is not None):
            change = last - open
            average = self.sum(last, open) / 2
            if open != 0.0:
                percentage = (change / open) * 100
        baseVolume = self.safe_number(ticker, 'volume')
        quoteVolume = self.safe_number(ticker, 'turnover')
        vwap = self.vwap(baseVolume, quoteVolume)
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_number(ticker, 'high'),
            'low': self.safe_number(ticker, 'low'),
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': vwap,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': percentage,
            'average': average,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTickersSymbol(self.extend(request, params))
#
# {
# "result":{
# "close":15837.5,
# "high":16354,
# "low":15751.5,
# "mark_price":"15820.100867",
# "open":16140.5,
# "product_id":139,
# "size":640552,
# "spot_price":"15827.050000000001",
# "symbol":"BTCUSDT",
# "timestamp":1605373550208262,
# "turnover":10298630.3735,
# "turnover_symbol":"USDT",
# "turnover_usd":10298630.3735,
# "volume":640.5520000000001
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ticker(result, market)
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for all markets, then filter down to *symbols* when given."""
    self.load_markets()
    response = self.publicGetTickers(params)
    # {"result":[{"close":...,"symbol":"AAVEBTC","timestamp":...,...},...],"success":true}
    rawTickers = self.safe_value(response, 'result', [])
    parsed = {}
    for rawTicker in rawTickers:
        ticker = self.parse_ticker(rawTicker)
        parsed[ticker['symbol']] = ticker
    return self.filter_by_array(parsed, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the L2 order book; *limit* caps the depth per side when provided."""
    self.load_markets()
    request = {'symbol': self.market_id(symbol)}
    if limit is not None:
        request['depth'] = limit
    response = self.publicGetL2orderbookSymbol(self.extend(request, params))
    # bids live under "buy", asks under "sell"; each level is {"price":...,"size":...}
    book = self.safe_value(response, 'result', {})
    return self.parse_order_book(book, None, 'buy', 'sell', 'price', 'size')
def parse_trade(self, trade, market=None):
    """
    Convert a raw trade into a unified trade structure.

    Handles two raw shapes:
    - public fetchTrades entries: buyer_role/seller_role, price, size,
      symbol and a microsecond "timestamp"
    - private fetchMyTrades fills: id, order_id, commission, created_at
      (ISO8601), meta_data, product (with settling_asset), role, side, size
    """
    id = self.safe_string(trade, 'id')
    orderId = self.safe_string(trade, 'order_id')
    # private fills carry ISO8601 created_at; public trades a microsecond timestamp
    timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
    timestamp = self.safe_integer_product(trade, 'timestamp', 0.001, timestamp)
    priceStr = self.safe_string(trade, 'price')
    amountStr = self.safe_string(trade, 'size')
    price = self.parse_number(priceStr)
    amount = self.parse_number(amountStr)
    cost = self.parse_number(Precise.string_mul(priceStr, amountStr))
    product = self.safe_value(trade, 'product', {})
    symbol = self.safe_symbol(self.safe_string(product, 'symbol'), market)
    side = self.safe_string(trade, 'side')
    if side is None:
        # public trades only report the seller's role; derive the taker side from it
        sellerRole = self.safe_string(trade, 'seller_role')
        if sellerRole == 'taker':
            side = 'sell'
        elif sellerRole == 'maker':
            side = 'buy'
    takerOrMaker = self.safe_string(trade, 'role')
    metaData = self.safe_value(trade, 'meta_data', {})
    type = self.safe_string(metaData, 'order_type')
    if type is not None:
        # e.g. 'market_order' -> 'market'
        type = type.replace('_order', '')
    fee = None
    feeCost = self.safe_number(trade, 'commission')
    if feeCost is not None:
        # fees are charged in the product's settling asset
        settlingAsset = self.safe_value(product, 'settling_asset', {})
        feeCurrencyCode = self.safe_currency_code(self.safe_string(settlingAsset, 'symbol'))
        fee = {
            'cost': feeCost,
            'currency': feeCurrencyCode,
        }
    return {
        'id': id,
        'order': orderId,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'amount': amount,
        'cost': cost,
        'takerOrMaker': takerOrMaker,
        'fee': fee,
        'info': trade,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """
    Fetch recent public trades for one market.

    The endpoint takes no time/limit filters; *since* and *limit* are
    applied client-side by parse_trades.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    response = self.publicGetTradesSymbol(self.extend(request, params))
    # {"result":[{"buyer_role":"maker","price":...,"size":...,"timestamp":...}],"success":true}
    rawTrades = self.safe_value(response, 'result', [])
    return self.parse_trades(rawTrades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
    """
    Convert a raw candle {"time","open","high","low","close","volume"}
    into the unified [timestamp, o, h, l, c, v] list.
    """
    timestamp = self.safe_timestamp(ohlcv, 'time')  # seconds -> milliseconds
    values = [self.safe_number(ohlcv, key) for key in ('open', 'high', 'low', 'close', 'volume')]
    return [timestamp] + values
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """
    Fetch candles. Without *since* the window ends now and spans
    limit * timeframe seconds backwards; with *since* it starts there.
    """
    self.load_markets()
    market = self.market(symbol)
    duration = self.parse_timeframe(timeframe)
    limit = limit if limit else 2000  # max 2000
    request = {
        'symbol': market['id'],
        'resolution': self.timeframes[timeframe],
    }
    if since is None:
        now = self.seconds()
        request['end'] = now
        request['start'] = now - limit * duration
    else:
        start = int(since / 1000)  # the API wants seconds, since is in ms
        request['start'] = start
        request['end'] = self.sum(start, limit * duration)
    response = self.publicGetHistoryCandles(self.extend(request, params))
    # {"success":true,"result":[{"time":...,"open":...,"high":...,"low":...,"close":...,"volume":...},...]}
    candles = self.safe_value(response, 'result', [])
    return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def fetch_balance(self, params={}):
    """
    Fetch wallet balances. Assets are reported by numeric asset_id and
    resolved through options['currenciesByNumericId'].
    """
    self.load_markets()
    response = self.privateGetWalletBalances(params)
    # {"result":[{"asset_id":1,"available_balance":"0","balance":"0",...}],"success":true}
    balances = self.safe_value(response, 'result', [])
    byNumericId = self.safe_value(self.options, 'currenciesByNumericId', {})
    result = {'info': response}
    for balance in balances:
        currencyId = self.safe_string(balance, 'asset_id')
        currency = self.safe_value(byNumericId, currencyId)
        # fall back to the raw numeric id when the currency is unknown
        code = currency['code'] if currency is not None else currencyId
        account = self.account()
        account['total'] = self.safe_string(balance, 'balance')
        account['free'] = self.safe_string(balance, 'available_balance')
        result[code] = account
    return self.parse_balance(result, False)
def fetch_position(self, symbol, params={}):
    """
    Fetch the current position for a single market; returns the raw
    exchange result ({"entry_price", "size", "timestamp"}).

    BUG FIX: *params* defaulted to None, which breaks
    self.extend(request, params); every sibling method uses {} and the
    new default is backward-compatible for callers that pass params.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'product_id': market['numericId'],
    }
    response = self.privateGetPositions(self.extend(request, params))
    # {"result":{"entry_price":null,"size":0,"timestamp":...},"success":true}
    return self.safe_value(response, 'result', {})
def fetch_positions(self, symbols=None, params={}):
    """
    Fetch all margined positions; returns the raw exchange list
    (entries carry user_id, size, entry_price, margin, liquidation_price,
    bankruptcy_price, adl_level, product_id).
    """
    self.load_markets()
    response = self.privateGetPositionsMargined(params)
    return self.safe_value(response, 'result', [])
def parse_order_status(self, status):
    """Map an exchange order state onto unified open/closed/canceled; unknown states pass through."""
    return self.safe_string({
        'open': 'open',
        'pending': 'open',
        'closed': 'closed',
        'cancelled': 'canceled',
    }, status, status)
def parse_order(self, order, market=None):
    """
    Convert a raw order (from createOrder, cancelOrder, editOrder,
    fetchOpenOrders, fetchClosedOrders) into a unified order structure.

    BUG FIX: "order_type" was dereferenced unconditionally, so a payload
    without that key raised AttributeError on None.replace(); now guarded,
    matching the equivalent logic in parse_trade.
    """
    id = self.safe_string(order, 'id')
    clientOrderId = self.safe_string(order, 'client_order_id')
    timestamp = self.parse8601(self.safe_string(order, 'created_at'))
    # orders reference their market by numeric product_id
    marketId = self.safe_string(order, 'product_id')
    marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId', {})
    market = self.safe_value(marketsByNumericId, marketId, market)
    symbol = marketId if (market is None) else market['symbol']
    status = self.parse_order_status(self.safe_string(order, 'state'))
    side = self.safe_string(order, 'side')
    type = self.safe_string(order, 'order_type')
    if type is not None:
        # e.g. 'limit_order' -> 'limit'
        type = type.replace('_order', '')
    price = self.safe_number(order, 'limit_price')
    amount = self.safe_number(order, 'size')
    remaining = self.safe_number(order, 'unfilled_size')
    average = self.safe_number(order, 'average_fill_price')
    fee = None
    feeCost = self.safe_number(order, 'paid_commission')
    if feeCost is not None:
        feeCurrencyCode = None
        if market is not None:
            # fees are charged in the product's settling asset
            settlingAsset = self.safe_value(market['info'], 'settling_asset', {})
            feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
        fee = {
            'cost': feeCost,
            'currency': feeCurrencyCode,
        }
    return self.safe_order({
        'info': order,
        'id': id,
        'clientOrderId': clientOrderId,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'amount': amount,
        'cost': None,
        'average': average,
        'filled': None,
        'remaining': remaining,
        'status': status,
        'fee': fee,
        'trades': None,
    })
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """
    Place an order. *type* is e.g. 'limit' or 'market'; a limit price is
    only sent for limit orders. A client order id may be supplied via
    params as clientOrderId or client_order_id.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'product_id': market['numericId'],
        'size': self.amount_to_precision(symbol, amount),
        'side': side,
        'order_type': type + '_order',  # the API expects e.g. 'limit_order'
        # other supported params: time_in_force (gtc/ioc/fok),
        # post_only ('true'/'false'), reduce_only ('true'/'false')
    }
    if type == 'limit':
        request['limit_price'] = self.price_to_precision(symbol, price)
    clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
    params = self.omit(params, ['clientOrderId', 'client_order_id'])
    if clientOrderId is not None:
        request['client_order_id'] = clientOrderId
    response = self.privatePostOrders(self.extend(request, params))
    # {"result":{...full order payload...},"success":true}
    rawOrder = self.safe_value(response, 'result', {})
    return self.parse_order(rawOrder, market)
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
    """
    Amend an existing order's size and/or limit price via PUT /orders.
    *type* and *side* are accepted for interface compatibility but the
    endpoint does not need them.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'id': int(id),
        'product_id': market['numericId'],
    }
    if amount is not None:
        request['size'] = int(self.amount_to_precision(symbol, amount))
    if price is not None:
        request['limit_price'] = self.price_to_precision(symbol, price)
    response = self.privatePutOrders(self.extend(request, params))
    rawOrder = self.safe_value(response, 'result')
    return self.parse_order(rawOrder, market)
def cancel_order(self, id, symbol=None, params={}):
    """
    Cancel one order. The exchange addresses orders by (id, product_id),
    so *symbol* is mandatory.
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {
        'id': int(id),
        'product_id': market['numericId'],
    }
    response = self.privateDeleteOrders(self.extend(request, params))
    # the result is the cancelled order with state "cancelled" and
    # cancellation_reason "cancelled_by_user"
    canceled = self.safe_value(response, 'result')
    return self.parse_order(canceled, market)
def cancel_all_orders(self, symbol=None, params={}):
    """
    Cancel every open order in one market. Returns the raw response,
    which carries no order payload ({"result":{},"success":true}).
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {
        'product_id': market['numericId'],
        # optional params: 'cancel_limit_orders' / 'cancel_stop_orders' ('true')
    }
    return self.privateDeleteOrdersAll(self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    # Thin wrapper: open orders come from GET /orders; shares all filtering
    # and pagination logic with fetch_closed_orders via fetch_orders_with_method.
    return self.fetch_orders_with_method('privateGetOrders', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    # Thin wrapper: closed orders come from GET /orders/history; shares all
    # filtering and pagination logic with fetch_open_orders.
    return self.fetch_orders_with_method('privateGetOrdersHistory', symbol, since, limit, params)
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
    """
    Shared implementation behind fetch_open_orders / fetch_closed_orders.

    Filters the endpoint understands (pass extras via *params*):
    product_ids, contract_types, order_types, start_time/end_time
    (microseconds), after/before pagination cursors, page_size.
    """
    self.load_markets()
    market = None
    request = {}
    if symbol is not None:
        market = self.market(symbol)
        request['product_ids'] = market['numericId']  # accepts a comma-separated list of ids
    if since is not None:
        # the API expects microseconds; since is in milliseconds
        request['start_time'] = str(since) + '000'
    if limit is not None:
        request['page_size'] = limit
    response = getattr(self, method)(self.extend(request, params))
    # {"success":true,"result":[...orders...],"meta":{"after":...,"before":...}}
    rawOrders = self.safe_value(response, 'result', [])
    return self.parse_orders(rawOrders, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch the user's own fills via GET /fills.

    Supported filters (extras via *params*): product_ids, contract_types,
    start_time/end_time (microseconds), after/before cursors, page_size.
    """
    self.load_markets()
    market = None
    request = {}
    if symbol is not None:
        market = self.market(symbol)
        request['product_ids'] = market['numericId']  # accepts a comma-separated list of ids
    if since is not None:
        # milliseconds -> microseconds expected by the API
        request['start_time'] = str(since) + '000'
    if limit is not None:
        request['page_size'] = limit
    response = self.privateGetFills(self.extend(request, params))
    # {"meta":{...pagination...},"result":[...fills with product/commission...],"success":true}
    fills = self.safe_value(response, 'result', [])
    return self.parse_trades(fills, market, since, limit)
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
    """
    Fetch wallet transactions (deposits, withdrawals, fees, pnl, ...).

    Supported filters (extras via *params*): asset_id, end_time,
    after/before cursors, page_size.
    """
    self.load_markets()
    currency = None
    request = {}
    if code is not None:
        currency = self.currency(code)
        request['asset_id'] = currency['numericId']
    if limit is not None:
        request['page_size'] = limit
    response = self.privateGetWalletTransactions(self.extend(request, params))
    # {"meta":{...},"result":[{"amount":...,"asset_id":...,"balance":...,
    #  "transaction_type":"deposit","uuid":...}],"success":true}
    entries = self.safe_value(response, 'result', [])
    return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
    """Map an exchange transaction_type onto a unified ledger entry type."""
    mapping = {
        'pnl': 'pnl',
        'deposit': 'transaction',
        'withdrawal': 'transaction',
        'commission': 'fee',
        'conversion': 'trade',
        'referral_bonus': 'referral',
        'commission_rebate': 'rebate',
    }
    # unmapped types (e.g. perpetual_futures_funding, withdrawal_cancellation,
    # promo_credit) pass through unchanged
    return self.safe_string(mapping, type, type)
def parse_ledger_entry(self, item, currency=None):
    """
    Convert a raw wallet transaction into a unified ledger entry.

    Raw shape: amount, asset_id, balance (post-transaction), created_at
    (ISO8601), meta_data.transaction_id, transaction_type, uuid.
    """
    id = self.safe_string(item, 'uuid')
    metaData = self.safe_value(item, 'meta_data', {})
    referenceId = self.safe_string(metaData, 'transaction_id')
    type = self.safe_string(item, 'transaction_type')
    # classify the raw type as an inflow or an outflow before remapping it
    direction = None
    if type in ('deposit', 'commission_rebate', 'referral_bonus', 'pnl', 'withdrawal_cancellation', 'promo_credit'):
        direction = 'in'
    elif type in ('withdrawal', 'commission', 'conversion', 'perpetual_futures_funding'):
        direction = 'out'
    type = self.parse_ledger_entry_type(type)
    currencyId = self.safe_integer(item, 'asset_id')
    currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
    currency = self.safe_value(currenciesByNumericId, currencyId, currency)
    code = None if (currency is None) else currency['code']
    amount = self.safe_number(item, 'amount')
    timestamp = self.parse8601(self.safe_string(item, 'created_at'))
    after = self.safe_number(item, 'balance')
    # NOTE(review): this assumes 'amount' was added to reach 'balance', which
    # only holds for inflows — confirm how the API signs outgoing amounts
    before = max(0, after - amount)
    return {
        'info': item,
        'id': id,
        'direction': direction,
        'account': None,
        'referenceId': referenceId,
        'referenceAccount': None,
        'type': type,
        'currency': code,
        'amount': amount,
        'before': before,
        'after': after,
        'status': 'ok',
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'fee': None,
    }
def fetch_deposit_address(self, code, params={}):
    """Fetch and validate the deposit address for the given currency code."""
    self.load_markets()
    currency = self.currency(code)
    request = {'asset_symbol': currency['id']}
    response = self.privateGetDepositsAddress(self.extend(request, params))
    # {"success":true,"result":{"address":"0x...","status":"active","asset_symbol":"USDT",...}}
    depositInfo = self.safe_value(response, 'result', {})
    address = self.safe_string(depositInfo, 'address')
    self.check_address(address)
    return {
        'currency': code,
        'address': address,
        'tag': None,  # this exchange does not use memo/tag fields
        'info': response,
    }
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """
    Build the request url/headers/body. Private endpoints are signed with
    an HMAC over method + timestamp + path, plus the query string for
    GET/DELETE or the JSON body for other verbs.
    """
    requestPath = '/' + self.version + '/' + self.implode_params(path, params)
    url = self.urls['api'][api] + requestPath
    query = self.omit(params, self.extract_params(path))
    if api == 'public':
        if query:
            url += '?' + self.urlencode(query)
    elif api == 'private':
        self.check_required_credentials()
        timestamp = str(self.seconds())
        auth = method + timestamp + requestPath
        headers = {
            'api-key': self.apiKey,
            'timestamp': timestamp,
        }
        if method in ('GET', 'DELETE'):
            if query:
                queryString = '?' + self.urlencode(query)
                auth += queryString
                url += queryString
        else:
            body = self.json(query)
            auth += body
            headers['Content-Type'] = 'application/json'
        # HMAC over the canonical string; the signature travels in its own header
        headers['signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """Raise a mapped ccxt exception when the response carries an error code."""
    if response is None:
        return
    # e.g. {"error":{"code":"insufficient_margin","context":{...}},"success":false}
    error = self.safe_value(response, 'error', {})
    errorCode = self.safe_string(error, 'code')
    if errorCode is None:
        return
    feedback = self.id + ' ' + body
    self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
    self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)
    raise ExchangeError(feedback)  # unknown message
| 41.491141
| 238
| 0.458185
|
4a0d659623d863cf345d52067436fae44d6653bb
| 1,363
|
py
|
Python
|
2021/Python/9.py
|
hckr/adventofcode-haskell
|
fa6a7624c68392d45b937c49cc35c17f314ea6e4
|
[
"MIT"
] | null | null | null |
2021/Python/9.py
|
hckr/adventofcode-haskell
|
fa6a7624c68392d45b937c49cc35c17f314ea6e4
|
[
"MIT"
] | null | null | null |
2021/Python/9.py
|
hckr/adventofcode-haskell
|
fa6a7624c68392d45b937c49cc35c17f314ea6e4
|
[
"MIT"
] | null | null | null |
import fileinput
from typing import Optional
import numpy as np
def main(input_path: Optional[str] = None):
    r"""
    Advent of Code 2021 day 9, part 1: print the sum of risk levels
    (height + 1) over all low points — cells strictly lower than each of
    their four neighbours.

    >>> from _pytest.monkeypatch import MonkeyPatch
    >>> with MonkeyPatch.context() as monkeypatch:
    ...     monkeypatch.setattr(fileinput, "input", lambda x: iter([
    ...         "2199943210\n",
    ...         "3987894921\n",
    ...         "9856789892\n",
    ...         "8767896789\n",
    ...         "9899965678\n"]))
    ...     main()
    15
    >>> main('../9.in')
    452
    """
    grid = np.array(
        [list(row.strip()) for row in fileinput.input(input_path)], dtype=int
    )
    rows, cols = grid.shape
    # the filler rows/columns are "lower than anything" borders, so edge cells
    # only compete with their in-grid neighbours
    pad_row = filler((1, cols))
    pad_col = filler((rows, 1))
    lower_than_up = np.concatenate([pad_row, grid[1:] - grid[:-1]], axis=0) < 0
    lower_than_right = np.concatenate([grid[:, :-1] - grid[:, 1:], pad_col], axis=1) < 0
    lower_than_down = np.concatenate([grid[:-1] - grid[1:], pad_row], axis=0) < 0
    lower_than_left = np.concatenate([pad_col, grid[:, 1:] - grid[:, :-1]], axis=1) < 0
    low_points = lower_than_up & lower_than_right & lower_than_down & lower_than_left
    print(sum(grid[low_points] + 1))
def filler(shape):
    """Return a float array of *shape* filled with -10, a sentinel lower than any height (0-9)."""
    return np.full(shape, -10.0)
# Script entry point: read the heightmap from stdin/argv and print the answer.
if __name__ == "__main__":
    main()
| 29.630435
| 95
| 0.566398
|
4a0d65dea4bc10ed7b1d67c9a5865ccd8304f9e7
| 4,295
|
py
|
Python
|
Python-3.7.12/PC/layout/support/props.py
|
TimS-ml/CPython-internals-Note
|
8dcf9e9db3a42926689ed426ec271bcae7db8178
|
[
"Xnet",
"X11"
] | 29
|
2018-04-05T10:12:30.000Z
|
2021-08-19T12:02:23.000Z
|
Python-3.7.12/PC/layout/support/props.py
|
TimS-ml/CPython-internals-Note
|
8dcf9e9db3a42926689ed426ec271bcae7db8178
|
[
"Xnet",
"X11"
] | 1
|
2022-01-20T21:49:17.000Z
|
2022-01-20T21:49:17.000Z
|
Python-3.7.12/PC/layout/support/props.py
|
TimS-ml/CPython-internals-Note
|
8dcf9e9db3a42926689ed426ec271bcae7db8178
|
[
"Xnet",
"X11"
] | 17
|
2018-04-05T06:55:49.000Z
|
2022-03-28T22:25:36.000Z
|
"""
Provides .props file.
"""
import os
from .constants import *
__all__ = ["PYTHON_PROPS_NAME"]
def public(f):
    # Decorator: register *f* in this module's public API (__all__) and return it unchanged.
    __all__.append(f.__name__)
    return f
# Name of the generated props file inside the layout.
PYTHON_PROPS_NAME = "python.props"

# Values substituted into PROPS_TEMPLATE; overridable via environment variables.
PROPS_DATA = {
    "PYTHON_TAG": VER_DOT,
    "PYTHON_VERSION": os.getenv("PYTHON_NUSPEC_VERSION"),
    "PYTHON_PLATFORM": os.getenv("PYTHON_PROPS_PLATFORM"),
    "PYTHON_TARGET": "",
}

if not PROPS_DATA["PYTHON_VERSION"]:
    # Derive the version string from build constants, e.g. "3.7.12" or "3.8.0-rc1"
    if VER_NAME:
        PROPS_DATA["PYTHON_VERSION"] = "{}.{}-{}{}".format(
            VER_DOT, VER_MICRO, VER_NAME, VER_SERIAL
        )
    else:
        PROPS_DATA["PYTHON_VERSION"] = "{}.{}".format(VER_DOT, VER_MICRO)

if not PROPS_DATA["PYTHON_PLATFORM"]:
    PROPS_DATA["PYTHON_PLATFORM"] = "x64" if IS_X64 else "Win32"

# MSBuild target name, made unique per version/platform
# (e.g. _GetPythonRuntimeFilesDependsOn37_x64)
PROPS_DATA["PYTHON_TARGET"] = "_GetPythonRuntimeFilesDependsOn{}{}_{}".format(
    VER_MAJOR, VER_MINOR, PROPS_DATA["PYTHON_PLATFORM"]
)
PROPS_TEMPLATE = r"""<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="$(Platform) == '{PYTHON_PLATFORM}'">
<PythonHome Condition="$(PythonHome) == ''">$([System.IO.Path]::GetFullPath("$(MSBuildThisFileDirectory)\..\..\tools"))</PythonHome>
<PythonInclude>$(PythonHome)\include</PythonInclude>
<PythonLibs>$(PythonHome)\libs</PythonLibs>
<PythonTag>{PYTHON_TAG}</PythonTag>
<PythonVersion>{PYTHON_VERSION}</PythonVersion>
<IncludePythonExe Condition="$(IncludePythonExe) == ''">true</IncludePythonExe>
<IncludeDistutils Condition="$(IncludeDistutils) == ''">false</IncludeDistutils>
<IncludeLib2To3 Condition="$(IncludeLib2To3) == ''">false</IncludeLib2To3>
<IncludeVEnv Condition="$(IncludeVEnv) == ''">false</IncludeVEnv>
<GetPythonRuntimeFilesDependsOn>{PYTHON_TARGET};$(GetPythonRuntimeFilesDependsOn)</GetPythonRuntimeFilesDependsOn>
</PropertyGroup>
<ItemDefinitionGroup Condition="$(Platform) == '{PYTHON_PLATFORM}'">
<ClCompile>
<AdditionalIncludeDirectories>$(PythonInclude);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(PythonLibs);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<Target Name="GetPythonRuntimeFiles" Returns="@(PythonRuntime)" DependsOnTargets="$(GetPythonRuntimeFilesDependsOn)" />
<Target Name="{PYTHON_TARGET}" Returns="@(PythonRuntime)">
<ItemGroup>
<_PythonRuntimeExe Include="$(PythonHome)\python*.dll" />
<_PythonRuntimeExe Include="$(PythonHome)\python*.exe" Condition="$(IncludePythonExe) == 'true'" />
<_PythonRuntimeExe>
<Link>%(Filename)%(Extension)</Link>
</_PythonRuntimeExe>
<_PythonRuntimeDlls Include="$(PythonHome)\DLLs\*.pyd" />
<_PythonRuntimeDlls Include="$(PythonHome)\DLLs\*.dll" />
<_PythonRuntimeDlls>
<Link>DLLs\%(Filename)%(Extension)</Link>
</_PythonRuntimeDlls>
<_PythonRuntimeLib Include="$(PythonHome)\Lib\**\*" Exclude="$(PythonHome)\Lib\**\*.pyc;$(PythonHome)\Lib\site-packages\**\*" />
<_PythonRuntimeLib Remove="$(PythonHome)\Lib\distutils\**\*" Condition="$(IncludeDistutils) != 'true'" />
<_PythonRuntimeLib Remove="$(PythonHome)\Lib\lib2to3\**\*" Condition="$(IncludeLib2To3) != 'true'" />
<_PythonRuntimeLib Remove="$(PythonHome)\Lib\ensurepip\**\*" Condition="$(IncludeVEnv) != 'true'" />
<_PythonRuntimeLib Remove="$(PythonHome)\Lib\venv\**\*" Condition="$(IncludeVEnv) != 'true'" />
<_PythonRuntimeLib>
<Link>Lib\%(RecursiveDir)%(Filename)%(Extension)</Link>
</_PythonRuntimeLib>
<PythonRuntime Include="@(_PythonRuntimeExe);@(_PythonRuntimeDlls);@(_PythonRuntimeLib)" />
</ItemGroup>
<Message Importance="low" Text="Collected Python runtime from $(PythonHome):%0D%0A@(PythonRuntime->' %(Link)','%0D%0A')" />
</Target>
</Project>
"""
@public
def get_props_layout(ns):
    """Yield the (name, temp-path) pair for python.props when props output is requested."""
    wanted = ns.include_all or ns.include_props
    if wanted:
        yield "python.props", ns.temp / "python.props"
@public
def get_props(ns):
    """Render python.props from the template and return it UTF-8 encoded."""
    # TODO: Filter contents of props file according to included/excluded items
    return PROPS_TEMPLATE.format_map(PROPS_DATA).encode("utf-8")
| 39.045455
| 136
| 0.689639
|
4a0d661f1dae839871dabe2d04bb61bb6c6dcc1f
| 1,691
|
py
|
Python
|
App-Installer.py
|
m-jishnu/Microsoft-Store-App-Installer
|
019e6b74835fc2b032b278e7d867bdb1923c42a1
|
[
"MIT"
] | null | null | null |
App-Installer.py
|
m-jishnu/Microsoft-Store-App-Installer
|
019e6b74835fc2b032b278e7d867bdb1923c42a1
|
[
"MIT"
] | null | null | null |
App-Installer.py
|
m-jishnu/Microsoft-Store-App-Installer
|
019e6b74835fc2b032b278e7d867bdb1923c42a1
|
[
"MIT"
] | null | null | null |
import os
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from windows import set_dpi_awareness
import webbrowser
def callback(url):
    # Open *url* in a new browser window/tab (used by the credits label below).
    webbrowser.open_new(url)
set_dpi_awareness()

try:
    def select_file():
        # Ask the user for a package file and install it via PowerShell's Add-AppPackage.
        filename = filedialog.askopenfilename(initialdir="/",
                                              title="Select a File")
        # BUG FIX: the chosen path was never interpolated into the command;
        # it is now, and a cancelled dialog (empty string) is a no-op.
        if filename:
            os.system(f'powershell.exe Add-AppPackage "{filename}"')

    # Create the root window
    window = tk.Tk()
    # Set window title
    window.title('file Installer')
    # icon set
    # window.iconbitmap(path)

    label = ttk.Label(window,
                      text="file Installer V1.1")
    label.config(font=("Courier", 12))

    button_explore = ttk.Button(window,
                                text="Select File",
                                command=select_file)
    button_exit = ttk.Button(window,
                             text="Exit",
                             command=window.destroy)
    label_credits = ttk.Label(window,
                              text="By TechoZ")
    label_credits.config(font=("Courier", 12))

    label.grid(column=0, row=0, padx=100, pady=10)
    button_explore.grid(column=0, row=1, padx=10, pady=10)
    button_exit.grid(column=0, row=2, padx=10, pady=2)
    label_credits.grid(column=0, row=3, padx=10,
                       sticky='E', columnspan=True)
    # Clicking the credits label opens the author's YouTube channel
    label_credits.bind(
        "<Button-1>", lambda e: callback("http://youtube.com/c/techoz_youtube_channel"))

    window.mainloop()
except:  # deliberately broad: show the traceback and keep the console window open
    import traceback
    traceback.print_exc()
    input("Press Enter to end...")
| 26.015385
| 89
| 0.562389
|
4a0d6702f635d32012718b19e2d217a26328bae6
| 11,210
|
py
|
Python
|
blog/views.py
|
tm2018/vmaig_blog
|
b79fdb2255879dc5111d6a72cb09877af573dede
|
[
"BSD-3-Clause"
] | null | null | null |
blog/views.py
|
tm2018/vmaig_blog
|
b79fdb2255879dc5111d6a72cb09877af573dede
|
[
"BSD-3-Clause"
] | null | null | null |
blog/views.py
|
tm2018/vmaig_blog
|
b79fdb2255879dc5111d6a72cb09877af573dede
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django import template
from django import forms
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response
from django.template import Context, loader
from django.views.generic import View, TemplateView, ListView, DetailView
from django.db.models import Q
from django.core.cache import caches
from django.core.exceptions import PermissionDenied
from django.core.cache.backends.base import InvalidCacheBackendError
from django.contrib import auth
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from blog.models import Article, Category, Carousel, Column, Nav, News
from vmaig_comments.models import Comment
from vmaig_auth.models import VmaigUser
from vmaig_system.models import Link
from vmaig_auth.forms import VmaigUserCreationForm, VmaigPasswordRestForm
from django.conf import settings
import datetime
import time
import json
import logging
# Cache backend: prefer the 'redis' cache, falling back to the default
# backend when redis is not configured.
try:
    cache = caches['redis']
except InvalidCacheBackendError as e:
    cache = caches['default']
# Module-level logger
logger = logging.getLogger(__name__)
class BaseMixin(object):
    """Mixin injecting site-wide context into every view.

    Adds: site title/welcome text, top-10 hot articles, the navigation
    bar, the latest comments, friend links (with round-robin colors) and
    the logged-in user's unread-notification count.
    """

    def get_context_data(self, *args, **kwargs):
        context = super(BaseMixin, self).get_context_data(**kwargs)
        try:
            # Site title / welcome message
            context['website_title'] = settings.WEBSITE_TITLE
            context['website_welcome'] = settings.WEBSITE_WELCOME
            # Hot articles (top 10 by view count)
            context['hot_article_list'] = \
                Article.objects.order_by("-view_times")[0:10]
            # Navigation bar
            # context['nav_list'] = Nav.objects.filter(status=0)
            context['nav_list'] = Nav.objects.order_by('rank').filter(status=0)
            # Latest comments
            context['latest_comment_list'] = \
                Comment.objects.order_by("-create_time")[0:10]
            # Friend links, colored round-robin from the palette below
            context['links'] = Link.objects.order_by('create_time').all()
            colors = ['primary', 'success', 'info', 'warning', 'danger']
            for index, link in enumerate(context['links']):
                link.color = colors[index % len(colors)]
            # Unread notification count for the logged-in user
            user = self.request.user
            if user.is_authenticated:
                context['notification_count'] = \
                    user.to_user_notification_set.filter(is_read=0).count()
        except Exception:
            # BUG FIX: previously logged only a bare message, discarding
            # the traceback; logger.exception records the full details.
            logger.exception(u'[BaseMixin]加载基本信息出错')
        return context
class IndexView(BaseMixin, ListView):
    """Front page: paginated published articles plus the carousel."""
    template_name = 'blog/index.html'
    context_object_name = 'article_list'
    paginate_by = settings.PAGE_NUM  # articles per page

    def get_context_data(self, **kwargs):
        # Carousel slides shown at the top of the page
        kwargs['carousel_page_list'] = Carousel.objects.all()
        return super(IndexView, self).get_context_data(**kwargs)

    def get_queryset(self):
        # Only published articles (status == 0)
        return Article.objects.filter(status=0)
class ArticleView(BaseMixin, DetailView):
    # Published (0) or status-1 articles are viewable.
    queryset = Article.objects.filter(Q(status=0) | Q(status=1))
    template_name = 'blog/article.html'
    context_object_name = 'article'
    # Articles are addressed by their English-title slug.
    slug_field = 'en_title'

    def get(self, request, *args, **kwargs):
        """Serve the article, counting at most one view per IP per
        15-minute window (the window is tracked in the cache)."""
        # Determine the client IP, honouring reverse-proxy headers.
        if 'HTTP_X_FORWARDED_FOR' in request.META:
            ip = request.META['HTTP_X_FORWARDED_FOR']
        else:
            ip = request.META['REMOTE_ADDR']
        self.cur_user_ip = ip

        en_title = self.kwargs.get('slug')
        # IPs that visited this article within the last 15*60 seconds.
        visited_ips = cache.get(en_title, [])

        # First visit from this IP in the window: bump the view counter.
        if ip not in visited_ips:
            try:
                article = self.queryset.get(en_title=en_title)
            except Article.DoesNotExist:
                logger.error(u'[ArticleView]访问不存在的文章:[%s]' % en_title)
                raise Http404
            else:
                article.view_times += 1
                article.save()
            visited_ips.append(ip)
            # Refresh the cached IP list with the same 15-minute TTL.
            cache.set(en_title, visited_ips, 15*60)

        return super(ArticleView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Attach the article's comments to the template context.
        en_title = self.kwargs.get('slug', '')
        kwargs['comment_list'] = \
            self.queryset.get(en_title=en_title).comment_set.all()
        return super(ArticleView, self).get_context_data(**kwargs)
class AllView(BaseMixin, ListView):
    template_name = 'blog/all.html'
    context_object_name = 'article_list'

    def get_context_data(self, **kwargs):
        kwargs['category_list'] = Category.objects.all()
        kwargs['PAGE_NUM'] = settings.PAGE_NUM
        return super(AllView, self).get_context_data(**kwargs)

    def get_queryset(self):
        # Initial page load: newest PAGE_NUM published articles.
        article_list = Article.objects.filter(
            status=0
        ).order_by("-pub_time")[0:settings.PAGE_NUM]
        return article_list

    def post(self, request, *args, **kwargs):
        """AJAX endpoint: render article cards for the requested category
        ('val'), sort order and [start, end) window, returned as JSON."""
        val = self.request.POST.get("val", "")
        sort = self.request.POST.get("sort", "time")
        start = self.request.POST.get("start", 0)
        end = self.request.POST.get("end", settings.PAGE_NUM)
        start = int(start)
        end = int(end)

        # Map the requested sort keyword onto a model ordering
        # (anything unrecognised falls back to newest-first).
        if sort == "time":
            sort = "-pub_time"
        elif sort == "recommend":
            sort = "-view_times"
        else:
            sort = "-pub_time"

        # Fetch one extra row past `end` as a probe, so we can tell
        # whether more articles remain after this window (see `isend`).
        if val == "all":
            article_list = \
                Article.objects.filter(status=0).order_by(sort)[start:end+1]
        else:
            try:
                article_list = Category.objects.get(
                    name=val
                ).article_set.filter(
                    status=0
                ).order_by(sort)[start:end+1]
            except Category.DoesNotExist:
                logger.error(u'[AllView]此分类不存在:[%s]' % val)
                raise PermissionDenied

        # Fewer rows than requested+1 means we've reached the end.
        isend = len(article_list) != (end-start+1)
        # Drop the probe row before rendering.
        article_list = article_list[0:end-start]
        html = ""
        for article in article_list:
            html += template.loader.get_template(
                'blog/include/all_post.html'
            ).render({'post': article})
        mydict = {"html": html, "isend": isend}
        return HttpResponse(
            json.dumps(mydict),
            content_type="application/json"
        )
class SearchView(BaseMixin, ListView):
    """Search published articles by title, summary or tags."""
    template_name = 'blog/search.html'
    context_object_name = 'article_list'
    paginate_by = settings.PAGE_NUM

    def get_context_data(self, **kwargs):
        # Echo the query string back to the template.
        kwargs['s'] = self.request.GET.get('s', '')
        return super(SearchView, self).get_context_data(**kwargs)

    def get_queryset(self):
        # Case-insensitive containment match on title, summary or tags,
        # restricted to published articles (status == 0).
        keyword = self.request.GET.get('s', '')
        matches = (Q(title__icontains=keyword)
                   | Q(summary__icontains=keyword)
                   | Q(tags__icontains=keyword))
        return Article.objects.only(
            'title', 'summary', 'tags'
        ).filter(matches, status=0)
class TagView(BaseMixin, ListView):
    """List published articles whose tags contain the given tag."""
    template_name = 'blog/tag.html'
    context_object_name = 'article_list'
    paginate_by = settings.PAGE_NUM

    def get_queryset(self):
        wanted = self.kwargs.get('tag', '')
        return Article.objects.only('tags').filter(
            tags__icontains=wanted, status=0)
class CategoryView(BaseMixin, ListView):
    """List every article of a named category; 404 when it is unknown."""
    template_name = 'blog/category.html'
    context_object_name = 'article_list'
    paginate_by = settings.PAGE_NUM

    def get_queryset(self):
        name = self.kwargs.get('category', '')
        try:
            return Category.objects.get(name=name).article_set.all()
        except Category.DoesNotExist:
            logger.error(u'[CategoryView]此分类不存在:[%s]' % name)
            raise Http404
class UserView(BaseMixin, TemplateView):
    """User-centre pages: avatar, password, info, messages, notifications."""
    template_name = 'blog/user.html'

    def get(self, request, *args, **kwargs):
        # Anonymous users are sent to the login page instead.
        if not request.user.is_authenticated:
            logger.error(u'[UserView]用户未登陆')
            return render(request, 'blog/login.html')

        slug = self.kwargs.get('slug')
        # Map each sub-page slug onto its template.
        if slug == 'changetx':
            self.template_name = 'blog/user_changetx.html'
        elif slug == 'changepassword':
            self.template_name = 'blog/user_changepassword.html'
        elif slug == 'changeinfo':
            self.template_name = 'blog/user_changeinfo.html'
        elif slug == 'message':
            self.template_name = 'blog/user_message.html'
        elif slug == 'notification':
            self.template_name = 'blog/user_notification.html'
        else:
            # Unknown sub-page.
            logger.error(u'[UserView]不存在此接口')
            raise Http404
        # BUG FIX: previously the rendered response was returned from
        # inside the last branch only, so every other valid slug fell
        # through to the 404 below; now all valid slugs render.
        return super(UserView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(UserView, self).get_context_data(**kwargs)
        slug = self.kwargs.get('slug')
        if slug == 'notification':
            # Newest notifications first.
            context['notifications'] = \
                self.request.user.to_user_notification_set.order_by(
                    '-create_time'
                ).all()
        return context
class ColumnView(BaseMixin, ListView):
    """Paginated articles of a named column; 404 when it is unknown."""
    queryset = Column.objects.all()
    template_name = 'blog/column.html'
    context_object_name = 'article_list'
    paginate_by = settings.PAGE_NUM

    def _load_column(self, name):
        # Shared lookup: resolve the column or raise 404.
        try:
            return Column.objects.get(name=name)
        except Column.DoesNotExist:
            logger.error(u'[ColumnView]访问专栏不存在: [%s]' % name)
            raise Http404

    def get_context_data(self, **kwargs):
        kwargs['column'] = self._load_column(self.kwargs.get('column', ''))
        return super(ColumnView, self).get_context_data(**kwargs)

    def get_queryset(self):
        return self._load_column(self.kwargs.get('column', '')).article.all()
class NewsView(BaseMixin, TemplateView):
    """News timeline: one block of news items per day in a date window."""
    template_name = 'blog/news.html'

    def get_context_data(self, **kwargs):
        timeblocks = []
        # Day offsets (inclusive) of the requested window, newest first.
        start_day = self.request.GET.get("start", "0")
        end_day = self.request.GET.get("end", "6")
        start_day = int(start_day)
        end_day = int(end_day)

        # Anchor the window on the most recent news item, falling back
        # to "now" when there is no news at all.
        start_date = datetime.datetime.now()
        last_new = News.objects.order_by("-pub_time").first()
        if last_new is not None:
            start_date = last_new.pub_time

        # Collect the per-day news lists inside the window; days with
        # no news are skipped.
        for x in range(start_day, end_day+1):
            date = start_date - datetime.timedelta(x)
            news_list = News.objects.filter(
                pub_time__year=date.year,
                pub_time__month=date.month,
                pub_time__day=date.day
            )
            if news_list:
                timeblocks.append(news_list)

        kwargs['timeblocks'] = timeblocks
        # BUG FIX: use integer division — on Python 3 `/` yields a float
        # (e.g. 1.0) while the template compares against integer week
        # indices to decide which <li> is active.
        kwargs['active'] = start_day // 7
        return super(NewsView, self).get_context_data(**kwargs)
| 32.587209
| 79
| 0.608742
|
4a0d68a1c1849ee1e8e88ad0265cf5a62fc7ee25
| 5,914
|
py
|
Python
|
test/functional/fabcoin-searchlog.py
|
XianlinGong/fabcoinsc-dev
|
585d90f376a9223ab172151a81b92dca1113ecd6
|
[
"MIT"
] | 26
|
2018-04-24T00:33:11.000Z
|
2022-03-26T15:46:04.000Z
|
test/functional/fabcoin-searchlog.py
|
XianlinGong/fabcoinsc-dev
|
585d90f376a9223ab172151a81b92dca1113ecd6
|
[
"MIT"
] | 4
|
2018-07-17T13:33:26.000Z
|
2018-08-27T07:10:49.000Z
|
test/functional/fabcoin-searchlog.py
|
XianlinGong/fabcoinsc-dev
|
585d90f376a9223ab172151a81b92dca1113ecd6
|
[
"MIT"
] | 22
|
2018-04-24T00:33:31.000Z
|
2022-02-03T09:40:26.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import FabcoinTestFramework
from test_framework.fabcoinconfig import INITIAL_BLOCK_REWARD, COINBASE_MATURITY
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import sys
class FabcoinRPCSearchlogsTest(FabcoinTestFramework):
    """Functional test for the `searchlogs` RPC.

    Verifies address/topic filtering over contract event logs, including
    that topic order matters and that non-matching topics yield [].
    """

    def set_test_params(self):
        # Single node on a fresh chain; -logevents enables the log index
        # that searchlogs queries.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-logevents"]]

    def run_test(self):
        # Mature enough coinbases so the node has spendable funds.
        self.nodes[0].generate(COINBASE_MATURITY+100)
        # Contract #1: calling selector 5b9af12b emits the same event
        # (topic c5c4...e7f2) twice.
        contract_address = self.nodes[0].createcontract("6060604052600d600055341561001457600080fd5b61017e806100236000396000f30060606040526004361061004c576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063027c1aaf1461004e5780635b9af12b14610058575b005b61005661008f565b005b341561006357600080fd5b61007960048080359060200190919050506100a1565b6040518082815260200191505060405180910390f35b60026000808282540292505081905550565b60007fc5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f282600054016000548460405180848152602001838152602001828152602001935050505060405180910390a17fc5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f282600054016000548460405180848152602001838152602001828152602001935050505060405180910390a1816000540160008190555060005490509190505600a165627a7a7230582015732bfa66bdede47ecc05446bf4c1e8ed047efac25478cb13b795887df70f290029")['address']
        self.nodes[0].generate(1)
        # Filter restricted to the contract's address.
        addresses = {}
        address = []
        address.append(contract_address)
        addresses["addresses"] = address
        # The topic actually emitted by the contract.
        topics = {}
        topic = []
        topic.append("c5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f2")
        topics["topics"] = topic
        # A topic hash no emitted event matches (first byte differs).
        error_topics = {}
        error_topic = []
        error_topic.append("35c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f2")
        error_topics["topics"] = error_topic
        self.nodes[0].sendtocontract(contract_address,"5b9af12b")
        self.nodes[0].generate(1)
        # Filtering by the emitted topic must not change the result set;
        # filtering by the bogus topic must return nothing.
        assert_equal(self.nodes[0].searchlogs(602,602,addresses),self.nodes[0].searchlogs(602,602,addresses,topics))
        assert_equal(self.nodes[0].searchlogs(602,602,addresses,error_topics),[])
        # Contract #2: calling selector d3b57be9 emits one event with four
        # indexed topics ("topic 1" .. "topic 4").
        contract_address = self.nodes[0].createcontract("6060604052341561000f57600080fd5b61029b8061001e6000396000f300606060405260043610610062576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806394e8767d14610067578063b717cfe6146100a6578063d3b57be9146100bb578063f7e52d58146100d0575b600080fd5b341561007257600080fd5b61008860048080359060200190919050506100e5565b60405180826000191660001916815260200191505060405180910390f35b34156100b157600080fd5b6100b961018e565b005b34156100c657600080fd5b6100ce6101a9565b005b34156100db57600080fd5b6100e36101b3565b005b600080821415610117577f30000000000000000000000000000000000000000000000000000000000000009050610186565b5b600082111561018557610100816001900481151561013257fe5b0460010290507f01000000000000000000000000000000000000000000000000000000000000006030600a8481151561016757fe5b06010260010281179050600a8281151561017d57fe5b049150610118565b5b809050919050565b60008081548092919060010191905055506101a76101b3565b565b6101b161018e565b565b7f746f7069632034000000000000000000000000000000000000000000000000007f746f7069632033000000000000000000000000000000000000000000000000007f746f7069632032000000000000000000000000000000000000000000000000007f746f70696320310000000000000000000000000000000000000000000000000060405180807f3700000000000000000000000000000000000000000000000000000000000000815250600101905060405180910390a45600a165627a7a72305820262764914338437fc49c9f752503904820534b24092308961bc10cd851985ae50029")['address']
        self.nodes[0].generate(1)
        self.nodes[0].sendtocontract(contract_address,"d3b57be9")
        self.nodes[0].generate(1)
        address.clear()
        address.append(contract_address)
        # Topics in emission order ("topic 1", "topic 2") must match.
        topic.clear()
        topic.append("746f706963203100000000000000000000000000000000000000000000000000")
        topic.append("746f706963203200000000000000000000000000000000000000000000000000")
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses),self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics))
        # Reversed topic order must NOT match (topic position matters).
        topic.reverse()
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics),[])
        # ("topic 1", "topic 3") in positions 0 and 1... replaced below:
        topic.remove("746f706963203200000000000000000000000000000000000000000000000000")
        topic.append("746f706963203300000000000000000000000000000000000000000000000000")
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses),self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics))
        # A corrupted first topic must not match.
        topic.remove("746f706963203100000000000000000000000000000000000000000000000000")
        topic.insert(0,"746f706963103100000000000000000000000000000000000000000000000000")
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics),[])
        # ("topic 1"-corrupted removed; "topic 2" in position 1) matches again.
        topic.remove("746f706963203300000000000000000000000000000000000000000000000000")
        topic.insert(1,"746f706963203200000000000000000000000000000000000000000000000000")
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses),self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics))
        # And reversed order fails again.
        topic.reverse()
        assert_equal(self.nodes[0].searchlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104,addresses,topics),[])
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    FabcoinRPCSearchlogsTest().main()
| 67.977011
| 1,470
| 0.842577
|
4a0d68a5d8f8f7ebe698927d44dea5b0cd95708f
| 67,810
|
py
|
Python
|
ironic/conductor/utils.py
|
dangervon/ironic
|
01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2
|
[
"Apache-2.0"
] | null | null | null |
ironic/conductor/utils.py
|
dangervon/ironic
|
01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2
|
[
"Apache-2.0"
] | null | null | null |
ironic/conductor/utils.py
|
dangervon/ironic
|
01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import crypt
import datetime
import functools
import os
import secrets
import time
from openstack.baremetal import configdrive as os_configdrive
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import faults
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import network
from ironic.common import nova
from ironic.common import states
from ironic.common import utils
from ironic.conductor import notification_utils as notify_utils
from ironic.conductor import task_manager
from ironic.objects import fields
from ironic.objects import node_history
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# Config-friendly names mapped onto crypt hashing methods.
PASSWORD_HASH_FORMAT = {
    'sha256': crypt.METHOD_SHA256,
    'sha512': crypt.METHOD_SHA512,
}
@task_manager.require_exclusive_lock
def node_set_boot_device(task, device, persistent=False):
    """Set the boot device for a node.

    If the node that the boot device change is being requested for
    is in ADOPTING state, the boot device will not be set as that
    change could potentially result in the future running state of
    an adopted node being modified erroneously.

    :param task: a TaskManager instance.
    :param device: Boot device. Values are vendor-specific.
    :param persistent: Whether to set next-boot, or make the change
        permanent. Default: False.
    :raises: InvalidParameterValue if the validation of the
        ManagementInterface fails.
    """
    task.driver.management.validate(task)
    if task.node.provision_state == states.ADOPTING:
        # Never touch the boot device of a node being adopted.
        return

    # driver_info may force persistence regardless of the caller's wish.
    override = task.node.driver_info.get('force_persistent_boot_device')
    if override == 'Always':
        persistent = True
    elif override == 'Never':
        persistent = False
    elif override not in (None, 'Default'):
        # Backward compatibility (used to be a boolean and only True mattered)
        if strutils.bool_from_string(override, strict=False):
            persistent = True

    task.driver.management.set_boot_device(task, device=device,
                                           persistent=persistent)
def node_get_boot_mode(task):
    """Read currently set boot mode from a node.

    Reads the boot mode for a node. If boot mode can't be discovered,
    `None` is returned.

    :param task: a TaskManager instance.
    :raises: DriverOperationError or its derivative in case
        of driver runtime error.
    :raises: UnsupportedDriverExtension if current driver does not have
        management interface or `get_boot_mode()` method is
        not supported.
    :returns: Boot mode. One of :mod:`ironic.common.boot_mode` or `None`
        if boot mode can't be discovered
    """
    management = task.driver.management
    management.validate(task)
    return management.get_boot_mode(task)
# TODO(ietingof): remove `Sets the boot mode...` from the docstring
# once classic drivers are gone
@task_manager.require_exclusive_lock
def node_set_boot_mode(task, mode):
    """Set the boot mode for a node.

    Sets the boot mode for a node if the node's driver interface
    contains a 'management' interface.

    If the node that the boot mode change is being requested for
    is in ADOPTING state, the boot mode will not be set as that
    change could potentially result in the future running state of
    an adopted node being modified erroneously.

    :param task: a TaskManager instance.
    :param mode: Boot mode. Values are one of
        :mod:`ironic.common.boot_modes`
    :raises: InvalidParameterValue if the validation of the
        ManagementInterface fails.
    :raises: DriverOperationError or its derivative in case
        of driver runtime error.
    :raises: UnsupportedDriverExtension if current driver does not have
        vendor interface or method is unsupported.
    """
    if task.node.provision_state == states.ADOPTING:
        # Never touch the boot mode of a node being adopted.
        return

    task.driver.management.validate(task)

    # Drivers that cannot enumerate their supported modes get a
    # best-effort attempt with no validation.
    try:
        known_modes = task.driver.management.get_supported_boot_modes(task)
    except exception.UnsupportedDriverExtension:
        LOG.debug(
            "Cannot determine supported boot modes of driver "
            "%(driver)s. Will make an attempt to set boot mode %(mode)s",
            {'driver': task.node.driver, 'mode': mode})
        known_modes = ()

    if known_modes and mode not in known_modes:
        msg = _("Unsupported boot mode %(mode)s specified for "
                "node %(node_id)s. Supported boot modes are: "
                "%(modes)s") % {'mode': mode,
                                'modes': ', '.join(known_modes),
                                'node_id': task.node.uuid}
        raise exception.InvalidParameterValue(msg)

    task.driver.management.set_boot_mode(task, mode=mode)
def node_wait_for_power_state(task, new_state, timeout=None):
    """Wait for node to be in new power state.

    :param task: a TaskManager instance.
    :param new_state: the desired new power state, one of the power states
        in :mod:`ironic.common.states`.
    :param timeout: number of seconds to wait before giving up. If not
        specified, uses the conductor.power_state_change_timeout config value.
    :raises: PowerStateFailure if timed out
    """
    deadline = (timeout or CONF.conductor.power_state_change_timeout)

    def _poll():
        current = task.driver.power.get_power_state(task)
        if current == new_state:
            raise loopingcall.LoopingCallDone(retvalue=current)
        # NOTE(sambetts): Return False to trigger BackOffLoopingCall to start
        # backing off.
        return False

    poller = loopingcall.BackOffLoopingCall(_poll)
    try:
        return poller.start(initial_delay=1, timeout=deadline).wait()
    except loopingcall.LoopingCallTimeOut:
        LOG.error('Timed out after %(retry_timeout)s secs waiting for '
                  '%(state)s on node %(node_id)s.',
                  {'retry_timeout': deadline,
                   'state': new_state, 'node_id': task.node.uuid})
        raise exception.PowerStateFailure(pstate=new_state)
def _calculate_target_state(new_state):
    """Map a requested power action onto the resulting target power state.

    Returns POWER_ON for on/reboot actions, POWER_OFF for off actions,
    and None for anything unrecognised.
    """
    if new_state in (states.POWER_ON, states.REBOOT, states.SOFT_REBOOT):
        return states.POWER_ON
    if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
        return states.POWER_OFF
    return None
def _can_skip_state_change(task, new_state):
    """Check if we can ignore the power state change request for the node.

    Check if we should ignore the requested power state change. This can occur
    if the requested power state is already the same as our current state. This
    only works for power on and power off state changes. More complex power
    state changes, like reboot, are not skipped.

    :param task: a TaskManager instance containing the node to act on.
    :param new_state: The requested power state to change to. This can be any
        power state from ironic.common.states.
    :returns: True if should ignore the requested power state change. False
        otherwise
    """
    # We only ignore certain state changes. So if the desired new_state is not
    # one of them, then we can return early and not do an un-needed
    # get_power_state() call
    if new_state not in (states.POWER_ON, states.POWER_OFF,
                         states.SOFT_POWER_OFF):
        return False

    node = task.node

    # Closure over `curr_state`, which is assigned below before this is
    # ever called.
    def _not_going_to_change():
        # Neither the ironic service nor the hardware has erred. The
        # node is, for some reason, already in the requested state,
        # though we don't know why. eg, perhaps the user previously
        # requested the node POWER_ON, the network delayed those IPMI
        # packets, and they are trying again -- but the node finally
        # responds to the first request, and so the second request
        # gets to this check and stops.
        # This isn't an error, so we'll clear last_error field
        # (from previous operation), log a warning, and return.
        node['last_error'] = None
        # NOTE(dtantsur): under rare conditions we can get out of sync here
        node['power_state'] = curr_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(
            task, fields.NotificationLevel.INFO,
            fields.NotificationStatus.END, new_state)
        LOG.debug("Not going to change node %(node)s power state because "
                  "current state = requested state = '%(state)s'.",
                  {'node': node.uuid, 'state': curr_state})

    try:
        curr_state = task.driver.power.get_power_state(task)
    except Exception as e:
        # Record the failure in the node's history and notify, then
        # re-raise the original exception to the caller.
        with excutils.save_and_reraise_exception():
            error = _(
                "Failed to change power state to '%(target)s': %(error)s") % {
                    'target': new_state, 'error': e}
            node_history_record(node, event=error, error=True)
            node['target_power_state'] = states.NOSTATE
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)

    # Skip only when the current state already satisfies the request.
    if curr_state == states.POWER_ON:
        if new_state == states.POWER_ON:
            _not_going_to_change()
            return True
    elif curr_state == states.POWER_OFF:
        if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
            _not_going_to_change()
            return True

    LOG.info("Node %(node)s current power state is '%(state)s', "
             "requested state is '%(new_state)s'.",
             {'node': node.uuid, 'state': curr_state, 'new_state': new_state})
    return False
@task_manager.require_exclusive_lock
def node_power_action(task, new_state, timeout=None):
    """Change power state or reset for a node.

    Perform the requested power action if the transition is required.

    :param task: a TaskManager instance containing the node to act on.
    :param new_state: Any power state from ironic.common.states.
    :param timeout: timeout (in seconds) positive integer (> 0) for any
        power state. ``None`` indicates to use default timeout.
    :raises: InvalidParameterValue when the wrong state is specified
        or the wrong driver info is specified.
    :raises: StorageError when a failure occurs updating the node's
        storage interface upon setting power on.
    :raises: other exceptions by the node's power driver if something
        wrong occurred during the power action.
    """
    notify_utils.emit_power_set_notification(
        task, fields.NotificationLevel.INFO, fields.NotificationStatus.START,
        new_state)
    node = task.node

    # No-op when the node is already in the requested state.
    if _can_skip_state_change(task, new_state):
        return
    target_state = _calculate_target_state(new_state)

    # Set the target_power_state and clear any last_error, if we're
    # starting a new operation. This will expose to other processes
    # and clients that work is in progress.
    node['target_power_state'] = target_state
    node['last_error'] = None
    node.timestamp_driver_internal_info('last_power_state_change')
    # NOTE(dtantsur): wipe token on shutting down, otherwise a reboot in
    # fast-track (or an accidentally booted agent) will cause subsequent
    # actions to fail.
    if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF,
                     states.REBOOT, states.SOFT_REBOOT):
        wipe_internal_info_on_power_off(node)
    node.save()

    # take power action
    try:
        # Re-attach volumes before powering an ACTIVE node back on.
        if (target_state == states.POWER_ON
                and node.provision_state == states.ACTIVE):
            task.driver.storage.attach_volumes(task)

        if new_state != states.REBOOT:
            task.driver.power.set_power_state(task, new_state, timeout=timeout)
        else:
            # TODO(TheJulia): We likely ought to consider toggling
            # volume attachments, although we have no mechanism to
            # really verify what cinder has connector wise.
            task.driver.power.reboot(task, timeout=timeout)
    except Exception as e:
        # Record failure, notify, and re-raise the original exception.
        with excutils.save_and_reraise_exception():
            node['target_power_state'] = states.NOSTATE
            error = _(
                "Failed to change power state to '%(target_state)s' "
                "by '%(new_state)s': %(error)s") % {
                    'target_state': target_state,
                    'new_state': new_state,
                    'error': e}
            node_history_record(node, event=error, error=True)
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)
    else:
        # success!
        node['power_state'] = target_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        # Tell nova about the new power state of the instance, if any.
        if node.instance_uuid:
            nova.power_update(
                task.context, node.instance_uuid, target_state)
        notify_utils.emit_power_set_notification(
            task, fields.NotificationLevel.INFO, fields.NotificationStatus.END,
            new_state)
        LOG.info('Successfully set node %(node)s power state to '
                 '%(target_state)s by %(new_state)s.',
                 {'node': node.uuid,
                  'target_state': target_state,
                  'new_state': new_state})
        # NOTE(TheJulia): Similarly to power-on, when we power-off
        # a node, we should detach any volume attachments.
        if (target_state == states.POWER_OFF
                and node.provision_state == states.ACTIVE):
            try:
                task.driver.storage.detach_volumes(task)
            except exception.StorageError as e:
                # Best-effort: a detach failure does not fail the power
                # action itself.
                LOG.warning("Volume detachment for node %(node)s "
                            "failed: %(error)s",
                            {'node': node.uuid, 'error': e})
@task_manager.require_exclusive_lock
def cleanup_after_timeout(task):
    """Cleanup deploy task after timeout.

    :param task: a TaskManager instance.
    """
    timeout_msg = (_('Timeout reached while waiting for callback for node %s')
                   % task.node.uuid)
    # Same text serves as both the log message and the user-visible error.
    deploying_error_handler(task, timeout_msg, timeout_msg)
def provisioning_error_handler(e, node, provision_state,
                               target_provision_state):
    """Set the node's provisioning states if error occurs.

    This hook gets called upon an exception being raised when spawning
    the worker to do some provisioning to a node like deployment, tear down,
    or cleaning.

    :param e: the exception object that was raised.
    :param node: an Ironic node object.
    :param provision_state: the provision state to be set on
        the node.
    :param target_provision_state: the target provision state to be
        set on the node.
    """
    # Only the "no free workers" failure is handled here; anything else
    # is left untouched.
    if not isinstance(e, exception.NoFreeConductorWorker):
        return

    # NOTE(tenbrae): there is no need to clear conductor_affinity
    # because it isn't updated on a failed deploy
    node.provision_state = provision_state
    node.target_provision_state = target_provision_state
    error = (_("No free conductor workers available"))
    node_history_record(node, event=error, event_type=states.PROVISIONING,
                        error=True)
    node.save()
    LOG.warning("No free conductor workers available to perform "
                "an action on node %(node)s, setting node's "
                "provision_state back to %(prov_state)s and "
                "target_provision_state to %(tgt_prov_state)s.",
                {'node': node.uuid, 'prov_state': provision_state,
                 'tgt_prov_state': target_provision_state})
def cleanup_cleanwait_timeout(task):
    """Cleanup a cleaning task after timeout.

    :param task: a TaskManager instance.
    """
    node = task.node
    last_error = (_("Timeout reached while cleaning the node. Please "
                    "check if the ramdisk responsible for the cleaning is "
                    "running on the node. Failed on step %(step)s.") %
                  {'step': node.clean_step})
    logmsg = ("Cleaning for node %(node)s failed. %(error)s" %
              {'node': node.uuid, 'error': last_error})
    # NOTE(rloo): this is called from the periodic task for cleanwait timeouts,
    # via the task manager's process_event(). The node has already been moved
    # to CLEANFAIL, so the error handler doesn't need to set the fail state.
    cleaning_error_handler(task, logmsg, errmsg=last_error,
                           set_fail_state=False)
def cleaning_error_handler(task, logmsg, errmsg=None, traceback=False,
                           tear_down_cleaning=True, set_fail_state=True,
                           set_maintenance=None):
    """Put a failed node in CLEANFAIL and maintenance (if needed).

    :param task: a TaskManager instance.
    :param logmsg: Message to be logged.
    :param errmsg: Message for the user. Optional, if not provided `logmsg` is
        used.
    :param traceback: Whether to log a traceback. Defaults to False.
    :param tear_down_cleaning: Whether to clean up the PXE and DHCP files after
        cleaning. Default to True.
    :param set_fail_state: Whether to set node to failed state. Default to
        True.
    :param set_maintenance: Whether to set maintenance mode. If None,
        maintenance mode will be set if and only if a clean step is being
        executed on a node.
    """
    if set_maintenance is None:
        # Default: only force maintenance when a clean step was in flight.
        set_maintenance = bool(task.node.clean_step)
    errmsg = errmsg or logmsg
    LOG.error(logmsg, exc_info=traceback)
    node = task.node
    if set_maintenance:
        node.fault = faults.CLEAN_FAILURE
        node.maintenance = True
    if tear_down_cleaning:
        try:
            task.driver.deploy.tear_down_cleaning(task)
        except Exception as e:
            # Best-effort: record the secondary failure but continue with
            # the rest of the error handling.
            msg2 = ('Failed to tear down cleaning on node %(uuid)s, '
                    'reason: %(err)s' % {'err': e, 'uuid': node.uuid})
            LOG.exception(msg2)
            errmsg = _('%s. Also failed to tear down cleaning.') % errmsg
    if node.provision_state in (
            states.CLEANING,
            states.CLEANWAIT,
            states.CLEANFAIL):
        # Clear clean step, msg should already include current step
        node.clean_step = {}
        # Clear any leftover metadata about cleaning
        node.del_driver_internal_info('clean_step_index')
        node.del_driver_internal_info('cleaning_reboot')
        node.del_driver_internal_info('cleaning_polling')
        node.del_driver_internal_info('skip_current_clean_step')
        # We don't need to keep the old agent URL
        # as it should change upon the next cleaning attempt.
        node.del_driver_internal_info('agent_url')
    # For manual cleaning, the target provision state is MANAGEABLE, whereas
    # for automated cleaning, it is AVAILABLE.
    manual_clean = node.target_provision_state == states.MANAGEABLE
    node_history_record(node, event=errmsg, event_type=states.CLEANING,
                        error=True)
    # NOTE(dtantsur): avoid overwriting existing maintenance_reason
    if not node.maintenance_reason and set_maintenance:
        node.maintenance_reason = errmsg
    node.save()
    if set_fail_state and node.provision_state != states.CLEANFAIL:
        # Only transition if we are not already in CLEANFAIL (e.g. when
        # called from the cleanwait timeout path).
        target_state = states.MANAGEABLE if manual_clean else None
        task.process_event('fail', target_state=target_state)
def wipe_internal_info_on_power_off(node):
    """Wipe information that should not survive reboot/power off."""
    # DHCP may hand out a different IP after the next boot.
    node.del_driver_internal_info('agent_url')
    if not is_agent_token_pregenerated(node):
        # A pregenerated token is embedded in the boot media and must be
        # kept; otherwise wipe it so a fresh token is issued to the newly
        # booted agent.
        node.del_driver_internal_info('agent_secret_token')
    # Cached steps may change after a reboot, and the TLS certificate is
    # regenerated on every agent run.
    for field in ('agent_cached_deploy_steps',
                  'agent_cached_clean_steps',
                  'agent_verify_ca'):
        node.del_driver_internal_info(field)
def wipe_token_and_url(task):
    """Remove agent URL and token from the task."""
    node = task.node
    # agent_url is re-asserted on the next deployment attempt, and the
    # TLS certificate is regenerated on each agent run, so neither needs
    # to be preserved.
    for field in ('agent_secret_token',
                  'agent_secret_token_pregenerated',
                  'agent_url',
                  'agent_verify_ca'):
        node.del_driver_internal_info(field)
def wipe_deploy_internal_info(task):
    """Remove temporary deployment fields from driver_internal_info."""
    # Fast-track keeps the agent running, so its URL/token stay valid.
    if not fast_track_able(task):
        wipe_token_and_url(task)
    # Clear any leftover metadata about deployment.
    node = task.node
    node.set_driver_internal_info('deploy_steps', None)
    for field in ('user_deploy_steps',
                  'agent_cached_deploy_steps',
                  'deploy_step_index',
                  'deployment_reboot',
                  'deployment_polling',
                  'skip_current_deploy_step',
                  'steps_validated'):
        node.del_driver_internal_info(field)
def wipe_cleaning_internal_info(task):
    """Remove temporary cleaning fields from driver_internal_info."""
    # Fast-track keeps the agent running, so its URL/token stay valid.
    if not fast_track_able(task):
        wipe_token_and_url(task)
    node = task.node
    node.set_driver_internal_info('clean_steps', None)
    for field in ('agent_cached_clean_steps',
                  'clean_step_index',
                  'cleaning_reboot',
                  'cleaning_polling',
                  'cleaning_disable_ramdisk',
                  'skip_current_clean_step',
                  'steps_validated'):
        node.del_driver_internal_info(field)
def deploying_error_handler(task, logmsg, errmsg=None, traceback=False,
                            clean_up=True):
    """Put a failed node in DEPLOYFAIL.

    :param task: the task
    :param logmsg: message to be logged
    :param errmsg: message for the user; defaults to ``logmsg``
    :param traceback: Boolean; True to log a traceback
    :param clean_up: Boolean; True to clean up
    """
    errmsg = errmsg or logmsg
    node = task.node
    LOG.error(logmsg, exc_info=traceback)
    # Record the failure first so it is preserved even if clean-up below
    # fails as well.
    node_history_record(node, event=errmsg, event_type=states.DEPLOYING,
                        error=True)
    node.save()
    cleanup_err = None
    if clean_up:
        try:
            task.driver.deploy.clean_up(task)
        except Exception as e:
            msg = ('Cleanup failed for node %(node)s; reason: %(err)s'
                   % {'node': node.uuid, 'err': e})
            LOG.exception(msg)
            if isinstance(e, exception.IronicException):
                addl = _('Also failed to clean up due to: %s') % e
            else:
                addl = _('An unhandled exception was encountered while '
                         'aborting. More information may be found in the log '
                         'file.')
            cleanup_err = '%(err)s. %(add)s' % {'err': errmsg, 'add': addl}
    # Pick up any state changes made during clean_up().
    node.refresh()
    if node.provision_state in (
            states.DEPLOYING,
            states.DEPLOYWAIT,
            states.DEPLOYFAIL):
        # Clear deploy step; we leave the list of deploy steps
        # in node.driver_internal_info for debugging purposes.
        node.deploy_step = {}
        wipe_deploy_internal_info(task)
        if cleanup_err:
            # Append the clean-up failure to the node history as well.
            node_history_record(node, event=cleanup_err,
                                event_type=states.DEPLOYING,
                                error=True)
        node.save()
        # NOTE(tenbrae): there is no need to clear conductor_affinity
        task.process_event('fail')
def fail_on_error(error_callback, msg, *error_args, **error_kwargs):
    """A decorator for failing operation on failure.

    Wraps a task function; if it raises, the exception is swallowed and
    ``error_callback(task, "<msg>. <ExcType>: <exc>", *error_args,
    **error_kwargs)`` is invoked instead.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(task, *args, **kwargs):
            try:
                return func(task, *args, **kwargs)
            except Exception as exc:
                errmsg = "%s. %s: %s" % (msg, exc.__class__.__name__, exc)
                error_callback(task, errmsg, *error_args, **error_kwargs)
        return inner
    return decorator
def verifying_error_handler(task, logmsg, errmsg=None, traceback=False):
    """Handle errors during verification steps

    :param task: the task
    :param logmsg: message to be logged
    :param errmsg: message for the user; defaults to ``logmsg``
    :param traceback: Boolean; True to log a traceback
    """
    errmsg = errmsg or logmsg
    node = task.node
    LOG.error(logmsg, exc_info=traceback)
    node_history_record(node, event=errmsg, event_type=states.VERIFYING,
                        error=True)
    node.save()
    node.refresh()
    # FIX: the original used ``in (states.VERIFYING)`` which, due to the
    # missing comma, is a *substring* test against the state string rather
    # than a tuple membership test. Compare for equality instead.
    if node.provision_state == states.VERIFYING:
        # Clear verifying step; we leave the list of verify steps
        # in node.driver_internal_info for debugging purposes.
        node.verify_step = {}
        node.save()
@task_manager.require_exclusive_lock
def abort_on_conductor_take_over(task):
    """Set node's state when a task was aborted due to conductor take over.

    :param task: a TaskManager instance.
    """
    msg = _('Operation was aborted due to conductor take over')
    # By this time the "fail" event was processed, so we cannot end up in
    # CLEANING or CLEAN WAIT, only in CLEAN FAIL.
    if task.node.provision_state == states.CLEANFAIL:
        # Do not process another 'fail' event; just run the clean-up parts
        # of the cleaning error handler.
        cleaning_error_handler(task, msg, set_fail_state=False)
    else:
        # For aborted deployment (and potentially other operations), just set
        # the last_error accordingly.
        node_history_record(task.node, event=msg, event_type=states.TAKEOVER,
                            error=True)
        task.node.save()
    LOG.warning('Aborted the current operation on node %s due to '
                'conductor take over', task.node.uuid)
def rescuing_error_handler(task, msg, set_fail_state=True):
    """Cleanup rescue task after timeout or failure.

    :param task: a TaskManager instance.
    :param msg: a message to set into node's last_error field
    :param set_fail_state: a boolean flag to indicate if node needs to be
                           transitioned to a failed state. By default node
                           would be transitioned to a failed state.
    """
    node = task.node
    try:
        node_power_action(task, states.POWER_OFF)
        task.driver.rescue.clean_up(task)
        remove_agent_url(node)
        node_history_record(task.node, event=msg, event_type=states.RESCUE,
                            error=True)
    except exception.IronicException as e:
        error = (_('Rescue operation was unsuccessful, clean up '
                   'failed for node: %(error)s') % {'error': e})
        node_history_record(task.node, event=error, event_type=states.RESCUE,
                            error=True)
        LOG.error(('Rescue operation was unsuccessful, clean up failed for '
                   'node %(node)s: %(error)s'),
                  {'node': node.uuid, 'error': e})
    except Exception as e:
        error = (_('Rescue failed, but an unhandled exception was '
                   'encountered while aborting: %(error)s') %
                 {'error': e})
        node_history_record(task.node, event=error, event_type=states.RESCUE,
                            error=True)
        LOG.exception('Rescue failed for node %(node)s, an exception was '
                      'encountered while aborting.', {'node': node.uuid})
    finally:
        # The agent URL removal is repeated here so it happens even when
        # clean_up() raised before the call in the try block was reached.
        remove_agent_url(node)
        node.save()
    if set_fail_state:
        try:
            task.process_event('fail')
        except exception.InvalidState:
            node = task.node
            LOG.error('Internal error. Node %(node)s in provision state '
                      '"%(state)s" could not transition to a failed state.',
                      {'node': node.uuid, 'state': node.provision_state})
@task_manager.require_exclusive_lock
def cleanup_rescuewait_timeout(task):
    """Clean up a rescue task whose ramdisk callback timed out.

    :param task: a TaskManager instance.
    """
    msg = _('Timeout reached while waiting for rescue ramdisk callback '
            'for node')
    LOG.error(msg + ' %(node)s', {'node': task.node.uuid})
    # The node is already in RESCUEFAIL by the time this periodic task
    # handler runs, so no fail-state transition is needed.
    rescuing_error_handler(task, msg, set_fail_state=False)
def _spawn_error_handler(e, node, operation):
    """Handle an error raised while spawning a worker for an operation.

    Only ``NoFreeConductorWorker`` is handled; other exceptions leave
    the node untouched.

    :param e: the exception object that was raised.
    :param node: an Ironic node object.
    :param operation: the operation being performed on the node.
    """
    if not isinstance(e, exception.NoFreeConductorWorker):
        return
    error = _("No free conductor workers available")
    node_history_record(node, event=error, event_type=states.CONDUCTOR,
                        error=True)
    node.save()
    LOG.warning("No free conductor workers available to perform "
                "%(operation)s on node %(node)s",
                {'operation': operation, 'node': node.uuid})
def spawn_cleaning_error_handler(e, node):
    """Handle a worker-spawn failure that occurred for node cleaning."""
    _spawn_error_handler(e, node, states.CLEANING)
def spawn_deploying_error_handler(e, node):
    """Handle a worker-spawn failure that occurred for node deploying."""
    _spawn_error_handler(e, node, states.DEPLOYING)
def spawn_rescue_error_handler(e, node):
    """Handle a worker-spawn failure that occurred for node rescue."""
    if isinstance(e, exception.NoFreeConductorWorker):
        # The rescue password is useless without a worker; drop it but
        # let the shared handler do the save.
        remove_node_rescue_password(node, save=False)
    _spawn_error_handler(e, node, states.RESCUE)
def power_state_error_handler(e, node, power_state):
    """Restore the node's power states after a failed worker spawn.

    Called when an exception is raised while spawning the worker thread
    that changes the power state of a node.

    :param e: the exception object that was raised.
    :param node: an Ironic node object.
    :param power_state: the power state to set on the node.
    """
    # NOTE This error will not emit a power state change notification since
    # this is related to spawning the worker thread, not the power state
    # change itself.
    if not isinstance(e, exception.NoFreeConductorWorker):
        return
    node.power_state = power_state
    node.target_power_state = states.NOSTATE
    error = _("No free conductor workers available")
    node_history_record(node, event=error, event_type=states.CONDUCTOR,
                        error=True)
    node.save()
    LOG.warning("No free conductor workers available to perform "
                "an action on node %(node)s, setting node's "
                "power state back to %(power_state)s.",
                {'node': node.uuid, 'power_state': power_state})
@task_manager.require_exclusive_lock
def validate_port_physnet(task, port_obj):
    """Validate the consistency of physical networks of ports in a portgroup.

    Validate the consistency of a port's physical network with other ports in
    the same portgroup. All ports in a portgroup should have the same value
    (which may be None) for their physical_network field.

    During creation or update of a port in a portgroup we apply the
    following validation criteria:

    - If the portgroup has existing ports with different physical networks, we
      raise PortgroupPhysnetInconsistent. This shouldn't ever happen.
    - If the port has a physical network that is inconsistent with other
      ports in the portgroup, we raise exception.Conflict.

    If a port's physical network is None, this indicates that ironic's VIF
    attachment mapping algorithm should operate in a legacy (physical
    network unaware) mode for this port or portgroup. This allows existing
    ironic nodes to continue to function after an upgrade to a release
    including physical network support.

    :param task: a TaskManager instance
    :param port_obj: a port object to be validated.
    :raises: Conflict if the port is a member of a portgroup which is on a
             different physical network.
    :raises: PortgroupPhysnetInconsistent if the port's portgroup has
             ports which are not all assigned the same physical network.
    """
    # Ports outside any portgroup need no consistency check.
    if 'portgroup_id' not in port_obj or not port_obj.portgroup_id:
        return
    delta = port_obj.obj_what_changed()
    # We can skip this step if the port's portgroup membership or physical
    # network assignment is not being changed (during creation these will
    # appear changed).
    if not (delta & {'portgroup_id', 'physical_network'}):
        return
    # Determine the current physical network of the portgroup.
    pg_physnets = network.get_physnets_by_portgroup_id(task,
                                                       port_obj.portgroup_id,
                                                       exclude_port=port_obj)
    if not pg_physnets:
        # No other member ports: any physnet is consistent.
        return
    # Check that the port has the same physical network as any existing
    # member ports.
    pg_physnet = pg_physnets.pop()
    port_physnet = (port_obj.physical_network
                    if 'physical_network' in port_obj else None)
    if port_physnet != pg_physnet:
        portgroup = network.get_portgroup_by_id(task, port_obj.portgroup_id)
        msg = _("Port with physical network %(physnet)s cannot become a "
                "member of port group %(portgroup)s which has ports in "
                "physical network %(pg_physnet)s.")
        raise exception.Conflict(
            msg % {'portgroup': portgroup.uuid, 'physnet': port_physnet,
                   'pg_physnet': pg_physnet})
def remove_node_rescue_password(node, save=True):
    """Strip rescue password fields from a node's instance_info.

    Removes both the plain and hashed rescue password. The node is saved
    unless the caller explicitly opts out.

    :param node: an Ironic node object.
    :param save: Boolean; True (default) to save the node; False
                 otherwise.
    """
    info = node.instance_info
    info.pop('rescue_password', None)
    info.pop('hashed_rescue_password', None)
    # Reassign to make sure the object notices the change.
    node.instance_info = info
    if save:
        node.save()
def validate_instance_info_traits(node):
    """Validate traits in instance_info.

    All traits in instance_info must also exist as node traits.

    :param node: an Ironic node object.
    :raises: InvalidParameterValue if the instance traits are badly
        formatted, or contain traits that are not set on the node.
    """
    instance_traits = node.instance_info.get('traits')
    if not instance_traits:
        return

    badly_formatted = (
        not isinstance(instance_traits, list)
        or not all(isinstance(t, str) for t in instance_traits))
    if badly_formatted:
        err = (_("Error parsing traits from Node %(node)s instance_info "
                 "field. A list of strings is expected.")
               % {"node": node.uuid})
        raise exception.InvalidParameterValue(err)

    missing = set(instance_traits) - set(node.traits.get_trait_names())
    if missing:
        err = (_("Cannot specify instance traits that are not also set on the "
                 "node. Node %(node)s is missing traits %(traits)s") %
               {"node": node.uuid, "traits": ", ".join(missing)})
        raise exception.InvalidParameterValue(err)
def notify_conductor_resume_operation(task, operation):
    """Notify the conductor to resume an operation.

    :param task: the task
    :param operation: the operation, a string
    """
    LOG.debug('Sending RPC to conductor to resume %(op)s steps for node '
              '%(node)s', {'op': operation, 'node': task.node.uuid})
    # Imported here to avoid a circular import.
    from ironic.conductor import rpcapi
    rpc = rpcapi.ConductorAPI()
    topic = rpc.get_current_topic()
    uuid = task.node.uuid
    rpc_call = getattr(rpc, 'continue_node_%s' % operation)
    # Need to release the lock to let the conductor take it
    task.release_resources()
    rpc_call(task.context, uuid, topic=topic)
def notify_conductor_resume_clean(task):
    """Ask the conductor to continue the node's cleaning steps."""
    notify_conductor_resume_operation(task, 'clean')
def notify_conductor_resume_deploy(task):
    """Ask the conductor to continue the node's deployment steps."""
    notify_conductor_resume_operation(task, 'deploy')
def skip_automated_cleaning(node):
    """Check whether automated cleaning should be skipped for a node.

    :param node: the node to consider
    """
    if node.automated_clean:
        return False
    if node.automated_clean is None:
        # Not set on the node: fall back to the conductor-wide setting.
        return not CONF.conductor.automated_clean
    LOG.info("Automated cleaning is disabled via the API for "
             "node %(node)s",
             {'node': node.uuid})
    return True
def power_on_node_if_needed(task):
    """Powers on node if it is powered off and has a Smart NIC port

    :param task: A TaskManager object
    :returns: the previous power state or None if no changes were made
    :raises: exception.NetworkError if agent status didn't match the required
             status after max retry attempts.
    """
    if not task.driver.network.need_power_on(task):
        # Nothing to do: implicitly returns None.
        return
    previous_power_state = task.driver.power.get_power_state(task)
    if previous_power_state == states.POWER_OFF:
        node_set_boot_device(
            task, boot_devices.BIOS, persistent=False)
        node_power_action(task, states.POWER_ON)
        # local import is necessary to avoid circular import
        from ironic.common import neutron
        # Find the first Smart NIC port to identify its hosting server.
        host_id = None
        for port in task.ports:
            if neutron.is_smartnic_port(port):
                link_info = port.local_link_connection
                host_id = link_info['hostname']
                break
        if host_id:
            # Wait for the Smart NIC host agent to report 'down' before
            # returning, so subsequent network changes apply cleanly.
            LOG.debug('Waiting for host %(host)s agent to be down',
                      {'host': host_id})
            client = neutron.get_client(context=task.context)
            neutron.wait_for_host_agent(
                client, host_id, target_state='down')
    return previous_power_state
def restore_power_state_if_needed(task, power_state_to_restore):
    """Change the node's power state if power_state_to_restore is not None

    :param task: A TaskManager object
    :param power_state_to_restore: power state
    """
    if not power_state_to_restore:
        return
    # Give the neutron agent a chance to apply the network changes before
    # the power action; twice the agent's polling interval
    # ("CONF.agent.neutron_agent_poll_interval") is enough time.
    time.sleep(CONF.agent.neutron_agent_poll_interval * 2)
    node_power_action(task, power_state_to_restore)
@contextlib.contextmanager
def power_state_for_network_configuration(task):
    """Handle the power state for a node reconfiguration.

    Powers the node on if and only if it has a Smart NIC port. Yields for
    the actual reconfiguration, then restores the power state.

    :param task: A TaskManager object.
    """
    previous = power_on_node_if_needed(task)
    # NOTE(review): the yield is not wrapped in try/finally, so if the
    # reconfiguration body raises, the previous power state is NOT
    # restored — confirm this is intentional before changing it.
    yield task
    restore_power_state_if_needed(task, previous)
def build_configdrive(node, configdrive):
    """Build a configdrive from provided meta_data, network_data and user_data.

    If uuid or name are not provided in the meta_data, they're defaulted to
    the node's uuid and name accordingly.

    :param node: an Ironic node object.
    :param configdrive: A configdrive as a dict with keys ``meta_data``,
        ``network_data``, ``user_data`` and ``vendor_data`` (all optional).
    :returns: A gzipped and base64 encoded configdrive as a string.
    """
    meta_data = configdrive.setdefault('meta_data', {})
    meta_data.setdefault('uuid', node.uuid)
    if node.name:
        meta_data.setdefault('name', node.name)

    user_data = configdrive.get('user_data')
    # Structured user_data is serialized to JSON bytes; plain strings are
    # UTF-8 encoded; empty/None is passed through unchanged.
    if isinstance(user_data, (dict, list)):
        user_data = jsonutils.dump_as_bytes(user_data)
    elif user_data:
        user_data = user_data.encode('utf-8')

    LOG.debug('Building a configdrive for node %s', node.uuid)
    return os_configdrive.build(meta_data, user_data=user_data,
                                network_data=configdrive.get('network_data'),
                                vendor_data=configdrive.get('vendor_data'))
def get_configdrive_image(node):
    """Get configdrive as an ISO image or a URL.

    Converts the JSON representation into an image. URLs and raw contents
    are returned unchanged.

    :param node: an Ironic node object.
    :returns: A gzipped and base64 encoded configdrive as a string.
    """
    configdrive = node.instance_info.get('configdrive')
    # Only the dict (JSON) form needs building; URLs and raw content
    # (or a missing value) pass through untouched.
    if isinstance(configdrive, dict):
        return build_configdrive(node, configdrive)
    return configdrive
def fast_track_able(task):
    """Checks if the operation can be a streamlined deployment sequence.

    This is mainly focused on ensuring that we are able to quickly sequence
    through operations if we already have a ramdisk heartbeating through
    external means.

    :param task: Taskmanager object
    :returns: True if [deploy]fast_track is set to True, no iSCSI boot
              configuration is present, and no last_error is present for
              the node indicating that there was a recent failure.
    """
    return (utils.fast_track_enabled(task.node)
            # TODO(TheJulia): Network model aside, we should be able to
            # fast-track through initial sequence to complete deployment.
            # This needs to be validated.
            # TODO(TheJulia): Do we need a secondary guard? To prevent
            # driving through this we could query the API endpoint of
            # the agent with a short timeout such as 10 seconds, which
            # would help verify if the node is online.
            # TODO(TheJulia): Should we check the provisioning/deployment
            # networks match config wise? Do we care? #decisionsdecisions
            # NOTE(review): should_write_image() presumably returns False
            # for boot-from-volume (iSCSI) configurations, per the
            # docstring — verify against the storage interface.
            and task.driver.storage.should_write_image(task)
            and task.node.last_error is None)
def value_within_timeout(value, timeout):
    """Checks if the time is within the previous timeout seconds from now.

    :param value: a string representing date and time or None.
    :param timeout: timeout in seconds.
    """
    # A missing value is treated as the epoch, i.e. essentially "never".
    last = datetime.datetime.strptime(value or '1970-01-01T00:00:00.000000',
                                      "%Y-%m-%dT%H:%M:%S.%f")
    earliest_valid = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
    return earliest_valid <= last
def agent_is_alive(node, timeout=None):
    """Check that the agent is likely alive.

    Looks at the last recorded agent heartbeat: if it occurred within the
    timeout (by default [deploy]fast_track_timeout), the agent is
    presumed alive.

    :param node: A node object.
    :param timeout: Heartbeat timeout, defaults to `fast_track_timeout`.
    """
    # No agent_url means we have powered down since the last heartbeat.
    if not node.driver_internal_info.get('agent_url'):
        return False
    last_heartbeat = node.driver_internal_info.get('agent_last_heartbeat')
    return value_within_timeout(
        last_heartbeat, timeout or CONF.deploy.fast_track_timeout)
def is_fast_track(task):
    """Checks a fast track is available.

    This method first ensures that the node and conductor configuration
    is valid to perform a fast track sequence meaning that we already
    have a ramdisk running through another means like discovery.
    If not valid, False is returned.

    The method then checks for the last agent heartbeat, and if it occured
    within the timeout set by [deploy]fast_track_timeout and the power
    state for the machine is POWER_ON, then fast track is permitted.

    :param task: Taskmanager object
    :returns: True if the last heartbeat that was recorded was within
              the [deploy]fast_track_timeout setting.
    """
    # NOTE: get_power_state() is only called when fast_track_able() is
    # True, thanks to the short-circuiting `or` below.
    if (not fast_track_able(task)
            or task.driver.power.get_power_state(task) != states.POWER_ON):
        if task.node.last_error:
            LOG.debug('Node %(node)s is not fast-track-able because it has '
                      'an error: %(error)s',
                      {'node': task.node.uuid, 'error': task.node.last_error})
        return False

    if agent_is_alive(task.node):
        return True
    else:
        LOG.debug('Node %(node)s should be fast-track-able, but the agent '
                  'doesn\'t seem to be running. Last heartbeat: %(last)s',
                  {'node': task.node.uuid,
                   'last': task.node.driver_internal_info.get(
                       'agent_last_heartbeat')})
        return False
def remove_agent_url(node):
    """Drop the cached agent URL from the node's driver_internal_info."""
    node.del_driver_internal_info('agent_url')
def _get_node_next_steps(task, step_type, skip_current_step=True):
"""Get the task's node's next steps.
This determines what the next (remaining) steps are, and
returns the index into the steps list that corresponds to the
next step. The remaining steps are determined as follows:
* If no steps have been started yet, all the steps
must be executed
* If skip_current_step is False, the remaining steps start
with the current step. Otherwise, the remaining steps
start with the step after the current one.
All the steps are in node.driver_internal_info['<step_type>_steps'].
node.<step_type>_step is the current step that was just executed
(or None, {} if no steps have been executed yet).
node.driver_internal_info['<step_type>_step_index'] is the index
index into the steps list (or None, doesn't exist if no steps have
been executed yet) and corresponds to node.<step_type>_step.
:param task: A TaskManager object
:param step_type: The type of steps to process: 'clean' or 'deploy'.
:param skip_current_step: True to skip the current step; False to
include it.
:returns: index of the next step; None if there are none to execute.
"""
valid_types = set(['clean', 'deploy'])
if step_type not in valid_types:
# NOTE(rloo): No need to i18n this, since this would be a
# developer error; it isn't user-facing.
raise exception.Invalid(
'step_type must be one of %(valid)s, not %(step)s'
% {'valid': valid_types, 'step': step_type})
node = task.node
if not getattr(node, '%s_step' % step_type):
# first time through, all steps need to be done. Return the
# index of the first step in the list.
return 0
ind = node.driver_internal_info.get('%s_step_index' % step_type)
if ind is None:
return None
if skip_current_step:
ind += 1
if ind >= len(node.driver_internal_info['%s_steps' % step_type]):
# no steps left to do
ind = None
return ind
def get_node_next_clean_steps(task, skip_current_step=True):
    """Return the index of the node's next clean step, or None."""
    return _get_node_next_steps(task, 'clean',
                                skip_current_step=skip_current_step)
def get_node_next_deploy_steps(task, skip_current_step=True):
    """Return the index of the node's next deploy step, or None."""
    return _get_node_next_steps(task, 'deploy',
                                skip_current_step=skip_current_step)
def update_next_step_index(task, step_type):
    """Calculate the next step index and update the node.

    :param task: A TaskManager object
    :param step_type: The type of steps to process: 'clean' or 'deploy'.
    :returns: Index of the next step.
    """
    node = task.node
    skip_current_step = node.del_driver_internal_info(
        'skip_current_%s_step' % step_type, True)
    # Drop the polling flag for the step type that just completed.
    polling_field = ('cleaning_polling' if step_type == 'clean'
                     else 'deployment_polling')
    node.del_driver_internal_info(polling_field)
    node.save()
    return _get_node_next_steps(task, step_type,
                                skip_current_step=skip_current_step)
def add_secret_token(node, pregenerated=False):
    """Adds a secret token to driver_internal_info for IPA verification.

    :param node: Node object
    :param pregenerated: Boolean value, default False, which indicates if
                         the token should be marked as "pregenerated" in
                         order to facilitate virtual media booting where
                         the token is embedded into the configuration.
    """
    node.set_driver_internal_info('agent_secret_token',
                                  secrets.token_urlsafe())
    if pregenerated:
        node.set_driver_internal_info('agent_secret_token_pregenerated', True)
    else:
        node.del_driver_internal_info('agent_secret_token_pregenerated')
def is_agent_token_present(node):
    """Determines if an agent token is present upon a node.

    :param node: Node object
    :returns: True if an agent_secret_token value is present in a node
              driver_internal_info field.
    """
    # TODO(TheJulia): consider ageing out tokens by recording when they
    # were added and comparing against the last heartbeat, as an extra
    # guard rail. May belong elsewhere (or nowhere) — future thought.
    token = node.driver_internal_info.get('agent_secret_token')
    return token is not None
def is_agent_token_valid(node, token):
    """Validates if a supplied token is valid for the node.

    :param node: Node object
    :param token: A token value to validate against the
        driver_internal_info field agent_secret_token.
    :returns: True if the supplied token matches the token recorded in
        the supplied node object.
    """
    if token is None:
        # No token is never valid.
        return False
    return token == node.driver_internal_info.get('agent_secret_token')
def is_agent_token_pregenerated(node):
    """Determines if the token was generated for out of band configuration.

    Ironic supports providing configuration data to the agent through a
    virtual floppy or as part of the virtual media image attached to the
    BMC. This method identifies WHEN that was done, as such tokens must
    not be removed before a reboot: they persist in the virtual media
    image, are loaded as part of the agent ramdisk, and do not require
    regeneration upon the initial lookup — ultimately making the overall
    usage of virtual media and pregenerated tokens far more secure.

    :param node: Node Object
    :returns: True if the token was pregenerated as indicated by the
        node's driver_internal_info field. False in all other cases.
    """
    return node.driver_internal_info.get('agent_secret_token_pregenerated',
                                         False)
def make_salt():
    """Generate a random salt with the indicator tag for password type.

    :returns: a valid salt for use with crypt.crypt
    """
    algorithm = CONF.conductor.rescue_password_hash_algorithm
    return crypt.mksalt(method=PASSWORD_HASH_FORMAT[algorithm])
def hash_password(password=''):
    """Hashes a supplied password.

    :param password: password to be hashed
    """
    salt = make_salt()
    return crypt.crypt(password, salt)
def get_attached_vif(port):
    """Get any attached vif ID for the port

    :param port: The port object upon which to check for a vif
                 record.
    :returns: Returns a tuple of the vif ID if found and the use of
              the vif in the form of a string: 'tenant', 'cleaning',
              'provisioning', 'rescuing' or 'inspecting'.
              (None, None) if no vif is attached.
    """
    # Checked in priority order; the first match wins.
    for usage, field in (('tenant', 'tenant_vif_port_id'),
                         ('cleaning', 'cleaning_vif_port_id'),
                         ('provisioning', 'provisioning_vif_port_id'),
                         ('rescuing', 'rescuing_vif_port_id'),
                         ('inspecting', 'inspection_vif_port_id')):
        vif = port.internal_info.get(field)
        if vif:
            return (vif, usage)
    return (None, None)
def store_agent_certificate(node, agent_verify_ca):
    """Store certificate received from the agent and return its path.

    :param node: an Ironic node object.
    :param agent_verify_ca: TLS certificate text as reported by the agent.
    :returns: filesystem path of the stored certificate.
    :raises: InvalidParameterValue if a different certificate is already
        stored for this node.
    """
    existing_verify_ca = node.driver_internal_info.get(
        'agent_verify_ca')
    if existing_verify_ca:
        if os.path.exists(existing_verify_ca):
            try:
                with open(existing_verify_ca, 'rt') as fp:
                    existing_text = fp.read()
            except EnvironmentError:
                with excutils.save_and_reraise_exception():
                    LOG.exception('Could not read the existing TLS certificate'
                                  ' for node %s', node.uuid)

            # A changed certificate mid-run is suspicious — reject it.
            if existing_text.strip() != agent_verify_ca.strip():
                LOG.error('Content mismatch for agent_verify_ca for '
                          'node %s', node.uuid)
                raise exception.InvalidParameterValue(
                    _('Detected change in ramdisk provided "agent_verify_ca"'))
            else:
                return existing_verify_ca
        else:
            # The recorded path no longer exists: likely a conductor
            # take-over; fall through and re-store the certificate.
            LOG.info('Current agent_verify_ca was not found for node '
                     '%s, assuming take over and storing', node.uuid)

    fname = os.path.join(CONF.agent.certificates_path, '%s.crt' % node.uuid)
    try:
        # FIXME(dtantsur): it makes more sense to create this path on conductor
        # start-up, but it requires reworking a ton of unit tests.
        os.makedirs(CONF.agent.certificates_path, exist_ok=True)
        with open(fname, 'wt') as fp:
            fp.write(agent_verify_ca)
    except EnvironmentError:
        with excutils.save_and_reraise_exception():
            LOG.exception('Could not save the TLS certificate for node %s',
                          node.uuid)
    else:
        LOG.debug('Saved the custom certificate for node %(node)s to %(file)s',
                  {'node': node.uuid, 'file': fname})
        return fname
def node_cache_bios_settings(task, node):
    """Cache the node's BIOS settings when the driver supports it.

    Any failure is logged and swallowed so callers are never interrupted.

    :param task: a TaskManager instance for the node.
    :param node: the node whose BIOS settings should be cached.
    """
    try:
        LOG.debug('Getting BIOS info for node %s', node.uuid)
        task.driver.bios.cache_bios_settings(task)
    except exception.UnsupportedDriverExtension:
        LOG.warning('BIOS settings are not supported for node %s, '
                    'skipping', node.uuid)
    # TODO(zshi) remove this check when classic drivers are removed
    except Exception:
        LOG.exception(_('Caching of bios settings failed on node %(node)s.')
                      % {'node': node.uuid})
def node_cache_vendor(task):
    """Cache the vendor if it can be detected."""
    if task.node.properties.get('vendor'):
        # Already recorded; assume that vendors don't change on the fly.
        return

    try:
        # Nothing cached yet -- ask the management interface.
        detected = task.driver.management.detect_vendor(task)
        if not detected:
            return
        # This function may be called without an exclusive lock, so get one
        task.upgrade_lock(purpose='caching node vendor')
    except exception.UnsupportedDriverExtension:
        return
    except Exception as exc:
        LOG.warning('Unexpected exception when trying to detect vendor '
                    'for node %(node)s. %(class)s: %(exc)s',
                    {'node': task.node.uuid,
                     'class': type(exc).__name__, 'exc': exc},
                    exc_info=not isinstance(exc, exception.IronicException))
        return

    # Re-read properties after taking the lock, then persist the vendor.
    updated_props = task.node.properties
    updated_props['vendor'] = detected
    task.node.properties = updated_props
    task.node.save()
    LOG.info("Detected vendor %(vendor)s for node %(node)s",
             {'vendor': detected, 'node': task.node.uuid})
def node_cache_boot_mode(task):
    """Cache boot_mode and secure_boot state if supported by driver.

    Cache current boot_mode and secure_boot in ironic's node representation.
    If either value cannot be retrieved due to an unexpected error, nothing
    is cached. The node is only updated (under an exclusive lock) when a
    value actually changed.

    :param task: a TaskManager instance containing the node to check.
    """
    # Try to retrieve boot mode and secure_boot state
    try:
        boot_mode = task.driver.management.get_boot_mode(task)
    except exception.UnsupportedDriverExtension:
        boot_mode = None
    except Exception as exc:
        LOG.warning('Unexpected exception when trying to detect boot_mode '
                    'for node %(node)s. %(class)s: %(exc)s',
                    {'node': task.node.uuid,
                     'class': type(exc).__name__, 'exc': exc},
                    exc_info=not isinstance(exc, exception.IronicException))
        return
    try:
        secure_boot = task.driver.management.get_secure_boot_state(task)
    except exception.UnsupportedDriverExtension:
        secure_boot = None
    except Exception as exc:
        LOG.warning('Unexpected exception when trying to detect secure_boot '
                    'state for node %(node)s. %(class)s: %(exc)s',
                    {'node': task.node.uuid,
                     'class': type(exc).__name__, 'exc': exc},
                    exc_info=not isinstance(exc, exception.IronicException))
        return

    if (boot_mode != task.node.boot_mode
            or secure_boot != task.node.secure_boot):
        # Update node if current values different from node's last known info.
        # Get exclusive lock in case we don't have one already.
        task.upgrade_lock(purpose='caching boot_mode or secure_boot state')
        task.node.boot_mode = boot_mode
        task.node.secure_boot = secure_boot
        task.node.save()
        # Fix: the two literals used to concatenate without a separating
        # space, producing "...%(secure_boot)sfor node...".
        LOG.info("Updated boot_mode %(boot_mode)s, secure_boot "
                 "%(secure_boot)s for node %(node)s",
                 {'boot_mode': boot_mode, 'secure_boot': secure_boot,
                  'node': task.node.uuid})
def node_change_boot_mode(task, target_boot_mode):
    """Change boot mode to requested state for node

    No-ops when the current boot mode already matches the target. On
    failure the error is recorded in the node's ``last_error``.

    :param task: a TaskManager instance containing the node to act on.
    :param target_boot_mode: Any boot mode in :mod:`ironic.common.boot_modes`.
    """
    try:
        current_boot_mode = task.driver.management.get_boot_mode(task)
    except Exception as exc:
        # Detection failure is non-fatal: proceed and attempt the change.
        current_boot_mode = None
        LOG.warning('Unexpected exception when trying to detect boot_mode '
                    'while changing boot mode for node '
                    '%(node)s. %(class)s: %(exc)s',
                    {'node': task.node.uuid,
                     'class': type(exc).__name__, 'exc': exc},
                    exc_info=not isinstance(exc, exception.IronicException))

    if (current_boot_mode is not None
            and target_boot_mode == current_boot_mode):
        LOG.info("Target boot mode '%(target)s', and current boot mode "
                 "'%(current)s' are identical. No change being made "
                 "for node %(node)s",
                 {'target': target_boot_mode, 'current': current_boot_mode,
                  'node': task.node.uuid})
        return
    try:
        task.driver.management.set_boot_mode(task, mode=target_boot_mode)
    except Exception as exc:
        LOG.error('Unexpected exception when trying to change boot_mode '
                  'to %(target)s for node %(node)s. %(class)s: %(exc)s',
                  {'node': task.node.uuid, 'target': target_boot_mode,
                   'class': type(exc).__name__, 'exc': exc},
                  exc_info=not isinstance(exc, exception.IronicException))
        # Fix: the message previously lacked the closing quote after
        # %(target)s; now consistent with node_change_secure_boot.
        task.node.last_error = (
            "Failed to change boot mode to '%(target)s': %(err)s" % {
                'target': target_boot_mode, 'err': exc})
        task.node.save()
    else:
        LOG.info("Changed boot_mode to %(mode)s for node %(node)s",
                 {'mode': target_boot_mode, 'node': task.node.uuid})
        task.node.boot_mode = target_boot_mode
        task.node.save()
def node_change_secure_boot(task, secure_boot_target):
    """Change secure_boot state to requested state for node

    :param task: a TaskManager instance containing the node to act on.
    :param secure_boot_target: Target secure_boot state
                               OneOf(True => on, False => off)
    :type secure_boot_target: boolean
    """
    try:
        current_state = task.driver.management.get_secure_boot_state(
            task)
    except Exception as exc:
        # Detection failure is non-fatal; continue and try the change.
        current_state = None
        LOG.warning('Unexpected exception when trying to detect secure_boot '
                    'state while changing secure_boot for node '
                    '%(node)s. %(class)s: %(exc)s',
                    {'node': task.node.uuid,
                     'class': type(exc).__name__, 'exc': exc},
                    exc_info=not isinstance(exc, exception.IronicException))

    if current_state is not None and secure_boot_target == current_state:
        # Nothing to do -- already in the requested state.
        LOG.info("Target secure_boot state '%(target)s', and current "
                 "secure_boot state '%(current)s' are identical. "
                 "No change being made for node %(node)s",
                 {'target': secure_boot_target,
                  'current': current_state,
                  'node': task.node.uuid})
        return

    try:
        task.driver.management.set_secure_boot_state(task, secure_boot_target)
    except Exception as exc:
        LOG.error('Unexpected exception when trying to change secure_boot '
                  'to %(target)s for node %(node)s. %(class)s: %(exc)s',
                  {'node': task.node.uuid, 'target': secure_boot_target,
                   'class': type(exc).__name__, 'exc': exc},
                  exc_info=not isinstance(exc, exception.IronicException))
        task.node.last_error = (
            "Failed to change secure_boot state to '%(target)s': %(err)s" % {
                'target': secure_boot_target, 'err': exc})
        task.node.save()
    else:
        LOG.info("Changed secure_boot state to %(state)s for node %(node)s",
                 {'state': secure_boot_target, 'node': task.node.uuid})
        task.node.secure_boot = secure_boot_target
        task.node.save()
def node_history_record(node, conductor=None, event=None,
                        event_type=None, user=None,
                        error=False):
    """Records a node history record

    Adds an entry to the node history table with the appropriate fields
    populated to ensure consistent experience by also updating the
    node ``last_error`` field. Please note the event is only recorded
    when history recording is enabled via the ``[conductor]node_history``
    option.

    :param node: A node object from a task object. Required.
    :param conductor: The hostname of the conductor. If not specified
                      this value is populated with the conductor FQDN.
    :param event: The text to record to the node history table.
                  If no value is supplied, the method silently returns
                  to the caller.
    :param event_type: The type of activity where the event was encountered,
                       either "provisioning", "monitoring", "cleaning",
                       or whatever text a driver author wishes to supply
                       based upon the activity. The purpose is to help guide
                       an API consumer/operator to have a better contextual
                       understanding of what was going on *when* the "event"
                       occurred.
    :param user: The user_id value which triggered the request,
                 if available.
    :param error: Boolean value, default false, to signify if the event
                  is an error which should be recorded in the node
                  ``last_error`` field.
    :returns: None. No value is returned by this method.
    """
    if not event:
        # Nothing to record.
        return
    if error:
        # When the task exits out or is saved, the event
        # or error is saved, but that is outside of creating an
        # entry in the history table.
        node.last_error = event
    if not conductor:
        conductor = CONF.host
    if CONF.conductor.node_history:
        # NOTE(TheJulia): DB API automatically adds in a uuid.
        # TODO(TheJulia): At some point, we should allow custom severity.
        node_history.NodeHistory(
            node_id=node.id,
            # Fix: honor the caller-supplied conductor hostname; previously
            # CONF.host was always recorded, silently ignoring the argument.
            conductor=conductor,
            user=user,
            severity=error and "ERROR" or "INFO",
            event=event,
            event_type=event_type or "UNKNOWN").create()
def update_image_type(context, node):
    """Updates is_whole_disk_image and image_type based on the node data.

    :param context: Request context.
    :param node: Node object.
    :return: True if any changes have been done, else False.
    """
    whole_disk = images.is_whole_disk_image(context, node.instance_info)
    if whole_disk is None:
        # Cannot be determined from the instance info; leave node untouched.
        return False

    node.set_driver_internal_info('is_whole_disk_image', whole_disk)
    # We need to gradually phase out is_whole_disk_image in favour of
    # image_type, so make sure to set it as well. The primary use case is to
    # cache information detected from Glance or the presence of kernel/ramdisk.
    image_type = (images.IMAGE_TYPE_WHOLE_DISK if whole_disk
                  else images.IMAGE_TYPE_PARTITION)
    node.set_instance_info('image_type', image_type)
    return True
| 40.507766
| 79
| 0.657882
|
4a0d69091f1e02eabde1b9f6d4457296a1210159
| 2,815
|
py
|
Python
|
fractals/sierpinski.py
|
ronikbhaskar/math-art
|
16289ed260567bea10049e3404753a5d103ddda3
|
[
"MIT"
] | null | null | null |
fractals/sierpinski.py
|
ronikbhaskar/math-art
|
16289ed260567bea10049e3404753a5d103ddda3
|
[
"MIT"
] | null | null | null |
fractals/sierpinski.py
|
ronikbhaskar/math-art
|
16289ed260567bea10049e3404753a5d103ddda3
|
[
"MIT"
] | null | null | null |
"""
Old recursive sierpinski triangle script I made.
I decided it would fit with everything I was doing.
Made some changes to turn it into a single function.
"""
import turtle
import os
from helpers.ps_to_image import make_gif
from helpers.background import draw_gradient_bkg
from math import sqrt
def sierpinski(side_length, pen):
    """Draw a Sierpinski triangle of the given side length using *pen*.

    The pen is first moved (without drawing) so the figure is roughly
    centred on its starting position, the outer triangle is traced, and
    the interior is then filled recursively with upside-down triangles.
    """
    pen.hideturtle()

    # Reposition without drawing to centre the triangle.
    pen.penup()
    pen.backward(side_length / 4)
    pen.left(60)
    pen.backward(side_length / 2)
    pen.pendown()

    # Outer equilateral triangle.
    for _ in range(3):
        pen.forward(side_length)
        pen.right(120)

    def _draw_inner(size):
        """Recursively draw upside-down triangles until they get tiny."""
        if size < side_length / 100:
            return
        pen.forward(size)
        for _ in range(3):
            _draw_inner(size / 2)
            pen.right(60)
            pen.forward(size)
            pen.right(60)
        pen.backward(size)

    _draw_inner(side_length / 2)
def sierpinski_simultaneous(side_length, pen):
    """
    Draw the Sierpinski triangle with three pens "simultaneously",
    one per corner, each rotated 120 degrees from the previous.
    """
    # this line is to vertically center the triangle
    # based on the starting position of the pen
    pen.sety(pen.pos()[1] - side_length * sqrt(3) / 12)
    pen2 = pen.clone()
    pen3 = pen.clone()
    pens = [pen, pen2, pen3]
    # NOTE: the loop variable deliberately rebinds `pen`; each clone is
    # positioned at its own corner before drawing its side.
    for i, pen in enumerate(pens):
        pen.speed(0)
        pen.hideturtle()
        pen.penup()
        pen.right(i * 120)
        pen.left(30)
        pen.backward(side_length/sqrt(3))
        pen.left(30)
        pen.pendown()
        pen.forward(side_length)
        pen.right(120)
        pen.forward(side_length / 2)
    def _sierpinski_simultaneous(size, pens):
        """Recursive helper drawing the corners with all pens in lockstep."""
        if size < side_length / 100:
            # Stop recursing once triangles become smaller than 1% of the side.
            return
        # List comprehensions used purely for their side effects here.
        [pen.forward(size) for pen in pens]
        for _ in range(3):
            _sierpinski_simultaneous(size/2, pens)
            for pen in pens:
                pen.right(60)
                pen.forward(size)
                pen.right(60)
        [pen.backward(size) for pen in pens]
    _sierpinski_simultaneous(side_length / 4, pens)
if __name__ == "__main__":
    side_length = 450
    # Frames/GIF are written next to this script, under ./images.
    image_folder = os.path.dirname(os.path.realpath(__file__)) + "/images"
    tortoise=turtle.Turtle()
    tortoise.color("white")
    turtle.Screen().setup(side_length * 1.5,side_length * 1.5)
    tortoise.speed(0)
    tortoise.hideturtle()
    # Gradient background; colors are RGB triples normalized to 0..1.
    draw_gradient_bkg((255/255,249/255,194/255),
                      (255/255,167/255,145/255),
                      700,700)
    turtle.tracer(1,3)
    # Render the drawing and capture the frames into an animated GIF.
    make_gif(lambda: sierpinski_simultaneous(side_length, tortoise),
             "sierpinski", f"{image_folder}/sierpinski.gif",
             square = True, compressed = False)
| 27.598039
| 74
| 0.609947
|
4a0d6ac9ab8f10c6eefbfee3f00c4ddaaca42980
| 3,358
|
py
|
Python
|
invenio_records_marc21/services/record/fields/leader.py
|
tu-graz-library/invenio-records-marc21
|
af94dbb1101826b4fcc848a8df107247dc15ccbd
|
[
"MIT"
] | 1
|
2022-03-30T11:59:46.000Z
|
2022-03-30T11:59:46.000Z
|
invenio_records_marc21/services/record/fields/leader.py
|
philippgualdi/invenio-records-marc21
|
6a4d17692649d8256ec8c9671d023e1065ce6063
|
[
"MIT"
] | 14
|
2021-01-11T09:31:46.000Z
|
2022-03-15T21:55:55.000Z
|
invenio_records_marc21/services/record/fields/leader.py
|
philippgualdi/invenio-records-marc21
|
6a4d17692649d8256ec8c9671d023e1065ce6063
|
[
"MIT"
] | 4
|
2021-01-07T11:13:58.000Z
|
2021-11-08T09:35:28.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
#
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-Records-Marc21 is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Marc21 leader field class."""
from os import linesep
class LeaderField(object):
    """LeaderField class representing the leaderfield HTML tag in MARC21 XML."""

    # Single table describing the 24-character MARC21 leader layout:
    # (attribute name, slice within the leader string, default value).
    _FIELDS = (
        ("length", slice(0, 5), "00000"),                                # 00-04
        ("status", slice(5, 6), "n"),                                    # 05
        ("type", slice(6, 7), "a"),                                      # 06
        ("level", slice(7, 8), "m"),                                     # 07
        ("control", slice(8, 9), " "),                                   # 08
        ("charset", slice(9, 10), "a"),                                  # 09
        ("ind_count", slice(10, 11), "2"),                               # 10
        ("sub_count", slice(11, 12), "2"),                               # 11
        ("address", slice(12, 17), "00000"),                             # 12-16
        ("encoding", slice(17, 18), "z"),                                # 17
        ("description", slice(18, 19), "c"),                             # 18
        ("multipart_resource_record_level", slice(19, 20), "a"),         # 19
        ("length_field_position", slice(20, 21), "4"),                   # 20
        ("length_starting_character_position_portion", slice(21, 22), "5"),  # 21
        ("length_implementation_defined_portion", slice(22, 23), "0"),   # 22
        ("undefined", slice(23, 24), "0"),                               # 23
    )

    def __init__(self, data=None, **kwargs):
        """Default constructor of the class.

        :param data: optional 24-character leader string to parse.
        :param kwargs: individual field overrides used when data is None.
        """
        if data is not None:
            self._load_from_str(data)
        else:
            self._load_from_dict(**kwargs)

    def _load_from_str(self, data: str):
        """Populate all fields by slicing a 24-character leader string."""
        if len(data) != 24:
            raise ValueError("Leader must have 24 characters!!")
        for name, part, _default in self._FIELDS:
            setattr(self, name, data[part])

    def _load_from_dict(self, **kwargs):
        """Populate all fields from keyword arguments, using defaults."""
        for name, _part, default in self._FIELDS:
            setattr(self, name, kwargs.get(name, default))

    def to_xml_tag(self, tagsep: str = linesep, indent: int = 4) -> str:
        """Get the Marc21 Leaderfield XML tag as string."""
        value = "".join(getattr(self, name) for name, _p, _d in self._FIELDS)
        return " " * indent + "<leader>" + value + "</leader>" + tagsep
| 39.505882
| 114
| 0.62299
|
4a0d6c25e234cdad7dabe6d3b13d9e02d05909ee
| 1,967
|
py
|
Python
|
lldb/test/API/lang/swift/hashed_containers_enums/TestSwiftHashedContainerEnum.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 605
|
2019-10-18T01:15:54.000Z
|
2022-03-31T14:31:04.000Z
|
lldb/test/API/lang/swift/hashed_containers_enums/TestSwiftHashedContainerEnum.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 3,180
|
2019-10-18T01:21:21.000Z
|
2022-03-31T23:25:41.000Z
|
lldb/test/API/lang/swift/hashed_containers_enums/TestSwiftHashedContainerEnum.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 275
|
2019-10-18T05:27:22.000Z
|
2022-03-30T09:04:21.000Z
|
# TestSwiftHashedContainerEnum.py
"""
Test combinations of hashed swift containers with enums as keys/values
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftHashedContainerEnum(TestBase):
    """Checks formatting of hashed Swift containers keyed by enums."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

    @swiftTest
    def test_any_object_type(self):
        """Test combinations of hashed swift containers with enums"""
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, '// break here', lldb.SBFileSpec('main.swift'))
        # (variable, entries may appear in any order, expected substrings)
        cases = [
            ('testA', True, ['key = c', 'value = 1', 'key = b', 'value = 2']),
            ('testB', True, ['key = "a", value = 1', 'key = "b", value = 2']),
            ('testC', True, ['key = b', 'value = 2']),
            ('testD', False, ['[0] = c']),
        ]
        for name, unordered, substrs in cases:
            # Each variable is checked both via the frame-variable path
            # and via expression evaluation.
            for prefix in ('frame variable -d run -- ', 'expr -d run -- '):
                if unordered:
                    self.expect(prefix + name, ordered=False, substrs=substrs)
                else:
                    self.expect(prefix + name, substrs=substrs)
| 26.945205
| 70
| 0.478393
|
4a0d6c525752558360ee926b154c99435767e1f2
| 10,911
|
py
|
Python
|
python/colab/convert_to_student.py
|
UTDataMining/prog-edu-assistant
|
52f9e5aa3ded64f949cafab4a89c08f94e99cddd
|
[
"Apache-2.0"
] | 28
|
2019-02-21T02:12:47.000Z
|
2022-02-20T01:18:31.000Z
|
python/colab/convert_to_student.py
|
UTDataMining/prog-edu-assistant
|
52f9e5aa3ded64f949cafab4a89c08f94e99cddd
|
[
"Apache-2.0"
] | 31
|
2019-02-26T07:53:27.000Z
|
2021-07-30T17:53:03.000Z
|
python/colab/convert_to_student.py
|
UTDataMining/prog-edu-assistant
|
52f9e5aa3ded64f949cafab4a89c08f94e99cddd
|
[
"Apache-2.0"
] | 19
|
2019-02-21T02:10:32.000Z
|
2021-10-21T04:23:10.000Z
|
#!/usr/bin/env python
#
# A tool to convert master notebook to student notebook, similar to
# //go/cmd/assign.go.
#
# Note: at the moment, it only supports the minimal functionality that is
# necessary for running inline tests in Colab directly.
# TODO(salikh): Support unit tests, templates and other functionality of
# //go/cmd/assign.go.
#
# Usage:
#
# convert_to_student.py master.ipynb > student.ipynb
#
import copy
import json
import re
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('master_notebook', None,
'The path to the master notebook file (.ipynb) to convert.')
flags.DEFINE_string('output_student_notebook', None,
'The output path to write the converted student notebook. '
'If not specified, the converted notebook is printed to stdout.')
def LoadNotebook(filename):
  """Load an ipynb notebook.

  Args:
    filename: the name of the .ipynb file.
  Returns:
    The loaded notebook as a JSON object.
  """
  with open(filename) as notebook_file:
    return json.loads(notebook_file.read())
def SaveNotebook(notebook, filename):
  """Save a notebook to .ipynb file.

  Args:
    notebook: a notebook in the form of a JSON object.
    filename: the name of the .ipynb file to write.
  """
  serialized = json.dumps(notebook)
  with open(filename, 'w') as out_file:
    out_file.write(serialized)
def PrintNotebook(notebook):
  """Dump every cell of the notebook to stdout for inspection.

  For each cell this prints a '-- <cell_type>' header, then the cell
  metadata (prefixed with '# ') when present, then the merged source.
  (The previous docstring was a copy-paste from ToStudent.)

  Args:
    notebook: a notebook in the form of a JSON object.
  """
  for cell in notebook['cells']:
    merged_source = ''.join(cell['source'])
    print('-- ' + cell['cell_type'])
    if 'metadata' in cell:
      print('# ' + str(cell['metadata']))
    print(merged_source)
# A regexp identifying master-only notebooks. Applies both to code and markdown cells.
reMasterOnly = re.compile('^[\t ]*#.*MASTER ONLY.*\n?', re.M)
# Cell magics marking instructor-only cells: submissions, autotests, reports.
reSubmission = re.compile('^%%submission[ \t]*\n')
reAutotest = re.compile('%autotest|autotest\\(')
reReport = re.compile('%%(template|report)|report\\(')
# %%solution marks a cell containing the canonical solution.
reSolution = re.compile('^%%solution[ \t]*\n')
# NOTE(review): two near-identical exercise-id patterns exist in this file
# (reExerciseID here and reExerciseId below); they are used by different
# helpers -- presumably historical duplication, confirm before unifying.
reExerciseID = re.compile('^# *EXERCISE_ID: [\'"]?([a-zA-Z0-9_.-]*)[\'"]?\n', re.M)
# BEGIN/END SOLUTION delimit the region replaced by a prompt for students.
reSolutionBegin = re.compile('^([ \t]*)# BEGIN SOLUTION[ \t]*\n', re.M)
reSolutionEnd = re.compile('^[ \t]*# END SOLUTION[ \t]*\n', re.M)
# BEGIN/END PROMPT delimit the placeholder text shown to students.
rePromptBegin = re.compile('^[ \t]*""" # BEGIN PROMPT[ \t]*\n', re.M)
rePromptEnd = re.compile('^[ \t]*""" # END PROMPT[ \t]*\n?', re.M)
# %%studenttest / %%inlinetest magics, capturing the test name.
reStudentTest = re.compile('^%%studenttest *([a-zA-Z0-9_]*)[ \t]*\n')
reInlineTest = re.compile('^%%inlinetest *([a-zA-Z0-9_]*)[ \t]*\n')
reExerciseId = re.compile('^# *EXERCISE[_ ]ID:[ \t]*[\'"]?([a-zA-Z0-9_.-]*)[\'"]?[ \t]*\n?', re.M)
def ShouldSkipCodeCell(source):
  """Returns true iff the cell should be skipped from student notebook.

  A cell is skipped when it is marked MASTER ONLY or contains one of the
  instructor-only magics (%%submission, %%inlinetest, autotest, report).

  Args:
    source: The merged source string of the code cell.
  Returns:
    True iff the cell should be skipped in the student notebook output.
  """
  # Fix: previously returned the raw re.Match object (or None) despite the
  # documented boolean contract; normalized to a real bool. Callers only
  # use the result in boolean context, so this is backward-compatible.
  return any(regex.search(source)
             for regex in (reMasterOnly, reSubmission, reInlineTest,
                           reAutotest, reReport))
def ExtractPrompt(source, default):
  """Attempts to extract the prompt from the code cell.

  Args:
    source: The merged source string of the code cell.
    default: The default prompt string.
  Returns:
    The source with prompt removed.
    The first extracted prompt if prompt regexp matched, or default otherwise.
  """
  promptBeginMatch = rePromptBegin.search(source)
  promptEndMatch = rePromptEnd.search(source)
  if promptBeginMatch and promptEndMatch:
    if promptBeginMatch.end(0) > promptEndMatch.start(0):
      # END PROMPT appears before BEGIN PROMPT -- malformed markers.
      logging.error("Malformed prompt in cell:\n%s", source)
      return source, default
    # Cut the whole BEGIN..END span out of the source and return the text
    # between the markers as the prompt.
    return (source[:promptBeginMatch.start(0)] + source[promptEndMatch.end(0):],
            source[promptBeginMatch.end(0):promptEndMatch.start(0)])
  elif promptBeginMatch or promptEndMatch:
    # Only one of the two markers present -- log and fall through.
    logging.error("Malformed prompt in cell:\n%s", source)
  # No (valid) prompt markers: return input unchanged with the default.
  return source, default
def CleanCodeCell(source):
  """Rewrites the code cell source by removing markers.

  Strips the %%solution/%%studenttest magics and the EXERCISE_ID marker,
  and replaces every # BEGIN SOLUTION .. # END SOLUTION region with the
  prompt text (or an indented '...' placeholder when no prompt is given).

  Args:
    source: The merged source string of the code cell.
  Returns
    A cleaned up source string.
  """
  # Drop a leading %%solution magic line.
  m = reSolution.search(source)
  if m:
    source = source[m.end(0):]
  # Remove the # EXERCISE_ID: ... marker line wherever it appears.
  m = reExerciseID.search(source)
  if m:
    source = source[0:m.start(0)] + source[m.end(0):]
  # Drop a leading %%studenttest magic line.
  m = reStudentTest.search(source)
  if m:
    source = source[m.end(0):]
  m = reSolutionBegin.search(source)
  if m:
    # Default prompt: '...' indented like the BEGIN SOLUTION marker.
    indent = m.group(1)
    prompt = indent + '...\n'
    source, prompt = ExtractPrompt(source, prompt)
    outs = []
    # Splice loop: replace each BEGIN..END SOLUTION region with the
    # current prompt, refreshing the prompt from the remaining text.
    while m:
      outs.append(source[0:m.start(0)])
      post = source[m.start(0):]
      m = reSolutionEnd.search(post)
      if not m:
        logging.error('Unclosed # SOLUTION BEGIN in cell:\n%s', source)
        outs.append(post)
        break
      outs.append(prompt)
      source = post[m.end(0):]
      # Update the prompt from the remaining piece.
      source, prompt = ExtractPrompt(source, prompt)
      m = reSolutionBegin.search(source)
    # Update the prompt from the remaining piece.
    source, prompt = ExtractPrompt(source, prompt)
    # Append the last remaining part.
    outs.append(source)
    source = ''.join(outs)
  return source
def CleanMarkdownCell(source):
  """Rewrites the source of the markdown cell source.

  Currently a pass-through: the source is returned unchanged.

  Args:
    source: The merged source string of the markdown cell.
  Returns:
    A cleaned up source string.
  """
  # TODO(salikh): Implement removing of triple-backtick cells with metadata.
  cleaned = source
  return cleaned
def ExtractExerciseID(source):
  """Attempts to extract exercise ID from the code cell.

  Args:
    source: The merged source string of the code cell.
  Returns:
    The exercise ID string if found, or None if not found.
  """
  match = reExerciseID.search(source)
  return match.group(1) if match else None
def ExtractInlineTest(source):
  """Attempts to extract inline test from the code cell.

  Args:
    source: The merged source string of the code cell.
  Returns:
    test_name: The name of the inline test, or None.
    test_source: The source of the inline test, or None.
  """
  match = reInlineTest.search(source)
  if not match:
    return None, None
  # Everything after the %%inlinetest magic line is the test body.
  return match.group(1), source[match.end(0):]
def ExtractExerciseId(source):
  """Attempts to extract the exercise id from the code cell.

  Args:
    source: The merged source string of the code cell.
  Returns:
    The exercise ID string if exercise ID is found, or None otherwise.
  """
  match = reExerciseId.search(source)
  return match.group(1) if match else None
def ToStudent(notebook, embed_inline_tests=True):
  """Convert a master notebook to student notebook.

  It removes the cells that are recognized as tests and master-only cells,
  and removes some markers (e.g. # EXERCISE_ID) from the code.
  See the source code definition for the details of the transformations.

  Args:
    notebook: a master notebook in the form of a JSON object.
    embed_inline_tests: whether the inline tests should be embedded into
      solution cell metadata.
  Returns
    A converted student notebook in the form of a JSON object.
  """
  # exercise_id -> inline test name -> inline test source.
  inline_tests = {}
  output_cells = []
  # Most recently seen exercise id; later inline tests attach to it.
  current_exercise_id = None
  for cell in notebook['cells']:
    source = ''.join(cell['source'])
    if reMasterOnly.search(source):
      continue
    metadata = {}
    if 'metadata' in cell:
      metadata = copy.deepcopy(cell['metadata'])
    cell_type = cell['cell_type']
    if cell_type == 'markdown':
      source = CleanMarkdownCell(source)
      output_cell = {
          'cell_type': 'markdown',
          'source': source.splitlines(keepends=True),
      }
      if len(metadata) > 0:
        output_cell['metadata'] = metadata
      output_cells.append(output_cell)
      continue
    if cell_type != 'code':
      # Pass through cells with unknown type.
      output_cells.append(cell)
      continue
    # cell_type == 'code'
    if embed_inline_tests:
      # Check whether the cell contains an inline test.
      test_name, test_source = ExtractInlineTest(source)
      if test_name:
        if not current_exercise_id:
          raise Exception('Found an inline test, but no current exercise')
        if current_exercise_id not in inline_tests:
          inline_tests[current_exercise_id] = {}
        inline_tests[current_exercise_id][test_name] = test_source
    if ShouldSkipCodeCell(source):
      continue
    exercise_id = ExtractExerciseId(source)
    if exercise_id:
      metadata['exercise_id'] = exercise_id
      # Store the inline tests map reference for later adding.
      # The dict is stored by reference: tests discovered in later cells
      # show up in this cell's metadata.
      # NOTE(review): re-assigning {} here discards any tests already
      # collected for this id -- assumes inline tests always follow their
      # exercise cell; confirm against notebook conventions.
      if embed_inline_tests:
        inline_tests[exercise_id] = {}
        metadata['inlinetests'] = inline_tests[exercise_id]
      current_exercise_id = exercise_id
    source = CleanCodeCell(source)
    output_cell = {
        'cell_type': 'code',
        'source': source.splitlines(keepends=True),
    }
    if len(metadata) > 0:
      output_cell['metadata'] = metadata
    output_cells.append(output_cell)
  # Copy notebook-level metadata, then swap in the converted cells.
  output_notebook = copy.deepcopy(notebook)
  output_notebook['cells'] = output_cells
  return output_notebook
def main(argv):
  """Converts FLAGS.master_notebook (or argv[1]) to a student notebook.

  The result is written to FLAGS.output_student_notebook when given,
  otherwise dumped to stdout.
  """
  if not FLAGS.master_notebook:
    if len(argv) != 2:
      # Fix: these usage strings were f-strings with no placeholders;
      # plain literals are equivalent and clearer.
      raise app.UsageError('Usage: convert_to_student.py <notebook file>')
    master_notebook_filename = argv[1]
  else:
    if len(argv) != 1:
      raise app.UsageError(
          'Usage: convert_to_student.py --master_notebook <notebook file>')
    master_notebook_filename = FLAGS.master_notebook
  master_notebook = LoadNotebook(master_notebook_filename)
  student_notebook = ToStudent(master_notebook)
  if FLAGS.output_student_notebook:
    SaveNotebook(student_notebook, FLAGS.output_student_notebook)
  else:
    PrintNotebook(student_notebook)


if __name__ == '__main__':
  app.run(main)
| 33.366972
| 99
| 0.630923
|
4a0d6d0f8cd33e6eec0842f2178b0e28b0c9b39e
| 8,022
|
py
|
Python
|
TermTk/TTkCore/TTkTerm/input.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | 1
|
2022-02-28T16:33:25.000Z
|
2022-02-28T16:33:25.000Z
|
TermTk/TTkCore/TTkTerm/input.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
TermTk/TTkCore/TTkTerm/input.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from time import time
import platform
# Select the platform-specific raw-input reader. Linux and macOS share the
# same implementation; Windows and Pyodide are explicitly unsupported.
if platform.system() == "Linux":
    from TermTk.TTkCore.TTkTerm.readinputlinux import *
elif platform.system() == "Darwin":
    # NOTE: reuses the Linux reader; presumably termios-based and therefore
    # portable to macOS -- confirm on Darwin.
    from TermTk.TTkCore.TTkTerm.readinputlinux import *
elif platform.system() == "Windows":
    raise NotImplementedError("Windows OS not yet supported")
elif platform.system() == "Emscripten":
    raise NotImplementedError("Pyodide not yet supported")
from TermTk.TTkCore.log import TTkLog
from TermTk.TTkCore.constant import TTkK
from TermTk.TTkCore.TTkTerm.inputkey import KeyEvent
class TTkMouseEvent:
    """Mouse event description: position, button, event type and tap count."""

    # Buttons (mirroring TTkK constants)
    NoButton = TTkK.NoButton          # The button state does not refer to any button (see QMouseEvent::button()).
    AllButtons = TTkK.AllButtons      # This value corresponds to a mask of all possible mouse buttons. Use to set the 'acceptedButtons' property of a MouseArea to accept ALL mouse buttons.
    LeftButton = TTkK.LeftButton      # The left button is pressed, or an event refers to the left button. (The left button may be the right button on left-handed mice.)
    RightButton = TTkK.RightButton    # The right button.
    MidButton = TTkK.MidButton        # The middle button.
    MiddleButton = TTkK.MiddleButton  # The middle button.
    Wheel = TTkK.Wheel

    # Events
    NoEvent = TTkK.NoEvent
    Press = TTkK.Press
    Release = TTkK.Release
    Drag = TTkK.Drag
    Move = TTkK.Move
    Up = TTkK.WHEEL_Up
    Down = TTkK.WHEEL_Down

    __slots__ = ("x", "y", "key", "evt", "tap", "raw")

    def __init__(self, x: int, y: int, key: int, evt: int, tap: int, raw: str):
        self.x = x
        self.y = y
        self.key = key
        self.evt = evt
        self.raw = raw
        self.tap = tap

    def clone(self, pos=None, evt=None):
        """Return a copy, optionally overriding the position and/or event."""
        new_x, new_y = pos if pos else (self.x, self.y)
        new_evt = evt if evt else self.evt
        return TTkMouseEvent(new_x, new_y, self.key, new_evt, self.tap, self.raw)

    def key2str(self):
        """Human-readable name of the button held by this event."""
        names = {
            TTkMouseEvent.NoButton: "NoButton",
            TTkMouseEvent.AllButtons: "AllButtons",
            TTkMouseEvent.LeftButton: "LeftButton",
            TTkMouseEvent.RightButton: "RightButton",
            TTkMouseEvent.MidButton: "MidButton",
            TTkMouseEvent.MiddleButton: "MiddleButton",
            TTkMouseEvent.Wheel: "Wheel",
        }
        return names.get(self.key, "Undefined")

    def evt2str(self):
        """Human-readable name of the event type."""
        names = {
            TTkMouseEvent.NoEvent: "NoEvent",
            TTkMouseEvent.Press: "Press",
            TTkMouseEvent.Release: "Release",
            TTkMouseEvent.Drag: "Drag",
            TTkMouseEvent.Move: "Move",
            TTkMouseEvent.Up: "Up",
            TTkMouseEvent.Down: "Down",
        }
        return names.get(self.evt, "Undefined")

    def __str__(self):
        return f"MouseEvent ({self.x},{self.y}) {self.key2str()} {self.evt2str()} tap:{self.tap} - {self.raw}"
class TTkInput:
    """Static helper turning raw terminal input into key/mouse events."""

    # Timestamp of the previous press per button, used by _checkTap to
    # detect double/triple clicks (0.4s window).
    _leftLastTime = 0
    _midLastTime = 0
    _rightLastTime = 0
    # Consecutive tap counters per button.
    _leftTap = 0
    _midTap = 0
    _rightTap = 0

    @staticmethod
    def get_key(callback=None):
        """Read input in a loop, dispatching parsed events to *callback*.

        :param callback: callable invoked as ``callback(kevt, mevt)`` after
            every read; returning a falsy value stops the loop.
        """
        # Matches mouse reports of the form ESC [ < code ; x ; y (M|m),
        # where "M" means press/drag and "m" release -- presumably the
        # terminal is configured for SGR extended mouse mode elsewhere.
        mouse_re = re.compile(r"\033\[<(\d+);(\d+);(\d+)([mM])")
        while not False:  # i.e. loop forever until the callback stops us
            stdinRead = readInput()
            mevt = None
            # First try to interpret the sequence as a keyboard event.
            kevt = KeyEvent.parse(stdinRead)
            if kevt is None and stdinRead.startswith("\033[<"):
                # Mouse Event
                m = mouse_re.match(stdinRead)
                if not m:
                    # TODO: Return Error
                    TTkLog.error("UNHANDLED: " + stdinRead.replace("\033", "<ESC>"))
                    continue
                code = int(m.group(1))
                # Terminal coordinates are 1-based; convert to 0-based.
                x = int(m.group(2)) - 1
                y = int(m.group(3)) - 1
                state = m.group(4)
                key = TTkMouseEvent.NoButton
                evt = TTkMouseEvent.NoEvent
                tap = 0

                def _checkTap(lastTime, tap):
                    # On a press ("M"): increment the tap counter when the
                    # previous press was under 0.4s ago, otherwise reset to 1.
                    # Releases leave the counters untouched.
                    if state == "M":
                        t = time()
                        if (t - lastTime) < 0.4:
                            return t, tap + 1
                        else:
                            return t, 1
                    return lastTime, tap

                # Button codes: 0x00/0x01/0x02 = left/mid/right press or
                # release; 0x20/0x21/0x22 = drag; 0x40/0x41 = wheel up/down.
                if code == 0x00:
                    TTkInput._leftLastTime, TTkInput._leftTap = _checkTap(
                        TTkInput._leftLastTime, TTkInput._leftTap
                    )
                    tap = TTkInput._leftTap
                    key = TTkMouseEvent.LeftButton
                    evt = TTkMouseEvent.Press if state == "M" else TTkMouseEvent.Release
                elif code == 0x01:
                    TTkInput._midLastTime, TTkInput._midTap = _checkTap(
                        TTkInput._midLastTime, TTkInput._midTap
                    )
                    tap = TTkInput._midTap
                    key = TTkMouseEvent.MidButton
                    evt = TTkMouseEvent.Press if state == "M" else TTkMouseEvent.Release
                elif code == 0x02:
                    TTkInput._rightLastTime, TTkInput._rightTap = _checkTap(
                        TTkInput._rightLastTime, TTkInput._rightTap
                    )
                    tap = TTkInput._rightTap
                    key = TTkMouseEvent.RightButton
                    evt = TTkMouseEvent.Press if state == "M" else TTkMouseEvent.Release
                elif code == 0x20:
                    key = TTkMouseEvent.LeftButton
                    evt = TTkMouseEvent.Drag
                elif code == 0x21:
                    key = TTkMouseEvent.MidButton
                    evt = TTkMouseEvent.Drag
                elif code == 0x22:
                    key = TTkMouseEvent.RightButton
                    evt = TTkMouseEvent.Drag
                elif code == 0x40:
                    key = TTkMouseEvent.Wheel
                    evt = TTkMouseEvent.Up
                elif code == 0x41:
                    key = TTkMouseEvent.Wheel
                    evt = TTkMouseEvent.Down
                mevt = TTkMouseEvent(
                    x, y, key, evt, tap, m.group(0).replace("\033", "<ESC>")
                )
            if kevt is None and mevt is None:
                # Neither key nor mouse: log the raw sequence for debugging.
                TTkLog.error("UNHANDLED: " + stdinRead.replace("\033", "<ESC>"))
            if callback is not None:
                # A falsy return value from the callback terminates the loop.
                if not callback(kevt, mevt):
                    break
def main():
    """Interactive demo: echo every keyboard/mouse event until the callback stops."""
    print("Retrieve Keyboard, Mouse press/drag/wheel Events")
    print("Press q or <ESC> to exit")
    import term as tty
    tty.Term.push(tty.Term.mouse_on)
    tty.Term.echo(False)
    def on_event(kevt=None, mevt=None):
        # Print whichever event kind this input chunk produced.
        if kevt is not None:
            print(f"Key Event: {kevt}")
        if mevt is not None:
            print(f"Mouse Event: {mevt}")
    TTkInput.get_key(on_event)
    tty.Term.push(tty.Term.mouse_off, tty.Term.mouse_direct_off)
    tty.Term.echo(True)
if __name__ == "__main__":
    main()
| 36.463636
| 158
| 0.573672
|
4a0d6d98e8a0ec8c765899045e7410edf86c4a65
| 275
|
py
|
Python
|
AlgorithmTest/CODING_CHALLENGE/CC_1802.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
AlgorithmTest/CODING_CHALLENGE/CC_1802.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
AlgorithmTest/CODING_CHALLENGE/CC_1802.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
# https://ktaivle-ai.moducoding.com/Question/1802/View/1#1
# Floor construction 2 (beginner level)
import sys
MOD = 796796
n = int(sys.stdin.readline())
# f[width]: number of tilings of a floor of the given width, modulo MOD.
# Recurrence: f(w) = 2*f(w-1) + 1 for even w, and 2*f(w-1) - 1 for odd w.
f = [0] * 1001
f[1] = 1
for width in range(2, n + 1):
    delta = 1 if width % 2 == 0 else -1
    f[width] = (2 * f[width - 1] + delta) % MOD
print(f[n])
| 17.1875
| 58
| 0.530909
|
4a0d6e4bafc954448208bab990a9e7acfd20e3ed
| 3,430
|
py
|
Python
|
schort.py
|
informaniac/schort
|
7def769bbb80befdcbc5095f3e6a3fc0c52be2f5
|
[
"CC0-1.0"
] | null | null | null |
schort.py
|
informaniac/schort
|
7def769bbb80befdcbc5095f3e6a3fc0c52be2f5
|
[
"CC0-1.0"
] | null | null | null |
schort.py
|
informaniac/schort
|
7def769bbb80befdcbc5095f3e6a3fc0c52be2f5
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask, render_template, url_for, request, redirect, abort, escape
import sqlite3, random, string, time, hashlib, base64
from urllib.parse import urlparse
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
@app.route('/<shortLink>', methods=['GET', 'POST'])
def short(shortLink=""):
	"""Resolve or create short links.

	GET  /            landing page
	GET  /<id>        301-redirect to the stored long URL
	GET  /<id>+       render the target as a clickable link (no auto redirect)
	GET  /<id>?resolve  return the escaped target URL as plain text
	POST /            create a new short link; returns it as plain text
	"""
	if request.method == "GET":
		if shortLink:
			# A trailing "+" asks for a preview instead of an automatic redirect.
			noauto = shortLink[-1] == "+"
			if noauto: shortLink = shortLink[:-1]
			conn = sqlite3.connect("data/links.sqlite")
			c = conn.cursor()
			result = c.execute('SELECT longLink FROM links WHERE shortLink=?', (shortLink, )).fetchone()
			conn.close()
			if result:
				url = result[0]
				parsedUrl = urlparse(url)
				# Stored links without a scheme are treated as plain http.
				if parsedUrl.scheme == "":
					url = "http://" + url
				if "resolve" in request.args:
					return escape(url)
				else:
					if noauto:
						url = str(escape(url))
						html = "<a href=" + url + ">" + url + "</a>"
						return html
					else:
						return redirect(url, code=301) # Redirect to long URL saved in the database
			else:
				return render_template("index.html", name=shortLink, message="Enter long URL for "+ request.url_root + shortLink+":", message_type="info") # Custom link page
		else:
			return render_template("index.html", name=shortLink) # Landing page
	elif request.method == "POST": # Someone submitted a new link to short
		longUrl = request.form["url"] # required, accept the exception if the key does not exist
		wishId = request.form.get("wishId")
		if len(longUrl) <= 0:
			abort(400)
		databaseId = insertIdUnique(longUrl, idToCheck=wishId)
		return request.url_root + databaseId # Short link in plain text
def insertIdUnique(longUrl, idToCheck=None):
	"""Insert *longUrl* under a unique short id and return that id.

	The default id is the first 4 chars of the url-safe base64 SHA-256 of the
	URL.  On an id collision: if the stored long URL matches, the existing id
	is reused; otherwise the function recurses with one more hash character
	until a free id is found (or the hash is exhausted -> 500).
	"""
	hashUrl = hashlib.sha256(longUrl.encode()).digest()
	base64Url = base64.urlsafe_b64encode(hashUrl).decode()
	if idToCheck == None or idToCheck == "":
		idToCheck = base64Url[:4]
	conn = sqlite3.connect("data/links.sqlite")
	c = conn.cursor()
	try:
		c.execute('INSERT INTO links VALUES (?, ?, ?, ?, ?)', (idToCheck, longUrl, int(time.time()), request.remote_addr, "default" ))
		databaseId = idToCheck
		conn.commit()
		conn.close()
	except sqlite3.IntegrityError as e:
		# UNIQUE constraint on shortLink fired: the id is already taken.
		print("Hash already exists, does the long URL matches?")
		longUrlDb = c.execute('SELECT * FROM links WHERE shortLink=?', (idToCheck, )).fetchone()
		if longUrl == longUrlDb[1]:
			# Same URL hashed to the same id: idempotent insert, reuse it.
			# NOTE(review): conn is not closed on this path - possible leak; confirm.
			print(longUrl + " is already in database with id " + idToCheck + ". Serving old id…")
			databaseId = idToCheck
		else:
			# Genuine collision: retry with one more character of the hash.
			print("Found real hash collision for " + longUrl + " and " + longUrlDb[1])
			conn.commit()
			conn.close()
			if len(base64Url) - 1 >= len(idToCheck) + 1:
				databaseId = insertIdUnique(longUrl, idToCheck=base64Url[:len(idToCheck)+1])
			else:
				print("Can't produce a long enough hash from the new link to be unique. This should never happen")
				print("Bailing out, you are on your own. Good luck.")
				print("=========================================================================================")
				abort(500)
	return databaseId
def initDB():
	"""Ensure the links table exists in data/links.sqlite (idempotent)."""
	db = sqlite3.connect("data/links.sqlite")
	cur = db.cursor()
	cur.execute(
		'''CREATE TABLE IF NOT EXISTS links (shortLink UNIQUE NOT NULL, longLink, timestamp, ip, redirectMethod);'''
	)
	db.commit()
	db.close()
if __name__ == '__main__':
	# Development entry point: create the schema, then start Flask's built-in server.
	initDB()
	app.run(debug=True) # If you call this file directly it will always run in debug mode. THIS IS VERY DANGEROUS!
# vim: noexpandtab:ts=2:sw=2:sts=2
| 37.692308
| 161
| 0.66035
|
4a0d6ea4758e816c0828363c0506223db04c5553
| 16,011
|
py
|
Python
|
tests/test_order_Steinhardt.py
|
ThenoobMario/freud
|
3dad11885dcfebfa79fe2575f4e8b556ab3caba2
|
[
"BSD-3-Clause"
] | 172
|
2018-11-24T03:07:53.000Z
|
2022-02-24T17:18:15.000Z
|
tests/test_order_Steinhardt.py
|
ThenoobMario/freud
|
3dad11885dcfebfa79fe2575f4e8b556ab3caba2
|
[
"BSD-3-Clause"
] | 631
|
2019-01-23T17:49:33.000Z
|
2022-03-28T19:46:36.000Z
|
tests/test_order_Steinhardt.py
|
ThenoobMario/freud
|
3dad11885dcfebfa79fe2575f4e8b556ab3caba2
|
[
"BSD-3-Clause"
] | 30
|
2019-07-24T07:57:06.000Z
|
2022-02-25T10:58:19.000Z
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pytest
import rowan
import util
import freud
matplotlib.use("agg")
# Validated against manual calculation and pyboo
# Reference q6/w6 values used throughout the tests below; per the comment
# above, they were cross-checked externally rather than derived in this file.
PERFECT_FCC_Q6 = 0.57452416
PERFECT_FCC_W6 = -0.00262604
class TestSteinhardt:
    """Tests for freud.order.Steinhardt (ql/wl bond-orientational order)."""
    def test_shape(self):
        """particle_order has one entry per particle."""
        N = 1000
        L = 10
        box, positions = freud.data.make_random_system(L, N, seed=0)
        comp = freud.order.Steinhardt(6)
        comp.compute((box, positions), neighbors={"r_max": 1.5})
        npt.assert_equal(comp.particle_order.shape[0], N)
    @pytest.mark.parametrize("sph_l", range(3, 8))
    def test_qlmi(self, sph_l):
        """Test the raw calculated qlmi."""
        special = pytest.importorskip("scipy.special")
        sph_harm = special.sph_harm
        atol = 1e-4
        L = 8
        N = 100
        box, points = freud.data.make_random_system(L, N, seed=0)
        num_neighbors = 4
        # Note the order of m values provided by fsph.
        ms = np.array(list(range(sph_l + 1)) + [-m for m in range(1, sph_l + 1)])[
            :, np.newaxis
        ]
        aq = freud.locality.AABBQuery(box, points)
        nl = aq.query(
            points, {"exclude_ii": True, "num_neighbors": num_neighbors}
        ).toNeighborList()
        comp = freud.order.Steinhardt(sph_l)
        comp.compute(aq, neighbors=nl)
        qlmi = np.zeros([N, 2 * sph_l + 1], dtype=complex)
        # Loop over the particles and compute the qlmis for each (the
        # broadcasting syntax becomes rather abstruse for 3D arrays, and we
        # have to match the indices to the NeighborList anyway).
        for i in range(N):
            neighbors_i = nl[nl.query_point_indices == i]
            bonds = box.wrap(points[neighbors_i[:, 1]] - points[neighbors_i[:, 0]])
            r = np.linalg.norm(bonds, axis=-1)
            thetas = np.arccos(bonds[:, 2] / r)
            phis = np.arctan2(bonds[:, 1], bonds[:, 0])
            qlmi[i, :] = np.sum(
                sph_harm(ms, sph_l, phis[np.newaxis, :], thetas[np.newaxis, :]), axis=-1
            )
        qlmi /= num_neighbors
        npt.assert_allclose(comp.particle_harmonics, qlmi, atol=atol)
    def test_l_axis_aligned(self):
        """Odd-l ql distinguishes the end points from the symmetric center."""
        # This test has three points along the z-axis. By construction, the
        # points on the end should have Q_l = 1 for odd l and the central
        # point should have Q_l = 0 for odd l. All three points should
        # have perfect order for even l.
        box = freud.box.Box.cube(10)
        positions = [[0, 0, -1], [0, 0, 0], [0, 0, 1]]
        for odd_l in range(1, 20, 2):
            comp = freud.order.Steinhardt(odd_l)
            comp.compute((box, positions), neighbors={"num_neighbors": 2})
            npt.assert_allclose(comp.particle_order, [1, 0, 1], atol=1e-5)
        for even_l in range(0, 20, 2):
            comp = freud.order.Steinhardt(even_l)
            comp.compute((box, positions), neighbors={"num_neighbors": 2})
            npt.assert_allclose(comp.particle_order, 1, atol=1e-5)
    def test_identical_environments_ql(self):
        """All FCC sites give the reference Q6 with ball queries."""
        box, positions = freud.data.UnitCell.fcc().generate_system(4, scale=2)
        r_max = 1.5
        test_set = util.make_raw_query_nlist_test_set(
            box, positions, positions, "ball", r_max, 0, True
        )
        for nq, neighbors in test_set:
            comp = freud.order.Steinhardt(6)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_Q6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_Q6) < 1e-5
            comp = freud.order.Steinhardt(6, average=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_Q6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_Q6) < 1e-5
    def test_identical_environments_ql_near(self):
        """Q6 with nearest-neighbor queries; also perturbation sensitivity."""
        box, positions = freud.data.UnitCell.fcc().generate_system(4)
        r_max = 1.5
        n = 12
        test_set = util.make_raw_query_nlist_test_set(
            box, positions, positions, "nearest", r_max, n, True
        )
        for nq, neighbors in test_set:
            comp = freud.order.Steinhardt(6)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_Q6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_Q6) < 1e-5
            comp = freud.order.Steinhardt(6, average=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_Q6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_Q6) < 1e-5
        # Perturb one position
        perturbed_positions = positions.copy()
        perturbed_positions[-1] += [0.1, 0, 0]
        test_set = util.make_raw_query_nlist_test_set(
            box, perturbed_positions, perturbed_positions, "nearest", r_max, n, True
        )
        # Ensure exactly 13 values change for the perturbed system
        for nq, neighbors in test_set:
            comp = freud.order.Steinhardt(6)
            comp.compute(nq, neighbors=neighbors)
            assert sum(~np.isclose(comp.ql, PERFECT_FCC_Q6, rtol=1e-6)) == 13
            # More than 13 particles should change for
            # ql averaged over neighbors
            comp = freud.order.Steinhardt(6, average=True)
            comp.compute(nq, neighbors=neighbors)
            assert sum(~np.isclose(comp.particle_order, PERFECT_FCC_Q6, rtol=1e-6)) > 13
    def test_identical_environments_wl(self):
        """All FCC sites give the reference W6 with ball queries."""
        box, positions = freud.data.UnitCell.fcc().generate_system(4, scale=2)
        r_max = 1.5
        test_set = util.make_raw_query_nlist_test_set(
            box, positions, positions, "ball", r_max, 0, True
        )
        for nq, neighbors in test_set:
            comp = freud.order.Steinhardt(6, wl=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_W6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_W6) < 1e-5
            comp = freud.order.Steinhardt(6, wl=True, average=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_W6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_W6) < 1e-5
    def test_identical_environments_wl_near(self):
        """All FCC sites give the reference W6 with nearest-neighbor queries."""
        box, positions = freud.data.UnitCell.fcc().generate_system(4)
        r_max = 1.5
        n = 12
        test_set = util.make_raw_query_nlist_test_set(
            box, positions, positions, "nearest", r_max, n, True
        )
        for nq, neighbors in test_set:
            comp = freud.order.Steinhardt(6, wl=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_W6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_W6) < 1e-5
            comp = freud.order.Steinhardt(6, wl=True, average=True)
            comp.compute(nq, neighbors=neighbors)
            npt.assert_allclose(
                np.average(comp.particle_order), PERFECT_FCC_W6, atol=1e-5
            )
            npt.assert_allclose(comp.particle_order, comp.particle_order[0], atol=1e-5)
            assert abs(comp.order - PERFECT_FCC_W6) < 1e-5
    def test_weighted(self):
        """Neighbor weights change ql/wl as expected in a perfect FCC system."""
        box, positions = freud.data.UnitCell.fcc().generate_system(4)
        r_max = 1.5
        n = 12
        test_set = util.make_raw_query_nlist_test_set(
            box, positions, positions, "nearest", r_max, n, True
        )
        # Skip test sets without an explicit neighbor list
        for nq, neighbors in filter(
            lambda ts: type(ts[1]) == freud.locality.NeighborList, test_set
        ):
            nlist = neighbors
            for wt in [0, 0.1, 0.9, 1.1, 10, 1e6]:
                # Change the weight of the first bond for each particle
                weights = nlist.weights.copy()
                weights[nlist.segments] = wt
                weighted_nlist = freud.locality.NeighborList.from_arrays(
                    len(positions),
                    len(positions),
                    nlist.query_point_indices,
                    nlist.point_indices,
                    nlist.distances,
                    weights,
                )
                comp = freud.order.Steinhardt(6, weighted=True)
                comp.compute(nq, neighbors=weighted_nlist)
                # Unequal neighbor weighting in a perfect FCC structure
                # appears to increase the Q6 order parameter
                npt.assert_array_less(PERFECT_FCC_Q6, comp.particle_order)
                npt.assert_allclose(
                    comp.particle_order, comp.particle_order[0], atol=1e-5
                )
                npt.assert_array_less(PERFECT_FCC_Q6, comp.order)
                # Ensure that W6 values are altered by changing the weights
                comp = freud.order.Steinhardt(6, wl=True, weighted=True)
                comp.compute(nq, neighbors=weighted_nlist)
                with pytest.raises(AssertionError):
                    npt.assert_allclose(
                        np.average(comp.particle_order), PERFECT_FCC_W6, rtol=1e-5
                    )
                with pytest.raises(AssertionError):
                    npt.assert_allclose(comp.order, PERFECT_FCC_W6, rtol=1e-5)
    def test_attribute_access(self):
        """Accessing results before compute() raises AttributeError."""
        comp = freud.order.Steinhardt(6)
        with pytest.raises(AttributeError):
            comp.order
        with pytest.raises(AttributeError):
            comp.particle_order
        box, positions = freud.data.UnitCell.fcc().generate_system(4)
        comp.compute((box, positions), neighbors={"r_max": 1.5})
        comp.order
        comp.particle_order
    def test_compute_twice_norm(self):
        """Test that computing norm twice works as expected."""
        L = 5
        num_points = 100
        box, points = freud.data.make_random_system(L, num_points, seed=0)
        st = freud.order.Steinhardt(6)
        first_result = st.compute((box, points), neighbors={"r_max": 1.5}).order
        second_result = st.compute((box, points), neighbors={"r_max": 1.5}).order
        npt.assert_array_almost_equal(first_result, second_result)
    def test_rotational_invariance(self):
        """q6/w6 are unchanged under random rigid rotations of the system."""
        box = freud.box.Box.cube(10)
        positions = np.array(
            [
                [0, 0, 0],
                [-1, -1, 0],
                [-1, 1, 0],
                [1, -1, 0],
                [1, 1, 0],
                [-1, 0, -1],
                [-1, 0, 1],
                [1, 0, -1],
                [1, 0, 1],
                [0, -1, -1],
                [0, -1, 1],
                [0, 1, -1],
                [0, 1, 1],
            ]
        )
        query_point_indices = np.zeros(len(positions) - 1)
        point_indices = np.arange(1, len(positions))
        nlist = freud.locality.NeighborList.from_arrays(
            len(positions),
            len(positions),
            query_point_indices,
            point_indices,
            np.full(len(query_point_indices), np.sqrt(2)),
        )
        q6 = freud.order.Steinhardt(6)
        w6 = freud.order.Steinhardt(6, wl=True)
        q6.compute((box, positions), neighbors=nlist)
        q6_unrotated_order = q6.particle_order[0]
        w6.compute((box, positions), neighbors=nlist)
        w6_unrotated_order = w6.particle_order[0]
        for i in range(10):
            np.random.seed(i)
            quat = rowan.random.rand()
            positions_rotated = rowan.rotate(quat, positions)
            # Ensure Q6 is rotationally invariant
            q6.compute((box, positions_rotated), neighbors=nlist)
            npt.assert_allclose(q6.particle_order[0], q6_unrotated_order, rtol=1e-5)
            npt.assert_allclose(q6.particle_order[0], PERFECT_FCC_Q6, rtol=1e-5)
            # Ensure W6 is rotationally invariant
            w6.compute((box, positions_rotated), neighbors=nlist)
            npt.assert_allclose(w6.particle_order[0], w6_unrotated_order, rtol=1e-5)
            npt.assert_allclose(w6.particle_order[0], PERFECT_FCC_W6, rtol=1e-5)
    def test_repr(self):
        """repr() round-trips through eval()."""
        comp = freud.order.Steinhardt(6)
        assert str(comp) == str(eval(repr(comp)))
        # Use non-default arguments for all parameters
        comp = freud.order.Steinhardt(6, average=True, wl=True, weighted=True)
        assert str(comp) == str(eval(repr(comp)))
    def test_repr_png(self):
        """Plotting requires compute(); _repr_png_ works afterwards."""
        L = 5
        num_points = 100
        box, points = freud.data.make_random_system(L, num_points, seed=0)
        st = freud.order.Steinhardt(6)
        with pytest.raises(AttributeError):
            st.plot()
        assert st._repr_png_() is None
        st.compute(system=(box, points), neighbors={"r_max": 1.5})
        st._repr_png_()
        plt.close("all")
    def test_no_neighbors(self):
        """Ensure that particles without neighbors are assigned NaN."""
        box = freud.box.Box.cube(10)
        positions = [(0, 0, 0)]
        comp = freud.order.Steinhardt(6)
        comp.compute((box, positions), neighbors={"r_max": 1.25})
        assert np.all(np.isnan(comp.particle_order))
        npt.assert_allclose(np.nan_to_num(comp.particle_order), 0)
    def test_multiple_l(self):
        """Raw qlmi for several l values computed by one Steinhardt object."""
        special = pytest.importorskip("scipy.special")
        sph_harm = special.sph_harm
        atol = 1e-4
        L = 8
        N = 100
        box, points = freud.data.make_random_system(L, N, seed=0)
        num_neighbors = 4
        sph_l = list(range(3, 8))
        # Note the order of m values provided by fsph.
        ms_per_l = [
            np.array(list(range(l + 1)) + [-m for m in range(1, l + 1)])[:, np.newaxis]
            for l in sph_l
        ]
        aq = freud.locality.AABBQuery(box, points)
        nl = aq.query(
            points, {"exclude_ii": True, "num_neighbors": num_neighbors}
        ).toNeighborList()
        comp = freud.order.Steinhardt(sph_l)
        comp.compute(aq, neighbors=nl)
        qlmis = [np.zeros([N, 2 * l + 1], dtype=complex) for l in sph_l]
        # Loop over the particles and compute the qlmis for each (the
        # broadcasting syntax becomes rather abstruse for 3D arrays, and we
        # have to match the indices to the NeighborList anyway).
        for l, ms, qlmi in zip(sph_l, ms_per_l, qlmis):
            for i in range(N):
                neighbors_i = nl[nl.query_point_indices == i]
                bonds = box.wrap(points[neighbors_i[:, 1]] - points[neighbors_i[:, 0]])
                r = np.linalg.norm(bonds, axis=-1)
                thetas = np.arccos(bonds[:, 2] / r)
                phis = np.arctan2(bonds[:, 1], bonds[:, 0])
                qlmi[i, :] = np.sum(
                    sph_harm(ms, l, phis[np.newaxis, :], thetas[np.newaxis, :]), axis=-1
                )
            qlmi /= num_neighbors
        assert all(
            np.allclose(comp.particle_harmonics[i], qlmis[i], atol=atol)
            for i in range(len(sph_l))
        )
| 38.767554
| 88
| 0.583286
|
4a0d6eb446cc7bc2666ff6bb3f817f992991c0b9
| 3,362
|
py
|
Python
|
cart/services/cart_service.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
cart/services/cart_service.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
cart/services/cart_service.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
import logging
from django.http import HttpRequest
from cart.models import CartItem, Cart
from store.models import Product, Variation
logger = logging.getLogger(__file__)
def _get_session_id(request: HttpRequest):
    """Return the session key for *request*, creating a session if needed.

    Bug fix: Django's ``SessionBase.create()`` returns ``None`` — it only
    sets ``session_key`` as a side effect — so the original code returned
    ``None`` for visitors without a session.  Re-read ``session_key`` after
    creating the session instead of using the return value.
    """
    session_id = request.session.session_key
    if not session_id:
        request.session.create()
        session_id = request.session.session_key
    return session_id
def _increment_or_create_item(cart_items, product_variation, create_kwargs):
    """Increment the quantity of the cart item whose variation set equals
    *product_variation*, or create a new item carrying those variations.

    ``create_kwargs`` holds the owner field (``buyer=...`` or ``cart=...``)
    plus the product, so the same logic serves both ownership modes.
    """
    for cart_item in cart_items:
        existing_variations = list(cart_item.variations.all())
        logger.debug("cart item %s has variations %s", cart_item, existing_variations)
        if product_variation == existing_variations:
            cart_item.quantity += 1
            cart_item.save()
            return
    # No item with the exact same variation combination: add a new row.
    new_cart_item = CartItem.objects.create(quantity=1, **create_kwargs)
    new_cart_item.variations.add(*product_variation)
    new_cart_item.save()


def add_item_to_cart(request, product_id):
    """Add one unit of product *product_id*, with the variations posted in
    the form, to the current user's cart (or to the session-backed cart for
    anonymous visitors).

    An existing item with the exact same variation combination has its
    quantity incremented instead of a duplicate row being created.
    """
    current_user = request.user
    product = Product.objects.get(id=product_id)
    # Resolve posted form fields (e.g. color=red, size=m) to Variation rows;
    # fields that match no variation (e.g. the CSRF token) are ignored.
    product_variation = []
    for key, value in request.POST.items():
        try:
            variation = Variation.objects.get(product=product, variation_category__iexact=key,
                                              variation_value__iexact=value)
            product_variation.append(variation)
        except Variation.DoesNotExist as e:
            logger.debug("no matching variation: %s", e)
    logger.debug("selected variations: %s", product_variation)
    if current_user.is_authenticated:
        owner = {'buyer': current_user}
    else:
        # Anonymous users are tracked through a session-keyed Cart row.
        try:
            cart = Cart.objects.get(cart_id=_get_session_id(request))
        except Cart.DoesNotExist:
            cart = Cart.objects.create(cart_id=_get_session_id(request))
        owner = {'cart': cart}
    cart_items = CartItem.objects.filter(product=product, **owner).prefetch_related('variations')
    _increment_or_create_item(cart_items, product_variation, dict(product=product, **owner))
| 37.775281
| 112
| 0.624331
|
4a0d6edf03c1db15e6cef712d27f94fb496cce1d
| 20,549
|
py
|
Python
|
flux_combined_high_binding/model_613.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_613.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_613.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 72500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 40000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
# --- Rules -------------------------------------------------------------------
# Auto-generated PySB rules.  Naming convention: `<action>_<n>_<actors>` with
# rate parameters `<rule>_2kf` (bimolecular forward), `<rule>_1kr` (reverse)
# and `<rule>_1kc` (catalytic step).  All rate parameters are declared earlier
# in the file (outside this view).

# Extrinsic pathway: ligand/receptor engagement, Fadd recruitment, C8 activation.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
# C8A truncates Bid (BidU -> BidT), linking the extrinsic to the mitochondrial arm.
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
# Apoptosome assembly (ApafI + CytoCC -> ApafA; ApafA + C9 -> Apop) and Xiap/Smac interplay.
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
# Effector caspase activation (C3pro -> C3A) by the apoptosome, and Xiap inhibition/ubiquitination.
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
# C3A cleaves PARP (ParpU -> ParpC), the terminal apoptosis marker.
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# Mitochondrial arm: BidT translocation, Bax activation/self-catalysis, Bcl2 inhibition.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
# NOTE(review): the next two rules use rate-parameter suffixes `_2df/_1dr` and
# `_2xf/_1xr` instead of the `_2kf/_1kr` convention used everywhere else.
# Presumably the Parameter declarations (out of view) match these names —
# verify against the parameter section; if not, these lines will NameError.
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
# Stepwise assembly of the BaxA tetramer pore (dimer -> trimer -> tetramer).
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Pore transport: the tetramer releases SmacM -> SmacC and CytoCM -> CytoCC to the cytosol.
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# Caspase feedback loop: C8A -> C3A -> C6A -> C8A amplification.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions ------------------------------------------------------
# One Initial per monomer, all sites unbound; the `<Monomer>_0` amount
# parameters are declared earlier in the file (outside this view).
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.134259
| 798
| 0.804127
|
4a0d6f02a2bfa46c029b915e41e2b87d11f0008d
| 56
|
py
|
Python
|
pure/__init__.py
|
Enteee/pure
|
98e9019956c0444ade610bee8bfc752da4540e50
|
[
"Apache-2.0"
] | null | null | null |
pure/__init__.py
|
Enteee/pure
|
98e9019956c0444ade610bee8bfc752da4540e50
|
[
"Apache-2.0"
] | 1
|
2019-10-22T17:01:43.000Z
|
2019-10-22T17:01:43.000Z
|
pure/__init__.py
|
Enteee/pure
|
98e9019956c0444ade610bee8bfc752da4540e50
|
[
"Apache-2.0"
] | null | null | null |
# vim: set fenc=utf8 ts=4 sw=4 et :
from .pure import *
| 18.666667
| 35
| 0.642857
|
4a0d6f2cccc32f02305c1f26e2c15d22d5744d9b
| 85,345
|
py
|
Python
|
pandas/tests/test_algos.py
|
AndrewEckart/pandas
|
075ec87643130074c9389a17a9d964cd33f779dc
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-09-13T12:15:43.000Z
|
2021-09-13T13:07:23.000Z
|
pandas/tests/test_algos.py
|
casperlundberg/pandas
|
0b671ad37ca31eebe60b8443496994b88bffd2e2
|
[
"BSD-3-Clause"
] | 1
|
2022-02-02T13:33:09.000Z
|
2022-02-02T13:33:09.000Z
|
pandas/tests/test_algos.py
|
casperlundberg/pandas
|
0b671ad37ca31eebe60b8443496994b88bffd2e2
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from itertools import permutations
import struct
import numpy as np
import pytest
from pandas._libs import (
algos as libalgos,
hashtable as ht,
)
from pandas.compat import np_array_datetime64_compat
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_bool_dtype,
is_complex_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timedelta,
Timestamp,
date_range,
timedelta_range,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray
import pandas.core.common as com
class TestFactorize:
    """Tests for pd.factorize / algos.factorize across dtypes and options."""

    @pytest.mark.parametrize("sort", [True, False])
    def test_factorize(self, index_or_series_obj, sort):
        """Round-trip property: uniques.take(codes) reconstructs the object."""
        obj = index_or_series_obj
        result_codes, result_uniques = obj.factorize(sort=sort)
        constructor = Index
        if isinstance(obj, MultiIndex):
            constructor = MultiIndex.from_tuples
        expected_uniques = constructor(obj.unique())
        if sort:
            expected_uniques = expected_uniques.sort_values()
        # construct an integer ndarray so that
        # `expected_uniques.take(expected_codes)` is equal to `obj`
        expected_uniques_list = list(expected_uniques)
        expected_codes = [expected_uniques_list.index(val) for val in obj]
        expected_codes = np.asarray(expected_codes, dtype=np.intp)
        tm.assert_numpy_array_equal(result_codes, expected_codes)
        tm.assert_index_equal(result_uniques, expected_uniques)

    def test_series_factorize_na_sentinel_none(self):
        """na_sentinel=None keeps NaN as a regular unique (GH#35667)."""
        values = np.array([1, 2, 1, np.nan])
        ser = Series(values)
        codes, uniques = ser.factorize(na_sentinel=None)
        expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)
        expected_uniques = Index([1.0, 2.0, np.nan])
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_index_equal(uniques, expected_uniques)

    def test_basic(self):
        """Object, int and float arrays; sort=True reorders codes + uniques."""
        codes, uniques = algos.factorize(["a", "b", "b", "a", "a", "c", "c", "c"])
        tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object))
        codes, uniques = algos.factorize(
            ["a", "b", "b", "a", "a", "c", "c", "c"], sort=True
        )
        exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = np.array(["a", "b", "c"], dtype=object)
        tm.assert_numpy_array_equal(uniques, exp)
        arr = np.arange(5, dtype=np.intp)[::-1]
        codes, uniques = algos.factorize(arr)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype)
        tm.assert_numpy_array_equal(uniques, exp)
        codes, uniques = algos.factorize(arr, sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype)
        tm.assert_numpy_array_equal(uniques, exp)
        arr = np.arange(5.0)[::-1]
        codes, uniques = algos.factorize(arr)
        exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype)
        tm.assert_numpy_array_equal(uniques, exp)
        codes, uniques = algos.factorize(arr, sort=True)
        exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype)
        tm.assert_numpy_array_equal(uniques, exp)

    def test_mixed(self):
        """Mixed-type object Series; NaN maps to the -1 sentinel."""
        # doc example reshaping.rst
        x = Series(["A", "A", np.nan, "B", 3.14, np.inf])
        codes, uniques = algos.factorize(x)
        exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = Index(["A", "B", 3.14, np.inf])
        tm.assert_index_equal(uniques, exp)
        codes, uniques = algos.factorize(x, sort=True)
        exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = Index([3.14, np.inf, "A", "B"])
        tm.assert_index_equal(uniques, exp)

    def test_datelike(self):
        """datetime64, Period and timedelta64 values factorize to typed indexes."""
        # M8
        v1 = Timestamp("20130101 09:00:00.00004")
        v2 = Timestamp("20130101")
        x = Series([v1, v1, v1, v2, v2, v1])
        codes, uniques = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = DatetimeIndex([v1, v2])
        tm.assert_index_equal(uniques, exp)
        codes, uniques = algos.factorize(x, sort=True)
        exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        exp = DatetimeIndex([v2, v1])
        tm.assert_index_equal(uniques, exp)
        # period
        v1 = Period("201302", freq="M")
        v2 = Period("201303", freq="M")
        x = Series([v1, v1, v1, v2, v2, v1])
        # periods are not 'sorted' as they are converted back into an index
        codes, uniques = algos.factorize(x)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
        codes, uniques = algos.factorize(x, sort=True)
        exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
        # GH 5986
        v1 = to_timedelta("1 day 1 min")
        v2 = to_timedelta("1 day")
        x = Series([v1, v2, v1, v1, v2, v2, v1])
        codes, uniques = algos.factorize(x)
        exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        tm.assert_index_equal(uniques, to_timedelta([v1, v2]))
        codes, uniques = algos.factorize(x, sort=True)
        exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, exp)
        tm.assert_index_equal(uniques, to_timedelta([v2, v1]))

    def test_factorize_nan(self):
        """NaN must map to na_sentinel even when it indexes out of range."""
        # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
        # rizer.factorize should not raise an exception if na_sentinel indexes
        # outside of reverse_indexer
        key = np.array([1, 2, 1, np.nan], dtype="O")
        rizer = ht.ObjectFactorizer(len(key))
        for na_sentinel in (-1, 20):
            ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
            expected = np.array([0, 1, 0, na_sentinel], dtype="int32")
            assert len(set(key)) == len(set(expected))
            tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
        # nan still maps to na_sentinel when sort=False
        key = np.array([0, np.nan, 1], dtype="O")
        na_sentinel = -1
        # TODO(wesm): unused?
        ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)  # noqa
        expected = np.array([2, -1, 0], dtype="int32")
        assert len(set(key)) == len(set(expected))
        tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)

    @pytest.mark.parametrize(
        "data, expected_codes, expected_uniques",
        [
            (
                [(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"],
                [0, 1, 2, 1, 3],
                [(1, 1), (1, 2), (0, 0), "nonsense"],
            ),
            (
                [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
                [0, 1, 2, 1, 3],
                [(1, 1), (1, 2), (0, 0), (1, 2, 3)],
            ),
            ([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]),
        ],
    )
    def test_factorize_tuple_list(self, data, expected_codes, expected_uniques):
        """Lists of tuples keep tuple identity in uniques (GH#9454)."""
        codes, uniques = pd.factorize(data)
        tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp))
        expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object)
        tm.assert_numpy_array_equal(uniques, expected_uniques_array)

    def test_complex_sorting(self):
        """Unorderable complex values raise TypeError, no segfault (GH#12666)."""
        x17 = np.array([complex(i) for i in range(17)], dtype=object)
        msg = (
            "unorderable types: .* [<>] .*"
            "|"  # the above case happens for numpy < 1.14
            "'[<>]' not supported between instances of .*"
        )
        with pytest.raises(TypeError, match=msg):
            algos.factorize(x17[::-1], sort=True)

    def test_numeric_dtype_factorize(self, any_real_numpy_dtype):
        """Uniques preserve the input's numeric dtype (GH#41132)."""
        dtype = any_real_numpy_dtype
        data = np.array([1, 2, 2, 1], dtype=dtype)
        expected_codes = np.array([0, 1, 1, 0], dtype=np.intp)
        expected_uniques = np.array([1, 2], dtype=dtype)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_float64_factorize(self, writable):
        """float64 arrays factorize correctly, including read-only buffers."""
        data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
        data.setflags(write=writable)
        expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
        expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_uint64_factorize(self, writable):
        """uint64 values beyond int64 range survive factorization."""
        data = np.array([2 ** 64 - 1, 1, 2 ** 64 - 1], dtype=np.uint64)
        data.setflags(write=writable)
        expected_codes = np.array([0, 1, 0], dtype=np.intp)
        expected_uniques = np.array([2 ** 64 - 1, 1], dtype=np.uint64)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_int64_factorize(self, writable):
        """int64 extremes (min/max) survive factorization."""
        data = np.array([2 ** 63 - 1, -(2 ** 63), 2 ** 63 - 1], dtype=np.int64)
        data.setflags(write=writable)
        expected_codes = np.array([0, 1, 0], dtype=np.intp)
        expected_uniques = np.array([2 ** 63 - 1, -(2 ** 63)], dtype=np.int64)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_string_factorize(self, writable):
        """Object string arrays factorize in first-appearance order."""
        data = np.array(["a", "c", "a", "b", "c"], dtype=object)
        data.setflags(write=writable)
        expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp)
        expected_uniques = np.array(["a", "c", "b"], dtype=object)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_object_factorize(self, writable):
        """None, np.nan and NaT in object arrays all map to the NA sentinel."""
        data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object)
        data.setflags(write=writable)
        expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
        expected_uniques = np.array(["a", "c", "b"], dtype=object)
        codes, uniques = algos.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    def test_datetime64_factorize(self, writable):
        """Read-only datetime64 arrays can be factorized (GH#35650)."""
        data = np.array([np.datetime64("2020-01-01T00:00:00.000")])
        data.setflags(write=writable)
        expected_codes = np.array([0], dtype=np.intp)
        expected_uniques = np.array(
            ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]"
        )
        codes, uniques = pd.factorize(data)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    @pytest.mark.parametrize("sort", [True, False])
    def test_factorize_rangeindex(self, sort):
        """RangeIndex uniques stay a RangeIndex (exact=True)."""
        # increasing -> sort doesn't matter
        ri = pd.RangeIndex.from_range(range(10))
        expected = np.arange(10, dtype=np.intp), ri
        result = algos.factorize(ri, sort=sort)
        tm.assert_numpy_array_equal(result[0], expected[0])
        tm.assert_index_equal(result[1], expected[1], exact=True)
        result = ri.factorize(sort=sort)
        tm.assert_numpy_array_equal(result[0], expected[0])
        tm.assert_index_equal(result[1], expected[1], exact=True)

    @pytest.mark.parametrize("sort", [True, False])
    def test_factorize_rangeindex_decreasing(self, sort):
        """Decreasing RangeIndex: sort=True reverses both codes and uniques."""
        ri = pd.RangeIndex.from_range(range(10))
        expected = np.arange(10, dtype=np.intp), ri
        ri2 = ri[::-1]
        expected = expected[0], ri2
        if sort:
            expected = expected[0][::-1], expected[1][::-1]
        result = algos.factorize(ri2, sort=sort)
        tm.assert_numpy_array_equal(result[0], expected[0])
        tm.assert_index_equal(result[1], expected[1], exact=True)
        result = ri2.factorize(sort=sort)
        tm.assert_numpy_array_equal(result[0], expected[0])
        tm.assert_index_equal(result[1], expected[1], exact=True)

    def test_deprecate_order(self):
        """Removed `order` keyword raises TypeError (GH#19727)."""
        # Test not valid once order keyword is removed.
        data = np.array([2 ** 63, 1, 2 ** 63], dtype=np.uint64)
        with pytest.raises(TypeError, match="got an unexpected keyword"):
            algos.factorize(data, order=True)
        with tm.assert_produces_warning(False):
            algos.factorize(data)

    @pytest.mark.parametrize(
        "data",
        [
            np.array([0, 1, 0], dtype="u8"),
            np.array([-(2 ** 63), 1, -(2 ** 63)], dtype="i8"),
            np.array(["__nan__", "foo", "__nan__"], dtype="object"),
        ],
    )
    def test_parametrized_factorize_na_value_default(self, data):
        """Values equal to a dtype's default NA stand-in are NOT treated as NA."""
        codes, uniques = algos.factorize(data)
        expected_uniques = data[[0, 1]]
        expected_codes = np.array([0, 1, 0], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    @pytest.mark.parametrize(
        "data, na_value",
        [
            (np.array([0, 1, 0, 2], dtype="u8"), 0),
            (np.array([1, 0, 1, 2], dtype="u8"), 1),
            (np.array([-(2 ** 63), 1, -(2 ** 63), 0], dtype="i8"), -(2 ** 63)),
            (np.array([1, -(2 ** 63), 1, 0], dtype="i8"), 1),
            (np.array(["a", "", "a", "b"], dtype=object), "a"),
            (np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
            (np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
        ],
    )
    def test_parametrized_factorize_na_value(self, data, na_value):
        """An explicit na_value maps matching elements to the -1 sentinel."""
        codes, uniques = algos.factorize_array(data, na_value=na_value)
        expected_uniques = data[[1, 3]]
        expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(codes, expected_codes)
        tm.assert_numpy_array_equal(uniques, expected_uniques)

    @pytest.mark.parametrize("sort", [True, False])
    @pytest.mark.parametrize("na_sentinel", [-1, -10, 100])
    @pytest.mark.parametrize(
        "data, uniques",
        [
            (
                np.array(["b", "a", None, "b"], dtype=object),
                np.array(["b", "a"], dtype=object),
            ),
            (
                pd.array([2, 1, np.nan, 2], dtype="Int64"),
                pd.array([2, 1], dtype="Int64"),
            ),
        ],
        ids=["numpy_array", "extension_array"],
    )
    def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques):
        """Custom na_sentinel values are honored for numpy and EA inputs.

        BUG FIX: the original unpacked into `codes, uniques`, shadowing the
        `uniques` fixture parameter, so `expected_uniques` was derived from
        the factorize *result* instead of the fixture (partly tautological).
        The result now uses distinct names and is compared to the fixture.
        """
        result_codes, result_uniques = algos.factorize(
            data, sort=sort, na_sentinel=na_sentinel
        )
        if sort:
            expected_codes = np.array([1, 0, na_sentinel, 1], dtype=np.intp)
            expected_uniques = algos.safe_sort(uniques)
        else:
            expected_codes = np.array([0, 1, na_sentinel, 0], dtype=np.intp)
            expected_uniques = uniques
        tm.assert_numpy_array_equal(result_codes, expected_codes)
        if isinstance(data, np.ndarray):
            tm.assert_numpy_array_equal(result_uniques, expected_uniques)
        else:
            tm.assert_extension_array_equal(result_uniques, expected_uniques)

    @pytest.mark.parametrize(
        "data, expected_codes, expected_uniques",
        [
            (
                ["a", None, "b", "a"],
                np.array([0, 2, 1, 0], dtype=np.dtype("intp")),
                np.array(["a", "b", np.nan], dtype=object),
            ),
            (
                ["a", np.nan, "b", "a"],
                np.array([0, 2, 1, 0], dtype=np.dtype("intp")),
                np.array(["a", "b", np.nan], dtype=object),
            ),
        ],
    )
    def test_object_factorize_na_sentinel_none(
        self, data, expected_codes, expected_uniques
    ):
        """na_sentinel=None: missing object values become a real unique."""
        codes, uniques = algos.factorize(data, na_sentinel=None)
        tm.assert_numpy_array_equal(uniques, expected_uniques)
        tm.assert_numpy_array_equal(codes, expected_codes)

    @pytest.mark.parametrize(
        "data, expected_codes, expected_uniques",
        [
            (
                [1, None, 1, 2],
                np.array([0, 2, 0, 1], dtype=np.dtype("intp")),
                np.array([1, 2, np.nan], dtype="O"),
            ),
            (
                [1, np.nan, 1, 2],
                np.array([0, 2, 0, 1], dtype=np.dtype("intp")),
                np.array([1, 2, np.nan], dtype=np.float64),
            ),
        ],
    )
    def test_int_factorize_na_sentinel_none(
        self, data, expected_codes, expected_uniques
    ):
        """na_sentinel=None with ints: NaN joins uniques; dtype depends on NA kind."""
        codes, uniques = algos.factorize(data, na_sentinel=None)
        tm.assert_numpy_array_equal(uniques, expected_uniques)
        tm.assert_numpy_array_equal(codes, expected_codes)
class TestUnique:
    def test_ints(self):
        # unique() on an int ndarray returns an ndarray.
        arr = np.random.randint(0, 100, size=50)
        result = algos.unique(arr)
        assert isinstance(result, np.ndarray)

    def test_objects(self):
        # unique() on an object ndarray returns an ndarray.
        arr = np.random.randint(0, 100, size=50).astype("O")
        result = algos.unique(arr)
        assert isinstance(result, np.ndarray)

    def test_object_refcount_bug(self):
        # Regression: repeated unique() on the same object list must not
        # corrupt refcounts (would crash/leak); no assertion needed — the
        # test passes by not failing.
        lst = ["A", "B", "C", "D", "E"]
        for i in range(1000):
            len(algos.unique(lst))
    def test_on_index_object(self):
        # pd.unique on a repeated MultiIndex yields the de-duplicated values;
        # both sides are sorted in place before comparing since order is not
        # part of the contract here.
        mindex = MultiIndex.from_arrays(
            [np.arange(5).repeat(5), np.tile(np.arange(5), 5)]
        )
        expected = mindex.values
        expected.sort()
        mindex = mindex.repeat(2)
        result = pd.unique(mindex)
        result.sort()
        tm.assert_almost_equal(result, expected)
    def test_dtype_preservation(self, any_numpy_dtype):
        # GH 15442: Series.unique() must preserve the input dtype.
        # Pick sample data per dtype family; string/bytes dtypes are skipped.
        if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES):
            pytest.skip("skip string dtype")
        elif is_integer_dtype(any_numpy_dtype):
            data = [1, 2, 2]
            uniques = [1, 2]
        elif is_float_dtype(any_numpy_dtype):
            data = [1, 2, 2]
            uniques = [1.0, 2.0]
        elif is_complex_dtype(any_numpy_dtype):
            data = [complex(1, 0), complex(2, 0), complex(2, 0)]
            uniques = [complex(1, 0), complex(2, 0)]
        elif is_bool_dtype(any_numpy_dtype):
            data = [True, True, False]
            uniques = [True, False]
        elif is_object_dtype(any_numpy_dtype):
            data = ["A", "B", "B"]
            uniques = ["A", "B"]
        else:
            # datetime64[ns]/M8[ns]/timedelta64[ns]/m8[ns] tested elsewhere
            data = [1, 2, 2]
            uniques = [1, 2]
        result = Series(data, dtype=any_numpy_dtype).unique()
        expected = np.array(uniques, dtype=any_numpy_dtype)
        tm.assert_numpy_array_equal(result, expected)
    def test_datetime64_dtype_array_returned(self):
        # GH 9431: unique() of datetime-like input returns an M8[ns] ndarray
        # with first-appearance order, whether given an Index, Series or
        # raw ndarray.
        expected = np_array_datetime64_compat(
            [
                "2015-01-03T00:00:00.000000000+0000",
                "2015-01-01T00:00:00.000000000+0000",
            ],
            dtype="M8[ns]",
        )
        dt_index = to_datetime(
            [
                "2015-01-03T00:00:00.000000000",
                "2015-01-01T00:00:00.000000000",
                "2015-01-01T00:00:00.000000000",
            ]
        )
        result = algos.unique(dt_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Series(dt_index)
        result = algos.unique(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.unique(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_datetime_non_ns(self):
        # Non-nanosecond datetime64 input is converted to ns resolution.
        a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]")
        result = pd.unique(a)
        expected = np.array(["2000", "2001"], dtype="datetime64[ns]")
        tm.assert_numpy_array_equal(result, expected)

    def test_timedelta_non_ns(self):
        # Non-nanosecond timedelta64 input is converted to ns resolution.
        a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]")
        result = pd.unique(a)
        expected = np.array([2000000000000, 2001000000000], dtype="timedelta64[ns]")
        tm.assert_numpy_array_equal(result, expected)
    def test_timedelta64_dtype_array_returned(self):
        # GH 9431: unique() of timedelta input returns an m8[ns] ndarray in
        # first-appearance order for Index, Series and ndarray inputs alike.
        expected = np.array([31200, 45678, 10000], dtype="m8[ns]")
        td_index = to_timedelta([31200, 45678, 31200, 10000, 45678])
        result = algos.unique(td_index)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        s = Series(td_index)
        result = algos.unique(s)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
        arr = s.values
        result = algos.unique(arr)
        tm.assert_numpy_array_equal(result, expected)
        assert result.dtype == expected.dtype
    def test_uint64_overflow(self):
        # uint64 values above the int64 range must not overflow in unique().
        s = Series([1, 2, 2 ** 63, 2 ** 63], dtype=np.uint64)
        exp = np.array([1, 2, 2 ** 63], dtype=np.uint64)
        tm.assert_numpy_array_equal(algos.unique(s), exp)

    def test_nan_in_object_array(self):
        # A single NaN in an object array is kept as one unique value.
        duplicated_items = ["a", np.nan, "c", "c"]
        result = pd.unique(duplicated_items)
        expected = np.array(["a", np.nan, "c"], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
    def test_categorical(self):
        # GH 15939: unique() on Categorical input preserves categorical-ness.
        # we are expecting to return in the order
        # of appearance
        expected = Categorical(list("bac"))
        # we are expecting to return in the order
        # of the categories
        expected_o = Categorical(list("bac"), categories=list("abc"), ordered=True)
        # GH 15939
        c = Categorical(list("baabc"))
        result = c.unique()
        tm.assert_categorical_equal(result, expected)
        result = algos.unique(c)
        tm.assert_categorical_equal(result, expected)
        # ordered input keeps the declared category order
        c = Categorical(list("baabc"), ordered=True)
        result = c.unique()
        tm.assert_categorical_equal(result, expected_o)
        result = algos.unique(c)
        tm.assert_categorical_equal(result, expected_o)
        # Series of categorical dtype
        s = Series(Categorical(list("baabc")), name="foo")
        result = s.unique()
        tm.assert_categorical_equal(result, expected)
        result = pd.unique(s)
        tm.assert_categorical_equal(result, expected)
        # CI -> return CI
        ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc")))
        expected = CategoricalIndex(expected)
        result = ci.unique()
        tm.assert_index_equal(result, expected)
        result = pd.unique(ci)
        tm.assert_index_equal(result, expected)
    def test_datetime64tz_aware(self):
        # GH 15939
        # unique() must preserve the timezone: Series input yields a
        # DatetimeArray, Index input yields a DatetimeIndex — for both the
        # method form (.unique()) and the top-level pd.unique().
        result = Series(
            Index(
                [
                    Timestamp("20160101", tz="US/Eastern"),
                    Timestamp("20160101", tz="US/Eastern"),
                ]
            )
        ).unique()
        expected = DatetimeArray._from_sequence(
            np.array([Timestamp("2016-01-01 00:00:00-0500", tz="US/Eastern")])
        )
        tm.assert_extension_array_equal(result, expected)
        result = Index(
            [
                Timestamp("20160101", tz="US/Eastern"),
                Timestamp("20160101", tz="US/Eastern"),
            ]
        ).unique()
        expected = DatetimeIndex(
            ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
        )
        tm.assert_index_equal(result, expected)
        # same pair of cases via the top-level pd.unique()
        result = pd.unique(
            Series(
                Index(
                    [
                        Timestamp("20160101", tz="US/Eastern"),
                        Timestamp("20160101", tz="US/Eastern"),
                    ]
                )
            )
        )
        expected = DatetimeArray._from_sequence(
            np.array([Timestamp("2016-01-01", tz="US/Eastern")])
        )
        tm.assert_extension_array_equal(result, expected)
        result = pd.unique(
            Index(
                [
                    Timestamp("20160101", tz="US/Eastern"),
                    Timestamp("20160101", tz="US/Eastern"),
                ]
            )
        )
        expected = DatetimeIndex(
            ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
        )
        tm.assert_index_equal(result, expected)
    def test_order_of_appearance(self):
        # 9346
        # light testing of guarantee of order of appearance
        # these also are the doc-examples
        result = pd.unique(Series([2, 1, 3, 3]))
        tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype="int64"))
        result = pd.unique(Series([2] + [1] * 5))
        tm.assert_numpy_array_equal(result, np.array([2, 1], dtype="int64"))
        # datetimes: ndarray out for naive Series input
        result = pd.unique(Series([Timestamp("20160101"), Timestamp("20160101")]))
        expected = np.array(["2016-01-01T00:00:00.000000000"], dtype="datetime64[ns]")
        tm.assert_numpy_array_equal(result, expected)
        # tz-aware Index input keeps its Index type and timezone
        result = pd.unique(
            Index(
                [
                    Timestamp("20160101", tz="US/Eastern"),
                    Timestamp("20160101", tz="US/Eastern"),
                ]
            )
        )
        expected = DatetimeIndex(
            ["2016-01-01 00:00:00"], dtype="datetime64[ns, US/Eastern]", freq=None
        )
        tm.assert_index_equal(result, expected)
        # plain python list of strings -> object ndarray
        result = pd.unique(list("aabc"))
        expected = np.array(["a", "b", "c"], dtype=object)
        tm.assert_numpy_array_equal(result, expected)
        # categorical Series -> Categorical out
        result = pd.unique(Series(Categorical(list("aabc"))))
        expected = Categorical(list("abc"))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize(
        "arg ,expected",
        [
            (("1", "1", "2"), np.array(["1", "2"], dtype=object)),
            (("foo",), np.array(["foo"], dtype=object)),
        ],
    )
    def test_tuple_with_strings(self, arg, expected):
        # see GH 17108
        # a tuple of strings is treated element-wise, not as one scalar
        result = pd.unique(arg)
        tm.assert_numpy_array_equal(result, expected)
def test_obj_none_preservation(self):
# GH 20866
arr = np.array(["foo", None], dtype=object)
result = pd.unique(arr)
expected = np.array(["foo", None], dtype=object)
tm.assert_numpy_array_equal(result, expected, strict_nan=True)
def test_signed_zero(self):
# GH 21866
a = np.array([-0.0, 0.0])
result = pd.unique(a)
expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent
tm.assert_numpy_array_equal(result, expected)
    def test_different_nans(self):
        # GH 21866
        # create different nans from bit-patterns:
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # NaNs with distinct payload bits must still collapse to one entry
        a = np.array([NAN1, NAN2])  # NAN1 and NAN2 are equivalent
        result = pd.unique(a)
        expected = np.array([np.nan])
        tm.assert_numpy_array_equal(result, expected)
    def test_first_nan_kept(self):
        # GH 22295
        # create different nans from bit-patterns:
        bits_for_nan1 = 0xFFF8000000000001
        bits_for_nan2 = 0x7FF8000000000001
        NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # for both float64 and object arrays the surviving nan must be the
        # FIRST one seen — checked by recovering its exact bit pattern
        for el_type in [np.float64, object]:
            a = np.array([NAN1, NAN2], dtype=el_type)
            result = pd.unique(a)
            assert result.size == 1
            # use bit patterns to identify which nan was kept:
            result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0]
            assert result_nan_bits == bits_for_nan1
    def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2):
        # GH 22295
        # distinct null-like objects (e.g. None vs NaT vs nan) must remain
        # distinct after unique(), and the input array must not be mutated.
        if unique_nulls_fixture is unique_nulls_fixture2:
            # NOTE(review): a bare return makes this combination report as
            # "passed" rather than "skipped"; pytest.skip would be clearer.
            return  # skip it, values not unique
        a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
        result = pd.unique(a)
        assert result.size == 2
        assert a[0] is unique_nulls_fixture
        assert a[1] is unique_nulls_fixture2
class TestIsin:
    """Tests for pandas.core.algorithms.isin (membership testing)."""

    def test_invalid(self):
        # scalars are rejected on either side of isin()
        msg = (
            r"only list-like objects are allowed to be passed to isin\(\), "
            r"you passed a \[int\]"
        )
        with pytest.raises(TypeError, match=msg):
            algos.isin(1, 1)
        with pytest.raises(TypeError, match=msg):
            algos.isin(1, [1])
        with pytest.raises(TypeError, match=msg):
            algos.isin([1], 1)

    def test_basic(self):
        # same logical result regardless of container types on either side:
        # list, ndarray, Series, set — for both ints and strings
        result = algos.isin([1, 2], [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(np.array([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), [1])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), Series([1]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series([1, 2]), {1})
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(["a", "b"], ["a"])
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series(["a", "b"]), Series(["a"]))
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(Series(["a", "b"]), {"a"})
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
        # strings never match ints
        result = algos.isin(["a", "b"], [1])
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

    def test_i8(self):
        # datetime64/timedelta64 values compare via their i8 representation;
        # list, slice, and set forms of the values argument all agree
        arr = date_range("20130101", periods=3).values
        result = algos.isin(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        arr = timedelta_range("1 day", periods=3).values
        result = algos.isin(arr, [arr[0]])
        expected = np.array([True, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, arr[0:2])
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.isin(arr, set(arr[0:2]))
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
    @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
    def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
        # Anything but object and we get all-False shortcut
        dta = date_range("2013-01-01", periods=3)._values
        if dtype1 == "period[D]":
            # TODO: fix Series.view to get this on its own
            arr = dta.to_period("D")
        elif dtype1 == "M8[ns, UTC]":
            # TODO: fix Series.view to get this on its own
            arr = dta.tz_localize("UTC")
        else:
            arr = Series(dta.view("i8")).view(dtype1)._values
        comps = arr.view("i8").astype(dtype)
        result = algos.isin(comps, arr)
        expected = np.zeros(comps.shape, dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

    def test_large(self):
        # 2M elements exercises the hashtable path rather than any small-case
        # shortcut
        s = date_range("20000101", periods=2000000, freq="s").values
        result = algos.isin(s, s[0:2])
        expected = np.zeros(len(s), dtype=bool)
        expected[0] = True
        expected[1] = True
        tm.assert_numpy_array_equal(result, expected)

    def test_categorical_from_codes(self):
        # GH 16639
        Sd = Series(Categorical([1]).from_codes(vals, cats))
        St = Series(Categorical([1]).from_codes(np.array([0, 1]), cats))
        expected = np.array([True, True, False, True])
        result = algos.isin(Sd, St)
        tm.assert_numpy_array_equal(expected, result)

    def test_categorical_isin(self):
        # same as above but on raw Categorical rather than Series
        vals = np.array([0, 1, 2, 0])
        cats = ["a", "b", "c"]
        cat = Categorical([1]).from_codes(vals, cats)
        other = Categorical([1]).from_codes(np.array([0, 1]), cats)
        expected = np.array([True, True, False, True])
        result = algos.isin(cat, other)
        tm.assert_numpy_array_equal(expected, result)

    def test_same_nan_is_in(self):
        # GH 22160
        # nan is special, because from " a is b" doesn't follow "a == b"
        # at least, isin() should follow python's "np.nan in [nan] == True"
        # casting to -> np.float64 -> another float-object somewhere on
        # the way could lead jepardize this behavior
        comps = [np.nan]  # could be casted to float64
        values = [np.nan]
        expected = np.array([True])
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(expected, result)

    def test_same_nan_is_in_large(self):
        # https://github.com/pandas-dev/pandas/issues/22205
        # > 10^6 elements forces the large-input code path
        s = np.tile(1.0, 1_000_001)
        s[0] = np.nan
        result = algos.isin(s, [np.nan, 1])
        expected = np.ones(len(s), dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

    def test_same_nan_is_in_large_series(self):
        # https://github.com/pandas-dev/pandas/issues/22205
        s = np.tile(1.0, 1_000_001)
        series = Series(s)
        # mutating the backing ndarray after construction — assumes the
        # Series holds a view on `s` (TODO confirm under copy-on-write)
        s[0] = np.nan
        result = series.isin([np.nan, 1])
        expected = Series(np.ones(len(s), dtype=bool))
        tm.assert_series_equal(result, expected)

    def test_same_object_is_in(self):
        # GH 22160
        # there could be special treatment for nans
        # the user however could define a custom class
        # with similar behavior, then we at least should
        # fall back to usual python's behavior: "a in [a] == True"
        class LikeNan:
            def __eq__(self, other) -> bool:
                return False

            def __hash__(self):
                return 0

        a, b = LikeNan(), LikeNan()
        # same object -> True
        tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
        # different objects -> False
        tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))

    def test_different_nans(self):
        # GH 22160
        # all nans are handled as equivalent
        comps = [float("nan")]
        values = [float("nan")]
        assert comps[0] is not values[0]  # different nan-objects
        # as list of python-objects:
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(np.array([True]), result)
        # as object-array:
        result = algos.isin(
            np.asarray(comps, dtype=object), np.asarray(values, dtype=object)
        )
        tm.assert_numpy_array_equal(np.array([True]), result)
        # as float64-array:
        result = algos.isin(
            np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)
        )
        tm.assert_numpy_array_equal(np.array([True]), result)

    def test_no_cast(self):
        # GH 22160
        # ensure 42 is not casted to a string
        comps = ["ss", 42]
        values = ["42"]
        expected = np.array([False, False])
        result = algos.isin(comps, values)
        tm.assert_numpy_array_equal(expected, result)

    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
    def test_empty(self, empty):
        # see gh-16991
        # an empty values argument yields all-False, never an error
        vals = Index(["a", "b"])
        expected = np.array([False, False])
        result = algos.isin(vals, empty)
        tm.assert_numpy_array_equal(expected, result)

    def test_different_nan_objects(self):
        # GH 22119
        # the string "nan" and complex nan do NOT match a float nan
        comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object)
        vals = np.array([float("nan")], dtype=object)
        expected = np.array([False, False, True])
        result = algos.isin(comps, vals)
        tm.assert_numpy_array_equal(expected, result)

    def test_different_nans_as_float64(self):
        # GH 21866
        # create different nans from bit-patterns,
        # these nans will land in different buckets in the hash-table
        # if no special care is taken
        NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
        NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
        assert NAN1 != NAN1
        assert NAN2 != NAN2
        # check that NAN1 and NAN2 are equivalent:
        arr = np.array([NAN1, NAN2], dtype=np.float64)
        lookup1 = np.array([NAN1], dtype=np.float64)
        result = algos.isin(arr, lookup1)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
        lookup2 = np.array([NAN2], dtype=np.float64)
        result = algos.isin(arr, lookup2)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

    def test_isin_int_df_string_search(self):
        """Comparing df with int`s (1,2) with a string at isin() ("1")
        -> should not match values because int 1 is not equal str 1"""
        df = DataFrame({"values": [1, 2]})
        result = df.isin(["1"])
        expected_false = DataFrame({"values": [False, False]})
        tm.assert_frame_equal(result, expected_false)

    def test_isin_nan_df_string_search(self):
        """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN")
        -> should not match values because np.nan is not equal str NaN"""
        df = DataFrame({"values": [np.nan, 2]})
        result = df.isin(["NaN"])
        expected_false = DataFrame({"values": [False, False]})
        tm.assert_frame_equal(result, expected_false)

    def test_isin_float_df_string_search(self):
        """Comparing df with floats (1.4245,2.32441) with a string at isin() ("1.4245")
        -> should not match values because float 1.4245 is not equal str 1.4245"""
        df = DataFrame({"values": [1.4245, 2.32441]})
        result = df.isin(["1.4245"])
        expected_false = DataFrame({"values": [False, False]})
        tm.assert_frame_equal(result, expected_false)
class TestValueCounts:
    """Tests for pandas.core.algorithms.value_counts / Series.value_counts."""

    def test_value_counts(self):
        np.random.seed(1234)
        from pandas.core.reshape.tile import cut

        arr = np.random.randn(4)
        factor = cut(arr, 4)
        # assert isinstance(factor, n)
        result = algos.value_counts(factor)
        breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
        index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
        expected = Series([1, 1, 1, 1], index=index)
        tm.assert_series_equal(result.sort_index(), expected.sort_index())

    def test_value_counts_bins(self):
        # plain list input with the bins argument buckets into intervals
        s = [1, 2, 3, 4]
        result = algos.value_counts(s, bins=1)
        expected = Series([4], index=IntervalIndex.from_tuples([(0.996, 4.0)]))
        tm.assert_series_equal(result, expected)
        result = algos.value_counts(s, bins=2, sort=False)
        expected = Series(
            [2, 2], index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)])
        )
        tm.assert_series_equal(result, expected)

    def test_value_counts_dtypes(self):
        # 1 and 1.0 are counted together for numeric input, but kept
        # separate from the string "1" in object input
        result = algos.value_counts([1, 1.0])
        assert len(result) == 1
        result = algos.value_counts([1, 1.0], bins=1)
        assert len(result) == 1
        result = algos.value_counts(Series([1, 1.0, "1"]))  # object
        assert len(result) == 2
        msg = "bins argument only works with numeric data"
        with pytest.raises(TypeError, match=msg):
            algos.value_counts(["1", 1], bins=1)

    def test_value_counts_nat(self):
        # NaT is excluded by default and included with dropna=False
        td = Series([np.timedelta64(10000), NaT], dtype="timedelta64[ns]")
        dt = to_datetime(["NaT", "2014-01-01"])
        for s in [td, dt]:
            vc = algos.value_counts(s)
            vc_with_na = algos.value_counts(s, dropna=False)
            assert len(vc) == 1
            assert len(vc_with_na) == 2
        exp_dt = Series({Timestamp("2014-01-01 00:00:00"): 1})
        tm.assert_series_equal(algos.value_counts(dt), exp_dt)
        # TODO same for (timedelta)

    def test_value_counts_datetime_outofbounds(self):
        # GH 13663
        # datetimes beyond the Timestamp range stay as python datetime objects
        s = Series(
            [
                datetime(3000, 1, 1),
                datetime(5000, 1, 1),
                datetime(5000, 1, 1),
                datetime(6000, 1, 1),
                datetime(3000, 1, 1),
                datetime(3000, 1, 1),
            ]
        )
        res = s.value_counts()
        exp_index = Index(
            [datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)],
            dtype=object,
        )
        exp = Series([3, 2, 1], index=exp_index)
        tm.assert_series_equal(res, exp)
        # GH 12424
        res = to_datetime(Series(["2362-01-01", np.nan]), errors="ignore")
        exp = Series(["2362-01-01", np.nan], dtype=object)
        tm.assert_series_equal(res, exp)

    def test_categorical(self):
        s = Series(Categorical(list("aaabbc")))
        result = s.value_counts()
        expected = Series([3, 2, 1], index=CategoricalIndex(["a", "b", "c"]))
        tm.assert_series_equal(result, expected, check_index_type=True)
        # preserve order?
        s = s.cat.as_ordered()
        result = s.value_counts()
        expected.index = expected.index.as_ordered()
        tm.assert_series_equal(result, expected, check_index_type=True)

    def test_categorical_nans(self):
        s = Series(Categorical(list("aaaaabbbcc")))  # 4,3,2,1 (nan)
        s.iloc[1] = np.nan
        result = s.value_counts()
        expected = Series(
            [4, 3, 2],
            index=CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c"]),
        )
        tm.assert_series_equal(result, expected, check_index_type=True)
        result = s.value_counts(dropna=False)
        expected = Series([4, 3, 2, 1], index=CategoricalIndex(["a", "b", "c", np.nan]))
        tm.assert_series_equal(result, expected, check_index_type=True)
        # out of order
        s = Series(
            Categorical(list("aaaaabbbcc"), ordered=True, categories=["b", "a", "c"])
        )
        s.iloc[1] = np.nan
        result = s.value_counts()
        expected = Series(
            [4, 3, 2],
            index=CategoricalIndex(
                ["a", "b", "c"], categories=["b", "a", "c"], ordered=True
            ),
        )
        tm.assert_series_equal(result, expected, check_index_type=True)
        result = s.value_counts(dropna=False)
        expected = Series(
            [4, 3, 2, 1],
            index=CategoricalIndex(
                ["a", "b", "c", np.nan], categories=["b", "a", "c"], ordered=True
            ),
        )
        tm.assert_series_equal(result, expected, check_index_type=True)

    def test_categorical_zeroes(self):
        # keep the `d` category with 0
        s = Series(Categorical(list("bbbaac"), categories=list("abcd"), ordered=True))
        result = s.value_counts()
        expected = Series(
            [3, 2, 1, 0],
            index=Categorical(
                ["b", "a", "c", "d"], categories=list("abcd"), ordered=True
            ),
        )
        tm.assert_series_equal(result, expected, check_index_type=True)

    def test_dropna(self):
        # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
        tm.assert_series_equal(
            Series([True, True, False]).value_counts(dropna=True),
            Series([2, 1], index=[True, False]),
        )
        tm.assert_series_equal(
            Series([True, True, False]).value_counts(dropna=False),
            Series([2, 1], index=[True, False]),
        )
        tm.assert_series_equal(
            Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True),
            Series([3, 2], index=[True, False]),
        )
        tm.assert_series_equal(
            Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False),
            Series([5, 3, 2], index=[True, False, np.nan]),
        )
        tm.assert_series_equal(
            Series([10.3, 5.0, 5.0]).value_counts(dropna=True),
            Series([2, 1], index=[5.0, 10.3]),
        )
        tm.assert_series_equal(
            Series([10.3, 5.0, 5.0]).value_counts(dropna=False),
            Series([2, 1], index=[5.0, 10.3]),
        )
        tm.assert_series_equal(
            Series([10.3, 5.0, 5.0, None]).value_counts(dropna=True),
            Series([2, 1], index=[5.0, 10.3]),
        )
        result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False)
        expected = Series([3, 2, 1], index=[5.0, 10.3, np.nan])
        tm.assert_series_equal(result, expected)

    def test_value_counts_normalized(self):
        # GH12558
        # normalize=True returns relative frequencies, for several dtypes
        s = Series([1] * 2 + [2] * 3 + [np.nan] * 5)
        dtypes = (np.float64, object, "M8[ns]")
        for t in dtypes:
            s_typed = s.astype(t)
            result = s_typed.value_counts(normalize=True, dropna=False)
            expected = Series(
                [0.5, 0.3, 0.2], index=Series([np.nan, 2.0, 1.0], dtype=t)
            )
            tm.assert_series_equal(result, expected)
            result = s_typed.value_counts(normalize=True, dropna=True)
            expected = Series([0.6, 0.4], index=Series([2.0, 1.0], dtype=t))
            tm.assert_series_equal(result, expected)

    def test_value_counts_uint64(self):
        # counts above the int64 range must not overflow
        arr = np.array([2 ** 63], dtype=np.uint64)
        expected = Series([1], index=[2 ** 63])
        result = algos.value_counts(arr)
        tm.assert_series_equal(result, expected)
        arr = np.array([-1, 2 ** 63], dtype=object)
        expected = Series([1, 1], index=[-1, 2 ** 63])
        result = algos.value_counts(arr)
        tm.assert_series_equal(result, expected)
class TestDuplicated:
    """Tests for pandas.core.algorithms.duplicated and related unique paths."""

    def test_duplicated_with_nas(self):
        # nan participates in duplicate detection like any other value
        keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
        result = algos.duplicated(keys)
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep="first")
        expected = np.array([False, False, False, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep="last")
        expected = np.array([True, False, True, False, False, False])
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep=False)
        expected = np.array([True, False, True, True, False, True])
        tm.assert_numpy_array_equal(result, expected)
        # tuples containing nan as keys: every (x, y) combination twice
        keys = np.empty(8, dtype=object)
        for i, t in enumerate(
            zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2)
        ):
            keys[i] = t
        result = algos.duplicated(keys)
        falses = [False] * 4
        trues = [True] * 4
        expected = np.array(falses + trues)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep="last")
        expected = np.array(trues + falses)
        tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep=False)
        expected = np.array(trues + trues)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "case",
        [
            np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]),
            np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]),
            np.array(
                [
                    1 + 1j,
                    2 + 2j,
                    1 + 1j,
                    5 + 5j,
                    3 + 3j,
                    2 + 2j,
                    4 + 4j,
                    1 + 1j,
                    5 + 5j,
                    6 + 6j,
                ]
            ),
            np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object),
            np.array(
                [1, 2 ** 63, 1, 3 ** 5, 10, 2 ** 63, 39, 1, 3 ** 5, 7], dtype=np.uint64
            ),
        ],
    )
    def test_numeric_object_likes(self, case):
        # every parametrized array has the same duplication pattern, so one
        # set of expected masks covers int/float/complex/object/uint64
        exp_first = np.array(
            [False, False, True, False, False, True, False, True, True, False]
        )
        exp_last = np.array(
            [True, True, True, True, False, False, False, False, False, False]
        )
        exp_false = exp_first | exp_last
        res_first = algos.duplicated(case, keep="first")
        tm.assert_numpy_array_equal(res_first, exp_first)
        res_last = algos.duplicated(case, keep="last")
        tm.assert_numpy_array_equal(res_last, exp_last)
        res_false = algos.duplicated(case, keep=False)
        tm.assert_numpy_array_equal(res_false, exp_false)
        # index
        for idx in [Index(case), Index(case, dtype="category")]:
            res_first = idx.duplicated(keep="first")
            tm.assert_numpy_array_equal(res_first, exp_first)
            res_last = idx.duplicated(keep="last")
            tm.assert_numpy_array_equal(res_last, exp_last)
            res_false = idx.duplicated(keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)
        # series
        for s in [Series(case), Series(case, dtype="category")]:
            res_first = s.duplicated(keep="first")
            tm.assert_series_equal(res_first, Series(exp_first))
            res_last = s.duplicated(keep="last")
            tm.assert_series_equal(res_last, Series(exp_last))
            res_false = s.duplicated(keep=False)
            tm.assert_series_equal(res_false, Series(exp_false))

    def test_datetime_likes(self):
        # datetime/timedelta variants share one duplication pattern (with NaT
        # in the nan positions), mirroring test_numeric_object_likes
        dt = [
            "2011-01-01",
            "2011-01-02",
            "2011-01-01",
            "NaT",
            "2011-01-03",
            "2011-01-02",
            "2011-01-04",
            "2011-01-01",
            "NaT",
            "2011-01-06",
        ]
        td = [
            "1 days",
            "2 days",
            "1 days",
            "NaT",
            "3 days",
            "2 days",
            "4 days",
            "1 days",
            "NaT",
            "6 days",
        ]
        cases = [
            np.array([Timestamp(d) for d in dt]),
            np.array([Timestamp(d, tz="US/Eastern") for d in dt]),
            np.array([Period(d, freq="D") for d in dt]),
            np.array([np.datetime64(d) for d in dt]),
            np.array([Timedelta(d) for d in td]),
        ]
        exp_first = np.array(
            [False, False, True, False, False, True, False, True, True, False]
        )
        exp_last = np.array(
            [True, True, True, True, False, False, False, False, False, False]
        )
        exp_false = exp_first | exp_last
        for case in cases:
            res_first = algos.duplicated(case, keep="first")
            tm.assert_numpy_array_equal(res_first, exp_first)
            res_last = algos.duplicated(case, keep="last")
            tm.assert_numpy_array_equal(res_last, exp_last)
            res_false = algos.duplicated(case, keep=False)
            tm.assert_numpy_array_equal(res_false, exp_false)
            # index
            for idx in [
                Index(case),
                Index(case, dtype="category"),
                Index(case, dtype=object),
            ]:
                res_first = idx.duplicated(keep="first")
                tm.assert_numpy_array_equal(res_first, exp_first)
                res_last = idx.duplicated(keep="last")
                tm.assert_numpy_array_equal(res_last, exp_last)
                res_false = idx.duplicated(keep=False)
                tm.assert_numpy_array_equal(res_false, exp_false)
            # series
            for s in [
                Series(case),
                Series(case, dtype="category"),
                Series(case, dtype=object),
            ]:
                res_first = s.duplicated(keep="first")
                tm.assert_series_equal(res_first, Series(exp_first))
                res_last = s.duplicated(keep="last")
                tm.assert_series_equal(res_last, Series(exp_last))
                res_false = s.duplicated(keep=False)
                tm.assert_series_equal(res_false, Series(exp_false))

    def test_unique_index(self):
        # a unique index reports no duplicates regardless of backing type
        cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
        for case in cases:
            assert case.is_unique is True
            tm.assert_numpy_array_equal(
                case.duplicated(), np.array([False, False, False])
            )

    @pytest.mark.parametrize(
        "arr, uniques",
        [
            (
                [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
                [(0, 0), (0, 1), (1, 0), (1, 1)],
            ),
            (
                [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")],
                [("b", "c"), ("a", "b")],
            ),
            ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]),
        ],
    )
    def test_unique_tuples(self, arr, uniques):
        # https://github.com/pandas-dev/pandas/issues/16519
        # tuples remain whole objects; expected built via object-array fill
        # because np.array would otherwise unpack the tuples into a 2D array
        expected = np.empty(len(uniques), dtype=object)
        expected[:] = uniques
        result = pd.unique(arr)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "array,expected",
        [
            (
                [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j],
                # Should return a complex dtype in the future
                np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object),
            )
        ],
    )
    def test_unique_complex_numbers(self, array, expected):
        # GH 17927
        result = pd.unique(array)
        tm.assert_numpy_array_equal(result, expected)
class TestHashTable:
def test_string_hashtable_set_item_signature(self):
# GH#30419 fix typing in StringHashTable.set_item to prevent segfault
tbl = ht.StringHashTable()
tbl.set_item("key", 1)
assert tbl.get_item("key") == 1
with pytest.raises(TypeError, match="'key' has incorrect type"):
# key arg typed as string, not object
tbl.set_item(4, 6)
with pytest.raises(TypeError, match="'val' has incorrect type"):
tbl.get_item(4)
def test_lookup_nan(self, writable):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_add_signed_zeros(self):
# GH 21866 inconsistent hash-function for float64
# default hash-function would lead to different hash-buckets
# for 0.0 and -0.0 if there are more than 2^30 hash-buckets
# but this would mean 16GB
N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
m = ht.Float64HashTable(N)
m.set_item(0.0, 0)
m.set_item(-0.0, 0)
assert len(m) == 1 # 0.0 and -0.0 are equivalent
def test_add_different_nans(self):
# GH 21866 inconsistent hash-function for float64
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# default hash function would lead to different hash-buckets
# for NAN1 and NAN2 even if there are only 4 buckets:
m = ht.Float64HashTable()
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert len(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
xs = np.array([1, 2, 2 ** 63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_get_unique(self):
s = Series([1, 2, 2 ** 63, 2 ** 63], dtype=np.uint64)
exp = np.array([1, 2, 2 ** 63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
@pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
@pytest.mark.parametrize(
"htable, uniques, dtype, safely_resizes",
[
(ht.PyObjectHashTable, ht.ObjectVector, "object", False),
(ht.StringHashTable, ht.ObjectVector, "object", True),
(ht.Float64HashTable, ht.Float64Vector, "float64", False),
(ht.Int64HashTable, ht.Int64Vector, "int64", False),
(ht.Int32HashTable, ht.Int32Vector, "int32", False),
(ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
],
)
def test_vector_resize(
self, writable, htable, uniques, dtype, safely_resizes, nvals
):
# Test for memory errors after internal vector
# reallocations (GH 7157)
vals = np.array(np.random.randn(1000), dtype=dtype)
# GH 21688 ensures we can deal with read-only memory views
vals.setflags(write=writable)
# initialise instances; cannot initialise in parametrization,
# as otherwise external views would be held on the array (which is
# one of the things this test is checking)
htable = htable()
uniques = uniques()
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() sets an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (except for StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError, match="external reference.*"):
htable.get_labels(vals, uniques, 0, -1)
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
@pytest.mark.parametrize(
"htable, tm_dtype",
[
(ht.PyObjectHashTable, "String"),
(ht.StringHashTable, "String"),
(ht.Float64HashTable, "Float"),
(ht.Int64HashTable, "Int"),
(ht.UInt64HashTable, "UInt"),
],
)
def test_hashtable_unique(self, htable, tm_dtype, writable):
# output of maker has guaranteed unique elements
maker = getattr(tm, "make" + tm_dtype + "Index")
s = Series(maker(1000))
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
elif htable == ht.PyObjectHashTable:
# use different NaN types for object column
s.loc[500:502] = [np.nan, None, NaT]
# create duplicated selection
s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
s_duplicated.values.setflags(write=writable)
# drop_duplicates has own cython code (hash_table_func_helper.pxi)
# and is tested separately; keeps first occurrence like ht.unique()
expected_unique = s_duplicated.drop_duplicates(keep="first").values
result_unique = htable().unique(s_duplicated.values)
tm.assert_numpy_array_equal(result_unique, expected_unique)
# test return_inverse=True
# reconstruction can only succeed if the inverse is correct
result_unique, result_inverse = htable().unique(
s_duplicated.values, return_inverse=True
)
tm.assert_numpy_array_equal(result_unique, expected_unique)
reconstr = result_unique[result_inverse]
tm.assert_numpy_array_equal(reconstr, s_duplicated.values)
@pytest.mark.parametrize(
"htable, tm_dtype",
[
(ht.PyObjectHashTable, "String"),
(ht.StringHashTable, "String"),
(ht.Float64HashTable, "Float"),
(ht.Int64HashTable, "Int"),
(ht.UInt64HashTable, "UInt"),
],
)
def test_hashtable_factorize(self, htable, tm_dtype, writable):
# output of maker has guaranteed unique elements
maker = getattr(tm, "make" + tm_dtype + "Index")
s = Series(maker(1000))
if htable == ht.Float64HashTable:
# add NaN for float column
s.loc[500] = np.nan
elif htable == ht.PyObjectHashTable:
# use different NaN types for object column
s.loc[500:502] = [np.nan, None, NaT]
# create duplicated selection
s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True)
s_duplicated.values.setflags(write=writable)
na_mask = s_duplicated.isna().values
result_unique, result_inverse = htable().factorize(s_duplicated.values)
# drop_duplicates has own cython code (hash_table_func_helper.pxi)
# and is tested separately; keeps first occurrence like ht.factorize()
# since factorize removes all NaNs, we do the same here
expected_unique = s_duplicated.dropna().drop_duplicates().values
tm.assert_numpy_array_equal(result_unique, expected_unique)
# reconstruction can only succeed if the inverse is correct. Since
# factorize removes the NaNs, those have to be excluded here as well
result_reconstruct = result_unique[result_inverse[~na_mask]]
expected_reconstruct = s_duplicated.dropna().values
tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct)
    @pytest.mark.parametrize(
        "hashtable",
        [
            ht.PyObjectHashTable,
            ht.StringHashTable,
            ht.Float64HashTable,
            ht.Int64HashTable,
            ht.Int32HashTable,
            ht.UInt64HashTable,
        ],
    )
    def test_hashtable_large_sizehint(self, hashtable):
        """Constructing a table with a size_hint past the uint32 range must
        not overflow or crash (smoke test; the table is not used further)."""
        # GH 22729
        size_hint = np.iinfo(np.uint32).max + 1
        tbl = hashtable(size_hint=size_hint)  # noqa
def test_quantile():
    """Series input and raw ndarray input must produce identical quantiles."""
    probs = [0, 0.25, 0.5, 0.75, 1.0]
    ser = Series(np.random.randn(100))

    from_series = algos.quantile(ser, probs)
    from_ndarray = algos.quantile(ser.values, probs)
    tm.assert_almost_equal(from_series, from_ndarray)
def test_unique_label_indices():
    """unique_label_indices must agree with np.unique(..., return_index=True),
    ignoring -1 sentinel codes."""
    codes = np.random.randint(1, 1 << 10, 1 << 15).astype(np.intp)

    result = ht.unique_label_indices(codes)
    expected = np.unique(codes, return_index=True)[1]
    tm.assert_numpy_array_equal(result, expected, check_dtype=False)

    # sprinkle in -1 sentinels (missing labels): these must be skipped, so
    # drop the first slot of np.unique's index output (-1 sorts first)
    codes[np.random.choice(len(codes), 10)] = -1
    result = ht.unique_label_indices(codes)
    expected = np.unique(codes, return_index=True)[1][1:]
    tm.assert_numpy_array_equal(result, expected, check_dtype=False)
class TestRank:
    """Tests for the rank helpers (libalgos.rank_1d and algos.rank)."""

    @td.skip_if_no_scipy
    def test_scipy_compat(self):
        from scipy.stats import rankdata

        def _check(arr):
            # rank_1d leaves non-finite entries as NaN; emulate that with
            # rankdata by ranking with NaNs mapped to +inf, then masking
            mask = ~np.isfinite(arr)
            arr = arr.copy()
            result = libalgos.rank_1d(arr)
            arr[mask] = np.inf
            exp = rankdata(arr)
            exp[mask] = np.nan
            tm.assert_almost_equal(result, exp)

        _check(np.array([np.nan, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 3, np.nan]))
        _check(np.array([4.0, np.nan, 5.0, 5.0, 5.0, np.nan, 1, 2, 4.0, np.nan]))

    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
    def test_basic(self, writable, dtype):
        # ranks come back as float64 regardless of the integer input dtype;
        # also exercised on a read-only buffer via the writable fixture
        exp = np.array([1, 2], dtype=np.float64)

        data = np.array([1, 100], dtype=dtype)
        data.setflags(write=writable)
        ser = Series(data)
        result = algos.rank(ser)
        tm.assert_numpy_array_equal(result, exp)

    def test_uint64_overflow(self):
        # values beyond the int64 range must still rank correctly
        exp = np.array([1, 2], dtype=np.float64)

        for dtype in [np.float64, np.uint64]:
            s = Series([1, 2 ** 63], dtype=dtype)
            tm.assert_numpy_array_equal(algos.rank(s), exp)

    def test_too_many_ndims(self):
        # only 1-D and 2-D inputs are supported
        arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
        msg = "Array with ndim > 2 are not supported"
        with pytest.raises(TypeError, match=msg):
            algos.rank(arr)

    @pytest.mark.single
    @pytest.mark.high_memory
    def test_pct_max_many_rows(self):
        # GH 18271
        # pct ranks must reach exactly 1.0 even for more than 2**24 rows
        values = np.arange(2 ** 24 + 1)
        result = algos.rank(values, pct=True).max()
        assert result == 1

        values = np.arange(2 ** 25 + 2).reshape(2 ** 24 + 1, 2)
        result = algos.rank(values, pct=True).max()
        assert result == 1
def test_pad_backfill_object_segfault():
    """Empty object arrays must not segfault pad/backfill (regression test)."""
    empty = np.array([], dtype="O")
    single = np.array([datetime(2010, 12, 31)], dtype="O")

    for indexer in (libalgos.pad["object"], libalgos.backfill["object"]):
        # empty source: the single target value has nothing to map to
        result = indexer(empty, single)
        tm.assert_numpy_array_equal(result, np.array([-1], dtype=np.intp))

        # empty target: the result indexer is itself empty
        result = indexer(single, empty)
        tm.assert_numpy_array_equal(result, np.array([], dtype=np.intp))
class TestTseriesUtil:
    """Tests for the low-level pad/backfill reindexing indexers."""

    def test_combineFunc(self):
        pass

    def test_reindex(self):
        pass

    def test_isna(self):
        pass

    def test_groupby(self):
        pass

    def test_groupby_withnull(self):
        pass

    def test_backfill(self):
        # each target label maps to the position of the next source label
        # greater than or equal to it, or -1 when there is none
        source = Index([1, 5, 10])
        target = Index(list(range(12)))
        indexer = libalgos.backfill["int64_t"](source.values, target.values)
        expected = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.intp)
        tm.assert_numpy_array_equal(indexer, expected)

        # corner case: the ranges do not overlap at all
        source = Index([1, 4])
        target = Index(list(range(5, 10)))
        indexer = libalgos.backfill["int64_t"](source.values, target.values)
        expected = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
        tm.assert_numpy_array_equal(indexer, expected)

    def test_pad(self):
        # each target label maps to the position of the last source label
        # less than or equal to it, or -1 when there is none
        source = Index([1, 5, 10])
        target = Index(list(range(12)))
        indexer = libalgos.pad["int64_t"](source.values, target.values)
        expected = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(indexer, expected)

        # corner case: all target labels precede the source labels
        source = Index([5, 10])
        target = Index(np.arange(5))
        indexer = libalgos.pad["int64_t"](source.values, target.values)
        expected = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
        tm.assert_numpy_array_equal(indexer, expected)
def test_is_lexsorted():
    """A reverse-sorted second key must not be reported as lexsorted."""
    # Same data as the original 250-line literal, built programmatically:
    # the first key is [3]*31 + [2]*31 + [1]*31 + [0]*31 (descending blocks)
    # and the second key is 30..0 repeated four times.
    failure = [
        np.repeat(np.array([3, 2, 1, 0], dtype="int64"), 31),
        np.tile(np.arange(30, -1, -1, dtype="int64"), 4),
    ]

    assert not libalgos.is_lexsorted(failure)
def test_groupsort_indexer():
    """groupsort_indexer must match a stable argsort and np.lexsort."""
    first = np.random.randint(0, 1000, 100).astype(np.intp)
    second = np.random.randint(0, 1000, 100).astype(np.intp)

    # groupsort_indexer is stable, so compare against mergesort; it always
    # returns intp while np.argsort returns a plain int dtype
    result = libalgos.groupsort_indexer(first, 1000)[0]
    expected = np.argsort(first, kind="mergesort").astype(np.intp)
    tm.assert_numpy_array_equal(result, expected)

    # combining both keys into a single int key reproduces np.lexsort
    # (np.lexsort returns int; groupsort_indexer always returns intp)
    combined = first * 1000 + second
    result = libalgos.groupsort_indexer(combined, 1000000)[0]
    expected = np.lexsort((second, first)).astype(np.intp)
    tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
    """Infinity/NegInfinity sort consistently outside the extended reals."""
    # GH 13445
    # numpy's argsort can be unhappy if something is less than itself, so
    # the sentinel Infinity/NegInfinity objects define a self-consistent
    # total order that brackets the float extended real line.
    pos = libalgos.Infinity()
    neg = libalgos.NegInfinity()
    ref_nums = [neg, float("-inf"), -1e100, 0, 1e100, float("inf"), pos]

    # Infinity is >= everything and strictly > everything but itself
    assert all(pos >= x for x in ref_nums)
    assert all(pos > x or x is pos for x in ref_nums)
    assert pos >= pos and pos == pos
    assert not pos < pos and not pos > pos
    assert libalgos.Infinity() == libalgos.Infinity()
    assert not libalgos.Infinity() != libalgos.Infinity()

    # NegInfinity is the mirror image
    assert all(neg <= x for x in ref_nums)
    assert all(neg < x or x is neg for x in ref_nums)
    assert neg <= neg and neg == neg
    assert not neg < neg and not neg > neg
    assert libalgos.NegInfinity() == libalgos.NegInfinity()
    assert not libalgos.NegInfinity() != libalgos.NegInfinity()

    # every permutation must sort back to the reference order
    for perm in permutations(ref_nums):
        assert sorted(perm) == ref_nums

    # smoke tests: argsort on homogeneous sentinel arrays must not blow up
    np.array([libalgos.Infinity()] * 32).argsort()
    np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
    """Every ordered comparison against NaN is False; only != is True."""
    for sentinel in (libalgos.Infinity(), libalgos.NegInfinity()):
        assert not sentinel > np.nan
        assert not sentinel >= np.nan
        assert not sentinel < np.nan
        assert not sentinel <= np.nan
        assert not sentinel == np.nan
        assert sentinel != np.nan
def test_ensure_platform_int():
    """An array that is already intp must be passed through, not copied."""
    arr = np.arange(100, dtype=np.intp)
    assert libalgos.ensure_platform_int(arr) is arr
def test_int64_add_overflow():
    """checked_add_with_arr must raise OverflowError on int64 over/underflow,
    except at positions excluded by the NaN masks."""
    # see gh-14068
    msg = "Overflow in int64 addition"
    m = np.iinfo(np.int64).max
    n = np.iinfo(np.int64).min

    # scalar and array second operands overflowing at the top of the range...
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(np.array([m, m]), m)
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
    # ...and underflowing at the bottom
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(np.array([n, n]), n)
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
    # masking only the second slot still leaves the first one to overflow
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(
            np.array([m, m]), np.array([m, m]), arr_mask=np.array([False, True])
        )
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(
            np.array([m, m]), np.array([m, m]), b_mask=np.array([False, True])
        )
    with pytest.raises(OverflowError, match=msg):
        algos.checked_add_with_arr(
            np.array([m, m]),
            np.array([m, m]),
            arr_mask=np.array([False, True]),
            b_mask=np.array([False, True]),
        )
    # a NaN in the int array triggers a RuntimeWarning on top of the overflow
    with pytest.raises(OverflowError, match=msg):
        with tm.assert_produces_warning(RuntimeWarning):
            algos.checked_add_with_arr(np.array([m, m]), np.array([np.nan, m]))

    # Check that the nan boolean arrays override whether or not
    # the addition overflows. We don't check the result but just
    # the fact that an OverflowError is not raised.
    algos.checked_add_with_arr(
        np.array([m, m]), np.array([m, m]), arr_mask=np.array([True, True])
    )
    algos.checked_add_with_arr(
        np.array([m, m]), np.array([m, m]), b_mask=np.array([True, True])
    )
    algos.checked_add_with_arr(
        np.array([m, m]),
        np.array([m, m]),
        arr_mask=np.array([True, False]),
        b_mask=np.array([False, True]),
    )
class TestMode:
    """Tests for algos.mode across numeric, string, datelike, categorical
    and mixed inputs, plus Index inputs."""

    def test_no_mode(self):
        # empty input -> empty float64 Series with an empty int index
        exp = Series([], dtype=np.float64, index=Index([], dtype=int))
        tm.assert_series_equal(algos.mode([]), exp)

    def test_mode_single(self):
        # GH 15714
        # a single distinct value is its own mode whether it occurs once
        # or several times
        exp_single = [1]
        data_single = [1]

        exp_multi = [1]
        data_multi = [1, 1]

        for dt in np.typecodes["AllInteger"] + np.typecodes["Float"]:
            s = Series(data_single, dtype=dt)
            exp = Series(exp_single, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

            s = Series(data_multi, dtype=dt)
            exp = Series(exp_multi, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

        # plain (non-Series) list input goes through the same path
        exp = Series([1], dtype=int)
        tm.assert_series_equal(algos.mode([1]), exp)

        # with no repeats, every value is a mode
        exp = Series(["a", "b", "c"], dtype=object)
        tm.assert_series_equal(algos.mode(["a", "b", "c"]), exp)

    def test_number_mode(self):
        exp_single = [1]
        data_single = [1] * 5 + [2] * 3

        # two values tied for the highest count -> both are returned
        exp_multi = [1, 3]
        data_multi = [1] * 5 + [2] * 3 + [3] * 5

        for dt in np.typecodes["AllInteger"] + np.typecodes["Float"]:
            s = Series(data_single, dtype=dt)
            exp = Series(exp_single, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

            s = Series(data_multi, dtype=dt)
            exp = Series(exp_multi, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

    def test_strobj_mode(self):
        exp = ["b"]
        data = ["a"] * 2 + ["b"] * 3

        # single-character dtype "c" as well as str/object dtypes
        s = Series(data, dtype="c")
        exp = Series(exp, dtype="c")
        tm.assert_series_equal(algos.mode(s), exp)

        exp = ["bar"]
        data = ["foo"] * 2 + ["bar"] * 3
        for dt in [str, object]:
            s = Series(data, dtype=dt)
            exp = Series(exp, dtype=dt)
            tm.assert_series_equal(algos.mode(s), exp)

    def test_datelike_mode(self):
        # with no repeats, all values are returned (sorted)
        exp = Series(["1900-05-03", "2011-01-03", "2013-01-02"], dtype="M8[ns]")
        s = Series(["2011-01-03", "2013-01-02", "1900-05-03"], dtype="M8[ns]")
        tm.assert_series_equal(algos.mode(s), exp)

        exp = Series(["2011-01-03", "2013-01-02"], dtype="M8[ns]")
        s = Series(
            ["2011-01-03", "2013-01-02", "1900-05-03", "2011-01-03", "2013-01-02"],
            dtype="M8[ns]",
        )
        tm.assert_series_equal(algos.mode(s), exp)

    def test_timedelta_mode(self):
        exp = Series(["-1 days", "0 days", "1 days"], dtype="timedelta64[ns]")
        s = Series(["1 days", "-1 days", "0 days"], dtype="timedelta64[ns]")
        tm.assert_series_equal(algos.mode(s), exp)

        exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]")
        s = Series(
            ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
            dtype="timedelta64[ns]",
        )
        tm.assert_series_equal(algos.mode(s), exp)

    def test_mixed_dtype(self):
        # the most frequent value wins even in a mixed-type object column
        exp = Series(["foo"])
        s = Series([1, "foo", "foo"])
        tm.assert_series_equal(algos.mode(s), exp)

    def test_uint64_overflow(self):
        # values beyond the int64 range must survive the mode computation
        exp = Series([2 ** 63], dtype=np.uint64)
        s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
        tm.assert_series_equal(algos.mode(s), exp)

        exp = Series([1, 2 ** 63], dtype=np.uint64)
        s = Series([1, 2 ** 63], dtype=np.uint64)
        tm.assert_series_equal(algos.mode(s), exp)

    def test_categorical(self):
        # Categorical inputs return a Categorical and preserve categories;
        # both the free function and the Categorical.mode method are checked
        c = Categorical([1, 2])
        exp = c
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

        c = Categorical([1, "a", "a"])
        exp = Categorical(["a"], categories=[1, "a"])
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

        c = Categorical([1, 1, 2, 3, 3])
        exp = Categorical([1, 3], categories=[1, 2, 3])
        tm.assert_categorical_equal(algos.mode(c), exp)
        tm.assert_categorical_equal(c.mode(), exp)

    def test_index(self):
        # Index inputs come back as a Series of the mode values
        idx = Index([1, 2, 3])
        exp = Series([1, 2, 3], dtype=np.int64)
        tm.assert_series_equal(algos.mode(idx), exp)

        idx = Index([1, "a", "a"])
        exp = Series(["a"], dtype=object)
        tm.assert_series_equal(algos.mode(idx), exp)

        idx = Index([1, 1, 2, 3, 3])
        exp = Series([1, 3], dtype=np.int64)
        tm.assert_series_equal(algos.mode(idx), exp)

        exp = Series(["2 min", "1 day"], dtype="timedelta64[ns]")
        idx = Index(
            ["1 day", "1 day", "-1 day", "-1 day 2 min", "2 min", "2 min"],
            dtype="timedelta64[ns]",
        )
        tm.assert_series_equal(algos.mode(idx), exp)
class TestDiff:
    """Tests for algos.diff on datetime-like and low-precision int inputs."""

    @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
    def test_diff_datetimelike_nat(self, dtype):
        # NaT - NaT is NaT, not 0
        arr = np.arange(12).astype(np.int64).view(dtype).reshape(3, 4)
        arr[:, 2] = arr.dtype.type("NaT", "ns")
        result = algos.diff(arr, 1, axis=0)

        # consecutive rows of the arange data differ by 4 (the row stride);
        # the NaT column and the first (undefined) row come back as NaT
        expected = np.ones(arr.shape, dtype="timedelta64[ns]") * 4
        expected[:, 2] = np.timedelta64("NaT", "ns")
        expected[0, :] = np.timedelta64("NaT", "ns")

        tm.assert_numpy_array_equal(result, expected)

        # diffing the transpose along axis=1 must transpose the result
        result = algos.diff(arr.T, 1, axis=1)
        tm.assert_numpy_array_equal(result, expected.T)

    def test_diff_ea_axis(self):
        # extension arrays are 1-D, so axis=1 diff must be rejected
        dta = date_range("2016-01-01", periods=3, tz="US/Pacific")._data

        msg = "cannot diff DatetimeArray on axis=1"
        with pytest.raises(ValueError, match=msg):
            algos.diff(dta, 1, axis=1)

    @pytest.mark.parametrize("dtype", ["int8", "int16"])
    def test_diff_low_precision_int(self, dtype):
        # small ints are upcast to float32 (not float64) to hold the NaN
        arr = np.array([0, 1, 1, 0, 0], dtype=dtype)
        result = algos.diff(arr, 1)
        expected = np.array([np.nan, 1, 0, -1, 0], dtype="float32")
        tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("op", [np.array, pd.array])
def test_union_with_duplicates(op):
    # GH#36289
    # union_with_duplicates keeps each value as many times as its higher
    # count on either side (here: 3 and 1 twice each, 4 and 2 once)
    lvals = op([3, 1, 3, 4])
    rvals = op([2, 3, 1, 1])
    expected = op([3, 3, 1, 1, 4, 2])
    if isinstance(expected, np.ndarray):
        result = algos.union_with_duplicates(lvals, rvals)
        tm.assert_numpy_array_equal(result, expected)
    else:
        # the extension-array path currently emits a RuntimeWarning, which
        # this test deliberately asserts
        with tm.assert_produces_warning(RuntimeWarning):
            result = algos.union_with_duplicates(lvals, rvals)
        tm.assert_extension_array_equal(result, expected)
| 34.636769
| 88
| 0.551843
|
4a0d6f75ca7944ac917143cdc872e597b4a0303c
| 8,486
|
py
|
Python
|
pyrobolearn/utils/plotting/end_effector_realtime_FT_plot.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 2
|
2021-01-21T21:08:30.000Z
|
2022-03-29T16:45:49.000Z
|
pyrobolearn/utils/plotting/end_effector_realtime_FT_plot.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | null | null | null |
pyrobolearn/utils/plotting/end_effector_realtime_FT_plot.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 1
|
2020-09-29T21:25:39.000Z
|
2020-09-29T21:25:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Boyang Ti"
__copyright__ = "Copyright 2020, PyRoboLearn"
__credits__ = ["Boyang Ti"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Boyang Ti"
__email__ = "tiboyang@outlook.com"
__status__ = "Development"
from pyrobolearn.utils.plotting.plot import RealTimePlot
import pyrobolearn as prl
import numpy as np
from pyrobolearn.utils.transformation import *
from pyrobolearn.robots import Body
class EeFtRealTimePlot(RealTimePlot):
    """Real-time plot of the force and torque measured by a joint
    force/torque sensor at a robot's end effector.

    Two subplots are shown side by side ('Force' and 'Torque'); each of the
    six wrench components (Fx, Fy, Fz, Tx, Ty, Tz) can be enabled
    individually via the constructor flags.
    """

    def __init__(self, robot, sensor, forcex=False, forcey=False, forcez=False, torquex=False,
                 torquey=False, torquez=False, num_point=100, xlims=None, ylims=None,
                 suptitle='End effector Force and Torque', ticks=1, blit=True, interval=0.0001):
        """
        Initialize the real-time force/torque plot.

        Args:
            robot (Robot): robot whose end-effector wrench is plotted.
            sensor (JointForceTorqueSensor): sensor providing the readings.
            forcex, forcey, forcez (bool): if True, plot that force component.
            torquex, torquey, torquez (bool): if True, plot that torque
                component.
            num_point (int): number of points kept in the sliding window
                (clamped below to 10).
            xlims (tuple[float], None): x-axis limits; defaults to
                (0, num_point).
            ylims (tuple[float], None): y-axis limits; defaults to
                (-2000, 2000).
            suptitle (str): title of the whole figure.
            ticks (int): time-step interval for sampling real-time points.
            blit (bool): if True, only the line data is redrawn between
                frames (labels and axes are not updated), which is faster.
            interval (float): delay between animation frames.
        """
        if not isinstance(robot, prl.robots.Robot):
            raise TypeError("Expecting the given 'robot' to be an instance of `Robot`, but got instead: "
                            "{}".format(robot))
        if not isinstance(sensor, prl.robots.sensors.JointForceTorqueSensor):
            raise TypeError("Expecting the given 'sensor' to be an instance of `sensor`, but got instead: "
                            "{}".format(sensor))
        self._robot = robot
        self._sensor = sensor
        # one subplot per entry: forces on the left, torques on the right
        self.axis_ids = ['Force', 'Torque']

        # Set image layout
        nrows, ncols = 1, 1

        # Set the parameters we need to draw
        self._plot_Fx = bool(forcex)
        self._plot_Fy = bool(forcey)
        self._plot_Fz = bool(forcez)
        self._plot_Tx = bool(torquex)
        self._plot_Ty = bool(torquey)
        self._plot_Tz = bool(torquez)
        # count the enabled components: boolean-mask indexing keeps only
        # the True entries, so len(...) is the number of enabled flags
        states = np.array([self._plot_Fx, self._plot_Fy, self._plot_Fz, self._plot_Tx, self._plot_Ty, self._plot_Tz])
        self._num_states = len(states[states])

        # NOTE(review): axis_ids is hard-coded to two entries above, so the
        # two branches below always end up with ncols == 2 — confirm intended
        if len(self.axis_ids) == 0:
            raise ValueError("Expecting to plot at least something (force or torque)")
        if len(self.axis_ids) == 1:
            ncols = 1
        else:
            ncols = 2

        # set the point (sliding-window length, at least 10)
        self._num_points = num_point if num_point > 10 else 10

        # check the limits of the x and y axes
        if xlims is None:
            xlims = (0, self._num_points)
        if ylims is None:
            ylims = (-2000, 2000)

        super(EeFtRealTimePlot, self).__init__(nrows=nrows, ncols=ncols, xlims=xlims, ylims=ylims,
                                               titles=['Force', 'Torque'],
                                               suptitle=suptitle, ticks=ticks, blit=blit, interval=interval)

    def _init(self, axes):
        """
        Initialize the figure: create one Line2D per enabled component and
        the data buffers backing them.

        NOTE(review): the loop below runs once per subplot id, and on each
        pass appends every enabled line again (with linewidth index i), so
        each component ends up with two line objects. `_animate_data` fills
        the doubled slots in the same order, so both copies receive the same
        data — confirm this duplication is intended. Also note that legend()
        is called before any labeled line has been plotted on the first pass.

        :param axes: sequence of the two matplotlib axes ('Force', 'Torque')
        :return:
        """
        self._lines = []
        for i, axis_ids in enumerate(['Force', 'Torque']):
            axes[0].legend(loc='upper left')
            axes[1].legend(loc='upper left')
            # forces go on the left axes, torques on the right axes
            if self._plot_Fx:
                line, = axes[0].plot([], [], lw=self._linewidths[i], color='r', label='Fx')
                self._lines.append(line)
            if self._plot_Fy:
                line, = axes[0].plot([], [], lw=self._linewidths[i], color='y', label='Fy')
                self._lines.append(line)
            if self._plot_Fz:
                line, = axes[0].plot([], [], lw=self._linewidths[i], color='g', label='Fz')
                self._lines.append(line)
            if self._plot_Tx:
                line, = axes[1].plot([], [], lw=self._linewidths[i], color='m', label='Tx')
                self._lines.append(line)
            if self._plot_Ty:
                line, = axes[1].plot([], [], lw=self._linewidths[i], color='k', label='Ty')
                self._lines.append(line)
            if self._plot_Tz:
                line, = axes[1].plot([], [], lw=self._linewidths[i], color='b', label='Tz')
                self._lines.append(line)

        # one y-buffer per created line (2 subplot passes * enabled states)
        self._x = []
        length = len(self.axis_ids) * self._num_states
        self._ys = [[] for _ in range(length)]

    def _init_anim(self):
        """
        Init function (plot the background of each frame) that is passed to FuncAnimation. This has to be
        implemented in the child class.

        :return: the list of lines, each reset to empty data
        """
        for line in self._lines:
            line.set_data([], [])
        return self._lines

    def _set_line(self, line_idx, data, state_name):
        """
        Append the newest sample for one component to its buffer, trim the
        buffer to the sliding window, and push it into the matplotlib line.

        :param line_idx: index into self._lines / self._ys
        :param data: dict of samples sent through the pipe
        :param state_name: name of the state; select from Fx, Fy, Fz, Tx, Ty, Tz
        :return: the next line index (line_idx + 1)
        """
        self._ys[line_idx].append(data[state_name])
        # keep only the most recent num_points samples
        self._ys[line_idx] = self._ys[line_idx][-self._num_points:]
        self._lines[line_idx].set_data(self._x, self._ys[line_idx])
        line_idx += 1
        return line_idx

    def _animate_data(self, i, data):
        """
        Animate function that is passed to FuncAnimation. This has to be implemented in the child class.

        :param i: frame counter
        :param data: data that has been received from the pipe
        :return: list of object to update
        """
        # grow the shared x axis until the window is full
        if len(self._x) < self._num_points:
            self._x = range(len(self._x) + 1)
        # fill the line buffers in the same doubled order they were created
        # in _init (one pass per subplot id)
        k = 0
        for j in range(len(self.axis_ids)):
            if self._plot_Fx:
                k = self._set_line(line_idx=k, data=data, state_name='Fx')
            if self._plot_Fy:
                k = self._set_line(line_idx=k, data=data, state_name='Fy')
            if self._plot_Fz:
                k = self._set_line(line_idx=k, data=data, state_name='Fz')
            if self._plot_Tx:
                k = self._set_line(line_idx=k, data=data, state_name='Tx')
            if self._plot_Ty:
                k = self._set_line(line_idx=k, data=data, state_name='Ty')
            if self._plot_Tz:
                k = self._set_line(line_idx=k, data=data, state_name='Tz')
        return self._lines

    def _update(self):
        """
        This return the next data to be plotted; this has to be implemented in the child class.

        :return: data to be sent through the pipe and that have to be plotted. This will be given to `_animate_data`.
        """
        data = {}
        # no reading available yet: report zeros for every component
        if self._sensor.sense() is None:
            data['Fx'] = 0
            data['Fy'] = 0
            data['Fz'] = 0
            data['Tx'] = 0
            data['Ty'] = 0
            data['Tz'] = 0
            return data
        # NOTE(review): sense() is called once per enabled component below;
        # if sensing is expensive or stateful, a single cached reading might
        # be expected instead — confirm against the sensor implementation
        if self._plot_Fx:
            data['Fx'] = self._sensor.sense()[0]
        if self._plot_Fy:
            data['Fy'] = self._sensor.sense()[1]
        if self._plot_Fz:
            data['Fz'] = self._sensor.sense()[2]
        if self._plot_Tx:
            data['Tx'] = self._sensor.sense()[3]
        if self._plot_Ty:
            data['Ty'] = self._sensor.sense()[4]
        if self._plot_Tz:
            data['Tz'] = self._sensor.sense()[5]
        return data
if __name__ == '__main__':
    # Try to move the robot in the simulator
    # WARNING: DON'T FORGET TO CLOSE FIRST THE FIGURE THEN THE SIMULATOR OTHERWISE YOU WILL HAVE THE PLOTTING PROCESS
    # STILL RUNNING
    from itertools import count

    # create the simulator and a basic world containing a KUKA arm and a
    # purely visual target box (no collision body)
    sim = prl.simulators.Bullet()
    world = prl.worlds.BasicWorld(sim)
    robot = world.load_robot('kuka_iiwa')
    box = world.load_visual_box(position=[0.7, 0., 0.2], orientation=get_quaternion_from_rpy([0, 1.57, 0]),
                                dimensions=(0.2, 0.2, 0.2))
    box = Body(sim, body_id=box)

    # force/torque sensor attached to joint 5 of the arm
    sensor = prl.robots.sensors.JointForceTorqueSensor(sim, body_id=robot.id, joint_ids=5)

    # plot all six wrench components, sampling every 24 simulation steps
    plot = EeFtRealTimePlot(robot, sensor=sensor, forcex=True, forcey=True, forcez=True,
                            torquex=True, torquey=True, torquez=True, ticks=24)

    # main loop: refresh the plot and advance the simulation forever
    for t in count():
        plot.update()
        world.step(sim.dt)
| 39.840376
| 117
| 0.572119
|
4a0d6f85895f815da4820380ad3cf3facb229b31
| 250
|
py
|
Python
|
TRICOIN/PYTHON/sana.py
|
inishchith/getting-started-with-algorithms
|
ab686010d9c5913c5b94fa6efb068d4b9a09a6fb
|
[
"MIT"
] | 3
|
2019-02-12T18:35:10.000Z
|
2019-02-17T08:38:07.000Z
|
TRICOIN/PYTHON/sana.py
|
KJSCE-Codecell/getting-started-with-algorithms
|
ab686010d9c5913c5b94fa6efb068d4b9a09a6fb
|
[
"MIT"
] | null | null | null |
TRICOIN/PYTHON/sana.py
|
KJSCE-Codecell/getting-started-with-algorithms
|
ab686010d9c5913c5b94fa6efb068d4b9a09a6fb
|
[
"MIT"
] | null | null | null |
t = int(input())
for i in range(t):
n=int(input())
l=0
h=100000
while l<=h:
mid =(l+h)//2
r= (mid*(mid+1))//2
if r>n:
h=mid - 1
else:
ht=mid
l=mid+1
print(ht)
| 16.666667
| 27
| 0.364
|
4a0d6f9f14fc6f5f92cb6945f3ce51a6746948be
| 10,321
|
gyp
|
Python
|
syzygy/integration_tests/integration_tests.gyp
|
xswz8015/syzygy
|
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
|
[
"Apache-2.0"
] | 343
|
2015-01-07T05:58:44.000Z
|
2022-03-15T14:55:21.000Z
|
syzygy/integration_tests/integration_tests.gyp
|
xswz8015/syzygy
|
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
|
[
"Apache-2.0"
] | 61
|
2015-03-19T18:20:21.000Z
|
2019-10-23T12:58:23.000Z
|
syzygy/integration_tests/integration_tests.gyp
|
xswz8015/syzygy
|
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
|
[
"Apache-2.0"
] | 66
|
2015-01-20T15:35:05.000Z
|
2021-11-25T16:49:41.000Z
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# These are source files that are used for generating both:
# the integration_tests_dll and integration_tests_clang_dll.
# They contain the Asan test cases.
'integration_tests_common_source_files': [
'asan_interceptors_tests.cc',
'asan_page_protection_tests.cc',
'deferred_free_tests.cc',
'integration_tests_dll.cc',
],
# These files are used for generating the integration_tests_dll.
'integration_tests_other_files': [
'asan_interceptors_tests.h',
'asan_page_protection_tests.h',
'bb_entry_tests.h',
'bb_entry_tests.cc',
'behavior_tests.h',
'behavior_tests.cc',
'coverage_tests.h',
'coverage_tests.cc',
'deferred_free_tests.h',
'integration_tests_dll.def',
'integration_tests_dll.h',
'integration_tests_dll.rc',
'profile_tests.h',
'profile_tests.cc',
],
},
'targets': [
{
'target_name': 'integration_tests',
'type': 'executable',
'sources': [
'integration_tests.rc',
'instrument_integration_test.cc',
'<(src)/syzygy/testing/run_all_unittests.cc',
],
'dependencies': [
'crash_for_exception_harness',
'integration_tests_clang_dll',
'integration_tests_harness',
'<(src)/base/base.gyp:test_support_base',
'<(src)/syzygy/agent/asan/asan.gyp:syzyasan_rtl',
'<(src)/syzygy/core/core.gyp:core_unittest_utils',
'<(src)/syzygy/poirot/poirot.gyp:poirot_lib',
'<(src)/syzygy/testing/testing.gyp:testing_lib',
'<(src)/syzygy/trace/parse/parse.gyp:parse_lib',
'<(src)/syzygy/trace/agent_logger/agent_logger.gyp:agent_logger',
'<(src)/testing/gmock.gyp:gmock',
'<(src)/third_party/pcre/pcre.gyp:pcre_lib',
'<(src)/third_party/protobuf/protobuf.gyp:protobuf_lite_lib',
],
'conditions': [
['target_arch == "ia32"', {
'dependencies': [
'integration_tests_dll',
'<(src)/syzygy/agent/basic_block_entry/basic_block_entry.gyp:'
'basic_block_entry_client',
'<(src)/syzygy/agent/call_trace/call_trace.gyp:call_trace_client',
'<(src)/syzygy/agent/coverage/coverage.gyp:coverage_client',
'<(src)/syzygy/agent/profiler/profiler.gyp:profile_client',
'<(src)/syzygy/grinder/grinder.gyp:grinder_lib',
'<(src)/syzygy/instrument/instrument.gyp:instrument_lib',
'<(src)/syzygy/pe/pe.gyp:pe_unittest_utils',
'<(src)/syzygy/trace/common/common.gyp:trace_unittest_utils',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
],
}],
],
'msvs_settings': {
'VCLinkerTool': {
# This test binary is initially compiled without large address
# support. A second version of it that is LAA aware is created by
# another build step.
'LargeAddressAware': 1,
},
},
'defines': [
'SYZYGY_UNITTESTS_CHECK_MEMORY_MODEL=1',
'SYZYGY_UNITTESTS_USE_LONG_TIMEOUT=1',
],
},
{
'target_name': 'integration_tests_4g',
'type': 'none',
'msvs_cygwin_shell': 0,
'dependencies': ['integration_tests'],
'actions': [
{
'action_name': 'make_integration_tests_4g',
'inputs': [
'<(src)/syzygy/build/copy_laa.py',
'<(PRODUCT_DIR)/integration_tests.exe',
],
'outputs': ['<(PRODUCT_DIR)/integration_tests_4g.exe'],
'action': [
'<(python_exe)',
'<(src)/syzygy/build/copy_laa.py',
'--input=$(OutDir)\\integration_tests.exe',
'--output=$(OutDir)\\integration_tests_4g.exe',
'--overwrite',
],
},
],
},
{
'target_name': 'integration_tests_clang_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'dependencies': [
'<(src)/syzygy/pe/pe.gyp:export_dll',
'<(src)/syzygy/agent/asan/asan.gyp:syzyasan_rtl'
],
'actions': [
{
'action_name': 'make_integration_tests_clang',
'inputs': [
'<@(integration_tests_common_source_files)',
'allocator_shim.cc'
],
'outputs': [
'<(PRODUCT_DIR)/integration_tests_clang_dll.dll',
'<(PRODUCT_DIR)/integration_tests_clang_dll.pdb'
],
'action': [
'<(python_exe)',
'make_integration_tests_clang.py',
'--output-dir=<(PRODUCT_DIR)',
'--input-files=<(_inputs)',
'--target-name=integration_tests_clang_dll',
'--def-file=integration_tests_clang_dll.def'
],
},
],
},
{
'target_name': 'integration_tests_dll',
'type': 'loadable_module',
'sources': [
'<@(integration_tests_common_source_files)',
'<@(integration_tests_other_files)'
],
'dependencies': [
'<(src)/syzygy/pe/pe.gyp:export_dll',
'<(src)/syzygy/version/version.gyp:syzygy_version',
],
'msvs_settings': {
'VCLinkerTool': {
# Asan agent is compiled without large address spaces to allow a
# memory optimization on the shadow memory. Agents should run in both
# modes, thus in the long term, we should remove this.
# Disable support for large address spaces.
'LargeAddressAware': 1,
},
},
# We more or less want this to always be a release-style executable
# to facilitate instrumentation.
# We have to do this per configuration, as base.gypi specifies
# this per-config, which binds tighter than the defaults above.
'configurations': {
'Debug_Base': {
'msvs_settings': {
'VCLinkerTool': {
# This corresponds to /INCREMENTAL:NO. With incremental linking
# enabled, every function resolves to a location in a jump table
# which jumps to the function proper. This gets in the way of
# disassembly.
'LinkIncremental': '1',
# Ensure that the checksum present in the header of the binaries
# is set.
'SetChecksum': 'true',
},
'VCCLCompilerTool': {
'BasicRuntimeChecks': '0',
# Asan needs the application to be linked with the release static
# runtime library. Otherwise, memory allocation functions are
# wrapped and hide memory bugs like overflow/underflow.
'RuntimeLibrary': '0', # 0 = /MT (nondebug static)
# Disable the iterator debugging for this project. We need to do
# this because we link against the release version of the C
# runtime library, and the iterator debugging relies on some
# functions present only in the debug version of the library.
'PreprocessorDefinitions': [
'_HAS_ITERATOR_DEBUGGING=0',
'NDEBUG',
],
          # The DEBUG preprocessor flag has to be explicitly undefined in
# order to avoid using some code only available in the debug
# version of the runtime library (see comment above about that).
'AdditionalOptions': [
'/U_DEBUG'
]
},
},
},
'Common_Base': {
'msvs_settings': {
'VCLinkerTool': {
# This corresponds to /PROFILE, which ensures that the
# PDB file contains a FIXUP stream.
# TODO(chrisha): Move this to base.gypi so everything links
# with this flag.
'Profile': 'true',
},
},
},
},
},
{
'target_name': 'integration_tests_harness',
'type': 'executable',
'sources': [
'integration_tests_harness.cc',
],
'dependencies': [
'<(src)/base/base.gyp:base',
'<(src)/syzygy/common/common.gyp:common_lib',
],
'conditions': [
['target_arch == "ia32"', {
'dependencies': [
'integration_tests_dll',
],
},
],
],
'msvs_settings': {
'VCLinkerTool': {
# Asan agent is compiled without large address spaces to allow a
# memory optimization on the shadow memory. Agents should run in both
# modes, thus in the long term, we should remove this.
# Disable support for large address spaces.
'LargeAddressAware': 1,
},
},
},
{
'target_name': 'crash_for_exception_harness',
'type': 'executable',
'sources': [
'crash_for_exception_export.cc',
'integration_tests_harness.cc',
],
'dependencies': [
'<(src)/base/base.gyp:base',
'<(src)/syzygy/agent/asan/asan.gyp:syzyasan_rtl_lib',
'<(src)/syzygy/common/common.gyp:common_lib',
],
'conditions': [
['target_arch == "ia32"', {
'dependencies': [
'integration_tests_dll',
],
},
],
],
'msvs_settings': {
'VCLinkerTool': {
# Asan agent is compiled without large address spaces to allow a
# memory optimization on the shadow memory. Agents should run in both
# modes, thus in the long term, we should remove this.
# Disable support for large address spaces.
'LargeAddressAware': 1,
},
},
},
],
}
| 36.341549
| 79
| 0.571941
|
4a0d6fb082e8cfa8acbca81a683a43753c9c26eb
| 7,553
|
py
|
Python
|
modin/core/dataframe/pandas/exchange/dataframe_protocol/dataframe.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/core/dataframe/pandas/exchange/dataframe_protocol/dataframe.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/core/dataframe/pandas/exchange/dataframe_protocol/dataframe.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Dataframe exchange protocol implementation.
See more in https://data-apis.org/dataframe-protocol/latest/index.html.
Notes
-----
- Interpreting a raw pointer (as in ``Buffer.ptr``) is annoying and unsafe to
do in pure Python. It's more general but definitely less friendly than having
``to_arrow`` and ``to_numpy`` methods. So for the buffers which lack
``__dlpack__`` (e.g., because the column dtype isn't supported by DLPack),
this is worth looking at again.
"""
import collections
from typing import Any, Dict, Optional, Iterable, Sequence
import numpy as np
from modin.core.dataframe.base.exchange.dataframe_protocol.dataframe import (
ProtocolDataframe,
)
from modin.core.dataframe.pandas.dataframe.dataframe import PandasDataframe
from modin.utils import _inherit_docstrings
from .column import PandasProtocolColumn
@_inherit_docstrings(ProtocolDataframe)
class PandasProtocolDataframe(ProtocolDataframe):
"""
A data frame class, with only the methods required by the interchange protocol defined.
Instances of this (private) class are returned from ``modin.pandas.DataFrame.__dataframe__``
as objects with the methods and attributes defined on this class.
A "data frame" represents an ordered collection of named columns.
A column's "name" must be a unique string. Columns may be accessed by name or by position.
This could be a public data frame class, or an object with the methods and
attributes defined on this DataFrame class could be returned from the
``__dataframe__`` method of a public data frame class in a library adhering
to the dataframe interchange protocol specification.
Parameters
----------
df : PandasDataframe
A ``PandasDataframe`` object.
nan_as_null : bool, default:False
A keyword intended for the consumer to tell the producer
to overwrite null values in the data with ``NaN`` (or ``NaT``).
This currently has no effect; once support for nullable extension
dtypes is added, this value should be propagated to columns.
allow_copy : bool, default: True
A keyword that defines whether or not the library is allowed
to make a copy of the data. For example, copying data would be necessary
if a library supports strided buffers, given that this protocol
specifies contiguous buffers. Currently, if the flag is set to ``False``
and a copy is needed, a ``RuntimeError`` will be raised.
"""
def __init__(
self,
df: PandasDataframe,
nan_as_null: bool = False,
allow_copy: bool = True,
) -> None:
self._df = df
self._nan_as_null = nan_as_null
self._allow_copy = allow_copy
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
return PandasProtocolDataframe(
self._df, nan_as_null=nan_as_null, allow_copy=allow_copy
)
@property
def metadata(self) -> Dict[str, Any]:
return {"modin.index": self._df.index}
def num_columns(self) -> int:
return len(self._df.columns)
def num_rows(self) -> int:
return len(self._df.index)
def num_chunks(self) -> int:
return self._df._partitions.shape[0]
def column_names(self) -> Iterable[str]:
for col in self._df.columns:
yield col
def get_column(self, i: int) -> PandasProtocolColumn:
return PandasProtocolColumn(
self._df.mask(row_positions=None, col_positions=[i]),
allow_copy=self._allow_copy,
)
def get_column_by_name(self, name: str) -> PandasProtocolColumn:
return PandasProtocolColumn(
self._df.mask(row_positions=None, col_labels=[name]),
allow_copy=self._allow_copy,
)
def get_columns(self) -> Iterable[PandasProtocolColumn]:
for name in self._df.columns:
yield PandasProtocolColumn(
self._df.mask(row_positions=None, col_labels=[name]),
allow_copy=self._allow_copy,
)
def select_columns(self, indices: Sequence[int]) -> "PandasProtocolDataframe":
    """
    Create a new DataFrame keeping only the columns at the given positions.

    Parameters
    ----------
    indices : Sequence[int]
        Positional indices of the columns to keep.

    Returns
    -------
    PandasProtocolDataframe

    Raises
    ------
    ValueError
        If `indices` is not a sequence.
    """
    # BUG FIX: `collections.Sequence` was removed in Python 3.10; the ABC
    # lives in `collections.abc` (the `collections` alias only existed
    # through Python 3.9).
    if not isinstance(indices, collections.abc.Sequence):
        raise ValueError("`indices` is not a sequence")
    return PandasProtocolDataframe(
        self._df.mask(row_positions=None, col_positions=indices),
        allow_copy=self._allow_copy,
    )
def select_columns_by_name(self, names: Sequence[str]) -> "PandasProtocolDataframe":
    """
    Create a new DataFrame keeping only the columns with the given labels.

    Parameters
    ----------
    names : Sequence[str]
        Labels of the columns to keep.

    Returns
    -------
    PandasProtocolDataframe

    Raises
    ------
    ValueError
        If `names` is not a sequence.
    """
    # BUG FIX: `collections.Sequence` was removed in Python 3.10; use the
    # canonical `collections.abc.Sequence` ABC instead.
    if not isinstance(names, collections.abc.Sequence):
        raise ValueError("`names` is not a sequence")
    return PandasProtocolDataframe(
        self._df.mask(row_positions=None, col_labels=names),
        allow_copy=self._allow_copy,
    )
def get_chunks(
self, n_chunks: Optional[int] = None
) -> Iterable["PandasProtocolDataframe"]:
cur_n_chunks = self.num_chunks()
n_rows = self.num_rows()
if n_chunks is None or n_chunks == cur_n_chunks:
cum_row_lengths = np.cumsum([0] + self._df._row_lengths)
for i in range(len(cum_row_lengths) - 1):
yield PandasProtocolDataframe(
self._df.mask(
row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
col_positions=None,
),
allow_copy=self._allow_copy,
)
return
if n_chunks % cur_n_chunks != 0:
raise RuntimeError(
"The passed `n_chunks` must be a multiple of `self.num_chunks()`."
)
if n_chunks > n_rows:
raise RuntimeError(
"The passed `n_chunks` value is bigger than `self.num_rows()`."
)
chunksize = n_rows // n_chunks
new_lengths = [chunksize] * n_chunks
new_lengths[-1] = n_rows % n_chunks + new_lengths[-1]
new_partitions = self._df._partition_mgr_cls.map_axis_partitions(
0,
self._df._partitions,
lambda df: df,
keep_partitioning=False,
lengths=new_lengths,
)
new_df = self._df.__constructor__(
new_partitions,
self._df.index,
self._df.columns,
new_lengths,
self._df._column_widths,
)
cum_row_lengths = np.cumsum([0] + new_df._row_lengths)
for i in range(len(cum_row_lengths) - 1):
yield PandasProtocolDataframe(
new_df.mask(
row_positions=range(cum_row_lengths[i], cum_row_lengths[i + 1]),
col_positions=None,
),
allow_copy=self._allow_copy,
)
| 38.93299
| 96
| 0.65431
|
4a0d6fb3f03c357d3a728da97d25231249efea32
| 1,999
|
py
|
Python
|
app/handlers/users/commands.py
|
rdfsx/schedule_bot
|
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
|
[
"MIT"
] | 17
|
2020-12-26T18:23:44.000Z
|
2022-02-22T12:48:18.000Z
|
app/handlers/users/commands.py
|
rdfsx/schedule_bot
|
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
|
[
"MIT"
] | 6
|
2021-01-01T13:46:15.000Z
|
2022-03-24T11:57:13.000Z
|
app/handlers/users/commands.py
|
rdfsx/schedule_bot
|
7a0231e3bbb61ca4adec2a20a256fb35c35078ed
|
[
"MIT"
] | 2
|
2021-09-10T13:19:01.000Z
|
2022-01-13T18:57:57.000Z
|
from aiogram import types
from aiogram.dispatcher.filters import Command
from app.data.convert import sticker
from app.data.messages import hello_message, donuts
from app.keyboards.default import menu
from app.keyboards.inline import search_kb
from app.keyboards.inline.inline_buttons import search_teacher
from app.loader import dp
from app.states import States
@dp.message_handler(Command('reset'))
async def do_reset(message: types.Message):
await States.GROUP.set()
await message.answer('Найди свою группу:', reply_markup=search_kb)
@dp.message_handler(Command('prepods'))
async def get_prepods_command(message: types.Message):
await message.answer('Нажмите на кнопку ниже, чтобы посмотреть рейтинг, либо просто отправьте боту фамилию.',
reply_markup=search_teacher)
@dp.message_handler(Command('calls'))
async def get_sticker(message: types.Message):
await message.answer_sticker(sticker=sticker)
@dp.message_handler(Command('search'))
async def get_group_schedule(message: types.Message):
await message.answer('Нажмите на кнопку ниже и начинайте вводить или просто отправьте боту название группы.',
reply_markup=search_kb)
@dp.message_handler(Command('b'))
async def get_kb(message: types.Message):
await message.answer(hello_message, reply_markup=menu, disable_web_page_preview=True)
@dp.message_handler(Command('info'))
async def get_info(message: types.Message):
    """Send a short help text listing the bot's available commands."""
    # Typo fixes in the user-facing text: "Ботдаёт" -> "Бот даёт",
    # "возможнность" -> "возможность".
    text = [
        "Бот даёт возможность просматривать расписание групп и преподавателей ГГТУ Сухого.",
        "Доступны команды:",
        "/prepods - рейтинг и расписание преподавателей",
        "/calls - стикер с расписанием звонков",
        "/search - расписание чужой группы",
        "/start или /reset - сброс настроек"
    ]
    await message.answer("\n\n".join(text), reply_markup=menu)
@dp.message_handler(Command('donuts'))
async def get_donuts(message: types.Message):
await message.answer(donuts, reply_markup=menu)
| 35.070175
| 113
| 0.746373
|
4a0d6fc8e9adae98f76093908422e8f25626487e
| 2,207
|
py
|
Python
|
src/aspire/nfft/pynfft.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
src/aspire/nfft/pynfft.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
src/aspire/nfft/pynfft.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
import numpy as np
from pynfft.nfft import NFFT
from aspire.utils import ensure
from aspire.nfft import Plan
from aspire.nfft.utils import nextpow2
class PyNfftPlan(Plan):
@staticmethod
def epsilon_to_nfft_cutoff(epsilon):
    """Map a requested NUFFT precision to the smallest usable cutoff index."""
    # NOTE: These are obtained empirically. Should have a theoretical derivation.
    rel_errs = (6e-2, 2e-3, 2e-5, 2e-7, 3e-9, 4e-11, 4e-13, 0)
    # Return the 1-based index of the first cutoff whose relative error
    # beats the requested epsilon.
    for cutoff, rel_err in enumerate(rel_errs, start=1):
        if rel_err < epsilon:
            return cutoff
    # Mirror the original list-indexing failure mode when nothing matches.
    raise IndexError('list index out of range')
def __init__(self, sz, fourier_pts, epsilon=1e-15, **kwargs):
"""
A plan for non-uniform FFT (3D)
:param sz: A tuple indicating the geometry of the signal
:param fourier_pts: The points in Fourier space where the Fourier transform is to be calculated,
arranged as a 3-by-K array. These need to be in the range [-pi, pi] in each dimension.
:param epsilon: The desired precision of the NUFFT
"""
self.sz = sz
self.dim = len(sz)
self.fourier_pts = fourier_pts
self.num_pts = fourier_pts.shape[1]
self.epsilon = epsilon
self.cutoff = PyNfftPlan.epsilon_to_nfft_cutoff(epsilon)
self.multi_bandwith = tuple(2 * 2**nextpow2(self.sz))
# TODO - no other flags used in the MATLAB code other than these 2 are supported by the PyNFFT wrapper
self._flags = ('PRE_PHI_HUT', 'PRE_PSI')
self._plan = NFFT(
N=self.sz,
M=self.num_pts,
n=self.multi_bandwith,
m=self.cutoff,
flags=self._flags
)
self._plan.x = ((1./(2*np.pi)) * self.fourier_pts).T
self._plan.precompute()
def transform(self, signal):
ensure(signal.shape == self.sz, f'Signal to be transformed must have shape {self.sz}')
self._plan.f_hat = signal.astype('complex64')
f = self._plan.trafo()
if signal.dtype == np.float32:
f = f.astype('complex64')
return f
def adjoint(self, signal):
self._plan.f = signal.astype('complex64')
f_hat = self._plan.adjoint()
if signal.dtype == np.float32:
f_hat = f_hat.astype('complex64')
return f_hat
| 33.953846
| 110
| 0.620299
|
4a0d70ed28500ae0edbad8023f3ee20304c7c976
| 869
|
py
|
Python
|
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
|
JakubOrzol/dockerfiles
|
d04ead31d053dbe62b1e98b33e3a2852e335b41c
|
[
"MIT"
] | 203
|
2016-03-02T14:13:34.000Z
|
2022-03-30T06:08:56.000Z
|
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
|
Axonius/dockerfiles
|
f2135e9abb468ee8db339ec27b2ba737acbbaef6
|
[
"MIT"
] | 7,201
|
2018-12-24T17:14:17.000Z
|
2022-03-31T13:39:12.000Z
|
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
|
Axonius/dockerfiles
|
f2135e9abb468ee8db339ec27b2ba737acbbaef6
|
[
"MIT"
] | 94
|
2018-12-17T10:59:21.000Z
|
2022-03-29T12:59:30.000Z
|
# -*- coding: utf8 -*-
__author__ = 'Viktor Winkelmann'
from Queue import Queue
from Worker import Worker
class Pool:
    """A simple fire-and-forget thread pool built on a shared task queue."""

    def __init__(self, size):
        # Number of worker threads spawned per map_async() call.
        self.size = size
        self.workers = []
        self.tasks = Queue()

    def _removeDeadWorkers(self):
        # Drop workers whose threads have already finished.
        self.workers = [w for w in self.workers if w.isAlive()]

    def map_async(self, func, objects, callback):
        """Queue func(obj) for every obj in objects and start the workers.

        `callback` is forwarded to each Worker alongside the task.
        Raises if a previous batch is still running.
        """
        self._removeDeadWorkers()
        if not len(self.workers) == 0:
            raise Exception('ThreadPool is still working! Adding new jobs is not allowed!')

        # Renamed loop variables: the original shadowed the built-ins
        # `object` and `id`.
        for item in objects:
            self.tasks.put((func, item, callback))

        for worker_id in range(self.size):
            self.workers.append(Worker(worker_id, self.tasks))

        for worker in self.workers:
            worker.start()

    def join(self):
        """Block until every worker thread has terminated."""
        for worker in self.workers:
            worker.join()
| 27.15625
| 91
| 0.604143
|
4a0d7370b21e0839d3363819a2d7278027aee423
| 2,076
|
py
|
Python
|
fixtures/virtual_router_fixture.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 1
|
2017-06-13T04:42:34.000Z
|
2017-06-13T04:42:34.000Z
|
fixtures/virtual_router_fixture.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
fixtures/virtual_router_fixture.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
from netaddr import *
from contrailapi import ContrailVncApi
import vnc_api_test
class VirtualRouterBase(vnc_api_test.VncLibFixture):
'''Fixture to manage virtual router objects
Mandatory:
:param name : name of the device
:param virtual_router_type : virtual router type('embedded','hypervisor' etc)
'''
def __init__(self, *args, **kwargs):
super(VirtualRouterBase, self).__init__(*args, **kwargs)
self.name = args[0]
self.virtual_router_type = args[1]
self.vr = None
self.vnc_h = ContrailVncApi(self.vnc_api_h, self.logger)
# end __init__
def setUp(self):
super(VirtualRouterBase, self).setUp()
def cleanUp(self):
super(VirtualRouterBase, self).cleanUp()
def create(self):
pass
def delete(self):
pass
def update(self):
self.vnc_api_h.virtual_router_update(self.vr)
def read(self,id):
self.vr = self.vnc_api_h.virtual_router_read(id=id)
def update_virtual_router_type(self, vrouter_type=None):
    """Push a virtual-router type to the API, defaulting to this fixture's own type."""
    # A falsy (None/empty) argument falls back to the type the fixture
    # was created with; otherwise the explicit value wins.
    self.vnc_h.update_virtual_router_type(
        self.name, vrouter_type or self.virtual_router_type)
class VirtualRouterFixture(VirtualRouterBase):
def __init__(self, *args, **kwargs):
super(VirtualRouterFixture,self).__init__(*args, **kwargs)
def setUp(self):
super(VirtualRouterFixture, self).setUp()
vr_fq_name = ['default-global-system-config', self.name]
try:
self.vr = self.vnc_api_h.virtual_router_read(
fq_name=vr_fq_name)
self.logger.info('virtual router %s already present' % (
vr_fq_name))
except vnc_api_test.NoIdError:
self.vr = self.create_virtual_router()
self.update_virtual_router_type()
def cleanUp(self):
super(VirtualRouterFixture, self).cleanUp()
pass
# end VirtualRouterFixture
if __name__ == "__main__":
import pdb
pdb.set_trace()
| 28.054054
| 85
| 0.657033
|
4a0d748961c7215e5bff2fee06e0804ebad3469b
| 1,074
|
py
|
Python
|
Using NoSQL - MongoDB/ml.py
|
Geervanireddy16/online-bookstore-management
|
91da06bc311fa8747c72beda60a3aedea7e5dbf6
|
[
"MIT"
] | 3
|
2021-12-05T07:51:41.000Z
|
2022-03-30T06:33:18.000Z
|
Using NoSQL - MongoDB/ml.py
|
Geervanireddy16/online-bookstore-management
|
91da06bc311fa8747c72beda60a3aedea7e5dbf6
|
[
"MIT"
] | 1
|
2021-12-05T08:58:46.000Z
|
2021-12-05T08:58:46.000Z
|
Using NoSQL - MongoDB/ml.py
|
Geervanireddy16/online-bookstore-management
|
91da06bc311fa8747c72beda60a3aedea7e5dbf6
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
# from tabulate import tabulate
import numpy as np
from os import getenv
from dotenv import load_dotenv

load_dotenv()

# Connection settings come from the environment (.env); both default to
# None, in which case MongoClient falls back to localhost.
db_link = getenv('MONGO', None)
db_name = getenv('DB_NAME', None)

client = MongoClient(db_link)
db = client.get_database(db_name)

# Build a tiny labelled dataset: X holds book ids, y holds genre labels
# (0 = adventure, 1 = mystery).
X = []
y = []

booksData = db.books.find({'genre': 'adventure'})
for book in booksData:
    X.append(int(book['bookID']))
    y.append([0])

booksData1 = db.books.find({'genre': 'mystery'})
for book in booksData1:
    X.append(int(book['bookID']))
    y.append([1])

print(X)
# BUG FIX: np.append() requires two arguments (an array and values to
# append), so `np.append(X)` raised TypeError. Converting the list to an
# ndarray is what was intended here.
x = np.array(X)
print(x)

from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split

# 80% of the samples for training, 20% for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

# from sklearn.svm import SVC
# svclassifier = SVC(kernel='linear')
# svclassifier.fit(X_train, y_train)
| 24.976744
| 125
| 0.727188
|
4a0d75dc881f905c1e1ed5249ed73af30495d082
| 10,087
|
py
|
Python
|
hwtLib/amba/axiLite_comp/endpoint_arr_test.py
|
Nic30/hwtLib
|
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
|
[
"MIT"
] | 24
|
2017-02-23T10:00:50.000Z
|
2022-01-28T12:20:21.000Z
|
hwtLib/amba/axiLite_comp/endpoint_arr_test.py
|
Nic30/hwtLib
|
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
|
[
"MIT"
] | 32
|
2017-04-28T10:29:34.000Z
|
2021-04-27T09:16:43.000Z
|
hwtLib/amba/axiLite_comp/endpoint_arr_test.py
|
Nic30/hwtLib
|
52fd28023c4a25f64da17bb4d7c3089d5c7348f4
|
[
"MIT"
] | 8
|
2019-09-19T03:34:36.000Z
|
2022-01-21T06:56:58.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.constants import Time
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.structUtils import field_path_get_type
from hwt.pyUtils.arrayQuery import flatten
from hwt.synthesizer.typePath import TypePath
from hwtLib.amba.axiLite_comp.endpoint import AxiLiteEndpoint
from hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTC, \
AxiLiteEndpointDenseStartTC, AxiLiteEndpointDenseTC
from hwtLib.amba.constants import RESP_OKAY, RESP_SLVERR
from hwtLib.types.ctypes import uint32_t
from pyMathBitPrecise.bit_utils import mask
structTwoArr = HStruct(
(uint32_t[4], "field0"),
(uint32_t[4], "field1")
)
structTwoArr_str = """\
struct {
<Bits, 32bits, unsigned>[4] field0 // start:0x0(bit) 0x0(byte)
<Bits, 32bits, unsigned>[4] field1 // start:0x80(bit) 0x10(byte)
}"""
structTwoArr2 = HStruct(
(uint32_t[3], "field0"),
(uint32_t[4], "field1")
)
structTwoArr2_str = """\
struct {
<Bits, 32bits, unsigned>[3] field0 // start:0x0(bit) 0x0(byte)
<Bits, 32bits, unsigned>[4] field1 // start:0x60(bit) 0xc(byte)
}"""
structStructsInArray = HStruct(
(HStruct(
(uint32_t, "field0"),
(uint32_t, "field1")
)[4],
"arr"),
)
structStructsInArray_str = """\
struct {
struct {
<Bits, 32bits, unsigned> field0 // start:0x0(bit) 0x0(byte)
<Bits, 32bits, unsigned> field1 // start:0x20(bit) 0x4(byte)
}[4] arr // start:0x0(bit) 0x0(byte)
}"""
class AxiLiteEndpointArrayTC(AxiLiteEndpointTC):
STRUCT_TEMPLATE = structTwoArr
FIELD_ADDR = [0x0, 0x10]
def test_nop(self):
u = self.mySetUp(32)
MAGIC = 100
for i in range(8):
u.decoded.field0._ag.mem[i] = MAGIC + 1
u.decoded.field1._ag.mem[i] = 2 * MAGIC + 1
self.randomizeAll()
self.runSim(100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
for i in range(8):
self.assertValEqual(u.decoded.field0._ag.mem[i], MAGIC + 1)
self.assertValEqual(u.decoded.field1._ag.mem[i], 2 * MAGIC + 1)
def test_read(self):
u = self.mySetUp(32)
regs = self.regs
MAGIC = 100
for i in range(4):
u.decoded.field0._ag.mem[i] = MAGIC + i + 1
u.decoded.field1._ag.mem[i] = 2 * MAGIC + i + 1
regs.field0[i].read()
regs.field1[i].read()
self.randomizeAll()
self.runSim(2 * 8 * 100 * Time.ns)
self.assertValSequenceEqual(u.bus._ag.r.data, [
(MAGIC + 1, RESP_OKAY),
(2 * MAGIC + 1, RESP_OKAY),
(MAGIC + 2, RESP_OKAY),
(2 * MAGIC + 2, RESP_OKAY),
(MAGIC + 3, RESP_OKAY),
(2 * MAGIC + 3, RESP_OKAY),
(MAGIC + 4, RESP_OKAY),
(2 * MAGIC + 4, RESP_OKAY),
])
def test_write(self):
u = self.mySetUp(32)
regs = self.regs
MAGIC = 100
for i in range(4):
u.decoded.field0._ag.mem[i] = None
u.decoded.field1._ag.mem[i] = None
regs.field0[i].write(MAGIC + i + 1)
regs.field1[i].write(2 * MAGIC + i + 1)
self.randomizeAll()
self.runSim(2 * 8 * 100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
for i in range(4):
self.assertValEqual(u.decoded.field0._ag.mem[i],
MAGIC + i + 1, f"index={i:d}")
self.assertValEqual(u.decoded.field1._ag.mem[i],
2 * MAGIC + i + 1, f"index={i:d}")
def test_registerMap(self):
self.mySetUp(32)
s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)
self.assertEqual(s, structTwoArr_str)
class AxiLiteEndpointArray2TC(AxiLiteEndpointTC):
STRUCT_TEMPLATE = structTwoArr2
FIELD_ADDR = [0x0, 3*0x04]
def test_nop(self):
u = self.mySetUp(32)
MAGIC = 100
for i in range(4):
if i < 3:
u.decoded.field0._ag.mem[i] = MAGIC + 1
u.decoded.field1._ag.mem[i] = 2 * MAGIC + 1
self.randomizeAll()
self.runSim(100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
for i in range(4):
if i < 3:
self.assertValEqual(u.decoded.field0._ag.mem[i], MAGIC + 1)
self.assertValEqual(u.decoded.field1._ag.mem[i], 2 * MAGIC + 1)
def test_read(self):
u = self.mySetUp(32)
regs = self.regs
MAGIC = 100
for i in range(4):
if i < 3:
u.decoded.field0._ag.mem[i] = MAGIC + i + 1
regs.field0[i].read()
u.decoded.field1._ag.mem[i] = 2 * MAGIC + i + 1
regs.field1[i].read()
self.randomizeAll()
self.runSim(2 * 8 * 100 * Time.ns)
self.assertValSequenceEqual(u.bus._ag.r.data, [
(MAGIC + 1, RESP_OKAY),
(2 * MAGIC + 1, RESP_OKAY),
(MAGIC + 2, RESP_OKAY),
(2 * MAGIC + 2, RESP_OKAY),
(MAGIC + 3, RESP_OKAY),
(2 * MAGIC + 3, RESP_OKAY),
(2 * MAGIC + 4, RESP_OKAY),
])
def test_write(self):
u = self.mySetUp(32)
regs = self.regs
MAGIC = 100
for i in range(4):
if i < 3:
u.decoded.field0._ag.mem[i] = None
regs.field0[i].write(MAGIC + i + 1)
u.decoded.field1._ag.mem[i] = None
regs.field1[i].write(2 * MAGIC + i + 1)
self.randomizeAll()
self.runSim(2 * 8 * 100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
for i in range(4):
if i < 3:
self.assertValEqual(u.decoded.field0._ag.mem[i],
MAGIC + i + 1, f"index={i:d}")
self.assertValEqual(u.decoded.field1._ag.mem[i],
2 * MAGIC + i + 1, f"index={i:d}")
def test_registerMap(self):
self.mySetUp(32)
s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)
self.assertEqual(s, structTwoArr2_str)
class AxiLiteEndpointStructsInArrayTC(AxiLiteEndpointTC):
STRUCT_TEMPLATE = structStructsInArray
def mySetUp(self, data_width=32):
def shouldEnterFn(root: HdlType, field_path: TypePath):
return (True, isinstance(field_path_get_type(root, field_path), Bits))
u = AxiLiteEndpoint(self.STRUCT_TEMPLATE,
shouldEnterFn=shouldEnterFn)
self.u = u
self.DATA_WIDTH = data_width
u.DATA_WIDTH = self.DATA_WIDTH
self.compileSimAndStart(self.u, onAfterToRtl=self.mkRegisterMap)
return u
def test_nop(self):
u = self.mySetUp(32)
self.randomizeAll()
self.runSim(100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
for item in u.decoded.arr:
self.assertEmpty(item.field0._ag.dout)
self.assertEmpty(item.field1._ag.dout)
def test_registerMap(self):
self.mySetUp(32)
s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)
self.assertEqual(s, structStructsInArray_str)
def test_read(self):
u = self.mySetUp(32)
MAGIC = 100
MAGIC2 = 300
a = u.bus.ar._ag.create_addr_req
u.bus.ar._ag.data.extend([a(i * 0x4) for i in range(4 * 2 + 1)])
for i, a in enumerate(u.decoded.arr):
a.field0._ag.din.extend([MAGIC + i])
a.field1._ag.din.extend([MAGIC2 + i])
self.randomizeAll()
self.runSim(500 * Time.ns)
expected = list(flatten([[(MAGIC + i, RESP_OKAY),
(MAGIC2 + i, RESP_OKAY)]
for i in range(4)], level=1)
) + [(None, RESP_SLVERR)]
self.assertValSequenceEqual(u.bus.r._ag.data, expected)
def test_write(self):
u = self.mySetUp(32)
MAGIC = 100
MAGIC2 = 300
m = mask(32 // 8)
N = 4
a = u.bus.ar._ag.create_addr_req
u.bus.aw._ag.data.extend([a(i * 0x4) for i in range(N * 2 + 1)])
expected = [
[(MAGIC + i + 1, m) for i in range(N)],
[(MAGIC2 + i + 1, m) for i in range(N)]
]
u.bus.w._ag.data.extend(flatten(zip(expected[0], expected[1]),
level=1))
u.bus.w._ag.data.append((123, m))
self.randomizeAll()
self.runSim(800 * Time.ns)
for i, a in enumerate(u.decoded.arr):
# [index of field][index in arr][data index]
self.assertValSequenceEqual(a.field0._ag.dout, [expected[0][i][0]])
self.assertValSequenceEqual(a.field1._ag.dout, [expected[1][i][0]])
self.assertValSequenceEqual(u.bus.b._ag.data,
[RESP_OKAY for _ in range(2 * N)]
+ [RESP_SLVERR])
AxiLiteEndpointArrTCs = [
AxiLiteEndpointArrayTC,
AxiLiteEndpointArray2TC,
AxiLiteEndpointStructsInArrayTC,
]
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(AxiLiteEndpointArray2TC('test_read'))
suite.addTest(unittest.makeSuite(AxiLiteEndpointTC))
suite.addTest(unittest.makeSuite(AxiLiteEndpointDenseStartTC))
suite.addTest(unittest.makeSuite(AxiLiteEndpointDenseTC))
for tc in AxiLiteEndpointArrTCs:
suite.addTest(unittest.makeSuite(tc))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
# u = AxiLiteEndpoint(structStructsInArray,
# shouldEnterFn=lambda tmpl: True)
# u.DATA_WIDTH = 32
# print(to_rtl_str(u))
| 32.124204
| 82
| 0.552791
|
4a0d76aec811e227178982bf722fb8c7b3683e97
| 5,961
|
py
|
Python
|
taming/data/utils.py
|
vipermu/taming-transformers
|
d64977b4a0c3639798e997b2eaed057be0dee9e6
|
[
"MIT"
] | null | null | null |
taming/data/utils.py
|
vipermu/taming-transformers
|
d64977b4a0c3639798e997b2eaed057be0dee9e6
|
[
"MIT"
] | null | null | null |
taming/data/utils.py
|
vipermu/taming-transformers
|
d64977b4a0c3639798e997b2eaed057be0dee9e6
|
[
"MIT"
] | null | null | null |
import collections
import os
import tarfile
import urllib
import zipfile
from pathlib import Path
import numpy as np
import torch
from forks.taming_transformers.taming.data.helper_types import Annotation
from torch._six import string_classes
from torch.utils.data._utils.collate import np_str_obj_array_pattern, default_collate_err_msg_format
from tqdm import tqdm
def unpack(path):
    """Extract an archive (``.tar.gz``, ``.tar``, or ``.zip``) into its own directory.

    Raises NotImplementedError for any other file extension.

    NOTE(review): ``extractall`` trusts member names as stored in the
    archive; a malicious archive can write outside the target directory
    (path traversal). Only call this on archives from trusted sources, or
    pass an extraction filter on Python >= 3.12 — confirm the threat model.
    """
    if path.endswith("tar.gz"):
        with tarfile.open(path, "r:gz") as tar:
            tar.extractall(path=os.path.split(path)[0])
    elif path.endswith("tar"):
        with tarfile.open(path, "r:") as tar:
            tar.extractall(path=os.path.split(path)[0])
    elif path.endswith("zip"):
        with zipfile.ZipFile(path, "r") as f:
            f.extractall(path=os.path.split(path)[0])
    else:
        raise NotImplementedError("Unknown file extension: {}".format(
            os.path.splitext(path)[1]))
def reporthook(bar):
    """Build a ``urlretrieve`` reporthook that drives a tqdm progress bar."""
    def hook(b=1, bsize=1, tsize=None):
        if tsize is not None:
            bar.total = tsize
        # Advance the bar by the difference between total bytes downloaded
        # so far and the bar's current position.
        downloaded = b * bsize
        bar.update(downloaded - bar.n)
    return hook
def get_root(name):
base = "data/"
root = os.path.join(base, name)
os.makedirs(root, exist_ok=True)
return root
def is_prepared(root):
    """Return True once ``mark_prepared`` has been called for ``root``."""
    return (Path(root) / ".ready").exists()
def mark_prepared(root):
    """Record that ``root`` has been fully prepared (see ``is_prepared``)."""
    (Path(root) / ".ready").touch()
def prompt_download(file_, source, target_dir, content_dir=None):
targetpath = os.path.join(target_dir, file_)
while not os.path.exists(targetpath):
if content_dir is not None and os.path.exists(
os.path.join(target_dir, content_dir)):
break
print("Please download '{}' from '{}' to '{}'.".format(
file_, source, targetpath))
if content_dir is not None:
print("Or place its content into '{}'.".format(
os.path.join(target_dir, content_dir)))
input("Press Enter when done...")
return targetpath
def download_url(file_, url, target_dir):
targetpath = os.path.join(target_dir, file_)
os.makedirs(target_dir, exist_ok=True)
with tqdm(unit="B",
unit_scale=True,
unit_divisor=1024,
miniters=1,
desc=file_) as bar:
urllib.request.urlretrieve(url, targetpath, reporthook=reporthook(bar))
return targetpath
def download_urls(urls, target_dir):
paths = dict()
for fname, url in urls.items():
outpath = download_url(fname, url, target_dir)
paths[fname] = outpath
return paths
def quadratic_crop(x, bbox, alpha=1.0):
    """Crop a square of side ``alpha * max(w, h)`` centred on ``bbox``.

    ``bbox`` is (xmin, ymin, xmax, ymax). The image is reflect-padded
    whenever the square would extend past the borders.
    """
    im_h, im_w = x.shape[:2]
    box = np.clip(np.array(bbox, dtype=np.float32), 0, max(im_h, im_w))
    cx = 0.5 * (box[0] + box[2])
    cy = 0.5 * (box[1] + box[3])
    side = max(int(alpha * max(box[2] - box[0], box[3] - box[1])), 2)

    # How far the square sticks out past any border (negative -> it fits).
    overhang = -1 * min(cx - side, cy - side,
                        im_w - (cx + side), im_h - (cy + side))
    pad = int(np.ceil(overhang))
    if pad > 0:
        pad_spec = [[pad, pad], [pad, pad]] + [[0, 0]] * (len(x.shape) - 2)
        x = np.pad(x, pad_spec, "reflect")
        cx, cy = cx + pad, cy + pad

    xmin = int(cx - side / 2)
    ymin = int(cy - side / 2)
    return np.array(x[ymin:ymin + side, xmin:xmin + side, ...])
def custom_collate(batch):
r"""source: pytorch 1.9.0, only one modification to original code """
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(
default_collate_err_msg_format.format(elem.dtype))
return custom_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: custom_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(custom_collate(samples) for samples in zip(*batch)))
if isinstance(elem, collections.abc.Sequence) and isinstance(
elem[0], Annotation): # added
return batch # added
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError(
'each element in list of batch should be of equal size')
transposed = zip(*batch)
return [custom_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type))
| 35.694611
| 100
| 0.623889
|
4a0d774b312033f23ecb9abd216dd2f0e250c5d6
| 37,521
|
py
|
Python
|
ckan/controllers/group.py
|
SebastianEsp/ckan
|
635cc824d3d9544aa87564732a4c888183a52e00
|
[
"Apache-2.0"
] | null | null | null |
ckan/controllers/group.py
|
SebastianEsp/ckan
|
635cc824d3d9544aa87564732a4c888183a52e00
|
[
"Apache-2.0"
] | null | null | null |
ckan/controllers/group.py
|
SebastianEsp/ckan
|
635cc824d3d9544aa87564732a4c888183a52e00
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
import logging
import datetime
from urllib import urlencode
from pylons.i18n import get_lang
from six import string_types, text_type
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.lib.search as search
import ckan.model as model
import ckan.authz as authz
import ckan.lib.plugins
import ckan.plugins as plugins
from ckan.common import OrderedDict, c, config, request, _
log = logging.getLogger(__name__)
render = base.render
abort = base.abort
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
lookup_group_plugin = ckan.lib.plugins.lookup_group_plugin
lookup_group_controller = ckan.lib.plugins.lookup_group_controller
class GroupController(base.BaseController):
group_types = ['group']
# hooks for subclasses
def _group_form(self, group_type=None):
return lookup_group_plugin(group_type).group_form()
def _form_to_db_schema(self, group_type=None):
return lookup_group_plugin(group_type).form_to_db_schema()
def _db_to_form_schema(self, group_type=None):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
return lookup_group_plugin(group_type).db_to_form_schema()
def _setup_template_variables(self, context, data_dict, group_type=None):
if 'type' not in data_dict:
data_dict['type'] = group_type
return lookup_group_plugin(group_type).\
setup_template_variables(context, data_dict)
def _new_template(self, group_type):
return lookup_group_plugin(group_type).new_template()
def _index_template(self, group_type):
return lookup_group_plugin(group_type).index_template()
def _about_template(self, group_type):
return lookup_group_plugin(group_type).about_template()
def _read_template(self, group_type):
return lookup_group_plugin(group_type).read_template()
def _history_template(self, group_type):
return lookup_group_plugin(group_type).history_template()
def _edit_template(self, group_type):
return lookup_group_plugin(group_type).edit_template()
def _activity_template(self, group_type):
return lookup_group_plugin(group_type).activity_template()
def _admins_template(self, group_type):
return lookup_group_plugin(group_type).admins_template()
def _bulk_process_template(self, group_type):
return lookup_group_plugin(group_type).bulk_process_template()
# end hooks
def _replace_group_org(self, string):
''' substitute organization for group if this is an org'''
return string
def _action(self, action_name):
''' select the correct group/org action '''
return get_action(self._replace_group_org(action_name))
def _check_access(self, action_name, *args, **kw):
''' select the correct group/org check_access '''
return check_access(self._replace_group_org(action_name), *args, **kw)
def _render_template(self, template_name, group_type):
''' render the correct group/org template '''
return render(self._replace_group_org(template_name),
extra_vars={'group_type': group_type})
def _guess_group_type(self, expecting_name=False):
"""
Guess the type of group from the URL.
* The default url '/group/xyz' returns None
* group_type is unicode
* this handles the case where there is a prefix on the URL
(such as /data/organization)
"""
parts = [x for x in request.path.split('/') if x]
idx = -1
if expecting_name:
idx = -2
gt = parts[idx]
return gt
    def _ensure_controller_matches_group_type(self, id):
        """Abort with 404 unless *id* names an existing group whose type
        is served by this controller; return the group's type."""
        group = model.Group.get(id)
        if group is None:
            abort(404, _('Group not found'))
        # A real group reached through the wrong controller (e.g. an
        # organization requested via /group/...) is also a 404.
        if group.type not in self.group_types:
            abort(404, _('Incorrect group type'))
        return group.type
    @classmethod
    def add_group_type(cls, group_type):
        ''' Notify this controller that it is to be used for a particular
        group_type. (Called on plugin registration.)
        '''
        # NOTE(review): this appends to the class-level list, which may be
        # shared with subclasses that do not define their own group_types.
        cls.group_types.append(group_type)
    def index(self):
        """Render the paginated listing of all groups of this type."""
        group_type = self._guess_group_type()
        page = h.get_page_number(request.params) or 1
        items_per_page = 21
        context = {'model': model, 'session': model.Session,
                   'user': c.user, 'for_view': True,
                   'with_private': False}
        q = c.q = request.params.get('q', '')
        sort_by = c.sort_by_selected = request.params.get('sort')
        try:
            self._check_access('site_read', context)
            self._check_access('group_list', context)
        except NotAuthorized:
            abort(403, _('Not authorized to see this page'))
        # pass user info to context as needed to view private datasets of
        # orgs correctly
        if c.userobj:
            context['user_id'] = c.userobj.id
            context['user_is_admin'] = c.userobj.sysadmin
        try:
            # First query: the full (unpaged) result set, used only to
            # give the pager its total count.
            data_dict_global_results = {
                'all_fields': False,
                'q': q,
                'sort': sort_by,
                'type': group_type or 'group',
            }
            global_results = self._action('group_list')(
                context, data_dict_global_results)
        except ValidationError as e:
            if e.error_dict and e.error_dict.get('message'):
                msg = e.error_dict['message']
            else:
                msg = str(e)
            h.flash_error(msg)
            # Render an empty page rather than failing with a 500.
            c.page = h.Page([], 0)
            return render(self._index_template(group_type),
                          extra_vars={'group_type': group_type})
        # Second query: only the current page, but with full details.
        data_dict_page_results = {
            'all_fields': True,
            'q': q,
            'sort': sort_by,
            'type': group_type or 'group',
            'limit': items_per_page,
            'offset': items_per_page * (page - 1),
            'include_extras': True
        }
        page_results = self._action('group_list')(context,
                                                  data_dict_page_results)
        c.page = h.Page(
            collection=global_results,
            page=page,
            url=h.pager_url,
            items_per_page=items_per_page,
        )
        c.page.items = page_results
        return render(self._index_template(group_type),
                      extra_vars={'group_type': group_type})
    def read(self, id, limit=20):
        """Render a group's read page including its (paginated) datasets."""
        # Strip an optional '@revision' suffix before resolving the type.
        group_type = self._ensure_controller_matches_group_type(
            id.split('@')[0])
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'schema': self._db_to_form_schema(group_type=group_type),
                   'for_view': True}
        data_dict = {'id': id, 'type': group_type}
        # unicode format (decoded from utf8)
        c.q = request.params.get('q', '')
        try:
            # Do not query for the group datasets when dictizing, as they will
            # be ignored and get requested on the controller anyway
            data_dict['include_datasets'] = False
            # Do not query group members as they aren't used in the view
            data_dict['include_users'] = False
            c.group_dict = self._action('group_show')(context, data_dict)
            c.group = context['group']
        except (NotFound, NotAuthorized):
            abort(404, _('Group not found'))
        # if the user specified a group id, redirect to the group name
        if data_dict['id'] == c.group_dict['id'] and \
                data_dict['id'] != c.group_dict['name']:
            h.redirect_to(controller=group_type, action='read',
                          id=c.group_dict['name'])
        # Populate c.page and the search facets with the group's datasets.
        self._read(id, limit, group_type)
        return render(self._read_template(c.group_dict['type']),
                      extra_vars={'group_type': group_type})
    def _read(self, id, limit, group_type):
        ''' This is common code used by both read and bulk_process'''
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'schema': self._db_to_form_schema(group_type=group_type),
                   'for_view': True, 'extras_as_string': True}
        q = c.q = request.params.get('q', '')
        # Search within group
        if c.group_dict.get('is_organization'):
            fq = 'owner_org:"%s"' % c.group_dict.get('id')
        else:
            fq = 'groups:"%s"' % c.group_dict.get('name')
        c.description_formatted = \
            h.render_markdown(c.group_dict.get('description'))
        context['return_query'] = True
        page = h.get_page_number(request.params)
        # most search operations should reset the page counter:
        params_nopage = [(k, v) for k, v in request.params.items()
                         if k != 'page']
        sort_by = request.params.get('sort', None)
        def search_url(params):
            # Build a search URL for this group, preserving query params.
            controller = lookup_group_controller(group_type)
            action = 'bulk_process' if c.action == 'bulk_process' else 'read'
            url = h.url_for(controller=controller, action=action, id=id)
            params = [(k, v.encode('utf-8') if isinstance(v, string_types)
                       else str(v)) for k, v in params]
            return url + u'?' + urlencode(params)
        def drill_down_url(**by):
            return h.add_url_param(alternative_url=None,
                                   controller='group', action='read',
                                   extras=dict(id=c.group_dict.get('name')),
                                   new_params=by)
        c.drill_down_url = drill_down_url
        def remove_field(key, value=None, replace=None):
            controller = lookup_group_controller(group_type)
            return h.remove_url_param(key, value=value, replace=replace,
                                      controller=controller, action='read',
                                      extras=dict(id=c.group_dict.get('name')))
        c.remove_field = remove_field
        def pager_url(q=None, page=None):
            params = list(params_nopage)
            params.append(('page', page))
            return search_url(params)
        try:
            c.fields = []
            c.fields_grouped = {}
            search_extras = {}
            # Fold extra request params into the search query/facets;
            # 'ext_'-prefixed params pass straight through to the search.
            for (param, value) in request.params.items():
                if param not in ['q', 'page', 'sort'] \
                        and len(value) and not param.startswith('_'):
                    if not param.startswith('ext_'):
                        c.fields.append((param, value))
                        q += ' %s: "%s"' % (param, value)
                        if param not in c.fields_grouped:
                            c.fields_grouped[param] = [value]
                        else:
                            c.fields_grouped[param].append(value)
                    else:
                        search_extras[param] = value
            facets = OrderedDict()
            default_facet_titles = {'organization': _('Organizations'),
                                    'groups': _('Groups'),
                                    'tags': _('Tags'),
                                    'res_format': _('Formats'),
                                    'license_id': _('Licenses')}
            for facet in h.facets():
                if facet in default_facet_titles:
                    facets[facet] = default_facet_titles[facet]
                else:
                    facets[facet] = facet
            # Facet titles
            self._update_facet_titles(facets, group_type)
            c.facet_titles = facets
            data_dict = {
                'q': q,
                'fq': fq,
                'include_private': True,
                'facet.field': facets.keys(),
                'rows': limit,
                'sort': sort_by,
                'start': (page - 1) * limit,
                'extras': search_extras
            }
            # The form schema must not leak into package_search's context.
            context_ = dict((k, v) for (k, v) in context.items()
                            if k != 'schema')
            query = get_action('package_search')(context_, data_dict)
            c.page = h.Page(
                collection=query['results'],
                page=page,
                url=pager_url,
                item_count=query['count'],
                items_per_page=limit
            )
            c.group_dict['package_count'] = query['count']
            c.search_facets = query['search_facets']
            c.search_facets_limits = {}
            for facet in c.search_facets.keys():
                limit = int(request.params.get('_%s_limit' % facet,
                            config.get('search.facets.default', 10)))
                c.search_facets_limits[facet] = limit
            c.page.items = query['results']
            c.sort_by_selected = sort_by
        except search.SearchError as se:
            # Degrade gracefully: show the page with a query-error flag
            # instead of a 500.
            log.error('Group search error: %r', se.args)
            c.query_error = True
            c.page = h.Page(collection=[])
        self._setup_template_variables(context, {'id': id},
                                       group_type=group_type)
    def _update_facet_titles(self, facets, group_type):
        # Give IFacets plugins a chance to modify the facet titles.
        # NOTE(review): the rebinding of `facets` only takes effect for the
        # caller if plugins mutate the dict in place; the return value of
        # the last plugin is otherwise discarded -- confirm against IFacets.
        for plugin in plugins.PluginImplementations(plugins.IFacets):
            facets = plugin.group_facets(
                facets, group_type, None)
    def bulk_process(self, id):
        ''' Allow bulk processing of datasets for an organization. Make
        private/public or delete. For organization admins.'''
        group_type = self._ensure_controller_matches_group_type(
            id.split('@')[0])
        # check we are org admin
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'schema': self._db_to_form_schema(group_type=group_type),
                   'for_view': True, 'extras_as_string': True}
        data_dict = {'id': id, 'type': group_type}
        try:
            self._check_access('bulk_update_public', context, {'org_id': id})
            # Do not query for the group datasets when dictizing, as they will
            # be ignored and get requested on the controller anyway
            data_dict['include_datasets'] = False
            c.group_dict = self._action('group_show')(context, data_dict)
            c.group = context['group']
        except NotFound:
            abort(404, _('Group not found'))
        except NotAuthorized:
            abort(403, _('User %r not authorized to edit %s') % (c.user, id))
        if not c.group_dict['is_organization']:
            # FIXME: better error
            raise Exception('Must be an organization')
        # use different form names so that ie7 can be detected
        form_names = set(["bulk_action.public", "bulk_action.delete",
                          "bulk_action.private"])
        actions_in_form = set(request.params.keys())
        actions = form_names.intersection(actions_in_form)
        # If no action then just show the datasets
        if not actions:
            # unicode format (decoded from utf8)
            limit = 500
            self._read(id, limit, group_type)
            c.packages = c.page.items
            return render(self._bulk_process_template(group_type),
                          extra_vars={'group_type': group_type})
        # ie7 puts all buttons in form params but puts submitted one twice
        for key, value in dict(request.params.dict_of_lists()).items():
            if len(value) == 2:
                # IE7: the button that appears twice is the chosen action.
                action = key.split('.')[-1]
                break
        else:
            # normal good browser form submission
            action = actions.pop().split('.')[-1]
        # process the action first find the datasets to perform the action on.
        # they are prefixed by dataset_ in the form data
        datasets = []
        for param in request.params:
            if param.startswith('dataset_'):
                datasets.append(param[8:])
        # Map the submitted action name to the corresponding logic action.
        action_functions = {
            'private': 'bulk_update_private',
            'public': 'bulk_update_public',
            'delete': 'bulk_update_delete',
        }
        data_dict = {'datasets': datasets, 'org_id': c.group_dict['id']}
        try:
            get_action(action_functions[action])(context, data_dict)
        except NotAuthorized:
            abort(403, _('Not authorized to perform bulk update'))
        h.redirect_to(group_type + '_bulk_process', id=id)
    def new(self, data=None, errors=None, error_summary=None):
        """Render the 'create group' form; on POST delegate to _save_new.

        *data*/*errors*/*error_summary* are supplied when re-rendering
        after a validation failure.
        """
        if data and 'type' in data:
            group_type = data['type']
        else:
            group_type = self._guess_group_type(True)
        if data:
            data['type'] = group_type
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'save': 'save' in request.params,
                   'parent': request.params.get('parent', None)}
        try:
            self._check_access('group_create', context)
        except NotAuthorized:
            abort(403, _('Unauthorized to create a group'))
        if context['save'] and not data and request.method == 'POST':
            return self._save_new(context, group_type)
        data = data or {}
        # Only absolute http(s) image URLs round-trip through the form.
        if not data.get('image_url', '').startswith('http'):
            data.pop('image_url', None)
        errors = errors or {}
        error_summary = error_summary or {}
        vars = {'data': data, 'errors': errors,
                'error_summary': error_summary, 'action': 'new',
                'group_type': group_type}
        self._setup_template_variables(context, data, group_type=group_type)
        c.form = render(self._group_form(group_type=group_type),
                        extra_vars=vars)
        return render(self._new_template(group_type),
                      extra_vars={'group_type': group_type})
    def edit(self, id, data=None, errors=None, error_summary=None):
        """Render the 'edit group' form; on POST delegate to _save_edit."""
        group_type = self._ensure_controller_matches_group_type(
            id.split('@')[0])
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'save': 'save' in request.params,
                   'for_edit': True,
                   'parent': request.params.get('parent', None)
                   }
        data_dict = {'id': id, 'include_datasets': False}
        if context['save'] and not data and request.method == 'POST':
            return self._save_edit(id, context)
        try:
            data_dict['include_datasets'] = False
            old_data = self._action('group_show')(context, data_dict)
            c.grouptitle = old_data.get('title')
            c.groupname = old_data.get('name')
            # Pre-fill the form with the stored values unless re-rendering
            # after a validation failure.
            data = data or old_data
        except (NotFound, NotAuthorized):
            abort(404, _('Group not found'))
        group = context.get("group")
        c.group = group
        c.group_dict = self._action('group_show')(context, data_dict)
        try:
            self._check_access('group_update', context)
        except NotAuthorized:
            abort(403, _('User %r not authorized to edit %s') % (c.user, id))
        errors = errors or {}
        vars = {'data': data, 'errors': errors,
                'error_summary': error_summary, 'action': 'edit',
                'group_type': group_type}
        self._setup_template_variables(context, data, group_type=group_type)
        c.form = render(self._group_form(group_type), extra_vars=vars)
        return render(self._edit_template(c.group.type),
                      extra_vars={'group_type': group_type})
def _save_new(self, context, group_type=None):
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.params))))
data_dict['type'] = group_type or 'group'
context['message'] = data_dict.get('log_message', '')
data_dict['users'] = [{'name': c.user, 'capacity': 'admin'}]
group = self._action('group_create')(context, data_dict)
# Redirect to the appropriate _read route for the type of group
h.redirect_to(group['type'] + '_read', id=group['name'])
except (NotFound, NotAuthorized) as e:
abort(404, _('Group not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except ValidationError as e:
errors = e.error_dict
error_summary = e.error_summary
return self.new(data_dict, errors, error_summary)
    def _force_reindex(self, grp):
        ''' When the group name has changed, we need to force a reindex
        of the datasets within the group, otherwise they will stop
        appearing on the read page for the group (as they're connected via
        the group name)'''
        group = model.Group.get(grp['name'])
        # Rebuild the search-index entry for every dataset in the group.
        for dataset in group.packages():
            search.rebuild(dataset.name)
def _save_edit(self, id, context):
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
context['allow_partial_update'] = True
group = self._action('group_update')(context, data_dict)
if id != group['name']:
self._force_reindex(group)
h.redirect_to('%s_read' % group['type'], id=group['name'])
except (NotFound, NotAuthorized) as e:
abort(404, _('Group not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except ValidationError as e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
    def authz(self, id):
        """Render the group permissions-editing page (admins only)."""
        group = model.Group.get(id)
        if group is None:
            abort(404, _('Group not found'))
        group_type = group.type
        if group_type not in self.group_types:
            abort(404, _('Incorrect group type'))
        c.groupname = group.name
        c.grouptitle = group.display_name
        try:
            context = \
                {'model': model, 'user': c.user, 'group': group}
            self._check_access('group_edit_permissions', context)
            c.authz_editable = True
            c.group = context['group']
        except NotAuthorized:
            c.authz_editable = False
        if not c.authz_editable:
            abort(403,
                  _('User %r not authorized to edit %s authorizations') %
                  (c.user, id))
        # Apply any submitted changes, then show the resulting roles.
        roles = self._handle_update_of_authz(group)
        self._prepare_authz_info_for_render(roles)
        return render('group/authz.html',
                      extra_vars={'group_type': group_type})
    def delete(self, id):
        """Confirm (GET) and perform (POST) deletion of a group."""
        group_type = self._ensure_controller_matches_group_type(id)
        if 'cancel' in request.params:
            h.redirect_to(group_type + '_edit', id=id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        try:
            self._check_access('group_delete', context, {'id': id})
        except NotAuthorized:
            abort(403, _('Unauthorized to delete group %s') % '')
        try:
            # GET falls through to the confirmation page at the bottom.
            if request.method == 'POST':
                self._action('group_delete')(context, {'id': id})
                if group_type == 'organization':
                    h.flash_notice(_('Organization has been deleted.'))
                elif group_type == 'group':
                    h.flash_notice(_('Group has been deleted.'))
                else:
                    h.flash_notice(_('%s has been deleted.')
                                   % _(group_type.capitalize()))
                h.redirect_to(group_type + '_index')
            c.group_dict = self._action('group_show')(context, {'id': id})
        except NotAuthorized:
            abort(403, _('Unauthorized to delete group %s') % '')
        except NotFound:
            abort(404, _('Group not found'))
        except ValidationError as e:
            # E.g. deletion rejected because the group still has datasets.
            h.flash_error(e.error_dict['message'])
            h.redirect_to(controller='organization', action='read', id=id)
        return self._render_template('group/confirm_delete.html', group_type)
    def members(self, id):
        """Render the list of a group's members."""
        group_type = self._ensure_controller_matches_group_type(id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        data_dict = {'id': id}
        try:
            # NOTE(review): uses the plain check_access rather than
            # self._check_access, so the auth-function name is not remapped
            # for organizations -- confirm this is intentional.
            check_access('group_edit_permissions', context, data_dict)
        except NotAuthorized:
            abort(403,
                  _('User %r not authorized to edit members of %s') % (c.user,
                                                                       id))
        try:
            c.members = self._action('member_list')(
                context, {'id': id, 'object_type': 'user'}
            )
            data_dict['include_datasets'] = False
            c.group_dict = self._action('group_show')(context, data_dict)
        except NotFound:
            abort(404, _('Group not found'))
        return self._render_template('group/members.html', group_type)
    def member_new(self, id):
        """Render (GET) and process (POST) the 'add member' form.

        A POST either invites a brand-new user by email or adds an
        existing user, then redirects to the members page.
        """
        group_type = self._ensure_controller_matches_group_type(id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        try:
            self._check_access('group_member_create', context, {'id': id})
        except NotAuthorized:
            abort(403, _('Unauthorized to create group %s members') % '')
        try:
            data_dict = {'id': id}
            data_dict['include_datasets'] = False
            c.group_dict = self._action('group_show')(context, data_dict)
            c.roles = self._action('member_roles_list')(
                context, {'group_type': group_type}
            )
            if request.method == 'POST':
                data_dict = clean_dict(dict_fns.unflatten(
                    tuplize_dict(parse_params(request.params))))
                data_dict['id'] = id
                email = data_dict.get('email')
                if email:
                    # An email means inviting a user who does not exist
                    # yet: create/invite them first, then add by username.
                    user_data_dict = {
                        'email': email,
                        'group_id': data_dict['id'],
                        'role': data_dict['role']
                    }
                    del data_dict['email']
                    user_dict = self._action('user_invite')(
                        context, user_data_dict)
                    data_dict['username'] = user_dict['name']
                c.group_dict = self._action('group_member_create')(
                    context, data_dict)
                h.redirect_to(group_type + '_members', id=id)
            else:
                user = request.params.get('user')
                if user:
                    c.user_dict = \
                        get_action('user_show')(context, {'id': user})
                    c.user_role = \
                        authz.users_role_for_group_or_org(id, user) or 'member'
                else:
                    c.user_role = 'member'
        except NotAuthorized:
            abort(403, _('Unauthorized to add member to group %s') % '')
        except NotFound:
            abort(404, _('Group not found'))
        except ValidationError as e:
            h.flash_error(e.error_summary)
        return self._render_template('group/member_new.html', group_type)
    def member_delete(self, id):
        """Confirm (GET) and perform (POST) removal of a group member."""
        group_type = self._ensure_controller_matches_group_type(id)
        if 'cancel' in request.params:
            h.redirect_to(group_type + '_members', id=id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        try:
            self._check_access('group_member_delete', context, {'id': id})
        except NotAuthorized:
            abort(403, _('Unauthorized to delete group %s members') % '')
        try:
            user_id = request.params.get('user')
            if request.method == 'POST':
                self._action('group_member_delete')(
                    context, {'id': id, 'user_id': user_id})
                h.flash_notice(_('Group member has been deleted.'))
                h.redirect_to(group_type + '_members', id=id)
            c.user_dict = self._action('user_show')(context, {'id': user_id})
            c.user_id = user_id
            c.group_id = id
        except NotAuthorized:
            abort(403, _('Unauthorized to delete group %s members') % '')
        except NotFound:
            abort(404, _('Group not found'))
        return self._render_template('group/confirm_delete_member.html',
                                     group_type)
    def history(self, id):
        """Render a group's revision history, or an Atom feed of it.

        When two revisions are selected, redirects to the revision-diff
        controller instead.
        """
        group_type = self._ensure_controller_matches_group_type(id)
        if 'diff' in request.params or 'selected1' in request.params:
            try:
                params = {'id': request.params.getone('group_name'),
                          'diff': request.params.getone('selected1'),
                          'oldid': request.params.getone('selected2'),
                          }
            except KeyError:
                if 'group_name' in dict(request.params):
                    id = request.params.getone('group_name')
                c.error = \
                    _('Select two revisions before doing the comparison.')
            else:
                params['diff_entity'] = 'group'
                h.redirect_to(controller='revision', action='diff', **params)
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'schema': self._db_to_form_schema()}
        data_dict = {'id': id}
        try:
            c.group_dict = self._action('group_show')(context, data_dict)
            c.group_revisions = self._action('group_revision_list')(context,
                                                                    data_dict)
            # TODO: remove
            # Still necessary for the authz check in group/layout.html
            c.group = context['group']
        except (NotFound, NotAuthorized):
            abort(404, _('Group not found'))
        format = request.params.get('format', '')
        if format == 'atom':
            # Generate and return Atom 1.0 document.
            from webhelpers.feedgenerator import Atom1Feed
            feed = Atom1Feed(
                title=_(u'CKAN Group Revision History'),
                link=h.url_for(
                    group_type + '_read',
                    id=c.group_dict['name']),
                description=_(u'Recent changes to CKAN Group: ') +
                c.group_dict['display_name'],
                language=text_type(get_lang()),
            )
            for revision_dict in c.group_revisions:
                revision_date = h.date_str_to_datetime(
                    revision_dict['timestamp'])
                try:
                    dayHorizon = int(request.params.get('days'))
                except:
                    # NOTE(review): bare except -- swallows everything when
                    # 'days' is missing or non-numeric; catching
                    # (TypeError, ValueError) would be safer.
                    dayHorizon = 30
                dayAge = (datetime.datetime.now() - revision_date).days
                if dayAge >= dayHorizon:
                    break
                if revision_dict['message']:
                    item_title = u'%s' % revision_dict['message'].\
                        split('\n')[0]
                else:
                    item_title = u'%s' % revision_dict['id']
                item_link = h.url_for(controller='revision', action='read',
                                      id=revision_dict['id'])
                item_description = _('Log message: ')
                item_description += '%s' % (revision_dict['message'] or '')
                item_author_name = revision_dict['author']
                item_pubdate = revision_date
                feed.add_item(
                    title=item_title,
                    link=item_link,
                    description=item_description,
                    author_name=item_author_name,
                    pubdate=item_pubdate,
                )
            feed.content_type = 'application/atom+xml'
            return feed.writeString('utf-8')
        return render(self._history_template(group_type),
                      extra_vars={'group_type': group_type})
    def activity(self, id, offset=0):
        '''Render this group's public activity stream page.'''
        group_type = self._ensure_controller_matches_group_type(id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user, 'for_view': True}
        try:
            c.group_dict = self._get_group_dict(id)
        except (NotFound, NotAuthorized):
            abort(404, _('Group not found'))
        try:
            # Add the group's activity stream (already rendered to HTML) to the
            # template context for the group/read.html
            # template to retrieve later.
            c.group_activity_stream = self._action('group_activity_list_html')(
                context, {'id': c.group_dict['id'], 'offset': offset})
        except ValidationError as error:
            # E.g. an invalid 'offset' value.  (The bound `error` is unused.)
            base.abort(400)
        return render(self._activity_template(group_type),
                      extra_vars={'group_type': group_type})
    def follow(self, id):
        '''Start following this group.'''
        self._ensure_controller_matches_group_type(id)
        context = {'model': model,
                   'session': model.Session,
                   'user': c.user}
        data_dict = {'id': id}
        try:
            get_action('follow_group')(context, data_dict)
            group_dict = get_action('group_show')(context, data_dict)
            h.flash_success(_("You are now following {0}").format(
                group_dict['title']))
            # Redirect by name rather than whatever id was passed in.
            id = group_dict['name']
        except ValidationError as e:
            error_message = (e.message or e.error_summary
                             or e.error_dict)
            h.flash_error(error_message)
        except NotAuthorized as e:
            h.flash_error(e.message)
        h.redirect_to(controller='group', action='read', id=id)
    def unfollow(self, id):
        '''Stop following this group.'''
        self._ensure_controller_matches_group_type(id)
        context = {'model': model,
                   'session': model.Session,
                   'user': c.user}
        data_dict = {'id': id}
        try:
            get_action('unfollow_group')(context, data_dict)
            group_dict = get_action('group_show')(context, data_dict)
            h.flash_success(_("You are no longer following {0}").format(
                group_dict['title']))
            # Redirect by name rather than whatever id was passed in.
            id = group_dict['name']
        except ValidationError as e:
            error_message = (e.message or e.error_summary
                             or e.error_dict)
            h.flash_error(error_message)
        except (NotFound, NotAuthorized) as e:
            error_message = e.message
            h.flash_error(error_message)
        h.redirect_to(controller='group', action='read', id=id)
    def followers(self, id):
        """Render the list of users following this group."""
        group_type = self._ensure_controller_matches_group_type(id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        c.group_dict = self._get_group_dict(id)
        try:
            c.followers = \
                get_action('group_follower_list')(context, {'id': id})
        except NotAuthorized:
            abort(403, _('Unauthorized to view followers %s') % '')
        return render('group/followers.html',
                      extra_vars={'group_type': group_type})
def admins(self, id):
group_type = self._ensure_controller_matches_group_type(id)
c.group_dict = self._get_group_dict(id)
c.admins = authz.get_group_or_org_admin_ids(id)
return render(self._admins_template(c.group_dict['type']),
extra_vars={'group_type': group_type})
def about(self, id):
group_type = self._ensure_controller_matches_group_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user}
c.group_dict = self._get_group_dict(id)
group_type = c.group_dict['type']
self._setup_template_variables(context, {'id': id},
group_type=group_type)
return render(self._about_template(group_type),
extra_vars={'group_type': group_type})
    def _get_group_dict(self, id):
        ''' returns the result of group_show action or aborts if there is a
        problem '''
        context = {'model': model, 'session': model.Session,
                   'user': c.user,
                   'for_view': True}
        try:
            # Datasets are fetched separately by the views that need them.
            return self._action('group_show')(
                context, {'id': id, 'include_datasets': False})
        except (NotFound, NotAuthorized):
            abort(404, _('Group not found'))
| 39.662791
| 79
| 0.557981
|
4a0d777874bb39678d6ef519e8265a497168f10e
| 3,340
|
py
|
Python
|
reinvent-2019/rhythm-cloud/lambda/Greengrass_startSong/boto3.old1/__init__.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 2
|
2019-12-17T03:38:38.000Z
|
2021-05-28T06:23:58.000Z
|
reinvent-2019/rhythm-cloud/lambda/Greengrass_startSong/boto3.old1/__init__.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 8
|
2021-05-09T06:05:46.000Z
|
2022-03-02T09:53:20.000Z
|
reinvent-2019/rhythm-cloud/lambda/Greengrass_startSong/boto3.old1/__init__.py
|
kienpham2000/aws-builders-fair-projects
|
6c4075c0945a6318b217355a6fc663e35ffb9dba
|
[
"Apache-2.0"
] | 3
|
2020-09-30T18:46:59.000Z
|
2020-10-21T21:20:26.000Z
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.9.233'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
    """Create the module-wide default session.

    All keyword arguments are forwarded to the ``Session`` constructor.
    Calling this is only necessary to customise the default session; one
    is created lazily on first use otherwise.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
    """Attach a StreamHandler for logger *name* at *level*.

    By default this logs all boto3 messages to ``stdout``:

    >>> import boto3
    >>> boto3.set_stream_logger('boto3.resources', logging.INFO)

    Passing ``''`` as the name logs everything.

    .. WARNING::
        Logging anything from ``'botocore'`` includes the full wire trace,
        which may contain sensitive data; avoid in production.

    :type name: string
    :param name: Log name
    :type level: int
    :param level: Logging level, e.g. ``logging.INFO``
    :type format_string: str
    :param format_string: Log message format
    """
    # Only substitute the default when format_string is truly absent --
    # an explicit empty string is respected.
    if format_string is None:
        format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"

    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(format_string))

    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
def _get_default_session():
    """Return the module's default session, creating it on first use.

    :rtype: :py:class:`~boto3.session.Session`
    :return: The default session
    """
    if DEFAULT_SESSION is not None:
        return DEFAULT_SESSION
    setup_default_session()
    return DEFAULT_SESSION
def client(*args, **kwargs):
    """Create a low-level service client using the default session.

    See :py:meth:`boto3.session.Session.client`.
    """
    session = _get_default_session()
    return session.client(*args, **kwargs)
def resource(*args, **kwargs):
    """Create a resource service client using the default session.

    See :py:meth:`boto3.session.Session.resource`.
    """
    session = _get_default_session()
    return session.resource(*args, **kwargs)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
    """A handler that silently discards every record.

    Installed so the library emits nothing unless the application
    configures logging itself.
    """

    def emit(self, record):
        # Deliberately a no-op.
        pass
logging.getLogger('boto3').addHandler(NullHandler())
| 30.09009
| 81
| 0.703593
|
4a0d77a3bc75bc026ff6339304c919e5a67ae319
| 207
|
py
|
Python
|
src/neynpy/utils/exceptions.py
|
Neyn/Neynpy
|
1a29fdfcf0679e86db50de28303d5f1ab99be402
|
[
"BSD-3-Clause"
] | null | null | null |
src/neynpy/utils/exceptions.py
|
Neyn/Neynpy
|
1a29fdfcf0679e86db50de28303d5f1ab99be402
|
[
"BSD-3-Clause"
] | null | null | null |
src/neynpy/utils/exceptions.py
|
Neyn/Neynpy
|
1a29fdfcf0679e86db50de28303d5f1ab99be402
|
[
"BSD-3-Clause"
] | null | null | null |
class ValidationError(Exception):
    """Raised for any invalid-input condition.

    The human-readable message is passed to ``Exception``; structured
    error details, when provided, are kept on the ``errors`` attribute
    for handlers to inspect.
    """

    def __init__(self, message, errors=None):
        super().__init__(message)
        # Optional structured details (defaults to None).
        self.errors = errors
| 25.875
| 47
| 0.652174
|
4a0d787c5c7ee2b7228481591ea201bc76cfcde3
| 1,057
|
py
|
Python
|
tests/integration/test_client_industry_endpoints.py
|
joeseggie/resourceidea
|
aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f
|
[
"MIT"
] | null | null | null |
tests/integration/test_client_industry_endpoints.py
|
joeseggie/resourceidea
|
aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f
|
[
"MIT"
] | 21
|
2019-01-26T20:39:34.000Z
|
2019-06-20T10:09:57.000Z
|
tests/integration/test_client_industry_endpoints.py
|
joeseggie/resourceidea
|
aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f
|
[
"MIT"
] | null | null | null |
"""Test the client industry endpoints"""
from uuid import UUID
from flask import json
def test_post_client_industry(app, session, fake_lorem):
    """POSTing a new client industry returns 201 with an id and name."""
    # Arrange
    http_client = app.test_client()
    payload = {'name': fake_lorem.word()}

    # Act
    response = http_client.post(
        '/api/v0.1/clientindustries',
        json=payload)
    body = json.loads(response.get_data(as_text=True))

    # Assert
    assert response.status_code == 201
    assert 'id' in body
    assert 'name' in body
    assert isinstance(body, dict)
    assert isinstance(UUID(body['id']), UUID)
def test_post_client_industry_exists(app, session, fake_lorem):
    """Test response when the client industry name exists."""
    # Arrange
    client = app.test_client()
    request_body = {
        'name': 'Existing name'
    }

    # Act
    result = client.post(
        '/api/v0.1/clientindustries',
        json=request_body)
    output = json.loads(result.get_data(as_text=True))

    # Assert
    # BUG FIX: compare the status code, not the Response object itself --
    # a Response never equals the int 400, so the old assertion
    # (`assert result == 400`) could never pass.
    assert result.status_code == 400
    assert 'message' in output
| 23.488889
| 63
| 0.648061
|
4a0d789f027ccd174474a3a7770d2eab4de43f0e
| 2,646
|
py
|
Python
|
map.js.py.py
|
jarainf/map.js.py
|
5a80e8e86f019025f9cf263ffc716285bc05fd1a
|
[
"MIT"
] | null | null | null |
map.js.py.py
|
jarainf/map.js.py
|
5a80e8e86f019025f9cf263ffc716285bc05fd1a
|
[
"MIT"
] | null | null | null |
map.js.py.py
|
jarainf/map.js.py
|
5a80e8e86f019025f9cf263ffc716285bc05fd1a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from html.parser import HTMLParser
from urllib.parse import urlparse
from urllib.request import Request, urlopen
from urllib.error import URLError
class Linker(HTMLParser):
    """Crawl a page's ``<a href>`` links up to ``maxdepth``, building a
    LinkTree.  Uses the module-level ``parsed_sites`` list to visit each
    host (netloc) at most once."""

    # NOTE(review): a `global` statement at class scope has no effect;
    # parsed_sites is simply the module-level list.
    global parsed_sites

    def __init__(self, url, maxdepth=5, node=None, depth=-1, nonhtml=False):
        # Either start a fresh tree from *url*, or (url == '') continue
        # crawling an existing *node* one level deeper.
        if not url == '':
            self._node = LinkTree(self._handle_url(url))
        else:
            self._node = node
        self._depth = depth
        self._maxdepth = maxdepth
        self._nonhtml = nonhtml
        self._level = []  # HTML links discovered on the current page
        HTMLParser.__init__(self)
        self._feed_link(self._node)

    def handle_starttag(self, tag, attrs):
        """Collect href targets from anchor tags."""
        if tag == 'a':
            for (attr_name, attr_value) in attrs:
                if attr_name.lower() == 'href':
                    link = self._handle_url(attr_value)
                    if link:
                        self._decide_usage(link)

    def _handle_url(self, url):
        """Normalise *url* to a ParseResult; only http(s) and
        scheme-relative (``//host/...``) URLs are accepted, anything else
        yields None."""
        if '://' in url:
            if url.startswith('http://') or url.startswith('https://'):
                return urlparse(url)
        else:
            if url.startswith('//'):
                return urlparse('http:' + url)

    def _decide_usage(self, url):
        """Queue *url* for crawling (HTML) or attach it as a leaf node."""
        if url[1] in parsed_sites:  # url[1] is the netloc: one visit per host
            return
        parsed_sites.append(url[1])
        req = Request(url.geturl(), method="HEAD")
        try:
            resp = urlopen(req)
        except URLError:
            return
        # BUG FIX: the response is not a mapping and `Content-Type` was an
        # undefined bare name (`resp[Content-Type]` raised NameError).
        # Read the header through resp.headers instead.
        content_type = resp.headers.get('Content-Type')
        if content_type is not None:
            if 'text/html' in content_type:
                self._level.append(url)
        else:
            # No Content-Type header: guess from the URL path (url[2]).
            path = url[2]
            # NOTE(review): `'.' in path` also matches non-HTML paths such
            # as '/style.css'; the original may have meant `not in`.
            # Behaviour kept as written.
            if path == '' or path.endswith('.html') or path.endswith('.php') \
                    or '.' in path or path.endswith('.htm'):
                self._level.append(url)
            elif self._nonhtml:
                self._node.add(LinkTree(url))

    def _feed_link(self, node):
        """Download *node*'s page, parse its links, and recurse one level."""
        content = self._retrieve_url(node.data.geturl())
        if content is None:
            return
        self.feed(content)
        for link in self._level:
            child = LinkTree(link)
            self._node.add(child)
            if not (self._depth + 1) >= self._maxdepth:
                # BUG FIX: the recursive Linker shares `child` as its root
                # and get_tree() returns that same object, so adding
                # get_tree() again duplicated the child.  The recursion
                # mutates `child` in place; no second add is needed.
                Linker('', self._maxdepth, child, self._depth + 1,
                       self._nonhtml)

    def _retrieve_url(self, url):
        """Fetch *url* and return its decoded body, or None on failure."""
        if not self._check_robots(url):
            return None
        try:
            data = urlopen(url)
        except URLError:
            return None
        encoding = data.headers.get_content_charset()
        # Fall back to UTF-8 when the server declares no charset.
        return data.read().decode(encoding or 'utf-8')

    def _check_robots(self, url):
        # TODO: honour robots.txt; currently every URL is allowed.
        return True

    def get_tree(self):
        """Return the root LinkTree built by this crawler."""
        return self._node
class LinkTree(object):
    """A simple n-ary tree node whose payload is a parsed URL."""

    def __init__(self, data):
        self.data = data
        self._children = []

    def add(self, tree):
        """Attach another LinkTree as a child of this node."""
        self._children.append(tree)

    def print_self(self):
        """Print this node's URL, then all descendants', depth first."""
        print(self.data.geturl())
        for child in self._children:
            child.print_self()
def main():
    """Crawl the seed blog and print the resulting link tree."""
    global parsed_sites
    # Reset the visited-host list so each run starts a fresh crawl.
    parsed_sites = []
    crawler = Linker('http://vehk.de/blag/')
    crawler.get_tree().print_self()
if __name__ == '__main__':
main()
| 23.625
| 118
| 0.670068
|
4a0d79698770883898b4c3917812da85bb1ac4a1
| 1,124
|
py
|
Python
|
test/udp/server.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
test/udp/server.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
test/udp/server.py
|
uzum/cran-orchestrator
|
c2235bf324c8c04e82960ca58ec49f2f700c065d
|
[
"MIT"
] | null | null | null |
import socket
import os
import time
import argparse
from threading import Timer
REPORT_INTERVAL = 10.0
parser = argparse.ArgumentParser()
parser.add_argument("port")
args = parser.parse_args()
timer = None
packetCount = 0
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.bind(('', int(args.port)))
def report():
    """Print a packet/CPU/memory usage snapshot and reschedule itself.

    Runs every REPORT_INTERVAL seconds via threading.Timer. Reads
    Linux-specific sources (/proc/stat and `free`), so this presumably
    only works on Linux hosts -- TODO confirm target platform.
    """
    # Aggregate CPU usage since boot: (user+sys) / (user+sys+idle), as a percent.
    cpuUtilization = round(float(os.popen('''grep 'cpu ' /proc/stat | awk '{usage=($2+$4)*100/($2+$4+$5)} END {print usage }' ''').readline()))
    # Last line of `free -t -m` is the Total row: total/used/free in MiB.
    total, used, free = list(map(int, os.popen('free -t -m').readlines()[-1].split()[1:]))
    memoryUtilization = round(float(used)/total, 2) * 100
    payload = {
        'timestamp': int(time.time()),
        'packetCount': packetCount,  # module-level counter bumped by the recv loop
        'cpuUtilization': cpuUtilization,
        'memoryUtilization': memoryUtilization
    }
    print(payload)
    # Local `timer` shadows the module-level one; the Timer object keeps
    # itself alive once started, so the lost reference is harmless.
    timer = Timer(REPORT_INTERVAL, report)
    timer.start()
# Announce the listening port, emit the first (self-rescheduling) report,
# then count every datagram received on the bound socket forever.
print('listening to ' + args.port)
report()
while True:
    (message, address) = server_socket.recvfrom(1024)
    # Module-level counter that report() includes in its payload.
    packetCount += 1
    print('received message from ' + str(address))
    print(message.decode('utf-8'))
| 28.1
| 143
| 0.669929
|
4a0d7a84d99224b72759226f3708a9d008a82056
| 10,952
|
py
|
Python
|
lib/torchncon.py
|
xwkgch/IsoTensor
|
b08e9753d50f082023d4f516361bc666ee359223
|
[
"MIT"
] | 17
|
2021-10-01T04:24:04.000Z
|
2022-03-22T14:11:56.000Z
|
lib/torchncon.py
|
xwkgch/IsoTensor
|
b08e9753d50f082023d4f516361bc666ee359223
|
[
"MIT"
] | null | null | null |
lib/torchncon.py
|
xwkgch/IsoTensor
|
b08e9753d50f082023d4f516361bc666ee359223
|
[
"MIT"
] | 1
|
2021-10-05T11:30:20.000Z
|
2021-10-05T11:30:20.000Z
|
import torch
import collections
""" A module for the function ncon, which does contractions of several tensors.
"""
ncon_check = True
def ncon(AA, v, order=None, forder=None, check_indices=True):
    """ AA = [A1, A2, ..., Ap] list of tensors.
    v = (v1, v2, ..., vp) tuple of lists of indices e.g. v1 = [3 4 -1] labels
    the three indices of tensor A1, with -1 indicating an uncontracted index
    (open leg) and 3 and 4 being the contracted indices.
    order, if present, contains a list of all positive indices - if not
    [1 2 3 4 ...] by default. This is the order in which they are contracted.
    forder, if present, contains the final ordering of the uncontracted indices
    - if not, [-1 -2 ..] by default.
    There is some leeway in the way the inputs are given. For example,
    instead of giving a list of tensors as the first argument one can
    give some different iterable of tensors, such as a tuple, or a
    single tensor by itself (anything that has the attribute "shape"
    will be considered a tensor).
    """
    # Bug fix: collections.Iterable was removed in Python 3.10; the ABCs
    # live in collections.abc.
    from collections.abc import Iterable

    # We want to handle the tensors as a list, regardless of what kind
    # of iterable we are given. In addition, if only a single element is
    # given, we make list out of it. Inputs are assumed to be non-empty.
    # NOTE: the module-level ncon_check flag deliberately overrides the
    # check_indices argument, acting as a global on/off switch for checking.
    check_indices = ncon_check
    if hasattr(AA, "shape"):
        AA = [AA]
    else:
        AA = list(AA)
    v = list(v)
    if not isinstance(v[0], Iterable):
        # v is not a list of lists, so make it such.
        v = [v]
    else:
        v = list(map(list, v))

    if order is None:
        order = create_order(v)
    if forder is None:
        forder = create_forder(v)

    if check_indices:
        # Raise a ValueError if the indices are wrong.
        do_check_indices(AA, v, order, forder)

    # If the graph is disconnected, connect it with trivial indices that
    # will be contracted at the very end.
    connect_graph(AA, v, order)

    while len(order) > 0:
        tcon = get_tcon(v, order[0])  # tcon = tensors to be contracted
        # Find the indices icon that are to be contracted.
        if len(tcon) == 1:
            tracing = True
            icon = [order[0]]
        else:
            tracing = False
            icon = get_icon(v, tcon)
        # Position in tcon[0] and tcon[1] of indices to be contracted.
        # In the case of trace, pos2 = []
        pos1, pos2 = get_pos(v, tcon, icon)
        if tracing:
            # Trace on a tensor
            new_A = trace(AA[tcon[0]], axis1=pos1[0], axis2=pos1[1])
        else:
            # Contraction of 2 tensors
            new_A = con(AA[tcon[0]], AA[tcon[1]], (pos1, pos2))
        AA.append(new_A)
        v.append(find_newv(v, tcon, icon))  # Add the v for the new tensor
        for i in sorted(tcon, reverse=True):
            # Delete the contracted tensors and indices from the lists.
            # tcon is reverse sorted so that tensors are removed starting from
            # the end of AA, otherwise the order would get messed.
            del AA[i]
            del v[i]
        order = renew_order(order, icon)  # Update order

    vlast = v[0]
    A = AA[0]
    A = permute_final(A, vlast, forder)
    return A
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def create_order(v):
    """Return every distinct positive index appearing in v, sorted ascending."""
    positives = {idx for legs in v for idx in legs if idx > 0}
    return sorted(positives)
def create_forder(v):
    """Return every distinct negative index in v, reverse sorted (-1 first)."""
    negatives = {idx for legs in v for idx in legs if idx < 0}
    return sorted(negatives, reverse=True)
def connect_graph(AA, v, order):
    """ Connect the graph of tensors to be contracted by trivial
    indices, if necessary. Add these trivial indices to the end of the
    contraction order.
    AA, v and order are modified in place.

    NOTE(review): despite the description above, this implementation only
    computes the connected components and returns None without adding any
    connecting indices, so a disconnected network is left unchanged --
    TODO confirm whether the connection step was intentionally dropped.
    """
    # Build ccomponents, a list of the connected components of the graph,
    # where each component is represented by a a set of indices.
    unvisited = set(range(len(AA)))
    visited = set()
    ccomponents = []
    while unvisited:
        component = set()
        next_visit = unvisited.pop()
        to_visit = {next_visit}
        while to_visit:
            i = to_visit.pop()
            unvisited.discard(i)
            component.add(i)
            visited.add(i)
            # Get the indices of tensors neighbouring AA[i].
            i_inds = set(v[i])
            # Two tensors are neighbours iff they share at least one index label.
            neighs = (j for j, j_inds in enumerate(v) if i_inds.intersection(j_inds))
            for neigh in neighs:
                if neigh not in visited:
                    to_visit.add(neigh)
        ccomponents.append(component)
    return None
def get_tcon(v, index):
    """Return the positions in AA of the tensors that carry ``index`` as a leg."""
    tcon = [i for i, legs in enumerate(v) if index in legs]
    n_found = len(tcon)
    # If check_indices is called and it does its work properly then these
    # checks should in fact be unnecessary.
    if n_found > 2:
        raise ValueError('In ncon.get_tcon, more than two tensors share a '
                         'contraction index.')
    if n_found < 1:
        raise ValueError('In ncon.get_tcon, less than one tensor share a '
                         'contraction index.')
    if n_found == 1:
        # A single owner means a trace: the index must occur exactly twice.
        if v[tcon[0]].count(index) != 2:
            raise ValueError('In ncon.get_tcon, a trace index is listed '
                             '!= 2 times for the same tensor.')
    return tcon
def get_icon(v, tcon):
    """Return the index labels contracted between the two tensors in tcon."""
    shared = set(v[tcon[0]]).intersection(v[tcon[1]])
    return list(shared)
def get_pos(v, tcon, icon):
    """Positions of the icon indices within the leg lists of the tcon tensors.

    For a trace (single tensor), pos2 is empty.
    """
    # Flattened icon-major: for each contracted label, every matching leg slot.
    pos1 = [i for e in icon for i, x in enumerate(v[tcon[0]]) if x == e]
    if len(tcon) < 2:
        pos2 = []
    else:
        pos2 = [i for e in icon for i, x in enumerate(v[tcon[1]]) if x == e]
    return pos1, pos2
def find_newv(v, tcon, icon):
    """Leg list of the tensor produced by contracting icon between tcon."""
    if len(tcon) == 2:
        combined = v[tcon[0]] + v[tcon[1]]
    else:
        combined = v[tcon[0]]
    # Contracted labels disappear; the remaining legs keep their order.
    return [idx for idx in combined if idx not in icon]
def renew_order(order, icon):
    """Return order with the just-contracted indices removed."""
    done = set(icon)
    return [idx for idx in order if idx not in done]
def permute_final(A, v, forder):
    """Permute A's legs from the labelling in v into the order given by forder."""
    permutation = tuple(v.index(label) for label in forder)
    return A.permute(permutation)
def do_check_indices(AA, v, order, forder):
    """ Check that
    1) the number of tensors in AA matches the number of index lists in v.
    2) every tensor is given the right number of indices.
    3) every contracted index is featured exactly twice and every free index
    exactly once.
    4) the dimensions of the two ends of each contracted index match.

    Raises ValueError on the first violation found; returns True otherwise.
    """
    #1)
    if len(AA) != len(v):
        raise ValueError(('In ncon.do_check_indices, the number of tensors %i'
                          ' does not match the number of index lists %i')
                         %(len(AA), len(v)))
    #2)
    # Create a list of lists with the shapes of each A in AA.
    shapes = list(map(lambda A: list(A.shape), AA))
    for i,inds in enumerate(v):
        if len(inds) != len(shapes[i]):
            raise ValueError(('In ncon.do_check_indices, len(v[%i])=%i '
                              'does not match the numbers of indices of '
                              'AA[%i] = %i')%(i, len(inds), i,
                                              len(shapes[i])))
    #3) and 4)
    # v_pairs = [[(0,0), (0,1), (0,2), ...], [(1,0), (1,1), (1,2), ...], ...]
    v_pairs = [[(i,j) for j in range(len(s))] for i, s in enumerate(v)]
    v_pairs = sum(v_pairs, [])
    v_sum = sum(v, [])
    # For t, o in zip(v_pairs, v_sum) t is the tuple of the number of
    # the tensor and the index and o is the contraction order of that
    # index. We group these tuples by the contraction order.
    order_groups = [[t for t, o in zip(v_pairs, v_sum) if o == e]
                    for e in order]
    forder_groups = [[1 for fo in v_sum if fo == e] for e in forder]
    for i, o in enumerate(order_groups):
        if len(o) != 2:
            raise ValueError(('In ncon.do_check_indices, the contracted index '
                              '%i is not featured exactly twice in v.')%order[i])
        else:
            A0, ind0 = o[0]
            A1, ind1 = o[1]
            try:
                # Tensor types may supply their own compatibility check
                # (e.g. symmetry-aware tensors) -- TODO confirm which types do.
                compatible = AA[A0].compatible_indices(AA[A1], ind0, ind1)
            except AttributeError:
                # Plain arrays: the two leg dimensions must simply be equal.
                compatible = AA[A0].shape[ind0] == AA[A1].shape[ind1]
            if not compatible:
                raise ValueError('In ncon.do_check_indices, for the '
                                 'contraction index %i, the leg %i of tensor '
                                 'number %i and the leg %i of tensor number '
                                 '%i are not compatible.'
                                 %(order[i], ind0, A0, ind1, A1))
    for i, fo in enumerate(forder_groups):
        if len(fo) != 1:
            raise ValueError(('In ncon.do_check_indices, the free index '
                              '%i is not featured exactly once in v.')%forder[i])
    # All is well if we made it here.
    return True
####################################################################
# The following are simple wrappers around numpy/Tensor functions, #
# but may be replaced with fancier stuff later. #
####################################################################
def con(A, B, inds):
    """Contract A with B along the leg-position pairs given in inds."""
    both_torch = torch.is_tensor(A) and torch.is_tensor(B)
    if both_torch:
        return torch.tensordot(A, B, inds)
    return A.dot(B, inds)
def trace(A, axis1=0, axis2=1):
    # Delegates to the tensor's own trace(). NOTE(review): torch.Tensor.trace
    # accepts no axis arguments, so this path presumably targets an
    # ndarray-like tensor type -- TODO confirm the intended tensor class.
    return A.trace(axis1=axis1, axis2=axis2)
if __name__=='__main__':
    # Smoke test: ncon must agree with an equivalent einsum contraction.
    # Robustness fix: fall back to CPU when CUDA is unavailable; the original
    # hard-coded 'cuda:0' and crashed on GPU-less hosts.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    A = torch.randn(2,3,4,5,dtype=torch.float64, device=device)
    B = torch.randn(4,5,2, dtype=torch.float64, device=device)
    C_ncon = ncon([A, B], ([-1, -2, 1, 2], [1, 2, -3]), [1, 2], [-3, -1, -2])
    C_einsum = torch.einsum('abcd,cde->eab',(A, B))
    # Should print a value on the order of float64 rounding error.
    print ((C_ncon - C_einsum).abs().sum())
| 36.145215
| 85
| 0.570672
|
4a0d7c0735eea2d9f2cfb63ce6ca226f64ae37a1
| 9,279
|
py
|
Python
|
boardfarm/devices/debian_wifi.py
|
mattsm/boardfarm
|
100521fde1fb67536682cafecc2f91a6e2e8a6f8
|
[
"BSD-3-Clause-Clear"
] | 40
|
2018-03-23T14:17:13.000Z
|
2022-02-05T05:59:41.000Z
|
boardfarm/devices/debian_wifi.py
|
mattsm/boardfarm
|
100521fde1fb67536682cafecc2f91a6e2e8a6f8
|
[
"BSD-3-Clause-Clear"
] | 1
|
2020-04-17T01:20:12.000Z
|
2020-04-20T20:42:00.000Z
|
boardfarm/devices/debian_wifi.py
|
mattsm/boardfarm
|
100521fde1fb67536682cafecc2f91a6e2e8a6f8
|
[
"BSD-3-Clause-Clear"
] | 9
|
2018-04-11T08:31:14.000Z
|
2020-08-06T14:55:35.000Z
|
"""Extension of Debian class with wifi functions
"""
import re
import pexpect
import pycountry
from boardfarm.lib.wifi import wifi_client_stub
from . import debian
class DebianWifi(debian.DebianBox, wifi_client_stub):
    """Extension of Debian class with wifi functions
    wifi_client_stub is inherited from lib/wifi.py
    """
    # NOTE(review): the parentheses below do not make a tuple; model is the
    # plain string 'debianwifi'.
    model = ('debianwifi')
    def __init__(self, *args, **kwargs):
        """Constructor method to initialise wifi interface
        """
        super(DebianWifi, self).__init__(*args, **kwargs)
        # Both the DUT-facing and the wifi interface name come from the
        # 'dut_interface' kwarg, defaulting to 'wlan1'.
        self.iface_dut = self.iface_wifi = self.kwargs.get(
            'dut_interface', 'wlan1')
    def disable_and_enable_wifi(self):
        """Disable and enable wifi interface
        i.e., set the interface link to "down" and then to "up"
        This calls the disable wifi and enable wifi methods
        """
        self.disable_wifi()
        self.enable_wifi()
    def disable_wifi(self):
        """Disabling the wifi interface
        setting the interface link to "down"
        """
        self.set_link_state(self.iface_wifi, "down")
    def enable_wifi(self):
        """Enabling the wifi interface
        setting the interface link to "up"
        """
        self.set_link_state(self.iface_wifi, "up")
    def release_wifi(self):
        """DHCP release of the wifi interface
        """
        iface = self.iface_wifi
        self.release_dhcp(iface)
    def renew_wifi(self):
        """DHCP renew of the wifi interface

        Returns False when dhclient hangs past the timeout; otherwise the
        re.Match (or None) for 'File exist' in the captured output.
        """
        self.sudo_sendline("dhclient {}".format(self.iface_wifi))
        try:
            self.expect(self.prompt, timeout=10)
        except pexpect.TIMEOUT:
            # dhclient hung: interrupt it and kill any stragglers.
            self.sendcontrol('c')
            self.expect(self.prompt)
            self.sudo_sendline("killall dhclient")
            self.expect(self.prompt)
            return False
        # 'File exist' matches dhclient's "File exists" message -- presumably
        # used as the success/failure indicator here; TODO confirm intent.
        match = re.search('File exist', self.before)
        return match
    def change_channel(self, channel):
        """change wifi client scan channel
        """
        # NOTE(review): channel is set on the literal interface 'wlan0',
        # not self.iface_wifi -- confirm this is intentional.
        self.sudo_sendline('iwconfig wlan0 channel {}'.format(channel))
        self.expect(self.prompt)
    def wifi_scan(self):
        """Scanning the SSID associated with the wifi interface
        :return: List of SSID
        :rtype: string
        """
        from boardfarm.lib.installers import install_iw
        install_iw(self)
        self.sudo_sendline('iw %s scan | grep SSID:' % self.iface_wifi)
        self.expect(self.prompt)
        return self.before
    def wifi_check_ssid(self, ssid_name):
        """Check the SSID provided is present in the scan list
        :param ssid_name: SSID name to be verified
        :type ssid_name: string
        :return: True or False
        :rtype: boolean
        """
        from boardfarm.lib.installers import install_iw
        install_iw(self)
        self.sudo_sendline('iw %s scan | grep "SSID: %s"' %
                           (self.iface_wifi, ssid_name))
        self.expect(self.prompt)
        match = re.search(r"%s\"\s+.*(%s)" % (ssid_name, ssid_name),
                          self.before)
        if match:
            return True
        else:
            return False
    def wifi_connect(self,
                     ssid_name,
                     password=None,
                     security_mode='NONE',
                     hotspot_id='cbn',
                     hotspot_pwd='cbn',
                     boardcast=True):
        """Initialise wpa supplicant file
        :param ssid_name: SSID name
        :type ssid_name: string
        :param password: wifi password, defaults to None
        :type password: string, optional
        :param security_mode: Security mode for the wifi, [NONE|WPA-PSK|WPA-EAP]
        :type security_mode: string, optional
        :param hotspot_id: identity of hotspot
        :type hotspot_id: string
        :param hotspot_pwd: password of hotspot
        :type hotspot_pwd: string
        :param boardcast: Enable/Disable boardcast for ssid scan
        :type boardcast: bool
        :return: True or False
        :rtype: boolean
        """
        '''Setup config of wpa_supplicant connect'''
        config = dict()
        config['ssid'] = ssid_name
        config['key_mgmt'] = security_mode
        if security_mode == "WPA-PSK":
            config['psk'] = password
        elif security_mode == "WPA-EAP":
            config['eap'] = 'PEAP'
            config['identity'] = hotspot_id
            config['password'] = hotspot_pwd
        # scan_ssid=1 (hidden SSID probing) when broadcast is disabled.
        config['scan_ssid'] = int(not boardcast)
        config_str = ''
        for k, v in config.items():
            # String-valued settings must be quoted in wpa_supplicant.conf.
            if k in ['ssid', 'psk', 'identity', 'password']:
                v = '"{}"'.format(v)
            config_str += '{}={}\n'.format(k, v)
        final_config = 'network={{\n{}}}'.format(config_str)
        '''Create wpa_supplicant config'''
        self.sudo_sendline("rm {}.conf".format(ssid_name))
        self.expect(self.prompt)
        self.sudo_sendline("echo -e '{}' > {}.conf".format(
            final_config, ssid_name))
        self.expect(self.prompt)
        self.sendline("cat {}.conf".format(ssid_name))
        self.expect(self.prompt)
        '''Generate WPA supplicant connect'''
        # WPA-EAP uses the legacy wext driver; everything else uses nl80211.
        driver_name = 'nl80211'
        if security_mode == "WPA-EAP":
            driver_name = 'wext'
        self.sudo_sendline("wpa_supplicant -B -D{} -i {} -c {}.conf".format(
            driver_name, self.iface_wifi, ssid_name))
        self.expect(self.prompt)
        match = re.search('Successfully initialized wpa_supplicant',
                          self.before)
        return bool(match)
    def wifi_connectivity_verify(self):
        """Verify wifi is in teh connected state
        :return: True or False
        :rtype: boolean
        """
        self.sendline("iw %s link" % self.iface_wifi)
        self.expect(self.prompt)
        match = re.search('Connected', self.before)
        if match:
            return True
        else:
            return False
    def wifi_connect_check(self, ssid_name, password=None):
        """Connect to a SSID and verify
        WIFI connectivity
        :param ssid_name: SSID name
        :type ssid_name: string
        :param password: wifi password, defaults to None
        :type password: string, optional
        :return: True or False
        :rtype: boolean
        """
        # Retry up to five times; the expect(TIMEOUT) acts as a fixed sleep
        # to give the association time to complete.
        for _ in range(5):
            self.wifi_connect(ssid_name, password)
            self.expect(pexpect.TIMEOUT, timeout=10)
            verify_connect = self.wifi_connectivity_verify()
            if verify_connect:
                break
            else:
                self.wifi_disconnect()
        return verify_connect
    def disconnect_wpa(self):
        """Disconnect the wpa supplicant initialisation
        """
        self.sudo_sendline("killall wpa_supplicant")
        self.expect(self.prompt)
    def wlan_ssid_disconnect(self):
        """Disconnect the wifi connectivity if connected
        through iwconfig method using ssid alone
        """
        self.sudo_sendline("iw dev %s disconnect" % self.iface_wifi)
        self.expect(self.prompt)
    def wifi_disconnect(self):
        """Common method to disconnect wifi connectivity
        by disconnecting wpa supplicant initialisation as well as
        iwconfig disconnection
        """
        self.disconnect_wpa()
        self.wlan_ssid_disconnect()
    def wifi_change_region(self, country):
        """Change the region of the wifi
        :param country: region to be set
        :type country: string
        :return: country name if matched else None
        :rtype: string or boolean
        """
        # Map the full country name to its ISO 3166-1 alpha-2 code for iw.
        country = pycountry.countries.get(name=country).alpha_2
        self.sudo_sendline("iw reg set %s" % (country))
        self.expect(self.prompt)
        self.sendline("iw reg get")
        self.expect(self.prompt)
        match = re.search(country, self.before)
        if match:
            return match.group(0)
        else:
            return None
    def start_lan_client(self):
        """Start_lan_method execution for the wifi interface
        """
        # Point the DUT interface at the wifi interface before delegating.
        self.iface_dut = self.iface_wifi
        super(DebianWifi, self).start_lan_client()
    def wifi_client_connect(self,
                            ssid_name,
                            password=None,
                            security_mode=None):
        """Scan for SSID and verify wifi connectivity
        :param ssid_name: SSID name
        :type ssid_name: string
        :param password: wifi password, defaults to None
        :type password: string, optional
        :param security_mode: Security mode for the wifi, defaults to None
        :type security_mode: string, optional
        :raise assertion: If SSID value check in WLAN container fails,
        If connection establishment in WIFI fails
        """
        self.disable_and_enable_wifi()
        self.expect(pexpect.TIMEOUT, timeout=20)
        output = self.wifi_check_ssid(ssid_name)
        assert output == True, 'SSID value check in WLAN container'
        self.wifi_connect(ssid_name, password)
        self.expect(pexpect.TIMEOUT, timeout=20)
        verify_connect = self.wifi_connectivity_verify()
        assert verify_connect == True, 'Connection establishment in WIFI'
| 33.864964
| 80
| 0.591551
|
4a0d7c89c26a160a5aeb013309b9f503a4ca078a
| 2,874
|
py
|
Python
|
venv/Lib/site-packages/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 585
|
2015-01-12T16:06:47.000Z
|
2022-03-26T14:51:08.000Z
|
nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 2,329
|
2015-01-01T09:56:41.000Z
|
2022-03-30T14:24:49.000Z
|
nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 487
|
2015-01-20T01:04:52.000Z
|
2022-03-21T21:22:47.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..brains import FindCenterOfBrain
def test_FindCenterOfBrain_inputs():
    """Every declared input trait of FindCenterOfBrain must carry exactly
    this metadata (argstr/hash_files/etc.).

    File is AUTO-GENERATED by tools/checkspecs.py -- regenerate rather
    than hand-edit the map below.
    """
    input_map = dict(
        args=dict(
            argstr="%s",
        ),
        axis=dict(
            argstr="--axis %d",
        ),
        backgroundValue=dict(
            argstr="--backgroundValue %d",
        ),
        clippedImageMask=dict(
            argstr="--clippedImageMask %s",
            hash_files=False,
        ),
        closingSize=dict(
            argstr="--closingSize %d",
        ),
        debugAfterGridComputationsForegroundImage=dict(
            argstr="--debugAfterGridComputationsForegroundImage %s",
            hash_files=False,
        ),
        debugClippedImageMask=dict(
            argstr="--debugClippedImageMask %s",
            hash_files=False,
        ),
        debugDistanceImage=dict(
            argstr="--debugDistanceImage %s",
            hash_files=False,
        ),
        debugGridImage=dict(
            argstr="--debugGridImage %s",
            hash_files=False,
        ),
        debugTrimmedImage=dict(
            argstr="--debugTrimmedImage %s",
            hash_files=False,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        generateDebugImages=dict(
            argstr="--generateDebugImages ",
        ),
        headSizeEstimate=dict(
            argstr="--headSizeEstimate %f",
        ),
        headSizeLimit=dict(
            argstr="--headSizeLimit %f",
        ),
        imageMask=dict(
            argstr="--imageMask %s",
            extensions=None,
        ),
        inputVolume=dict(
            argstr="--inputVolume %s",
            extensions=None,
        ),
        maximize=dict(
            argstr="--maximize ",
        ),
        otsuPercentileThreshold=dict(
            argstr="--otsuPercentileThreshold %f",
        ),
    )
    inputs = FindCenterOfBrain.input_spec()
    # Compare each expected metadata value against the live trait definition.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_FindCenterOfBrain_outputs():
    """Every declared output trait of FindCenterOfBrain must carry exactly
    this metadata.

    File is AUTO-GENERATED by tools/checkspecs.py -- regenerate rather
    than hand-edit the map below.
    """
    output_map = dict(
        clippedImageMask=dict(
            extensions=None,
        ),
        debugAfterGridComputationsForegroundImage=dict(
            extensions=None,
        ),
        debugClippedImageMask=dict(
            extensions=None,
        ),
        debugDistanceImage=dict(
            extensions=None,
        ),
        debugGridImage=dict(
            extensions=None,
        ),
        debugTrimmedImage=dict(
            extensions=None,
        ),
    )
    outputs = FindCenterOfBrain.output_spec()
    # Compare each expected metadata value against the live trait definition.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 27.634615
| 68
| 0.536186
|
4a0d7db050612ca178da7d6525086854e07d565a
| 786
|
py
|
Python
|
singermatch/main.py
|
isguoqiang/SingerMatch
|
e9f68953420dba6c496cece269cc3334fed1e7eb
|
[
"MIT"
] | null | null | null |
singermatch/main.py
|
isguoqiang/SingerMatch
|
e9f68953420dba6c496cece269cc3334fed1e7eb
|
[
"MIT"
] | null | null | null |
singermatch/main.py
|
isguoqiang/SingerMatch
|
e9f68953420dba6c496cece269cc3334fed1e7eb
|
[
"MIT"
] | null | null | null |
"""Singer Match entry point: dispatch a named routine from the command line."""
import configparser
import argparse
from routines import Routines

config = configparser.ConfigParser()
config.read('../system.ini')
routines = Routines(config)

parser = argparse.ArgumentParser(description='Singer Match Entry Point')
# Bug fix: with nargs='*' the default must be a LIST. The old default 'test'
# was a plain string, so args.routine[0] evaluated to the letter 't' when no
# routine was given on the command line.
parser.add_argument('routine', nargs='*', default=['test'], help='Run predefined routine')
parser.add_argument('--skip', type=int, default=0, help='Skip lines')
args = parser.parse_args()

if 'spectrogram' == args.routine[0]:
    routines.to_spectrogram(skip=args.skip)
elif 'slice' == args.routine[0]:
    routines.slice(skip=args.skip)
elif 'filter' == args.routine[0]:
    # Do pre-processing for all mp3 files under ORIGINAL_MP3_DIR
    routines.filter(skip=args.skip)
elif 'bag_of_pitch' == args.routine[0]:
    # Extract melodia features
    pass
| 32.75
| 88
| 0.739186
|
4a0d7ec2a61a3783692021f005d07e8df5b2232d
| 5,911
|
py
|
Python
|
mdit_py_plugins/tasklists/__init__.py
|
shivam05011996/mdit-py-plugins
|
5c0038fd4348cc37548b3ee7891a9f380517f959
|
[
"MIT"
] | null | null | null |
mdit_py_plugins/tasklists/__init__.py
|
shivam05011996/mdit-py-plugins
|
5c0038fd4348cc37548b3ee7891a9f380517f959
|
[
"MIT"
] | null | null | null |
mdit_py_plugins/tasklists/__init__.py
|
shivam05011996/mdit-py-plugins
|
5c0038fd4348cc37548b3ee7891a9f380517f959
|
[
"MIT"
] | null | null | null |
"""Builds task/todo lists out of markdown lists with items starting with [ ] or [x]"""
# Ported by Wolmar Nyberg Åkerström from https://github.com/revin/markdown-it-task-lists
# ISC License
# Copyright (c) 2016, Revin Guillen
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from typing import List
from uuid import uuid4
from markdown_it import MarkdownIt
from markdown_it.token import Token
def tasklists_plugin(
md: MarkdownIt,
enabled: bool = False,
label: bool = False,
label_after: bool = False,
):
"""Plugin for building task/todo lists out of markdown lists with items starting with [ ] or [x]
.. Nothing else
For example::
- [ ] An item that needs doing
- [x] An item that is complete
The rendered HTML checkboxes are disabled; to change this, pass a truthy value into the enabled
property of the plugin options.
:param enabled: True enables the rendered checkboxes
:param label: True wraps the rendered list items in a <label> element for UX purposes,
:param label_after: True – adds the <label> element after the checkbox.
"""
disable_checkboxes = not enabled
use_label_wrapper = label
use_label_after = label_after
def fcn(state):
tokens: List[Token] = state.tokens
for i in range(2, len(tokens) - 1):
if is_todo_item(tokens, i):
todoify(tokens[i], tokens[i].__class__)
attr_set(
tokens[i - 2],
"class",
"task-list-item" + (" enabled" if not disable_checkboxes else ""),
)
attr_set(
tokens[parent_token(tokens, i - 2)], "class", "contains-task-list"
)
md.core.ruler.after("inline", "github-tasklists", fcn)
def attr_set(token, name, value):
index = token.attrIndex(name)
attr = [name, value]
if index < 0:
token.attrPush(attr)
else:
token.attrs[index] = attr
def parent_token(tokens, index):
target_level = tokens[index].level - 1
for i in range(1, index + 1):
if tokens[index - i].level == target_level:
return index - i
return -1
def is_todo_item(tokens, index):
return (
is_inline(tokens[index])
and is_paragraph(tokens[index - 1])
and is_list_item(tokens[index - 2])
and starts_with_todo_markdown(tokens[index])
)
def todoify(token: Token, token_constructor):
assert token.children is not None
token.children.insert(0, make_checkbox(token, token_constructor))
token.children[1].content = token.children[1].content[3:]
token.content = token.content[3:]
if use_label_wrapper:
if use_label_after:
token.children.pop()
# Replaced number generator from original plugin with uuid.
checklist_id = f"task-item-{uuid4()}"
token.children[0].content = (
token.children[0].content[0:-1] + f' id="{checklist_id}">'
)
token.children.append(
after_label(token.content, checklist_id, token_constructor)
)
else:
token.children.insert(0, begin_label(token_constructor))
token.children.append(end_label(token_constructor))
def make_checkbox(token, token_constructor):
checkbox = token_constructor("html_inline", "", 0)
disabled_attr = 'disabled="disabled"' if disable_checkboxes else ""
if token.content.startswith("[ ] "):
checkbox.content = (
'<input class="task-list-item-checkbox" '
f'{disabled_attr} type="checkbox">'
)
elif token.content.startswith("[x] ") or token.content.startswith("[X] "):
checkbox.content = (
'<input class="task-list-item-checkbox" checked="checked" '
f'{disabled_attr} type="checkbox">'
)
return checkbox
def begin_label(token_constructor):
token = token_constructor("html_inline", "", 0)
token.content = "<label>"
return token
def end_label(token_constructor):
token = token_constructor("html_inline", "", 0)
token.content = "</label>"
return token
def after_label(content, checkbox_id, token_constructor):
token = token_constructor("html_inline", "", 0)
token.content = (
f'<label class="task-list-item-label" for="{checkbox_id}">{content}</label>'
)
token.attrs = [{"for": checkbox_id}]
return token
def is_inline(token):
return token.type == "inline"
def is_paragraph(token):
return token.type == "paragraph_open"
def is_list_item(token):
return token.type == "list_item_open"
def starts_with_todo_markdown(token):
# leading whitespace in a list item is already trimmed off by markdown-it
return (
token.content.startswith("[ ] ")
or token.content.startswith("[x] ")
or token.content.startswith("[X] ")
)
| 36.487654
| 100
| 0.615294
|
4a0d7f92ced6cd3cd14fd566271dba5881fdaf2a
| 110
|
py
|
Python
|
PyCharm/individual2.py
|
UnDeR-The-mAsK/lab3
|
bda56ac49811b96e002b9181de94bf05ff7ff856
|
[
"MIT"
] | null | null | null |
PyCharm/individual2.py
|
UnDeR-The-mAsK/lab3
|
bda56ac49811b96e002b9181de94bf05ff7ff856
|
[
"MIT"
] | null | null | null |
PyCharm/individual2.py
|
UnDeR-The-mAsK/lab3
|
bda56ac49811b96e002b9181de94bf05ff7ff856
|
[
"MIT"
] | null | null | null |
# Read the digits of a three-digit number (a3 a2 a1) and a two-digit
# number (b2 b1) in the same prompt order as before, then print their sum.
a3 = int(input('a3='))
a2 = int(input('a2='))
a1 = int(input('a1='))
b2 = int(input('b2='))
b1 = int(input('b1='))
print((100 * a3 + 10 * a2 + a1) + (10 * b2 + b1))
| 110
| 110
| 0.563636
|
4a0d7fd791388b522581a6242970fb3a340b92b4
| 9,005
|
py
|
Python
|
ibms_project/ibms/models.py
|
parksandwildlife/ibms
|
caea0cb15deed1744ee73a6a44c264650391f71d
|
[
"Apache-2.0"
] | 2
|
2019-09-07T20:39:29.000Z
|
2021-09-16T12:02:16.000Z
|
ibms_project/ibms/models.py
|
ropable/ibms
|
8cb2c24ad0202e961c4cf7e3c79385f5716b8c63
|
[
"Apache-2.0"
] | 11
|
2020-06-18T06:53:01.000Z
|
2022-02-11T01:55:42.000Z
|
ibms_project/ibms/models.py
|
ropable/ibms
|
8cb2c24ad0202e961c4cf7e3c79385f5716b8c63
|
[
"Apache-2.0"
] | 5
|
2016-01-18T04:36:48.000Z
|
2017-09-07T06:38:28.000Z
|
from datetime import datetime
from django.conf import settings
from django.db import models
from sfm.models import FinancialYear
# (value, label) choice pairs for financial years 2011/12 .. 2020/21;
# the stored value and the displayed label are identical.
FINYEAR_CHOICES = tuple(
    ('{}/{}'.format(year, str(year + 1)[-2:]),) * 2
    for year in range(2011, 2021)
)
class IBMData(models.Model):
    """One record of IBM data, unique per (ibmIdentifier, fy).
    Most fields mirror columns of an imported IBM data file; the
    costCentre/account/service/activity/project/job fields form the
    GL coding of the record.
    """
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    ibmIdentifier = models.CharField(
        max_length=100,
        verbose_name='IBMId',
        help_text='IBM Identifier')
    budgetArea = models.CharField(max_length=100, db_index=True)
    projectSponsor = models.CharField(max_length=100, db_index=True)
    corporatePlanNo = models.CharField(max_length=100, db_index=True)
    strategicPlanNo = models.CharField(max_length=100, db_index=True)
    regionalSpecificInfo = models.TextField()
    servicePriorityID = models.CharField(max_length=100)
    annualWPInfo = models.TextField()
    costCentre = models.CharField(max_length=4, null=True, blank=True, db_index=True)
    account = models.IntegerField(null=True, blank=True)
    service = models.IntegerField(null=True, blank=True, db_index=True)
    activity = models.CharField(max_length=4, null=True, blank=True)
    project = models.CharField(max_length=6, null=True, blank=True)
    job = models.CharField(max_length=6, null=True, blank=True)
    def __str__(self):
        return self.ibmIdentifier
    class Meta:
        unique_together = [('ibmIdentifier', 'fy')]
        verbose_name = 'IBM data'
        verbose_name_plural = 'IBM data'
class GLPivDownload(models.Model):
    """A single general-ledger pivot line imported from a download file,
    unique per (gLCode, fy).
    """
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    # Parsed date form of ``downloadPeriod``; populated automatically in save().
    download_period = models.DateField(blank=True, null=True)
    downloadPeriod = models.CharField(max_length=10)
    costCentre = models.CharField(max_length=4, db_index=True)
    account = models.IntegerField(db_index=True)
    service = models.IntegerField(db_index=True)
    activity = models.CharField(max_length=4, db_index=True)
    resource = models.IntegerField(db_index=True)
    project = models.CharField(max_length=6)
    job = models.CharField(max_length=6)
    shortCode = models.CharField(max_length=20)
    shortCodeName = models.CharField(max_length=200)
    gLCode = models.CharField(max_length=30)
    ptdActual = models.DecimalField(max_digits=10, decimal_places=2)
    ptdBudget = models.DecimalField(max_digits=10, decimal_places=2)
    ytdActual = models.DecimalField(max_digits=10, decimal_places=2)
    ytdBudget = models.DecimalField(max_digits=10, decimal_places=2)
    fybudget = models.DecimalField(max_digits=12, decimal_places=2)
    ytdVariance = models.DecimalField(max_digits=10, decimal_places=2)
    ccName = models.CharField(max_length=100)
    serviceName = models.CharField(max_length=100)
    activityName = models.CharField(max_length=100)
    resourceName = models.CharField(max_length=100)
    projectName = models.CharField(max_length=100)
    jobName = models.CharField(max_length=100)
    codeID = models.CharField(
        max_length=30, db_index=True,
        help_text="This should match an IBMData object's IBMIdentifier field.")
    resNameNo = models.CharField(max_length=100)
    actNameNo = models.CharField(max_length=100)
    projNameNo = models.CharField(max_length=100)
    regionBranch = models.CharField(max_length=100, db_index=True)
    division = models.CharField(max_length=100, db_index=True)
    resourceCategory = models.CharField(max_length=100)
    wildfire = models.CharField(max_length=30)
    expenseRevenue = models.CharField(max_length=7)
    fireActivities = models.CharField(max_length=50)
    mPRACategory = models.CharField(max_length=100)
    class Meta:
        unique_together = [('gLCode', 'fy')]
        verbose_name = 'GL pivot download'
        verbose_name_plural = 'GL pivot downloads'
    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        """Override save() to parse the ``downloadPeriod`` string
        (dd/mm/YYYY) into the ``download_period`` date field.
        """
        if self.downloadPeriod:
            self.download_period = datetime.strptime(self.downloadPeriod, "%d/%m/%Y")
        # Bug fix: forward *args/**kwargs (e.g. `using`, `update_fields`)
        # to the parent implementation instead of silently dropping them.
        super().save(force_insert, force_update, *args, **kwargs)
class CorporateStrategy(models.Model):
    """A corporate strategy, unique per (corporateStrategyNo, fy)."""
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    corporateStrategyNo = models.CharField(max_length=100)
    description1 = models.TextField(null=True)
    description2 = models.TextField(null=True)
    def __str__(self):
        # Bug fix: description1 is nullable; guard against len(None)/None
        # slicing raising TypeError.
        if not self.description1:
            return ''
        # Truncate description text longer than 100 characters, breaking
        # on a word boundary.
        if len(self.description1) <= 100:
            return self.description1
        else:
            desc_trunc = ' '.join(self.description1[:101].split(' ')[0:-1])
            return '{0} (...more...)'.format(desc_trunc)
    class Meta:
        unique_together = [('corporateStrategyNo', 'fy')]
        verbose_name_plural = 'corporate strategies'
class ServicePriority(models.Model):
    """
    Abstract base class for service-priority records; the concrete
    subclasses below add their own extra fields. Subclasses are intended
    to be unique per (servicePriorityNo, fy).
    """
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    categoryID = models.CharField(max_length=100, null=True, blank=True, db_index=True)
    servicePriorityNo = models.CharField(max_length=100, null=False, default='-1', db_index=True)
    strategicPlanNo = models.CharField(max_length=100, null=True, blank=True)
    corporateStrategyNo = models.CharField(
        max_length=100,
        null=True,
        blank=True)
    description = models.TextField(null=True)
    pvsExampleAnnWP = models.TextField()
    pvsExampleActNo = models.TextField()
    def __str__(self):
        return '{0}: {1}'.format(self.pk, self.servicePriorityNo)
    class Meta:
        abstract = True
        # NOTE: unique_together on an abstract Meta is inherited by
        # subclasses unless they declare their own Meta.
        unique_together = [('servicePriorityNo', 'fy')]
class GeneralServicePriority(ServicePriority):
    """General service priority; adds a second description field."""
    description2 = models.TextField(null=True)
    class Meta:
        verbose_name_plural = 'general service priorities'
class NCServicePriority(ServicePriority):
    """NC service priority; adds asset/target/action/milestone fields."""
    assetNo = models.CharField(max_length=5)
    asset = models.TextField()
    targetNo = models.CharField(max_length=30)
    target = models.TextField()
    actionNo = models.CharField(max_length=30)
    action = models.TextField()
    mileNo = models.CharField(max_length=30)
    milestone = models.TextField()
    class Meta:
        unique_together = [('servicePriorityNo', 'fy')]
        verbose_name = 'NC service priority'
        verbose_name_plural = 'NC service priorities'
class PVSServicePriority(ServicePriority):
    """PVS service priority; adds one extra free-text field."""
    servicePriority1 = models.TextField()
    class Meta:
        verbose_name = 'PVS service priority'
        verbose_name_plural = 'PVS service priorities'
class SFMServicePriority(ServicePriority):
    """SFM service priority; adds region/branch and a second description."""
    regionBranch = models.CharField(max_length=20)
    description2 = models.TextField()
    class Meta:
        verbose_name = 'SFM service priority'
        verbose_name_plural = 'SFM service priorities'
class ERServicePriority(ServicePriority):
    """ER service priority; adds a classification field."""
    classification = models.TextField()
    class Meta:
        verbose_name = 'ER service priority'
        verbose_name_plural = 'ER service priorities'
class NCStrategicPlan(models.Model):
    """An NC strategic plan row, unique per (strategicPlanNo, fy)."""
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    strategicPlanNo = models.CharField(max_length=100)
    directionNo = models.CharField(max_length=100)
    direction = models.TextField()
    # NOTE: the following four fields break the lowerCamelCase convention
    # used elsewhere in this module (capitalised first letter).
    AimNo = models.CharField(max_length=100)
    Aim1 = models.TextField()
    Aim2 = models.TextField()
    ActionNo = models.TextField()
    Action = models.TextField()
    class Meta:
        unique_together = [('strategicPlanNo', 'fy')]
        verbose_name = 'NC strategic plan'
        verbose_name_plural = 'NC strategic plans'
class Outcomes(models.Model):
    """Quarterly outcome input text recorded against a financial year."""
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    q1Input = models.TextField()
    q2Input = models.TextField(blank=True)
    q3Input = models.TextField(blank=True)
    q4Input = models.TextField(blank=True)
    def __str__(self):
        # Bug fix: __str__ must return a str; self.fy is a FinancialYear
        # instance (or None), so returning it directly raises TypeError.
        return str(self.fy)
    class Meta:
        verbose_name_plural = 'outcomes'
class ServicePriorityMappings(models.Model):
    """Maps a cost centre number to its service-priority categories."""
    fy = models.ForeignKey(FinancialYear, on_delete=models.PROTECT, blank=True, null=True)
    costCentreNo = models.CharField(max_length=4)
    wildlifeManagement = models.CharField(max_length=100)
    parksManagement = models.CharField(max_length=100)
    forestManagement = models.CharField(max_length=100)
    def __str__(self):
        return self.costCentreNo
    class Meta:
        verbose_name_plural = 'Service Priority Mappings'
| 38.15678
| 98
| 0.688284
|
4a0d7fd799218dfbcc5053638f47e3633dfdd1ee
| 2,526
|
py
|
Python
|
mmpdblib/playhouse/db_url.py
|
trumanw/mmpdb
|
c73f81ea7601202e9f113dc87030414d03e71b20
|
[
"MIT"
] | 102
|
2017-08-15T17:17:27.000Z
|
2022-03-22T22:34:03.000Z
|
mmpdblib/playhouse/db_url.py
|
trumanw/mmpdb
|
c73f81ea7601202e9f113dc87030414d03e71b20
|
[
"MIT"
] | 43
|
2018-04-27T00:56:32.000Z
|
2022-03-24T17:43:19.000Z
|
mmpdblib/playhouse/db_url.py
|
trumanw/mmpdb
|
c73f81ea7601202e9f113dc87030414d03e71b20
|
[
"MIT"
] | 39
|
2017-08-15T11:51:47.000Z
|
2022-03-26T03:35:12.000Z
|
from __future__ import absolute_import
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from ..peewee import *
from .pool import PooledMySQLDatabase
from .pool import PooledPostgresqlDatabase
try:
from .pool import PooledPostgresqlExtDatabase
except ImportError:
PooledPostgresqlExtDatabase = None
from .sqlite_ext import SqliteExtDatabase
try:
from .apsw_ext import APSWDatabase
except ImportError:
APSWDatabase = None
try:
from .berkeleydb import BerkeleyDatabase
except ImportError:
BerkeleyDatabase = None
try:
from .postgres_ext import PostgresqlExtDatabase
except ImportError:
PostgresqlExtDatabase = None
# Maps a URL scheme to the peewee database class implementing it.
# Values may be None when the optional backend's import failed above.
schemes = {
    'apsw': APSWDatabase,
    'berkeleydb': BerkeleyDatabase,
    'mysql': MySQLDatabase,
    'mysql+pool': PooledMySQLDatabase,
    'postgres': PostgresqlDatabase,
    'postgresql': PostgresqlDatabase,
    'postgresext': PostgresqlExtDatabase,
    'postgresqlext': PostgresqlExtDatabase,
    'postgres+pool': PooledPostgresqlDatabase,
    'postgresql+pool': PooledPostgresqlDatabase,
    'postgresext+pool': PooledPostgresqlExtDatabase,
    'postgresqlext+pool': PooledPostgresqlExtDatabase,
    'sqlite': SqliteDatabase,
    'sqliteext': SqliteExtDatabase,
}
def parseresult_to_dict(parsed):
    """Translate a urlparse() result into a dict of connection kwargs.

    The leading '/' of the URL path is stripped to produce the database
    name; user/password/host/port are only included when present.
    """
    connect_kwargs = {'database': parsed.path[1:]}
    optional_parts = (
        ('user', parsed.username),
        ('password', parsed.password),
        ('host', parsed.hostname),
        ('port', parsed.port),
    )
    for key, value in optional_parts:
        if value:
            connect_kwargs[key] = value
    # MySQL's driver expects "passwd" rather than "password".
    if parsed.scheme == 'mysql' and 'password' in connect_kwargs:
        connect_kwargs['passwd'] = connect_kwargs.pop('password')
    return connect_kwargs
def parse(url):
    """Parse a database URL string into a dict of connection kwargs."""
    return parseresult_to_dict(urlparse(url))
def connect(url, **connect_params):
    """Instantiate the database class registered for the URL's scheme.

    Extra keyword arguments override values parsed from the URL.
    Raises RuntimeError for unknown schemes or for schemes whose optional
    backend failed to import (their entry in `schemes` is None).
    """
    parsed = urlparse(url)
    connect_kwargs = parseresult_to_dict(parsed)
    connect_kwargs.update(connect_params)
    database_class = schemes.get(parsed.scheme)
    if database_class is None:
        # Bug fix: the original tested `database_class in schemes`, which
        # compares the value None against the dict's string keys and is
        # never true, making the "required library could not be imported"
        # branch unreachable. Test the scheme instead.
        if parsed.scheme in schemes:
            raise RuntimeError('Attempted to use "%s" but a required library '
                               'could not be imported.' % parsed.scheme)
        else:
            raise RuntimeError('Unrecognized or unsupported scheme: "%s".' %
                               parsed.scheme)
    return database_class(**connect_kwargs)
| 30.804878
| 78
| 0.712589
|
4a0d825b0c067e23f7a00acfe7524608226cf0a2
| 2,565
|
py
|
Python
|
TermTk/TTkLayouts/boxlayout.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | 1
|
2022-02-28T16:33:25.000Z
|
2022-02-28T16:33:25.000Z
|
TermTk/TTkLayouts/boxlayout.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
TermTk/TTkLayouts/boxlayout.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
### Box Layout - [Tutorial](https://github.com/ceccopierangiolieugenio/pyTermTk/blob/main/tutorial/002-layout.md)
"""
from TermTk.TTkCore.log import TTkLog
from TermTk.TTkLayouts.gridlayout import TTkGridLayout
class TTkHBoxLayout(TTkGridLayout):
    """The TTkHBoxLayout class lines up widgets horizontally
    ::
        TTkHBoxLayout
        ╔═════════╤═════════╤═════════╗
        ║ Widget1 │ Widget2 │ Widget3 ║
        ║         │         │         ║
        ║         │         │         ║
        ║         │         │         ║
        ║         │         │         ║
        ║         │         │         ║
        ╚═════════╧═════════╧═════════╝
    """
    # Behaviour is inherited unchanged from TTkGridLayout — presumably its
    # default add methods already place items along a row (confirm in
    # gridlayout.py); contrast with TTkVBoxLayout below, which overrides
    # addItem/addWidget to stack items in rows.
    pass
class TTkVBoxLayout(TTkGridLayout):
    """The TTkVBoxLayout class lines up widgets vertically
    ::
        TTkVBoxLayout
        ╔═════════════════════════════╗
        ║         Widget 1            ║
        ╟─────────────────────────────╢
        ║         Widget 2            ║
        ╟─────────────────────────────╢
        ║         Widget 3            ║
        ╟─────────────────────────────╢
        ║         Widget 4            ║
        ╚═════════════════════════════╝
    """
    def addItem(self, item):
        # Append the item at grid position (self.count(), 0) — presumably
        # (row, column) — so each new item occupies a new row in column 0.
        TTkGridLayout.addItem(self, item, self.count(), 0)
    def addWidget(self, widget):
        # Same vertical stacking for widgets: next row, column 0.
        TTkGridLayout.addWidget(self, widget, self.count(), 0)
| 34.662162
| 113
| 0.562573
|
4a0d82ba7d8a5d926a50cd19c28c288cc2ab460f
| 1,279
|
py
|
Python
|
setup.py
|
mbelles/dwolla-v2-python
|
d3886b5272223d3825b31bd077c7c75b1cf218c3
|
[
"MIT"
] | null | null | null |
setup.py
|
mbelles/dwolla-v2-python
|
d3886b5272223d3825b31bd077c7c75b1cf218c3
|
[
"MIT"
] | null | null | null |
setup.py
|
mbelles/dwolla-v2-python
|
d3886b5272223d3825b31bd077c7c75b1cf218c3
|
[
"MIT"
] | null | null | null |
import os
import sys
import warnings
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata, collected first and then handed to setup() in one call.
_metadata = dict(
    name='dwollav2',
    version='1.2.4',
    packages=['dwollav2'],
    install_requires=[
        'requests>=2.9.1',
        'future>=0.15.2'
    ],
    test_suite='dwollav2.test.all',
    url='https://docsv2.dwolla.com',
    license='MIT',
    author='Stephen Ausman',
    author_email='stephen@dwolla.com',
    # The long description is read from the README at build time.
    long_description=open('README.rst').read(),
    description='Official Dwolla V2 API client',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
setup(**_metadata)
| 30.452381
| 71
| 0.605942
|
4a0d82d7f73922c8690167148c24606cf5bf7c6e
| 8,835
|
py
|
Python
|
rally/plugins/openstack/context/vm/custom_image.py
|
TeamXgrid/xgrid-rally
|
828ff148da4395af615c79cd94db7878e1e08491
|
[
"Apache-2.0"
] | 1
|
2018-06-03T13:52:28.000Z
|
2018-06-03T13:52:28.000Z
|
rally/plugins/openstack/context/vm/custom_image.py
|
TeamXgrid/xgrid-rally
|
828ff148da4395af615c79cd94db7878e1e08491
|
[
"Apache-2.0"
] | null | null | null |
rally/plugins/openstack/context/vm/custom_image.py
|
TeamXgrid/xgrid-rally
|
828ff148da4395af615c79cd94db7878e1e08491
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from rally.common import broker
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils
from rally import consts
from rally import osclients
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.scenarios.vm import vmtasks
from rally.plugins.openstack import types
from rally.plugins.openstack.wrappers import glance as glance_wrapper
from rally.task import context
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@context.configure(name="custom_image", order=500, hidden=True)
class BaseCustomImageGenerator(context.Context):
    """Base class for the contexts providing customized image with.
    Every context class for the specific customization must implement
    the method `_customize_image` that is able to connect to the server
    using SSH and e.g. install applications inside it.
    This is used e.g. to install the benchmark application using SSH
    access.
    This base context class provides a way to prepare an image with
    custom preinstalled applications. Basically, this code boots a VM, calls
    the `_customize_image` and then snapshots the VM disk, removing the VM
    afterwards. The image UUID is stored in the user["custom_image"]["id"]
    and can be used afterwards by scenario.
    """
    # JSON schema validating the context configuration in the task file.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "image": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                }
            },
            "flavor": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                }
            },
            "username": {
                "type": "string"
            },
            "password": {
                "type": "string"
            },
            "floating_network": {
                "type": "string"
            },
            "internal_network": {
                "type": "string"
            },
            "port": {
                "type": "integer",
                "minimum": 1,
                "maximum": 65535
            },
            "userdata": {
                "type": "string"
            },
            "workers": {
                "type": "integer",
                "minimum": 1,
            }
        },
        "required": ["image", "flavor"],
        "additionalProperties": False
    }
    # Defaults merged into the user-supplied configuration.
    DEFAULT_CONFIG = {
        "username": "root",
        "port": 22,
        "workers": 1
    }
    @logging.log_task_wrapper(LOG.info, _("Enter context: `custom_image`"))
    def setup(self):
        """Creates custom image(s) with preinstalled applications.
        When admin is present creates one public image that is usable
        from all the tenants and users. Otherwise create one image
        per user and tenant.
        """
        if "admin" in self.context:
            # NOTE(pboldin): Create by first user and make it public by
            # the admin
            user = self.context["users"][0]
            tenant = self.context["tenants"][user["tenant_id"]]
            nics = None
            if "networks" in tenant:
                nics = [{"net-id": tenant["networks"][0]["id"]}]
            custom_image = self.create_one_image(user, nics=nics)
            # Share the single image with every tenant in the context.
            for tenant in self.context["tenants"].values():
                tenant["custom_image"] = custom_image
        else:
            # No admin: build one image per (user, tenant) pair, fanned out
            # across self.config["workers"] consumers via the broker.
            def publish(queue):
                users = self.context.get("users", [])
                for user, tenant_id in utils.iterate_per_tenants(users):
                    queue.append((user, tenant_id))
            def consume(cache, args):
                user, tenant_id = args
                tenant = self.context["tenants"][tenant_id]
                tenant["custom_image"] = self.create_one_image(user)
            broker.run(publish, consume, self.config["workers"])
    def create_one_image(self, user, **kwargs):
        """Create one image for the user."""
        clients = osclients.Clients(user["credential"])
        admin_clients = osclients.Clients(self.context["admin"]["credential"])
        # Resolve the configured image/flavor names to IDs.
        image_id = types.GlanceImage.transform(
            clients=clients, resource_config=self.config["image"])
        flavor_id = types.Flavor.transform(
            clients=clients, resource_config=self.config["flavor"])
        vm_scenario = vmtasks.BootRuncommandDelete(self.context,
                                                   clients=clients)
        glance_wrap = glance_wrapper.wrap(admin_clients.glance, self)
        # Boot a VM with a floating IP so we can reach it over SSH.
        server, fip = vm_scenario._boot_server_with_fip(
            image=image_id, flavor=flavor_id,
            floating_network=self.config.get("floating_network"),
            userdata=self.config.get("userdata"),
            key_name=user["keypair"]["name"],
            security_groups=[user["secgroup"]["name"]],
            **kwargs)
        try:
            LOG.debug("Installing benchmark on %r %s", server, fip["ip"])
            self.customize_image(server, fip, user)
            LOG.debug("Stopping server %r", server)
            vm_scenario._stop_server(server)
            LOG.debug("Creating snapshot for %r", server)
            custom_image = vm_scenario._create_image(server)
            glance_wrap.set_visibility(custom_image)
        finally:
            # The booted VM is only needed to build the snapshot.
            vm_scenario._delete_server_with_fip(server, fip)
        if hasattr(custom_image, "to_dict"):
            # NOTE(stpierre): Glance v1 images are objects that can be
            # converted to dicts; Glance v2 images are already
            # dict-like
            custom_image = custom_image.to_dict()
        return custom_image
    @logging.log_task_wrapper(LOG.info, _("Exit context: `custom_image`"))
    def cleanup(self):
        """Delete created custom image(s)."""
        if "admin" in self.context:
            # Single shared image: delete once, clear the key everywhere.
            user = self.context["users"][0]
            tenant = self.context["tenants"][user["tenant_id"]]
            if "custom_image" in tenant:
                self.delete_one_image(user, tenant["custom_image"])
                tenant.pop("custom_image")
        else:
            # Per-tenant images: mirror setup()'s fan-out for deletion.
            def publish(queue):
                users = self.context.get("users", [])
                for user, tenant_id in utils.iterate_per_tenants(users):
                    queue.append((user, tenant_id))
            def consume(cache, args):
                user, tenant_id = args
                tenant = self.context["tenants"][tenant_id]
                if "custom_image" in tenant:
                    self.delete_one_image(user, tenant["custom_image"])
                    tenant.pop("custom_image")
            broker.run(publish, consume, self.config["workers"])
    def delete_one_image(self, user, custom_image):
        """Delete the image created for the user and tenant."""
        clients = osclients.Clients(user["credential"])
        nova_scenario = nova_utils.NovaScenario(
            context=self.context, clients=clients)
        # Failures are logged but not re-raised so cleanup can proceed.
        with logging.ExceptionLogger(
                LOG, _("Unable to delete image %s") % custom_image["id"]):
            custom_image = nova_scenario.clients("nova").images.get(
                custom_image["id"])
            nova_scenario._delete_image(custom_image)
    @logging.log_task_wrapper(LOG.info,
                              _("Custom image context: customizing"))
    def customize_image(self, server, ip, user):
        # Thin logged wrapper around the subclass hook.
        return self._customize_image(server, ip, user)
    @abc.abstractmethod
    def _customize_image(self, server, ip, user):
        """Override this method with one that customizes image.
        Basically, code can simply call `VMScenario._run_command` function
        specifying an installation script and interpreter. This script will
        be then executed using SSH.
        :param server: nova.Server instance
        :param ip: dict with server IP details
        :param user: user who started a VM instance. Used to extract keypair
        """
        pass
| 35.625
| 78
| 0.584946
|
4a0d82ec4dd58e00b2bf735f7caad9f1072f5ca9
| 7,618
|
py
|
Python
|
electrum_dash/verifier.py
|
sibcool/electrum-dash
|
e123699b34260fe799aa5da818c33fa9cdf8e4c9
|
[
"MIT"
] | 4
|
2021-02-14T08:48:36.000Z
|
2021-04-23T11:14:41.000Z
|
electrum_dash/verifier.py
|
sibcool/electrum-dash
|
e123699b34260fe799aa5da818c33fa9cdf8e4c9
|
[
"MIT"
] | 79
|
2019-04-03T06:56:46.000Z
|
2019-10-11T17:56:43.000Z
|
electrum_dash/verifier.py
|
sibcool/electrum-dash
|
e123699b34260fe799aa5da818c33fa9cdf8e4c9
|
[
"MIT"
] | 15
|
2018-04-02T11:21:43.000Z
|
2020-08-14T20:27:29.000Z
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2012 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Sequence, Optional
from .util import ThreadJob, bh2u, VerifiedTxInfo
from .bitcoin import Hash, hash_decode, hash_encode
from .transaction import Transaction
from .blockchain import hash_header
class MerkleVerificationFailure(Exception):
    """Base class for SPV merkle-proof verification errors."""


class MissingBlockHeader(MerkleVerificationFailure):
    """The header for the transaction's block is not available locally."""


class MerkleRootMismatch(MerkleVerificationFailure):
    """The recomputed merkle root differs from the block header's root."""


class InnerNodeOfSpvProofIsValidTx(MerkleVerificationFailure):
    """An inner node of the proof deserializes as a valid transaction."""
class SPV(ThreadJob):
    """ Simple Payment Verification """
    def __init__(self, network, wallet):
        self.wallet = wallet
        self.network = network
        self.blockchain = network.blockchain()
        self.merkle_roots = {}  # txid -> merkle root (once it has been verified)
        self.requested_merkle = set()  # txid set of pending requests
    def run(self):
        """Periodic job: request merkle proofs for unverified txs whose
        block header is locally available, and undo verifications if the
        active chain changed.
        """
        interface = self.network.interface
        if not interface:
            return
        blockchain = interface.blockchain
        if not blockchain:
            return
        local_height = self.network.get_local_height()
        unverified = self.wallet.get_unverified_txs()
        for tx_hash, tx_height in unverified.items():
            # do not request merkle branch before headers are available
            if tx_height <= 0 or tx_height > local_height:
                continue
            header = blockchain.read_header(tx_height)
            if header is None:
                # Header missing: fetch the checkpointed 2016-header chunk
                # that contains this height, if one exists.
                index = tx_height // 2016
                if index < len(blockchain.checkpoints):
                    self.network.request_chunk(interface, index)
            elif (tx_hash not in self.requested_merkle
                    and tx_hash not in self.merkle_roots):
                self.network.get_merkle_for_transaction(
                        tx_hash,
                        tx_height,
                        self.verify_merkle)
                self.print_error('requested merkle', tx_hash)
                self.requested_merkle.add(tx_hash)
        if self.network.blockchain() != self.blockchain:
            self.blockchain = self.network.blockchain()
            self.undo_verifications()
    def verify_merkle(self, response):
        """Callback for get_merkle_for_transaction(): validate the
        server-supplied proof and record the tx as verified on success.
        """
        if self.wallet.verifier is None:
            return  # we have been killed, this was just an orphan callback
        if response.get('error'):
            self.print_error('received an error:', response)
            return
        params = response['params']
        merkle = response['result']
        # Verify the hash of the server-provided merkle branch to a
        # transaction matches the merkle root of its block
        tx_hash = params[0]
        tx_height = merkle.get('block_height')
        pos = merkle.get('pos')
        merkle_branch = merkle.get('merkle')
        header = self.network.blockchain().read_header(tx_height)
        try:
            verify_tx_is_in_block(tx_hash, merkle_branch, pos, header, tx_height)
        except MerkleVerificationFailure as e:
            self.print_error(str(e))
            # FIXME: we should make a fresh connection to a server
            # to recover from this, as this TX will now never verify
            return
        # we passed all the tests
        self.merkle_roots[tx_hash] = header.get('merkle_root')
        try:
            # note: we could pop in the beginning, but then we would request
            # this proof again in case of verification failure from the same server
            self.requested_merkle.remove(tx_hash)
        except KeyError: pass
        self.print_error("verified %s" % tx_hash)
        header_hash = hash_header(header)
        vtx_info = VerifiedTxInfo(tx_height, header.get('timestamp'), pos, header_hash)
        self.wallet.add_verified_tx(tx_hash, vtx_info)
        if self.is_up_to_date() and self.wallet.is_up_to_date():
            self.wallet.save_verified_tx(write=True)
    @classmethod
    def hash_merkle_root(cls, merkle_branch: Sequence[str], tx_hash: str, leaf_pos_in_tree: int):
        """Return calculated merkle root."""
        try:
            h = hash_decode(tx_hash)
            merkle_branch_bytes = [hash_decode(item) for item in merkle_branch]
            int(leaf_pos_in_tree)  # raise if invalid
        except Exception as e:
            raise MerkleVerificationFailure(e)
        # Bit i of leaf_pos_in_tree selects whether our running hash is the
        # right (1) or left (0) child at level i of the tree.
        for i, item in enumerate(merkle_branch_bytes):
            h = Hash(item + h) if ((leaf_pos_in_tree >> i) & 1) else Hash(h + item)
            cls._raise_if_valid_tx(bh2u(h))
        return hash_encode(h)
    @classmethod
    def _raise_if_valid_tx(cls, raw_tx: str):
        # If an inner node of the merkle proof is also a valid tx, chances are, this is an attack.
        # https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-June/016105.html
        # https://lists.linuxfoundation.org/pipermail/bitcoin-dev/attachments/20180609/9f4f5b1f/attachment-0001.pdf
        # https://bitcoin.stackexchange.com/questions/76121/how-is-the-leaf-node-weakness-in-merkle-trees-exploitable/76122#76122
        tx = Transaction(raw_tx)
        try:
            tx.deserialize()
        except:
            pass
        else:
            raise InnerNodeOfSpvProofIsValidTx()
    def undo_verifications(self):
        """Drop SPV proofs for txs past the fork point of the new chain."""
        height = self.blockchain.get_forkpoint()
        tx_hashes = self.wallet.undo_verifications(self.blockchain, height)
        for tx_hash in tx_hashes:
            self.print_error("redoing", tx_hash)
            self.remove_spv_proof_for_tx(tx_hash)
    def remove_spv_proof_for_tx(self, tx_hash):
        # Forget both the verified root and any in-flight request.
        self.merkle_roots.pop(tx_hash, None)
        try:
            self.requested_merkle.remove(tx_hash)
        except KeyError:
            pass
    def is_up_to_date(self):
        # Up to date == no outstanding merkle-proof requests.
        return not self.requested_merkle
def verify_tx_is_in_block(tx_hash: str, merkle_branch: Sequence[str],
                          leaf_pos_in_tree: int, block_header: Optional[dict],
                          block_height: int) -> None:
    """Raise MerkleVerificationFailure if verification fails."""
    # A missing header means we cannot check anything at all.
    if not block_header:
        raise MissingBlockHeader("merkle verification failed for {} (missing header {})"
                                 .format(tx_hash, block_height))
    claimed_root = block_header.get('merkle_root')
    calc_merkle_root = SPV.hash_merkle_root(merkle_branch, tx_hash, leaf_pos_in_tree)
    if claimed_root != calc_merkle_root:
        raise MerkleRootMismatch("merkle verification failed for {} ({} != {})".format(
            tx_hash, claimed_root, calc_merkle_root))
| 43.284091
| 129
| 0.66461
|
4a0d8374bef8e6fcaa4ba56ef6474db5fee723da
| 298
|
py
|
Python
|
Python/inputFromUser.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
Python/inputFromUser.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
Python/inputFromUser.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
# Study snippets, deliberately kept inert inside triple-quoted strings;
# uncomment one block at a time to run it.
#Input
'''
name = input("Enter your name: ")
print("Hello " + name +".")
'''
'''
#Basic calculator
val1 = input("Enter a number: ")
val2 = input("Enter another number: ")
res = int(val1) + int(val2) #Float can be used for decimal numbers.
print("The sum of these two numbers is: " + str(res))
'''
| 22.923077
| 68
| 0.630872
|
4a0d83a501b01259a11e5639a7677cb1ec497fb7
| 2,206
|
py
|
Python
|
mbot/plugins/helper.py
|
aafusam/Phono-Music-Bot
|
e279863371bedb408a0336b31ed75343568489aa
|
[
"MIT"
] | null | null | null |
mbot/plugins/helper.py
|
aafusam/Phono-Music-Bot
|
e279863371bedb408a0336b31ed75343568489aa
|
[
"MIT"
] | null | null | null |
mbot/plugins/helper.py
|
aafusam/Phono-Music-Bot
|
e279863371bedb408a0336b31ed75343568489aa
|
[
"MIT"
] | null | null | null |
import base64
import jiosaavn
from pyDes import *
def format_song(data, lyrics):
    """Normalise a raw JioSaavn song dict in place and return it.

    Builds a playable media_url (320/160 kbps depending on the '320kbps'
    flag), decodes HTML entities in the text fields, upgrades the artwork
    URL, and optionally attaches lyrics.
    """
    try:
        url = data['media_preview_url']
        url = url.replace("preview", "aac")
        if data['320kbps'] == "true":
            url = url.replace("_96_p.mp4", "_320.mp4")
        else:
            url = url.replace("_96_p.mp4", "_160.mp4")
        data['media_url'] = url
    except (KeyError, TypeError):
        # Bug fix: the original `except KeyError or TypeError:` evaluates the
        # expression `KeyError or TypeError` to KeyError, so TypeError was
        # never caught. Fall back to decrypting the encrypted media URL.
        data['media_url'] = decrypt_url(data['encrypted_media_url'])
        if data['320kbps'] != "true":
            data['media_url'] = data['media_url'].replace("_320.mp4", "_160.mp4")
    data['song'] = format(data['song'])
    data['music'] = format(data['music'])
    data['singers'] = format(data['singers'])
    data['starring'] = format(data['starring'])
    data['album'] = format(data['album'])
    data["primary_artists"] = format(data["primary_artists"])
    data['image'] = data['image'].replace("150x150", "500x500")
    if lyrics:
        if data['has_lyrics'] == 'true':
            data['lyrics'] = jiosaavn.get_lyrics(data['id'])
        else:
            data['lyrics'] = None
    try:
        # Bug fix: the original replaced "©" with itself (a no-op, left over
        # from HTML-entity garbling); decode the "&copy;" entity instead.
        data['copyright_text'] = data['copyright_text'].replace("&copy;", "©")
    except KeyError:
        pass
    return data
def format_album(data, lyrics):
    """Normalise a raw JioSaavn album dict in place and return it."""
    data['image'] = data['image'].replace("150x150", "500x500")
    # Decode HTML entities in the album's text fields.
    for key in ('name', 'primary_artists', 'title'):
        data[key] = format(data[key])
    # Each track dict is normalised in place by format_song().
    for track in data['songs']:
        format_song(track, lyrics)
    return data
def format_playlist(data, lyrics):
    """Normalise a raw JioSaavn playlist dict in place and return it."""
    # Decode HTML entities in the playlist's text fields.
    for key in ('firstname', 'listname'):
        data[key] = format(data[key])
    # Each track dict is normalised in place by format_song().
    for track in data['songs']:
        format_song(track, lyrics)
    return data
def format(string):
    """Decode the HTML entities commonly found in JioSaavn API strings.

    NOTE: the name shadows the builtin format(); kept for backward
    compatibility with callers in this module.
    """
    # Bug fix: the original literals were garbled by HTML-entity
    # unescaping (e.g. `.replace(""","'")`, which is not valid Python);
    # restore the intended entity-decoding replacements.
    return string.encode().decode().replace("&quot;", "'").replace("&amp;", "&").replace("&#039;", "'")
def decrypt_url(url):
    """Decrypt a base64+DES-ECB encrypted media URL and upgrade it to the
    320kbps variant."""
    cipher = des(b"38346591", ECB, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
    encrypted = base64.b64decode(url.strip())
    plain = cipher.decrypt(encrypted, padmode=PAD_PKCS5).decode('utf-8')
    return plain.replace("_96.mp4", "_320.mp4")
| 34.46875
| 102
| 0.607888
|
4a0d83c1947d487ee5bf62a06a8d4dc70d3fe28e
| 6,844
|
py
|
Python
|
run_container.py
|
ruema/secure_container
|
bdea14694b642c0b62e2a87305af3005b7c28950
|
[
"MIT"
] | null | null | null |
run_container.py
|
ruema/secure_container
|
bdea14694b642c0b62e2a87305af3005b7c28950
|
[
"MIT"
] | null | null | null |
run_container.py
|
ruema/secure_container
|
bdea14694b642c0b62e2a87305af3005b7c28950
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import os
import time
import json
import uuid
import subprocess
import argparse
import ctypes
import ctypes.util
from pathlib import Path
import crypt_fs
# Repository root: layers/, images/, runtime/, tools/ all live relative to this file.
BASE_PATH = Path(__file__).parent.absolute()
# Extra namespace types handed to runc (mount+user are joined separately;
# see create_runimage).
NAMESPACES = ["pid", "ipc", "uts"]
# clone(2) flags (from <sched.h>) passed to libc unshare().
CLONE_NEWNS = 0x00020000
CLONE_NEWUSER = 0x10000000
# proc files used to install the single-entry uid/gid mapping of a new userns.
_PATH_PROC_UIDMAP = "/proc/self/uid_map"
_PATH_PROC_GIDMAP = "/proc/self/gid_map"
_PATH_PROC_SETGROUPS = "/proc/self/setgroups"
def setgroups_control(cmd):
    """Write *cmd* to /proc/self/setgroups (called with "deny" before writing
    the gid map of an unprivileged user namespace)."""
    with open(_PATH_PROC_SETGROUPS, 'w') as proc_file:
        proc_file.write(cmd)
def map_id(filename, id_from, id_to):
    """Write a single-entry id mapping ("<from> <to> 1") to *filename*
    (a /proc/self/{uid,gid}_map file)."""
    with open(filename, "w") as mapping:
        mapping.write("{} {} 1".format(id_from, id_to))
def unshare_mount():
    """Enter fresh user+mount namespaces and map root inside the new userns
    to the caller's effective uid/gid.

    Raises RuntimeError when the unshare(2) syscall fails.
    """
    euid = os.geteuid()
    egid = os.getegid()
    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    # unshare(2) returns non-zero on failure
    if libc.unshare(CLONE_NEWUSER | CLONE_NEWNS):
        raise RuntimeError("unshare failed")
    # setgroups must be denied before an unprivileged gid_map write is allowed
    setgroups_control("deny")
    map_id(_PATH_PROC_UIDMAP, 0, euid)
    map_id(_PATH_PROC_GIDMAP, 0, egid)
def need_layer(layer):
    """Ensure *layer*'s tarball is unpacked under work_layers/ (':' in the
    layer name is rewritten to '_' for the directory name). No-op when the
    directory already exists."""
    target = BASE_PATH / 'work_layers' / layer.replace(':', '_')
    if target.is_dir():
        return
    target.mkdir(parents=True)
    tarball = BASE_PATH / 'layers' / layer
    subprocess.run(['tar', '-xf', tarball], check=False, cwd=target)
def store_layer(path):
    """Tar up the overlay upper dir of runtime bundle *path* into a new
    content-addressed layer file and return its name ("sha265:<digest>")."""
    # temporary random name; renamed to its digest once hashed below
    layerpath = BASE_PATH / 'layers' / str(uuid.uuid4())
    upper = path / 'ovl' / 'upper'
    filenames = [f.name for f in upper.iterdir()]
    subprocess.run(['tar', '-czf', layerpath] + filenames, check=False, cwd=upper)
    sha = subprocess.run(['sha256sum', '-b', layerpath], stdout=subprocess.PIPE)
    sha = sha.stdout.split()[0].decode('ASCII')
    # NOTE(review): "sha265" looks like a typo for "sha256" (the digest IS
    # sha256), but it is used consistently within this file, so renaming it
    # would orphan already-stored layers -- confirm before changing.
    layerpath.rename(layerpath.parent / f'sha265:{sha}')
    return f'sha265:{sha}'
def push_image(run_name, name, tag):
    """Package the runtime overlay of bundle *run_name* as a new layer and
    write a schema-1 style manifest for image *name*:*tag*, derived from the
    parent image recorded in the bundle's environment."""
    path = BASE_PATH / 'runtime' / run_name
    layer = store_layer(path)
    with open(path / 'config.json', 'r', encoding='utf8') as input:
        config = json.load(input)
    env = dict(c.split('=', 1) for c in config["process"]["env"])
    # BUG FIX: create_runimage exports the parent image as "container_name";
    # the original read "container_bname", which always raised KeyError.
    parent_name = env["container_name"]
    parent_tag = env["container_tag"]
    manifest = Path(BASE_PATH) / 'images' / parent_name / parent_tag
    with open(manifest, encoding='utf8') as lines:
        manifest = json.load(lines)
    # inherit the parent's architecture/history and append the new layer
    architecture = manifest['architecture']
    layers = manifest['fsLayers']
    layers.append({"blobSum": layer})
    history = manifest['history']
    manifest = {
        "schemaVersion": 1,
        "name": name,
        "tag": tag,
        "architecture": architecture,
        "fsLayers": layers,
        "history": history
    }
    filename = Path(BASE_PATH) / 'images' / name / tag
    # exist_ok: pushing a new tag of an existing image name must not fail
    filename.parent.mkdir(parents=True, exist_ok=True)
    with open(filename, 'w', encoding='utf8') as output:
        # BUG FIX: arguments were swapped (json.dump(output, manifest)),
        # which crashed on every push.
        json.dump(manifest, output)
def create_runimage(name, tag, net=False, work_path=None, encrypt=False, password=""):
    """Materialise image *name*:*tag* as a runnable runc bundle.

    Unpacks all layers, mounts them with fuse-overlayfs (optionally on top of
    a securefs-encrypted upper dir) inside a forked child holding fresh
    user+mount namespaces, then writes the bundle's runc config.json that
    joins those namespaces. Returns the bundle path under runtime/<uuid>.
    """
    run_name = str(uuid.uuid4())
    print(f"Create image {run_name}.")
    path = BASE_PATH / 'runtime' / run_name
    manifest = Path(BASE_PATH) / 'images' / name / tag
    with open(manifest, encoding='utf8') as lines:
        manifest = json.load(lines)
    # the newest history entry's v1Compatibility blob carries Cmd/Env
    mani_config = json.loads(manifest['history'][0]['v1Compatibility'])
    need_layer('ROOT')
    for layer in manifest['fsLayers']:
        need_layer(layer["blobSum"])
    mount_path = path / 'mnt'
    ovl_path = path / 'ovl'
    # pipe only signals "mounts ready" from child to parent
    pipein, pipeout = os.pipe()
    pid = os.fork()
    if not pid:
        # child: set up namespaces + mounts, then park so they stay alive
        os.close(pipein)
        unshare_mount()
        mount_path.mkdir(parents=True)
        if encrypt:
            ovl_path.mkdir()
            secure_fs = crypt_fs.SecureFs()
            if password:
                with open(path / 'encrypt.json', 'w') as cfg:
                    json.dump(secure_fs.generate_config(password), cfg)
            enc_path = path / 'enc'
            enc_path.mkdir()
            # NOTE(review): the mount below always uses the literal pass
            # "password" and a throwaway 1-round config, regardless of the
            # *password* argument persisted above -- confirm this is intended.
            subprocess.run([
                str(BASE_PATH / 'tools' / 'securefs'),
                "mount", "-b",
                "--config", "/dev/stdin",
                "--log", "/dev/null",
                "--pass", "password",
                str(enc_path),
                str(ovl_path)
            ], input=json.dumps(secure_fs.generate_config("password",
                crypt_fs.PBKDF_ALGO_PKCS5, rounds=1)).encode())
        # colon-separated lowerdir list, newest layer first
        layers = ':'.join(
            str(BASE_PATH / 'work_layers' / '{}'.format(l["blobSum"].replace(':','_')))
            for l in reversed(manifest['fsLayers'])
        )
        upper_path = ovl_path / 'upper'
        # NOTE: rebinds the *work_path* parameter, but only in the child
        work_path = ovl_path / 'work'
        upper_path.mkdir(parents=True)
        work_path.mkdir()
        subprocess.run([
            str(BASE_PATH / 'tools' / 'fuse-overlayfs'),
            "-o", f"lowerdir={layers},upperdir={upper_path},workdir={work_path}",
            str(mount_path)
        ])
        os.write(pipeout, b'x')
        time.sleep(9999)
        sys.exit()
    os.close(pipeout)
    # wait for mounts
    _ = os.read(pipein, 1)
    with open(BASE_PATH / 'config' / 'config.json', encoding='utf8') as lines:
        config = json.load(lines)
    config["linux"]["uidMappings"][0]["hostID"] = os.getuid()
    config["linux"]["gidMappings"][0]["hostID"] = os.getgid()
    # join the child's mount+user namespaces; create the rest fresh.
    # NOTE(review): --net drops the 'ipc' namespace here, not a network
    # namespace -- looks suspicious, confirm the intended semantics.
    config["linux"]["namespaces"] = [
        {"type": "mount", "path": f"/proc/{pid}/ns/mnt"},
        {"type": "user", "path": f"/proc/{pid}/ns/user"},
    ] + [
        {"type": ns} for ns in NAMESPACES if not net or ns != 'ipc'
    ]
    config["process"]["args"] = mani_config["config"]["Cmd"]
    config["process"]["env"] = mani_config["config"]["Env"] + [
        f"container_name={name}", f"container_tag={tag}",
    ]
    config["root"] = {"path": str(mount_path), "readonly": False}
    # NOTE(review): if work_path is None (the default, reachable from the
    # CLI), Path(None) raises TypeError -- confirm callers always set it.
    config["mount"].append({
        "source": Path(work_path).absolute(),
        "type": "bind",
        "destination": "/work",
    })
    with open(path / 'config.json', 'w', encoding='utf8') as output:
        json.dump(config, output)
    return path
def main():
    """CLI entry point: parse arguments, build a run bundle and exec runc on it."""
    parser = argparse.ArgumentParser(description='Run container.')
    parser.add_argument('name', help='image name')
    parser.add_argument('tag', help='image tag')
    parser.add_argument('--net', action='store_true', default=False, help='use host network')
    parser.add_argument('--encrypt', action='store_true', default=False, help='encrypt upper layer')
    parser.add_argument('--password', help="password for encryption")
    parser.add_argument('--work-path', help="mount point /work")
    options = parser.parse_args()
    bundle = create_runimage(options.name, options.tag, options.net,
                             options.work_path, options.encrypt, options.password)
    runc = BASE_PATH / 'tools' / 'runc-x86_64'
    subprocess.run([str(runc), '--root', bundle, 'run', '--no-pivot',
                    '--bundle', bundle, bundle.name])


if __name__ == '__main__':
    main()
| 36.021053
| 102
| 0.607832
|
4a0d8651ebf2cd8f6adfa4cd5e44642ac3aa7a17
| 53,505
|
py
|
Python
|
data.py
|
taeyen/graph-generation
|
70787c77205bc02e90fa587c22a64706cb975892
|
[
"MIT"
] | 532
|
2018-05-30T18:41:41.000Z
|
2022-03-29T21:53:39.000Z
|
data.py
|
taeyen/graph-generation
|
70787c77205bc02e90fa587c22a64706cb975892
|
[
"MIT"
] | 16
|
2018-07-19T12:39:56.000Z
|
2022-03-01T08:41:53.000Z
|
data.py
|
taeyen/graph-generation
|
70787c77205bc02e90fa587c22a64706cb975892
|
[
"MIT"
] | 126
|
2018-06-07T06:33:33.000Z
|
2022-03-21T07:16:17.000Z
|
import torch
import torchvision as tv
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from random import shuffle
import networkx as nx
import pickle as pkl
import scipy.sparse as sp
import logging
import random
import shutil
import os
import time
from model import *
from utils import *
# load ENZYMES and PROTEIN and DD dataset
def Graph_load_batch(min_num_nodes = 20, max_num_nodes = 1000, name = 'ENZYMES',node_attributes = True,graph_labels=True):
    '''
    Load a multi-graph benchmark dataset (ENZYMES / PROTEINS / DD) from the
    standard "<name>_A.txt" / "<name>_graph_indicator.txt" text files.

    :param min_num_nodes: keep only graphs with at least this many nodes
    :param max_num_nodes: keep only graphs with at most this many nodes
    :param name: dataset directory/file prefix under dataset/
    :param node_attributes: whether a _node_attributes.txt file exists/is read
    :param graph_labels: whether a _graph_labels.txt file exists/is read
    :return: a list of graphs
    '''
    print('Loading graph dataset: '+str(name))
    G = nx.Graph()
    # load data (node ids in the files are 1-based)
    path = 'dataset/'+name+'/'
    data_adj = np.loadtxt(path+name+'_A.txt', delimiter=',').astype(int)
    if node_attributes:
        data_node_att = np.loadtxt(path+name+'_node_attributes.txt', delimiter=',')
    data_node_label = np.loadtxt(path+name+'_node_labels.txt', delimiter=',').astype(int)
    data_graph_indicator = np.loadtxt(path+name+'_graph_indicator.txt', delimiter=',').astype(int)
    if graph_labels:
        data_graph_labels = np.loadtxt(path+name+'_graph_labels.txt', delimiter=',').astype(int)
    data_tuple = list(map(tuple, data_adj))
    # add all edges to one big graph; per-graph membership comes from the indicator
    G.add_edges_from(data_tuple)
    # add node attributes
    for i in range(data_node_label.shape[0]):
        if node_attributes:
            G.add_node(i+1, feature = data_node_att[i])
        G.add_node(i+1, label = data_node_label[i])
    G.remove_nodes_from(list(nx.isolates(G)))
    # split into graphs
    graph_num = data_graph_indicator.max()
    node_list = np.arange(data_graph_indicator.shape[0])+1
    graphs = []
    max_nodes = 0
    for i in range(graph_num):
        # find the nodes for each graph
        nodes = node_list[data_graph_indicator==i+1]
        # NOTE(review): in networkx >= 2 subgraph() returns a view; setting
        # G_sub.graph['label'] then touches the parent graph's attr dict --
        # confirm against the pinned networkx version.
        G_sub = G.subgraph(nodes)
        if graph_labels:
            G_sub.graph['label'] = data_graph_labels[i]
        if G_sub.number_of_nodes()>=min_num_nodes and G_sub.number_of_nodes()<=max_num_nodes:
            graphs.append(G_sub)
            if G_sub.number_of_nodes() > max_nodes:
                max_nodes = G_sub.number_of_nodes()
    # NOTE: max_nodes is computed but not returned; only the list is.
    print('Loaded')
    return graphs
def test_graph_load_DD():
    """Smoke-test DD loading: plot the graph-size histogram and a 4x4 grid of
    sample graphs under figures/."""
    # BUG FIX: Graph_load_batch returns only the list of graphs, not a
    # (graphs, max_num_nodes) tuple -- the original unpacking raised ValueError.
    graphs = Graph_load_batch(min_num_nodes=10, name='DD', node_attributes=False, graph_labels=True)
    max_num_nodes = max(G.number_of_nodes() for G in graphs)
    shuffle(graphs)
    plt.switch_backend('agg')
    plt.hist([len(graphs[i]) for i in range(len(graphs))], bins=100)
    plt.savefig('figures/test.png')
    plt.close()
    row = 4
    col = 4
    draw_graph_list(graphs[0:row*col], row=row, col=col, fname='figures/test')
    print('max num nodes', max_num_nodes)
def parse_index_file(filename):
    """Return the integer on each line of *filename* as a list.

    BUG FIX: the original iterated an open() result without ever closing it;
    a context manager releases the handle deterministically.
    """
    with open(filename) as index_file:
        return [int(line.strip()) for line in index_file]
# load cora, citeseer and pubmed dataset
def Graph_load(dataset = 'cora'):
    '''
    Load a single citation dataset (cora / citeseer / pubmed) from the
    pickled "ind.<dataset>.*" files under dataset/.

    :param dataset: dataset name
    :return: (sparse adjacency matrix, lil feature matrix, networkx graph)
    '''
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for i in range(len(names)):
        # BUG FIX: the original left the pickle file handles open; latin1
        # encoding is needed because the pickles were written by Python 2.
        with open("dataset/ind.{}.{}".format(dataset, names[i]), 'rb') as fin:
            load = pkl.load(fin, encoding='latin1')
        objects.append(load)
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("dataset/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
    # reorder test-set rows back into their original node positions
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    G = nx.from_dict_of_lists(graph)
    adj = nx.adjacency_matrix(G)
    return adj, features, G
######### code test ########
# adj, features,G = Graph_load()
# print(adj)
# print(G.number_of_nodes(), G.number_of_edges())
# _,_,G = Graph_load(dataset='citeseer')
# G = max(nx.connected_component_subgraphs(G), key=len)
# G = nx.convert_node_labels_to_integers(G)
#
# count = 0
# max_node = 0
# for i in range(G.number_of_nodes()):
# G_ego = nx.ego_graph(G, i, radius=3)
# # draw_graph(G_ego,prefix='test'+str(i))
# m = G_ego.number_of_nodes()
# if m>max_node:
# max_node = m
# if m>=50:
# print(i, G_ego.number_of_nodes(), G_ego.number_of_edges())
# count += 1
# print('count', count)
# print('max_node', max_node)
def bfs_seq(G, start_id):
    '''
    Return the node ids of *G* in BFS order starting from *start_id*,
    level by level, using networkx's successor ordering.

    :param G: networkx graph
    :param start_id: root node id
    :return: list of node ids in BFS visit order
    '''
    successors = dict(nx.bfs_successors(G, start_id))
    order = [start_id]
    frontier = [start_id]
    while frontier:
        next_level = []
        for node in frontier:
            children = successors.get(node)
            if children is not None:
                # deliberately NOT shuffled: ordering must stay deterministic
                next_level.extend(children)
        order.extend(next_level)
        frontier = next_level
    return order
def encode_adj(adj, max_prev_node=10, is_full = False):
    '''
    Encode an adjacency matrix as an (n-1) x max_prev_node band.

    Row t holds, in reverse order, the connections of node t+1 to its (at
    most max_prev_node) immediately preceding nodes; rows are zero-padded.

    :param adj: n*n adjacency matrix, nodes already ordered (e.g. by BFS)
    :param max_prev_node: band width kept per row (ignored when is_full)
    :param is_full: use the maximal width n-1 instead of max_prev_node
    :return: (n-1) x max_prev_node numpy array
    '''
    if is_full:
        max_prev_node = adj.shape[0]-1
    # keep the strictly-lower triangle, drop the empty first row / last column
    lower = np.tril(adj, k=-1)
    dim = lower.shape[0]
    lower = lower[1:dim, 0:dim-1]
    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        begin = max(0, row - max_prev_node + 1)
        end = row + 1
        # writing the reversed window at the left edge (rest stays zero) is
        # equivalent to writing it right-aligned and reversing the whole row
        encoded[row, 0:end - begin] = lower[row, begin:end][::-1]
    return encoded
def decode_adj(adj_output):
    '''
    Invert encode_adj: rebuild the full symmetric n x n adjacency matrix
    from its (n-1) x max_prev_node band encoding.
    '''
    max_prev_node = adj_output.shape[1]
    m = adj_output.shape[0]
    lower = np.zeros((m, m))
    for row in range(m):
        begin = max(0, row - max_prev_node + 1)
        end = row + 1
        # the reversed row stores the window right-aligned
        dest_begin = max_prev_node - (end - begin)
        lower[row, begin:end] = adj_output[row, ::-1][dest_begin:max_prev_node]
    full = np.zeros((m + 1, m + 1))
    full[1:m + 1, 0:m] = np.tril(lower, 0)
    return full + full.T
def encode_adj_flexible(adj):
    '''
    Lossless variable-length encoding: row t is the slice of node t+1's
    previous-node connections starting at the earliest edge still in the
    running window.

    NOTE(review): np.amin on an all-zero row would raise -- assumes every
    non-initial node has at least one earlier neighbour (BFS-ordered,
    connected graph); confirm for other inputs.
    :param adj: n*n adjacency matrix
    :return: list of n-1 numpy slices of varying length
    '''
    lower = np.tril(adj, k=-1)
    dim = lower.shape[0]
    lower = lower[1:dim, 0:dim-1]
    pieces = []
    window_start = 0
    for row in range(lower.shape[0]):
        window_end = row + 1
        piece = lower[row, window_start:window_end]
        pieces.append(piece)
        first_edge = np.nonzero(piece)[0]
        window_start = window_end - len(piece) + np.amin(first_edge)
    return pieces
def decode_adj_flexible(adj_output):
    '''
    Invert encode_adj_flexible: rebuild the full symmetric adjacency matrix
    from the list of variable-length row slices (lossless).
    '''
    m = len(adj_output)
    lower = np.zeros((m, m))
    for row, piece in enumerate(adj_output):
        hi = row + 1
        lo = hi - len(piece)
        lower[row, lo:hi] = piece
    full = np.zeros((m + 1, m + 1))
    full[1:m + 1, 0:m] = np.tril(lower, 0)
    return full + full.T
def test_encode_decode_adj():
    '''Round-trip check of encode_adj/decode_adj and the flexible variants on
    a small BFS-ordered graph (prints the reconstruction error).'''
    # earlier sample graphs intentionally left in; the last assignment wins
    G = nx.ladder_graph(5)
    G = nx.grid_2d_graph(20,20)
    G = nx.ladder_graph(200)
    G = nx.karate_club_graph()
    G = nx.connected_caveman_graph(2,3)
    print(G.number_of_nodes())
    adj = np.asarray(nx.to_numpy_matrix(G))
    G = nx.from_numpy_matrix(adj)
    start_idx = np.random.randint(adj.shape[0])
    x_idx = np.array(bfs_seq(G, start_idx))
    adj = adj[np.ix_(x_idx, x_idx)]
    print('adj\n',adj)
    adj_output = encode_adj(adj,max_prev_node=5)
    print('adj_output\n',adj_output)
    # BUG FIX: decode_adj() takes no max_prev_node argument (it reads the
    # band width from adj_output); the keyword raised TypeError.
    adj_recover = decode_adj(adj_output)
    print('adj_recover\n',adj_recover)
    print('error\n',np.amin(adj_recover-adj),np.amax(adj_recover-adj))
    adj_output = encode_adj_flexible(adj)
    for i in range(len(adj_output)):
        print(len(adj_output[i]))
    adj_recover = decode_adj_flexible(adj_output)
    print(adj_recover)
    print(np.amin(adj_recover-adj),np.amax(adj_recover-adj))
def encode_adj_full(adj):
    '''
    Encode to an (n-1) x (n-1) x 2 tensor: channel 0 carries the reversed
    previous-node connections, channel 1 a validity mask (0 acts as the stop
    token), plus a per-row sequence-length vector.

    NOTE(review): np.amin on an all-zero row would raise -- assumes every
    non-initial node has at least one earlier neighbour.
    :param adj: n*n adjacency matrix
    :return: (encoded tensor, length vector)
    '''
    lower = np.tril(adj, k=-1)
    dim = lower.shape[0]
    lower = lower[1:dim, 0:dim-1]
    encoded = np.zeros((lower.shape[0], lower.shape[1], 2))
    seq_len = np.zeros(lower.shape[0])
    for row in range(lower.shape[0]):
        edges = np.nonzero(lower[row, :])[0]
        begin = np.amin(edges)
        end = row + 1
        piece = lower[row, begin:end]
        # channel 0: the window, reversed; channel 1: validity mask
        encoded[row, 0:piece.shape[0], 0] = piece[::-1]
        encoded[row, 0:piece.shape[0], 1] = 1
        seq_len[row] = piece.shape[0]
    return encoded, seq_len
def decode_adj_full(adj_output):
    '''
    Invert encode_adj_full: rebuild the full symmetric adjacency matrix from
    the two-channel encoding (channel 1 marks the valid prefix of each row).
    '''
    n_rows = adj_output.shape[0]
    adj = np.zeros((n_rows + 1, adj_output.shape[1] + 1))
    for row in range(n_rows):
        valid = np.nonzero(adj_output[row, :, 1])[0]
        last = np.amax(valid)
        piece = adj_output[row, 0:last + 1, 0]
        hi = row + 1
        lo = hi - last - 1
        # channel 0 stored the window reversed; restore original order
        adj[row + 1, lo:hi] = piece[::-1]
    return adj + adj.T
def test_encode_decode_adj_full():
    '''Round-trip sanity check of encode_adj_full/decode_adj_full on a
    BFS-ordered karate-club graph (prints the reconstruction error).'''
    G = nx.karate_club_graph()
    adjacency = np.asarray(nx.to_numpy_matrix(G))
    G = nx.from_numpy_matrix(adjacency)
    root = np.random.randint(adjacency.shape[0])
    order = np.array(bfs_seq(G, root))
    adjacency = adjacency[np.ix_(order, order)]
    encoded, encoded_len = encode_adj_full(adjacency)
    print('adj\n', adjacency)
    print('adj_output[0]\n', encoded[:, :, 0])
    print('adj_output[1]\n', encoded[:, :, 1])
    recovered = decode_adj_full(encoded)
    print('adj_recover\n', recovered)
    print('error\n', recovered - adjacency)
    print('error_sum\n', np.amax(recovered - adjacency), np.amin(recovered - adjacency))
########## use pytorch dataloader
class Graph_sequence_sampler_pytorch(torch.utils.data.Dataset):
    """Dataset of randomly permuted, BFS-ordered adjacency sequences.

    Each item is a dict with 'x' (input sequence, row 0 is an all-ones SOS
    token), 'y' (target sequence, shifted one step) -- both of shape
    (n, max_prev_node) -- and 'len' (node count of the sampled graph).
    """
    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        # pre-convert every graph to a dense numpy adjacency matrix
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # estimate the required band width empirically from random BFS orders
            print('calculating max previous node, total iteration: {}'.format(iteration))
            self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            print('max previous node: {}'.format(self.max_prev_node))
        else:
            self.max_prev_node = max_prev_node
        # self.max_prev_node = max_prev_node
        # # sort Graph in descending order
        # len_batch_order = np.argsort(np.array(self.len_all))[::-1]
        # self.len_all = [self.len_all[i] for i in len_batch_order]
        # self.adj_all = [self.adj_all[i] for i in len_batch_order]
    def __len__(self):
        return len(self.adj_all)
    def __getitem__(self, idx):
        """Sample one graph: random node permutation, then BFS reorder from a
        random root, then band-encode. Non-deterministic by design."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        x_batch[0,:] = 1 # the first input token is all ones
        y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        x_idx = np.random.permutation(adj_copy.shape[0])
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_copy_matrix = np.asmatrix(adj_copy)
        G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        start_idx = np.random.randint(adj_copy.shape[0])
        x_idx = np.array(bfs_seq(G, start_idx))
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        y_batch[0:adj_encoded.shape[0], :] = adj_encoded
        x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
        return {'x':x_batch,'y':y_batch, 'len':len_batch}
    def calc_max_prev_node(self, iter=20000,topk=10):
        """Monte-Carlo estimate of the band width: sample *iter* random BFS
        orderings and return the topk largest flexible-encoding row lengths."""
        max_prev_node = []
        for i in range(iter):
            if i % (iter / 5) == 0:
                print('iter {} times'.format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-1*topk:]
        return max_prev_node
########## use pytorch dataloader
class Graph_sequence_sampler_pytorch_nobfs(torch.utils.data.Dataset):
    """Like Graph_sequence_sampler_pytorch but without BFS reordering: each
    item applies only a random node permutation and encodes with the maximal
    band width n-1."""
    def __init__(self, G_list, max_num_node=None):
        self.adj_all = [np.asarray(nx.to_numpy_matrix(G)) for G in G_list]
        self.len_all = [G.number_of_nodes() for G in G_list]
        self.n = max(self.len_all) if max_num_node is None else max_num_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        adjacency = self.adj_all[idx].copy()
        num_nodes = adjacency.shape[0]
        # random node relabelling only -- no BFS pass in this variant
        perm = np.random.permutation(num_nodes)
        adjacency = adjacency[np.ix_(perm, perm)]
        encoded = encode_adj(adjacency.copy(), max_prev_node=self.n - 1)
        # zero padding covers graphs smaller than self.n
        x_seq = np.zeros((self.n, self.n - 1))
        x_seq[0, :] = 1  # all-ones SOS token
        y_seq = np.zeros((self.n, self.n - 1))
        y_seq[0:encoded.shape[0], :] = encoded
        x_seq[1:encoded.shape[0] + 1, :] = encoded
        return {'x': x_seq, 'y': y_seq, 'len': num_nodes}
# dataset = Graph_sequence_sampler_pytorch_nobfs(graphs)
# print(dataset[1]['x'])
# print(dataset[1]['y'])
# print(dataset[1]['len'])
########## use pytorch dataloader
class Graph_sequence_sampler_pytorch_canonical(torch.utils.data.Dataset):
    """Dataset serving each graph in its stored ("canonical") node order:
    no permutation and no BFS in __getitem__. Band width defaults to n-1."""
    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # print('calculating max previous node, total iteration: {}'.format(iteration))
            # self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            # print('max previous node: {}'.format(self.max_prev_node))
            # default: maximal band width (lossless)
            self.max_prev_node = self.n-1
        else:
            self.max_prev_node = max_prev_node
        # self.max_prev_node = max_prev_node
        # # sort Graph in descending order
        # len_batch_order = np.argsort(np.array(self.len_all))[::-1]
        # self.len_all = [self.len_all[i] for i in len_batch_order]
        # self.adj_all = [self.adj_all[i] for i in len_batch_order]
    def __len__(self):
        return len(self.adj_all)
    def __getitem__(self, idx):
        """Encode graph *idx* in its stored node order (deterministic)."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        x_batch[0,:] = 1 # the first input token is all ones
        y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        # adj_copy_matrix = np.asmatrix(adj_copy)
        # G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        # start_idx = G.number_of_nodes()-1
        # x_idx = np.array(bfs_seq(G, start_idx))
        # adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy, max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        y_batch[0:adj_encoded.shape[0], :] = adj_encoded
        x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
        return {'x':x_batch,'y':y_batch, 'len':len_batch}
    def calc_max_prev_node(self, iter=20000,topk=10):
        """Monte-Carlo estimate of the band width over random BFS orderings
        (unused by default here; kept for parity with the sampler above)."""
        max_prev_node = []
        for i in range(iter):
            if i % (iter / 5) == 0:
                print('iter {} times'.format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-1*topk:]
        return max_prev_node
########## use pytorch dataloader
class Graph_sequence_sampler_pytorch_nll(torch.utils.data.Dataset):
    """Dataset for NLL evaluation: pre-enumerates (via random sampling) many
    distinct BFS-ordered adjacency matrices per graph in __init__, then
    serves them deterministically in __getitem__."""
    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            adj = np.asarray(nx.to_numpy_matrix(G))
            # collect distinct BFS-ordered variants of this graph
            adj_temp = self.calc_adj(adj)
            self.adj_all.extend(adj_temp)
            # NOTE: len_all has one entry per graph while adj_all has many;
            # __len__ uses adj_all, len_all is only used for the max below.
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # print('calculating max previous node, total iteration: {}'.format(iteration))
            # self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            # print('max previous node: {}'.format(self.max_prev_node))
            # default: maximal band width (lossless)
            self.max_prev_node = self.n-1
        else:
            self.max_prev_node = max_prev_node
        # self.max_prev_node = max_prev_node
        # # sort Graph in descending order
        # len_batch_order = np.argsort(np.array(self.len_all))[::-1]
        # self.len_all = [self.len_all[i] for i in len_batch_order]
        # self.adj_all = [self.adj_all[i] for i in len_batch_order]
    def __len__(self):
        return len(self.adj_all)
    def __getitem__(self, idx):
        """Encode pre-computed adjacency *idx* (deterministic)."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        x_batch[0,:] = 1 # the first input token is all ones
        y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        # adj_copy_matrix = np.asmatrix(adj_copy)
        # G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        # start_idx = G.number_of_nodes()-1
        # x_idx = np.array(bfs_seq(G, start_idx))
        # adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy, max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        y_batch[0:adj_encoded.shape[0], :] = adj_encoded
        x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
        return {'x':x_batch,'y':y_batch, 'len':len_batch}
    def calc_adj(self,adj):
        """Sample up to max_iter random permutation+BFS orderings of *adj*
        and return the list of distinct resulting adjacency matrices
        (always including the original)."""
        max_iter = 10000
        adj_all = [adj]
        adj_all_len = 1
        i_old = 0
        for i in range(max_iter):
            adj_copy = adj.copy()
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # O(len(adj_all)) dedup scan per sample
            add_flag = True
            for adj_exist in adj_all:
                if np.array_equal(adj_exist, adj_copy):
                    add_flag = False
                    break
            if add_flag:
                adj_all.append(adj_copy)
                adj_all_len += 1
            if adj_all_len % 10 ==0:
                print('adj found:',adj_all_len,'iter used',i)
        return adj_all
# graphs = [nx.barabasi_albert_graph(20,3)]
# graphs = [nx.grid_2d_graph(4,4)]
# dataset = Graph_sequence_sampler_pytorch_nll(graphs)
############## below are codes not used in current version
############## they are based on pytorch default data loader, we should consider reimplement them in current datasets, since they are more efficient
# normal version
class Graph_sequence_sampler_truncate():
    '''
    Pre-dataloader batch sampler: sample() draws batch_size random graphs,
    permutes + BFS-orders each, band-encodes them truncated to max_prev_node
    and returns (x, y, len) torch tensors sorted by descending graph size.
    '''
    def __init__(self, G_list, max_node_num=25, batch_size=4, max_prev_node = 25):
        self.batch_size = batch_size
        self.n = max_node_num
        self.max_prev_node = max_prev_node
        # pre-convert every graph to a dense numpy adjacency matrix
        self.adj_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
    def sample(self):
        """Draw one random batch; returns (x, y, len) with x/y of shape
        (batch, n, max_prev_node) and len a python list of ints."""
        # batch, length, feature
        x_batch = np.zeros((self.batch_size, self.n, self.max_prev_node)) # here zeros are padded for small graph
        y_batch = np.zeros((self.batch_size, self.n, self.max_prev_node)) # here zeros are padded for small graph
        len_batch = np.zeros(self.batch_size)
        # generate input x, y pairs
        for i in range(self.batch_size):
            # first sample and get a permuted adj
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            len_batch[i] = adj_copy.shape[0]
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
            # get x and y and adj
            # for small graph the rest are zero padded
            y_batch[i, 0:adj_encoded.shape[0], :] = adj_encoded
            x_batch[i, 1:adj_encoded.shape[0]+1, :] = adj_encoded
        # sort in descending order (required for packed RNN sequences)
        len_batch_order = np.argsort(len_batch)[::-1]
        len_batch = len_batch[len_batch_order]
        x_batch = x_batch[len_batch_order,:,:]
        y_batch = y_batch[len_batch_order,:,:]
        return torch.from_numpy(x_batch).float(), torch.from_numpy(y_batch).float(), len_batch.astype('int').tolist()
    def calc_max_prev_node(self,iter):
        """Monte-Carlo estimate of the band width: sample *iter* random BFS
        orderings and return the 100 largest flexible-encoding row lengths."""
        max_prev_node = []
        for i in range(iter):
            if i%(iter/10)==0:
                print(i)
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # time1 is set but never used -- left as in the original
            time1 = time.time()
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-100:]
        return max_prev_node
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='DD',node_attributes=False)
# dataset = Graph_sequence_sampler_truncate([nx.karate_club_graph()])
# max_prev_nodes = dataset.calc_max_prev_node(iter=10000)
# print(max_prev_nodes)
# x,y,len = dataset.sample()
# print('x',x)
# print('y',y)
# print(len)
# only output y_batch (which is needed in batch version of new model)
class Graph_sequence_sampler_fast():
    """Sampler that yields only the y (encoded adjacency) batches.

    Each call to :meth:`sample` draws ``batch_size`` random graphs, applies
    a random node permutation followed by a BFS re-ordering, and returns the
    encoded adjacency rows zero-padded to ``(batch_size, n, max_prev_node)``.
    """
    def __init__(self, G_list, max_node_num=25, batch_size=4, max_prev_node=25):
        self.batch_size = batch_size
        self.G_list = G_list
        self.n = max_node_num
        self.max_prev_node = max_prev_node
        self.adj_all = [np.asarray(nx.to_numpy_matrix(graph)) for graph in G_list]

    def sample(self):
        # batch x length x feature; rows beyond each graph's size stay zero
        y_batch = np.zeros((self.batch_size, self.n, self.max_prev_node))
        for sample_idx in range(self.batch_size):
            # draw a random graph and permute its nodes
            graph_idx = np.random.randint(len(self.adj_all))
            adj = self.adj_all[graph_idx].copy()
            perm = np.random.permutation(adj.shape[0])
            adj = adj[np.ix_(perm, perm)]
            permuted_graph = nx.from_numpy_matrix(np.asmatrix(adj))
            # canonicalise the ordering via BFS from a random start node
            start = np.random.randint(adj.shape[0])
            bfs_order = np.array(bfs_seq(permuted_graph, start))
            adj = adj[np.ix_(bfs_order, bfs_order)]
            # truncated encoding of the BFS-ordered adjacency
            encoded = encode_adj(adj.copy(), max_prev_node=self.max_prev_node)
            y_batch[sample_idx, 0:encoded.shape[0], :] = encoded
        return torch.from_numpy(y_batch).float()
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='PROTEINS_full')
# print(max_num_nodes)
# G = nx.ladder_graph(100)
# # G1 = nx.karate_club_graph()
# # G2 = nx.connected_caveman_graph(4,5)
# G_list = [G]
# dataset = Graph_sequence_sampler_fast(graphs, batch_size=128, max_node_num=max_num_nodes, max_prev_node=30)
# for i in range(5):
# time0 = time.time()
# y = dataset.sample()
# time1 = time.time()
# print(i,'time', time1 - time0)
# output size is flexible (using list to represent), batch size is 1
class Graph_sequence_sampler_flexible():
    """Single-graph sampler with flexible (list-based) output length.

    :meth:`sample` returns ``(y, adj)`` where ``y`` is a list of
    variable-length encoded adjacency rows and ``adj`` the BFS-ordered
    adjacency matrix it was derived from. Batch size is implicitly 1.
    """
    def __init__(self, G_list):
        self.G_list = G_list
        self.adj_all = [np.asarray(nx.to_numpy_matrix(graph)) for graph in G_list]
        self.y_batch = []

    def sample(self):
        # draw a random graph and apply a random node permutation
        graph_idx = np.random.randint(len(self.adj_all))
        adj = self.adj_all[graph_idx].copy()
        perm = np.random.permutation(adj.shape[0])
        adj = adj[np.ix_(perm, perm)]
        permuted_graph = nx.from_numpy_matrix(np.asmatrix(adj))
        # canonicalise the ordering via BFS from a random start node
        start = np.random.randint(adj.shape[0])
        bfs_order = np.array(bfs_seq(permuted_graph, start))
        adj = adj[np.ix_(bfs_order, bfs_order)]
        # flexible encoding: one variable-length row per node
        self.y_batch = encode_adj_flexible(adj.copy())
        return self.y_batch, adj
# G = nx.ladder_graph(5)
# # G = nx.grid_2d_graph(20,20)
# # G = nx.ladder_graph(200)
# graphs = [G]
#
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='ENZYMES')
# sampler = Graph_sequence_sampler_flexible(graphs)
#
# y_max_all = []
# for i in range(10000):
# y_raw,adj_copy = sampler.sample()
# y_max = max(len(y_raw[i]) for i in range(len(y_raw)))
# y_max_all.append(y_max)
# # print('max bfs node',y_max)
# print('max', max(y_max_all))
# print(y[1])
# print(Variable(torch.FloatTensor(y[1])).cuda(CUDA))
########### potential use: an encoder along with the GraphRNN decoder
# preprocess the adjacency matrix
def preprocess(A):
    """Symmetrically normalise an adjacency matrix, GCN style.

    Adds self-loops (``A_hat = A + I``) and returns
    ``D^{-1/2} A_hat D^{-1/2}`` where ``D`` is the degree matrix of
    ``A_hat`` (row sums of ``A`` plus one for the self-loop).
    """
    num_nodes = len(A)
    # degree of each node in A + I (the +1 accounts for the self-loop)
    degree_vec = np.sum(A, axis=1) + 1
    inv_sqrt_degree = np.diag(np.power(degree_vec, -0.5).flatten())
    with_self_loops = A + np.eye(num_nodes)
    return np.dot(np.dot(inv_sqrt_degree, with_self_loops), inv_sqrt_degree)
# truncate the output seqence to save representation, and allowing for infinite generation
# now having a list of graphs
class Graph_sequence_sampler_bfs_permute_truncate_multigraph():
    """Sampler over a list of graphs with truncated output sequences.

    Besides the (x, y) encoded adjacency sequences this sampler returns the
    raw adjacency, its normalised form (via ``preprocess``) and per-node
    features, all zero-padded to ``max_node_num``. Truncation to
    ``max_prev_node`` keeps the representation small and allows generation
    of arbitrarily large graphs.

    NOTE(review): uses networkx 1.x-era APIs (``G.nodes()[i]`` indexing and
    ``G.degree(list)`` returning a dict) — likely breaks on networkx >= 2;
    confirm the pinned networkx version.
    """
    def __init__(self, G_list, max_node_num=25, batch_size=4, max_prev_node = 25, feature = None):
        self.batch_size = batch_size
        self.G_list = G_list
        self.n = max_node_num
        self.max_prev_node = max_prev_node
        self.adj_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
        # None -> one-hot identity features; anything else -> degree+clustering
        self.has_feature = feature
    def sample(self):
        # x is y shifted right by one row (the zero row acts as a start token)
        x_batch = np.zeros((self.batch_size, self.n, self.max_prev_node)) # here zeros are padded for small graph
        # batch, length, feature
        y_batch = np.zeros((self.batch_size, self.n, self.max_prev_node)) # here zeros are padded for small graph
        # batch, length, length: raw adjacency
        adj_batch = np.zeros((self.batch_size, self.n, self.n)) # here zeros are padded for small graph
        # batch, size, size: normalised adjacency (see preprocess)
        adj_norm_batch = np.zeros((self.batch_size, self.n, self.n)) # here zeros are padded for small graph
        # batch, size, feature_len: degree and clustering coefficient
        if self.has_feature is None:
            feature_batch = np.zeros((self.batch_size, self.n, self.n)) # use one hot feature
        else:
            feature_batch = np.zeros((self.batch_size, self.n, 2))
        # generate input x, y pairs
        for i in range(self.batch_size):
            time0 = time.time()  # timing probe (profiling prints removed)
            # first sample and get a permuted adj
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            time1 = time.time()
            # then do bfs in the permuted G, from a random start node
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # per-node features in BFS order (networkx 1.x style access;
            # the comprehension's `i` is scoped to the comprehension in py3)
            node_list = [G.nodes()[i] for i in x_idx]
            feature_degree = np.array(list(G.degree(node_list).values()))[:,np.newaxis]
            feature_clustering = np.array(list(nx.clustering(G,nodes=node_list).values()))[:,np.newaxis]
            time2 = time.time()
            # truncated encoding of the BFS-ordered adjacency
            adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
            # y holds the encoding; x is the same encoding shifted by one row
            y_batch[i, 0:adj_encoded.shape[0], :] = adj_encoded
            x_batch[i, 1:adj_encoded.shape[0]+1, :] = adj_encoded
            adj_batch[i, 0:adj_copy.shape[0], 0:adj_copy.shape[0]] = adj_copy
            adj_copy_norm = preprocess(adj_copy)
            time3 = time.time()
            adj_norm_batch[i, 0:adj_copy.shape[0], 0:adj_copy.shape[0]] = adj_copy_norm
            if self.has_feature is None:
                # one-hot identity features
                feature_batch[i, 0:adj_copy.shape[0], 0:adj_copy.shape[0]] = np.eye(adj_copy.shape[0])
            else:
                # degree and clustering coefficient per node, BFS-ordered
                feature_batch[i,0:adj_copy.shape[0],:] = np.concatenate((feature_degree,feature_clustering),axis=1)
            time4 = time.time()
        return torch.from_numpy(x_batch).float(), torch.from_numpy(y_batch).float(),\
               torch.from_numpy(adj_batch).float(), torch.from_numpy(adj_norm_batch).float(), torch.from_numpy(feature_batch).float()
# generate own synthetic dataset
def Graph_synthetic(seed):
    """Generate a synthetic graph whose edges follow node-feature similarity.

    Creates 100 nodes with 5-dim features (five noisy one-hot clusters) and
    connects pairs with probability proportional to inverse squared L2
    feature distance, targeting an average degree of ~9. Prints summary
    statistics and returns ``(G, node_features)``.
    """
    G = nx.Graph()
    np.random.seed(seed)
    # five clusters of 20 nodes: one-hot base plus small gaussian noise
    base = np.repeat(np.eye(5), 20, axis=0)
    rand = np.random.randn(100, 5) * 0.05
    node_features = base + rand
    node_distance_l1 = np.ones((node_features.shape[0], node_features.shape[0]))
    node_distance_np = np.zeros((node_features.shape[0], node_features.shape[0]))
    for i in range(node_features.shape[0]):
        for j in range(node_features.shape[0]):
            if i != j:
                node_distance_l1[i, j] = np.sum(np.abs(node_features[i] - node_features[j]))
                # similarity: inverse squared L2 distance
                # (assumes features are distinct — the gaussian noise makes
                # an exact collision essentially impossible)
                node_distance_np[i, j] = 1 / np.sum(np.abs(node_features[i] - node_features[j]) ** 2)
    print('node distance max', np.max(node_distance_l1))
    print('node distance min', np.min(node_distance_l1))
    # row-normalise similarities into a probability-like distribution
    node_distance_np_sum = np.sum(node_distance_np, axis=1, keepdims=True)
    embedding_dist = node_distance_np / node_distance_np_sum
    # generate the graph: edge (i, j) kept with prob ~ similarity * average_degree
    average_degree = 9
    for i in range(node_features.shape[0]):
        for j in range(i + 1, embedding_dist.shape[0]):
            p = np.random.rand()
            if p < embedding_dist[i, j] * average_degree:
                G.add_edge(i, j)
    # materialise the isolates first: nx.isolates returns a generator in
    # networkx >= 2, and removing nodes while iterating it raises RuntimeError
    G.remove_nodes_from(list(nx.isolates(G)))
    print('num of nodes', G.number_of_nodes())
    print('num of edges', G.number_of_edges())
    G_deg = nx.degree_histogram(G)
    G_deg_sum = [a * b for a, b in zip(G_deg, range(0, len(G_deg)))]
    print('average degree', sum(G_deg_sum) / G.number_of_nodes())
    print('average path length', nx.average_shortest_path_length(G))
    print('diameter', nx.diameter(G))
    G_cluster = sorted(list(nx.clustering(G).values()))
    print('average clustering coefficient', sum(G_cluster) / len(G_cluster))
    print('Graph generation complete!')
    return G, node_features
# G = Graph_synthetic(10)
# return adj and features from a single graph
class GraphDataset_adj(torch.utils.data.Dataset):
    """Dataset exposing a single graph as (adj, adj_norm, features).

    The node order is randomly permuted once at construction time; the
    dataset has length 1 and always returns the same sample.
    """
    def __init__(self, G, features=None):
        self.G = G
        self.n = G.number_of_nodes()
        raw_adj = np.asarray(nx.to_numpy_matrix(self.G))
        # apply one random node permutation up front
        perm = np.random.permutation(self.n)
        raw_adj = raw_adj[np.ix_(perm, perm)]
        # adjacency with self-loops, plus its normalised counterpart
        self.adj = torch.from_numpy(raw_adj + np.eye(len(raw_adj))).float()
        self.adj_norm = torch.from_numpy(preprocess(raw_adj)).float()
        if features is None:
            # identity (one-hot) features when none are supplied
            self.features = torch.Tensor(self.n, self.n)
            self.features = nn.init.eye(self.features)
        else:
            self.features = torch.from_numpy(features[perm, :]).float()
        print('embedding size', self.features.size())

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        return {'adj': self.adj, 'adj_norm': self.adj_norm, 'features': self.features}
# G = nx.karate_club_graph()
# dataset = GraphDataset_adj(G)
# train_loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=1)
# for data in train_loader:
# print(data)
# return adj and features from a list of graphs
class GraphDataset_adj_batch(torch.utils.data.Dataset):
    """Dataset over a list of graphs; each item is a fixed-size random subgraph.

    Returns adjacency with self-loops, its normalised form (``preprocess``),
    per-node features (z-score normalised when ``has_feature``) and the raw
    adjacency.
    """
    def __init__(self, graphs, has_feature=True, num_nodes=20):
        self.graphs = graphs
        self.has_feature = has_feature
        self.num_nodes = num_nodes

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        adj_raw = np.asarray(nx.to_numpy_matrix(self.graphs[idx]))
        np.fill_diagonal(adj_raw, 0)  # in case self connections already exist
        # sample a random num_nodes-sized subgraph
        subgraph_idx = np.random.permutation(adj_raw.shape[0])[0:self.num_nodes]
        adj_raw = adj_raw[np.ix_(subgraph_idx, subgraph_idx)]
        adj = torch.from_numpy(adj_raw + np.eye(len(adj_raw))).float()
        adj_norm = torch.from_numpy(preprocess(adj_raw)).float()
        adj_raw = torch.from_numpy(adj_raw).float()
        if self.has_feature:
            dictionary = nx.get_node_attributes(self.graphs[idx], 'feature')
            # materialise the attribute values once instead of rebuilding the
            # list on every loop iteration (was O(n^2))
            values = list(dictionary.values())
            features = np.zeros((self.num_nodes, values[0].shape[0]))
            for i in range(self.num_nodes):
                features[i, :] = values[subgraph_idx[i]]
            # z-score normalise each feature dimension
            features -= np.mean(features, axis=0)
            epsilon = 1e-6
            features /= (np.std(features, axis=0) + epsilon)
            features = torch.from_numpy(features).float()
        else:
            n = self.num_nodes
            features = torch.Tensor(n, n)
            features = nn.init.eye(features)
        return {'adj': adj, 'adj_norm': adj_norm, 'features': features, 'adj_raw': adj_raw}
# return adj and features from a list of graphs, batch size = 1, so that graphs can have various size each time
class GraphDataset_adj_batch_1(torch.utils.data.Dataset):
    """Like GraphDataset_adj_batch, but batch size 1 and variable graph size.

    Each item permutes the whole graph (no subsampling), so graphs of
    different sizes can coexist in the same dataset.
    """
    def __init__(self, graphs, has_feature=True):
        self.graphs = graphs
        self.has_feature = has_feature

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        adj_raw = np.asarray(nx.to_numpy_matrix(self.graphs[idx]))
        np.fill_diagonal(adj_raw, 0)  # in case self connections already exist
        n = adj_raw.shape[0]
        # apply a random node permutation
        subgraph_idx = np.random.permutation(n)
        adj_raw = adj_raw[np.ix_(subgraph_idx, subgraph_idx)]
        adj = torch.from_numpy(adj_raw + np.eye(len(adj_raw))).float()
        adj_norm = torch.from_numpy(preprocess(adj_raw)).float()
        if self.has_feature:
            dictionary = nx.get_node_attributes(self.graphs[idx], 'feature')
            # materialise the attribute values once instead of rebuilding the
            # list on every loop iteration (was O(n^2))
            values = list(dictionary.values())
            features = np.zeros((n, values[0].shape[0]))
            for i in range(n):
                features[i, :] = values[i]
            features = features[subgraph_idx, :]
            # z-score normalise each feature dimension
            features -= np.mean(features, axis=0)
            epsilon = 1e-6
            features /= (np.std(features, axis=0) + epsilon)
            features = torch.from_numpy(features).float()
        else:
            features = torch.Tensor(n, n)
            features = nn.init.eye(features)
        return {'adj': adj, 'adj_norm': adj_norm, 'features': features}
# get one node at a time, for a single graph
class GraphDataset(torch.utils.data.Dataset):
    """Per-node dataset over a single graph.

    For node ``idx`` the item contains the node's neighbourhood out to
    ``hops`` hops, both raw (variable size) and zero-padded to
    ``max_degree`` children per node, plus a local adjacency matrix over
    the padded ids. Node ids are shifted by +1 so that 0 can act as the
    padding id.
    """
    def __init__(self, G, hops = 1, max_degree = 5, vocab_size = 35, embedding_dim = 35, embedding = None, shuffle_neighbour = True):
        self.G = G
        self.shuffle_neighbour = shuffle_neighbour
        self.hops = hops
        self.max_degree = max_degree
        if embedding is None:
            # default embedding: identity matrix (one-hot per node id)
            self.embedding = torch.Tensor(vocab_size, embedding_dim)
            self.embedding = nn.init.eye(self.embedding)
        else:
            self.embedding = torch.from_numpy(embedding).float()
        print('embedding size', self.embedding.size())
    def __len__(self):
        return len(self.G.nodes())
    def __getitem__(self, idx):
        # shift ids by one so 0 is reserved as the padding id
        idx = idx+1
        idx_list = [idx]
        node_list = [self.embedding[idx].view(-1, self.embedding.size(1))]
        node_count_list = []
        # --- pass 1: raw (unpadded) neighbourhood expansion ---
        for i in range(self.hops):
            # collect all neighbours of the current frontier for this hop
            adj_list = np.array([])
            adj_count_list = np.array([])
            for idx in idx_list:
                if self.shuffle_neighbour:
                    adj_list_new = list(self.G.adj[idx - 1])
                    random.shuffle(adj_list_new)
                    adj_list_new = np.array(adj_list_new) + 1
                else:
                    adj_list_new = np.array(list(self.G.adj[idx-1]))+1
                adj_count_list_new = np.array([len(adj_list_new)])
                adj_list = np.concatenate((adj_list, adj_list_new), axis=0)
                adj_count_list = np.concatenate((adj_count_list, adj_count_list_new), axis=0)
            # look up embeddings for this hop's neighbours
            index = torch.from_numpy(adj_list).long()
            adj_list_emb = self.embedding[index]
            node_list.append(adj_list_emb)
            node_count_list.append(adj_count_list)
            idx_list = adj_list
        # --- pass 2: padded expansion, used as target ---
        idx_list = [idx]
        node_list_pad = [self.embedding[idx].view(-1, self.embedding.size(1))]
        node_count_list_pad = []
        node_adj_list = []
        for i in range(self.hops):
            # fixed-size frontier: max_degree children per node; id 0 = padding
            adj_list = np.zeros(self.max_degree ** (i + 1))
            adj_count_list = np.ones(self.max_degree ** (i)) * self.max_degree
            for j, idx in enumerate(idx_list):
                if idx == 0:
                    # padding node expands to all-padding children
                    adj_list_new = np.zeros(self.max_degree)
                else:
                    if self.shuffle_neighbour:
                        adj_list_new = list(self.G.adj[idx - 1])
                        # random.shuffle(adj_list_new)
                        adj_list_new = np.array(adj_list_new) + 1
                    else:
                        adj_list_new = np.array(list(self.G.adj[idx-1]))+1
                # truncate to max_degree neighbours; the rest remain padding
                start_idx = j * self.max_degree
                incre_idx = min(self.max_degree, adj_list_new.shape[0])
                adj_list[start_idx:start_idx + incre_idx] = adj_list_new[:incre_idx]
            index = torch.from_numpy(adj_list).long()
            adj_list_emb = self.embedding[index]
            node_list_pad.append(adj_list_emb)
            node_count_list_pad.append(adj_count_list)
            idx_list = adj_list
            # local adjacency over the padded ids:
            # 1.0 for identical ids, 0.5 for ids connected in G.
            # NOTE(review): `index` holds +1-shifted ids while G's nodes are
            # unshifted — has_edge is queried with shifted ids; confirm G
            # uses 1-based node labels, otherwise this is off by one.
            node_adj = torch.zeros(index.size(0),index.size(0))
            for first in range(index.size(0)):
                for second in range(first, index.size(0)):
                    if index[first]==index[second]:
                        node_adj[first,second] = 1
                        node_adj[second,first] = 1
                    elif self.G.has_edge(index[first],index[second]):
                        node_adj[first, second] = 0.5
                        node_adj[second, first] = 0.5
            node_adj_list.append(node_adj)
        # reverse so the lists run from the outermost hop inward
        node_list = list(reversed(node_list))
        node_count_list = list(reversed(node_count_list))
        node_list_pad = list(reversed(node_list_pad))
        node_count_list_pad = list(reversed(node_count_list_pad))
        node_adj_list = list(reversed(node_adj_list))
        sample = {'node_list':node_list, 'node_count_list':node_count_list,
                  'node_list_pad':node_list_pad, 'node_count_list_pad':node_count_list_pad, 'node_adj_list':node_adj_list}
        return sample
| 38.409907
| 148
| 0.611233
|
4a0d875fdaba728f05164d8050a142644d2e9b54
| 3,372
|
py
|
Python
|
server/main.py
|
latonaio/template-matching-summary
|
6bb312c7d11e6d47ddc7093deb0b440037c03593
|
[
"MIT"
] | 10
|
2021-09-22T07:25:31.000Z
|
2021-11-05T01:26:32.000Z
|
server/main.py
|
latonaio/template-matching-summary
|
6bb312c7d11e6d47ddc7093deb0b440037c03593
|
[
"MIT"
] | null | null | null |
server/main.py
|
latonaio/template-matching-summary
|
6bb312c7d11e6d47ddc7093deb0b440037c03593
|
[
"MIT"
] | null | null | null |
import sys
import os
import grpc
from concurrent import futures
from aion.logger import initialize_logger, lprint
from src.errors import TemplateMatchingSummaryServerError
from src.vehicle import VehicleSummary
from src.trigger import TriggerSummary
from proto import template_matcning_summary_pb2, template_matcning_summary_pb2_grpc
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
SERVICE_NAME = os.environ.get("SERVICE")
CURRENT_DEVICE_NAME = os.environ.get("CURRENT_DEVICE_NAME")
SERVER_PORT = 50052
initialize_logger(SERVICE_NAME)
class VehicleSummaryServer(template_matcning_summary_pb2_grpc.TemplateMatchingSummaryServicer):
    """gRPC servicer that aggregates template-matching results per vehicle."""

    def __init__(self):
        super().__init__()
        self.summary = VehicleSummary()

    def get_matching_summary(self, request, context):
        lprint("connect from template-matching-summary client")
        matching_data = MessageToDict(request.matching_data)['templateMatchingByOpenCV']
        self.summary.set(matching_data)
        # read current state before stacking, then stack and collect totals
        vehicle = self.summary.get_vehicle()
        end = self.summary.get_end()
        ret = self.summary.get_metadata()
        self.summary.stack()
        ret['vehicle'] = vehicle
        ret['end'] = end
        ret['summary'] = self.summary.get_all_vehicles()
        # a finished sequence starts the next one from a clean state
        if end['status']:
            self.summary.reset()
        payload = Struct()
        payload.update(ret)
        return template_matcning_summary_pb2.SummaryResponse(summary_data=payload)
class TriggerSummaryServer(template_matcning_summary_pb2_grpc.TemplateMatchingSummaryServicer):
    """gRPC servicer that summarises template-matching trigger events."""

    def __init__(self):
        super().__init__()
        self.summary = TriggerSummary()

    def get_matching_summary(self, request, context):
        lprint("connect from template-matching-summary client")
        matching_data = MessageToDict(request.matching_data)['templateMatchingByOpenCV']
        # start a fresh summary when the incoming data demands it
        if self.summary.should_be_reset(matching_data):
            self.summary.reset()
        self.summary.set(matching_data)
        trigger = self.summary.get_trigger()
        end = self.summary.get_end()
        ret = self.summary.get_metadata()
        ret['trigger'] = trigger
        ret['end'] = end
        # a finished sequence starts the next one from a clean state
        if end['status']:
            self.summary.reset()
        payload = Struct()
        payload.update(ret)
        return template_matcning_summary_pb2.SummaryResponse(summary_data=payload)
def main():
    """Start the gRPC server with the servicer matching CURRENT_DEVICE_NAME.

    Exits with status 1 (after logging) on any startup failure, including an
    unrecognised device name.
    """
    try:
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
        if CURRENT_DEVICE_NAME in ['tartarus', 'poseidon', 'lib']:
            template_matcning_summary_pb2_grpc.add_TemplateMatchingSummaryServicer_to_server(
                VehicleSummaryServer(), server,
            )
        elif CURRENT_DEVICE_NAME in ['deneb', 'elpis', 'neo', 'moca']:
            template_matcning_summary_pb2_grpc.add_TemplateMatchingSummaryServicer_to_server(
                TriggerSummaryServer(), server,
            )
        else:
            # f-string avoids the TypeError the old string concatenation
            # raised when CURRENT_DEVICE_NAME is None (env var unset), so
            # the intended error message is what actually gets logged
            raise TemplateMatchingSummaryServerError(
                f"Device Name {CURRENT_DEVICE_NAME} is wrong.")
        server.add_insecure_port('[::]:' + str(SERVER_PORT))
        server.start()
        server.wait_for_termination()
    except Exception as e:
        lprint(e)
        sys.exit(1)
if __name__ == "__main__":
main()
| 32.114286
| 105
| 0.687129
|
4a0d8843017836e07a35910c2ffebcd93e448446
| 1,999
|
py
|
Python
|
aoc_2020/_utils.py
|
eferm/aoc-2020
|
95dfd494bf66011556dc747120f280368a4e1fbc
|
[
"MIT"
] | null | null | null |
aoc_2020/_utils.py
|
eferm/aoc-2020
|
95dfd494bf66011556dc747120f280368a4e1fbc
|
[
"MIT"
] | null | null | null |
aoc_2020/_utils.py
|
eferm/aoc-2020
|
95dfd494bf66011556dc747120f280368a4e1fbc
|
[
"MIT"
] | null | null | null |
# Copyright Ryan Norris https://github.com/rynorris/adventofcode
import os
import re
from typing import Dict, Tuple, TypeVar
import requests
from dotenv import load_dotenv
load_dotenv(verbose=True)
SESSION = os.getenv("SESSION")
INPUTS_DIR = os.path.join(os.path.dirname(__file__), "inputs")
def get_input(year, day):
    """Return the puzzle input for (year, day), preferring the local cache."""
    cached = _get_cached_input(year, day)
    if cached:
        print(f"Found cached input for {year} day {day}")
        return cached
    print(f"No cached input for {year} day {day}, fetching from server...")
    response = requests.get(
        f"https://adventofcode.com/{year}/day/{day}/input",
        headers={"Cookie": f"session={SESSION}"},
    )
    response.raise_for_status()
    print(f"Fetched input for {year} day {day}. Caching for later.")
    _cache_input(year, day, response.text)
    return response.text
def _get_cached_input(year, day):
    """Return the cached input text, or None when no cache file exists."""
    path = _cached_input_path(year, day)
    if not os.path.exists(path):
        return None
    with open(path) as f:
        return f.read()
def _cache_input(year, day, text):
    """Write *text* to the cache file for (year, day), creating the cache dir.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of an existence check
    plus ``mkdir``: that removes the race where the directory appears between
    the check and the call, and also creates missing parent directories.
    """
    os.makedirs(INPUTS_DIR, exist_ok=True)
    path = _cached_input_path(year, day)
    with open(path, "w") as f:
        f.write(text)
def _cached_input_path(year, day):
    """Return the cache file path for (year, day), e.g. ``inputs/2020_05.txt``."""
    return os.path.join(INPUTS_DIR, f"{year}_{day:02}.txt")
def lmap(f, *seqs):
    """Like :func:`map`, but eagerly materialised as a list."""
    return [f(*args) for args in zip(*seqs)]
def lfilter(f, seq):
    """Like :func:`filter`, but eagerly materialised as a list.

    As with ``filter``, *f* may be None to keep only truthy items.
    """
    return [*filter(f, seq)]
def filterre(r, seq):
    """Return the items of *seq* matched by regex pattern *r* (via ``search``)."""
    pattern = re.compile(r)
    return [item for item in seq if pattern.search(item)]
def translate(mapping, s):
    """Apply a character-translation *mapping* (as for ``str.maketrans``) to *s*."""
    table = str.maketrans(mapping)
    return s.translate(table)
T = TypeVar("T")
def gridprint(grid: Dict[Tuple[int, int], T], default: T = " ") -> None:
    """Print *grid* (a ``{(x, y): value}`` mapping) as rows of text.

    Missing cells inside the bounding box are rendered with *default*.
    An empty grid prints nothing (previously ``min()`` raised ValueError
    on the empty coordinate sequence).
    """
    if not grid:
        return
    xs = [x for x, _ in grid]
    ys = [y for _, y in grid]
    # hoist the x bounds — they are invariant across rows
    x_lo, x_hi = min(xs), max(xs)
    for y in range(min(ys), max(ys) + 1):
        row = [str(grid.get((x, y), default)) for x in range(x_lo, x_hi + 1)]
        print("".join(row))
| 23.517647
| 75
| 0.637319
|
4a0d88f5deee10e45b0d39b4bea959db7825b5ae
| 13,995
|
py
|
Python
|
tests/test_postprocessing/test_shap_explainer.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 17
|
2021-05-21T13:01:52.000Z
|
2022-03-19T15:17:10.000Z
|
tests/test_postprocessing/test_shap_explainer.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 3
|
2021-10-31T22:40:28.000Z
|
2021-11-08T02:28:35.000Z
|
tests/test_postprocessing/test_shap_explainer.py
|
moonson619/AI4Water-1
|
285d46824502b6a787e42570b72432f4f6acf45e
|
[
"MIT"
] | 7
|
2021-08-06T07:27:50.000Z
|
2022-01-26T00:38:32.000Z
|
import time
import unittest
import os
import sys
import site
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import shap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.datasets import busan_beach, MtropicsLaos
from ai4water.postprocessing.explain import ShapExplainer, explain_model_with_shap
from test_lime_explainer import make_lstm_reg_model, lstm_model, get_fitted_model, make_mlp_model
laos = MtropicsLaos()
class_data = laos.make_classification()
beach_data = busan_beach()
# todo, do not use any transformation on y for classification problem
# todo, allowed y_transformation are only log and sqrt
# todo unable to use functional api with transformation for model explainability
def fit_and_plot(model_name, data, heatmap=False, beeswarm_plot=False):
    """Fit *model_name* on *data* and draw the requested shap plots.

    Runs the Explainer twice: once with a DataFrame slice of the test set
    and once with the underlying numpy values.
    """
    model = get_fitted_model(model_name, data)
    x_test, _ = model.test_data()
    x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
    for data_view in (x_test, x_test.values):
        interpreter = ShapExplainer(model, data_view, path=model.path,
                                    explainer="Explainer", framework="ML")
        if heatmap:
            interpreter.heatmap(show=False)
        if beeswarm_plot:
            interpreter.beeswarm_plot(show=False)
    return
def fit_and_interpret(model_name: str,
                      data,
                      draw_heatmap=True,
                      **kwargs
                      ):
    """Fit *model_name* on *data* and run ShapExplainer end to end.

    Exercises both the DataFrame and the raw-numpy code paths; optionally
    draws a heatmap for the DataFrame-based explainer.
    """
    model = get_fitted_model(model_name, data)
    train_x, _ = model.training_data()
    train_df = pd.DataFrame(train_x, columns=model.input_features).iloc[0:11]
    test_x, _ = model.test_data()
    test_df = pd.DataFrame(test_x, columns=model.input_features).iloc[0:2]
    # DataFrame path
    interpreter = ShapExplainer(model, test_df,
                                train_data=train_df,
                                path=model.path,
                                framework="ML",
                                **kwargs
                                )
    interpreter(save=False)
    if draw_heatmap:
        interpreter.heatmap(show=False)
    # raw numpy path: feature names must be supplied explicitly
    explainer = ShapExplainer(model,
                              test_df.values,
                              train_data=train_df.values,
                              features=model.input_features,
                              path=model.path,
                              framework="ML",
                              **kwargs
                              )
    explainer(save=False)
    return
def get_explainer(model_name, data):
    """Fit *model_name* on *data* and return a ready ShapExplainer."""
    model = get_fitted_model(model_name, data)
    test_x, _ = model.test_data()
    frame = pd.DataFrame(test_x, columns=model.input_features).iloc[0:5]
    return ShapExplainer(model, frame, explainer="Explainer",
                         path=model.path)
def fit_and_draw_plots(model_name, data, draw_heatmap=False):
    """Fit *model_name* and draw waterfall/scatter (and optional heatmap) plots."""
    model = get_fitted_model(model_name, data)
    test_x, _ = model.test_data()
    frame = pd.DataFrame(test_x, columns=model.input_features).iloc[0:5]
    explainer = ShapExplainer(model, frame, explainer="Explainer",
                              path=model.path, framework="ML")
    explainer.waterfall_plot_all_examples(show=False)
    explainer.scatter_plot_all_features(show=False)
    if draw_heatmap:
        explainer.heatmap(show=False)
    # repeat with the raw numpy values (no heatmap on this path)
    explainer = ShapExplainer(model, frame.values, explainer="Explainer",
                              path=model.path, framework="ML")
    explainer.waterfall_plot_all_examples(show=False)
    explainer.scatter_plot_all_features(show=False)
    return
def get_mlp():
    """Build an MLP model and return it with a 5-row test-set DataFrame."""
    model = make_mlp_model()
    test_x, _ = model.test_data()
    test_frame = pd.DataFrame(test_x, columns=model.input_features).iloc[0:5]
    # restore default matplotlib settings changed by earlier plotting
    plt.rcParams.update(plt.rcParamsDefault)
    return model, test_frame
class TestShapExplainers(unittest.TestCase):
    """Exercise ShapExplainer against many model families and SHAP explainer
    back-ends (Kernel/Tree/Deep/Gradient), checking that the plotting entry
    points run without error.

    NOTE(review): these tests rely on project fixtures (``busan_beach``,
    ``Model``, ``fit_and_interpret`` etc.) defined elsewhere in this file/
    package; all plots are drawn with ``show=False`` so the suite can run
    headless.
    """

    def test_doc_example(self):
        """Run the documented sklearn + shap.datasets example end to end."""
        X, y = shap.datasets.diabetes()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
        lin_regr = linear_model.LinearRegression()
        lin_regr.fit(X_train, y_train)
        explainer = ShapExplainer(lin_regr,
                                  data=X_test.iloc[0:14],
                                  train_data=X_train,
                                  num_means=12,
                                  path=os.path.join(os.getcwd(), "results"))
        explainer(plot_force_all=True)
        explainer.heatmap(show=False)
        explainer.plot_shap_values(show=False)
        return

    def test_pd_plot(self):
        """Partial-dependence plot of the first feature for several regressors."""
        for mod in [
            "XGBRegressor",  # todo error
            "RandomForestRegressor",
            "LGBMRegressor",
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            "HistGradientBoostingRegressor",
            "XGBRFRegressor"  # todo error
        ]:
            exp = get_explainer(mod, busan_beach(inputs=["pcp_mm", "air_p_hpa", "air_temp_c"]))
            exp.pdp_single_feature(feature_name=exp.features[0], show=False, save=False)
            # brief pause between models — presumably to let matplotlib/process
            # resources settle; TODO confirm it is still needed
            time.sleep(1)
        return

    def test_ai4water_model(self):
        """KernelExplainer on an ai4water Model, with DataFrame and ndarray inputs."""
        model = Model(
            model="LinearRegression",
            verbosity=0
        )
        model.fit(data=busan_beach(inputs=['wat_temp_c', 'tide_cm']))
        x_train, y_train = model.training_data()
        x_test, y_test = model.test_data()
        x_train = pd.DataFrame(x_train, columns=model.input_features)
        x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
        # First round: pandas inputs.
        explainer = ShapExplainer(model,
                                  data=x_test, train_data=x_train,
                                  num_means=10, path=model.path,
                                  explainer="KernelExplainer")
        explainer(plot_force_all=False)
        #explainer.heatmap()
        # Second round: the same data as raw numpy arrays.
        explainer = ShapExplainer(model,
                                  train_data=x_train.values, data=x_test.values,
                                  num_means=10, path=model.path, explainer="KernelExplainer")
        explainer(plot_force_all=False)
        #explainer.heatmap()
        return

    def test_raise_error(self):
        """ShapExplainer must raise AssertionError for this multi-output LSTM model."""
        model = Model(
            model={"layers": {"LSTM":{"units": 4}}},
            input_features=['wat_temp_c', 'tide_cm'],
            output_features=['tetx_coppml', "ecoli", "16s", "inti1"],
            verbosity=0
        )
        model.fit(data=busan_beach(inputs=['wat_temp_c', 'tide_cm'],
                                   target=['tetx_coppml', "ecoli", "16s", "inti1"]))
        x_test, y_test = model.test_data()

        def initiate_class():
            # Constructing the explainer itself is what should raise.
            return ShapExplainer(model, x_test)

        self.assertRaises(AssertionError,
                          initiate_class)
        return

    def test_xgb(self):
        """TreeExplainer on XGBRegressor, including the heatmap."""
        fit_and_interpret("XGBRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=True, explainer="TreeExplainer")
        return

    def test_lgbm(self):
        """TreeExplainer on LGBMRegressor (heatmap skipped)."""
        fit_and_interpret("LGBMRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=False, explainer="TreeExplainer")
        return

    def test_catboost(self):
        """TreeExplainer on CatBoostRegressor (heatmap skipped)."""
        fit_and_interpret("CatBoostRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=False, explainer="TreeExplainer")
        return

    def test_waterfall_with_xgb(self):
        """Waterfall/auxiliary plots for XGBRegressor."""
        fit_and_draw_plots("XGBRegressor", busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                           draw_heatmap=True)
        return

    def test_waterfall_with_catboost(self):
        """Waterfall/auxiliary plots for CatBoostRegressor."""
        fit_and_draw_plots("CatBoostRegressor", busan_beach(inputs=['wat_temp_c', 'tide_cm']))
        return

    def test_heatmap(self):
        """SHAP heatmap for several regressors (problematic ones commented out)."""
        for mod in [
            "XGBRegressor",
            "RandomForestRegressor",
            ##"LGBMRegressor",  # process stopping problem
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            ##"HISTGRADIENTBOOSTINGREGRESSOR",  # taking very long time
            "XGBRFRegressor"
        ]:
            fit_and_plot(mod, beach_data, heatmap=True)
            time.sleep(1)
        return

    def test_beeswarm_plot(self):
        """Beeswarm summary plot for several regressors."""
        for mod in [
            "XGBRegressor",
            "RandomForestRegressor",
            "LGBMRegressor",
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            "HistGradientBoostingRegressor",
            "XGBRFRegressor"
        ]:
            fit_and_plot(mod, beach_data, beeswarm_plot=True)
            time.sleep(1)
        return

    def test_deepexplainer_mlp(self):
        """DeepExplainer on the MLP fixture, explaining layer 2."""
        model, testx = get_mlp()
        ex = ShapExplainer(model, testx, explainer="DeepExplainer", layer=2,
                           path=model.path)
        ex.plot_shap_values(show=False)
        return

    def test_gradientexplainer_mlp(self):
        """GradientExplainer on the MLP fixture, explaining layer 1."""
        model, testx = get_mlp()
        ex = ShapExplainer(model, testx, layer=1, explainer="GradientExplainer",
                           path=model.path)
        # Reset any rcParams the fixture/plots may have changed.
        plt.rcParams.update(plt.rcParamsDefault)
        ex.plot_shap_values(show=False)
        return

    def test_class_model(self):
        """KernelExplainer on a classification model."""
        fit_and_interpret("DecisionTreeClassifier", data=class_data, draw_heatmap=False,
                          explainer="KernelExplainer")
        return

    def test_lstm_model_deep_exp(self):
        """Default (deep) explainer on an LSTM regression model, layer 2."""
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer=2, features=m.input_features, path=m.path)
        exp.summary_plot(show=False)
        exp.force_plot_single_example(0, show=False)
        exp.plot_shap_values(show=False)
        return

    def test_lstm_model_gradient_exp(self):
        """GradientExplainer on an LSTM model, addressing the layer by name."""
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer="LSTM", explainer="GradientExplainer",
                            features=m.input_features, path=m.path)
        exp.plot_shap_values(show=False)
        exp.force_plot_single_example(0, show=False)
        return

    def test_lstm_model_ai4water(self):
        """Force plot for a single example of an ai4water LSTM model."""
        time.sleep(1)
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer="LSTM", explainer="GradientExplainer",
                            features=m.input_features, path=m.path)
        exp.force_plot_single_example(0, show=False)
        return

    def test_ai4water_ml(self):
        """explain_model_with_shap helper over several tree-based regressors."""
        for m in [
            "XGBRegressor",
            "RandomForestRegressor",
            "GradientBoostingRegressor"
        ]:
            model = get_fitted_model(m, busan_beach(inputs=['wat_temp_c', 'tide_cm']))
            exp = explain_model_with_shap(model, examples_to_explain=2, explainer="TreeExplainer")
            assert isinstance(exp, ShapExplainer)
        return

    def test_ai4water_mlp(self):
        """explain_model_with_shap helper on the MLP fixture."""
        time.sleep(1)
        model = make_mlp_model()
        exp = explain_model_with_shap(model, examples_to_explain=2)
        assert isinstance(exp, ShapExplainer)
        return

    def test_ai4water_lstm(self):
        """explain_model_with_shap helper on the LSTM fixture."""
        m = lstm_model()
        exp = explain_model_with_shap(m, examples_to_explain=2)
        assert isinstance(exp, ShapExplainer)
        return

    def test_plots_for_3d_input(self):
        """Per-example plots for 3D (lookback) input, at the best/worst predictions."""
        model = lstm_model()
        test_x, _ = model.test_data()
        p = model.predict(test_x)
        exp = ShapExplainer(model, test_x, layer=2, path=model.path,
                            features=model.input_features
                            )
        # Explain the examples with the smallest and largest predictions.
        exp.force_plot_single_example(np.argmin(p).item(), show=False)
        exp.force_plot_single_example(np.argmax(p).item(), show=False)
        exp.waterfall_plot_single_example(np.argmin(p).item(), show=False)
        exp.waterfall_plot_single_example(np.argmax(p).item(), show=False)
        exp.pdp_all_features(lookback=0, show=False)
        return

    def test_multiple_inputs(self):
        """ShapExplainer on a functional model with two separate input branches."""
        model = Model(model={"layers": {"Input_0": {"shape": (10, 2)},
                                        "LSTM_0": {"config": {"units": 62},
                                                   "inputs": "Input_0",
                                                   "outputs": "lstm0_output"},
                                        "Input_1": {"shape": (5, 3)},
                                        "LSTM_1": {"config": {"units": 32},
                                                   "inputs": "Input_1",
                                                   "outputs": "lstm1_output"},
                                        "Concatenate": {"config": {}, "inputs": ["lstm0_output", "lstm1_output"]},
                                        "Dense": {"config": 1}
                                        }}, verbosity=0)
        # One random batch per input branch; shapes match the Input layers above.
        test_x = [np.random.random((100, 10, 2)), np.random.random((100, 5, 3))]
        exp = ShapExplainer(model, test_x, layer="LSTM_1", path=model.path)
        exp.summary_plot(show=False)
        exp.plot_shap_values(show=False)
        return
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 32.775176
| 114
| 0.587638
|
4a0d89733526103f97024c2f0fbd0add5d664662
| 49
|
py
|
Python
|
starfish/core/spots/DetectPixels/__init__.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
starfish/core/spots/DetectPixels/__init__.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
starfish/core/spots/DetectPixels/__init__.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
from .pixel_spot_decoder import PixelSpotDecoder
| 24.5
| 48
| 0.897959
|
4a0d8983713df0164d6fbfd9919856b260a7966c
| 171
|
py
|
Python
|
cloudmesh/flow/visualize/manager.py
|
cloudmesh/cloudmesh-flow
|
aa0c00034c2947b8f41d726c5926cdcb068ce54a
|
[
"Apache-2.0"
] | 1
|
2019-05-07T13:20:33.000Z
|
2019-05-07T13:20:33.000Z
|
cloudmesh/flow/visualize/manager.py
|
cloudmesh/cloudmesh-flow
|
aa0c00034c2947b8f41d726c5926cdcb068ce54a
|
[
"Apache-2.0"
] | 29
|
2019-04-25T18:04:25.000Z
|
2022-03-25T18:41:44.000Z
|
cloudmesh/flow/visualize/manager.py
|
cloudmesh/cloudmesh-flow
|
aa0c00034c2947b8f41d726c5926cdcb068ce54a
|
[
"Apache-2.0"
] | 1
|
2020-03-23T03:00:51.000Z
|
2020-03-23T03:00:51.000Z
|
import requests
import os
def shutdown():
    """Ask the local flow visualization server to shut down.

    Sends a GET request to the server's ``/shutdown`` endpoint on
    ``127.0.0.1:8080``. The original discarded the response; it is now
    returned so callers can inspect the status code. Callers that ignored
    the previous ``None`` return are unaffected.

    :return: the ``requests.Response`` from the shutdown endpoint
    :raises requests.exceptions.ConnectionError: if no server is listening
    """
    url = "http://127.0.0.1:8080/shutdown"
    response = requests.get(url)
    return response
def start():
    """Start the flow visualization server.

    Runs ``server.py`` in a blocking child shell via ``os.system`` and
    returns its exit status (0 on success), which the original discarded.
    NOTE(review): this resolves ``server.py`` relative to the current
    working directory — presumably callers cd into the package first.

    :return: the exit status as reported by ``os.system``
    """
    return os.system('python server.py')
| 12.214286
| 42
| 0.643275
|
4a0d8a86fec392c900179bb1d83f1e1c12af8eb1
| 4,995
|
py
|
Python
|
tests/neptune/new/internal/test_operations.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 254
|
2020-01-27T14:18:57.000Z
|
2022-03-31T21:40:33.000Z
|
tests/neptune/new/internal/test_operations.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 160
|
2020-02-05T11:00:22.000Z
|
2022-03-31T08:50:24.000Z
|
tests/neptune/new/internal/test_operations.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 23
|
2020-02-07T09:19:50.000Z
|
2022-02-15T09:52:56.000Z
|
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
import uuid
from neptune.new.attributes import Integer
from neptune.new.internal.operation import *
# pylint: disable=protected-access
class TestOperations(unittest.TestCase):
    """Round-trip serialization tests for every concrete ``Operation``
    subclass: each instance must survive ``to_dict`` -> JSON -> ``from_dict``
    unchanged, and the sample list must cover all subclasses."""

    def test_serialization_to_dict(self):
        """JSON round-trip every sample and check subclass coverage."""
        # Names of all Operation subclasses; a name is removed once an
        # instance of it has been seen, so the set must end up empty.
        classes = {cls.__name__ for cls in all_subclasses(Operation)}
        for obj in self._list_objects():
            if obj.__class__.__name__ in classes:
                classes.remove(obj.__class__.__name__)
            deserialized_obj = Operation.from_dict(
                json.loads(json.dumps(obj.to_dict()))
            )
            # Compare raw attribute dicts so every field is checked.
            self.assertEqual(obj.__dict__, deserialized_obj.__dict__)
        self.assertEqual(classes, set())

    @staticmethod
    def _list_objects():
        """Return one representative instance of every Operation subclass."""
        now = datetime.now()
        return [
            AssignFloat(TestOperations._random_path(), 5.0),
            AssignInt(TestOperations._random_path(), 5),
            AssignBool(TestOperations._random_path(), True),
            AssignBool(TestOperations._random_path(), False),
            AssignString(TestOperations._random_path(), "a\rsdf\thr"),
            AssignDatetime(
                TestOperations._random_path(),
                # truncate to whole milliseconds — presumably the precision
                # preserved by serialization; TODO confirm
                now.replace(microsecond=1000 * int(now.microsecond / 1000)),
            ),
            AssignArtifact(
                TestOperations._random_path(),
                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            ),
            UploadFile(TestOperations._random_path(), "f.txt", "file/path/f.txt"),
            UploadFileContent(
                TestOperations._random_path(), "stream.txt", "some base64"
            ),
            UploadFileSet(
                TestOperations._random_path(),
                ["file/path/*.txt", "another/file/path/*.txt"],
                True,
            ),
            UploadFileSet(
                TestOperations._random_path(),
                ["file/path/*.txt", "another/file/path/*.txt"],
                False,
            ),
            DeleteFiles(
                TestOperations._random_path(), {"file/path/*.txt", "dir/path/"}
            ),
            LogFloats(
                TestOperations._random_path(),
                [
                    LogFloats.ValueType(5, 4, 500),
                    LogFloats.ValueType(3, None, 1000),
                    LogFloats.ValueType(10, 10, 1234),
                ],
            ),
            LogStrings(
                TestOperations._random_path(),
                [
                    LogStrings.ValueType("jetybv", 1, 5),
                    LogStrings.ValueType("ghs\ner", 3, 123),
                    LogStrings.ValueType("r", None, 1356),
                    LogStrings.ValueType("ghsr", 13, 53682),
                ],
            ),
            LogImages(
                TestOperations._random_path(),
                [
                    LogImages.ValueType(
                        ImageValue("base64_image_1", "name1", "description1"), None, 2
                    ),
                    LogImages.ValueType(
                        ImageValue("base64_image_2", "name2", "description2"), 0, 5
                    ),
                ],
            ),
            ClearFloatLog(TestOperations._random_path()),
            ClearStringLog(TestOperations._random_path()),
            ClearImageLog(TestOperations._random_path()),
            ConfigFloatSeries(
                TestOperations._random_path(), min=11, max=600, unit="kg/h"
            ),
            AddStrings(TestOperations._random_path(), {"asef", "asrge4"}),
            RemoveStrings(TestOperations._random_path(), {"a\ne", "aeg\t4ger", "agrg"}),
            ClearStringSet(TestOperations._random_path()),
            DeleteAttribute(TestOperations._random_path()),
            TrackFilesToArtifact(
                TestOperations._random_path(),
                str(uuid.uuid4()),
                [("file/path/f.txt", None)],
            ),
            ClearArtifact(TestOperations._random_path()),
            CopyAttribute(
                TestOperations._random_path(),
                container_id=str(uuid.uuid4()),
                container_type=ContainerType.RUN,
                source_path=TestOperations._random_path(),
                source_attr_cls=Integer,
            ),
        ]

    @staticmethod
    def _random_path():
        """Return a unique attribute path ending in a fresh UUID."""
        return ["some", "random", "path", str(uuid.uuid4())]
| 38.72093
| 88
| 0.557357
|
4a0d8b3bb38ad22cff856b8a87e7978aa0c54d64
| 38,522
|
py
|
Python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations(object):
"""ServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.blob.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # Serializer/deserializer for request bodies, query/header values and
    # response models.
    self._serialize = serializer
    self._deserialize = deserializer
    # Client configuration (account URL, service version, ...).
    self._config = config
def set_properties(
    self,
    storage_service_properties,  # type: "_models.StorageServiceProperties"
    timeout=None,  # type: Optional[int]
    request_id_parameter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Sets properties for a storage account's Blob service endpoint, including properties for Storage
    Analytics and CORS (Cross-Origin Resource Sharing) rules.

    :param storage_service_properties: The StorageService properties.
    :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default HTTP-status -> exception mapping; callers may extend or
    # override it via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Fixed query-string selectors identifying this REST operation.
    restype = "service"
    comp = "properties"
    content_type = kwargs.pop("content_type", "application/xml")
    accept = "application/xml"

    # Construct URL
    url = self.set_properties.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the service properties as an XML request body and PUT them.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202 Accepted is the only success status for this operation.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))

    if cls:
        return cls(pipeline_response, None, response_headers)

set_properties.metadata = {'url': '/'}  # type: ignore
def get_properties(
    self,
    timeout=None,  # type: Optional[int]
    request_id_parameter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.StorageServiceProperties"
    """gets the properties of a storage account's Blob service, including properties for Storage
    Analytics and CORS (Cross-Origin Resource Sharing) rules.

    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: StorageServiceProperties, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.StorageServiceProperties
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageServiceProperties"]
    # Default HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Fixed query-string selectors identifying this REST operation.
    restype = "service"
    comp = "properties"
    accept = "application/xml"

    # Construct URL
    url = self.get_properties.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    deserialized = self._deserialize('StorageServiceProperties', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
get_properties.metadata = {'url': '/'}  # type: ignore
def get_statistics(
    self,
    timeout=None,  # type: Optional[int]
    request_id_parameter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.StorageServiceStats"
    """Retrieves statistics related to replication for the Blob service. It is only available on the
    secondary location endpoint when read-access geo-redundant replication is enabled for the
    storage account.

    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: StorageServiceStats, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.StorageServiceStats
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageServiceStats"]
    # Default HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Fixed query-string selectors identifying this REST operation.
    restype = "service"
    comp = "stats"
    accept = "application/xml"

    # Construct URL
    url = self.get_statistics.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    # This operation additionally returns the service's Date header.
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
    deserialized = self._deserialize('StorageServiceStats', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
get_statistics.metadata = {'url': '/'}  # type: ignore
def list_containers_segment(
    self,
    prefix=None,  # type: Optional[str]
    marker=None,  # type: Optional[str]
    maxresults=None,  # type: Optional[int]
    include=None,  # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]]
    timeout=None,  # type: Optional[int]
    request_id_parameter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ListContainersSegmentResponse"
    """The List Containers Segment operation returns a list of the containers under the specified
    account.

    :param prefix: Filters the results to return only containers whose name begins with the
     specified prefix.
    :type prefix: str
    :param marker: A string value that identifies the portion of the list of containers to be
     returned with the next listing operation. The operation returns the NextMarker value within the
     response body if the listing operation did not return all containers remaining to be listed
     with the current page. The NextMarker value can be used as the value for the marker parameter
     in a subsequent call to request the next page of list items. The marker value is opaque to the
     client.
    :type marker: str
    :param maxresults: Specifies the maximum number of containers to return. If the request does
     not specify maxresults, or specifies a value greater than 5000, the server will return up to
     5000 items. Note that if the listing operation crosses a partition boundary, then the service
     will return a continuation token for retrieving the remainder of the results. For this reason,
     it is possible that the service will return fewer results than specified by maxresults, or than
     the default of 5000.
    :type maxresults: int
    :param include: Include this parameter to specify that the container's metadata be returned as
     part of the response body.
    :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType]
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ListContainersSegmentResponse, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListContainersSegmentResponse"]
    # Default HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Fixed query-string selector identifying this REST operation.
    comp = "list"
    accept = "application/xml"

    # Construct URL
    url = self.list_containers_segment.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters — all listing options are optional query params.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if prefix is not None:
        query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
    if marker is not None:
        query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
    if maxresults is not None:
        query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
    if include is not None:
        # The include list is sent as a single comma-separated value.
        query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
list_containers_segment.metadata = {'url': '/'}  # type: ignore
def get_user_delegation_key(
    self,
    key_info,  # type: "_models.KeyInfo"
    timeout=None,  # type: Optional[int]
    request_id_parameter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.UserDelegationKey"
    """Retrieves a user delegation key for the Blob service. This is only a valid operation when using
    bearer token authentication.

    :param key_info:
    :type key_info: ~azure.storage.blob.models.KeyInfo
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: UserDelegationKey, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.UserDelegationKey
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.UserDelegationKey"]
    # Default HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Fixed query-string selectors identifying this REST operation.
    restype = "service"
    comp = "userdelegationkey"
    content_type = kwargs.pop("content_type", "application/xml")
    accept = "application/xml"

    # Construct URL
    url = self.get_user_delegation_key.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # The KeyInfo payload is serialized as XML and POSTed to the service.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True)
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
    deserialized = self._deserialize('UserDelegationKey', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
get_user_delegation_key.metadata = {'url': '/'}  # type: ignore
    def get_account_info(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Returns the sku name and account kind.

        The values are returned in response headers only; the operation has no body.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map well-known HTTP status codes to azure-core exceptions; callers may extend/override.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        restype = "account"
        comp = "properties"
        accept = "application/xml"

        # Construct URL
        url = self.get_account_info.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)

        # All useful output (sku name, account kind, HNS flag) comes back as headers.
        response_headers = {}
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
        response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
        response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))
        response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled'))

        if cls:
            return cls(pipeline_response, None, response_headers)

    get_account_info.metadata = {'url': '/'}  # type: ignore
def submit_batch(
self,
content_length, # type: int
multipart_content_type, # type: str
body, # type: IO
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> IO
"""The Batch operation allows multiple API calls to be embedded into a single HTTP request.
:param content_length: The length of the request.
:type content_length: long
:param multipart_content_type: Required. The value of this header must be multipart/mixed with
a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:`<GUID>`.
:type multipart_content_type: str
:param body: Initial data.
:type body: IO
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "batch"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.submit_batch.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IO', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = response.stream_download(self._client._pipeline)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
submit_batch.metadata = {'url': '/'} # type: ignore
    def filter_blobs(
        self,
        timeout=None,  # type: Optional[int]
        request_id_parameter=None,  # type: Optional[str]
        where=None,  # type: Optional[str]
        marker=None,  # type: Optional[str]
        maxresults=None,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.FilterBlobSegment"
        """The Filter Blobs operation enables callers to list blobs across all containers whose tags match
        a given search expression. Filter blobs searches across all containers within a storage
        account but can be scoped within the expression to a single container.

        :param timeout: The timeout parameter is expressed in seconds. For more information, see
         :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
         timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled.
        :type request_id_parameter: str
        :param where: Filters the results to return only to return only blobs whose tags match the
         specified expression.
        :type where: str
        :param marker: A string value that identifies the portion of the list of containers to be
         returned with the next listing operation. The operation returns the NextMarker value within the
         response body if the listing operation did not return all containers remaining to be listed
         with the current page. The NextMarker value can be used as the value for the marker parameter
         in a subsequent call to request the next page of list items. The marker value is opaque to the
         client.
        :type marker: str
        :param maxresults: Specifies the maximum number of containers to return. If the request does
         not specify maxresults, or specifies a value greater than 5000, the server will return up to
         5000 items. Note that if the listing operation crosses a partition boundary, then the service
         will return a continuation token for retrieving the remainder of the results. For this reason,
         it is possible that the service will return fewer results than specified by maxresults, or than
         the default of 5000.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FilterBlobSegment, or the result of cls(response)
        :rtype: ~azure.storage.blob.models.FilterBlobSegment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FilterBlobSegment"]
        # Map well-known HTTP status codes to azure-core exceptions; callers may extend/override.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        comp = "blobs"
        accept = "application/xml"

        # Construct URL
        url = self.filter_blobs.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (all optional filters are omitted when None).
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        if where is not None:
            query_parameters['where'] = self._serialize.query("where", where, 'str')
        if marker is not None:
            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
        if maxresults is not None:
            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id_parameter is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
        deserialized = self._deserialize('FilterBlobSegment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    filter_blobs.metadata = {'url': '/'}  # type: ignore
| 54.71875
| 133
| 0.675692
|
4a0d8dd39b4ae2802472c7d785534a2fe83ad818
| 4,312
|
py
|
Python
|
compute/navigation_ngrams.py
|
andrewhead/Search-Task-Analysis
|
ef73745a760b5c2ec7060488219bb29237c26464
|
[
"MIT"
] | null | null | null |
compute/navigation_ngrams.py
|
andrewhead/Search-Task-Analysis
|
ef73745a760b5c2ec7060488219bb29237c26464
|
[
"MIT"
] | null | null | null |
compute/navigation_ngrams.py
|
andrewhead/Search-Task-Analysis
|
ef73745a760b5c2ec7060488219bb29237c26464
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from peewee import fn
import json
from nltk.util import ngrams as nltk_compute_ngrams
from dump._urls import standardize_url
from models import LocationVisit, NavigationNgram
logger = logging.getLogger('data')
def compute_navigation_ngrams(length, page_type_lookup):
    '''
    Compute n-grams of sequences of pages visited, of a certain length.

    A `page_type_lookup` dictionary must be provided, that maps URLs to their page types
    (entries look like ``{"main_type": ..., "redirect": ...}``).
    '''
    # Create a new compute index for this run: one past the largest stored so far.
    last_compute_index = NavigationNgram.select(fn.Max(NavigationNgram.compute_index)).scalar() or 0
    compute_index = last_compute_index + 1

    # Fetch only the visits from the most recent visit-computation run.
    visit_compute_index = LocationVisit.select(fn.Max(LocationVisit.compute_index)).scalar()
    visits = LocationVisit.select().where(LocationVisit.compute_index == visit_compute_index)

    # Distinct participants and concerns appearing in those visits.
    # (Set comprehensions instead of set([...]) — same result, idiomatic form.)
    participant_ids = {visit.user_id for visit in visits}
    concern_indexes = {visit.concern_index for visit in visits}

    # For every (participant, concern) pair, turn the chronological sequence of
    # visited pages into page-type n-grams and persist them.
    for participant_id in participant_ids:
        for concern_index in concern_indexes:
            participant_concern_visits = visits.where(
                LocationVisit.user_id == participant_id,
                LocationVisit.concern_index == concern_index,
            ).order_by(LocationVisit.start.asc())

            # Standardized URLs of the pages this participant visited, in order.
            standardized_urls = [standardize_url(visit.url) for visit in participant_concern_visits]

            # Convert URLs to page types. Redirects are skipped: for all intents and
            # purposes, the participant travels between the page types before and after.
            page_types = []
            for url in standardized_urls:
                if url in page_type_lookup:
                    url_info = page_type_lookup[url]
                    if not url_info['redirect']:
                        page_types.append(url_info['main_type'])
                else:
                    # `Logger.warn` is deprecated in favor of `Logger.warning`.
                    logger.warning("URL %s not in page type lookup. Giving it 'Unknown' type", url)
                    page_types.append("Unknown")

            # Compute n-grams with NLTK and save each one to the database.
            for ngram_tuple in nltk_compute_ngrams(page_types, length):
                NavigationNgram.create(
                    compute_index=compute_index,
                    user_id=participant_id,
                    concern_index=concern_index,
                    length=length,
                    ngram=", ".join(ngram_tuple),
                )
def main(page_types_json_filename, min_length, max_length, *args, **kwargs):
    """Entry point: load the URL-to-page-type lookup and compute n-grams of every
    requested length from ``min_length`` through ``max_length`` (inclusive)."""
    with open(page_types_json_filename) as lookup_file:
        lookup = json.load(lookup_file)
    for ngram_length in range(min_length, max_length + 1):
        compute_navigation_ngrams(ngram_length, lookup)
def configure_parser(parser):
    """Attach this command's arguments to an argparse parser.

    :param parser: an ``argparse.ArgumentParser`` (or sub-parser) to configure.
    """
    parser.description = "Compute n-grams of page types that participants visited in sequence."
    parser.add_argument(
        "page_types_json_filename",
        help=(
            "Name of a JSON file that maps URLs to file types. " +
            "The format of each row should be:" +
            "\"<url>\": {\"main_type\": \"<main type>\", \"types\": " +
            "[<list of all relevant types>]}"
        )
    )
    # BUGFIX: both length options previously lacked `type=int`, so command-line
    # values arrived as strings and `range(min_length, max_length + 1)` in
    # `main` raised a TypeError. The integer defaults are unchanged.
    parser.add_argument(
        "--min-length",
        default=2,
        type=int,
        help="The minimum length of ngram to extract (default: %(default)s)",
    )
    parser.add_argument(
        "--max-length",
        default=6,
        type=int,
        help="The maximum length of ngram to extract (default: %(default)s)",
    )
| 39.925926
| 100
| 0.649814
|
4a0d906f7f9b633efb8488d593c801aecb183e2c
| 23
|
py
|
Python
|
Python/Tests/TestData/ProfileTestSysPath/B/mod2.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/ProfileTestSysPath/B/mod2.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/ProfileTestSysPath/B/mod2.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
def func():
    """Do nothing and return None (intentional no-op)."""
    return None
| 7.666667
| 12
| 0.478261
|
4a0d91d6a70d5b99f1624591197c9d1689445c67
| 60
|
py
|
Python
|
operators/selection/__init__.py
|
f4nku4n/MOENAS-TF-PSI
|
5e25df9143a09ffdcfbb4d03851b919aed60003a
|
[
"MIT"
] | null | null | null |
operators/selection/__init__.py
|
f4nku4n/MOENAS-TF-PSI
|
5e25df9143a09ffdcfbb4d03851b919aed60003a
|
[
"MIT"
] | null | null | null |
operators/selection/__init__.py
|
f4nku4n/MOENAS-TF-PSI
|
5e25df9143a09ffdcfbb4d03851b919aed60003a
|
[
"MIT"
] | null | null | null |
from .RankAndCrowdingSurvival import RankAndCrowdingSurvival
| 60
| 60
| 0.933333
|
4a0d91f7e1b066a2d5cd0fa5e2df0d1a99fb2f50
| 218
|
py
|
Python
|
spacy/tests/lang/uk/test_lemmatizer.py
|
nsorros/spaCy
|
caba63b74f556f4e0b36f1c9f608e765772ea24e
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2020-09-27T13:51:57.000Z
|
2020-09-27T13:51:57.000Z
|
spacy/tests/lang/uk/test_lemmatizer.py
|
nsorros/spaCy
|
caba63b74f556f4e0b36f1c9f608e765772ea24e
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-22T13:32:07.000Z
|
2021-06-23T09:15:29.000Z
|
spacy/tests/lang/uk/test_lemmatizer.py
|
ezorita/spaCy
|
66f1d29b4effb0b355268832fdcff21c279658bc
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-25T02:39:44.000Z
|
2021-06-25T02:39:44.000Z
|
import pytest
from spacy.tokens import Doc
def test_uk_lemmatizer(uk_lemmatizer):
    """Smoke test: the default Ukrainian lemmatizer processes a Doc without raising."""
    words = ["a", "b", "c"]
    doc = Doc(uk_lemmatizer.vocab, words=words)
    uk_lemmatizer(doc)
| 24.222222
| 57
| 0.701835
|
4a0d9292ba6466cfe3e9ee39f5f28130030e96b4
| 2,716
|
py
|
Python
|
lib/adapter/cmd.py
|
Guitar420/cheat.sh
|
69a34b606762ee8c20541e2c7adf2db347de82b5
|
[
"MIT"
] | 2
|
2019-05-05T22:56:29.000Z
|
2019-05-17T02:09:25.000Z
|
lib/adapter/cmd.py
|
Guitar420/cheat.sh
|
69a34b606762ee8c20541e2c7adf2db347de82b5
|
[
"MIT"
] | 1
|
2021-04-28T21:43:36.000Z
|
2021-06-25T15:18:23.000Z
|
lib/adapter/cmd.py
|
Guitar420/cheat.sh
|
69a34b606762ee8c20541e2c7adf2db347de82b5
|
[
"MIT"
] | 1
|
2020-11-30T13:47:06.000Z
|
2020-11-30T13:47:06.000Z
|
from gevent.monkey import patch_all
from gevent.subprocess import Popen, PIPE
patch_all()
import sys
import abc
import os
import glob
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from globals import PATH_TLDR_PAGES, PATH_CHEAT_PAGES
from adapter import Adapter
def _get_filenames(path):
return [os.path.split(topic)[1] for topic in glob.glob(path)]
class Tldr(Adapter):
    """Adapter serving tldr pages by shelling out to the external ``tldr`` CLI."""

    _adapter_name = "tldr"
    _output_format = "code"
    _cache_needed = True

    def _get_list(self, prefix=None):
        # A topic is every cached *.md page name with the ".md" suffix stripped.
        return [filename[:-3]
                for filename in _get_filenames(PATH_TLDR_PAGES) if filename.endswith('.md')]

    def _get_page(self, topic, request_options=None):
        # Run the external tool and reformat its output: drop the 2-column indent,
        # turn "-" bullet lines and unindented text into "#" comment lines, keep
        # indented (code) lines as-is.
        # NOTE(review): `answer` is bytes from communicate(); `line.startswith('-')`
        # and `'# ' + line` mix bytes and str, and the final `.decode('utf-8')` is
        # applied to a str-joined value — presumably Python 2 era code; verify
        # before running under Python 3.
        cmd = ["tldr", topic]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        answer = proc.communicate()[0]

        fixed_answer = []
        for line in answer.splitlines():
            line = line[2:]
            if line.startswith('-'):
                line = '# '+line[2:]
            elif not line.startswith(' '):
                line = "# "+line
            else:
                pass

            fixed_answer.append(line)

        answer = "\n".join(fixed_answer) + "\n"
        return answer.decode('utf-8')
class Cheat(Adapter):
    """Adapter serving cheat sheets by shelling out to the external ``cheat`` CLI."""

    _adapter_name = "cheat"
    _output_format = "code"
    _cache_needed = True

    def _get_list(self, prefix=None):
        # Every cached cheat page file name is a topic.
        return _get_filenames(PATH_CHEAT_PAGES)

    def _get_page(self, topic, request_options=None):
        # Delegate to the external binary and return its decoded stdout verbatim.
        command = ["/usr/local/bin/cheat", topic]
        process = Popen(command, stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        return stdout.decode('utf-8')
class Fosdem(Adapter):
    """Adapter exposing the current FOSDEM slide as a single pseudo-topic."""

    _adapter_name = "fosdem"
    _output_format = "ansi"

    def _get_list(self, prefix=None):
        # Exactly one topic exists for this adapter.
        return ['fosdem']

    def _get_page(self, topic, request_options=None):
        # The helper requires elevated privileges; return its decoded stdout.
        command = ["sudo", "/usr/local/bin/current-fosdem-slide"]
        process = Popen(command, stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        return stdout.decode('utf-8')
class Translation(Adapter):
    """Adapter that translates a topic via an external helper script.

    Topics look like ``<from>/<text>`` or ``<from>-<to>/<text>``; the target
    language defaults to the ``lang`` request option (or English).
    """

    _adapter_name = "translation"
    _output_format = "text"
    _cache_needed = True

    def _get_list(self, prefix=None):
        # Translations are produced on demand; there is no fixed topic list.
        return []

    def _get_page(self, topic, request_options=None):
        # Split "<from>/<text>"; the source part may itself be "<from>-<to>".
        from_, topic = topic.split('/', 1)
        # BUGFIX: request_options defaults to None, so calling .get() on it
        # directly raised AttributeError; fall back to an empty dict.
        to_ = (request_options or {}).get('lang', 'en')
        if '-' in from_:
            from_, to_ = from_.split('-', 1)
        # '+' is the URL-level word separator; restore spaces before translating.
        cmd = ["/home/igor/cheat.sh/bin/get_translation",
               from_, to_, topic.replace('+', ' ')]
        # (Removed leftover debug print of the command line.)
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        answer = proc.communicate()[0].decode('utf-8')
        return answer
| 28
| 92
| 0.608616
|
4a0d936a7bf3baef3fc4ce704868e81e02f6f8e7
| 9,205
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/documentdb/v20200601preview/sql_resource_sql_user_defined_function.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/documentdb/v20200601preview/sql_resource_sql_user_defined_function.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/documentdb/v20200601preview/sql_resource_sql_user_defined_function.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['SqlResourceSqlUserDefinedFunction']
class SqlResourceSqlUserDefinedFunction(pulumi.CustomResource):
    # Auto-generated Pulumi resource; edits here are limited to comments.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['SqlUserDefinedFunctionResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_defined_function_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        An Azure Cosmos DB userDefinedFunction.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] container_name: Cosmos DB container name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Identity for the resource.
        :param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
        :param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['SqlUserDefinedFunctionResourceArgs']] resource: The standard JSON format of a userDefinedFunction
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        :param pulumi.Input[str] user_defined_function_name: Cosmos DB userDefinedFunction name.
        """
        # Legacy __name__/__opts__ aliases are kept for backward compatibility only.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag that is sent to the engine.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if account_name is None:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            if container_name is None:
                raise TypeError("Missing required property 'container_name'")
            __props__['container_name'] = container_name
            if database_name is None:
                raise TypeError("Missing required property 'database_name'")
            __props__['database_name'] = database_name
            __props__['identity'] = identity
            __props__['location'] = location
            if options is None:
                raise TypeError("Missing required property 'options'")
            __props__['options'] = options
            if resource is None:
                raise TypeError("Missing required property 'resource'")
            __props__['resource'] = resource
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            if user_defined_function_name is None:
                raise TypeError("Missing required property 'user_defined_function_name'")
            __props__['user_defined_function_name'] = user_defined_function_name
            # Output-only properties are initialized to None; the engine fills them in.
            __props__['name'] = None
            __props__['type'] = None
        # Aliases map older API-version type tokens onto this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/latest:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlUserDefinedFunction")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlResourceSqlUserDefinedFunction, __self__).__init__(
            'azure-nextgen:documentdb/v20200601preview:SqlResourceSqlUserDefinedFunction',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlUserDefinedFunction':
        """
        Get an existing SqlResourceSqlUserDefinedFunction resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Note: the generated lookup passes an empty property bag; all state is
        # resolved from the provider by id.
        __props__ = dict()

        return SqlResourceSqlUserDefinedFunction(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """
        Identity for the resource.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the ARM resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Output[Optional['outputs.SqlUserDefinedFunctionGetPropertiesResponseResource']]:
        # The userDefinedFunction body as returned by the service.
        return pulumi.get(self, "resource")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Engine property names are camelCase; SDK attributes are snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Inverse of translate_output_property.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 54.467456
| 602
| 0.681152
|
4a0d945a16bab44e13f98ca5dec6854835ae489a
| 6,086
|
py
|
Python
|
backend/apps/modellog/mixins.py
|
Huoran559/vue-element-frontend-backend
|
2ab21792c314692fb9b11c6e5f1e890ffaf5cf3c
|
[
"MIT"
] | 15
|
2021-01-31T01:45:46.000Z
|
2022-03-27T00:23:21.000Z
|
backend/apps/modellog/mixins.py
|
zouv/vue-admin-django
|
40e91e2ff0781dad89a3f5325514a04816406cfb
|
[
"MIT"
] | 13
|
2020-02-11T21:33:40.000Z
|
2022-03-11T23:12:16.000Z
|
apps/modellog/mixins.py
|
codelieche/codelieche.com
|
8f18a9f4064af81a6dd0203fbaa138565065fff5
|
[
"MIT"
] | 4
|
2021-07-09T09:45:44.000Z
|
2022-03-28T06:47:31.000Z
|
# -*- coding:utf-8 -*-
import json
from django.contrib.contenttypes.models import ContentType
from .models import LogsEntry
# Create your views here.
SECRET_FIELDS = ('password', 'admin_pwd')
# 另外注意action_flag: 1. 添加;2. 修改;3. 删除
class LoggingBaseMethodMixin:
    """Base mixin declaring the log-recording hook."""

    def log_action(self, action_flag, message):
        """Record a log entry (no-op in the base class).

        :param action_flag: operation type (1 = add, 2 = change, 3 = delete)
        :param message: message payload
        :return: None
        """
        return None
class LoggingViewSetMixin:
"""
日志记录的中间件
"""
    def perform_create(self, serializer):
        """Create the object, then best-effort record an "added" log entry (action_flag=1)."""
        super().perform_create(serializer)
        try:
            # The user who issued the request.
            user = self.request.user
            # The Model of the created object.
            model = serializer.Meta.model
            # The ContentType corresponding to the model.
            content_type = ContentType.objects.get_for_model(model)
            # Snapshot the serialized payload (JSON round-trip yields plain types).
            data = json.loads(json.dumps(serializer.data))
            # Mask sensitive fields before persisting the message.
            for field in data:
                if field in SECRET_FIELDS: data[field] = "保密字段"
            obj = model.objects.get(pk=data['id'])
            LogsEntry.objects.create(
                user=user,
                action_flag=1,  # 1 = added (2 = changed, 3 = deleted)
                content_type=content_type,
                object_id=obj.id,
                object_repr=repr(obj),
                message=json.dumps(data),
            )
        except Exception:
            # Deliberately best-effort: a logging failure must never break the request.
            pass
def perform_update(self, serializer):
"""
更新对象日志
:param serializer: 序列化对象
"""
# 第1步:先获取到修改前的对象, 得到老的数值
# 1-1: 得到老的对象,处理处理,后续比较会用到
obj_old = self.get_object()
obj_old_dic = {}
try:
# 1-2:迭代每个validated_data字段,获取老对象这个字段的值
for field in serializer.validated_data:
field_v_old = getattr(obj_old, field)
# 判断field是不是多对多关系类型
if field_v_old.__repr__().find('ManyRelatedManager') > 0:
field_v_old_list_pk = list(field_v_old.values_list('pk', flat=True))
obj_old_dic[field] = field_v_old_list_pk
else:
# 如果不是多对多的关系,直接设置其为这个值,后面字符串输出要用,field_v_old.__repr__()
obj_old_dic[field] = field_v_old
except Exception:
# 取老对象的值,如果出现异常,依然要调用父类的方法,记得要return
super().perform_update(serializer)
return
# 第2步:执行父类的方法, 出错直接会返回不执行后续步骤了的
super().perform_update(serializer)
# 第3步:获取新的对象和其它需要用到的数据
obj_new = self.get_object()
# 发起请求的用户
user = self.request.user
# 这个对象的Model
model = serializer.Meta.model
# model对应的ContentType
content_type = ContentType.objects.get_for_model(model)
# 消息从data中提取
data = json.loads(json.dumps(serializer.data))
# 第4步:判断哪些字段变更了
# 4-1: validated_data
validated_data = serializer.validated_data
message = []
try:
# 第5步:迭代每个校验过的字段
for field in validated_data:
# 5-1:获取老的字段值和新的字段值
# obj_old_dic:老对象的值,而且多对关系的数据已经改成了pk列表
field_v_old = obj_old_dic[field]
field_v_new = getattr(obj_new, field)
# 5-2:判断field是不是多对多关系类型
if field_v_new.__repr__().find('ManyRelatedManager') > 0:
# 说明这个字段是多对多的关系,判断其是否相等要用.all()
# 5-4: 多对多关系根据主键的列表,判断是否相等
# list_pk_old = list(field_v_old.values_list('pk', flat=True))
list_pk_new = list(field_v_new.values_list('pk', flat=True))
if field_v_old != list_pk_new:
# print({'field': field, 'value': data[field]})
# 5-4:构造消息
message_i = {
'action': 'changed',
'field': field,
'value_new': '值修改了' if field in SECRET_FIELDS else data[field],
'value_old': '值修改了' if field in SECRET_FIELDS else field_v_old
}
message.append(message_i)
# else:
# print('关系型数据库没变', data[field])
else:
# 不是多对多关系,就直接判断值是否相等
if field_v_old != field_v_new:
# 5-4:构造消息
message_i = {
'action': 'changed',
'field': field,
'value_new': '保密字段(new)' if field in SECRET_FIELDS else data[field],
'value_old':
'保密字段(old)' if field in SECRET_FIELDS else field_v_old.__repr__()
}
message.append(message_i)
# print({'field': field, 'value': data[field]})
# 第6步:写入日志
if message:
LogsEntry.objects.create(
user=user,
action_flag=2,
content_type=content_type,
object_id=obj_new.pk,
object_repr=repr(obj_new),
message=json.dumps(message),
)
except Exception:
pass
def perform_destroy(self, instance):
"""删除对象"""
# 第1步:获取信息
# 发起请求的用户
user = self.request.user
# 对象model对应的ContentType
content_type = ContentType.objects.get_for_model(instance)
object_id = instance.pk
object_repr = repr(instance)
# 第2步:执行父级的perform_destroy方法
super().perform_destroy(instance)
try:
# 第3步:写入日志
message = "删除对象:{}".format(instance.__class__)
LogsEntry.objects.create(
user=user,
action_flag=3,
content_type=content_type,
object_id=object_id,
object_repr=object_repr,
message=json.dumps(message),
)
except Exception:
pass
| 32.897297
| 97
| 0.505422
|
4a0d94c018020fe6797cc65a32f8b1dd603f6166
| 5,974
|
py
|
Python
|
utils/model_utils.py
|
sethusaim/Phising-Classification
|
406645b709a212fe54f6ec5e68e02672e0d25804
|
[
"MIT"
] | null | null | null |
utils/model_utils.py
|
sethusaim/Phising-Classification
|
406645b709a212fe54f6ec5e68e02672e0d25804
|
[
"MIT"
] | null | null | null |
utils/model_utils.py
|
sethusaim/Phising-Classification
|
406645b709a212fe54f6ec5e68e02672e0d25804
|
[
"MIT"
] | null | null | null |
from mlflow import start_run
from phising.mlflow_utils.mlflow_operations import MLFlow_Operation
from phising.model_finder.tuner import Model_Finder
from phising.s3_bucket_operations.s3_operations import S3_Operation
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import GridSearchCV, train_test_split
from utils.logger import App_Logger
from utils.read_params import read_params
class Model_Utils:
"""
Description : This class is used for all the model utils
Version : 1.2
Revisions : Moved to setup to cloud
"""
def __init__(self):
self.log_writer = App_Logger()
self.config = read_params()
self.tuner_kwargs = self.config["model_utils"]
self.split_kwargs = self.config["base"]
self.train_model_dir = self.config["models_dir"]["trained"]
self.save_format = self.config["model_save_format"]
self.model_bucket = self.config["s3_bucket"]["model"]
self.exp_name = self.config["mlflow_config"]["experiment_name"]
self.run_name = self.config["mlflow_config"]["run_name"]
self.mlflow_op = MLFlow_Operation()
self.model_finder = Model_Finder()
self.s3 = S3_Operation()
self.class_name = self.__class__.__name__
def get_model_score(self, model, test_x, test_y, log_file):
"""
Method Name : get_model_score
Description : This method gets model score againist the test data
Output : A model score is returned
On Failure : Write an exception log and then raise an exception
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_model_score.__name__
self.log_writer.start_log("start", self.class_name, method_name, log_file)
try:
model_name = model.__class__.__name__
preds = model.predict(test_x)
self.log_writer.log(
log_file, f"Used {model_name} model to get predictions on test data"
)
if len(test_y.unique()) == 1:
model_score = accuracy_score(test_y, preds)
self.log_writer.log(
log_file, f"Accuracy for {model_name} is {model_score}"
)
else:
model_score = roc_auc_score(test_y, preds)
self.log_writer.log(
log_file, f"AUC score for {model_name} is {model_score}"
)
self.log_writer.start_log("exit", self.class_name, method_name, log_file)
return model_score
except Exception as e:
self.log_writer.exception_log(e, self.class_name, method_name, log_file)
def get_model_params(self, model, x_train, y_train, log_file):
"""
Method Name : get_model_params
Description : This method gets the model parameters based on model_key_name and train data
Output : Best model parameters are returned
On Failure : Write an exception log and then raise an exception
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_model_params.__name__
self.log_writer.start_log("start", self.class_name, method_name, log_file)
try:
model_name = model.__class__.__name__
model_param_grid = self.config[model_name]
model_grid = GridSearchCV(
estimator=model, param_grid=model_param_grid, **self.tuner_kwargs
)
self.log_writer.log(
log_file,
f"Initialized {model_grid.__class__.__name__} with {model_param_grid} as params",
)
model_grid.fit(x_train, y_train)
self.log_writer.log(
log_file,
f"Found the best params for {model_name} model based on {model_param_grid} as params",
)
self.log_writer.start_log("exit", self.class_name, method_name, log_file)
return model_grid.best_params_
except Exception as e:
self.log_writer.exception_log(e, self.class_name, method_name, log_file)
def train_and_log_models(self, X_data, Y_data, log_file, idx=None, kmeans=None):
method_name = self.train_and_log_models.__name__
self.log_writer.start_log("start", log_file, self.class_name, method_name)
try:
x_train, x_test, y_train, y_test = train_test_split(
X_data, Y_data, **self.split_kwargs
)
self.log_writer.log(
log_file,
f"Performed train test split with kwargs as {self.split_kwargs}",
)
lst = self.model_finder.get_trained_models(x_train, y_train, x_test, y_test)
self.log_writer.log(log_file, "Got trained models")
for _, tm in enumerate(lst):
self.s3.save_model(
tm[0],
self.train_model_dir,
self.model_bucket,
log_file,
format=self.save_format,
)
self.mlflow_op.set_mlflow_tracking_uri()
self.mlflow_op.set_mlflow_experiment(self.exp_name)
with start_run(run_name=self.run_name):
self.mlflow_op.log_all_for_model(idx, tm[0], tm[1])
if kmeans is not None:
self.mlflow_op.log_all_for_model(None, kmeans, None)
else:
pass
self.log_writer.log(
log_file, "Saved and logged all trained models to mlflow"
)
self.log_writer.start_log("exit", log_file, self.class_name, method_name)
except Exception as e:
self.log_writer.exception_log(e, log_file, self.class_name, method_name)
| 32.11828
| 102
| 0.605624
|
4a0d94d50b8c63f3c4ea1efe9b7aaa790ef95ed7
| 752
|
py
|
Python
|
setup.py
|
akachanov/single-beat
|
e500ac4b56756cdf96836666883af8060aaef455
|
[
"MIT"
] | 1
|
2021-07-13T11:30:33.000Z
|
2021-07-13T11:30:33.000Z
|
setup.py
|
akachanov/single-beat
|
e500ac4b56756cdf96836666883af8060aaef455
|
[
"MIT"
] | null | null | null |
setup.py
|
akachanov/single-beat
|
e500ac4b56756cdf96836666883af8060aaef455
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='single-beat',
version='0.4.2',
long_description=long_description,
long_description_content_type="text/markdown",
description='ensures only one instance of your process across your servers',
url='https://github.com/ybrs/single-beat',
packages=['singlebeat'],
zip_safe=True,
install_requires=[
'tornado>=4.2.1,<6.0',
'redis >= 3.3.0',
'Click>=7.0'
],
test_require=[
'psutil>=5.2.2'
],
entry_points={
'console_scripts': [
'single-beat = singlebeat.beat:run_process',
'single-beat-cli = singlebeat.cli:main',
],
}
)
| 25.066667
| 80
| 0.598404
|
4a0d95d570287ca1d482796562421a075f5053e3
| 34,408
|
py
|
Python
|
active_3d_planning_app_reconstruction/src/experiments/eval_plotting_node.py
|
mansoorcheema/mav_active_3d_planning
|
774f0e922ca589945a5bad8a38eef55819e3feeb
|
[
"BSD-3-Clause"
] | 288
|
2020-01-15T00:50:07.000Z
|
2022-03-30T02:54:30.000Z
|
active_3d_planning_app_reconstruction/src/experiments/eval_plotting_node.py
|
mansoorcheema/mav_active_3d_planning
|
774f0e922ca589945a5bad8a38eef55819e3feeb
|
[
"BSD-3-Clause"
] | 22
|
2020-01-22T14:36:04.000Z
|
2022-03-14T14:41:28.000Z
|
active_3d_planning_app_reconstruction/src/experiments/eval_plotting_node.py
|
mansoorcheema/mav_active_3d_planning
|
774f0e922ca589945a5bad8a38eef55819e3feeb
|
[
"BSD-3-Clause"
] | 66
|
2020-01-21T09:16:29.000Z
|
2022-03-24T12:28:21.000Z
|
#!/usr/bin/env python
import csv
import datetime
import os
import re
import shutil
import sys
import numpy as np
import rospy
# Plotting
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from std_srvs.srv import Empty
class EvalPlotting(object):
"""
This is the main evaluation node. It expects the data folders and files to
have the format hardcoded in the eval_data_node and calls the
eval_voxblox_node to execute c++ code. Pretty ugly and non-general code but
just needs to work in this specific case atm...
"""
def __init__(self):
# Parse parameters
target_dir = rospy.get_param('~target_directory')
self.method = rospy.get_param('~method', 'single')
self.ns_voxblox = rospy.get_param('~ns_eval_voxblox_node',
'/eval_voxblox_node')
self.evaluate = rospy.get_param('~evaluate', True)
self.evaluate_volume = rospy.get_param('~evaluate_volume', False)
self.create_plots = rospy.get_param('~create_plots', True)
self.show_plots = rospy.get_param(
'~show_plots', False) # Auxiliary param, prob removed later
self.create_meshes = rospy.get_param('~create_meshes', True)
self.series = rospy.get_param(
'~series', False) # True: skip single evaluation and create
# series evaluation data and plots for all runs in the target directory
self.clear_voxblox_maps = rospy.get_param(
'~clear_voxblox_maps',
False) # rm all maps after eval (disk space!)
self.unobservable_points_pct = rospy.get_param(
'~unobservable_points_pct', 0.0) # Exlude unobservable points
# from the plots (in percent of total)
# Check for valid params
methods = {
'single': 'single',
'recent': 'recent',
'all': 'all'
} # Dictionary of implemented models
selected = methods.get(self.method, 'NotFound')
if selected == 'NotFound':
warning = "Unknown method '" + self.method + \
"'. Implemented are: " + \
"".join(["'" + m + "', " for m in methods])
rospy.logfatal(warning[:-2])
sys.exit(-1)
else:
self.method = selected
# Setup
self.eval_log_file = None
rospy.wait_for_service(self.ns_voxblox + "/evaluate")
self.eval_voxblox_srv = rospy.ServiceProxy(
self.ns_voxblox + "/evaluate", Empty)
# Evaluate
if self.series:
self.evaluate_series(target_dir)
elif self.method == 'single':
self.run_single_evaluation(target_dir)
elif self.method == 'recent':
dir_expression = re.compile(
r'\d{8}_\d{6}') # Only check the default names
subdirs = [
o for o in os.listdir(target_dir)
if os.path.isdir(os.path.join(target_dir, o))
and dir_expression.match(o)
]
subdirs.sort(reverse=True)
if len(subdirs) == 0:
rospy.loginfo(
"No recent directories in target dir '%s' to evaluate.",
target_dir)
sys.exit(-1)
self.run_single_evaluation(os.path.join(target_dir, subdirs[0]))
elif self.method == 'all':
subdirs = [
o for o in os.listdir(target_dir)
if os.path.isdir(os.path.join(target_dir, o))
]
for subdir in subdirs:
self.run_single_evaluation(os.path.join(target_dir, subdir))
rospy.loginfo(
"\n" + "*" * 53 +
"\n* Evaluation completed successfully, shutting down. *\n" +
"*" * 53)
def run_single_evaluation(self, target_dir):
rospy.loginfo("Starting evaluation on target '%s'.", target_dir)
# Check target dir is valid (approximately)
if not os.path.isfile(os.path.join(target_dir, "data_log.txt")):
rospy.logerr("Invalid target directory: Could not find a "
"'data_log.txt' file.")
return
# Check for rosbag renaming
self.eval_log_file = open(os.path.join(target_dir, "data_log.txt"),
'a+')
lines = [line.rstrip('\n') for line in self.eval_log_file]
if not "[FLAG] Rosbag renamed" in lines:
for line in lines:
if line[:14] == "[FLAG] Rosbag:":
file_name = os.path.join(os.path.dirname(target_dir),
"tmp_bags", line[15:] + ".bag")
if os.path.isfile(file_name):
os.rename(
file_name,
os.path.join(target_dir, "visualization.bag"))
self.writelog(
"Moved the tmp rosbag into 'visualization.bag'")
self.eval_log_file.write("[FLAG] Rosbag renamed\n")
else:
self.writelog("Error: unable to locate '" + file_name +
"'.")
rospy.logwarn("Error: unable to locate '" + file_name +
"'.")
self.eval_log_file.close() # Make it available for voxblox node
# Create meshes and voxblox eval
if self.create_meshes:
# Configure directory
if not os.path.isdir(os.path.join(target_dir, "meshes")):
os.mkdir(os.path.join(target_dir, "meshes"))
if self.evaluate or self.create_meshes or self.evaluate_volume:
# Set params and call the voxblox evaluator
rospy.set_param(self.ns_voxblox + "/target_directory", target_dir)
try:
self.eval_voxblox_srv()
except:
rospy.logerr(
"eval_voxblox service call failed. Shutting down.")
sys.exit(-1)
# Reopen logfile
self.eval_log_file = open(os.path.join(target_dir, "data_log.txt"),
'a+')
if self.create_plots:
# Create dirs
if not os.path.isdir(os.path.join(target_dir, "graphs")):
os.mkdir(os.path.join(target_dir, "graphs"))
if os.path.isfile(os.path.join(target_dir, "voxblox_data.csv")):
# Read voxblox data file
data_voxblox = self.read_voxblox_data(
os.path.join(target_dir, "voxblox_data.csv"))
if len(data_voxblox['RosTime']) > 1:
if 'MeanError' in data_voxblox:
self.plot_sim_overview(data_voxblox, target_dir)
else:
rospy.loginfo(
"Unevaluated 'voxblox_data.csv', skipping dependent"
" graphs.")
else:
rospy.loginfo(
"Too few entries in 'voxblox_data.csv', skipping "
"dependent graphs.")
else:
rospy.loginfo(
"No 'voxblox_data.csv' found, skipping dependent graphs.")
if os.path.isfile(os.path.join(target_dir, "performance_log.csv")):
# Read performance data file
data_perf = {}
headers = None
with open(os.path.join(target_dir,
"performance_log.csv")) as infile:
reader = csv.reader(infile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in reader:
if row[0] == 'RunTime':
headers = row
for header in headers:
data_perf[header] = []
continue
for i in range(len(row)):
data_perf[headers[i]].append(row[i])
# Create graph
if len(data_perf['RosTime']) > 1:
self.plot_perf_overview(data_perf, target_dir)
else:
rospy.loginfo(
"Too few entries in 'performance_log.csv', skipping "
"dependent graphs.")
else:
rospy.loginfo(
"No 'performance_log.csv' found, skipping dependent graphs."
)
if os.path.isfile(os.path.join(target_dir, "error_hist.csv")):
# Read error data file
with open(os.path.join(target_dir,
"error_hist.csv")) as infile:
reader = csv.reader(infile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
data = np.array(list(reader))
data_error_hist = data[1:, 1:].astype(int)
# Create graph
self.plot_error_hist(data_error_hist, target_dir)
else:
rospy.loginfo(
"No 'error_hist.csv' found, skipping dependent graphs.")
# Finish
if self.clear_voxblox_maps:
# Remove all voxblox maps to free up disk space
shutil.rmtree(os.path.join(target_dir, 'voxblox_maps'),
ignore_errors=True)
self.eval_log_file.close()
def evaluate_series(self, target_dir):
rospy.loginfo("Evaluating experiment series at '%s'", target_dir)
# Setup a directory for data, plots, ...
folder_name = "series_evaluation"
if not os.path.isdir(os.path.join(target_dir, folder_name)):
os.mkdir(os.path.join(target_dir, folder_name))
self.eval_log_file = open(
os.path.join(target_dir, folder_name, "eval_log.txt"), 'a')
# Read all the data
dir_expression = re.compile(r'\d{8}_\d{6}')
subdirs = [
o for o in os.listdir(target_dir)
if os.path.isdir(os.path.join(target_dir, o))
and dir_expression.match(o)
]
self.writelog("Evaluating '%s' (%i subdirs)." %
(target_dir, len(subdirs)))
voxblox_data = []
max_data_length = 0
names = []
for o in subdirs:
if os.path.isfile((os.path.join(target_dir, o, "graphs",
"SimulationOverview.png"))):
# Valid evaluated directory
data = self.read_voxblox_data(
os.path.join(target_dir, o, "voxblox_data.csv"))
max_data_length = max(max_data_length, len(data["RosTime"]))
voxblox_data.append(data)
names.append(o)
else:
rospy.logwarn("Experiment at '%s' not properly evaluated!", o)
self.writelog("Experiment at '%s' not properly evaluated!" % o)
if max_data_length < 2:
rospy.loginfo(
"No valid experiments found, stopping series evaluation.")
self.writelog(
"No valid experiments found, stopping series evaluation.")
self.eval_log_file.close()
return
# Create common data timeline by averaging measurement times (these
# should be similar)
data_file = open(
os.path.join(target_dir, folder_name, "series_data.csv"), 'wb')
data_writer = csv.writer(data_file,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL,
lineterminator='\n')
means = {}
std_devs = {}
keys = voxblox_data[0].keys()
keys.remove('RosTime')
keys = ['RosTime'] + keys # RosTime is expected as the first argument
prev_pcls = [0.0] * len(voxblox_data)
for key in keys:
means[key] = np.array([])
std_devs[key] = np.array([])
for i in range(max_data_length):
line = []
if i == 0:
header_line = []
for key in keys:
header_line.extend((key, ''))
for name in names:
line.append(name)
header_line.append('')
line.extend(("Mean", "StdDev"))
data_writer.writerow(header_line)
data_writer.writerow(line)
line = []
for key in keys:
values = []
for dataset in voxblox_data:
if i < len(dataset[key]):
if key == 'NPointclouds':
# These need to accumulate
ind = voxblox_data.index(dataset)
prev_pcls[ind] = prev_pcls[ind] + float(
dataset[key][i])
line.append(prev_pcls[ind])
values.append(prev_pcls[ind])
else:
line.append(dataset[key][i])
values.append(dataset[key][i])
else:
line.append("")
values = np.array(values, dtype=float)
mean = np.mean(values)
std = np.std(values)
means[key] = np.append(means[key], mean)
std_devs[key] = np.append(std_devs[key], std)
line.extend((mean, std))
data_writer.writerow(line)
data_file.close()
# Create plot
rospy.loginfo("Creating graph 'SeriesOverview'")
x = means['RosTime']
unit = "s"
if x[-1] >= 300:
unit = "min"
x = np.divide(x, 60)
cpu_use = np.zeros(np.shape(means['CPUTime']))
cpu_std = np.zeros(np.shape(means['CPUTime']))
for i in range(len(means['CPUTime']) - 1):
div = (means['RosTime'][i + 1] - means['RosTime'][i])
cpu_use[i] = (means['CPUTime'][i + 1]) / div
cpu_std[i] = (std_devs['CPUTime'][i + 1]) / div
cpu_use[-1] = cpu_use[-2]
cpu_use = np.repeat(cpu_use, 2)
cpu_std[-1] = cpu_std[-2]
cpu_std = np.repeat(cpu_std, 2)
# Plot ends of data series for unequal lengths
early_stops = []
x_early = []
for i in range(len(voxblox_data)):
dataset = voxblox_data[i]
length = len(dataset['RosTime']) - 1
if length < max_data_length - 1:
early_stops.append(length)
x_early.append(float(dataset['RosTime'][length]))
self.writelog("Early stop detected for '%s' at %.2fs." %
(names[i], float(dataset['RosTime'][length])))
fig, axes = plt.subplots(3, 2)
axes[0, 0].plot(x, means['MeanError'], 'b-')
axes[0, 0].fill_between(x,
means['MeanError'] - std_devs['MeanError'],
means['MeanError'] + std_devs['MeanError'],
facecolor='b',
alpha=.2)
axes[0, 0].plot([x[i] for i in early_stops],
[means['MeanError'][i] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[0, 0].set_ylabel('MeanError [m]')
axes[0, 0].set_ylim(bottom=0)
axes[0, 0].set_xlim(left=0, right=x[-1])
axes[1, 0].plot(x, means['StdDevError'], 'b-')
axes[1, 0].fill_between(x,
means['StdDevError'] - std_devs['StdDevError'],
means['StdDevError'] + std_devs['StdDevError'],
facecolor='b',
alpha=.2)
axes[1, 0].plot([x[i] for i in early_stops],
[means['StdDevError'][i] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[1, 0].set_ylabel('StdDevError [m]')
axes[1, 0].set_ylim(bottom=0)
axes[1, 0].set_xlim(left=0, right=x[-1])
axes[2, 0].plot(x, means['OutsideTruncation'], 'r-')
axes[2, 0].fill_between(
x,
means['OutsideTruncation'] - std_devs['OutsideTruncation'],
means['OutsideTruncation'] + std_devs['OutsideTruncation'],
facecolor='r',
alpha=.2)
axes[2, 0].plot([x[i] for i in early_stops],
[means['OutsideTruncation'][i] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[2, 0].set_ylabel('Truncated Voxels [%]')
axes[2, 0].set_ylim(0, 1)
axes[2, 0].set_xlabel("Simulated Time [%s]" % unit)
axes[2, 0].set_xlim(left=0, right=x[-1])
# Compensate unobservable voxels
if np.max(means['UnknownVoxels']) > 0:
unknown = (means['UnknownVoxels'] - self.unobservable_points_pct
) / (1.0 - self.unobservable_points_pct)
unknown = np.maximum(unknown, np.zeros_like(unknown))
axes[0, 1].plot(x, unknown, 'g-')
axes[0, 1].fill_between(x,
unknown - std_devs['UnknownVoxels'],
unknown + std_devs['UnknownVoxels'],
facecolor='g',
alpha=.2)
axes[0, 1].plot([x[i] for i in early_stops],
[means['UnknownVoxels'][i] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[0, 1].set_ylabel('Unknown Voxels [%]')
axes[0, 1].set_ylim(0, 1)
else:
axes[0, 1].plot(x, means['Volume'], 'g-')
axes[0, 1].fill_between(x,
means['Volume'] - std_devs['Volume'],
means['Volume'] + std_devs['Volume'],
facecolor='g',
alpha=.2)
axes[0, 1].set_ylabel('Explored Volume [m3]')
axes[0, 1].set_ylim(0, 40 * 40 * 3)
axes[0, 1].set_xlim(left=0, right=x[-1])
axes[1, 1].plot(x, means['NPointclouds'], 'k-')
axes[1,
1].fill_between(x,
means['NPointclouds'] - std_devs['NPointclouds'],
means['NPointclouds'] + std_devs['NPointclouds'],
facecolor='k',
alpha=.2)
axes[1, 1].plot([x[i] for i in early_stops],
[means['NPointclouds'][i] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[1, 1].set_ylabel('Processed Pointclouds [-]')
axes[1, 1].set_xlim(left=0, right=x[-1])
x = np.repeat(x, 2)
x = np.concatenate((np.array([0]), x[:-1]))
axes[2, 1].plot(x, cpu_use, 'k-')
axes[2, 1].fill_between(x,
cpu_use - cpu_std,
cpu_use + cpu_std,
facecolor='k',
alpha=.2)
axes[2, 1].plot([x[i * 2 + 1] for i in early_stops],
[cpu_use[i * 2 + 1] for i in early_stops],
'kx',
markersize=9,
markeredgewidth=2)
axes[2, 1].set_ylabel('Simulated CPU usage [cores]')
axes[2, 1].set_xlabel("Simulated Time [%s]" % unit)
axes[2, 1].set_ylim(bottom=0)
axes[2, 1].set_xlim(left=0, right=x[-1])
plt.suptitle("Experiment Series Overview (" + str(len(voxblox_data)) +
" experiments)\nMeans + Std. Deviations (shaded)")
fig.set_size_inches(15, 10, forward=True)
save_name = os.path.join(target_dir, folder_name, "SeriesOverview.png")
plt.savefig(save_name, dpi=300, format='png', bbox_inches='tight')
self.writelog("Created graph 'SeriesOverview'.")
self.eval_log_file.close()
@staticmethod
def read_voxblox_data(file_name):
# Read voxblox data file
data_voxblox = {}
headers = None
with open(file_name) as infile:
reader = csv.reader(infile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in reader:
if row[0] == 'MapName':
headers = row
for header in headers:
data_voxblox[header] = []
continue
if row[0] != 'Unit':
for i in range(len(row)):
data_voxblox[headers[i]].append(row[i])
return data_voxblox
def plot_sim_overview(self, data, target_dir):
rospy.loginfo("Creating Graphs: SimulationOverview")
unit = "s"
x = np.array(data['RosTime'], dtype=float)
if x[-1] >= 300:
unit = "min"
x = np.divide(x, 60)
meanerr = np.array(data['MeanError'])
stddev = np.array(data['StdDevError'])
truncated = np.array(data['OutsideTruncation'])
pointclouds = np.cumsum(np.array(data['NPointclouds'], dtype=float))
ros_time = np.array(data['RosTime'], dtype=float)
cpu_time = np.array(data['CPUTime'], dtype=float)
cpu_use = np.zeros(np.shape(cpu_time))
for i in range(len(cpu_time) - 1):
cpu_use[i] = (cpu_time[i + 1]) / (ros_time[i + 1] - ros_time[i])
cpu_use[-1] = cpu_use[-2]
cpu_use = np.repeat(cpu_use, 2)
fig, axes = plt.subplots(3, 2)
axes[0, 0].plot(x, meanerr, 'b-')
axes[0, 0].set_ylabel('MeanError [m]')
axes[0, 0].set_ylim(bottom=0)
axes[0, 0].set_xlim(left=0, right=x[-1])
axes[1, 0].plot(x, stddev, 'b-')
axes[1, 0].set_ylabel('StdDevError [m]')
axes[1, 0].set_ylim(bottom=0)
axes[1, 0].set_xlim(left=0, right=x[-1])
axes[2, 0].plot(x, truncated, 'r-')
axes[2, 0].set_ylabel('Truncated Voxels [%]')
axes[2, 0].set_ylim(0, 1)
axes[2, 0].set_xlabel("Simulated Time [%s]" % unit)
axes[2, 0].set_xlim(left=0, right=x[-1])
unknown = np.array(data['UnknownVoxels'], dtype=float)
if np.max(unknown) > 0:
# compensate unobservable voxels
unknown = (unknown - self.unobservable_points_pct) / (
1.0 - self.unobservable_points_pct) # compensate invisible
unknown = np.maximum(unknown, np.zeros_like(unknown))
axes[0, 1].set_ylabel('Unknown Voxels [%]')
axes[0, 1].set_ylim(0, 1)
else:
unknown = np.array(data['Volume'], dtype=float)
axes[0, 1].set_ylabel('Explored Volume [m3]')
axes[0, 1].set_ylim(0, 40 * 40 * 3)
axes[0, 1].plot(x, unknown, 'g-')
axes[0, 1].set_xlim(left=0, right=x[-1])
axes[1, 1].plot(x, pointclouds, 'k-')
axes[1, 1].set_ylabel('Processed Pointclouds [-]')
axes[1, 1].set_xlim(left=0, right=x[-1])
x = np.repeat(x, 2)
x = np.concatenate((np.array([0]), x[:-1]))
axes[2, 1].plot(x, cpu_use, 'k-')
axes[2, 1].set_ylabel('Simulated CPU usage [cores]')
axes[2, 1].set_xlabel("Simulated Time [%s]" % unit)
axes[2, 1].set_ylim(bottom=0)
axes[2, 1].set_xlim(left=0, right=x[-1])
plt.suptitle("Simulation Overview")
fig.set_size_inches(15, 10, forward=True)
save_name = os.path.join(target_dir, "graphs",
"SimulationOverview.png")
plt.savefig(save_name, dpi=300, format='png', bbox_inches='tight')
self.writelog("Created graph 'SimulationOverview'.")
if self.show_plots:
rospy.loginfo("Displaying '%s'. Close to continue...", save_name)
plt.show()
def plot_perf_overview(self, data, target_dir):
rospy.loginfo("Creating Graphs: PerformanceOverview")
x = np.cumsum(np.array(data['RosTime'], dtype=float))
unit = "s"
if x[-1] >= 300:
unit = "min"
x = np.true_divide(x, 60)
y_select = np.array(data['Select'], dtype=float)
y_expand = np.array(data['Expand'], dtype=float)
y_gain = np.array(data['Gain'], dtype=float)
y_cost = np.array(data['Cost'], dtype=float)
y_value = np.array(data['Value'], dtype=float)
y_next = np.array(data['NextBest'], dtype=float)
y_upTG = np.array(data['UpdateTG'], dtype=float)
y_upTE = np.array(data['UpdateTE'], dtype=float)
y_vis = np.array(data['Visualization'], dtype=float)
y_ros = np.array(data['RosCallbacks'], dtype=float)
y_tot = np.array(data['Total'], dtype=float)
y0 = np.divide(y_select, y_tot)
y1 = np.divide(y_expand, y_tot) + y0
y2 = np.divide(y_gain, y_tot) + y1
y3 = np.divide(y_cost, y_tot) + y2
y4 = np.divide(y_value, y_tot) + y3
y5 = np.divide(y_next, y_tot) + y4
y6 = np.divide(y_upTG, y_tot) + y5
y7 = np.divide(y_upTE, y_tot) + y6
y8 = np.divide(y_vis, y_tot) + y7
y9 = 1.0 - np.divide(y_ros, y_tot)
sum_tot = np.sum(y_tot)
s0 = np.sum(y_select) / sum_tot * 100
s1 = np.sum(y_expand) / sum_tot * 100
s2 = np.sum(y_gain) / sum_tot * 100
s3 = np.sum(y_cost) / sum_tot * 100
s4 = np.sum(y_value) / sum_tot * 100
s5 = np.sum(y_next) / sum_tot * 100
s6 = np.sum(y_upTG) / sum_tot * 100
s7 = np.sum(y_upTE) / sum_tot * 100
s8 = np.sum(y_vis) / sum_tot * 100
s9 = np.sum(y_ros) / sum_tot * 100
s10 = 100 - s0 - s1 - s2 - s3 - s4 - s5 - s6 - s7 - s8 - s9
x = np.repeat(x, 2)
x = np.concatenate((np.array([0]), x[:-1]))
y0 = np.repeat(y0, 2)
y1 = np.repeat(y1, 2)
y2 = np.repeat(y2, 2)
y3 = np.repeat(y3, 2)
y4 = np.repeat(y4, 2)
y5 = np.repeat(y5, 2)
y6 = np.repeat(y6, 2)
y7 = np.repeat(y7, 2)
y8 = np.repeat(y8, 2)
y9 = np.repeat(y9, 2)
fig = plt.figure()
axes = [
plt.subplot2grid((5, 1), (0, 0), rowspan=3),
plt.subplot(5, 1, 4),
plt.subplot(5, 1, 5)
]
axes[0].fill_between(x, 0, y0, facecolor="#a1b400", alpha=.5)
axes[0].fill_between(x, y0, y1, facecolor="#009000", alpha=.5)
axes[0].fill_between(x, y1, y2, facecolor="#dc1000", alpha=.5)
axes[0].fill_between(x, y2, y3, facecolor="#ff4f00", alpha=.5)
axes[0].fill_between(x, y3, y4, facecolor="#ffb800", alpha=.5)
axes[0].fill_between(x, y4, y5, facecolor="#ffff00", alpha=.5)
axes[0].fill_between(x, y5, y6, facecolor="#00eebc", alpha=.5)
axes[0].fill_between(x, y6, y7, facecolor="#d800dd", alpha=.5)
axes[0].fill_between(x, y7, y8, facecolor="#a3baff", alpha=.5)
axes[0].fill_between(x, y8, y9, facecolor="#cccccc", alpha=.5)
axes[0].fill_between(x, y9, 1, facecolor="#606060", alpha=.5)
axes[0].set_xlim(left=0, right=x[-1])
axes[0].set_ylim(bottom=0, top=1)
axes[0].set_title("Percentage of CPU Time Spent per Function")
axes[0].set_ylabel('Percent [%]')
x = np.cumsum(np.array(data['RosTime'], dtype=float))
if unit == "min":
x = np.true_divide(x, 60)
n_trajectories = np.array(data['NTrajectories'], dtype=int)
n_after_update = np.array(data['NTrajAfterUpdate'], dtype=int)
n_after_update = np.concatenate((np.array([0]), n_after_update[:-1]))
n_new = n_trajectories - n_after_update
axes[1].plot(x, n_trajectories, 'b-')
axes[1].plot(x, n_new, 'g-')
axes[1].fill_between(x, 0, n_new, facecolor="#009000", alpha=.3)
axes[1].fill_between(x,
n_new,
n_trajectories,
facecolor="#0000ff",
alpha=.3)
axes[1].set_xlim(left=0, right=x[-1])
axes[1].set_ylim(bottom=0)
axes[1].set_title("Trajectory Tree Size")
axes[1].set_ylabel('TrajectorySegments [-]')
axes[1].legend(["Total", "New"], loc='upper left', fancybox=True)
x = np.array([])
ros_time = np.array(data['RosTime'], dtype=float)
cpu_times = [
np.array(data['Total'], dtype=float), y_select + y_expand +
y_gain + y_cost + y_value + y_next + y_upTE + y_upTG
] # Total, Planning
cpu_use = [np.array([])] * len(cpu_times)
i = 0
averaging_threshold = 2.0 # seconds, for smoothing
t_curr = ros_time[0]
x_curr = 0
cpu_curr = [time[0] for time in cpu_times]
while i + 1 < len(ros_time):
i = i + 1
if t_curr >= averaging_threshold:
for j in range(len(cpu_times)):
cpu_use[j] = np.append(cpu_use[j], cpu_curr[j] / t_curr)
x_curr = x_curr + t_curr
x = np.append(x, x_curr)
t_curr = ros_time[i]
for j in range(len(cpu_times)):
cpu_curr[j] = cpu_times[j][i]
else:
t_curr = t_curr + ros_time[i]
for j in range(len(cpu_times)):
cpu_curr[j] = cpu_curr[j] + cpu_times[j][i]
if unit == "min":
x = np.true_divide(x, 60)
axes[2].plot(x, cpu_use[0], 'k-')
axes[2].plot(x, cpu_use[1], linestyle='-', color='#5492E7')
axes[2].plot(np.array([0, x[-1]]),
np.array([1, 1]),
linestyle='-',
color='0.7',
alpha=0.8)
axes[2].set_xlim(left=0, right=x[-1])
axes[2].set_ylim(bottom=0)
axes[2].set_ylabel('CPU Usage [cores]')
axes[2].set_title("Planner Consumed CPU Time per Simulated Time")
axes[2].set_xlabel('Simulated Time [%s]' % unit)
axes[2].legend(["Process", "Planning"],
loc='upper left',
fancybox=True)
fig.set_size_inches(15, 15, forward=True)
plt.tight_layout()
box = axes[0].get_position()
axes[0].set_position(
[box.x0, box.y0 + box.height * 0.16, box.width, box.height * 0.84])
legend = [
"({0:02.1f}%) Select".format(s0), "({0:02.1f}%) Expand".format(s1),
"({0:02.1f}%) Gain".format(s2), "({0:02.1f}%) Cost".format(s3),
"({0:02.1f}%) Value".format(s4),
"({0:02.1f}%) NextBest".format(s5),
"({0:02.1f}%) updateGen".format(s6),
"({0:02.1f}%) UpdateEval".format(s7),
"({0:02.1f}%) Vis".format(s8), "({0:02.1f}%) Other".format(s10),
"({0:02.1f}%) ROS".format(s9)
]
axes[0].legend(legend,
loc='upper center',
bbox_to_anchor=(0.5, -0.04),
ncol=6,
fancybox=True)
save_name = os.path.join(target_dir, "graphs",
"PerformanceOverview.png")
plt.savefig(save_name, dpi=300, format='png', bbox_inches='tight')
self.writelog("Created graph 'PerformanceOverview'.")
if self.show_plots:
rospy.loginfo("Displaying '%s'. Close to continue...", save_name)
plt.show()
def plot_error_hist(self, data, target_dir):
    """Render 3D bar histograms of the error distribution over time and save them.

    Produces a two-panel figure: the top panel shows absolute bin counts,
    the bottom panel shows each time step's bins normalized by that step's
    total (relative frequencies). The figure is written to
    <target_dir>/graphs/ErrorHistogram.png and optionally displayed.

    Args:
        data: 2D array-like of shape (measurements, bins) holding histogram
            bin counts per time step. (assumed from the indexing below --
            TODO confirm against the caller)
        target_dir: directory that must already contain a "graphs" subfolder.
    """
    rospy.loginfo("Creating Graphs: ErrorHistogram")
    # Build a (bin index, measurement index) grid for positioning the 3D bars.
    time = np.arange(np.shape(data)[0])
    bins = np.arange(np.shape(data)[1])
    B, T = np.meshgrid(bins, time)
    # Plotting
    fig = plt.figure()
    # Top panel: absolute bin counts.
    ax = fig.add_subplot(211, projection='3d')
    Xi = B.flatten()
    Yi = T.flatten()
    Zi = np.zeros(data.size)  # all bars start at z = 0
    dx = .5 * np.ones(data.size)  # 0.5 x 0.5 footprint per bar
    dy = .5 * np.ones(data.size)
    dz = data.flatten()  # bar heights = raw counts
    ax.set_xlabel('error bins [0, trunc]')
    ax.set_ylabel('time [measurements]')
    ax.set_zlabel('bin count')
    ax.bar3d(Xi, Yi, Zi, dx, dy, dz, color='g')
    ax.set_title('Absolute error histogram')
    # Bottom panel: row-normalized counts (fraction of that time step's total).
    ax = fig.add_subplot(212, projection='3d')
    data2 = data.astype(float)
    for i in range(np.shape(data)[0]):
        data2[i, :] = data2[i, :] / np.sum(data2[i, :])
    dz = data2.flatten()
    ax.set_xlabel('error bins [0, trunc]')
    ax.set_ylabel('time [measurements]')
    ax.set_zlabel('bin percentage')
    ax.bar3d(Xi, Yi, Zi, dx, dy, dz, color='b')
    ax.set_title('Relative error histogram')
    fig.set_size_inches(10, 12, forward=True)
    plt.tight_layout()
    save_name = os.path.join(target_dir, "graphs", "ErrorHistogram.png")
    plt.savefig(save_name, dpi=300, format='png', bbox_inches='tight')
    self.writelog("Created graph 'ErrorHistogram'.")
    if self.show_plots:
        rospy.loginfo("Displaying '%s'. Close to continue...", save_name)
        plt.show()
def writelog(self, text):
    """Append *text* to the evaluation log, prefixed with a timestamp.

    Does nothing when no log file has been opened.
    """
    if self.eval_log_file is None:
        return
    stamp = datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S] ")
    self.eval_log_file.write(stamp + text + "\n")
if __name__ == '__main__':
    # Standalone entry point: start the ROS node and run the evaluation
    # plotting pipeline (EvalPlotting does its work on construction).
    rospy.init_node('eval_plotting_node', anonymous=True)
    ep = EvalPlotting()
| 42.636927
| 80
| 0.501163
|
4a0d95e82783f682f7cdf85faa707fd1db352855
| 1,354
|
py
|
Python
|
src/game_objects/hud/period.py
|
ozcer/Project-Ooze
|
28eb84995f4fa283366e3f04edb7e393d5281ac5
|
[
"MIT"
] | 1
|
2018-10-10T02:11:50.000Z
|
2018-10-10T02:11:50.000Z
|
src/game_objects/hud/period.py
|
ozcer/Project-Ooze
|
28eb84995f4fa283366e3f04edb7e393d5281ac5
|
[
"MIT"
] | 29
|
2018-03-16T05:07:18.000Z
|
2018-04-03T03:58:32.000Z
|
src/game_objects/hud/period.py
|
ozcer/FlaPy-Bird
|
28eb84995f4fa283366e3f04edb7e393d5281ac5
|
[
"MIT"
] | 1
|
2018-03-18T00:27:12.000Z
|
2018-03-18T00:27:12.000Z
|
import pygame
from src.const import *
from src.game_objects.scenic import Scenic
class Period(Scenic):
    """A colored bar on the bottom timeline representing one named period."""

    def __init__(self, *args,
                 length,
                 name,
                 left=DISPLAY_WIDTH,
                 depth=PERIOD_DEPTH,
                 color=OLIVE,
                 ):
        # The sprite surface must exist before the Scenic base initializer
        # resolves init_image_key.
        self.images = {"init": pygame.Surface((length, TIMELINE_HEIGHT))}
        center_x = left + length / 2
        center_y = DISPLAY_HEIGHT - TIMELINE_HEIGHT / 2
        super().__init__(*args, pos=(center_x, center_y), depth=depth,
                         init_image_key="init")
        self.name = name
        self.length = self.rect.w
        self.color = color
        self.image.fill(self.color)
        # The bar has no intrinsic motion of its own.
        self.dx = 0
        self.dy = 0

    def set_left(self, left):
        """Move the bar so its left edge sits at *left*, keeping it on the timeline."""
        self.x = left + self.rect.w / 2
        self.y = DISPLAY_HEIGHT - TIMELINE_HEIGHT / 2
        self.rect.center = (self.x, self.y)

    def display_name(self):
        """Render the period's name centered on the bar."""
        label = self.debug_font.render(f"{self.name}",
                                       True,
                                       BLACK)
        label_rect = label.get_rect()
        label_rect.center = self.x, self.y
        self.game.surface.blit(label, label_rect)

    def draw(self):
        """Draw the bar, then overlay its name label."""
        super().draw()
        self.display_name()

    def update(self):
        super().update()
| 28.208333
| 79
| 0.509601
|
4a0d95ffe19734863ff5dd6a83b16feb819ac608
| 4,937
|
py
|
Python
|
eval_ppl_v2.py
|
iwangjian/Persona-dialogue
|
f40e415574d3233693f575de31d6291c969dff09
|
[
"Apache-2.0"
] | 6
|
2020-09-14T13:47:57.000Z
|
2021-09-27T08:57:25.000Z
|
eval_ppl_v2.py
|
iwangjian/Persona-dialogue
|
f40e415574d3233693f575de31d6291c969dff09
|
[
"Apache-2.0"
] | null | null | null |
eval_ppl_v2.py
|
iwangjian/Persona-dialogue
|
f40e415574d3233693f575de31d6291c969dff09
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
# Copyright (c) Tsinghua university conversational AI group (THU-coai).
# This source code is licensed under the MIT license.
"""Script for the Evaluation of Chinese Human-Computer Dialogue Technology (SMP2019-ECDT) Task2.
This script evaluates the perplexity of the submitted model.
This uses the version of the dataset which does not contain the "Golden Response".
Leaderboard scores will be run in the same form but on a hidden test set.
The official vocabulary for the competition is based on using "jieba"
and is built on the training and validation sets. The test set contains some
tokens which are not in this dictionary--this tokens will not be provided, but
we will also *SKIP* calculating perplexity on these tokens. The model should
still produce a good guess for the remaining tokens in the sentence, so
handling unknown words or expanding the vocabulary with pre-trained or
multitasked embeddings are legitimate strategies that may or may not impact the
score of the models.
The model will be asked to predict one word at a time.
This requires each team to implement the following function:
def next_word_probability(self, context, partial_out):
Return probability distribution over next words given a context and a partial true output.
This is used to calculate the per-word perplexity.
Arguments:
context -- dialogue histories and personal profiles of every speaker
partial_out -- list of previous "true" words
Returns a dict, where each key is a word and each value is a probability
score for that word. Unset keys assume a probability of zero.
"""
from main import Model
import math
import json
import sys
import codecs
def read_dialog(file):
    """Read dialogs from file.

    :param file: str, file path to the dataset (one JSON object per line)
    :return: list, a list of dialogue (context) contained in file
    """
    dialogs = []
    with codecs.open(file, 'r', 'utf-8') as fin:
        for line in fin:
            stripped = line.strip()
            if stripped:
                dialogs.append(json.loads(stripped))
    return dialogs
def eval_ppl(model, context, resp_gt, vocab):
    """
    Compute the perplexity for the model on the "Golden Responses"
    :param model: class, model class that have a method named 'next_word_probability'
    :param context: dict, given context
    :param resp_gt: list, list of tokens of the "Golden Responses"
    :param vocab: list, target vocabulary that the perplexity is evaluated on
    :return: tuple, (average loss, average perplexity)
        if a token in resp_gt that is contained in vocab -- or the implicit
        end-of-sentence event -- receives 0 probability from the model,
        'inf' is returned instead of raising.
    """
    loss = 0
    num_tokens = 0
    num_unk = 0  # tokens outside vocab are skipped, not penalized
    for i in range(len(resp_gt)):
        if resp_gt[i] not in vocab:
            num_unk += 1
            continue
        probs, eos_probs = model.next_word_probability(context, resp_gt[:i])
        prob_true = probs.get(resp_gt[i], 0)
        if prob_true > 0:
            # Renormalize over the evaluation vocabulary plus the EOS event,
            # so unlisted words do not distort the distribution.
            prob_true /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)
            loss -= math.log(prob_true)
        else:
            loss = float('inf')
        num_tokens += 1
    # Account for the end-of-sentence probability after the full response.
    probs, eos_probs = model.next_word_probability(context, resp_gt)
    denom = sum((probs.get(k, 0) for k in vocab)) + eos_probs
    if eos_probs > 0 and denom > 0:
        loss -= math.log(eos_probs / denom)
    else:
        # math.log(0) would raise ValueError (and denom == 0 would divide by
        # zero); treat a zero-probability EOS like any other zero-probability
        # event and report an infinite perplexity.
        loss = float('inf')
    num_tokens += 1
    return loss / num_tokens, math.exp(loss / num_tokens)
if __name__ == '__main__':
    # CLI: eval_ppl_v2.py <vocab_file> <random_test_file> <biased_test_file>
    model = Model()
    if len(sys.argv) < 4:
        # Without all three paths the script cannot proceed; exit cleanly
        # instead of falling through to an IndexError on sys.argv below.
        print('Too few args for this script')
        sys.exit(1)
    vocab_file = sys.argv[1]
    random_test = sys.argv[2]
    biased_test = sys.argv[3]
    with codecs.open(vocab_file, 'r', 'utf-8') as f:
        vocab = set([i.strip() for i in f.readlines() if len(i.strip()) != 0])
    random_test_data = read_dialog(random_test)
    biased_test_data = read_dialog(biased_test)
    random_ppl = 0
    biased_ppl = 0
    # Average the per-dialog perplexity over each split. The golden response
    # is removed from the dialog before it is handed to the model as context.
    for count, dialog in enumerate(random_test_data):
        if count % 100 == 0:
            print(count)
        resp_gt = dialog['golden_response'][0].split()
        del dialog['golden_response']
        random_ppl += eval_ppl(model, dialog, resp_gt, vocab)[1]
    for count, dialog in enumerate(biased_test_data):
        if count % 100 == 0:
            print(count)
        resp_gt = dialog['golden_response'][0].split()
        del dialog['golden_response']
        biased_ppl += eval_ppl(model, dialog, resp_gt, vocab)[1]
    random_ppl /= len(random_test_data)
    biased_ppl /= len(biased_test_data)
    print('random ppl', random_ppl)
    print('biased ppl', biased_ppl)
    if random_ppl + biased_ppl == float('inf'):
        print('You model got an inf for PPL score, mostly likely you do not assign ' +
              'any probability to a token in the golden response. You should consider to enlarge your vocab')
    else:
        # Final leaderboard score: mean of the two split perplexities.
        print((random_ppl + biased_ppl) / 2.0)
| 40.467213
| 109
| 0.680778
|
4a0d98c56d2c0ba7edbbc23a0b328574b24acabe
| 1,435
|
py
|
Python
|
go/setup.py
|
oberhamsi/FrameworkBenchmarks
|
660a66d51a9aad10b43c0660208fb13c098121af
|
[
"BSD-3-Clause"
] | 1
|
2017-11-02T13:25:06.000Z
|
2017-11-02T13:25:06.000Z
|
go/setup.py
|
oberhamsi/FrameworkBenchmarks
|
660a66d51a9aad10b43c0660208fb13c098121af
|
[
"BSD-3-Clause"
] | null | null | null |
go/setup.py
|
oberhamsi/FrameworkBenchmarks
|
660a66d51a9aad10b43c0660208fb13c098121af
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
    """Fetch dependencies and launch the Go benchmark server.

    Args:
        args: benchmark configuration; only args.database_host is used here.
        logfile: open file receiving child-process stdout.
        errfile: open file receiving child-process stderr.

    Returns:
        0 once the server has been started (it runs asynchronously via Popen).
    """
    # Point the app's hard-coded MySQL DSN at the configured database host.
    setup_util.replace_text("go/src/hello/hello.go", "tcp\(.*:3306\)", "tcp(" + args.database_host + ":3306)")
    if os.name == 'nt':
        # Windows: GOPATH is set inline for the shell command, and the server
        # is started through setup.bat.
        #subprocess.call("rmdir /s /q pkg\\windows_amd64", shell=True, cwd="go")
        #subprocess.call("rmdir /s /q src\\github.com", shell=True, cwd="go")
        #subprocess.call("del /s /q /f bin\\hello.exe", shell=True, cwd="go")
        subprocess.call("set GOPATH=C:\\FrameworkBenchmarks\\go&& go get ./...", shell=True, cwd="go", stderr=errfile, stdout=logfile)
        subprocess.Popen("setup.bat", shell=True, cwd="go", stderr=errfile, stdout=logfile)
        return 0
    # POSIX: export GOPATH for the child processes, fetch deps, then run the
    # server directly with `go run`.
    os.environ["GOPATH"] = os.path.expanduser('~/FrameworkBenchmarks/go')
    subprocess.call("go get ./...", shell=True, cwd="go", stderr=errfile, stdout=logfile)
    subprocess.Popen("go run src/hello/hello.go".rsplit(" "), cwd="go", stderr=errfile, stdout=logfile)
    return 0
def stop(logfile, errfile):
    """Stop the Go benchmark server (best effort).

    On Windows the go/hello processes are killed by image name; elsewhere
    `ps aux` output is scanned for 'hello' and each matching PID receives
    SIGTERM (15).

    Returns:
        0 always.
    """
    if os.name == 'nt':
        subprocess.call("taskkill /f /im go.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
        subprocess.call("taskkill /f /im hello.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
        return 0
    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    # Popen.communicate() returns bytes on Python 3 (no text mode requested),
    # so decode before the substring test -- 'hello' in b'...' raises
    # TypeError. Works unchanged on Python 2, where out is already str.
    if isinstance(out, bytes):
        out = out.decode('utf-8', 'replace')
    for line in out.splitlines():
        if 'hello' in line:
            pid = int(line.split(None, 2)[1])
            os.kill(pid, 15)
    return 0
| 46.290323
| 130
| 0.667596
|
4a0d9c727fb486e8a34996393419caa2bfaca27f
| 361
|
py
|
Python
|
Objects/Metadata.py
|
spanoselias/LazyReplicationTool
|
8fdc968e4fdf82992b704e1c7422f3a5591798eb
|
[
"MIT"
] | null | null | null |
Objects/Metadata.py
|
spanoselias/LazyReplicationTool
|
8fdc968e4fdf82992b704e1c7422f3a5591798eb
|
[
"MIT"
] | null | null | null |
Objects/Metadata.py
|
spanoselias/LazyReplicationTool
|
8fdc968e4fdf82992b704e1c7422f3a5591798eb
|
[
"MIT"
] | null | null | null |
class Metadata:
    """Bookkeeping record for a replicated file: name, path, and a hash value.

    NOTE(review): the constructor parameter is named ``hashcode`` but is
    stored as ``modifiedDate`` -- presumably a file-content hash used to
    detect modification; confirm which name reflects the intent.
    The double-underscore getters below are name-mangled (accessible only as
    ``_Metadata__get_*``); callers appear expected to read the attributes
    directly.
    """
    def __init__(self, filename, path, hashcode):
        self.filename = filename
        self.path = path
        self.modifiedDate = hashcode
    def __get_filename(self):
        # Name-mangled accessor; returns the stored file name.
        return self.filename
    def __get_path(self):
        # Name-mangled accessor; returns the stored file path.
        return self.path
    def __get_hashcode(self):
        # Name-mangled accessor; returns the stored hash/date value.
        return self.modifiedDate
| 24.066667
| 49
| 0.601108
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.