hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3aba465304b6285d331e2c3d8dd66f956edd2d | 1,471 | py | Python | src/media_server/serializers.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 6 | 2019-06-19T12:56:42.000Z | 2021-12-26T07:22:47.000Z | src/media_server/serializers.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 13 | 2019-12-20T10:39:44.000Z | 2022-02-10T09:11:09.000Z | src/media_server/serializers.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 1 | 2021-12-01T12:03:29.000Z | 2021-12-01T12:03:29.000Z | from rest_framework import serializers
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from core.models import Entry
from .validators import validate_license as vl
def validate_license(value):
if value is not None:
try:
vl(value)
except ValidationError as e:
raise serializers.ValidationError(e.message) # noqa: B306
return value
class MediaCreateSerializer(serializers.Serializer):
file = serializers.FileField()
entry = serializers.CharField()
published = serializers.BooleanField()
license = serializers.JSONField()
def validate_entry(self, value):
try:
entry = Entry.objects.get(id=value)
except Entry.DoesNotExist:
raise serializers.ValidationError(_('Entry does not exist'))
user = self.context['request'].user
if user != entry.owner:
raise serializers.ValidationError(_('Current user is not the owner of entry'))
return value
def validate_license(self, value):
return validate_license(value)
class MediaUpdateSerializer(serializers.Serializer):
published = serializers.BooleanField()
class MediaPartialUpdateSerializer(serializers.Serializer):
published = serializers.BooleanField(required=False)
license = serializers.JSONField(required=False)
def validate_license(self, value):
return validate_license(value)
| 29.42 | 90 | 0.71448 | from rest_framework import serializers
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from core.models import Entry
from .validators import validate_license as vl
def validate_license(value):
if value is not None:
try:
vl(value)
except ValidationError as e:
raise serializers.ValidationError(e.message)
return value
class MediaCreateSerializer(serializers.Serializer):
file = serializers.FileField()
entry = serializers.CharField()
published = serializers.BooleanField()
license = serializers.JSONField()
def validate_entry(self, value):
try:
entry = Entry.objects.get(id=value)
except Entry.DoesNotExist:
raise serializers.ValidationError(_('Entry does not exist'))
user = self.context['request'].user
if user != entry.owner:
raise serializers.ValidationError(_('Current user is not the owner of entry'))
return value
def validate_license(self, value):
return validate_license(value)
class MediaUpdateSerializer(serializers.Serializer):
published = serializers.BooleanField()
class MediaPartialUpdateSerializer(serializers.Serializer):
published = serializers.BooleanField(required=False)
license = serializers.JSONField(required=False)
def validate_license(self, value):
return validate_license(value)
| true | true |
1c3abb1ad5688d5f6754c39edb8aa508dcfa7d1c | 1,028 | py | Python | Python/flip-binary-tree-to-match-preorder-traversal.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/flip-binary-tree-to-match-preorder-traversal.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/flip-binary-tree-to-match-preorder-traversal.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | # Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def flipMatchVoyage(self, root, voyage):
"""
:type root: TreeNode
:type voyage: List[int]
:rtype: List[int]
"""
def dfs(root, voyage, i, result):
if not root:
return True
if root.val != voyage[i[0]]:
return False
i[0] += 1
if root.left and root.left.val != voyage[i[0]]:
result.append(root.val)
return dfs(root.right, voyage, i, result) and \
dfs(root.left, voyage, i, result)
return dfs(root.left, voyage, i, result) and \
dfs(root.right, voyage, i, result)
result = []
return result if dfs(root, voyage, [0], result) else [-1]
| 30.235294 | 66 | 0.476654 |
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def flipMatchVoyage(self, root, voyage):
def dfs(root, voyage, i, result):
if not root:
return True
if root.val != voyage[i[0]]:
return False
i[0] += 1
if root.left and root.left.val != voyage[i[0]]:
result.append(root.val)
return dfs(root.right, voyage, i, result) and \
dfs(root.left, voyage, i, result)
return dfs(root.left, voyage, i, result) and \
dfs(root.right, voyage, i, result)
result = []
return result if dfs(root, voyage, [0], result) else [-1]
| true | true |
1c3abb9ac79583acad9bbf281f80f7e7c3b927d3 | 7,926 | py | Python | applications/auto_pilot_bigball/container/c2_Obstacle_Detection/app/predict.py | Dumpkin1996/clipper | 1a08bbdde846c3cfe76236c68548a848f71605e0 | [
"Apache-2.0"
] | 2 | 2019-04-24T13:46:28.000Z | 2019-05-28T06:59:26.000Z | applications/auto_pilot_bigball/container/c2_Obstacle_Detection/app/predict.py | SimonZsx/clipper | 457088be2ebe68c68b94d90389d1308e35b4c844 | [
"Apache-2.0"
] | null | null | null | applications/auto_pilot_bigball/container/c2_Obstacle_Detection/app/predict.py | SimonZsx/clipper | 457088be2ebe68c68b94d90389d1308e35b4c844 | [
"Apache-2.0"
] | 4 | 2019-04-03T11:03:57.000Z | 2019-06-26T08:22:38.000Z | import numpy as np
import tensorflow as tf
import cv2
import time
previous = []
class yolo_tf:
w_img = 1280
h_img = 720
weights_file = '/container/c2_Obstacle_Detection/app/weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.3
iou_threshold = 0.5
result_list = None
classes = ["car"]
def __init__(self):
self.build_networks()
def build_networks(self):
print("Building YOLO_small graph...")
self.x = tf.placeholder('float32',[None,448,448,3])
# self.x = tf.placeholder('float32',[None,252, 1280, 3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
#skip dropout_31
self.fc_32 = self.fc_layer(32, self.fc_30, 1470, flat=False, linear=True)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weights_file)
print("Loading complete!")
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
print('Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels)))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
print ('Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride))
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
print ('Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear)) )
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(yolo,img):
yolo.h_img,yolo.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_resized_np = np.asarray( img_resized )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {yolo.x: inputs}
net_output = yolo.sess.run(yolo.fc_32,feed_dict=in_dict)
result = interpret_output(yolo, net_output[0])
yolo.result_list = result
def interpret_output(yolo,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= yolo.w_img
boxes[:,:,:,1] *= yolo.h_img
boxes[:,:,:,2] *= yolo.w_img
boxes[:,:,:,3] *= yolo.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=yolo.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if iou(boxes_filtered[i],boxes_filtered[j]) > yolo.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
return len(boxes_filtered)
def iou(box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def read_image(i):
image_path = "/container/dataset/" + i + ".jpg"
image = cv2.imread(image_path)
print("original shape", image.shape)
return image
yolo = yolo_tf()
def predict(info):
global previous
try:
start = time.time()
image_index_str = info.split("***")[0]
if True:
image = read_image(image_index_str)
detect_from_cvmat(yolo, image)
results = yolo.result_list
print("Dectection Result:", results)
# Consider when more than three cars are detected, then ther is an obstacle
obstacle_detected = (results > 3)
print("Obstacle Detected?", obstacle_detected)
to_return = obstacle_detected or (sum(previous[-3:]) > 0)
print("To Return:", to_return)
previous.append(obstacle_detected)
end = time.time()
print("ELASPSED TIME", (end-start)*1000)
return str(to_return) + "***" + info
else:
end = time.time()
print("ELASPSED TIME", (end-start)*1000)
return str(previous[-1]) + "***" + info
except Exception as exc:
print('Generated an exception: %s' % (exc))
| 37.386792 | 148 | 0.696064 | import numpy as np
import tensorflow as tf
import cv2
import time
previous = []
class yolo_tf:
w_img = 1280
h_img = 720
weights_file = '/container/c2_Obstacle_Detection/app/weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.3
iou_threshold = 0.5
result_list = None
classes = ["car"]
def __init__(self):
self.build_networks()
def build_networks(self):
print("Building YOLO_small graph...")
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
self.fc_32 = self.fc_layer(32, self.fc_30, 1470, flat=False, linear=True)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weights_file)
print("Loading complete!")
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
print('Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels)))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
print ('Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride))
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
print ('Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear)) )
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(yolo,img):
yolo.h_img,yolo.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_resized_np = np.asarray( img_resized )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {yolo.x: inputs}
net_output = yolo.sess.run(yolo.fc_32,feed_dict=in_dict)
result = interpret_output(yolo, net_output[0])
yolo.result_list = result
def interpret_output(yolo,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= yolo.w_img
boxes[:,:,:,1] *= yolo.h_img
boxes[:,:,:,2] *= yolo.w_img
boxes[:,:,:,3] *= yolo.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=yolo.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if iou(boxes_filtered[i],boxes_filtered[j]) > yolo.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
return len(boxes_filtered)
def iou(box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def read_image(i):
image_path = "/container/dataset/" + i + ".jpg"
image = cv2.imread(image_path)
print("original shape", image.shape)
return image
yolo = yolo_tf()
def predict(info):
global previous
try:
start = time.time()
image_index_str = info.split("***")[0]
if True:
image = read_image(image_index_str)
detect_from_cvmat(yolo, image)
results = yolo.result_list
print("Dectection Result:", results)
obstacle_detected = (results > 3)
print("Obstacle Detected?", obstacle_detected)
to_return = obstacle_detected or (sum(previous[-3:]) > 0)
print("To Return:", to_return)
previous.append(obstacle_detected)
end = time.time()
print("ELASPSED TIME", (end-start)*1000)
return str(to_return) + "***" + info
else:
end = time.time()
print("ELASPSED TIME", (end-start)*1000)
return str(previous[-1]) + "***" + info
except Exception as exc:
print('Generated an exception: %s' % (exc))
| true | true |
1c3abbb1826a720dc0409dbf7f20ba1bb775ab71 | 532 | py | Python | api/tracker/lib.py | yurebecca/spam-tracker | c210f683cdef1b6c61edc45759534a8f8a2cef49 | [
"MIT"
] | null | null | null | api/tracker/lib.py | yurebecca/spam-tracker | c210f683cdef1b6c61edc45759534a8f8a2cef49 | [
"MIT"
] | null | null | null | api/tracker/lib.py | yurebecca/spam-tracker | c210f683cdef1b6c61edc45759534a8f8a2cef49 | [
"MIT"
] | null | null | null | import re
import math
# Simple way to count number of words in a string
def word_count(str):
words = str.split()
return len(words)
# This is due to python's rounding issue
def round_half_up(n, decimals = 0):
multiplier = 10 ** decimals
return math.floor(n * multiplier + 0.5) / multiplier
def pre_process(text):
# lowercase
text = text.lower()
#remove tags
text = re.sub("<!--?.*?-->","",text)
# remove special characters and digits
text = re.sub("(\\d|\\W)+"," ",text)
return text | 21.28 | 56 | 0.62782 | import re
import math
def word_count(str):
words = str.split()
return len(words)
def round_half_up(n, decimals = 0):
multiplier = 10 ** decimals
return math.floor(n * multiplier + 0.5) / multiplier
def pre_process(text):
# lowercase
text = text.lower()
#remove tags
text = re.sub("<!--?.*?-->","",text)
# remove special characters and digits
text = re.sub("(\\d|\\W)+"," ",text)
return text | true | true |
1c3abd66b0857aa0c1ac086a6dc7a04879fe82eb | 2,265 | py | Python | urls.py | Pickerup-Yirui/ourSpider | f85e39b07335b77a57b08ab5aea5792ad3dc67e7 | [
"MIT"
] | null | null | null | urls.py | Pickerup-Yirui/ourSpider | f85e39b07335b77a57b08ab5aea5792ad3dc67e7 | [
"MIT"
] | null | null | null | urls.py | Pickerup-Yirui/ourSpider | f85e39b07335b77a57b08ab5aea5792ad3dc67e7 | [
"MIT"
] | null | null | null | """
author = "YiRui Wang"
定义了一系列抓取51jobs页面上urls的函数(包)
创建于2020 1 16
getPageNum(num):根据初始url找到并返回网页总页数
webUrlsPool(page_num):根据得到的网页总数,构造并返回所有符合搜索标准的网页url列表
getJobUrls(pageUrl):根据pageUrl,得到该page上的jobUrl
"""
import requests
from bs4 import BeautifulSoup
def getPageNum():
"""
根据初始url找到并返回网页总页数
"""
# keyword = quote(keyword, safe='/:?=')
url = 'https://search.51job.com/list/070300%252C020000,000000,0000,00,9,99,\
%25E6%2595%25B0%25E5%25AD%2597%25E8%2590%25A5%25E9%2594%2580,2,1.html?\
lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&\
companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&\
fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
html = requests.get(url)
html.encoding = 'gb18030'
soup = BeautifulSoup(html.text, "lxml") # 用html.content需要的代码较少,不需要解码编码的调整
span = soup.find('div', class_='p_in').find('span', class_='td')
page_num = span.get_text(strip= True).replace('共', '').replace('页,到第', '')
return page_num
def webUrlsPool(page_num):
"""
根据得到的网页总数,构造并返回所有符合搜索标准的网页url列表
params:
page_num:根据初始url获得的网页页数
"""
pageUrls = []
page_num = int(page_num)
for i in range(1,page_num + 1):
url = 'https://search.51job.com/list/070300%252C020000,000000,0000,00,9,99,%25E6%2595%25B0%25E5%25AD%2597%25E8%2590%25A5%25E9%2594%2580,2,' + str(i) + \
'.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
pageUrls.append(url)
return pageUrls
# webUrlsPool()
# -*- coding: gb2312 -*-
def getJobUrls(pageUrl,):
"""
根据pageUrl,得到该page上的jobUrl列表
params:
pageUrl:搜索结果的一页
"""
# 用requests请求网页url
r = requests.get(pageUrl)
r.encoding = 'gb18030'
htmlContents = r.text
# 用beautifulsoup,找到岗位urls,并添加到pool中
htmlSoup = BeautifulSoup(htmlContents,'html.parser')
withTags = htmlSoup.find_all('p',class_='t1')
# 删掉第0个元素--html文档中表格标头,不含有目标信息(岗位url)
del withTags[0]
# 找到岗位url,并返回
urls0 = []
for a in withTags:
b = a.find('a')
urls0.append(b['href'])
return urls0
| 27.962963 | 160 | 0.661369 |
import requests
from bs4 import BeautifulSoup
def getPageNum():
url = 'https://search.51job.com/list/070300%252C020000,000000,0000,00,9,99,\
%25E6%2595%25B0%25E5%25AD%2597%25E8%2590%25A5%25E9%2594%2580,2,1.html?\
lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&\
companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&\
fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
html = requests.get(url)
html.encoding = 'gb18030'
soup = BeautifulSoup(html.text, "lxml")
span = soup.find('div', class_='p_in').find('span', class_='td')
page_num = span.get_text(strip= True).replace('共', '').replace('页,到第', '')
return page_num
def webUrlsPool(page_num):
pageUrls = []
page_num = int(page_num)
for i in range(1,page_num + 1):
url = 'https://search.51job.com/list/070300%252C020000,000000,0000,00,9,99,%25E6%2595%25B0%25E5%25AD%2597%25E8%2590%25A5%25E9%2594%2580,2,' + str(i) + \
'.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
pageUrls.append(url)
return pageUrls
def getJobUrls(pageUrl,):
r = requests.get(pageUrl)
r.encoding = 'gb18030'
htmlContents = r.text
htmlSoup = BeautifulSoup(htmlContents,'html.parser')
withTags = htmlSoup.find_all('p',class_='t1')
del withTags[0]
urls0 = []
for a in withTags:
b = a.find('a')
urls0.append(b['href'])
return urls0
| true | true |
1c3abf155ac5dafd3b312ee9b76e109b091d1503 | 2,368 | py | Python | examples/adspygoogle/dfp/v201302/creative_wrapper_service/create_creative_wrappers.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201302/creative_wrapper_service/create_creative_wrappers.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201302/creative_wrapper_service/create_creative_wrappers.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | 2 | 2020-04-02T19:00:31.000Z | 2020-08-06T03:28:38.000Z | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new creative wrapper.
Creative wrappers must be associated with a LabelType.CREATIVE_WRAPPER label and
applied to ad units by AdUnit.appliedLabels. To determine which creative
wrappers exist, run get_all_creative_wrappers.py
Tags: CreativeWrapperService.createCreativeWrappers
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
LABEL_ID = 'INSERT_CREATIVE_WRAPPER_LABEL_ID_HERE'
def main(client, label_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201302')
# Create creative wrapper objects.
creative_wrapper = {
# A label can only be associated with one creative wrapper.
'labelId': label_id,
'ordering': 'INNER',
'header': {'htmlSnippet': '<b>My creative wrapper header</b>'},
'footer': {'htmlSnippet': '<b>My creative wrapper footer</b>'}
}
# Add creative wrapper.
creative_wrappers = creative_wrapper_service.CreateCreativeWrappers(
[creative_wrapper])
# Display results.
for creative_wrapper in creative_wrappers:
print ('Creative wrapper with ID \'%s\' applying to label \'%s\' was '
'created.' % (creative_wrapper['id'], creative_wrapper['labelId']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, LABEL_ID)
| 34.823529 | 80 | 0.712416 |
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
from adspygoogle import DfpClient
LABEL_ID = 'INSERT_CREATIVE_WRAPPER_LABEL_ID_HERE'
def main(client, label_id):
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201302')
creative_wrapper = {
'labelId': label_id,
'ordering': 'INNER',
'header': {'htmlSnippet': '<b>My creative wrapper header</b>'},
'footer': {'htmlSnippet': '<b>My creative wrapper footer</b>'}
}
creative_wrappers = creative_wrapper_service.CreateCreativeWrappers(
[creative_wrapper])
for creative_wrapper in creative_wrappers:
print ('Creative wrapper with ID \'%s\' applying to label \'%s\' was '
'created.' % (creative_wrapper['id'], creative_wrapper['labelId']))
if __name__ == '__main__':
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, LABEL_ID)
| true | true |
1c3abf390f833ba023c82673917278d8ccce4a81 | 4,331 | py | Python | lisa/show_segmentation.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 22 | 2015-01-26T12:58:54.000Z | 2021-04-15T17:48:13.000Z | lisa/show_segmentation.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 31 | 2015-01-23T14:46:13.000Z | 2018-05-18T14:47:18.000Z | lisa/show_segmentation.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 13 | 2015-06-30T08:54:27.000Z | 2020-09-11T16:08:19.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module is used for visualization of segmentation stored in pkl file.
"""
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
from loguru import logger
# logger = logging.getLogger()
# from PyQt4.QtCore import Qt
from PyQt5.QtWidgets import QApplication
import argparse
import numpy as np
try:
import dicom2fem
import dicom2fem.seg2fem
# from dicom2fem import seg2fem
from dicom2fem.seg2fem import gen_mesh_from_voxels_mc, smooth_mesh
except:
print('dicom2fem not found')
# logger.warning('dicom2fem not found')
from seg2mesh import gen_mesh_from_voxels, smooth_mesh
def showSegmentation(
    segmentation,
    voxelsize_mm=np.ones([3, 1]),
    degrad=4,
    label=1,
    smoothing=True,
    vtk_file=None,
    qt_app=None,
    show=True,
    resize_mm=None
):
    """Build a surface mesh from a labeled volume, write it and optionally view it.

    (Translated from the original Czech docstring:) works with
    three-dimensional data like ``data['segmentation']``; ``data['slab']``
    describes what each label value means.

    :param segmentation: 3D array with the labeled volume.
    :param voxelsize_mm: voxel edge lengths in millimeters.
    :param degrad: integer subsampling factor applied to every axis.
    :param label: not referenced in this body; kept for interface compatibility.
    :param smoothing: smooth the marching-cubes mesh when True.
    :param vtk_file: output VTK path; defaults to "mesh_geom.vtk".
    :param qt_app: existing QApplication to reuse; created here when None.
    :param show: open an interactive viewer on the written VTK file.
    :param resize_mm: optional isotropic voxel size to resample to, in mm.
    :return: list of labels (currently always empty).
    """
    if vtk_file is None:
        vtk_file = "mesh_geom.vtk"
    vtk_file = os.path.expanduser(vtk_file)
    labels = []
    # Subsample the volume; voxel size grows by the same factor so the
    # physical geometry is preserved.
    segmentation = segmentation[::degrad, ::degrad, ::degrad]
    voxelsize_mm = voxelsize_mm * degrad
    _stats(segmentation)
    if resize_mm is not None:
        logger.debug("resize begin")
        print("resize")
        new_voxelsize_mm = np.asarray([resize_mm, resize_mm, resize_mm])
        import imtools
        segmentation = imtools.misc.resize_to_mm(segmentation, voxelsize_mm=voxelsize_mm, new_voxelsize_mm=new_voxelsize_mm)
        voxelsize_mm = new_voxelsize_mm
        # NOTE(review): this message likely should read "resize end".
        logger.debug("resize begin")
    _stats(segmentation)
    # import pdb; pdb.set_trace()
    mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm)
    if smoothing:
        mesh_data.coors = smooth_mesh(mesh_data)
        # mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
    else:
        # NOTE(review): the unsmoothed path rebuilds the mesh with voxels
        # scaled by 1e-2 -- presumably intentional, confirm before changing.
        mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)
        # mesh_data.coors +=
    mesh_data.write(vtk_file)
    if qt_app is None:
        # A QApplication must exist before any Qt/VTK widgets are created.
        qt_app = QApplication(sys.argv)
        logger.debug("qapp constructed")
    if show:
        import viewer
        view = viewer.QVTKViewer(vtk_file)
        print('show viewer')
        view.exec_()
    return labels
def _stats(data):
print("stats")
un = np.unique(data)
for lab in un:
print(lab, " : ", np.sum(data==lab))
def main():
    """CLI entry point: load a segmentation and display it as a 3D mesh.

    Builds a boolean mask from the labels selected with ``-l`` and passes
    it to :func:`showSegmentation`.
    """
    # NOTE(review): the original body configured logging via
    # ``logger.setLevel(logging.WARNING)`` and ``logging.StreamHandler()``,
    # but ``logging`` is never imported in this module and the loguru
    # ``logger`` object has no ``setLevel`` method, so main() raised a
    # NameError before doing any work.  The broken setup is removed; loguru
    # already logs to stderr by default.

    # input parser
    parser = argparse.ArgumentParser(
        description='\
            3D visualization of segmentation\n\
            \npython show_segmentation.py\n\
            \npython show_segmentation.py -i resection.pkl -l 2 3 4 -d 4')
    parser.add_argument(
        '-i', '--inputfile',
        default='organ.pkl',
        help='input file')
    parser.add_argument(
        '-o', '--outputfile',
        default='~/lisa_data/mesh_geom.vtk',
        help='output file')
    parser.add_argument(
        '-d', '--degrad', type=int,
        default=4,
        help='data degradation, default 4')
    parser.add_argument(
        '-r', '--resize', type=float,
        default=None,
        help='resize voxel to defined size in milimeters, default is None')
    parser.add_argument(
        '-l', '--label', type=int, metavar='N', nargs='+',
        default=[1],
        help='segmentation labels, default 1')
    args = parser.parse_args()

    import io3d
    data = io3d.read(args.inputfile, dataplus_format=True)

    # Combine all requested labels into one boolean mask.
    # ``np.bool`` was removed in NumPy 1.24; use the builtin bool dtype.
    ds = np.zeros(data['segmentation'].shape, bool)
    for lab in args.label:
        ds = ds | (data['segmentation'] == lab)

    outputfile = os.path.expanduser(args.outputfile)
    showSegmentation(ds, degrad=args.degrad, voxelsize_mm=data['voxelsize_mm'],
                     vtk_file=outputfile, resize_mm=args.resize)


if __name__ == "__main__":
    main()
| 25.034682 | 124 | 0.643962 |
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
from loguru import logger
from PyQt5.QtWidgets import QApplication
import argparse
import numpy as np
try:
import dicom2fem
import dicom2fem.seg2fem
from dicom2fem.seg2fem import gen_mesh_from_voxels_mc, smooth_mesh
except:
print('dicom2fem not found')
from seg2mesh import gen_mesh_from_voxels, smooth_mesh
def showSegmentation(
segmentation,
voxelsize_mm=np.ones([3, 1]),
degrad=4,
label=1,
smoothing=True,
vtk_file=None,
qt_app=None,
show=True,
resize_mm=None
):
if vtk_file is None:
vtk_file = "mesh_geom.vtk"
vtk_file = os.path.expanduser(vtk_file)
labels = []
segmentation = segmentation[::degrad, ::degrad, ::degrad]
voxelsize_mm = voxelsize_mm * degrad
_stats(segmentation)
if resize_mm is not None:
logger.debug("resize begin")
print("resize")
new_voxelsize_mm = np.asarray([resize_mm, resize_mm, resize_mm])
import imtools
segmentation = imtools.misc.resize_to_mm(segmentation, voxelsize_mm=voxelsize_mm, new_voxelsize_mm=new_voxelsize_mm)
voxelsize_mm = new_voxelsize_mm
logger.debug("resize begin")
_stats(segmentation)
mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm)
if smoothing:
mesh_data.coors = smooth_mesh(mesh_data)
else:
mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)
mesh_data.write(vtk_file)
if qt_app is None:
qt_app = QApplication(sys.argv)
logger.debug("qapp constructed")
if show:
import viewer
view = viewer.QVTKViewer(vtk_file)
print('show viewer')
view.exec_()
return labels
def _stats(data):
print("stats")
un = np.unique(data)
for lab in un:
print(lab, " : ", np.sum(data==lab))
def main():
    """CLI entry point: load a segmentation and display it as a 3D mesh.

    Builds a boolean mask from the labels selected with ``-l`` and passes
    it to :func:`showSegmentation`.
    """
    # NOTE(review): the original body called ``logger.setLevel(logging.WARNING)``
    # and ``logging.StreamHandler()``, but ``logging`` is never imported in
    # this module and the loguru ``logger`` has no ``setLevel`` method, so
    # main() raised a NameError immediately.  The broken setup is removed;
    # loguru logs to stderr by default.
    parser = argparse.ArgumentParser(
        description='\
            3D visualization of segmentation\n\
            \npython show_segmentation.py\n\
            \npython show_segmentation.py -i resection.pkl -l 2 3 4 -d 4')
    parser.add_argument(
        '-i', '--inputfile',
        default='organ.pkl',
        help='input file')
    parser.add_argument(
        '-o', '--outputfile',
        default='~/lisa_data/mesh_geom.vtk',
        help='output file')
    parser.add_argument(
        '-d', '--degrad', type=int,
        default=4,
        help='data degradation, default 4')
    parser.add_argument(
        '-r', '--resize', type=float,
        default=None,
        help='resize voxel to defined size in milimeters, default is None')
    parser.add_argument(
        '-l', '--label', type=int, metavar='N', nargs='+',
        default=[1],
        help='segmentation labels, default 1')
    args = parser.parse_args()

    import io3d
    data = io3d.read(args.inputfile, dataplus_format=True)

    # Combine all requested labels into one boolean mask.
    # ``np.bool`` was removed in NumPy 1.24; use the builtin bool dtype.
    ds = np.zeros(data['segmentation'].shape, bool)
    for lab in args.label:
        ds = ds | (data['segmentation'] == lab)

    outputfile = os.path.expanduser(args.outputfile)
    showSegmentation(ds, degrad=args.degrad, voxelsize_mm=data['voxelsize_mm'],
                     vtk_file=outputfile, resize_mm=args.resize)


if __name__ == "__main__":
    main()
| true | true |
1c3abf7fcae14682cbe89c0667091899ac054267 | 5,384 | py | Python | examples/plot_interpolation.py | mdrolet01/scikit-fda | f16ffb3986408c12a2dfdf910688bd56ddecb188 | [
"BSD-3-Clause"
] | 1 | 2020-06-27T22:25:49.000Z | 2020-06-27T22:25:49.000Z | examples/plot_interpolation.py | KonstantinKlepikov/scikit-fda | 93c4ad80aaba8739b4f90932a2a759d6f5960387 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_interpolation.py | KonstantinKlepikov/scikit-fda | 93c4ad80aaba8739b4f90932a2a759d6f5960387 | [
"BSD-3-Clause"
] | null | null | null | """
Interpolation
=====================
This example shows the types of interpolation used in the evaluation of
FDataGrids.
"""
# Author: Pablo Marcos Manchón
# License: MIT
# sphinx_gallery_thumbnail_number = 3
import skfda
from skfda.representation.interpolation import SplineInterpolation
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
##############################################################################
# The :class:`~skfda.representation.grid.FDataGrid` class is used for datasets
# containing discretized functions. For the evaluation between the points of
# discretization, or sample points, is necessary to interpolate.
#
# We will construct an example dataset with two curves with 6 points of
# discretization.
#
fd = skfda.datasets.make_sinusoidal_process(n_samples=2, n_features=6,
random_state=1)
fig = fd.scatter()
fig.legend(["Sample 1", "Sample 2"])
##############################################################################
# By default it is used linear interpolation, which is one of the simplest
# methods of interpolation and therefore one of the least computationally
# expensive, but has the disadvantage that the interpolant is not
# differentiable at the points of discretization.
#
fig = fd.plot()
fd.scatter(fig=fig)
##############################################################################
# The interpolation method of the FDataGrid could be changed setting the
# attribute ``interpolation``. Once we have set an interpolation it is used for
# the evaluation of the object.
#
# Polynomial spline interpolation can be performed using
# :class:`~skfda.representation.interpolation.SplineInterpolation`. In the
# following example a cubic interpolation is set.
fd.interpolation = SplineInterpolation(interpolation_order=3)
fig = fd.plot()
fd.scatter(fig=fig)
##############################################################################
# Smooth interpolation could be performed with the attribute
# ``smoothness_parameter`` of the spline interpolation.
#
# Sample with noise
fd_smooth = skfda.datasets.make_sinusoidal_process(n_samples=1, n_features=30,
random_state=1, error_std=.3)
# Cubic interpolation
fd_smooth.interpolation = SplineInterpolation(interpolation_order=3)
fig = fd_smooth.plot(label="Cubic")
# Smooth interpolation
fd_smooth.interpolation = SplineInterpolation(interpolation_order=3,
smoothness_parameter=1.5)
fd_smooth.plot(fig=fig, label="Cubic smoothed")
fd_smooth.scatter(fig=fig)
fig.legend()
##############################################################################
# Sometimes our samples are required to be monotone, in these cases it is
# possible to use monotone cubic interpolation with the attribute
# ``monotone``. A piecewise cubic hermite interpolating polynomial (PCHIP)
# will be used.
#
fd = fd[1]
fd_monotone = fd.copy(data_matrix=np.sort(fd.data_matrix, axis=1))
fig = fd_monotone.plot(linestyle='--', label="cubic")
fd_monotone.interpolation = SplineInterpolation(interpolation_order=3,
monotone=True)
fd_monotone.plot(fig=fig, label="PCHIP")
fd_monotone.scatter(fig=fig, c='C1')
fig.legend()
##############################################################################
# All the interpolations will work regardless of the dimension of the image, but
# depending on the domain dimension some methods will not be available.
#
# For the next examples it is constructed a surface, :math:`x_i: \mathbb{R}^2
# \longmapsto \mathbb{R}`. By default, as in unidimensional samples, it is used
# linear interpolation.
#
X, Y, Z = axes3d.get_test_data(1.2)
data_matrix = [Z.T]
sample_points = [X[0, :], Y[:, 0]]
fd = skfda.FDataGrid(data_matrix, sample_points)
fig = fd.plot()
fd.scatter(fig=fig)
##############################################################################
# In the following figure it is shown the result of the cubic interpolation
# applied to the surface.
#
# The degree of the interpolation polynomial does not have to coincide in both
# directions, for example, cubic interpolation in the first
# component and quadratic in the second one could be defined using a tuple with
# the values (3,2).
#
fd.interpolation = SplineInterpolation(interpolation_order=3)
fig = fd.plot()
fd.scatter(fig=fig)
##############################################################################
# The following table shows the interpolation methods available by the class
# :class:`SplineInterpolation` depending on the domain dimension.
#
# +------------------+--------+----------------+----------+-------------+
# | Domain dimension | Linear | Up to degree 5 | Monotone | Smoothing |
# +==================+========+================+==========+=============+
# | 1 | ✔ | ✔ | ✔ | ✔ |
# +------------------+--------+----------------+----------+-------------+
# | 2 | ✔ | ✔ | ✖ | ✔ |
# +------------------+--------+----------------+----------+-------------+
# | 3 or more | ✔ | ✖ | ✖ | ✖ |
# +------------------+--------+----------------+----------+-------------+
#
| 34.961039 | 80 | 0.570208 |
import skfda
from skfda.representation.interpolation import SplineInterpolation
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
| true | true |
1c3ac11edbb911fcd05ae9c9593b4e3342b95310 | 4,326 | py | Python | fbpcs/private_computation/test/service/test_compute_metrics_stage_service.py | chaitanya2334/fbpcs | 8b9f35df31b1c85820805c1a0df7c6a881b15b6c | [
"MIT"
] | 33 | 2021-05-07T19:45:15.000Z | 2021-08-11T17:10:26.000Z | fbpcs/private_computation/test/service/test_compute_metrics_stage_service.py | chaitanya2334/fbpcs | 8b9f35df31b1c85820805c1a0df7c6a881b15b6c | [
"MIT"
] | 111 | 2021-05-18T22:43:05.000Z | 2021-08-13T15:58:12.000Z | fbpcs/private_computation/test/service/test_compute_metrics_stage_service.py | chaitanya2334/fbpcs | 8b9f35df31b1c85820805c1a0df7c6a881b15b6c | [
"MIT"
] | 14 | 2021-05-18T22:42:24.000Z | 2021-07-26T17:51:18.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, MagicMock, patch
from fbpcp.entity.mpc_instance import MPCParty
from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance
from fbpcs.onedocker_binary_config import OneDockerBinaryConfig
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
PrivateComputationInstance,
PrivateComputationRole,
)
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationInstanceStatus,
)
from fbpcs.private_computation.repository.private_computation_game import GameNames
from fbpcs.private_computation.service.compute_metrics_stage_service import (
ComputeMetricsStageService,
)
from fbpcs.private_computation.service.constants import (
NUM_NEW_SHARDS_PER_FILE,
)
class TestComputeMetricsStageService(IsolatedAsyncioTestCase):
    """Unit tests for ComputeMetricsStageService.

    The MPC service is mocked out, so the tests exercise only the stage
    service's own orchestration and argument construction.
    """

    @patch("fbpcp.service.mpc.MPCService")
    def setUp(self, mock_mpc_svc) -> None:
        """Wire the stage service to a mocked MPC service.

        The binary-config map is a defaultdict, so any binary name resolves
        to the same default OneDockerBinaryConfig.
        """
        self.mock_mpc_svc = mock_mpc_svc
        self.mock_mpc_svc.create_instance = MagicMock()

        onedocker_binary_config_map = defaultdict(
            lambda: OneDockerBinaryConfig(
                tmp_directory="/test_tmp_directory/", binary_version="latest"
            )
        )
        self.stage_svc = ComputeMetricsStageService(
            onedocker_binary_config_map, self.mock_mpc_svc
        )

    async def test_compute_metrics(self) -> None:
        """run_async should record the started MPC instance on the PC instance."""
        private_computation_instance = self._create_pc_instance()
        mpc_instance = PCSMPCInstance.create_instance(
            instance_id=private_computation_instance.instance_id + "_compute_metrics0",
            game_name=GameNames.LIFT.value,
            mpc_party=MPCParty.CLIENT,
            num_workers=private_computation_instance.num_mpc_containers,
        )

        self.mock_mpc_svc.start_instance_async = AsyncMock(return_value=mpc_instance)

        # One server IP per MPC container (TEST-NET-1 addresses).
        test_server_ips = [
            f"192.0.2.{i}"
            for i in range(private_computation_instance.num_mpc_containers)
        ]
        await self.stage_svc.run_async(private_computation_instance, test_server_ips)

        self.assertEqual(mpc_instance, private_computation_instance.instances[0])

    def test_get_game_args(self) -> None:
        """Game args should shard the input across the MPC containers."""
        # TODO: add game args test for attribution args
        private_computation_instance = self._create_pc_instance()

        # Two containers -> two arg dicts whose file_start_index advances by
        # num_files_per_mpc_container.
        test_game_args = [
            {
                "input_base_path": private_computation_instance.data_processing_output_path,
                "output_base_path": private_computation_instance.compute_stage_output_base_path,
                "file_start_index": 0,
                "num_files": private_computation_instance.num_files_per_mpc_container,
                "concurrency": private_computation_instance.concurrency,
            },
            {
                "input_base_path": private_computation_instance.data_processing_output_path,
                "output_base_path": private_computation_instance.compute_stage_output_base_path,
                "file_start_index": private_computation_instance.num_files_per_mpc_container,
                "num_files": private_computation_instance.num_files_per_mpc_container,
                "concurrency": private_computation_instance.concurrency,
            },
        ]

        self.assertEqual(
            test_game_args,
            self.stage_svc._get_compute_metrics_game_args(private_computation_instance),
        )

    def _create_pc_instance(self) -> PrivateComputationInstance:
        """Build a minimal PARTNER-side lift instance ready for the compute stage."""
        return PrivateComputationInstance(
            instance_id="test_instance_123",
            role=PrivateComputationRole.PARTNER,
            instances=[],
            status=PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
            status_update_ts=1600000000,
            num_pid_containers=2,
            num_mpc_containers=2,
            num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
            game_type=PrivateComputationGameType.LIFT,
            input_path="456",
            output_dir="789",
        )
| 41.596154 | 96 | 0.718678 |
from collections import defaultdict
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, MagicMock, patch
from fbpcp.entity.mpc_instance import MPCParty
from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance
from fbpcs.onedocker_binary_config import OneDockerBinaryConfig
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
PrivateComputationInstance,
PrivateComputationRole,
)
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationInstanceStatus,
)
from fbpcs.private_computation.repository.private_computation_game import GameNames
from fbpcs.private_computation.service.compute_metrics_stage_service import (
ComputeMetricsStageService,
)
from fbpcs.private_computation.service.constants import (
NUM_NEW_SHARDS_PER_FILE,
)
class TestComputeMetricsStageService(IsolatedAsyncioTestCase):
@patch("fbpcp.service.mpc.MPCService")
def setUp(self, mock_mpc_svc) -> None:
self.mock_mpc_svc = mock_mpc_svc
self.mock_mpc_svc.create_instance = MagicMock()
onedocker_binary_config_map = defaultdict(
lambda: OneDockerBinaryConfig(
tmp_directory="/test_tmp_directory/", binary_version="latest"
)
)
self.stage_svc = ComputeMetricsStageService(
onedocker_binary_config_map, self.mock_mpc_svc
)
async def test_compute_metrics(self) -> None:
private_computation_instance = self._create_pc_instance()
mpc_instance = PCSMPCInstance.create_instance(
instance_id=private_computation_instance.instance_id + "_compute_metrics0",
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.CLIENT,
num_workers=private_computation_instance.num_mpc_containers,
)
self.mock_mpc_svc.start_instance_async = AsyncMock(return_value=mpc_instance)
test_server_ips = [
f"192.0.2.{i}"
for i in range(private_computation_instance.num_mpc_containers)
]
await self.stage_svc.run_async(private_computation_instance, test_server_ips)
self.assertEqual(mpc_instance, private_computation_instance.instances[0])
def test_get_game_args(self) -> None:
private_computation_instance = self._create_pc_instance()
test_game_args = [
{
"input_base_path": private_computation_instance.data_processing_output_path,
"output_base_path": private_computation_instance.compute_stage_output_base_path,
"file_start_index": 0,
"num_files": private_computation_instance.num_files_per_mpc_container,
"concurrency": private_computation_instance.concurrency,
},
{
"input_base_path": private_computation_instance.data_processing_output_path,
"output_base_path": private_computation_instance.compute_stage_output_base_path,
"file_start_index": private_computation_instance.num_files_per_mpc_container,
"num_files": private_computation_instance.num_files_per_mpc_container,
"concurrency": private_computation_instance.concurrency,
},
]
self.assertEqual(
test_game_args,
self.stage_svc._get_compute_metrics_game_args(private_computation_instance),
)
def _create_pc_instance(self) -> PrivateComputationInstance:
return PrivateComputationInstance(
instance_id="test_instance_123",
role=PrivateComputationRole.PARTNER,
instances=[],
status=PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
status_update_ts=1600000000,
num_pid_containers=2,
num_mpc_containers=2,
num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
game_type=PrivateComputationGameType.LIFT,
input_path="456",
output_dir="789",
)
| true | true |
1c3ac2c3e925dd7380b9f7412396e842a9a3d381 | 7,543 | py | Python | backend/coreapp/models/github.py | ChrisNonyminus/decomp.me | b51e8d586ba95bd20f6b2e5881ecc34459e761bc | [
"MIT"
] | 47 | 2021-11-05T14:20:58.000Z | 2022-03-19T18:56:46.000Z | backend/coreapp/models/github.py | ChrisNonyminus/decomp.me | b51e8d586ba95bd20f6b2e5881ecc34459e761bc | [
"MIT"
] | 131 | 2021-10-20T09:17:16.000Z | 2022-03-31T22:22:54.000Z | backend/coreapp/models/github.py | ChrisNonyminus/decomp.me | b51e8d586ba95bd20f6b2e5881ecc34459e761bc | [
"MIT"
] | 20 | 2021-11-23T18:49:45.000Z | 2022-03-29T06:25:10.000Z | import shutil
import subprocess
from pathlib import Path
from typing import Optional
import requests
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models, transaction
from django.dispatch import receiver
from django.utils.timezone import now
from github import Github
from github.NamedUser import NamedUser
from github.Repository import Repository
from rest_framework import status
from rest_framework.exceptions import APIException
from ..middleware import Request
from .profile import Profile
from .project import Project
from .scratch import Scratch
API_CACHE_TIMEOUT = 60 * 60 # 1 hour
class BadOAuthCodeException(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_code = "bad_oauth_code"
default_detail = "Invalid or expired GitHub OAuth verification code."
class MissingOAuthScopeException(APIException):
    """Raised when the GitHub OAuth token lacks a scope the API needs.

    Unlike its sibling exceptions in this module, the original class did not
    define ``default_detail``, so DRF fell back to its generic error
    message; a specific detail is provided for consistency.
    """

    status_code = status.HTTP_400_BAD_REQUEST
    default_code = "missing_oauth_scope"
    default_detail = "The GitHub OAuth token is missing a required scope."
class MalformedGitHubApiResponseException(APIException):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_code = "malformed_github_api_response"
default_detail = "The GitHub API returned an malformed or unexpected response."
class GitHubUser(models.Model):
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
related_name="github",
)
github_id = models.PositiveIntegerField(unique=True, editable=False)
access_token = models.CharField(max_length=100)
class Meta:
verbose_name = "GitHub user"
verbose_name_plural = "GitHub users"
def details(self) -> NamedUser:
cache_key = f"github_user_details:{self.github_id}"
cached = cache.get(cache_key)
if cached:
return cached
details = Github(self.access_token).get_user_by_id(self.github_id)
cache.set(cache_key, details, API_CACHE_TIMEOUT)
return details
def __str__(self):
return "@" + self.details().login
@staticmethod
@transaction.atomic
def login(request: Request, oauth_code: str) -> "GitHubUser":
response = requests.post(
"https://github.com/login/oauth/access_token",
json={
"client_id": settings.GITHUB_CLIENT_ID,
"client_secret": settings.GITHUB_CLIENT_SECRET,
"code": oauth_code,
},
headers={"Accept": "application/json"},
).json()
error: Optional[str] = response.get("error")
if error == "bad_verification_code":
raise BadOAuthCodeException()
elif error:
raise MalformedGitHubApiResponseException(
f"GitHub API login sent unknown error '{error}'."
)
try:
scope_str = str(response["scope"])
access_token = str(response["access_token"])
except KeyError:
raise MalformedGitHubApiResponseException()
details = Github(access_token).get_user()
try:
gh_user = GitHubUser.objects.get(github_id=details.id)
except GitHubUser.DoesNotExist:
gh_user = GitHubUser()
user = request.user
# make a new user if request.user already has a github account attached
if (
user.is_anonymous
or isinstance(user, User)
and GitHubUser.objects.filter(user=user).get() is not None
):
user = User.objects.create_user(
username=details.login,
email=details.email,
password=None,
)
assert isinstance(user, User)
gh_user.user = user
gh_user.github_id = details.id
gh_user.access_token = access_token
gh_user.save()
profile: Profile = (
Profile.objects.filter(user=gh_user.user).first() or Profile()
)
profile.user = gh_user.user
profile.last_request_date = now()
profile.save()
# If the previous profile was anonymous, give its scratches to the logged-in profile
if request.profile.is_anonymous() and profile.id != request.profile.id:
Scratch.objects.filter(owner=request.profile).update(owner=profile)
request.profile.delete()
login(request, gh_user.user)
request.profile = profile
request.session["profile_id"] = profile.id
return gh_user
class GitHubRepoBusyException(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "This repository is currently being pulled."
class GitHubRepo(models.Model):
owner = models.CharField(max_length=100)
repo = models.CharField(max_length=100)
branch = models.CharField(max_length=100, default="master", blank=False)
is_pulling = models.BooleanField(default=False)
last_pulled = models.DateTimeField(blank=True, null=True)
class Meta:
verbose_name = "GitHub repo"
verbose_name_plural = "GitHub repos"
def pull(self) -> None:
if self.is_pulling:
raise GitHubRepoBusyException()
self.is_pulling = True
self.save()
try:
repo_dir = self.get_dir()
remote_url = f"https://github.com/{self.owner}/{self.repo}"
if repo_dir.exists():
subprocess.run(
["git", "remote", "set-url", "origin", remote_url], cwd=repo_dir
)
subprocess.run(["git", "fetch", "origin", self.branch], cwd=repo_dir)
subprocess.run(
["git", "reset", "--hard", f"origin/{self.branch}"], cwd=repo_dir
)
subprocess.run(["git", "pull"], cwd=repo_dir)
else:
repo_dir.mkdir(parents=True)
subprocess.run(
[
"git",
"clone",
remote_url,
".",
"--depth",
"1",
"-b",
self.branch,
],
check=True,
cwd=repo_dir,
)
self.last_pulled = now()
self.save()
for project in Project.objects.filter(repo=self):
project.import_functions()
finally:
self.is_pulling = False
self.save()
def get_dir(self) -> Path:
return Path(settings.LOCAL_FILE_DIR) / "repos" / str(self.id)
def details(self, access_token: str) -> Repository:
cache_key = f"github_repo_details:{self.id}"
cached = cache.get(cache_key)
if cached:
return cached
details = Github(access_token).get_repo(f"{self.owner}/{self.repo}")
cache.set(cache_key, details, API_CACHE_TIMEOUT)
return details
def __str__(self):
return f"{self.owner}/{self.repo}#{self.branch} ({self.id})"
def get_html_url(self):
return f"https://github.com/{self.owner}/{self.repo}/tree/{self.branch}"
# When a GitHubRepo is deleted, delete its directory
@receiver(models.signals.pre_delete, sender=GitHubRepo)
def delete_local_repo_dir(instance: GitHubRepo, **kwargs):
    """Signal handler: remove the repo's local clone when the row is deleted.

    Keeps LOCAL_FILE_DIR from accumulating orphaned checkouts.
    """
    dir = instance.get_dir()
    if dir.exists():
        shutil.rmtree(dir)
| 31.429167 | 92 | 0.610367 | import shutil
import subprocess
from pathlib import Path
from typing import Optional
import requests
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models, transaction
from django.dispatch import receiver
from django.utils.timezone import now
from github import Github
from github.NamedUser import NamedUser
from github.Repository import Repository
from rest_framework import status
from rest_framework.exceptions import APIException
from ..middleware import Request
from .profile import Profile
from .project import Project
from .scratch import Scratch
API_CACHE_TIMEOUT = 60 * 60
class BadOAuthCodeException(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_code = "bad_oauth_code"
default_detail = "Invalid or expired GitHub OAuth verification code."
class MissingOAuthScopeException(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_code = "missing_oauth_scope"
class MalformedGitHubApiResponseException(APIException):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_code = "malformed_github_api_response"
default_detail = "The GitHub API returned an malformed or unexpected response."
class GitHubUser(models.Model):
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
related_name="github",
)
github_id = models.PositiveIntegerField(unique=True, editable=False)
access_token = models.CharField(max_length=100)
class Meta:
verbose_name = "GitHub user"
verbose_name_plural = "GitHub users"
def details(self) -> NamedUser:
cache_key = f"github_user_details:{self.github_id}"
cached = cache.get(cache_key)
if cached:
return cached
details = Github(self.access_token).get_user_by_id(self.github_id)
cache.set(cache_key, details, API_CACHE_TIMEOUT)
return details
def __str__(self):
return "@" + self.details().login
@staticmethod
@transaction.atomic
def login(request: Request, oauth_code: str) -> "GitHubUser":
response = requests.post(
"https://github.com/login/oauth/access_token",
json={
"client_id": settings.GITHUB_CLIENT_ID,
"client_secret": settings.GITHUB_CLIENT_SECRET,
"code": oauth_code,
},
headers={"Accept": "application/json"},
).json()
error: Optional[str] = response.get("error")
if error == "bad_verification_code":
raise BadOAuthCodeException()
elif error:
raise MalformedGitHubApiResponseException(
f"GitHub API login sent unknown error '{error}'."
)
try:
scope_str = str(response["scope"])
access_token = str(response["access_token"])
except KeyError:
raise MalformedGitHubApiResponseException()
details = Github(access_token).get_user()
try:
gh_user = GitHubUser.objects.get(github_id=details.id)
except GitHubUser.DoesNotExist:
gh_user = GitHubUser()
user = request.user
if (
user.is_anonymous
or isinstance(user, User)
and GitHubUser.objects.filter(user=user).get() is not None
):
user = User.objects.create_user(
username=details.login,
email=details.email,
password=None,
)
assert isinstance(user, User)
gh_user.user = user
gh_user.github_id = details.id
gh_user.access_token = access_token
gh_user.save()
profile: Profile = (
Profile.objects.filter(user=gh_user.user).first() or Profile()
)
profile.user = gh_user.user
profile.last_request_date = now()
profile.save()
if request.profile.is_anonymous() and profile.id != request.profile.id:
Scratch.objects.filter(owner=request.profile).update(owner=profile)
request.profile.delete()
login(request, gh_user.user)
request.profile = profile
request.session["profile_id"] = profile.id
return gh_user
class GitHubRepoBusyException(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "This repository is currently being pulled."
class GitHubRepo(models.Model):
owner = models.CharField(max_length=100)
repo = models.CharField(max_length=100)
branch = models.CharField(max_length=100, default="master", blank=False)
is_pulling = models.BooleanField(default=False)
last_pulled = models.DateTimeField(blank=True, null=True)
class Meta:
verbose_name = "GitHub repo"
verbose_name_plural = "GitHub repos"
def pull(self) -> None:
if self.is_pulling:
raise GitHubRepoBusyException()
self.is_pulling = True
self.save()
try:
repo_dir = self.get_dir()
remote_url = f"https://github.com/{self.owner}/{self.repo}"
if repo_dir.exists():
subprocess.run(
["git", "remote", "set-url", "origin", remote_url], cwd=repo_dir
)
subprocess.run(["git", "fetch", "origin", self.branch], cwd=repo_dir)
subprocess.run(
["git", "reset", "--hard", f"origin/{self.branch}"], cwd=repo_dir
)
subprocess.run(["git", "pull"], cwd=repo_dir)
else:
repo_dir.mkdir(parents=True)
subprocess.run(
[
"git",
"clone",
remote_url,
".",
"--depth",
"1",
"-b",
self.branch,
],
check=True,
cwd=repo_dir,
)
self.last_pulled = now()
self.save()
for project in Project.objects.filter(repo=self):
project.import_functions()
finally:
self.is_pulling = False
self.save()
def get_dir(self) -> Path:
return Path(settings.LOCAL_FILE_DIR) / "repos" / str(self.id)
def details(self, access_token: str) -> Repository:
cache_key = f"github_repo_details:{self.id}"
cached = cache.get(cache_key)
if cached:
return cached
details = Github(access_token).get_repo(f"{self.owner}/{self.repo}")
cache.set(cache_key, details, API_CACHE_TIMEOUT)
return details
def __str__(self):
return f"{self.owner}/{self.repo}#{self.branch} ({self.id})"
def get_html_url(self):
return f"https://github.com/{self.owner}/{self.repo}/tree/{self.branch}"
@receiver(models.signals.pre_delete, sender=GitHubRepo)
def delete_local_repo_dir(instance: GitHubRepo, **kwargs):
    """Remove the local checkout when its GitHubRepo row is deleted."""
    # Renamed from `dir`, which shadowed the builtin of the same name.
    repo_dir = instance.get_dir()
    if repo_dir.exists():
        shutil.rmtree(repo_dir)
| true | true |
1c3ac2d09d38edf07628177024ae5dbc3817b8e9 | 17,488 | py | Python | tensorflow/python/training/optimizer.py | adsar/tensorflow | b4b2575ec4bf7e6da2686505f61b5f16cb9273ab | [
"Apache-2.0"
] | 21 | 2016-03-10T11:55:45.000Z | 2021-02-03T02:49:11.000Z | tensorflow/python/training/optimizer.py | getnamo/tensorflow | b4b2575ec4bf7e6da2686505f61b5f16cb9273ab | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/optimizer.py | getnamo/tensorflow | b4b2575ec4bf7e6da2686505f61b5f16cb9273ab | [
"Apache-2.0"
] | 39 | 2016-03-25T05:13:09.000Z | 2020-06-16T01:30:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
class Optimizer(object):
  """Base class for optimizers.

  This class defines the API to add Ops to train a model.  You never use this
  class directly, but instead instantiate one of its subclasses such as
  `GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.

  ### Usage

  ```python
  # Create an optimizer with the desired parameters.
  opt = GradientDescentOptimizer(learning_rate=0.1)
  # Add Ops to the graph to minimize a cost by updating a list of variables.
  # "cost" is a Tensor, and the list of variables contains tf.Variable
  # objects.
  opt_op = opt.minimize(cost, var_list=<list of variables>)
  ```

  In the training program you will just have to run the returned Op.

  ```python
  # Execute opt_op to do one step of training:
  opt_op.run()
  ```

  ### Processing gradients before applying them.

  Calling `minimize()` takes care of both computing the gradients and
  applying them to the variables.  If you want to process the gradients
  before applying them you can instead use the optimizer in three steps:

  1.  Compute the gradients with `compute_gradients()`.
  2.  Process the gradients as you wish.
  3.  Apply the processed gradients with `apply_gradients()`.

  Example:

  ```python
  # Create an optimizer.
  opt = GradientDescentOptimizer(learning_rate=0.1)
  # Compute the gradients for a list of variables.
  grads_and_vars = opt.compute_gradients(loss, <list of variables>)
  # grads_and_vars is a list of tuples (gradient, variable).  Do whatever you
  # need to the 'gradient' part, for example cap them, etc.
  capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
  # Ask the optimizer to apply the capped gradients.
  opt.apply_gradients(capped_grads_and_vars)
  ```

  @@__init__
  @@minimize
  @@compute_gradients
  @@apply_gradients

  ### Gating Gradients

  Both `minimize()` and `compute_gradients()` accept a `gate_gradient` argument
  that controls the degree of parallelism during the application of the
  gradients.

  The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.

  <b>`GATE_NONE`</b>: Compute and apply gradients in parallel.  This provides
  the maximum parallelism in execution, at the cost of some non-reproducibility
  in the results.  For example the two gradients of `matmul` depend on the input
  values: With `GATE_NONE` one of the gradients could be applied to one of the
  inputs _before_ the other gradient is computed resulting in non-reproducible
  results.

  <b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
  they are used.  This prevents race conditions for Ops that generate gradients
  for multiple inputs where the gradients depend on the inputs.

  <b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
  before any one of them is used.  This provides the least parallelism but can
  be useful if you want to process all gradients before applying any of them.

  ### Slots

  Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
  allocate and manage additional variables associated with the variables to
  train.  These are called <i>Slots</i>.  Slots have names and you can ask the
  optimizer for the names of the slots that it uses.  Once you have a slot name
  you can ask the optimizer for the variable it created to hold the slot value.

  This can be useful if you want to log debug a training algorithm, report stats
  about the slots, etc.

  @@get_slot_names
  @@get_slot
  """

  # Values for gate_gradients.
  GATE_NONE = 0
  GATE_OP = 1
  GATE_GRAPH = 2

  def __init__(self, use_locking, name):
    """Create a new Optimizer.

    This must be called by the constructors of subclasses.

    Args:
      use_locking: Bool. If True apply use locks to prevent concurrent updates
        to variables.
      name: A non-empty string.  The name to use for accumulators created
        for the optimizer.

    Raises:
      ValueError: If name is malformed.
    """
    if not name:
      raise ValueError("Must specify the optimizer name")
    self._use_locking = use_locking
    self._name = name
    # Dictionary of slots.
    #  {slot_name : { variable_to_train: slot_for_the_variable, ...}, ... }
    self._slots = {}

  def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=GATE_OP, aggregation_method=None,
               colocate_gradients_with_ops=False, name=None):
    """Add operations to minimize `loss` by updating `var_list`.

    This method simply combines calls `compute_gradients()` and
    `apply_gradients()`. If you want to process the gradient before applying
    them call `compute_gradients()` and `apply_gradients()` explicitly instead
    of using this function.

    Args:
      loss: A `Tensor` containing the value to minimize.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      var_list: Optional list of `Variable` objects to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables in `var_list`.  If `global_step`
      was not `None`, that operation also increments `global_step`.

    Raises:
      ValueError: If some of the variables are not `Variable` objects.
    """
    grads_and_vars = self.compute_gradients(
        loss, var_list=var_list, gate_gradients=gate_gradients,
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    return self.apply_gradients(grads_and_vars, global_step=global_step,
                                name=name)

  def compute_gradients(self, loss, var_list=None, gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False):
    """Compute gradients of `loss` for the variables in `var_list`.

    This is the first part of `minimize()`.  It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable".  Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: A Tensor containing the value to minimize.
      var_list: Optional list of tf.Variable to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKey.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.

    Returns:
      A list of (gradient, variable) pairs.

    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid.
    """
    if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
                              Optimizer.GATE_GRAPH]:
      raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
                       "Optimizer.GATE_OP, Optimizer.GATE_GRAPH.  Not %s" %
                       gate_gradients)
    self._assert_valid_dtypes([loss])
    if var_list is None:
      var_list = variables.trainable_variables()
    for var in var_list:
      if not isinstance(var, variables.Variable):
        raise TypeError("Argument is not a tf.Variable: %s" % var)
    if not var_list:
      raise ValueError("No variables to optimize")
    var_refs = [v.ref() for v in var_list]
    grads = gradients.gradients(
        loss, var_refs, gate_gradients=(gate_gradients == Optimizer.GATE_OP),
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    if gate_gradients == Optimizer.GATE_GRAPH:
      grads = control_flow_ops.tuple(grads)
    grads_and_vars = list(zip(grads, var_list))
    self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None])
    return grads_and_vars

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Default to the
        name passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
    """
    # This is a default implementation of apply_gradients() that can be shared
    # by most optimizers.  It relies on the subclass implementing the following
    # methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
    grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works
    for g, v in grads_and_vars:
      if not isinstance(g, (ops.Tensor, ops.IndexedSlices, type(None))):
        raise TypeError(
            "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      if not isinstance(v, variables.Variable):
        raise TypeError(
            "Variable must be a tf.Variable: %s" % v)
      if g is not None:
        self._assert_valid_dtypes([g, v])
    var_list = [v for g, v in grads_and_vars if g is not None]
    if not var_list:
      raise ValueError("No gradients provided for any variable: %s" %
                       (grads_and_vars,))
    with ops.control_dependencies(None):
      self._create_slots(var_list)
    update_ops = []
    with ops.op_scope([], name, self._name) as name:
      self._prepare()
      for grad, var in grads_and_vars:
        # A gradient may legitimately be None (no gradient for this variable,
        # per the compute_gradients() contract).  Test identity rather than
        # truthiness: the truth value of a Tensor is not well defined, and
        # `not grad` would misbehave on Tensor gradients.
        if grad is None:
          continue
        # We colocate all ops created in _apply_dense or _apply_sparse
        # on the same device as the variable.
        with ops.name_scope("update_" + var.op.name), ops.colocate_with(var):
          if isinstance(grad, ops.Tensor):
            update_ops.append(self._apply_dense(grad, var))
          else:
            update_ops.append(self._apply_sparse(grad, var))
      if global_step is None:
        return self._finish(update_ops, name)
      else:
        with ops.control_dependencies([self._finish(update_ops, "update")]):
          with ops.colocate_with(global_step):
            return state_ops.assign_add(global_step, 1, name=name).op

  def get_slot(self, var, name):
    """Return a slot named `name` created for `var` by the Optimizer.

    Some `Optimizer` subclasses use additional variables.  For example
    `Momentum` and `Adagrad` use variables to accumulate updates.  This method
    gives access to these `Variable` objects if for some reason you need them.

    Use `get_slot_names()` to get the list of slot names created by the
    `Optimizer`.

    Args:
      var: A variable passed to `minimize()` or `apply_gradients()`.
      name: A string.

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    named_slots = self._slots.get(name, None)
    if not named_slots:
      return None
    return named_slots.get(var, None)

  def get_slot_names(self):
    """Return a list of the names of slots created by the `Optimizer`.

    See `get_slot()`.

    Returns:
      A list of strings.
    """
    return sorted(self._slots.keys())

  def _assert_valid_dtypes(self, tensors):
    """Asserts tensors are all valid types (see `_valid_dtypes`).

    Args:
      tensors: Tensors to check.

    Raises:
      ValueError: If any tensor is not a valid type.
    """
    valid_dtypes = self._valid_dtypes()
    for t in tensors:
      dtype = t.dtype.base_dtype
      if dtype not in valid_dtypes:
        raise ValueError(
            "Invalid type %r for %s, expected: %s." % (
                dtype, t.name, list(valid_dtypes)))

  # --------------
  # Methods to be implemented by subclasses if they want to use the
  # inherited implementation of apply_gradients() or compute_gradients().
  # --------------
  def _valid_dtypes(self):
    """Valid types for loss, variables and gradients.

    Defaults to `float32`. Subclasses should override to allow other types.

    Returns:
      Valid types for loss, variables and gradients.
    """
    return {dtypes.float32}

  def _create_slots(self, var_list):
    """Create all slots needed by the variables.

    Args:
      var_list: A list of `Variable` objects.
    """
    # No slots needed by default
    pass

  def _prepare(self):
    """Create all needed tensors before applying gradients.

    This is called with the name_scope using the "name" that
    users have chosen for the application of gradients.
    """
    pass

  def _apply_dense(self, grad, var):
    """Add ops to apply dense gradients to `var`.

    Args:
      grad: A `Tensor`.
      var: A `Variable` object.

    Return:
      An `Operation`.
    """
    raise NotImplementedError()

  def _apply_sparse(self, grad, var):
    """Add ops to apply sparse gradients to `var`.

    Args:
      grad: `IndexedSlices`.
      var: A `Variable` object.

    Return:
      An `Operation`.
    """
    raise NotImplementedError()

  def _finish(self, update_ops, name_scope):
    """Do what is needed to finish the update.

    This is called with the `name_scope` using the "name" that
    users have chosen for the application of gradients.

    Args:
      update_ops: List of `Operation` objects to update variables.  This list
        contains the values returned by the `_apply_dense()` and
        `_apply_sparse()` calls.
      name_scope: String.  Name to use for the returned operation.

    Returns:
      The operation to apply updates.
    """
    return control_flow_ops.group(*update_ops, name=name_scope)

  # --------------
  # Utility methods for subclasses.
  # --------------
  def _slot_dict(self, slot_name):
    """Returns a dict for caching slots created under the given name.

    Args:
      slot_name: Name for the slot.

    Returns:
      A dict that maps primary `Variable` objects to the slot created
      for that variable, under the given slot name.
    """
    named_slots = self._slots.get(slot_name, None)
    if named_slots is None:
      named_slots = {}
      self._slots[slot_name] = named_slots
    return named_slots

  def _get_or_make_slot(self, var, val, slot_name, op_name):
    """Find or create a slot for a variable.

    Args:
      var: A `Variable` object.
      val: A `Tensor`.  The initial value of the slot.
      slot_name: Name for the slot.
      op_name: Name to use when scoping the Variable that
        needs to be created for  the slot.

    Returns:
      A `Variable` object.
    """
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_slot(var, val, op_name)
    return named_slots[var]

  def _zeros_slot(self, var, slot_name, op_name):
    """Find or create a slot initialized with 0.0.

    Args:
      var: A `Variable` object.
      slot_name: Name for the slot.
      op_name: Name to use when scoping the Variable that
        needs to be created for  the slot.

    Returns:
      A `Variable` object.
    """
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[var]
| 36.132231 | 80 | 0.687043 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
class Optimizer(object):
  """Base class for TensorFlow optimizers.

  Subclasses (e.g. gradient descent, Adagrad, Momentum) implement the
  `_apply_dense()` / `_apply_sparse()` hooks; this class supplies the shared
  `minimize()` / `compute_gradients()` / `apply_gradients()` machinery plus
  slot bookkeeping for optimizer-owned auxiliary variables.
  """

  # Gating levels for gradient computation (see compute_gradients()).
  GATE_NONE = 0   # Compute and apply gradients fully in parallel.
  GATE_OP = 1     # Per op, compute all its gradients before using them.
  GATE_GRAPH = 2  # Compute every gradient before applying any of them.

  def __init__(self, use_locking, name):
    """Creates the optimizer.

    Args:
      use_locking: Bool. If True, use locks to prevent concurrent updates.
      name: Non-empty string used to name accumulators the optimizer creates.

    Raises:
      ValueError: If `name` is empty.
    """
    if not name:
      raise ValueError("Must specify the optimizer name")
    self._use_locking = use_locking
    self._name = name
    # {slot_name: {variable_to_train: slot_variable, ...}, ...}
    self._slots = {}

  def minimize(self, loss, global_step=None, var_list=None,
               gate_gradients=GATE_OP, aggregation_method=None,
               colocate_gradients_with_ops=False, name=None):
    """Adds ops minimizing `loss`; compute_gradients + apply_gradients."""
    grads_and_vars = self.compute_gradients(
        loss, var_list=var_list, gate_gradients=gate_gradients,
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    return self.apply_gradients(grads_and_vars, global_step=global_step,
                                name=name)

  def compute_gradients(self, loss, var_list=None, gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False):
    """Returns (gradient, variable) pairs for `loss`; gradient may be None."""
    if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
                              Optimizer.GATE_GRAPH]:
      raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
                       "Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
                       gate_gradients)
    self._assert_valid_dtypes([loss])
    if var_list is None:
      var_list = variables.trainable_variables()
    for var in var_list:
      if not isinstance(var, variables.Variable):
        raise TypeError("Argument is not a tf.Variable: %s" % var)
    if not var_list:
      raise ValueError("No variables to optimize")
    var_refs = [v.ref() for v in var_list]
    grads = gradients.gradients(
        loss, var_refs, gate_gradients=(gate_gradients == Optimizer.GATE_OP),
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    if gate_gradients == Optimizer.GATE_GRAPH:
      grads = control_flow_ops.tuple(grads)
    grads_and_vars = list(zip(grads, var_list))
    self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None])
    return grads_and_vars

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Returns an op applying the gradients; increments `global_step` if set.

    Relies on subclasses implementing _create_slots(), _prepare(),
    _apply_dense() and _apply_sparse().
    """
    grads_and_vars = tuple(grads_and_vars)  # Allow repeated iteration.
    for g, v in grads_and_vars:
      if not isinstance(g, (ops.Tensor, ops.IndexedSlices, type(None))):
        raise TypeError(
            "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      if not isinstance(v, variables.Variable):
        raise TypeError(
            "Variable must be a tf.Variable: %s" % v)
      if g is not None:
        self._assert_valid_dtypes([g, v])
    var_list = [v for g, v in grads_and_vars if g is not None]
    if not var_list:
      raise ValueError("No gradients provided for any variable: %s" %
                       (grads_and_vars,))
    with ops.control_dependencies(None):
      self._create_slots(var_list)
    update_ops = []
    with ops.op_scope([], name, self._name) as name:
      self._prepare()
      for grad, var in grads_and_vars:
        # Gradients may legitimately be None; compare by identity because the
        # truth value of a Tensor is not well defined (`not grad` is wrong).
        if grad is None:
          continue
        # Colocate the update ops with the variable they update.
        with ops.name_scope("update_" + var.op.name), ops.colocate_with(var):
          if isinstance(grad, ops.Tensor):
            update_ops.append(self._apply_dense(grad, var))
          else:
            update_ops.append(self._apply_sparse(grad, var))
      if global_step is None:
        return self._finish(update_ops, name)
      else:
        with ops.control_dependencies([self._finish(update_ops, "update")]):
          with ops.colocate_with(global_step):
            return state_ops.assign_add(global_step, 1, name=name).op

  def get_slot(self, var, name):
    """Returns the slot variable named `name` for `var`, or None."""
    named_slots = self._slots.get(name, None)
    if not named_slots:
      return None
    return named_slots.get(var, None)

  def get_slot_names(self):
    """Returns the sorted list of slot names created by this optimizer."""
    return sorted(self._slots.keys())

  def _assert_valid_dtypes(self, tensors):
    """Raises ValueError if any tensor's dtype is not in _valid_dtypes()."""
    valid_dtypes = self._valid_dtypes()
    for t in tensors:
      dtype = t.dtype.base_dtype
      if dtype not in valid_dtypes:
        raise ValueError(
            "Invalid type %r for %s, expected: %s." % (
                dtype, t.name, [v for v in valid_dtypes]))

  def _valid_dtypes(self):
    """Valid dtypes for losses/variables/gradients; subclasses may widen."""
    return set([dtypes.float32])

  def _create_slots(self, var_list):
    """Creates the slots the optimizer needs; no slots by default."""
    pass

  def _prepare(self):
    """Hook to create needed tensors before applying gradients."""
    pass

  def _apply_dense(self, grad, var):
    """Adds ops applying a dense `grad` to `var`; subclass responsibility."""
    raise NotImplementedError()

  def _apply_sparse(self, grad, var):
    """Adds ops applying IndexedSlices `grad` to `var`; subclass hook."""
    raise NotImplementedError()

  def _finish(self, update_ops, name_scope):
    """Groups `update_ops` into the final update op."""
    return control_flow_ops.group(*update_ops, name=name_scope)

  def _slot_dict(self, slot_name):
    """Returns (creating if needed) the {variable: slot} dict for a name."""
    named_slots = self._slots.get(slot_name, None)
    if named_slots is None:
      named_slots = {}
      self._slots[slot_name] = named_slots
    return named_slots

  def _get_or_make_slot(self, var, val, slot_name, op_name):
    """Finds or creates a slot for `var` initialized to `val`."""
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_slot(var, val, op_name)
    return named_slots[var]

  def _zeros_slot(self, var, slot_name, op_name):
    """Finds or creates a slot for `var` initialized with zeros."""
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[var]
| true | true |
1c3ac313d13f15b5d98c34af76ec504b207cd802 | 1,682 | py | Python | 2021/20/20a.py | befeleme/aoc | dec81ceeffb8746af28eeeb7f925b219f2445079 | [
"WTFPL"
] | 3 | 2021-12-07T17:46:48.000Z | 2022-02-23T13:33:19.000Z | 2021/20/20a.py | befeleme/aoc | dec81ceeffb8746af28eeeb7f925b219f2445079 | [
"WTFPL"
] | null | null | null | 2021/20/20a.py | befeleme/aoc | dec81ceeffb8746af28eeeb7f925b219f2445079 | [
"WTFPL"
] | null | null | null | import fileinput
from itertools import product
from collections import Counter
def find_neighbors(coor):
    """Return coor's 3x3 neighborhood, ordered row-major (by y, then x).

    Relies on the module-level OFFSETS and on sort stability: sorting only on
    the y coordinate keeps OFFSETS' x order inside each row.
    """
    cx, cy = coor
    shifted = [(cx + dx, cy + dy) for dx, dy in OFFSETS]
    return sorted(shifted, key=lambda point: point[1])
def read_binary_number(pixels, image):
    """Decode the given pixels ('#'=1, '.'=0, missing='.') as a binary int."""
    bit_for = {"#": "1", ".": "0"}
    bits = "".join(bit_for[image.get(coord, ".")] for coord in pixels)
    return int(bits, 2)
def print_img(image, max_len):
    """Print the padded image to stdout (uses the module-level `offset`)."""
    span = range(-offset, max_len + offset)
    for row in span:
        rendered = "".join(image.get((col, row), ".") for col in span)
        print(rendered)
def enhance(image):
    """Apply one enhancement round to every tracked pixel.

    Builds a new dict so the lookup always reads the pre-round image;
    `algorithm` is the module-level enhancement string.
    """
    return {
        pixel: algorithm[read_binary_number(find_neighbors(pixel), image)]
        for pixel in image
    }
# Each pixel has 8 neighbors plus itself = 9 pixels per enhancement lookup.
OFFSETS = list(product(range(-1, 2), repeat=2))
# Input layout: line 0 is the enhancement algorithm string, line 1 is blank,
# the rest is the image.
data = [line.strip() for line in fileinput.input()]
algorithm = data[0]
image = data[2:]
image_matrix = {}
max_len = len(image)
# Border padding around the image; presumably sized so 50 enhancement rounds
# (growth of ~1 pixel per side per round) stay inside it -- TODO confirm.
offset = 110
# Pre-fill the padded region with dark pixels.
for y in range(-offset, max_len+offset):
    for x in range(-offset, max_len+offset):
        image_matrix[(x, y)] = "."
# Overlay the parsed input; keys are (x, y) = (column, row).
for col_index, row in enumerate(image):
    for row_index, char in enumerate(row):
        image_matrix[(row_index, col_index)] = char
# Part 2: 50 enhancement rounds.
for i in range(50):
    image_matrix = enhance(image_matrix)
c = 0
# Count lit pixels only inside a window; the padded border is unreliable since
# the infinite background flickers (presumably algorithm[0] == '#' for the
# real input). NOTE(review): -60/160 are hand-tuned bounds -- confirm they
# contain the entire true image after 50 rounds.
for x, y in image_matrix:
    if x <= -60:
        continue
    if y >= 160:
        continue
    else:
        if image_matrix[x, y] == "#":
            c += 1
print(c) | 25.104478 | 61 | 0.613555 | import fileinput
from itertools import product
from collections import Counter
def find_neighbors(coor):
    """Return coor's 3x3 neighborhood (offsets from OFFSETS), sorted by y.

    The sort is stable, so within each row the x order from OFFSETS is kept,
    giving the row-major order the enhancement lookup expects.
    """
    n = [(coor[0] + x[0], coor[1] + x[1]) for x in OFFSETS]
    n.sort(key=lambda y: y[1])
    return n
def read_binary_number(pixels, image):
    """Read the given pixels as a binary number ('#'=1, '.'=0)."""
    bin_str = ""
    converter = {"#": "1", ".": "0"}
    for coor in pixels:
        pixel_val = image.get(coor, ".")  # Unknown pixels default to dark.
        bin_str += converter[pixel_val]
    return int(bin_str, 2)
def print_img(image, max_len):
    """Print the image to stdout; relies on the module-level `offset` border."""
    for x in range(-offset, max_len+offset):
        for y in range(-offset, max_len+offset):
            val = image.get((y,x), ".")
            print(val, end="")
        print()
def enhance(image):
    """Return one enhancement round applied to every pixel tracked in image."""
    # Build a fresh dict so lookups always read the pre-round image.
    new_image = {}
    for pixel in image:
        bn = read_binary_number(find_neighbors(pixel), image)
        new_image[pixel] = algorithm[bn]  # `algorithm` is a module global.
    return new_image
# 3x3 neighborhood offsets (pixel itself included): 9 lookups per pixel.
OFFSETS = list(product(range(-1, 2), repeat=2))
# Line 0: enhancement algorithm; line 1: blank; rest: the image.
data = [line.strip() for line in fileinput.input()]
algorithm = data[0]
image = data[2:]
image_matrix = {}
max_len = len(image)
# Border padding; presumably large enough for 50 rounds of growth -- confirm.
offset = 110
for y in range(-offset, max_len+offset):
    for x in range(-offset, max_len+offset):
        image_matrix[(x, y)] = "."
# Overlay the parsed input; keys are (x, y) = (column, row).
for col_index, row in enumerate(image):
    for row_index, char in enumerate(row):
        image_matrix[(row_index, col_index)] = char
for i in range(50):
    image_matrix = enhance(image_matrix)
c = 0
# Count lit pixels inside a window only; the padded border flickers with the
# infinite background. NOTE(review): -60/160 are hand-tuned bounds.
for x, y in image_matrix:
    if x <= -60:
        continue
    if y >= 160:
        continue
    else:
        if image_matrix[x, y] == "#":
            c += 1
print(c) | true | true |
1c3ac3df3b724f75d33832bc36122e5a31a75e33 | 371 | py | Python | setup.py | zacharyvoase/django-conch | b964715e52f23e998811b6ffc95a481abff141ad | [
"Unlicense"
] | 11 | 2015-02-11T18:00:51.000Z | 2020-12-27T19:40:33.000Z | setup.py | zacharyvoase/django-conch | b964715e52f23e998811b6ffc95a481abff141ad | [
"Unlicense"
] | null | null | null | setup.py | zacharyvoase/django-conch | b964715e52f23e998811b6ffc95a481abff141ad | [
"Unlicense"
] | 3 | 2015-02-06T07:15:02.000Z | 2019-08-21T06:05:22.000Z | from setuptools import setup, find_packages
# Packaging metadata for the django-conch distribution.
setup(
    name='django-conch',
    version='0.0.1',
    description='Expose the Django shell as an SSH server.',
    author='Zachary Voase',
    author_email='z@zacharyvoase.com',
    url='https://github.com/zacharyvoase/django-conch',
    packages=find_packages(),
    # Twisted presumably supplies the SSH server machinery (twisted.conch)
    # -- confirm the minimum version requirement is still accurate.
    install_requires=[
        'Twisted>=12.3.0',
    ],
)
| 24.733333 | 60 | 0.663073 | from setuptools import setup, find_packages
# setuptools entry point: distribution metadata for django-conch.
setup(
    name='django-conch',
    version='0.0.1',
    description='Expose the Django shell as an SSH server.',
    author='Zachary Voase',
    author_email='z@zacharyvoase.com',
    url='https://github.com/zacharyvoase/django-conch',
    packages=find_packages(),
    # Sole runtime dependency; Twisted presumably provides the SSH layer.
    install_requires=[
        'Twisted>=12.3.0',
    ],
)
| true | true |
1c3ac6e13ded41f757bb93c822f7b8a62f1e0db5 | 1,562 | py | Python | examples/docs_snippets/docs_snippets_tests/concepts_tests/solids_pipelines_tests/test_op_events.py | facultyai/dagster | 779e27faa3e46b7d043cb9624617e655a9ed570c | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/concepts_tests/solids_pipelines_tests/test_op_events.py | facultyai/dagster | 779e27faa3e46b7d043cb9624617e655a9ed570c | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/concepts_tests/solids_pipelines_tests/test_op_events.py | facultyai/dagster | 779e27faa3e46b7d043cb9624617e655a9ed570c | [
"Apache-2.0"
] | 1 | 2019-09-11T03:02:27.000Z | 2019-09-11T03:02:27.000Z | import pytest
from dagster import Failure, graph
from docs_snippets.concepts.solids_pipelines.op_events import (
my_asset_op,
my_expectation_op,
my_failure_metadata_op,
my_failure_op,
my_metadata_expectation_op,
my_metadata_output,
my_named_yield_op,
my_retry_op,
my_simple_return_op,
my_simple_yield_op,
)
def execute_op_in_graph(an_op, **kwargs):
    """Run *an_op* as the sole node of a throwaway graph, in process."""

    @graph
    def my_graph():
        return an_op(**kwargs) if kwargs else an_op()

    return my_graph.execute_in_process()
def generate_stub_input_values(op):
    """Build stub input kwargs for *op*, keyed by dagster type display name."""
    stub_by_type_name = {"String": "abc", "Int": 1, "Any": []}
    return {
        input_def.name: stub_by_type_name[str(input_def.dagster_type.display_name)]
        for input_def in op.input_defs
    }
def test_ops_compile_and_execute():
    """Every documented op should execute successfully with stub inputs."""
    ops_under_test = (
        my_simple_yield_op,
        my_simple_return_op,
        my_named_yield_op,
        my_metadata_output,
        my_metadata_expectation_op,
        my_retry_op,
        my_asset_op,
        my_expectation_op,
    )
    for op_under_test in ops_under_test:
        stub_inputs = generate_stub_input_values(op_under_test)
        result = execute_op_in_graph(op_under_test, **stub_inputs)
        assert result
        assert result.success
def test_failure_op():
    """my_failure_op should raise dagster's Failure when executed."""
    with pytest.raises(Failure):
        execute_op_in_graph(my_failure_op)
def test_failure_metadata_op():
    """my_failure_metadata_op should raise Failure (with metadata attached)."""
    with pytest.raises(Failure):
        execute_op_in_graph(my_failure_metadata_op)
| 22.970588 | 95 | 0.684379 | import pytest
from dagster import Failure, graph
from docs_snippets.concepts.solids_pipelines.op_events import (
my_asset_op,
my_expectation_op,
my_failure_metadata_op,
my_failure_op,
my_metadata_expectation_op,
my_metadata_output,
my_named_yield_op,
my_retry_op,
my_simple_return_op,
my_simple_yield_op,
)
def execute_op_in_graph(an_op, **kwargs):
    """Execute a single op inside a throwaway in-process graph."""
    @graph
    def my_graph():
        if kwargs:
            return an_op(**kwargs)
        else:
            return an_op()
    result = my_graph.execute_in_process()
    return result
def generate_stub_input_values(op):
    """Map each of op's input names to a stub value for its dagster type."""
    input_values = {}
    default_values = {"String": "abc", "Int": 1, "Any": []}
    input_defs = op.input_defs
    for input_def in input_defs:
        input_values[input_def.name] = default_values[str(input_def.dagster_type.display_name)]
    return input_values
def test_ops_compile_and_execute():
    """Each documented op should run to success with stub inputs."""
    ops = [
        my_simple_yield_op,
        my_simple_return_op,
        my_named_yield_op,
        my_metadata_output,
        my_metadata_expectation_op,
        my_retry_op,
        my_asset_op,
        my_expectation_op,
    ]
    for op in ops:
        input_values = generate_stub_input_values(op)
        result = execute_op_in_graph(op, **input_values)
        assert result
        assert result.success
def test_failure_op():
    """my_failure_op should raise dagster's Failure when executed."""
    with pytest.raises(Failure):
        execute_op_in_graph(my_failure_op)
def test_failure_metadata_op():
    """my_failure_metadata_op should raise Failure (with metadata attached)."""
    with pytest.raises(Failure):
        execute_op_in_graph(my_failure_metadata_op)
| true | true |
1c3ac88eb31bff41592b4cd9757639d29e3aa0c2 | 3,988 | py | Python | gazoo_device/gdm_cli.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | 14 | 2020-11-05T23:23:32.000Z | 2022-03-01T18:59:29.000Z | gazoo_device/gdm_cli.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/gdm_cli.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | 5 | 2021-05-20T22:52:51.000Z | 2022-02-21T08:46:21.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gazoo Device Manager CLI.
The CLI is generated dynamically by Python Fire:
https://github.com/google/python-fire.
"""
import sys
from typing import Dict, NoReturn, Optional, Sequence
import fire
import gazoo_device
from gazoo_device import errors
from gazoo_device import extensions
from gazoo_device import fire_manager
from gazoo_device import fire_patch
from gazoo_device import gdm_logger
from gazoo_device import package_registrar
logger = gdm_logger.get_logger()
VERSION_FLAG = "-v"
FLAG_MARKER = "--"
OMIT_FLAGS = ["help"]
_CLI_NAME = "gdm"
def _get_flags(args: Sequence[str]) -> Dict[str, bool]:
  """Parses leading flags out of an array of CLI args.

  Parsing stops at the first argument without a flag marker; flags listed in
  OMIT_FLAGS are dropped.

  Args:
    args: CLI arguments provided by the user.

  Returns:
    Parsed flags to pass to the CLI.
  """
  parsed_flags = {}
  for candidate in args:
    if not candidate.startswith(FLAG_MARKER):
      break  # Everything after the first non-flag belongs to the command.
    name = candidate[len(FLAG_MARKER):]
    if name and name not in OMIT_FLAGS:
      parsed_flags[name] = True
  return parsed_flags
def _create_manager_for_cli(
    manager_kwargs: Dict[str, bool]) -> fire_manager.FireManager:
  """Returns a Manager instance to be used by the CLI.

  The returned class combines FireManager's CLI-only methods with the
  Manager CLI mixins contributed by registered extension packages.

  Args:
    manager_kwargs: FireManager __init__ keyword arguments.
  """
  manager_bases = (*extensions.manager_cli_mixins, fire_manager.FireManager)
  extended_manager_class = type("ExtendedFireManager", manager_bases, {})
  logger.debug("ExtendedFireManager method resolution order: "
               f"{extended_manager_class.__mro__}")
  return extended_manager_class(**manager_kwargs)
def _execute_command(command: Optional[str] = None,
                     cli_name: str = _CLI_NAME) -> int:
    """Executes the CLI command through Python Fire.

    Args:
        command: Space-separated command line. If None, sys.argv[1:] is used.
        cli_name: Name of the CLI executable shown by Fire.

    Returns:
        Exit code: 0 on success, 1 on CLI/device errors, 2 on Ctrl-C.
    """
    # Parse flags out of commands. E.g. "gdm --debug - devices" ->
    # flags = {"debug": True}, commands = ["-", "devices"].
    if command:
        args = command.split()
    else:
        args = sys.argv[1:]
    flags = _get_flags(args)
    # Strip only arguments that are actually flags. The previous filter
    # compared arg[len(FLAG_MARKER):] for *every* arg, so a positional
    # argument such as "xxdebug" was silently dropped whenever "--debug"
    # was also passed.
    commands = [arg for arg in args
                if not (arg.startswith(FLAG_MARKER)
                        and arg[len(FLAG_MARKER):] in flags)]
    manager_inst = _create_manager_for_cli(flags)
    # Execute CLI command
    exit_code = 0
    try:
        fire_patch.apply_patch()
        fire.Fire(manager_inst, commands, name=cli_name)
    except (ValueError, errors.DeviceError) as err:
        logger.error(repr(err))
        exit_code = 1
    except KeyboardInterrupt:
        exit_code = 2
    finally:
        # Always release Manager resources, even on error or Ctrl-C.
        manager_inst.close()
    return exit_code
def main(command: Optional[str] = None, cli_name: str = _CLI_NAME) -> NoReturn:
    """Main function for Gazoo Device Manager (gazoo_device) package.

    Args:
        command: Passed to Python Fire. If None, sys.argv are used instead.
        cli_name: Name of the CLI executable ("gdm").

    Raises:
        SystemExit: always calls sys.exit(<return code>).
    """
    package_registrar.import_and_register_cli_extension_packages()
    version_requested = (
        VERSION_FLAG in sys.argv or (command and VERSION_FLAG in command))
    if version_requested:
        logger.info(f"Gazoo Device Manager {gazoo_device.version}")
        registered_packages = extensions.get_registered_package_info()
        logger.info(f"Registered extension packages: {registered_packages}")
        sys.exit(0)
    sys.exit(_execute_command(command, cli_name))
# Entry point when executed as a script; main() always exits via sys.exit().
if __name__ == "__main__":
    main()
| 29.984962 | 80 | 0.722919 |
import sys
from typing import Dict, NoReturn, Optional, Sequence
import fire
import gazoo_device
from gazoo_device import errors
from gazoo_device import extensions
from gazoo_device import fire_manager
from gazoo_device import fire_patch
from gazoo_device import gdm_logger
from gazoo_device import package_registrar
logger = gdm_logger.get_logger()
# CLI flag that prints version information and exits instead of running a command.
VERSION_FLAG = "-v"
FLAG_MARKER = "--"  # prefix that marks a CLI argument as a flag
OMIT_FLAGS = ["help"]  # flag names that _get_flags must never return
_CLI_NAME = "gdm"  # executable name passed to Python Fire
def _get_flags(args: Sequence[str]) -> Dict[str, bool]:
    """Parses leading '--' flags out of CLI args.

    Flags named in OMIT_FLAGS are not returned.

    Args:
        args: CLI arguments provided by the user.

    Returns:
        Mapping of flag name -> True for each parsed flag.
    """
    flags = {}
    for arg in args:
        if arg.startswith(FLAG_MARKER):
            flag_name = arg[len(FLAG_MARKER):]
            if flag_name and flag_name not in OMIT_FLAGS:
                flags[flag_name] = True
        else:
            break  # flags are only accepted before the first positional arg
    return flags
def _create_manager_for_cli(
    manager_kwargs: Dict[str, bool]) -> fire_manager.FireManager:
    """Returns the Manager instance used by the CLI.

    The dynamically created class combines FireManager with the Manager CLI
    mixins contributed by registered extension packages.

    Args:
        manager_kwargs: FireManager __init__ keyword arguments.
    """
    extended_manager_class = type(
        "ExtendedFireManager",
        (*extensions.manager_cli_mixins, fire_manager.FireManager),
        {})
    logger.debug("ExtendedFireManager method resolution order: "
                 f"{extended_manager_class.__mro__}")
    return extended_manager_class(**manager_kwargs)
def _execute_command(command: Optional[str] = None,
                     cli_name: str = _CLI_NAME) -> int:
    """Executes the CLI command through Python Fire.

    Args:
        command: Space-separated command line. If None, sys.argv[1:] is used.
        cli_name: Name of the CLI executable shown by Fire.

    Returns:
        Exit code: 0 on success, 1 on CLI/device errors, 2 on Ctrl-C.
    """
    if command:
        args = command.split()
    else:
        args = sys.argv[1:]
    flags = _get_flags(args)
    # Strip only arguments that are actually flags. The previous filter
    # compared arg[len(FLAG_MARKER):] for *every* arg, so a positional
    # argument such as "xxdebug" was silently dropped whenever "--debug"
    # was also passed.
    commands = [arg for arg in args
                if not (arg.startswith(FLAG_MARKER)
                        and arg[len(FLAG_MARKER):] in flags)]
    manager_inst = _create_manager_for_cli(flags)
    exit_code = 0
    try:
        fire_patch.apply_patch()
        fire.Fire(manager_inst, commands, name=cli_name)
    except (ValueError, errors.DeviceError) as err:
        logger.error(repr(err))
        exit_code = 1
    except KeyboardInterrupt:
        exit_code = 2
    finally:
        # Always release Manager resources, even on error or Ctrl-C.
        manager_inst.close()
    return exit_code
def main(command: Optional[str] = None, cli_name: str = _CLI_NAME) -> NoReturn:
    """Main function for the gazoo_device CLI; never returns normally.

    Args:
        command: Passed to Python Fire. If None, sys.argv are used instead.
        cli_name: Name of the CLI executable ("gdm").

    Raises:
        SystemExit: always calls sys.exit(<return code>).
    """
    package_registrar.import_and_register_cli_extension_packages()
    if VERSION_FLAG in sys.argv or (command and VERSION_FLAG in command):
        logger.info(f"Gazoo Device Manager {gazoo_device.version}")
        package_versions = extensions.get_registered_package_info()
        logger.info(f"Registered extension packages: {package_versions}")
        sys.exit(0)
    sys.exit(_execute_command(command, cli_name))
# Entry point when executed as a script; main() always exits via sys.exit().
if __name__ == "__main__":
    main()
| true | true |
1c3ac904e382f2b3305eb0cb5780002ae3f4fb61 | 560 | py | Python | launcher/tray/Main.py | liblit/sampler | eaedba51ee8367b9b355e6f85a6c677878160d49 | [
"BSD-3-Clause"
] | null | null | null | launcher/tray/Main.py | liblit/sampler | eaedba51ee8367b9b355e6f85a6c677878160d49 | [
"BSD-3-Clause"
] | null | null | null | launcher/tray/Main.py | liblit/sampler | eaedba51ee8367b9b355e6f85a6c677878160d49 | [
"BSD-3-Clause"
] | 1 | 2019-01-08T11:09:59.000Z | 2019-01-08T11:09:59.000Z | import gi
gi.require_version('Gtk', '3.0')
from contextlib import closing
from gi.repository import Gio, Gtk
from os.path import abspath, dirname, join
from sys import path
path.insert(1, join(dirname(dirname(abspath(__file__))), 'common'))
from NotificationIcon import NotificationIcon
import Keys
import Service
########################################################################
def main():
    """Run the tray notification icon until the GTK main loop exits.

    Does nothing when another instance already owns the service name.
    """
    if not Service.unique():
        return
    with closing(NotificationIcon(Keys.settings())):
        Gtk.main()
| 20.740741 | 72 | 0.635714 | import gi
gi.require_version('Gtk', '3.0')
from contextlib import closing
from gi.repository import Gio, Gtk
from os.path import abspath, dirname, join
from sys import path
path.insert(1, join(dirname(dirname(abspath(__file__))), 'common'))
from NotificationIcon import NotificationIcon
import Keys
import Service
| true | true |
1c3ac9352ff35a89ba9ffe44e2a4b1f37bb4ce16 | 1,469 | py | Python | gcloud/contrib/analysis/urls.py | brookylin/bk-sops | 6c0cf78879849921c4ff6ad6bf3bb82dfdf5b973 | [
"Apache-2.0"
] | 881 | 2019-03-25T02:45:42.000Z | 2022-03-30T09:10:49.000Z | gcloud/contrib/analysis/urls.py | m0re-work/bk-sops | d03ba8a4ee0781c6daaf0dd38a7369dc82669f7d | [
"Apache-2.0"
] | 3,303 | 2019-03-25T04:18:03.000Z | 2022-03-31T11:52:03.000Z | gcloud/contrib/analysis/urls.py | m0re-work/bk-sops | d03ba8a4ee0781c6daaf0dd38a7369dc82669f7d | [
"Apache-2.0"
] | 395 | 2019-03-25T02:53:36.000Z | 2022-03-31T08:37:28.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import url
from gcloud.contrib.analysis import views
# URL routes for the analysis app. Every pattern is anchored (^...$) so a
# route matches only its exact path.
urlpatterns = [
    url(r"^query_instance_by_group/$", views.query_instance_by_group),
    url(r"^query_template_by_group/$", views.query_template_by_group),
    url(r"^query_atom_by_group/$", views.query_atom_by_group),
    url(r"^query_appmaker_by_group/$", views.query_appmaker_by_group),
    url(r"^template/$", views.analysis_home),
    url(r"^instance/$", views.analysis_home),
    url(r"^appmaker/$", views.analysis_home),
    url(r"^atom/$", views.analysis_home),
    url(r"^get_task_category/$", views.get_task_category),
    url(r"^get_biz_useage/(?P<query>\w+)/$", views.get_biz_useage),
    # Fix: was the unanchored r"get_component_list/", which (unlike every
    # sibling pattern) also matched any URL merely containing the substring.
    url(r"^get_component_list/$", views.get_component_list),
]
| 48.966667 | 115 | 0.756297 |
from django.conf.urls import url
from gcloud.contrib.analysis import views
# URL routes for the analysis app. Every pattern is anchored (^...$) so a
# route matches only its exact path.
urlpatterns = [
    url(r"^query_instance_by_group/$", views.query_instance_by_group),
    url(r"^query_template_by_group/$", views.query_template_by_group),
    url(r"^query_atom_by_group/$", views.query_atom_by_group),
    url(r"^query_appmaker_by_group/$", views.query_appmaker_by_group),
    url(r"^template/$", views.analysis_home),
    url(r"^instance/$", views.analysis_home),
    url(r"^appmaker/$", views.analysis_home),
    url(r"^atom/$", views.analysis_home),
    url(r"^get_task_category/$", views.get_task_category),
    url(r"^get_biz_useage/(?P<query>\w+)/$", views.get_biz_useage),
    # Fix: was the unanchored r"get_component_list/", which (unlike every
    # sibling pattern) also matched any URL merely containing the substring.
    url(r"^get_component_list/$", views.get_component_list),
]
| true | true |
1c3ac9ae9d6e7c62868bb21ea49dd185eacdf50e | 1,315 | py | Python | recgov/utils.py | Carl4/recgov | 2ebbf546861e0beece5fd2cb82cc75eabb145dcf | [
"MIT"
] | 1 | 2021-03-27T19:20:03.000Z | 2021-03-27T19:20:03.000Z | recgov/utils.py | Carl4/recgov | 2ebbf546861e0beece5fd2cb82cc75eabb145dcf | [
"MIT"
] | 1 | 2021-01-26T03:28:33.000Z | 2021-01-26T03:28:33.000Z | recgov/utils.py | Carl4/recgov | 2ebbf546861e0beece5fd2cb82cc75eabb145dcf | [
"MIT"
] | 1 | 2021-03-27T19:20:44.000Z | 2021-03-27T19:20:44.000Z | """Just a place to put some handy utility functions.
"""
from datetime import datetime, timedelta
# To find the next month easily: Add 31 days, then truncate back to day 1.
def this_month(month: datetime) -> datetime:
    """Truncate a datetime to the first day of its calendar month.

    :param month: any datetime within the month of interest
    :type month: datetime
    :return: midnight on day 1 of that month
    :rtype: datetime
    """
    return datetime(month.year, month.month, 1)
_A_MONTH = timedelta(days=31)  # always long enough to land in the next month
def next_month(month: datetime) -> datetime:
    """Return the first day of the month after the one containing *month*.

    :param month: any datetime within the month of interest
    :type month: datetime
    :return: midnight on day 1 of the following month
    :rtype: datetime
    """
    bumped = this_month(month) + _A_MONTH
    return datetime(bumped.year, bumped.month, 1)
def tokenize(string: str) -> [str]:
    """Split *string* on single spaces.

    Consecutive spaces produce empty tokens, matching ``str.split(" ")``.

    :param string: text to tokenize
    :type string: str
    :return: list of space-separated tokens
    """
    separator = " "
    return string.split(separator)
def represents_int(s: str) -> bool:
    """Checks whether *s* can be converted to an integer.

    :param s: value to test (typically a string)
    :type s: str
    :return: True if ``int(s)`` succeeds, False otherwise
    :rtype: bool
    """
    try:
        int(s)
    except (TypeError, ValueError):
        # TypeError covers non-numeric, non-string inputs such as None,
        # which previously escaped the handler and crashed the caller.
        return False
    return True
| 22.672414 | 74 | 0.643346 | from datetime import datetime, timedelta
def this_month(month: datetime) -> datetime:
return datetime(month.year, month.month, 1)
_A_MONTH = timedelta(days=31)
def next_month(month: datetime) -> datetime:
dt = this_month(month)
return datetime((dt+_A_MONTH).year, (dt+_A_MONTH).month, 1)
def tokenize(string: str) -> [str]:
return string.split(" ")
def represents_int(s: str) -> bool:
try:
int(s)
return True
except ValueError:
return False
| true | true |
1c3aca0376c40b2275667a1611df6d103e55870d | 1,279 | py | Python | tests/ant/core/test_node.py | soundsk/python-antrw | 302bada33a83a8bfa545723883a21a31fcbb9175 | [
"MIT"
] | 16 | 2017-08-23T23:57:14.000Z | 2022-01-05T01:00:23.000Z | tests/ant/core/test_node.py | soundsk/python-antrw | 302bada33a83a8bfa545723883a21a31fcbb9175 | [
"MIT"
] | 12 | 2017-08-26T20:20:29.000Z | 2020-01-22T14:52:33.000Z | tests/ant/core/test_node.py | soundsk/python-antrw | 302bada33a83a8bfa545723883a21a31fcbb9175 | [
"MIT"
] | 11 | 2017-06-08T22:21:56.000Z | 2022-01-10T15:48:47.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2017, Matt Hughes
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
| 47.37037 | 78 | 0.6724 | true | true | |
1c3acb79dffe5301debf4d75865233f21cf4f756 | 95 | py | Python | URI2791.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | 2 | 2020-07-21T18:01:37.000Z | 2021-11-29T01:08:14.000Z | URI2791.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | null | null | null | URI2791.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | null | null | null | X = list(map(int, input('').split()))
# Print the 1-based position of the value 1 in the input list.
for i in X:
    # NOTE(review): X.index(i) always returns the FIRST position of 1, so
    # duplicate 1s would print the same index repeatedly — presumably the
    # input contains exactly one 1; confirm against the problem statement.
    if i == 1:
        print(X.index(i) + 1)
| 19 | 37 | 0.473684 | X = list(map(int, input('').split()))
for i in X:
if i == 1:
print(X.index(i) + 1)
| true | true |
1c3acbabae93bd72a25b201657cbaa1571e73910 | 13,460 | py | Python | tests/api/test_files.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/api/test_files.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/api/test_files.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #coding: UTF-8
"""
Test file/dir operations.
"""
import posixpath
import pytest
import urllib
from urllib import urlencode, quote
import urlparse
from tests.common.utils import randstring, urljoin
from tests.api.apitestbase import ApiTestBase
from tests.api.urls import REPOS_URL
class FilesApiTest(ApiTestBase):
    """Integration tests for the file/directory web API.

    Every test provisions a throw-away repo via ``self.get_tmp_repo()`` and
    exercises one endpoint (rename/move/copy, download/upload/update links,
    directory listing, ...) through the HTTP helpers on ApiTestBase.
    """
    def test_rename_file(self):
        with self.get_tmp_repo() as repo:
            name, furl = self.create_file(repo)
            data = {
                'operation': 'rename',
                'newname': name + randstring(),
            }
            res = self.post(furl, data=data)
            # a successful rename responds with a quoted URL string
            self.assertRegexpMatches(res.text, r'"http(.*)"')
    def test_remove_file(self):
        with self.get_tmp_repo() as repo:
            _, furl = self.create_file(repo)
            res = self.delete(furl)
            self.assertEqual(res.text, '"success"')
    def test_move_file(self):
        with self.get_tmp_repo() as repo:
            _, furl = self.create_file(repo)
            # TODO: create another repo here, and use it as dst_repo
            data = {
                'operation': 'move',
                'dst_repo': repo.repo_id,
                'dst_dir': '/',
            }
            res = self.post(furl, data=data)
            self.assertEqual(res.text, '"success"')
    def test_copy_file(self):
        with self.get_tmp_repo() as repo:
            # TODO: create another repo here, and use it as dst_repo
            # create sub folder(dpath)
            dpath, _ = self.create_dir(repo)
            # create tmp file in sub folder(dpath)
            tmp_file = 'tmp_file.txt'
            file_path = dpath + '/' + tmp_file
            furl = repo.get_filepath_url(file_path)
            data = {'operation': 'create'}
            res = self.post(furl, data=data, expected=201)
            # copy tmp file from sub folder(dpath) to dst dir('/')
            data = {
                'dst_repo': repo.repo_id,
                'dst_dir': '/',
                'operation': 'copy',
            }
            u = urlparse.urlparse(furl)
            parsed_furl = urlparse.urlunparse((u.scheme, u.netloc, u.path, '', '', ''))
            res = self.post(parsed_furl+ '?p=' + quote(file_path), data=data)
            self.assertEqual(res.text, '"success"')
            # get info of copied file in dst dir('/')
            fdurl = repo.file_url + u'detail/?p=/%s' % quote(tmp_file)
            detail = self.get(fdurl).json()
            self.assertIsNotNone(detail)
            self.assertIsNotNone(detail['id'])
    def test_download_file(self):
        with self.get_tmp_repo() as repo:
            fname, furl = self.create_file(repo)
            res = self.get(furl)
            self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
    def test_download_file_without_reuse_token(self):
        """The default download token is single-use: the second fetch fails."""
        with self.get_tmp_repo() as repo:
            fname, furl = self.create_file(repo)
            res = self.get(furl)
            self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
            # download for the first time
            url = urllib.urlopen(res.text.strip('"'))
            code = url.getcode()
            self.assertEqual(code, 200)
            # download for the second time
            url = urllib.urlopen(res.text.strip('"'))
            code = url.getcode()
            self.assertEqual(code, 400)
    def test_download_file_with_reuse_token(self):
        """With reuse=1 the download link stays valid for repeated fetches."""
        with self.get_tmp_repo() as repo:
            fname, furl = self.create_file(repo)
            res = self.get(furl + '&reuse=1')
            self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
            # download for the first time
            url = urllib.urlopen(res.text.strip('"'))
            code = url.getcode()
            self.assertEqual(code, 200)
            # download for the second time
            url = urllib.urlopen(res.text.strip('"'))
            code = url.getcode()
            self.assertEqual(code, 200)
    def test_download_file_from_history(self):
        """A file can be fetched at a specific commit taken from its history."""
        with self.get_tmp_repo() as repo:
            fname, _ = self.create_file(repo)
            file_history_url = urljoin(repo.repo_url, 'history/') + \
                               '?p=/%s' % quote(fname)
            res = self.get(file_history_url).json()
            commit_id = res['commits'][0]['id']
            # commit ids are 40-char SHA-1 hex strings
            self.assertEqual(len(commit_id), 40)
            data = {
                'p': fname,
                'commit_id': commit_id,
            }
            query = '?' + urlencode(data)
            res = self.get(repo.file_url + query)
            self.assertRegexpMatches(res.text, r'"http(.*)/%s"' % quote(fname))
    def test_get_file_detail(self):
        with self.get_tmp_repo() as repo:
            fname, _ = self.create_file(repo)
            fdurl = repo.file_url + u'detail/?p=/%s' % quote(fname)
            detail = self.get(fdurl).json()
            self.assertIsNotNone(detail)
            self.assertIsNotNone(detail['id'])
            self.assertIsNotNone(detail['mtime'])
            self.assertIsNotNone(detail['type'])
            self.assertIsNotNone(detail['name'])
            self.assertIsNotNone(detail['size'])
    def test_get_file_history(self):
        with self.get_tmp_repo() as repo:
            fname, _ = self.create_file(repo)
            fhurl = repo.file_url + u'history/?p=%s' % quote(fname)
            history = self.get(fhurl).json()
            for commit in history['commits']:
                self.assertIsNotNone(commit['rev_file_size'])
                #self.assertIsNotNone(commit['rev_file_id']) #allow null
                self.assertIsNotNone(commit['ctime'])
                self.assertIsNotNone(commit['creator_name'])
                self.assertIsNotNone(commit['creator'])
                self.assertIsNotNone(commit['root_id'])
                #self.assertIsNotNone(commit['rev_renamed_old_path']) #allow null
                #self.assertIsNotNone(commit['parent_id']) #allow null
                self.assertIsNotNone(commit['new_merge'])
                self.assertIsNotNone(commit['repo_id'])
                self.assertIsNotNone(commit['desc'])
                self.assertIsNotNone(commit['id'])
                self.assertIsNotNone(commit['conflict'])
                #self.assertIsNotNone(commit['second_parent_id']) #allow null
    def test_get_upload_link(self):
        with self.get_tmp_repo() as repo:
            upload_url = urljoin(repo.repo_url, 'upload-link')
            res = self.get(upload_url)
            self.assertRegexpMatches(res.text, r'"http(.*)/upload-api/[^/]+"')
    def test_get_upload_link_with_invalid_repo_id(self):
        repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
        upload_url = urljoin(repo_url, 'upload-link')
        self.get(upload_url, expected=404)
    def test_get_update_link(self):
        with self.get_tmp_repo() as repo:
            update_url = urljoin(repo.repo_url, 'update-link')
            res = self.get(update_url)
            self.assertRegexpMatches(res.text, r'"http(.*)/update-api/[^/]+"')
    def test_get_update_link_with_invalid_repo_id(self):
        repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
        update_url = urljoin(repo_url, 'update-link')
        self.get(update_url, expected=404)
    # def test_upload_file(self):
    #     # XXX: requests has problems when post a file whose name contains
    #     # non-ascii data
    #     fname = 'file-upload-test %s.txt' % randstring()
    #     furl = self.test_file_url + '?p=/%s' % quote(fname)
    #     self.delete(furl)
    #     upload_url = self.test_repo_url + u'upload-link/'
    #     res = self.get(upload_url)
    #     upload_api_url = re.match(r'"(.*)"', res.text).group(1)
    #     files = {
    #         'file': (fname, 'Some lines in this file'),
    #         'parent_dir': '/',
    #     }
    #     res = self.post(upload_api_url, files=files)
    #     self.assertRegexpMatches(res.text, r'\w{40,40}')
    # def test_update_file(self):
    #     fname = 'file-update-test %s.txt' % randstring()
    #     _, furl = self.create_file(fname=fname)
    #     update_url = self.test_repo_url + u'update-link/'
    #     res = self.get(update_url)
    #     update_api_url = re.match(r'"(.*)"', res.text).group(1)
    #     files = {
    #         'file': ('filename', 'Updated content of this file'),
    #         'target_file': '/test_update.c'
    #     }
    #     res = self.post(update_api_url, files=files)
    #     self.assertRegexpMatches(res.text, r'\w{40,40}')
    def test_get_upload_blocks_link(self):
        with self.get_tmp_repo() as repo:
            upload_blks_url = urljoin(repo.repo_url, 'upload-blks-link')
            res = self.get(upload_blks_url)
            self.assertRegexpMatches(res.text, r'"http(.*)/upload-blks-api/[^/]+"')
    def test_get_upload_blocks_link_with_invalid_repo_id(self):
        repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
        upload_blks_url = urljoin(repo_url, 'upload-blks-link')
        self.get(upload_blks_url, expected=404)
    def test_get_update_blocks_link(self):
        with self.get_tmp_repo() as repo:
            update_blks_url = urljoin(repo.repo_url, 'update-blks-link')
            res = self.get(update_blks_url)
            self.assertRegexpMatches(res.text, r'"http(.*)/update-blks-api/[^/]+"')
    def test_get_update_blocks_link_with_invalid_repo_id(self):
        repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
        update_blks_url = urljoin(repo_url, 'update-blks-link')
        self.get(update_blks_url, expected=404)
    def test_only_list_dir(self):
        with self.get_tmp_repo() as repo:
            self.create_file(repo)
            self.create_dir(repo)
            # t=d filters the listing down to directories only
            dirents = self.get(repo.dir_url + '?t=d').json()
            self.assertHasLen(dirents, 1)
            for dirent in dirents:
                self.assertIsNotNone(dirent['id'])
                self.assertIsNotNone(dirent['name'])
                self.assertEqual(dirent['type'], 'dir')
    def test_only_list_file(self):
        with self.get_tmp_repo() as repo:
            self.create_file(repo)
            self.create_dir(repo)
            # t=f filters the listing down to files only
            dirents = self.get(repo.dir_url + '?t=f').json()
            self.assertHasLen(dirents, 1)
            for dirent in dirents:
                self.assertIsNotNone(dirent['id'])
                self.assertIsNotNone(dirent['name'])
                self.assertIsNotNone(dirent['size'])
                self.assertEqual(dirent['type'], 'file')
    def test_list_dir_and_file(self):
        with self.get_tmp_repo() as repo:
            self.create_file(repo)
            self.create_dir(repo)
            dirents = self.get(repo.dir_url).json()
            self.assertHasLen(dirents, 2)
            for dirent in dirents:
                self.assertIsNotNone(dirent['id'])
                self.assertIsNotNone(dirent['name'])
                self.assertIn(dirent['type'], ('file', 'dir'))
                if dirent['type'] == 'file':
                    self.assertIsNotNone(dirent['size'])
    def test_list_recursive_dir(self):
        with self.get_tmp_repo() as repo:
            # create test dir
            data = {'operation': 'mkdir'}
            dir_list = ['/1/', '/1/2/', '/1/2/3/', '/4/', '/4/5/', '/6/']
            for dpath in dir_list:
                durl = repo.get_dirpath_url(dpath)
                self.post(durl, data=data, expected=201)
            # get recursive dir
            dirents = self.get(repo.dir_url + '?t=d&recursive=1').json()
            self.assertHasLen(dirents, len(dir_list))
            for dirent in dirents:
                self.assertIsNotNone(dirent['id'])
                self.assertEqual(dirent['type'], 'dir')
                full_path = posixpath.join(dirent['parent_dir'], dirent['name']) + '/'
                self.assertIn(full_path, dir_list)
    def test_remove_dir(self):
        with self.get_tmp_repo() as repo:
            _, durl = self.create_dir(repo)
            res = self.delete(durl)
            self.assertEqual(res.text, u'"success"')
            self.get(durl, expected=404)
    def test_download_dir(self):
        with self.get_tmp_repo() as repo:
            dpath, _ = self.create_dir(repo)
            query = '?p=%s' % quote(dpath)
            ddurl = urljoin(repo.dir_url, 'download') + query
            res = self.get(ddurl)
            self.assertRegexpMatches(res.text,
                                     r'"http(.*)/files/[^/]+/%s"' % quote(dpath[1:]))
    @pytest.mark.xfail
    def test_create_dir_with_parents(self):
        with self.get_tmp_repo() as repo:
            path = u'/level1/level 2/level_3/目录4'
            self.create_dir_with_parents(repo, path)
    def create_dir_with_parents(self, repo, path):
        """Create *path* with create_parents=true, then verify each ancestor.

        NOTE(review): *path* begins with '/', so the first split part is ''
        and the first checked curpath is '/' — confirm this is intended.
        """
        data = {'operation': 'mkdir', 'create_parents': 'true'}
        durl = repo.get_dirpath_url(path.encode('utf-8'))
        self.post(durl, data=data, expected=201)
        curpath = ''
        # check the parents are created along the way
        parts = path.split('/')
        for i, name in enumerate(parts):
            curpath += '/' + name
            url = repo.get_dirpath_url(curpath.encode('utf-8'))
            if i < len(parts) - 1:
                assert self.get(url).json()[0]['name'] == parts[i+1]
            else:
                assert self.get(url).json() == []
| 41.16208 | 87 | 0.569614 |
import posixpath
import pytest
import urllib
from urllib import urlencode, quote
import urlparse
from tests.common.utils import randstring, urljoin
from tests.api.apitestbase import ApiTestBase
from tests.api.urls import REPOS_URL
class FilesApiTest(ApiTestBase):
def test_rename_file(self):
with self.get_tmp_repo() as repo:
name, furl = self.create_file(repo)
data = {
'operation': 'rename',
'newname': name + randstring(),
}
res = self.post(furl, data=data)
self.assertRegexpMatches(res.text, r'"http(.*)"')
def test_remove_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
res = self.delete(furl)
self.assertEqual(res.text, '"success"')
def test_move_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
data = {
'operation': 'move',
'dst_repo': repo.repo_id,
'dst_dir': '/',
}
res = self.post(furl, data=data)
self.assertEqual(res.text, '"success"')
def test_copy_file(self):
with self.get_tmp_repo() as repo:
dpath, _ = self.create_dir(repo)
tmp_file = 'tmp_file.txt'
file_path = dpath + '/' + tmp_file
furl = repo.get_filepath_url(file_path)
data = {'operation': 'create'}
res = self.post(furl, data=data, expected=201)
data = {
'dst_repo': repo.repo_id,
'dst_dir': '/',
'operation': 'copy',
}
u = urlparse.urlparse(furl)
parsed_furl = urlparse.urlunparse((u.scheme, u.netloc, u.path, '', '', ''))
res = self.post(parsed_furl+ '?p=' + quote(file_path), data=data)
self.assertEqual(res.text, '"success"')
fdurl = repo.file_url + u'detail/?p=/%s' % quote(tmp_file)
detail = self.get(fdurl).json()
self.assertIsNotNone(detail)
self.assertIsNotNone(detail['id'])
def test_download_file(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl)
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
def test_download_file_without_reuse_token(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl)
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
# download for the second time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 400)
def test_download_file_with_reuse_token(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl + '&reuse=1')
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
# download for the second time
url = urllib.urlopen(res.text.strip('"'))
code = url.getcode()
self.assertEqual(code, 200)
def test_download_file_from_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
file_history_url = urljoin(repo.repo_url, 'history/') + \
'?p=/%s' % quote(fname)
res = self.get(file_history_url).json()
commit_id = res['commits'][0]['id']
self.assertEqual(len(commit_id), 40)
data = {
'p': fname,
'commit_id': commit_id,
}
query = '?' + urlencode(data)
res = self.get(repo.file_url + query)
self.assertRegexpMatches(res.text, r'"http(.*)/%s"' % quote(fname))
def test_get_file_detail(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fdurl = repo.file_url + u'detail/?p=/%s' % quote(fname)
detail = self.get(fdurl).json()
self.assertIsNotNone(detail)
self.assertIsNotNone(detail['id'])
self.assertIsNotNone(detail['mtime'])
self.assertIsNotNone(detail['type'])
self.assertIsNotNone(detail['name'])
self.assertIsNotNone(detail['size'])
def test_get_file_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fhurl = repo.file_url + u'history/?p=%s' % quote(fname)
history = self.get(fhurl).json()
for commit in history['commits']:
self.assertIsNotNone(commit['rev_file_size'])
self.assertIsNotNone(commit['ctime'])
self.assertIsNotNone(commit['creator_name'])
self.assertIsNotNone(commit['creator'])
self.assertIsNotNone(commit['root_id'])
self.assertIsNotNone(commit['new_merge'])
self.assertIsNotNone(commit['repo_id'])
self.assertIsNotNone(commit['desc'])
self.assertIsNotNone(commit['id'])
self.assertIsNotNone(commit['conflict'])
est_get_upload_link(self):
with self.get_tmp_repo() as repo:
upload_url = urljoin(repo.repo_url, 'upload-link')
res = self.get(upload_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-api/[^/]+"')
def test_get_upload_link_with_invalid_repo_id(self):
repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
upload_url = urljoin(repo_url, 'upload-link')
self.get(upload_url, expected=404)
def test_get_update_link(self):
with self.get_tmp_repo() as repo:
update_url = urljoin(repo.repo_url, 'update-link')
res = self.get(update_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-api/[^/]+"')
def test_get_update_link_with_invalid_repo_id(self):
repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
update_url = urljoin(repo_url, 'update-link')
self.get(update_url, expected=404)
def test_get_upload_blocks_link(self):
with self.get_tmp_repo() as repo:
upload_blks_url = urljoin(repo.repo_url, 'upload-blks-link')
res = self.get(upload_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-blks-api/[^/]+"')
def test_get_upload_blocks_link_with_invalid_repo_id(self):
repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
upload_blks_url = urljoin(repo_url, 'upload-blks-link')
self.get(upload_blks_url, expected=404)
def test_get_update_blocks_link(self):
with self.get_tmp_repo() as repo:
update_blks_url = urljoin(repo.repo_url, 'update-blks-link')
res = self.get(update_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-blks-api/[^/]+"')
def test_get_update_blocks_link_with_invalid_repo_id(self):
repo_url = urljoin(REPOS_URL, '12345678-1234-1234-1234-12345678901b')
update_blks_url = urljoin(repo_url, 'update-blks-link')
self.get(update_blks_url, expected=404)
def test_only_list_dir(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url + '?t=d').json()
self.assertHasLen(dirents, 1)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertEqual(dirent['type'], 'dir')
def test_only_list_file(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url + '?t=f').json()
self.assertHasLen(dirents, 1)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertIsNotNone(dirent['size'])
self.assertEqual(dirent['type'], 'file')
def test_list_dir_and_file(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url).json()
self.assertHasLen(dirents, 2)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertIn(dirent['type'], ('file', 'dir'))
if dirent['type'] == 'file':
self.assertIsNotNone(dirent['size'])
def test_list_recursive_dir(self):
with self.get_tmp_repo() as repo:
data = {'operation': 'mkdir'}
dir_list = ['/1/', '/1/2/', '/1/2/3/', '/4/', '/4/5/', '/6/']
for dpath in dir_list:
durl = repo.get_dirpath_url(dpath)
self.post(durl, data=data, expected=201)
dirents = self.get(repo.dir_url + '?t=d&recursive=1').json()
self.assertHasLen(dirents, len(dir_list))
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertEqual(dirent['type'], 'dir')
full_path = posixpath.join(dirent['parent_dir'], dirent['name']) + '/'
self.assertIn(full_path, dir_list)
def test_remove_dir(self):
with self.get_tmp_repo() as repo:
_, durl = self.create_dir(repo)
res = self.delete(durl)
self.assertEqual(res.text, u'"success"')
self.get(durl, expected=404)
def test_download_dir(self):
with self.get_tmp_repo() as repo:
dpath, _ = self.create_dir(repo)
query = '?p=%s' % quote(dpath)
ddurl = urljoin(repo.dir_url, 'download') + query
res = self.get(ddurl)
self.assertRegexpMatches(res.text,
r'"http(.*)/files/[^/]+/%s"' % quote(dpath[1:]))
@pytest.mark.xfail
def test_create_dir_with_parents(self):
with self.get_tmp_repo() as repo:
path = u'/level1/level 2/level_3/目录4'
self.create_dir_with_parents(repo, path)
    def create_dir_with_parents(self, repo, path):
        """Create all levels of `path` in one request, then verify each level.

        Intermediate directories must each contain exactly the next level;
        the leaf directory must be empty.
        """
        data = {'operation': 'mkdir', 'create_parents': 'true'}
        durl = repo.get_dirpath_url(path.encode('utf-8'))
        self.post(durl, data=data, expected=201)
        curpath = ''
        parts = path.split('/')
        for i, name in enumerate(parts):
            curpath += '/' + name
            url = repo.get_dirpath_url(curpath.encode('utf-8'))
            if i < len(parts) - 1:
                # Each intermediate level lists only its single child.
                assert self.get(url).json()[0]['name'] == parts[i+1]
            else:
                # The leaf directory starts out empty.
                assert self.get(url).json() == []
| true | true |
1c3acc9a9d4803560d5db364812a6583f8e90b7d | 68 | py | Python | nft_notificator.py | twofacednine380/nft-notificator | 54f7e139b1784c81b91b9305696c9ab94fc32604 | [
"MIT"
] | null | null | null | nft_notificator.py | twofacednine380/nft-notificator | 54f7e139b1784c81b91b9305696c9ab94fc32604 | [
"MIT"
] | null | null | null | nft_notificator.py | twofacednine380/nft-notificator | 54f7e139b1784c81b91b9305696c9ab94fc32604 | [
"MIT"
] | null | null | null | from src.utils import _main
if __name__ == '__main__':
_main() | 13.6 | 27 | 0.676471 | from src.utils import _main
if __name__ == '__main__':
_main() | true | true |
1c3acdecca55f3277f91e256dd8593e2fbe38530 | 699 | py | Python | flop/__init__.py | jwohlwend/flop | c5bbd4c5fae6291e2a056e68b44bcf97e4d757bf | [
"MIT"
] | null | null | null | flop/__init__.py | jwohlwend/flop | c5bbd4c5fae6291e2a056e68b44bcf97e4d757bf | [
"MIT"
] | null | null | null | flop/__init__.py | jwohlwend/flop | c5bbd4c5fae6291e2a056e68b44bcf97e4d757bf | [
"MIT"
] | null | null | null | from flop.hardconcrete import HardConcrete
from flop.linear import ProjectedLinear, HardConcreteProjectedLinear, HardConcreteLinear
from flop.train import HardConcreteTrainer
from flop.utils import make_hard_concrete, make_projected_linear
from flop.utils import get_hardconcrete_modules, get_hardconcrete_proj_linear_modules
from flop.utils import get_hardconcrete_linear_modules
__all__ = ['HardConcrete', 'ProjectedLinear', 'HardConcreteLinear',
'HardConcreteProjectedLinear', 'HardConcreteTrainer',
'make_hard_concrete', 'make_projected_linear',
'get_hardconcrete_modules', 'get_hardconcrete_proj_linear_modules',
'get_hardconcrete_linear_modules']
| 49.928571 | 88 | 0.816881 | from flop.hardconcrete import HardConcrete
from flop.linear import ProjectedLinear, HardConcreteProjectedLinear, HardConcreteLinear
from flop.train import HardConcreteTrainer
from flop.utils import make_hard_concrete, make_projected_linear
from flop.utils import get_hardconcrete_modules, get_hardconcrete_proj_linear_modules
from flop.utils import get_hardconcrete_linear_modules
__all__ = ['HardConcrete', 'ProjectedLinear', 'HardConcreteLinear',
'HardConcreteProjectedLinear', 'HardConcreteTrainer',
'make_hard_concrete', 'make_projected_linear',
'get_hardconcrete_modules', 'get_hardconcrete_proj_linear_modules',
'get_hardconcrete_linear_modules']
| true | true |
1c3acf723768c88a3e5ed76910f430f9eea19ef7 | 744 | py | Python | Python3/1599-Maximum-Profit-of-Operating-a-Centennial-Wheel/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1599-Maximum-Profit-of-Operating-a-Centennial-Wheel/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1599-Maximum-Profit-of-Operating-a-Centennial-Wheel/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def minOperationsMaxProfit(self, customers: List[int], boardingCost: int, runningCost: int) -> int:
if runningCost > 4 * boardingCost:
return -1
nwaits = 0
best_profit = 0
best_rotations = 0
profit = 0
for i, c in enumerate(itertools.chain(customers, itertools.repeat(0))):
nwaits += c
board = min(4, nwaits)
nwaits -= board
profit += (board * boardingCost - runningCost)
if profit > best_profit:
best_profit = profit
best_rotations = i + 1
if i >= len(customers) and nwaits == 0:
break
return best_rotations if best_profit > 0 else -1
| 37.2 | 103 | 0.547043 | class Solution:
def minOperationsMaxProfit(self, customers: List[int], boardingCost: int, runningCost: int) -> int:
if runningCost > 4 * boardingCost:
return -1
nwaits = 0
best_profit = 0
best_rotations = 0
profit = 0
for i, c in enumerate(itertools.chain(customers, itertools.repeat(0))):
nwaits += c
board = min(4, nwaits)
nwaits -= board
profit += (board * boardingCost - runningCost)
if profit > best_profit:
best_profit = profit
best_rotations = i + 1
if i >= len(customers) and nwaits == 0:
break
return best_rotations if best_profit > 0 else -1
| true | true |
1c3acf88e2216a9063255815651278e21fd268c9 | 7,422 | py | Python | tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | 1 | 2021-03-17T14:48:47.000Z | 2021-03-17T14:48:47.000Z | tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py | souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for correlation_matrix_volumes_lib.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensorflow_probability.python.distributions.internal import correlation_matrix_volumes_lib as corr
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.platform import test
# NxN correlation matrices are determined by the N*(N-1)/2
# lower-triangular entries. In addition to being between -1 and 1,
# they must also obey the constraint that the determinant of the
# resulting symmetric matrix is non-negative. In 2x2, we can even
# analytically compute the volume when the determinant is bounded to >
# epsilon, as that boils down to the one lower-triangular entry being
# less than 1 - epsilon in absolute value.
def two_by_two_volume(det_bound):
  """Exact volume of 2x2 correlation matrices with determinant > det_bound.

  A 2x2 correlation matrix is determined by its single off-diagonal entry
  rho, and det = 1 - rho**2 > det_bound iff |rho| < sqrt(1 - det_bound),
  an interval of length 2 * sqrt(1 - det_bound).
  """
  half_width = np.sqrt(1.0 - det_bound)
  return half_width * 2
# The post
# https://psychometroscar.com/the-volume-of-a-3-x-3-correlation-matrix/
# derives (with elementary calculus) that the volume (with respect to
# Lebesgue^3 measure) of the set of 3x3 correlation matrices is
# pi^2/2. The same result is also obtained by [1].
def three_by_three_volume():
  """Lebesgue volume of the set of 3x3 correlation matrices: pi**2 / 2."""
  return 0.5 * np.pi ** 2
# The volume of the unconstrained set of correlation matrices is also
# the normalization constant of the LKJ distribution from [2]. As
# part of defining the distribution, that reference a derives general
# formula for this volume for all dimensions. A TensorFlow
# computation thereof gave the below result for 4x4:
def four_by_four_volume():
  """Volume of the set of 4x4 correlation matrices (LKJ normalizer)."""
  # This constant computed as math_ops.exp(lkj.log_norm_const(4, [1.0]))
  return 11.6973076
# [1] Rousseeuw, P. J., & Molenberghs, G. (1994). "The shape of
# correlation matrices." The American Statistician, 48(4), 276-279.
# [2] Daniel Lewandowski, Dorota Kurowicka, and Harry Joe, "Generating
# random correlation matrices based on vines and extended onion
# method," Journal of Multivariate Analysis 100 (2009), pp 1989-2001.
class CorrelationMatrixVolumesTest(test.TestCase):
  """Checks the rejection sampler against analytically known volumes."""

  def testRejection2D(self):
    """2x2 rejection-sampled volumes must match the closed-form answer."""
    num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
    det_bounds = np.array(
        [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
    exact_volumes = two_by_two_volume(det_bounds)
    (rej_weights,
     rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
         det_bounds, 2, [num_samples, 9], dtype=np.float32, seed=43)
    # shape of rej_weights: [num_samples, 9, 2, 2]
    chk1 = st.assert_true_mean_equal_by_dkwm(
        rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
        false_fail_rate=1e-6)
    chk2 = check_ops.assert_less(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            num_samples, low=0., high=rej_proposal_volume,
            # Correct the false fail rate due to different broadcasting
            false_fail_rate=1.1e-7, false_pass_rate=1e-6),
        0.036)
    with ops.control_dependencies([chk1, chk2]):
      rej_weights = array_ops.identity(rej_weights)
    self.evaluate(rej_weights)

  def testRejection3D(self):
    """3x3 rejection-sampled volume must match pi**2 / 2."""
    num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
    det_bounds = np.array([0.0], dtype=np.float32)
    exact_volumes = np.array([three_by_three_volume()], dtype=np.float32)
    (rej_weights,
     rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
         det_bounds, 3, [num_samples, 1], dtype=np.float32, seed=44)
    # shape of rej_weights: [num_samples, 1, 3, 3]
    chk1 = st.assert_true_mean_equal_by_dkwm(
        rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
        false_fail_rate=1e-6)
    chk2 = check_ops.assert_less(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            num_samples, low=0., high=rej_proposal_volume,
            false_fail_rate=1e-6, false_pass_rate=1e-6),
        # Going for about a 3% relative error
        0.15)
    with ops.control_dependencies([chk1, chk2]):
      rej_weights = array_ops.identity(rej_weights)
    self.evaluate(rej_weights)

  def testRejection4D(self):
    """4x4 rejection-sampled volume must match the LKJ normalizer."""
    num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
    det_bounds = np.array([0.0], dtype=np.float32)
    exact_volumes = [four_by_four_volume()]
    (rej_weights,
     rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
         det_bounds, 4, [num_samples, 1], dtype=np.float32, seed=45)
    # shape of rej_weights: [num_samples, 1, 4, 4]
    chk1 = st.assert_true_mean_equal_by_dkwm(
        rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
        false_fail_rate=1e-6)
    chk2 = check_ops.assert_less(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            num_samples, low=0., high=rej_proposal_volume,
            false_fail_rate=1e-6, false_pass_rate=1e-6),
        # Going for about a 10% relative error
        1.1)
    with ops.control_dependencies([chk1, chk2]):
      rej_weights = array_ops.identity(rej_weights)
    self.evaluate(rej_weights)

  def testVolumeEstimation2D(self):
    """Confidence intervals from compute_true_volumes contain the truth."""
    # Test that the confidence intervals produced by
    # corr.compte_true_volumes are sound, in the sense of containing
    # the exact volume.
    num_samples = int(1e5)  # Chosen by symmetry with testRejection2D
    det_bounds = np.array(
        [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
    volume_bounds = corr.compute_true_volumes(
        det_bounds, 2, num_samples, error_rate=1e-6, seed=47)
    exact_volumes = two_by_two_volume(det_bounds)
    for det, volume in zip(det_bounds, exact_volumes):
      computed_low, computed_high = volume_bounds[det]
      self.assertLess(computed_low, volume)
      self.assertGreater(computed_high, volume)
if __name__ == "__main__":
test.main()
| 44.710843 | 103 | 0.721638 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow_probability.python.distributions.internal import correlation_matrix_volumes_lib as corr
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.platform import test
def two_by_two_volume(det_bound):
return 2 * np.sqrt(1.0 - det_bound)
def three_by_three_volume():
return np.pi**2 / 2.
def four_by_four_volume():
return 11.6973076
# correlation matrices." The American Statistician, 48(4), 276-279.
# random correlation matrices based on vines and extended onion
# method," Journal of Multivariate Analysis 100 (2009), pp 1989-2001.
class CorrelationMatrixVolumesTest(test.TestCase):
def testRejection2D(self):
num_samples = int(1e5)
det_bounds = np.array(
[0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
exact_volumes = two_by_two_volume(det_bounds)
(rej_weights,
rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
det_bounds, 2, [num_samples, 9], dtype=np.float32, seed=43)
chk1 = st.assert_true_mean_equal_by_dkwm(
rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
false_fail_rate=1e-6)
chk2 = check_ops.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=rej_proposal_volume,
false_fail_rate=1.1e-7, false_pass_rate=1e-6),
0.036)
with ops.control_dependencies([chk1, chk2]):
rej_weights = array_ops.identity(rej_weights)
self.evaluate(rej_weights)
def testRejection3D(self):
num_samples = int(1e5)
det_bounds = np.array([0.0], dtype=np.float32)
exact_volumes = np.array([three_by_three_volume()], dtype=np.float32)
(rej_weights,
rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
det_bounds, 3, [num_samples, 1], dtype=np.float32, seed=44)
chk1 = st.assert_true_mean_equal_by_dkwm(
rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
false_fail_rate=1e-6)
chk2 = check_ops.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=rej_proposal_volume,
false_fail_rate=1e-6, false_pass_rate=1e-6),
0.15)
with ops.control_dependencies([chk1, chk2]):
rej_weights = array_ops.identity(rej_weights)
self.evaluate(rej_weights)
def testRejection4D(self):
num_samples = int(1e5)
det_bounds = np.array([0.0], dtype=np.float32)
exact_volumes = [four_by_four_volume()]
(rej_weights,
rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
det_bounds, 4, [num_samples, 1], dtype=np.float32, seed=45)
chk1 = st.assert_true_mean_equal_by_dkwm(
rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
false_fail_rate=1e-6)
chk2 = check_ops.assert_less(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=rej_proposal_volume,
false_fail_rate=1e-6, false_pass_rate=1e-6),
1.1)
with ops.control_dependencies([chk1, chk2]):
rej_weights = array_ops.identity(rej_weights)
self.evaluate(rej_weights)
def testVolumeEstimation2D(self):
num_samples = int(1e5)
det_bounds = np.array(
[0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
volume_bounds = corr.compute_true_volumes(
det_bounds, 2, num_samples, error_rate=1e-6, seed=47)
exact_volumes = two_by_two_volume(det_bounds)
for det, volume in zip(det_bounds, exact_volumes):
computed_low, computed_high = volume_bounds[det]
self.assertLess(computed_low, volume)
self.assertGreater(computed_high, volume)
if __name__ == "__main__":
test.main()
| true | true |
1c3ad02285f960adf7dad6ef1557a67fa008156e | 3,118 | py | Python | company/migrations/0017_auto_20161201_1715.py | uktrade/directory-api | 45a9024a7ecc2842895201cbb51420ba9e57a168 | [
"MIT"
] | 2 | 2017-06-02T09:09:08.000Z | 2021-01-18T10:26:53.000Z | company/migrations/0017_auto_20161201_1715.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 629 | 2016-10-10T09:35:52.000Z | 2022-03-25T15:04:04.000Z | company/migrations/0017_auto_20161201_1715.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 5 | 2017-06-22T10:02:22.000Z | 2022-03-14T17:55:21.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-01 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.10; alters choice lists on the company
    # models.  Choice tuples are (stored value, human-readable label).

    dependencies = [
        ('company', '0016_merge'),
    ]
    operations = [
        # Widen/relabel the employee head-count choices.
        migrations.AlterField(
            model_name='company',
            name='employees',
            field=models.CharField(blank=True, choices=[('', 'Please select'), ('1-10', '1-10'), ('11-50', '11-50'), ('51-200', '51-200'), ('201-500', '201-500'), ('501-1000', '501-1,000'), ('1001-10000', '1,001-10,000'), ('10001+', '10,001+')], default='', max_length=20, null=True),
        ),
        # Replace the export-status choices and drop custom validators.
        migrations.AlterField(
            model_name='company',
            name='export_status',
            field=models.CharField(choices=[('', ''), ('YES', 'Yes, in the last year.'), ('ONE_TWO_YEARS_AGO', 'Yes, 1-2 years ago.'), ('OVER_TWO_YEARS_AGO', 'Yes, over 2 years ago.'), ('NOT_YET', 'Not yet.'), ('NO_INTENTION', 'No, and we have no intention to.')], max_length=20, validators=[]),
        ),
        # Update the industry-sector choices on case studies.
        migrations.AlterField(
            model_name='companycasestudy',
            name='sector',
            field=models.CharField(choices=[('', ''), ('AEROSPACE', 'Aerospace'), ('AGRICULTURE_HORTICULTURE_AND_FISHERIES', 'Agriculture, Horticulture and Fisheries'), ('AIRPORTS', 'Airports'), ('AUTOMOTIVE', 'Automotive'), ('BIOTECHNOLOGY_AND_PHARMACEUTICALS', 'Biotechnology and Pharmaceuticals'), ('BUSINESS_AND_CONSUMER_SERVICES', 'Business and Consumer Services'), ('CHEMICALS', 'Chemicals'), ('CLOTHING_FOOTWEAR_AND_FASHION', 'Clothing, Footwear and Fashion'), ('COMMUNICATIONS', 'Communications'), ('CONSTRUCTION', 'Construction'), ('CREATIVE_AND_MEDIA', 'Creative and Media'), ('DEFENCE', 'Defence'), ('EDUCATION_AND_TRAINING', 'Education and Training'), ('ELECTRONICS_AND_IT_HARDWARE', 'Electronics and IT Hardware'), ('ENVIRONMENT', 'Environment'), ('FINANCIAL_AND_PROFESSIONAL_SERVICES', 'Financial and Professional Services'), ('FOOD_AND_DRINK', 'Food and Drink'), ('GIFTWARE_JEWELLERY_AND_TABLEWARE', 'Giftware, Jewellery and Tableware'), ('GLOBAL_SPORTS_INFRASTRUCTURE', 'Global Sports Infrastructure'), ('HEALTHCARE_AND_MEDICAL', 'Healthcare and Medical'), ('HOUSEHOLD_GOODS_FURNITURE_AND_FURNISHINGS', 'Household Goods, Furniture and Furnishings'), ('LEISURE_AND_TOURISM', 'Leisure and Tourism'), ('MARINE', 'Marine'), ('MECHANICAL_ELECTRICAL_AND_PROCESS_ENGINEERING', 'Mechanical Electrical and Process Engineering'), ('METALLURGICAL_PROCESS_PLANT', 'Metallurgical Process Plant'), ('METALS_MINERALS_AND_MATERIALS', 'Metals, Minerals and Materials'), ('MINING', 'Mining'), ('OIL_AND_GAS', 'Oil and Gas'), ('PORTS_AND_LOGISTICS', 'Ports and Logistics'), ('POWER', 'Power'), ('RAILWAYS', 'Railways'), ('RENEWABLE_ENERGY', 'Renewable Energy'), ('RETAIL_AND_LUXURY', 'Retail and Luxury'), ('SECURITY', 'Security'), ('SOFTWARE_AND_COMPUTER_SERVICES', 'Software and Computer Services'), ('TEXTILES_INTERIOR_TEXTILES_AND_CARPETS', 'Textiles, Interior Textiles and Carpets'), ('WATER', 'Water')], max_length=100),
        ),
    ]
| 100.580645 | 1,923 | 0.685696 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0016_merge'),
]
operations = [
migrations.AlterField(
model_name='company',
name='employees',
field=models.CharField(blank=True, choices=[('', 'Please select'), ('1-10', '1-10'), ('11-50', '11-50'), ('51-200', '51-200'), ('201-500', '201-500'), ('501-1000', '501-1,000'), ('1001-10000', '1,001-10,000'), ('10001+', '10,001+')], default='', max_length=20, null=True),
),
migrations.AlterField(
model_name='company',
name='export_status',
field=models.CharField(choices=[('', ''), ('YES', 'Yes, in the last year.'), ('ONE_TWO_YEARS_AGO', 'Yes, 1-2 years ago.'), ('OVER_TWO_YEARS_AGO', 'Yes, over 2 years ago.'), ('NOT_YET', 'Not yet.'), ('NO_INTENTION', 'No, and we have no intention to.')], max_length=20, validators=[]),
),
migrations.AlterField(
model_name='companycasestudy',
name='sector',
field=models.CharField(choices=[('', ''), ('AEROSPACE', 'Aerospace'), ('AGRICULTURE_HORTICULTURE_AND_FISHERIES', 'Agriculture, Horticulture and Fisheries'), ('AIRPORTS', 'Airports'), ('AUTOMOTIVE', 'Automotive'), ('BIOTECHNOLOGY_AND_PHARMACEUTICALS', 'Biotechnology and Pharmaceuticals'), ('BUSINESS_AND_CONSUMER_SERVICES', 'Business and Consumer Services'), ('CHEMICALS', 'Chemicals'), ('CLOTHING_FOOTWEAR_AND_FASHION', 'Clothing, Footwear and Fashion'), ('COMMUNICATIONS', 'Communications'), ('CONSTRUCTION', 'Construction'), ('CREATIVE_AND_MEDIA', 'Creative and Media'), ('DEFENCE', 'Defence'), ('EDUCATION_AND_TRAINING', 'Education and Training'), ('ELECTRONICS_AND_IT_HARDWARE', 'Electronics and IT Hardware'), ('ENVIRONMENT', 'Environment'), ('FINANCIAL_AND_PROFESSIONAL_SERVICES', 'Financial and Professional Services'), ('FOOD_AND_DRINK', 'Food and Drink'), ('GIFTWARE_JEWELLERY_AND_TABLEWARE', 'Giftware, Jewellery and Tableware'), ('GLOBAL_SPORTS_INFRASTRUCTURE', 'Global Sports Infrastructure'), ('HEALTHCARE_AND_MEDICAL', 'Healthcare and Medical'), ('HOUSEHOLD_GOODS_FURNITURE_AND_FURNISHINGS', 'Household Goods, Furniture and Furnishings'), ('LEISURE_AND_TOURISM', 'Leisure and Tourism'), ('MARINE', 'Marine'), ('MECHANICAL_ELECTRICAL_AND_PROCESS_ENGINEERING', 'Mechanical Electrical and Process Engineering'), ('METALLURGICAL_PROCESS_PLANT', 'Metallurgical Process Plant'), ('METALS_MINERALS_AND_MATERIALS', 'Metals, Minerals and Materials'), ('MINING', 'Mining'), ('OIL_AND_GAS', 'Oil and Gas'), ('PORTS_AND_LOGISTICS', 'Ports and Logistics'), ('POWER', 'Power'), ('RAILWAYS', 'Railways'), ('RENEWABLE_ENERGY', 'Renewable Energy'), ('RETAIL_AND_LUXURY', 'Retail and Luxury'), ('SECURITY', 'Security'), ('SOFTWARE_AND_COMPUTER_SERVICES', 'Software and Computer Services'), ('TEXTILES_INTERIOR_TEXTILES_AND_CARPETS', 'Textiles, Interior Textiles and Carpets'), ('WATER', 'Water')], max_length=100),
),
]
| true | true |
1c3ad06c3a369b08b51fe1ac58bcecb9a1f95046 | 15,296 | py | Python | management/forms.py | stustanet/Wahlfang | bd7eb21a187d6c985d34c7f806d6f88e7f00db06 | [
"MIT"
] | null | null | null | management/forms.py | stustanet/Wahlfang | bd7eb21a187d6c985d34c7f806d6f88e7f00db06 | [
"MIT"
] | null | null | null | management/forms.py | stustanet/Wahlfang | bd7eb21a187d6c985d34c7f806d6f88e7f00db06 | [
"MIT"
] | null | null | null | import csv
import io
import re
from datetime import timedelta
from typing import Tuple, List
from django import forms
from django.conf import settings
from django.core.validators import validate_email
from django.utils import timezone
from management.models import ElectionManager
from vote.models import Election, Application, Session, Voter, OpenVote
class StartElectionForm(forms.ModelForm):
    """Starts an election immediately and lets it run for ``run_time`` minutes."""
    run_time = forms.IntegerField(label="run time", min_value=1)

    class Meta:
        model = Election
        fields: List[str] = []

    def save(self, commit=True):
        """Set start_date to now and end_date exactly run_time minutes later.

        Captures ``timezone.now()`` once: the original code called it twice,
        so end_date drifted slightly past start_date + run_time.
        """
        instance = super().save(commit=False)
        now = timezone.now()
        instance.start_date = now
        instance.end_date = now + timedelta(minutes=self.cleaned_data['run_time'])
        if commit:
            instance.save()
        return instance
class StopElectionForm(forms.ModelForm):
    """Ends the bound election right away by setting its end date to now."""

    class Meta:
        model = Election
        fields: List[str] = []

    def save(self, commit=True):
        election = super().save(commit=False)
        election.end_date = timezone.now()
        if commit:
            election.save()
        return election
class TemplateStringForm:
    """Mixin for forms whose text fields are ``str.format`` templates.

    Subclasses call :meth:`clean_email_text` from their ``clean_<field>``
    hooks to verify a template only uses the allowed placeholder names.
    """

    def clean_email_text(self, test_data: List[str], field: str):
        """Validate that ``self.cleaned_data[field]`` formats cleanly.

        ``test_data`` lists the allowed placeholder names.  On failure a form
        error naming the offending placeholders is attached to ``field``; the
        original text is returned either way.
        """
        template = self.cleaned_data[field]
        dummy_values = dict.fromkeys(test_data, "")
        try:
            template.format(**dummy_values)
        except (KeyError, ValueError, IndexError):
            found = re.findall(r"\{\w*\}", template)
            allowed = {f"{{{name}}}" for name in test_data}
            unknown = set(found) - allowed
            self.add_error(field, "The following variables are not allowed: " + ", ".join(unknown))
        return template
class AddSessionForm(forms.ModelForm, TemplateStringForm):
    """Creates a new voting session owned by the requesting election manager.

    ``variables`` documents the placeholders that may appear in the invite
    text template; the template itself is validated in clean_invite_text.
    """
    variables = {
        "{name}": "Voter's name if set",
        "{title}": "Session's title",
        "{access_code}": "Access code/token for the voter to login",
        "{login_url}": "URL which instantly logs user in",
        "{base_url}": f"Will render to: https://{settings.URL}",
        "{start_time}": "Start time if datetime is set",
        "{start_date}": "Start date if datetime is set",
        "{start_time_en}": "Start time in english format e.g. 02:23 PM",
        "{start_date_en}": "Start date in english format e.g. 12/12/2020",
        "{meeting_link}": "Meeting link if set"
    }
    # Recipient for the optional "send a test e-mail" action.
    email = forms.EmailField(required=False, label="",
                             widget=forms.EmailInput(attrs={"class": "emailinput form-control",
                                                            "placeholder": "your@email.de"}))

    def __init__(self, request, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['start_date'].widget = forms.TextInput(attrs={'placeholder': 'e.g. 2020-05-12 13:00:00',
                                                                  'type': 'datetime'})
        self.fields['meeting_link'].widget = forms.TextInput(
            attrs={'placeholder': 'e.g. https://bbb.stusta.de/b/ssn-abc-123'})
        self.user = user
        self.request = request

    class Meta:
        model = Session
        fields = ('title', 'start_date', 'meeting_link', 'invite_text')
        labels = {
            'title': 'Session Name',
            'start_date': 'Meeting start (optional)',
            'meeting_link': 'Link to meeting call platform (optional)',
            # will be set by html
            'invite_text': ''
        }

    def clean(self):
        # When the user asked for a test mail, both the target address and
        # the invite text must be present.
        super().clean()
        if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['email']:
            self.add_error('email', "Email must be set for sending the test mail.")
        if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['invite_text']:
            self.add_error('invite_text', "The test email can only be send when the invite text is filled.")
        return self.cleaned_data

    def clean_invite_text(self):
        # Placeholders allowed in the invite template (see ``variables``).
        test_data = ["name", "title", "access_code", "login_url", "base_url", "start_time",
                     "start_date", "meeting_link", "start_date_en", "start_time_en"]
        return self.clean_email_text(test_data, 'invite_text')

    def _save_m2m(self):
        # After the regular m2m save, grant the creating manager access to
        # the new session (if not already linked).
        super()._save_m2m()
        if not self.user.sessions.filter(pk=self.instance.pk):
            self.user.sessions.add(self.instance)
            self.user.save()

    def save(self, commit=True):
        self.instance = super().save(commit=False)
        if commit:
            self.instance.save()
            self._save_m2m()
        else:
            # Mirror ModelForm's deferred-m2m contract for commit=False.
            self.save_m2m = self._save_m2m  # pylint: disable=attribute-defined-outside-init
        return self.instance
class SessionSettingsForm(AddSessionForm):
    """Settings form for an existing session; additionally allows granting
    another election manager access to the session by username."""
    add_election_manager = forms.CharField(max_length=256, required=False, label='')

    class Meta:
        model = Session
        fields = ('title', 'start_date', 'meeting_link', 'invite_text')
        labels = {
            'start_date': 'Meeting start (optional)',
            'meeting_link': 'Link to meeting call platform (optional)',
            # will be set by html
            'invite_text': ''
        }
        widgets = {
            'start_date': forms.TextInput(attrs={'placeholder': 'e.g. 2020-05-12 13:00:00', 'type': 'datetime'})
        }

    def clean_add_election_manager(self):
        """Resolve the entered username to an ElectionManager instance.

        Uses the cleaned (field-validated) value instead of raw POST data,
        and a single query instead of exists() followed by get().
        """
        value = self.cleaned_data.get('add_election_manager')
        if not value:
            return value
        manager = ElectionManager.objects.filter(username=value).first()
        if manager is None:
            raise forms.ValidationError(f'Cannot find election manager with username {value}')
        return manager

    def _save_m2m(self):
        # After the regular m2m save, link the additional manager (if any).
        super()._save_m2m()
        if self.cleaned_data['add_election_manager']:
            self.cleaned_data['add_election_manager'].sessions.add(self.instance)
            self.cleaned_data['add_election_manager'].save()

    def save(self, commit=True):
        self.instance = super().save(commit=False)
        if commit:
            self.instance.save()
            self._save_m2m()
        else:
            # Mirror ModelForm's deferred-m2m contract for commit=False.
            self.save_m2m = self._save_m2m  # pylint: disable=attribute-defined-outside-init
        return self.instance
class AddElectionForm(forms.ModelForm, TemplateStringForm):
    """Creates an election inside a session.

    ``variables`` documents the placeholders allowed in the reminder e-mail
    template; the template itself is validated in clean_remind_text.
    """
    variables = {
        "{name}": "Voter's name if set",
        "{title}": "Session's title",
        "{url}": "URL to the election",
        "{end_time}": "End time if datetime is set",
        "{end_date}": "End date if datetime is set",
        "{end_time_en}": "End time in english format e.g. 02:23 PM",
        "{end_date_en}": "End date in english format e.g. 12/12/2020",
    }
    # Recipient for the optional "send a test e-mail" action.
    email = forms.EmailField(required=False, label="",
                             widget=forms.EmailInput(attrs={"class": "emailinput form-control",
                                                            "placeholder": "your@email.de"}))

    def __init__(self, user, session, request, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The session is fixed by the view; render it hidden and immutable.
        self.fields['session'].disabled = True
        self.fields['session'].initial = session
        self.fields['session'].widget = forms.HiddenInput()
        self.fields['start_date'].widget = forms.TextInput(
            attrs={'placeholder': 'e.g.: 2020-05-12 13:00:00', 'type': 'datetime'})
        self.fields['end_date'].widget = forms.TextInput(
            attrs={'placeholder': 'e.g.: 2020-05-12 13:00:00', 'type': 'datetime'})
        # self.fields['start_date'].initial = timezone.now()
        self.fields['max_votes_yes'] = forms.IntegerField(min_value=1, required=False,
                                                          label='Maximum number of YES votes (optional)')
        self.user = user
        self.session = session
        self.request = request

    class Meta:
        model = Election
        fields = (
            'title', 'start_date', 'end_date', 'session', 'max_votes_yes', 'voters_self_apply', 'send_emails_on_start',
            'remind_text', 'enable_abstention', 'result_published')
        labels = {
            'title': 'Election Name',
            'start_date': 'Start time (optional)',
            'end_date': 'End time (optional)',
            'voters_self_apply': 'Voters can apply for the election',
            'send_emails_on_start': 'Voters receive an e-mail when the election starts<br>'
                                    '(useful for elections that last several days)',
            'enable_abstention': 'Enable the option to abstain in this election<br>'
                                 '(YES, NO and ABSTENTION votes will be allowed)',
            'remind_text': '',
            'result_published': 'Automatically publish the election results',
        }

    def clean_remind_text(self):
        # Placeholders allowed in the reminder template (see ``variables``).
        test_data = ["name", "title", "url", "end_time", "end_date", "end_date_en", "end_time_en"]
        return self.clean_email_text(test_data, 'remind_text')

    def clean(self):
        """Cross-field validation: permissions, test-mail prerequisites and
        date-range sanity."""
        super().clean()
        if self.session not in self.user.sessions.all():
            raise forms.ValidationError("You don't have the permission to add an election here.")
        if not self.cleaned_data['send_emails_on_start'] and self.cleaned_data['remind_text']:
            self.add_error('send_emails_on_start', "Remind text can only be set when this option is activated.")
        if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['email']:
            self.add_error('email', "Email must be set for sending the test mail.")
        if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['remind_text']:
            self.add_error('remind_text', "The test email can only be send when the remind text is filled.")
        if self.cleaned_data['end_date'] and self.cleaned_data['end_date'] < timezone.now():
            self.add_error('end_date', "End date cannot be in the past.")
        if self.cleaned_data.get('end_date') and self.cleaned_data.get('start_date') and \
                self.cleaned_data['start_date'] > self.cleaned_data['end_date']:
            raise forms.ValidationError("Start date needs to be before end date")
        return self.cleaned_data

    def save(self, commit=True):
        # NOTE(review): with commit=False the election is still added to the
        # session's m2m and the open-vote creation is skipped; callers appear
        # to always use commit=True — confirm before relying on commit=False.
        instance = super().save(commit=commit)
        self.session.elections.add(instance)
        if commit:
            self.session.save()
            # Every current participant gets one open (not yet cast) vote.
            open_votes = [
                OpenVote(voter=v, election=instance)
                for v in self.session.participants.all()
            ]
            OpenVote.objects.bulk_create(open_votes)
        return instance
class AvatarFileInput(forms.ClearableFileInput):
    # Renders the file input with the project's image-preview template
    # instead of Django's default widget template.
    template_name = 'management/image_input.html'
class ApplicationUploadForm(forms.ModelForm):
    """Creates or edits a candidate application for a given election."""
    field_order = ['election', 'display_name', 'email', 'text', 'avatar']

    def __init__(self, election, request, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.request = request
        self.election = election
        # The election is supplied by the view, not by user input.
        self.fields['election'].required = False

    class Meta:
        model = Application
        fields = ('election', 'display_name', 'email', 'text', 'avatar')

    def clean(self):
        super().clean()
        # Applications are only valid while the election accepts them.
        if not self.election.can_apply:
            raise forms.ValidationError('Applications are currently not allowed')
        return self.cleaned_data

    def save(self, commit=True):
        instance = super().save(commit=False)
        # Bind the application to the election chosen by the view.
        instance.election = self.election
        if commit:
            instance.save()
        return instance
class AddVotersForm(forms.Form):
    """Bulk-add voters to a session from a newline-separated e-mail list."""

    voters_list = forms.CharField(widget=forms.Textarea, label='Emails')  # explicitly no max_length here

    def __init__(self, session, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.session = session

    def clean_voters_list(self):
        """Validate each non-empty line as a syntactically valid, new address."""
        emails = []
        for line in self.cleaned_data['voters_list'].splitlines():
            if not line:
                continue
            try:
                validate_email(line)
            except forms.ValidationError:
                self.add_error('voters_list', f'{line} is not a valid email address')
            if Voter.objects.filter(email=line, session=self.session).exists():
                self.add_error('voters_list', f'a voter with email address {line} already exists')
            emails.append(line)
        if len(set(emails)) != len(emails):
            raise forms.ValidationError('duplicate email address')
        return emails

    def save(self) -> List[Tuple[Voter, str]]:
        voters_codes = [
            Voter.from_data(email=address, session=self.session)
            for address in self.cleaned_data['voters_list']
        ]
        self.session.managers.all().first().send_invite_bulk_threaded(voters_codes)
        return voters_codes
class AddTokensForm(forms.Form):
    """Create a batch of anonymous (token-based) voters for a session."""

    nr_anonymous_voters = forms.IntegerField(min_value=1, max_value=50, label='Number of Tokens:')

    def __init__(self, session, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.session = session

    def save(self) -> List[Tuple[Voter, str]]:
        # Anonymous voters carry no e-mail address; each call mints a token.
        count = self.cleaned_data['nr_anonymous_voters']
        return [Voter.from_data(session=self.session) for _ in range(count)]
class CSVUploaderForm(forms.Form):
    """Import voters for a session from an uploaded CSV with columns ``email`` and ``name``."""

    csv_data = forms.FileField(label='CSV File')

    def __init__(self, session, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.session = session

    def clean_csv_data(self):
        """Parse and validate the upload; returns a ``{email_or_None: name}`` dict.

        NOTE(review): rows with an empty e-mail all map to the ``None`` key,
        so a second empty-e-mail row trips the duplicate check and earlier
        names are overwritten — confirm whether multiple anonymous rows
        should be supported.
        """
        f = self.cleaned_data['csv_data'].file
        data = {}
        try:
            # The uploaded file is binary; decode it as UTF-8 text for csv.
            with io.TextIOWrapper(f, encoding='utf-8') as text_file:
                csv_reader = csv.DictReader(text_file)
                if 'email' not in csv_reader.fieldnames or 'name' not in csv_reader.fieldnames:
                    raise forms.ValidationError('CSV file needs to have columns "email" and "name".')
                for row in csv_reader:
                    if row['email']:
                        try:
                            validate_email(row['email'])
                        except forms.ValidationError:
                            self.add_error('csv_data', f'Invalid email {row["email"]}')
                    else:
                        # Blank cell -> anonymous voter without an address.
                        row['email'] = None
                    if Voter.objects.filter(session=self.session, email=row['email']).exists():
                        self.add_error('csv_data', f'Voter with email address {row["email"]} already exists')
                    if row['email'] in data:
                        self.add_error('csv_data', f'Duplicate email in csv: {row["email"]}')
                    data[row['email']] = row['name']
        except UnicodeDecodeError as e:
            # Binary uploads (e.g. .xlsx) fail the UTF-8 decode above.
            raise forms.ValidationError('File does not seem to be in CSV format.') from e
        return data

    def save(self):
        # Create one voter per parsed row and mail invitations in bulk.
        voters_codes = [
            Voter.from_data(session=self.session, email=email, name=name)
            for email, name in self.cleaned_data['csv_data'].items()
        ]
        self.session.managers.all().first().send_invite_bulk_threaded(voters_codes)
| 39.524548 | 119 | 0.600418 | import csv
import io
import re
from datetime import timedelta
from typing import Tuple, List
from django import forms
from django.conf import settings
from django.core.validators import validate_email
from django.utils import timezone
from management.models import ElectionManager
from vote.models import Election, Application, Session, Voter, OpenVote
class StartElectionForm(forms.ModelForm):
    """Starts an election now and schedules its end ``run_time`` minutes later."""

    run_time = forms.IntegerField(label="run time", min_value=1)

    class Meta:
        model = Election
        fields: List[str] = []

    def save(self, commit=True):
        election = super().save(commit=False)
        election.start_date = timezone.now()
        minutes = self.cleaned_data['run_time']
        election.end_date = timezone.now() + timedelta(minutes=minutes)
        if commit:
            election.save()
        return election
class StopElectionForm(forms.ModelForm):
    """Closes an election immediately by setting its end date to now."""

    class Meta:
        model = Election
        fields: List[str] = []

    def save(self, commit=True):
        election = super().save(commit=False)
        election.end_date = timezone.now()
        if commit:
            election.save()
        return election
class TemplateStringForm:
    """Mixin validating that a text field only uses whitelisted ``{placeholder}`` variables."""

    def clean_email_text(self, test_data: List[str], field: str):
        """Validate ``cleaned_data[field]`` against the allowed placeholder names.

        A trial ``str.format`` with every allowed name mapped to "" raises if
        the text references anything else (or uses positional ``{}``/``{0}``
        fields); the offending placeholders are then reported as a form error.
        Returns the text unchanged either way.
        """
        test_data_dict = {i: "" for i in test_data}
        cleaned_text = self.cleaned_data[field]
        try:
            cleaned_text.format(**test_data_dict)
        except (KeyError, ValueError, IndexError):
            # Collect every {word}-style token and subtract the allowed ones.
            x = re.findall(r"\{\w*\}", cleaned_text)
            test_data = set(x) - {f"{{{i}}}" for i in test_data}
            self.add_error(field, "The following variables are not allowed: " + ", ".join(test_data))
        return cleaned_text
class AddSessionForm(forms.ModelForm, TemplateStringForm):
variables = {
"{name}": "Voter's name if set",
"{title}": "Session's title",
"{access_code}": "Access code/token for the voter to login",
"{login_url}": "URL which instantly logs user in",
"{base_url}": f"Will render to: https://{settings.URL}",
"{start_time}": "Start time if datetime is set",
"{start_date}": "Start date if datetime is set",
"{start_time_en}": "Start time in english format e.g. 02:23 PM",
"{start_date_en}": "Start date in english format e.g. 12/12/2020",
"{meeting_link}": "Meeting link if set"
}
email = forms.EmailField(required=False, label="",
widget=forms.EmailInput(attrs={"class": "emailinput form-control",
"placeholder": "your@email.de"}))
def __init__(self, request, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['start_date'].widget = forms.TextInput(attrs={'placeholder': 'e.g. 2020-05-12 13:00:00',
'type': 'datetime'})
self.fields['meeting_link'].widget = forms.TextInput(
attrs={'placeholder': 'e.g. https://bbb.stusta.de/b/ssn-abc-123'})
self.user = user
self.request = request
class Meta:
model = Session
fields = ('title', 'start_date', 'meeting_link', 'invite_text')
labels = {
'title': 'Session Name',
'start_date': 'Meeting start (optional)',
'meeting_link': 'Link to meeting call platform (optional)',
'invite_text': ''
}
def clean(self):
super().clean()
if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['email']:
self.add_error('email', "Email must be set for sending the test mail.")
if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['invite_text']:
self.add_error('invite_text', "The test email can only be send when the invite text is filled.")
return self.cleaned_data
def clean_invite_text(self):
test_data = ["name", "title", "access_code", "login_url", "base_url", "start_time",
"start_date", "meeting_link", "start_date_en", "start_time_en"]
return self.clean_email_text(test_data, 'invite_text')
def _save_m2m(self):
super()._save_m2m()
if not self.user.sessions.filter(pk=self.instance.pk):
self.user.sessions.add(self.instance)
self.user.save()
def save(self, commit=True):
self.instance = super().save(commit=False)
if commit:
self.instance.save()
self._save_m2m()
else:
self.save_m2m = self._save_m2m
return self.instance
class SessionSettingsForm(AddSessionForm):
add_election_manager = forms.CharField(max_length=256, required=False, label='')
class Meta:
model = Session
fields = ('title', 'start_date', 'meeting_link', 'invite_text')
labels = {
'start_date': 'Meeting start (optional)',
'meeting_link': 'Link to meeting call platform (optional)',
'invite_text': ''
}
widgets = {
'start_date': forms.TextInput(attrs={'placeholder': 'e.g. 2020-05-12 13:00:00', 'type': 'datetime'})
}
def clean_add_election_manager(self):
value = self.data['add_election_manager']
if not value:
return value
if not ElectionManager.objects.filter(username=value).exists():
raise forms.ValidationError(f'Cannot find election manager with username {value}')
return ElectionManager.objects.get(username=value)
def _save_m2m(self):
super()._save_m2m()
if self.cleaned_data['add_election_manager']:
self.cleaned_data['add_election_manager'].sessions.add(self.instance)
self.cleaned_data['add_election_manager'].save()
def save(self, commit=True):
self.instance = super().save(commit=False)
if commit:
self.instance.save()
self._save_m2m()
else:
self.save_m2m = self._save_m2m
return self.instance
class AddElectionForm(forms.ModelForm, TemplateStringForm):
variables = {
"{name}": "Voter's name if set",
"{title}": "Session's title",
"{url}": "URL to the election",
"{end_time}": "End time if datetime is set",
"{end_date}": "End date if datetime is set",
"{end_time_en}": "End time in english format e.g. 02:23 PM",
"{end_date_en}": "End date in english format e.g. 12/12/2020",
}
email = forms.EmailField(required=False, label="",
widget=forms.EmailInput(attrs={"class": "emailinput form-control",
"placeholder": "your@email.de"}))
def __init__(self, user, session, request, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['session'].disabled = True
self.fields['session'].initial = session
self.fields['session'].widget = forms.HiddenInput()
self.fields['start_date'].widget = forms.TextInput(
attrs={'placeholder': 'e.g.: 2020-05-12 13:00:00', 'type': 'datetime'})
self.fields['end_date'].widget = forms.TextInput(
attrs={'placeholder': 'e.g.: 2020-05-12 13:00:00', 'type': 'datetime'})
self.fields['max_votes_yes'] = forms.IntegerField(min_value=1, required=False,
label='Maximum number of YES votes (optional)')
self.user = user
self.session = session
self.request = request
class Meta:
model = Election
fields = (
'title', 'start_date', 'end_date', 'session', 'max_votes_yes', 'voters_self_apply', 'send_emails_on_start',
'remind_text', 'enable_abstention', 'result_published')
labels = {
'title': 'Election Name',
'start_date': 'Start time (optional)',
'end_date': 'End time (optional)',
'voters_self_apply': 'Voters can apply for the election',
'send_emails_on_start': 'Voters receive an e-mail when the election starts<br>'
'(useful for elections that last several days)',
'enable_abstention': 'Enable the option to abstain in this election<br>'
'(YES, NO and ABSTENTION votes will be allowed)',
'remind_text': '',
'result_published': 'Automatically publish the election results',
}
def clean_remind_text(self):
test_data = ["name", "title", "url", "end_time", "end_date", "end_date_en", "end_time_en"]
return self.clean_email_text(test_data, 'remind_text')
def clean(self):
super().clean()
if self.session not in self.user.sessions.all():
raise forms.ValidationError("You don't have the permission to add an election here.")
if not self.cleaned_data['send_emails_on_start'] and self.cleaned_data['remind_text']:
self.add_error('send_emails_on_start', "Remind text can only be set when this option is activated.")
if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['email']:
self.add_error('email', "Email must be set for sending the test mail.")
if self.request.POST.get("submit_type") == "test" and not self.cleaned_data['remind_text']:
self.add_error('remind_text', "The test email can only be send when the remind text is filled.")
if self.cleaned_data['end_date'] and self.cleaned_data['end_date'] < timezone.now():
self.add_error('end_date', "End date cannot be in the past.")
if self.cleaned_data.get('end_date') and self.cleaned_data.get('start_date') and \
self.cleaned_data['start_date'] > self.cleaned_data['end_date']:
raise forms.ValidationError("Start date needs to be before end date")
return self.cleaned_data
def save(self, commit=True):
instance = super().save(commit=commit)
self.session.elections.add(instance)
if commit:
self.session.save()
open_votes = [
OpenVote(voter=v, election=instance)
for v in self.session.participants.all()
]
OpenVote.objects.bulk_create(open_votes)
return instance
class AvatarFileInput(forms.ClearableFileInput):
    """Clearable file input rendered with the avatar/image upload template."""
    template_name = 'management/image_input.html'
class ApplicationUploadForm(forms.ModelForm):
field_order = ['election', 'display_name', 'email', 'text', 'avatar']
def __init__(self, election, request, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request = request
self.election = election
self.fields['election'].required = False
class Meta:
model = Application
fields = ('election', 'display_name', 'email', 'text', 'avatar')
def clean(self):
super().clean()
if not self.election.can_apply:
raise forms.ValidationError('Applications are currently not allowed')
return self.cleaned_data
def save(self, commit=True):
instance = super().save(commit=False)
instance.election = self.election
if commit:
instance.save()
return instance
class AddVotersForm(forms.Form):
voters_list = forms.CharField(widget=forms.Textarea, label='Emails') # explicitly no max_length here
def __init__(self, session, *args, **kwargs):
super().__init__(*args, **kwargs)
self.session = session
def save(self) -> List[Tuple[Voter, str]]:
voters_codes = [
Voter.from_data(email=email, session=self.session) for email in self.cleaned_data['voters_list']
]
self.session.managers.all().first().send_invite_bulk_threaded(voters_codes)
return voters_codes
def clean_voters_list(self):
lines = self.cleaned_data['voters_list'].splitlines()
emails = []
for line in lines:
if line == '':
continue
try:
validate_email(line)
except forms.ValidationError:
self.add_error('voters_list', f'{line} is not a valid email address')
if Voter.objects.filter(email=line, session=self.session).exists():
self.add_error('voters_list', f'a voter with email address {line} already exists')
emails.append(line)
if len(emails) != len(set(emails)):
raise forms.ValidationError('duplicate email address')
return emails
class AddTokensForm(forms.Form):
    """Create a batch of anonymous (token-based) voters for a session."""

    nr_anonymous_voters = forms.IntegerField(min_value=1, max_value=50, label='Number of Tokens:')

    def __init__(self, session, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.session = session

    def save(self) -> List[Tuple[Voter, str]]:
        # Anonymous voters carry no e-mail address; each call mints a token.
        count = self.cleaned_data['nr_anonymous_voters']
        return [Voter.from_data(session=self.session) for _ in range(count)]
class CSVUploaderForm(forms.Form):
csv_data = forms.FileField(label='CSV File')
def __init__(self, session, *args, **kwargs):
super().__init__(*args, **kwargs)
self.session = session
def clean_csv_data(self):
f = self.cleaned_data['csv_data'].file
data = {}
try:
with io.TextIOWrapper(f, encoding='utf-8') as text_file:
csv_reader = csv.DictReader(text_file)
if 'email' not in csv_reader.fieldnames or 'name' not in csv_reader.fieldnames:
raise forms.ValidationError('CSV file needs to have columns "email" and "name".')
for row in csv_reader:
if row['email']:
try:
validate_email(row['email'])
except forms.ValidationError:
self.add_error('csv_data', f'Invalid email {row["email"]}')
else:
row['email'] = None
if Voter.objects.filter(session=self.session, email=row['email']).exists():
self.add_error('csv_data', f'Voter with email address {row["email"]} already exists')
if row['email'] in data:
self.add_error('csv_data', f'Duplicate email in csv: {row["email"]}')
data[row['email']] = row['name']
except UnicodeDecodeError as e:
raise forms.ValidationError('File does not seem to be in CSV format.') from e
return data
def save(self):
voters_codes = [
Voter.from_data(session=self.session, email=email, name=name)
for email, name in self.cleaned_data['csv_data'].items()
]
self.session.managers.all().first().send_invite_bulk_threaded(voters_codes)
| true | true |
1c3ad11a0c2040419ccba925cd61776c686fbc7f | 646 | py | Python | hccf/utils/mathematics.py | kazarinov/cfdr | bf93428614af15440b60fb894097e94fa4efd168 | [
"MIT"
] | null | null | null | hccf/utils/mathematics.py | kazarinov/cfdr | bf93428614af15440b60fb894097e94fa4efd168 | [
"MIT"
] | null | null | null | hccf/utils/mathematics.py | kazarinov/cfdr | bf93428614af15440b60fb894097e94fa4efd168 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import math
import scipy as sp
def sigmoid(z):
    """Numerically stable logistic function ``1 / (1 + e^-z)``.

    The naive form overflows in ``math.exp(-z)`` for large negative ``z``
    (z <~ -710 raises OverflowError); splitting on the sign only ever
    exponentiates a non-positive value.

    :param z: real-valued input
    :return: value in [0, 1]
    """
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    # Equivalent form e^z / (1 + e^z) for negative z.
    ez = math.exp(z)
    return ez / (1.0 + ez)
def log_loss(act, pred):
    """Mean binary cross-entropy (logarithmic loss).

    Rewritten on top of NumPy: the previously used ``scipy.log`` /
    ``scipy.maximum`` / ``scipy.subtract`` NumPy aliases were deprecated and
    removed from SciPy.  Also accepts plain sequences, not only ndarrays
    (the old code called ``act.astype`` directly).

    :param act: sequence/array of actual labels in {0, 1}
    :param pred: sequence/array of predicted probabilities in [0, 1]
    :return: average negative log-likelihood of ``act`` under ``pred``
    """
    # Local import keeps module import time unchanged for callers that
    # never use this function.
    import numpy as np
    epsilon = 1e-15
    act = np.asarray(act, dtype=float)
    # Clip predictions away from 0 and 1 so the logarithms stay finite.
    pred = np.clip(np.asarray(pred, dtype=float), epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return ll * -1.0 / len(act)
def loglikelihood(shows, clicks):
    """Negative log-likelihood of observing ``clicks`` successes in ``shows`` trials.

    Degenerate cases (no shows, no clicks, or all clicks) yield 0.
    """
    if shows == 0 or clicks in (0, shows):
        return 0
    rate = float(clicks) / shows
    successes = clicks * math.log(rate)
    failures = (shows - clicks) * math.log(1 - rate)
    return -(successes + failures)
| 23.925926 | 91 | 0.582043 |
import math
import scipy as sp
def sigmoid(z):
    """Numerically stable logistic function ``1 / (1 + e^-z)``.

    The naive form overflows in ``math.exp(-z)`` for large negative ``z``
    (z <~ -710 raises OverflowError); splitting on the sign only ever
    exponentiates a non-positive value.
    """
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    # Equivalent form e^z / (1 + e^z) for negative z.
    ez = math.exp(z)
    return ez / (1.0 + ez)
def log_loss(act, pred):
    """Mean binary cross-entropy (logarithmic loss).

    Rewritten on top of NumPy: the previously used ``scipy.log`` /
    ``scipy.maximum`` / ``scipy.subtract`` NumPy aliases were deprecated and
    removed from SciPy.  Also accepts plain sequences, not only ndarrays.
    """
    # Local import keeps module import time unchanged for callers that
    # never use this function.
    import numpy as np
    epsilon = 1e-15
    act = np.asarray(act, dtype=float)
    # Clip predictions away from 0 and 1 so the logarithms stay finite.
    pred = np.clip(np.asarray(pred, dtype=float), epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return ll * -1.0 / len(act)
def loglikelihood(shows, clicks):
    """Negative log-likelihood of observing ``clicks`` successes in ``shows`` trials.

    Degenerate cases (no shows, no clicks, or all clicks) yield 0.
    """
    if shows == 0 or clicks in (0, shows):
        return 0
    rate = float(clicks) / shows
    successes = clicks * math.log(rate)
    failures = (shows - clicks) * math.log(1 - rate)
    return -(successes + failures)
| true | true |
1c3ad1bc9968553b109809c28e96e90c83fa686c | 137 | py | Python | beer_store_api/urls.py | gwoods22/beer-store-api | c21593734022718896720db916b73f0404840dc2 | [
"MIT"
] | 1 | 2020-09-10T16:56:56.000Z | 2020-09-10T16:56:56.000Z | beer_store_api/urls.py | gwoods22/beer-store-api | c21593734022718896720db916b73f0404840dc2 | [
"MIT"
] | null | null | null | beer_store_api/urls.py | gwoods22/beer-store-api | c21593734022718896720db916b73f0404840dc2 | [
"MIT"
] | 1 | 2020-09-20T17:47:07.000Z | 2020-09-20T17:47:07.000Z | from django.conf.urls import url, include
urlpatterns = [
url(r'', include('products.urls')),
url(r'', include('land.urls')),
]
| 19.571429 | 41 | 0.635036 | from django.conf.urls import url, include
urlpatterns = [
url(r'', include('products.urls')),
url(r'', include('land.urls')),
]
| true | true |
1c3ad27979f8587c4744196d0b2622701d53b174 | 4,444 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_scale_set_vm_profile.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_scale_set_vm_profile.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_scale_set_vm_profile.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetVMProfile(Model):
    """Describes a virtual machine scale set virtual machine profile.

    :param os_profile: Operating system settings for the scale set VMs.
    :type os_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetOSProfile
    :param storage_profile: Storage settings for the virtual machine disks.
    :type storage_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetStorageProfile
    :param network_profile: Network interface properties for the scale set
     VMs.
    :type network_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetNetworkProfile
    :param diagnostics_profile: Boot diagnostic settings state.
     Minimum api-version: 2015-06-15.
    :type diagnostics_profile:
     ~azure.mgmt.compute.v2019_03_01.models.DiagnosticsProfile
    :param extension_profile: Settings for extensions installed on VMs in
     the scale set.
    :type extension_profile:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtensionProfile
    :param license_type: Indicates the image or disk was licensed
     on-premises (Windows Server images only). Possible values:
     Windows_Client, Windows_Server. Cannot be updated after creation.
     Minimum api-version: 2015-06-15.
    :type license_type: str
    :param priority: Priority for the VMs in the scale set. Possible values
     include: 'Regular', 'Low'. Minimum api-version: 2017-10-30-preview.
    :type priority: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachinePriorityTypes
    :param eviction_policy: Eviction policy for VMs in a low priority scale
     set. Possible values include: 'Deallocate', 'Delete'. Minimum
     api-version: 2017-10-30-preview.
    :type eviction_policy: str or
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineEvictionPolicyTypes
    """

    # Wire-format serialization map consumed by msrest.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'str'},
        'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
        # Every attribute is optional; absent kwargs default to None.
        for name in ('os_profile', 'storage_profile', 'network_profile',
                     'diagnostics_profile', 'extension_profile',
                     'license_type', 'priority', 'eviction_policy'):
            setattr(self, name, kwargs.get(name, None))
| 54.864198 | 170 | 0.707021 |
from msrest.serialization import Model
class VirtualMachineScaleSetVMProfile(Model):
    """Describes a virtual machine scale set virtual machine profile."""

    # Wire-format serialization map consumed by msrest.
    _attribute_map = {
        'os_profile': {'key': 'osProfile', 'type': 'VirtualMachineScaleSetOSProfile'},
        'storage_profile': {'key': 'storageProfile', 'type': 'VirtualMachineScaleSetStorageProfile'},
        'network_profile': {'key': 'networkProfile', 'type': 'VirtualMachineScaleSetNetworkProfile'},
        'diagnostics_profile': {'key': 'diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'extension_profile': {'key': 'extensionProfile', 'type': 'VirtualMachineScaleSetExtensionProfile'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'str'},
        'eviction_policy': {'key': 'evictionPolicy', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetVMProfile, self).__init__(**kwargs)
        # Every attribute is optional; absent kwargs default to None.
        for name in ('os_profile', 'storage_profile', 'network_profile',
                     'diagnostics_profile', 'extension_profile',
                     'license_type', 'priority', 'eviction_policy'):
            setattr(self, name, kwargs.get(name, None))
| true | true |
1c3ad38252e97a57417ff208d102b970a5b32ee0 | 5,703 | py | Python | what_apps/cms/tests.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/cms/tests.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/cms/tests.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | from unittest import expectedFailure
from what_apps.cms.models import Question, QandA, QuestionOnForm, BooleanAnswer, \
ContentBlock
from what_apps.do.config import set_up as do_set_up
from what_apps.utility.tests import WHATTestCase, test_user_factory
from what_apps.cms.views import edit_content_block
from django.http import HttpRequest
class Blogging(WHATTestCase):
    """Integration tests for blog authoring (CMS content blocks) and display."""

    # URLs that must render successfully for anonymous visitors.
    success_urls = ['/blog/']
    # URLs that must redirect anonymous visitors to login.
    login_required_urls = ['/cms/edit_content_block/']

    def setUp(self):
        # Seed the 'do' app fixtures and create one test user ('password').
        do_set_up()
        self.users = test_user_factory(1)

    def test_blog_editor_200_logged_in(self):
        # The blog index is reachable for an authenticated user.
        self.client.login(username=self.users[0].username, password='password')
        response = self.client.get('/blog/')
        self.assertEqual(response.status_code, 200)

    def test_blog_submission_creates_blog_object(self):
        # Posting the editor form creates exactly one ContentBlock.
        self.assertEqual(ContentBlock.objects.count(), 0)
        self.client.login(username=self.users[0].username, password='password')
        post_response = self.client.post('/cms/edit_content_block/', {'headline': 'My really awesome blog post.',
                                                                      'content':'yep, this is the post.',
                                                                      'published':True,
                                                                      'tags': 'public,blog'})
        self.assertEqual(post_response.status_code, 200)
        self.assertEqual(ContentBlock.objects.count(), 1)
        # Hand the created post to the tests below that reuse this flow.
        return ContentBlock.objects.all()[0]

    def test_blog_submission_with_id_modifies_blog_object(self):
        # Re-posting with the object's id edits it in place (no new object).
        blog_post = self.test_blog_submission_creates_blog_object()
        post_response = self.client.post('/cms/edit_content_block/%s/' % blog_post.id,
                                         {'headline': 'My less awesome blog post.',
                                          'content':'yep, this is the post.',
                                          'published':True,
                                          'tags': 'public,blog'})
        self.assertEqual(post_response.status_code, 200)
        self.assertEqual(ContentBlock.objects.count(), 1)
        get_response = self.client.get('/cms/edit_content_block/%s/' % blog_post.id)
        self.assertEqual(get_response.status_code, 200)
        self.assertContains(get_response, 'My less awesome blog post.')

    def test_submitted_blog_appears_on_blog_page(self):
        blog_post = self.test_blog_submission_creates_blog_object()
        blog_front_page_response = self.client.get('/blog/')
        self.assertTrue(blog_post.content in blog_front_page_response.content)

    def test_submitted_blog_appears_on_permalink_page(self):
        blog_post = self.test_blog_submission_creates_blog_object()
        blog_url = blog_post.get_absolute_url()
        # assertPageLoadSuccess is a WHATTestCase helper; it returns the response.
        response = self.assertPageLoadSuccess(blog_url)
        self.assertTrue(blog_post.content in response.content)

    def test_bad_blog_title_404(self):
        self.assertStatusCode('/blog/this-post-definitely-does-not-exist/', 404)

    def test_for_two_blog_posts_that_the_content_of_one_does_not_appear_on_the_permalink_of_the_other(self):
        # Two independent posts must not leak content into each other's permalink.
        first_blog_post = self.test_blog_submission_creates_blog_object()
        self.client.login(username=self.users[0].username, password='password')
        second_headline = 'A second post - nothing to do with the first.'
        post_response = self.client.post('/cms/edit_content_block/', {'headline': second_headline, 'content':'Some other post for sure.', 'published':True, 'tags': 'public,blog'})
        second_blog_post = ContentBlock.objects.get(headline=second_headline)
        first_blog_post_permalink_response = self.assertPageLoadSuccess(first_blog_post.get_absolute_url())
        self.assertTrue(first_blog_post.content in first_blog_post_permalink_response.content)
        self.assertFalse(second_blog_post.content in first_blog_post_permalink_response.content)

    @expectedFailure
    def test_blog_permalink_url_raises_200(self):
        self.fail()

    @expectedFailure
    def test_blog_permalink_displays_proper_entry(self):
        self.fail()
class QuestionMakingTest(WHATTestCase):
    """Tests for creating questions, placing them on Q&A forms, and answering."""

    def test_new_question_with_boolean_answer(self):
        """A boolean-typed question can be created; return it for reuse."""
        question = Question.objects.create(name='what have i just named this question', answer_type=0)
        self.assertTrue(question)
        return question

    def test_that_q_and_a_can_contain_boolean_question(self):
        """A question can be attached to a Q&A form via the through model."""
        form = QandA.objects.create(name='the name for the question form')
        question = self.test_new_question_with_boolean_answer()
        # Through model places the question on the form.
        QuestionOnForm.objects.create(question=question, form=form, required=True)
        self.assertEqual(form.questions.count(), 1)
        return form, question

    def test_boolean_answer(self):
        """A False boolean answer is stored and read back as falsy."""
        user = test_user_factory(1)[0]
        form, question = self.test_that_q_and_a_can_contain_boolean_question()
        BooleanAnswer.objects.create(creator=user, question=question, application=form, answer=False)
        self.assertFalse(form.answers.all()[0].answer())

    @expectedFailure
    def test_new_question_with_text_answer(self):
        self.fail()

    @expectedFailure
    def test_template_has_question_name(self):
        self.client.get('/cms/form/')
        self.fail()
| 47.92437 | 180 | 0.667017 | from unittest import expectedFailure
from what_apps.cms.models import Question, QandA, QuestionOnForm, BooleanAnswer, \
ContentBlock
from what_apps.do.config import set_up as do_set_up
from what_apps.utility.tests import WHATTestCase, test_user_factory
from what_apps.cms.views import edit_content_block
from django.http import HttpRequest
class Blogging(WHATTestCase):
success_urls = ['/blog/']
login_required_urls = ['/cms/edit_content_block/']
def setUp(self):
do_set_up()
self.users = test_user_factory(1)
def test_blog_editor_200_logged_in(self):
self.client.login(username=self.users[0].username, password='password')
response = self.client.get('/blog/')
self.assertEqual(response.status_code, 200)
def test_blog_submission_creates_blog_object(self):
self.assertEqual(ContentBlock.objects.count(), 0)
self.client.login(username=self.users[0].username, password='password')
post_response = self.client.post('/cms/edit_content_block/', {'headline': 'My really awesome blog post.',
'content':'yep, this is the post.',
'published':True,
'tags': 'public,blog'})
self.assertEqual(post_response.status_code, 200)
self.assertEqual(ContentBlock.objects.count(), 1)
return ContentBlock.objects.all()[0]
def test_blog_submission_with_id_modifies_blog_object(self):
blog_post = self.test_blog_submission_creates_blog_object()
post_response = self.client.post('/cms/edit_content_block/%s/' % blog_post.id,
{'headline': 'My less awesome blog post.',
'content':'yep, this is the post.',
'published':True,
'tags': 'public,blog'})
self.assertEqual(post_response.status_code, 200)
self.assertEqual(ContentBlock.objects.count(), 1)
get_response = self.client.get('/cms/edit_content_block/%s/' % blog_post.id)
self.assertEqual(get_response.status_code, 200)
self.assertContains(get_response, 'My less awesome blog post.')
def test_submitted_blog_appears_on_blog_page(self):
blog_post = self.test_blog_submission_creates_blog_object()
blog_front_page_response = self.client.get('/blog/')
self.assertTrue(blog_post.content in blog_front_page_response.content)
def test_submitted_blog_appears_on_permalink_page(self):
blog_post = self.test_blog_submission_creates_blog_object()
blog_url = blog_post.get_absolute_url()
response = self.assertPageLoadSuccess(blog_url)
self.assertTrue(blog_post.content in response.content)
def test_bad_blog_title_404(self):
self.assertStatusCode('/blog/this-post-definitely-does-not-exist/', 404)
def test_for_two_blog_posts_that_the_content_of_one_does_not_appear_on_the_permalink_of_the_other(self):
first_blog_post = self.test_blog_submission_creates_blog_object()
self.client.login(username=self.users[0].username, password='password')
second_headline = 'A second post - nothing to do with the first.'
post_response = self.client.post('/cms/edit_content_block/', {'headline': second_headline, 'content':'Some other post for sure.', 'published':True, 'tags': 'public,blog'})
second_blog_post = ContentBlock.objects.get(headline=second_headline)
first_blog_post_permalink_response = self.assertPageLoadSuccess(first_blog_post.get_absolute_url())
self.assertTrue(first_blog_post.content in first_blog_post_permalink_response.content)
self.assertFalse(second_blog_post.content in first_blog_post_permalink_response.content)
    @expectedFailure
    def test_blog_permalink_url_raises_200(self):
        # Placeholder: intentionally failing until this scenario is implemented.
        self.fail()
    @expectedFailure
    def test_blog_permalink_displays_proper_entry(self):
        # Placeholder: intentionally failing until this scenario is implemented.
        self.fail()
class QuestionMakingTest(WHATTestCase):
    """Model-level tests for questions, Q&A forms, and boolean answers."""

    def test_new_question_with_boolean_answer(self):
        """A Question with answer_type 0 (boolean) can be created."""
        question = Question.objects.create(name='what have i just named this question', answer_type=0)
        self.assertTrue(question)
        # Returned so later tests can reuse the same question.
        return question

    def test_that_q_and_a_can_contain_boolean_question(self):
        """A QandA form can hold a required boolean question."""
        q_and_a = QandA.objects.create(name='the name for the question form')
        question = self.test_new_question_with_boolean_answer()
        QuestionOnForm.objects.create(question=question, form=q_and_a, required=True)
        self.assertEqual(q_and_a.questions.count(), 1)
        return q_and_a, question

    def test_boolean_answer(self):
        """A BooleanAnswer of False is readable back through the form's answers."""
        user = test_user_factory(1)[0]
        q_and_a, question = self.test_that_q_and_a_can_contain_boolean_question()
        # NOTE(review): `answer` is unused; the assertion re-reads the value
        # through q_and_a.answers, and `answer()` is presumably a method on the
        # answer model -- confirm against the model definition.
        answer = BooleanAnswer.objects.create(creator=user, question=question, application=q_and_a, answer=False)
        self.assertFalse(q_and_a.answers.all()[0].answer())

    @expectedFailure
    def test_new_question_with_text_answer(self):
        # Placeholder: text-answer support not implemented yet.
        self.fail()

    @expectedFailure
    def test_template_has_question_name(self):
        # Placeholder: renders the form page but asserts nothing yet.
        self.client.get('/cms/form/')
        self.fail()
| true | true |
1c3ad3a386b71cbb7c8f485ac55de1c09fcd9451 | 24,767 | py | Python | plexapi/client.py | atoy3731/python-plexapi | ec21c897b7b9087120efd51f67b75c9a8fda7a45 | [
"BSD-3-Clause"
] | null | null | null | plexapi/client.py | atoy3731/python-plexapi | ec21c897b7b9087120efd51f67b75c9a8fda7a45 | [
"BSD-3-Clause"
] | null | null | null | plexapi/client.py | atoy3731/python-plexapi | ec21c897b7b9087120efd51f67b75c9a8fda7a45 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from xml.etree import ElementTree
import requests
from plexapi import BASE_HEADERS, CONFIG, TIMEOUT, log, logfilter, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound, Unauthorized, Unsupported
from plexapi.playqueue import PlayQueue
from requests.status_codes import _codes as codes
DEFAULT_MTYPE = 'video'
@utils.registerPlexObject
class PlexClient(PlexObject):
""" Main class for interacting with a Plex client. This class can connect
directly to the client and control it or proxy commands through your
Plex Server. To better understand the Plex client API's read this page:
https://github.com/plexinc/plex-media-player/wiki/Remote-control-API
Parameters:
server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to (optional).
data (ElementTree): Response from PlexServer used to build this object (optional).
initpath (str): Path used to generate data.
baseurl (str): HTTP URL to connect dirrectly to this client.
token (str): X-Plex-Token used for authenication (optional).
session (:class:`~requests.Session`): requests.Session object if you want more control (optional).
timeout (int): timeout in seconds on initial connect to client (default config.TIMEOUT).
Attributes:
TAG (str): 'Player'
key (str): '/resources'
device (str): Best guess on the type of device this is (PS, iPhone, Linux, etc).
deviceClass (str): Device class (pc, phone, etc).
machineIdentifier (str): Unique ID for this device.
model (str): Unknown
platform (str): Unknown
platformVersion (str): Description
product (str): Client Product (Plex for iOS, etc).
protocol (str): Always seems ot be 'plex'.
protocolCapabilities (list<str>): List of client capabilities (navigation, playback,
timeline, mirror, playqueues).
protocolVersion (str): Protocol version (1, future proofing?)
server (:class:`~plexapi.server.PlexServer`): Server this client is connected to.
session (:class:`~requests.Session`): Session object used for connection.
state (str): Unknown
title (str): Name of this client (Johns iPhone, etc).
token (str): X-Plex-Token used for authenication
vendor (str): Unknown
version (str): Device version (4.6.1, etc).
_baseurl (str): HTTP address of the client.
_token (str): Token used to access this client.
_session (obj): Requests session object used to access this client.
_proxyThroughServer (bool): Set to True after calling
:func:`~plexapi.client.PlexClient.proxyThroughServer()` (default False).
"""
TAG = 'Player'
key = '/resources'
    def __init__(self, server=None, data=None, initpath=None, baseurl=None,
            token=None, connect=True, session=None, timeout=None):
        """ Build a client, falling back to config-file defaults when no
            connection details are given, and optionally connect immediately.
            See the class docstring for parameter details.
        """
        super(PlexClient, self).__init__(server, data, initpath)
        self._baseurl = baseurl.strip('/') if baseurl else None
        self._token = logfilter.add_secret(token)
        self._showSecrets = CONFIG.get('log.show_secrets', '').lower() == 'true'
        # Reuse the server's HTTP session when one is available.
        server_session = server._session if server else None
        self._session = session or server_session or requests.Session()
        self._proxyThroughServer = False
        self._commandId = 0
        self._last_call = 0
        # No connection details at all: fall back to the config file values.
        if not any([data, initpath, baseurl, token]):
            self._baseurl = CONFIG.get('auth.client_baseurl', 'http://localhost:32433')
            self._token = logfilter.add_secret(CONFIG.get('auth.client_token'))
        if connect and self._baseurl:
            self.connect(timeout=timeout)
def _nextCommandId(self):
self._commandId += 1
return self._commandId
def connect(self, timeout=None):
""" Alias of reload as any subsequent requests to this client will be
made directly to the device even if the object attributes were initially
populated from a PlexServer.
"""
if not self.key:
raise Unsupported('Cannot reload an object not built from a URL.')
self._initpath = self.key
data = self.query(self.key, timeout=timeout)
self._loadData(data[0])
return self
def reload(self):
""" Alias to self.connect(). """
return self.connect()
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.deviceClass = data.attrib.get('deviceClass')
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.product = data.attrib.get('product')
self.protocol = data.attrib.get('protocol')
self.protocolCapabilities = data.attrib.get('protocolCapabilities', '').split(',')
self.protocolVersion = data.attrib.get('protocolVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.title = data.attrib.get('title') or data.attrib.get('name')
# Active session details
# Since protocolCapabilities is missing from /sessions we cant really control this player without
# creating a client manually.
# Add this in next breaking release.
# if self._initpath == 'status/sessions':
self.device = data.attrib.get('device') # session
self.model = data.attrib.get('model') # session
self.state = data.attrib.get('state') # session
self.vendor = data.attrib.get('vendor') # session
self.version = data.attrib.get('version') # session
self.local = utils.cast(bool, data.attrib.get('local', 0))
self.address = data.attrib.get('address') # session
self.remotePublicAddress = data.attrib.get('remotePublicAddress')
self.userID = data.attrib.get('userID')
def _headers(self, **kwargs):
""" Returns a dict of all default headers for Client requests. """
headers = BASE_HEADERS
if self._token:
headers['X-Plex-Token'] = self._token
headers.update(kwargs)
return headers
    def proxyThroughServer(self, value=True, server=None):
        """ Tells this PlexClient instance to proxy all future commands through the PlexServer.
            Useful if you do not wish to connect directly to the Client device itself.

            Parameters:
                value (bool): Enable or disable proxying (optional, default True).
                server (:class:`~plexapi.server.PlexServer`): Optionally switch which
                    server to proxy through (optional).

            Raises:
                :class:`plexapi.exceptions.Unsupported`: Cannot use client proxy with unknown server.
        """
        if server:
            self._server = server
        # Enabling the proxy only makes sense when a server is known.
        if value is True and not self._server:
            raise Unsupported('Cannot use client proxy with unknown server.')
        self._proxyThroughServer = value
def query(self, path, method=None, headers=None, timeout=None, **kwargs):
""" Main method used to handle HTTPS requests to the Plex client. This method helps
by encoding the response to utf-8 and parsing the returned XML into and
ElementTree object. Returns None if no data exists in the response.
"""
url = self.url(path)
method = method or self._session.get
timeout = timeout or TIMEOUT
log.debug('%s %s', method.__name__.upper(), url)
headers = self._headers(**headers or {})
response = method(url, headers=headers, timeout=timeout, **kwargs)
if response.status_code not in (200, 201, 204):
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
message = '(%s) %s; %s %s' % (response.status_code, codename, response.url, errtext)
if response.status_code == 401:
raise Unauthorized(message)
elif response.status_code == 404:
raise NotFound(message)
else:
raise BadRequest(message)
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
    def sendCommand(self, command, proxy=None, **params):
        """ Convenience wrapper around :func:`~plexapi.client.PlexClient.query()` to more easily
            send simple commands to the client. Returns an ElementTree object containing
            the response.

            Parameters:
                command (str): Command to be sent in for format '<controller>/<command>'.
                proxy (bool): Set True to proxy this command through the PlexServer.
                **params (dict): Additional GET parameters to include with the command.

            Raises:
                :class:`plexapi.exceptions.Unsupported`: When we detect the client doesn't support this capability.
        """
        command = command.strip('/')
        controller = command.split('/')[0]
        headers = {'X-Plex-Target-Client-Identifier': self.machineIdentifier}
        # Only a debug warning: some clients understand commands they do not advertise.
        if controller not in self.protocolCapabilities:
            log.debug('Client %s doesnt support %s controller.'
                      'What your trying might not work' % (self.title, controller))
        # Proxy through the server when proxyThroughServer() was called, unless
        # the caller overrode it explicitly.
        proxy = self._proxyThroughServer if proxy is None else proxy
        query = self._server.query if proxy else self.query
        # Workaround for ptp. See https://github.com/pkkid/python-plexapi/issues/244
        # Poll the timeline first when we have not talked to these players recently.
        t = time.time()
        if t - self._last_call >= 80 and self.product in ('ptp', 'Plex Media Player'):
            url = '/player/timeline/poll?wait=0&commandID=%s' % self._nextCommandId()
            query(url, headers=headers)
            self._last_call = t
        params['commandID'] = self._nextCommandId()
        key = '/player/%s%s' % (command, utils.joinArgs(params))
        try:
            return query(key, headers=headers)
        except ElementTree.ParseError:
            # Workaround for players which don't return valid XML on successful commands
            #   - Plexamp, Plex for Android: `b'OK'`
            #   - Plex for Samsung: `b'<?xml version="1.0"?><Response code="200" status="OK">'`
            if self.product in (
                'Plexamp',
                'Plex for Android (TV)',
                'Plex for Android (Mobile)',
                'Plex for Samsung',
            ):
                return
            raise
def url(self, key, includeToken=False):
""" Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'.
"""
if not self._baseurl:
raise BadRequest('PlexClient object missing baseurl.')
if self._token and (includeToken or self._showSecrets):
delim = '&' if '?' in key else '?'
return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token)
return '%s%s' % (self._baseurl, key)
    # ---------------------
    # Navigation Commands
    # These commands navigate around the user-interface.

    def contextMenu(self):
        """ Open the context menu on the client. """
        self.sendCommand('navigation/contextMenu')

    def goBack(self):
        """ Navigate back one position. """
        self.sendCommand('navigation/back')

    def goToHome(self):
        """ Go directly to the home screen. """
        self.sendCommand('navigation/home')

    def goToMusic(self):
        """ Go directly to the playing music panel. """
        self.sendCommand('navigation/music')

    def moveDown(self):
        """ Move selection down a position. """
        self.sendCommand('navigation/moveDown')

    def moveLeft(self):
        """ Move selection left a position. """
        self.sendCommand('navigation/moveLeft')

    def moveRight(self):
        """ Move selection right a position. """
        self.sendCommand('navigation/moveRight')

    def moveUp(self):
        """ Move selection up a position. """
        self.sendCommand('navigation/moveUp')

    def nextLetter(self):
        """ Jump to the next letter in the alphabet. """
        self.sendCommand('navigation/nextLetter')

    def pageDown(self):
        """ Move selection down a full page. """
        self.sendCommand('navigation/pageDown')

    def pageUp(self):
        """ Move selection up a full page. """
        self.sendCommand('navigation/pageUp')

    def previousLetter(self):
        """ Jump to the previous letter in the alphabet. """
        self.sendCommand('navigation/previousLetter')

    def select(self):
        """ Select the element at the current position. """
        self.sendCommand('navigation/select')

    def toggleOSD(self):
        """ Toggle the on screen display during playback. """
        self.sendCommand('navigation/toggleOSD')
    def goToMedia(self, media, **params):
        """ Navigate directly to the specified media page.

            Parameters:
                media (:class:`~plexapi.media.Media`): Media object to navigate to.
                **params (dict): Additional GET parameters to include with the command.

            Raises:
                :class:`plexapi.exceptions.Unsupported`: When no PlexServer specified in this object.
        """
        if not self._server:
            raise Unsupported('A server must be specified before using this command.')
        # _baseurl is '<protocol>://<host>:<port>', so split(':') yields
        # [protocol, '//host', port].
        server_url = media._server._baseurl.split(':')
        self.sendCommand('mirror/details', **dict({
            'machineIdentifier': self._server.machineIdentifier,
            'address': server_url[1].strip('/'),
            'port': server_url[-1],
            'key': media.key,
            'protocol': server_url[0],
            'token': media._server.createToken()
        }, **params))
    # -------------------
    # Playback Commands
    # Most of the playback commands take a mandatory mtype {'music','photo','video'} argument,
    # to specify which media type to apply the command to, (except for playMedia). This
    # is in case there are multiple things happening (e.g. music in the background, photo
    # slideshow in the foreground).

    def pause(self, mtype=DEFAULT_MTYPE):
        """ Pause the currently playing media type.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/pause', type=mtype)

    def play(self, mtype=DEFAULT_MTYPE):
        """ Start playback for the specified media type.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/play', type=mtype)

    def refreshPlayQueue(self, playQueueID, mtype=DEFAULT_MTYPE):
        """ Refresh the specified Playqueue.

            Parameters:
                playQueueID (str): Playqueue ID.
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand(
            'playback/refreshPlayQueue', playQueueID=playQueueID, type=mtype)

    def seekTo(self, offset, mtype=DEFAULT_MTYPE):
        """ Seek to the specified offset (ms) during playback.

            Parameters:
                offset (int): Position to seek to (milliseconds).
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/seekTo', offset=offset, type=mtype)

    def skipNext(self, mtype=DEFAULT_MTYPE):
        """ Skip to the next playback item.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/skipNext', type=mtype)

    def skipPrevious(self, mtype=DEFAULT_MTYPE):
        """ Skip to the previous playback item.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/skipPrevious', type=mtype)

    def skipTo(self, key, mtype=DEFAULT_MTYPE):
        """ Skip to the playback item with the specified key.

            Parameters:
                key (str): Key of the media item to skip to.
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/skipTo', key=key, type=mtype)

    def stepBack(self, mtype=DEFAULT_MTYPE):
        """ Step backward a chunk of time in the current playback item.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/stepBack', type=mtype)

    def stepForward(self, mtype=DEFAULT_MTYPE):
        """ Step forward a chunk of time in the current playback item.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/stepForward', type=mtype)

    def stop(self, mtype=DEFAULT_MTYPE):
        """ Stop the currently playing item.

            Parameters:
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.sendCommand('playback/stop', type=mtype)

    def setRepeat(self, repeat, mtype=DEFAULT_MTYPE):
        """ Set the repeat mode for the specified playback items.

            Parameters:
                repeat (int): Repeat mode (0=off, 1=repeatone, 2=repeatall).
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setParameters(repeat=repeat, mtype=mtype)

    def setShuffle(self, shuffle, mtype=DEFAULT_MTYPE):
        """ Set the shuffle mode for the specified playback items.

            Parameters:
                shuffle (int): Shuffle mode (0=off, 1=on)
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setParameters(shuffle=shuffle, mtype=mtype)

    def setVolume(self, volume, mtype=DEFAULT_MTYPE):
        """ Set the volume level for the current playback item.

            Parameters:
                volume (int): Volume level (0-100).
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setParameters(volume=volume, mtype=mtype)

    def setAudioStream(self, audioStreamID, mtype=DEFAULT_MTYPE):
        """ Select the audio stream for the current playback item (only video).

            Parameters:
                audioStreamID (str): ID of the audio stream from the media object.
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setStreams(audioStreamID=audioStreamID, mtype=mtype)

    def setSubtitleStream(self, subtitleStreamID, mtype=DEFAULT_MTYPE):
        """ Select the subtitle stream for the current playback item (only video).

            Parameters:
                subtitleStreamID (str): ID of the subtitle stream from the media object.
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setStreams(subtitleStreamID=subtitleStreamID, mtype=mtype)

    def setVideoStream(self, videoStreamID, mtype=DEFAULT_MTYPE):
        """ Select the video stream for the current playback item (only video).

            Parameters:
                videoStreamID (str): ID of the video stream from the media object.
                mtype (str): Media type to take action against (music, photo, video).
        """
        self.setStreams(videoStreamID=videoStreamID, mtype=mtype)
    def playMedia(self, media, offset=0, **params):
        """ Start playback of the specified media item.

            Parameters:
                media (:class:`~plexapi.media.Media`): Media item to be played back
                    (movie, music, photo, playlist, playqueue).
                offset (int): Number of milliseconds at which to start playing with zero
                    representing the beginning (default 0).
                **params (dict): Optional additional parameters to include in the playback request. See
                    also: https://github.com/plexinc/plex-media-player/wiki/Remote-control-API#modified-commands

            Raises:
                :class:`plexapi.exceptions.Unsupported`: When no PlexServer specified in this object.
        """
        if not self._server:
            raise Unsupported('A server must be specified before using this command.')
        # _baseurl is '<protocol>://<host>:<port>'; split(':') gives
        # [protocol, '//host', port].
        server_url = media._server._baseurl.split(':')
        server_port = server_url[-1].strip('/')

        # Work out the media type the player expects.
        if hasattr(media, "playlistType"):
            mediatype = media.playlistType
        else:
            if isinstance(media, PlayQueue):
                mediatype = media.items[0].listType
            else:
                mediatype = media.listType

        # mediatype must be in ["video", "music", "photo"]
        if mediatype == "audio":
            mediatype = "music"

        # Subscribe to the server's timeline first so the player reports back.
        if self.product != 'OpenPHT':
            try:
                self.sendCommand('timeline/subscribe', port=server_port, protocol='http')
            except:  # noqa: E722
                # some clients dont need or like this and raises http 400.
                # We want to include the exception in the log,
                # but it might still work so we swallow it.
                log.exception('%s failed to subscribe ' % self.title)

        playqueue = media if isinstance(media, PlayQueue) else self._server.createPlayQueue(media)
        self.sendCommand('playback/playMedia', **dict({
            'machineIdentifier': self._server.machineIdentifier,
            'address': server_url[1].strip('/'),
            'port': server_port,
            'offset': offset,
            'key': media.key,
            'token': media._server.createToken(),
            'type': mediatype,
            'containerKey': '/playQueues/%s?window=100&own=1' % playqueue.playQueueID,
        }, **params))
def setParameters(self, volume=None, shuffle=None, repeat=None, mtype=DEFAULT_MTYPE):
""" Set multiple playback parameters at once.
Parameters:
volume (int): Volume level (0-100; optional).
shuffle (int): Shuffle mode (0=off, 1=on; optional).
repeat (int): Repeat mode (0=off, 1=repeatone, 2=repeatall; optional).
mtype (str): Media type to take action against (optional music, photo, video).
"""
params = {}
if repeat is not None:
params['repeat'] = repeat
if shuffle is not None:
params['shuffle'] = shuffle
if volume is not None:
params['volume'] = volume
if mtype is not None:
params['type'] = mtype
self.sendCommand('playback/setParameters', **params)
def setStreams(self, audioStreamID=None, subtitleStreamID=None, videoStreamID=None, mtype=DEFAULT_MTYPE):
""" Select multiple playback streams at once.
Parameters:
audioStreamID (str): ID of the audio stream from the media object.
subtitleStreamID (str): ID of the subtitle stream from the media object.
videoStreamID (str): ID of the video stream from the media object.
mtype (str): Media type to take action against (optional music, photo, video).
"""
params = {}
if audioStreamID is not None:
params['audioStreamID'] = audioStreamID
if subtitleStreamID is not None:
params['subtitleStreamID'] = subtitleStreamID
if videoStreamID is not None:
params['videoStreamID'] = videoStreamID
if mtype is not None:
params['type'] = mtype
self.sendCommand('playback/setStreams', **params)
    # -------------------
    # Timeline Commands

    def timeline(self, wait=1):
        """ Poll the current timeline and return the XML response. """
        return self.sendCommand('timeline/poll', wait=wait)

    def isPlayingMedia(self, includePaused=False):
        """ Returns True if any media is currently playing.

            Parameters:
                includePaused (bool): Set True to treat currently paused items
                    as playing (optional; default False).
        """
        for mediatype in self.timeline(wait=0):
            if mediatype.get('state') == 'playing':
                return True
            if includePaused and mediatype.get('state') == 'paused':
                return True
        return False
| 43.603873 | 115 | 0.610005 |
import time
from xml.etree import ElementTree
import requests
from plexapi import BASE_HEADERS, CONFIG, TIMEOUT, log, logfilter, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound, Unauthorized, Unsupported
from plexapi.playqueue import PlayQueue
from requests.status_codes import _codes as codes
# Media type used when a playback command is issued without an explicit mtype.
DEFAULT_MTYPE = 'video'


@utils.registerPlexObject
class PlexClient(PlexObject):
    """ Main class for interacting with a Plex client, either directly or by
        proxying commands through a PlexServer.
    """
    TAG = 'Player'
    key = '/resources'

    def __init__(self, server=None, data=None, initpath=None, baseurl=None,
            token=None, connect=True, session=None, timeout=None):
        """ Build a client, falling back to config-file defaults, and
            optionally connect immediately. """
        super(PlexClient, self).__init__(server, data, initpath)
        self._baseurl = baseurl.strip('/') if baseurl else None
        self._token = logfilter.add_secret(token)
        self._showSecrets = CONFIG.get('log.show_secrets', '').lower() == 'true'
        # Reuse the server's HTTP session when one is available.
        server_session = server._session if server else None
        self._session = session or server_session or requests.Session()
        self._proxyThroughServer = False
        self._commandId = 0
        self._last_call = 0
        # No connection details at all: fall back to the config file values.
        if not any([data, initpath, baseurl, token]):
            self._baseurl = CONFIG.get('auth.client_baseurl', 'http://localhost:32433')
            self._token = logfilter.add_secret(CONFIG.get('auth.client_token'))
        if connect and self._baseurl:
            self.connect(timeout=timeout)

    def _nextCommandId(self):
        """ Return the next monotonically increasing commandID. """
        self._commandId += 1
        return self._commandId

    def connect(self, timeout=None):
        """ Re-query the client directly and refresh this object's attributes. """
        if not self.key:
            raise Unsupported('Cannot reload an object not built from a URL.')
        self._initpath = self.key
        data = self.query(self.key, timeout=timeout)
        self._loadData(data[0])
        return self

    def reload(self):
        """ Alias to self.connect(). """
        return self.connect()

    def _loadData(self, data):
        """ Populate attribute values from a Plex XML <Player> element. """
        self._data = data
        self.deviceClass = data.attrib.get('deviceClass')
        self.machineIdentifier = data.attrib.get('machineIdentifier')
        self.product = data.attrib.get('product')
        self.protocol = data.attrib.get('protocol')
        self.protocolCapabilities = data.attrib.get('protocolCapabilities', '').split(',')
        self.protocolVersion = data.attrib.get('protocolVersion')
        self.platform = data.attrib.get('platform')
        self.platformVersion = data.attrib.get('platformVersion')
        self.title = data.attrib.get('title') or data.attrib.get('name')
        # The attributes below are only populated for active sessions.
        self.device = data.attrib.get('device')
        self.model = data.attrib.get('model')
        self.state = data.attrib.get('state')
        self.vendor = data.attrib.get('vendor')
        self.version = data.attrib.get('version')
        self.local = utils.cast(bool, data.attrib.get('local', 0))
        self.address = data.attrib.get('address')
        self.remotePublicAddress = data.attrib.get('remotePublicAddress')
        self.userID = data.attrib.get('userID')

    def _headers(self, **kwargs):
        """ Return the default headers for client requests merged with kwargs. """
        # NOTE(review): assigning headers = BASE_HEADERS and then mutating it
        # modifies the shared module-level dict; this should operate on a copy.
        headers = BASE_HEADERS
        if self._token:
            headers['X-Plex-Token'] = self._token
        headers.update(kwargs)
        return headers

    def proxyThroughServer(self, value=True, server=None):
        """ Route all future commands through the PlexServer instead of the
            client device; raises Unsupported when no server is known. """
        if server:
            self._server = server
        if value is True and not self._server:
            raise Unsupported('Cannot use client proxy with unknown server.')
        self._proxyThroughServer = value

    def query(self, path, method=None, headers=None, timeout=None, **kwargs):
        """ Send an HTTP request to the client and return the parsed XML, or
            None when the response body is empty. Raises Unauthorized/NotFound/
            BadRequest on non-2xx status codes. """
        url = self.url(path)
        method = method or self._session.get
        timeout = timeout or TIMEOUT
        log.debug('%s %s', method.__name__.upper(), url)
        headers = self._headers(**headers or {})
        response = method(url, headers=headers, timeout=timeout, **kwargs)
        if response.status_code not in (200, 201, 204):
            codename = codes.get(response.status_code)[0]
            errtext = response.text.replace('\n', ' ')
            message = '(%s) %s; %s %s' % (response.status_code, codename, response.url, errtext)
            if response.status_code == 401:
                raise Unauthorized(message)
            elif response.status_code == 404:
                raise NotFound(message)
            else:
                raise BadRequest(message)
        data = response.text.encode('utf8')
        return ElementTree.fromstring(data) if data.strip() else None

    def sendCommand(self, command, proxy=None, **params):
        """ Send a '<controller>/<command>' command to the client, optionally
            proxied through the PlexServer. """
        command = command.strip('/')
        controller = command.split('/')[0]
        headers = {'X-Plex-Target-Client-Identifier': self.machineIdentifier}
        if controller not in self.protocolCapabilities:
            log.debug('Client %s doesnt support %s controller.'
                      'What your trying might not work' % (self.title, controller))
        proxy = self._proxyThroughServer if proxy is None else proxy
        query = self._server.query if proxy else self.query
        # Poll the timeline first when we have not talked to ptp / Plex Media
        # Player recently, otherwise the command may be ignored.
        t = time.time()
        if t - self._last_call >= 80 and self.product in ('ptp', 'Plex Media Player'):
            url = '/player/timeline/poll?wait=0&commandID=%s' % self._nextCommandId()
            query(url, headers=headers)
            self._last_call = t
        params['commandID'] = self._nextCommandId()
        key = '/player/%s%s' % (command, utils.joinArgs(params))
        try:
            return query(key, headers=headers)
        except ElementTree.ParseError:
            # Some players return non-XML bodies on successful commands:
            # - Plexamp, Plex for Android: `b'OK'`
            # - Plex for Samsung: `b'<?xml version="1.0"?><Response code="200" status="OK">'`
            if self.product in (
                'Plexamp',
                'Plex for Android (TV)',
                'Plex for Android (Mobile)',
                'Plex for Samsung',
            ):
                return
            raise

    def url(self, key, includeToken=False):
        """ Return the absolute URL for *key*, appending X-Plex-Token when
            includeToken is True or log.show_secrets is enabled. """
        if not self._baseurl:
            raise BadRequest('PlexClient object missing baseurl.')
        if self._token and (includeToken or self._showSecrets):
            delim = '&' if '?' in key else '?'
            return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token)
        return '%s%s' % (self._baseurl, key)

    # ---------------------
    # Navigation Commands
    # These commands navigate around the user-interface.

    def contextMenu(self):
        """ Open the context menu on the client. """
        self.sendCommand('navigation/contextMenu')

    def goBack(self):
        """ Navigate back one position. """
        self.sendCommand('navigation/back')

    def goToHome(self):
        """ Go directly to the home screen. """
        self.sendCommand('navigation/home')

    def goToMusic(self):
        """ Go directly to the playing music panel. """
        self.sendCommand('navigation/music')

    def moveDown(self):
        """ Move selection down a position. """
        self.sendCommand('navigation/moveDown')

    def moveLeft(self):
        """ Move selection left a position. """
        self.sendCommand('navigation/moveLeft')

    def moveRight(self):
        """ Move selection right a position. """
        self.sendCommand('navigation/moveRight')

    def moveUp(self):
        """ Move selection up a position. """
        self.sendCommand('navigation/moveUp')

    def nextLetter(self):
        """ Jump to the next letter in the alphabet. """
        self.sendCommand('navigation/nextLetter')

    def pageDown(self):
        """ Move selection down a full page. """
        self.sendCommand('navigation/pageDown')

    def pageUp(self):
        """ Move selection up a full page. """
        self.sendCommand('navigation/pageUp')

    def previousLetter(self):
        """ Jump to the previous letter in the alphabet. """
        self.sendCommand('navigation/previousLetter')

    def select(self):
        """ Select the element at the current position. """
        self.sendCommand('navigation/select')

    def toggleOSD(self):
        """ Toggle the on screen display during playback. """
        self.sendCommand('navigation/toggleOSD')

    def goToMedia(self, media, **params):
        """ Navigate directly to the specified media page; raises Unsupported
            when no PlexServer is known. """
        if not self._server:
            raise Unsupported('A server must be specified before using this command.')
        # _baseurl is '<protocol>://<host>:<port>'.
        server_url = media._server._baseurl.split(':')
        self.sendCommand('mirror/details', **dict({
            'machineIdentifier': self._server.machineIdentifier,
            'address': server_url[1].strip('/'),
            'port': server_url[-1],
            'key': media.key,
            'protocol': server_url[0],
            'token': media._server.createToken()
        }, **params))

    # -------------------
    # Playback Commands
    # Most of the playback commands take a mandatory mtype {'music','photo','video'} argument,
    # to specify which media type to apply the command to, (except for playMedia). This
    # is in case there are multiple things happening (e.g. music in the background, photo
    # slideshow in the foreground).

    def pause(self, mtype=DEFAULT_MTYPE):
        """ Pause the currently playing media type. """
        self.sendCommand('playback/pause', type=mtype)

    def play(self, mtype=DEFAULT_MTYPE):
        """ Start playback for the specified media type. """
        self.sendCommand('playback/play', type=mtype)

    def refreshPlayQueue(self, playQueueID, mtype=DEFAULT_MTYPE):
        """ Refresh the specified Playqueue. """
        self.sendCommand(
            'playback/refreshPlayQueue', playQueueID=playQueueID, type=mtype)

    def seekTo(self, offset, mtype=DEFAULT_MTYPE):
        """ Seek to the specified offset (ms) during playback. """
        self.sendCommand('playback/seekTo', offset=offset, type=mtype)

    def skipNext(self, mtype=DEFAULT_MTYPE):
        """ Skip to the next playback item. """
        self.sendCommand('playback/skipNext', type=mtype)

    def skipPrevious(self, mtype=DEFAULT_MTYPE):
        """ Skip to the previous playback item. """
        self.sendCommand('playback/skipPrevious', type=mtype)

    def skipTo(self, key, mtype=DEFAULT_MTYPE):
        """ Skip to the playback item with the specified key. """
        self.sendCommand('playback/skipTo', key=key, type=mtype)

    def stepBack(self, mtype=DEFAULT_MTYPE):
        """ Step backward a chunk of time in the current playback item. """
        self.sendCommand('playback/stepBack', type=mtype)

    def stepForward(self, mtype=DEFAULT_MTYPE):
        """ Step forward a chunk of time in the current playback item. """
        self.sendCommand('playback/stepForward', type=mtype)

    def stop(self, mtype=DEFAULT_MTYPE):
        """ Stop the currently playing item. """
        self.sendCommand('playback/stop', type=mtype)

    def setRepeat(self, repeat, mtype=DEFAULT_MTYPE):
        """ Set repeat mode (0=off, 1=repeatone, 2=repeatall). """
        self.setParameters(repeat=repeat, mtype=mtype)

    def setShuffle(self, shuffle, mtype=DEFAULT_MTYPE):
        """ Set shuffle mode (0=off, 1=on). """
        self.setParameters(shuffle=shuffle, mtype=mtype)

    def setVolume(self, volume, mtype=DEFAULT_MTYPE):
        """ Set the volume level (0-100) for the current playback item. """
        self.setParameters(volume=volume, mtype=mtype)

    def setAudioStream(self, audioStreamID, mtype=DEFAULT_MTYPE):
        """ Select the audio stream for the current playback item (only video). """
        self.setStreams(audioStreamID=audioStreamID, mtype=mtype)

    def setSubtitleStream(self, subtitleStreamID, mtype=DEFAULT_MTYPE):
        """ Select the subtitle stream for the current playback item (only video). """
        self.setStreams(subtitleStreamID=subtitleStreamID, mtype=mtype)

    def setVideoStream(self, videoStreamID, mtype=DEFAULT_MTYPE):
        """ Select the video stream for the current playback item (only video). """
        self.setStreams(videoStreamID=videoStreamID, mtype=mtype)

    def playMedia(self, media, offset=0, **params):
        """ Start playback of the specified media item at the given offset (ms);
            raises Unsupported when no PlexServer is known. """
        if not self._server:
            raise Unsupported('A server must be specified before using this command.')
        server_url = media._server._baseurl.split(':')
        server_port = server_url[-1].strip('/')

        if hasattr(media, "playlistType"):
            mediatype = media.playlistType
        else:
            if isinstance(media, PlayQueue):
                mediatype = media.items[0].listType
            else:
                mediatype = media.listType

        # mediatype must be in ["video", "music", "photo"]
        if mediatype == "audio":
            mediatype = "music"

        if self.product != 'OpenPHT':
            try:
                self.sendCommand('timeline/subscribe', port=server_port, protocol='http')
            except:  # noqa: E722
                # some clients dont need or like this and raises http 400.
                # We want to include the exception in the log,
                # but it might still work so we swallow it.
                log.exception('%s failed to subscribe ' % self.title)

        playqueue = media if isinstance(media, PlayQueue) else self._server.createPlayQueue(media)
        self.sendCommand('playback/playMedia', **dict({
            'machineIdentifier': self._server.machineIdentifier,
            'address': server_url[1].strip('/'),
            'port': server_port,
            'offset': offset,
            'key': media.key,
            'token': media._server.createToken(),
            'type': mediatype,
            'containerKey': '/playQueues/%s?window=100&own=1' % playqueue.playQueueID,
        }, **params))

    def setParameters(self, volume=None, shuffle=None, repeat=None, mtype=DEFAULT_MTYPE):
        """ Set several playback parameters (volume, shuffle, repeat) at once. """
        params = {}
        if repeat is not None:
            params['repeat'] = repeat
        if shuffle is not None:
            params['shuffle'] = shuffle
        if volume is not None:
            params['volume'] = volume
        if mtype is not None:
            params['type'] = mtype
        self.sendCommand('playback/setParameters', **params)

    def setStreams(self, audioStreamID=None, subtitleStreamID=None, videoStreamID=None, mtype=DEFAULT_MTYPE):
        """ Select several playback streams at once. """
        params = {}
        if audioStreamID is not None:
            params['audioStreamID'] = audioStreamID
        if subtitleStreamID is not None:
            params['subtitleStreamID'] = subtitleStreamID
        if videoStreamID is not None:
            params['videoStreamID'] = videoStreamID
        if mtype is not None:
            params['type'] = mtype
        self.sendCommand('playback/setStreams', **params)

    # -------------------
    # Timeline Commands

    def timeline(self, wait=1):
        """ Poll the current timeline and return the XML response. """
        return self.sendCommand('timeline/poll', wait=wait)

    def isPlayingMedia(self, includePaused=False):
        """ Return True when any media is playing (or paused, when
            includePaused is True; default False). """
        for mediatype in self.timeline(wait=0):
            if mediatype.get('state') == 'playing':
                return True
            if includePaused and mediatype.get('state') == 'paused':
                return True
        return False
| true | true |
1c3ad4111acc4e4d42b72a16a85a190ffbf1d829 | 1,213 | py | Python | src/Routes/v1_message.py | TheMrAnderson/Harbinger | a008226f790da62b8436d762d76c8900763a8f76 | [
"Apache-2.0"
] | null | null | null | src/Routes/v1_message.py | TheMrAnderson/Harbinger | a008226f790da62b8436d762d76c8900763a8f76 | [
"Apache-2.0"
] | null | null | null | src/Routes/v1_message.py | TheMrAnderson/Harbinger | a008226f790da62b8436d762d76c8900763a8f76 | [
"Apache-2.0"
] | null | null | null | from flask import request
from flask_restplus import Resource, Namespace, fields
ns_messages = Namespace('Messages', description='Handles message actions')
m_model = ns_messages.model('harbinger_message',
{
'topic': fields.String(description='Topic name'),
'date_added': fields.DateTime(description=''),
'payload': fields.String(desciption='The body of the message for the consumer to do something with'),
'expiration_date': fields.DateTime(description='')
})
@ns_messages.route('')
class HarbingerMessageList(Resource):
    """Collection endpoint for Harbinger messages."""

    @ns_messages.doc(responses={200: 'Success'})
    @ns_messages.doc(responses={404: 'Newer message for topic not found'})
    @ns_messages.doc(responses={500: 'Server error'})
    def get(self):
        """Returns list of newer messages for the provided topic"""
        # TODO: Flesh out. Placeholder currently replies with a not-found
        # message body; should query storage for messages newer than the
        # caller's cursor once implemented.
        return 'Newer message for topic not found'

    @ns_messages.doc(responses={201: 'Message created'})
    @ns_messages.doc(responses={500: 'Server error'})
    @ns_messages.expect(m_model)
    def post(self):
        """Adds new message"""
        topic = request.json['topic']
        date_added = request.json['date_added']
        expiration_date = request.json['expiration_date']
        payload = request.json['payload']
        # TODO: Flesh out — persist (topic, date_added, expiration_date, payload).
        # Bug fix: the previous `return 201` sent the integer 201 as the
        # response *body* with an implicit 200 status; return it as the
        # HTTP status code instead.
        return None, 201
| 33.694444 | 102 | 0.741138 | from flask import request
from flask_restplus import Resource, Namespace, fields
ns_messages = Namespace('Messages', description='Handles message actions')
m_model = ns_messages.model('harbinger_message',
{
'topic': fields.String(description='Topic name'),
'date_added': fields.DateTime(description=''),
'payload': fields.String(desciption='The body of the message for the consumer to do something with'),
'expiration_date': fields.DateTime(description='')
})
@ns_messages.route('')
class HarbingerMessageList(Resource):
@ns_messages.doc(responses={200: 'Success'})
@ns_messages.doc(responses={404: 'Newer message for topic not found'})
@ns_messages.doc(responses={500: 'Server error'})
def get(self):
return 'Newer message for topic not found'
@ns_messages.doc(responses={201: 'Message created'})
@ns_messages.doc(responses={500: 'Server error'})
@ns_messages.expect(m_model)
def post(self):
topic = request.json['topic']
date_added = request.json['date_added']
expiration_date = request.json['expiration_date']
payload = request.json['payload']
return 201
| true | true |
1c3ad43033cb8388d598393ffe6e70100da87d89 | 30,552 | py | Python | scipy/optimize/tests/test_least_squares.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 11 | 2020-06-28T04:30:26.000Z | 2022-03-26T08:40:47.000Z | scipy/optimize/tests/test_least_squares.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 44 | 2019-06-27T15:56:14.000Z | 2022-03-15T22:21:10.000Z | scipy/optimize/tests/test_least_squares.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal, suppress_warnings)
from pytest import raises as assert_raises
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
    """Scalar residual (x - a)**2 + 5 with minimum value 5 at x = a."""
    shifted = x - a
    return shifted * shifted + 5.0
def jac_trivial(x, a=0.0):
    """Derivative of ``fun_trivial`` with respect to x."""
    return (x - a) * 2
def fun_2d_trivial(x):
    """Identity residual in two variables: r(x) = (x0, x1)."""
    return np.array([x[i] for i in (0, 1)])
def jac_2d_trivial(x):
    """Jacobian of the 2-D identity residual: the 2x2 identity matrix."""
    return np.eye(2)
def fun_rosenbrock(x):
    """Residual vector of the 2-D Rosenbrock problem; zero at x = (1, 1)."""
    a, b = x[0], x[1]
    return np.array([10 * (b - a**2), (1 - a)])
def jac_rosenbrock(x):
    """Analytic 2x2 Jacobian of ``fun_rosenbrock``."""
    top = [-20 * x[0], 10]
    bottom = [-1, 0]
    return np.array([top, bottom])
def jac_rosenbrock_bad_dim(x):
    """Deliberately wrong Jacobian: an extra all-zero third row (3x2 vs 2x2)."""
    rows = [[-20 * x[0], 10],
            [-1, 0],
            [0.0, 0.0]]
    return np.array(rows)
def fun_rosenbrock_cropped(x):
    """First component only of the Rosenbrock residual (m < n case)."""
    residuals = fun_rosenbrock(x)
    return residuals[0]
def jac_rosenbrock_cropped(x):
    """First row only of the Rosenbrock Jacobian (matches fun_rosenbrock_cropped)."""
    full = jac_rosenbrock(x)
    return full[0]
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
    """Deliberately malformed residual: stacks x, x**2 and x**3."""
    powers = (x, x**2, x**3)
    return np.array(powers)
def jac_wrong_dimensions(x, a=0.0):
    """Deliberately malformed Jacobian: adds a bogus third dimension."""
    flat = jac_trivial(x, a=a)
    return np.atleast_3d(flat)
def fun_bvp(x):
    """Residuals of a discretized 2-D boundary value problem.

    *x* is a flattened n-by-n interior grid; the grid is embedded in a
    zero-padded (n+2)-by-(n+2) array and a five-point Laplacian stencil
    plus the nonlinearity x**3 - 4*x is applied.
    """
    side = int(np.sqrt(x.shape[0]))
    grid = x.reshape((side, side))
    padded = np.zeros((side + 2, side + 2))
    padded[1:-1, 1:-1] = grid
    neighbors = (padded[:-2, 1:-1] + padded[2:, 1:-1]
                 + padded[1:-1, :-2] + padded[1:-1, 2:])
    residual = neighbors - 4 * grid + grid**3
    return residual.ravel()
class BroydenTridiagonal:
    """Broyden's tridiagonal residual with randomized bounds and start point.

    ``mode`` selects how the Jacobian is exposed to the solver:
    'sparse' (lil_matrix plus a sparsity pattern), 'operator'
    (LinearOperator wrapper), or 'dense' (plain ndarray).
    """
    def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)
        self.n = n
        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)
        # Jitter the bounds and start point, then push x0 strictly inside.
        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)
        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
        if mode == 'sparse':
            # Tridiagonal sparsity pattern: main, sub- and super-diagonals.
            self.sparsity = lil_matrix((n, n), dtype=int)
            i = np.arange(n)
            self.sparsity[i, i] = 1
            i = np.arange(1, n)
            self.sparsity[i, i - 1] = 1
            i = np.arange(n - 1)
            self.sparsity[i, i + 1] = 1
            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)
    def fun(self, x):
        """Residual f_i = (3 - x_i) x_i + 1 - x_{i-1} - 2 x_{i+1}."""
        f = (3 - x) * x + 1
        f[1:] -= x[:-1]
        f[:-1] -= 2 * x[1:]
        return f
    def _jac(self, x):
        """Analytic tridiagonal Jacobian as a lil_matrix."""
        J = lil_matrix((self.n, self.n))
        i = np.arange(self.n)
        J[i, i] = 3 - 2 * x
        i = np.arange(1, self.n)
        J[i, i - 1] = -1
        i = np.arange(self.n - 1)
        J[i, i + 1] = -2
        return J
class ExponentialFittingProblem:
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise, with a configurable number of outliers."""
    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        np.random.seed(random_seed)
        self.m = n_points       # number of data points (residuals)
        self.n = 2              # number of parameters (a, b)
        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)
        self.y = a + np.exp(b * self.x)
        self.y += noise * np.random.randn(self.m)
        # Shift a few random points strongly upwards to create outliers.
        outliers = np.random.randint(0, self.m, n_outliers)
        self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
        self.p_opt = np.array([a, b])
    def fun(self, p):
        """Residuals p[0] + exp(p[1] * x) - y for parameter vector p."""
        return p[0] + np.exp(p[1] * self.x) - self.y
    def jac(self, p):
        """Analytic (m, 2) Jacobian of ``fun``."""
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = self.x * np.exp(p[1] * self.x)
        return J
def cubic_soft_l1(z):
    """Cubic-root analogue of soft-l1: rho(z) = 3 * ((1 + z)**(1/3) - 1).

    Returns the (3, z.size) array of [rho, rho', rho''] that
    ``least_squares`` expects from a callable loss.
    """
    shifted = 1 + z
    out = np.empty((3, z.size))
    out[0] = 3 * (shifted ** (1 / 3) - 1)
    out[1] = shifted ** (-2 / 3)
    out[2] = (-2 / 3) * shifted ** (-5 / 3)
    return out
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin:
    """Solver-independent tests; concrete subclasses set ``self.method``."""
    def test_basic(self):
        # Test that the basic calling sequence works.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))
    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                     method=self.method)
            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)
            # args and kwargs must not be combined / misnamed.
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method)
    def test_jac_options(self):
        # All supported jac specifications converge; bogus ones are rejected.
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)
    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)
    def test_x_scale_options(self):
        # Valid scalar/array/'jac' scalings work; invalid values raise.
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)
    def test_diff_step(self):
        # res1 and res2 should be equivalent.
        # res2 and res3 should be different.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)
        assert_(res2.nfev != res3.nfev)
    def test_incorrect_options_usage(self):
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})
    def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # thus using low 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)
    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what he is going to compute.
        if self.method == 'lm':
            return
        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)
    def test_rosenbrock(self):
        # All jac / x_scale / tr_solver combinations reach the optimum.
        x0 = [-2, 1]
        x_opt = [1, 1]
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)
    def test_rosenbrock_cropped(self):
        # m < n problems: rejected by 'lm', solved by the other methods.
        x0 = [-2, 1]
        if self.method == 'lm':
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)
    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)
    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)
    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)
    def test_x0_multidimensional(self):
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_x0_complex_scalar(self):
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_x0_complex_array(self):
        x0 = [1.0, 2.0 + 0.0*1j]
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_bvp(self):
        # This test was introduced with fix #5556. It turned out that
        # dogbox solver had a bug with trust-region radius update, which
        # could block its progress and create an infinite loop. And this
        # discrete boundary value problem is the one which triggers it.
        n = 10
        x0 = np.ones(n**2)
        if self.method == 'lm':
            max_nfev = 5000  # To account for Jacobian estimation.
        else:
            max_nfev = 100
        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
                            max_nfev=max_nfev)
        assert_(res.nfev < max_nfev)
        assert_(res.cost < 0.5)
    def test_error_raised_when_all_tolerances_below_eps(self):
        # Test that all 0 tolerances are not allowed.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method=self.method, ftol=None, xtol=None, gtol=None)
    def test_convergence_with_only_one_tolerance_enabled(self):
        if self.method == 'lm':
            return  # should not do test
        x0 = [-2, 1]
        x_opt = [1, 1]
        for ftol, xtol, gtol in [(1e-8, None, None),
                                 (None, 1e-8, None),
                                 (None, None, 1e-8)]:
            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                                ftol=ftol, gtol=gtol, xtol=xtol,
                                method=self.method)
            assert_allclose(res.x, x_opt)
class BoundsMixin:
    """Tests of bound-constrained behavior; subclasses set ``self.method``."""
    def test_inconsistent(self):
        # lb > ub must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)
    def test_infeasible(self):
        # x0 outside [lb, ub] must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)
    def test_wrong_number(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)
    def test_inconsistent_shape(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        # 1-D array wont't be broadcasted
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)
    def test_in_bounds(self):
        # active_mask is 0 for an interior solution, -1 when the lower
        # bound is active.
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(res.x, 0.0, atol=1e-4)
            assert_equal(res.active_mask, [0])
            assert_(-1 <= res.x <= 3)
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(0.5, 3.0), method=self.method)
            assert_allclose(res.x, 0.5, atol=1e-4)
            assert_equal(res.active_mask, [-1])
            assert_(0.5 <= res.x <= 3)
    def test_bounds_shape(self):
        # Scalar bounds broadcast against array bounds in either position.
        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            x0 = [1.0, 1.0]
            res = least_squares(fun_2d_trivial, x0, jac=jac)
            assert_allclose(res.x, [0.0, 0.0])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=(0.5, [2.0, 2.0]), method=self.method)
            assert_allclose(res.x, [0.5, 0.5])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=([0.3, 0.2], 3.0), method=self.method)
            assert_allclose(res.x, [0.3, 0.2])
            res = least_squares(
                fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
                method=self.method)
            assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
    def test_rosenbrock_bounds(self):
        # First-order optimality must reach ~0 for a variety of start
        # points and active/inactive bound configurations.
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin:
    """Tests of sparse-Jacobian handling; subclasses set ``self.method``."""
    def test_exact_tr_solver(self):
        # tr_solver='exact' is incompatible with sparse Jacobians/sparsity.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='exact', method=self.method)
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      tr_solver='exact', jac_sparsity=p.sparsity,
                      method=self.method)
    def test_equivalence(self):
        # Sparse and dense formulations must walk an identical path.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(
            sparse.fun, sparse.x0, jac=sparse.jac,
            method=self.method)
        res_dense = least_squares(
            dense.fun, dense.x0, jac=sparse.jac,
            method=self.method)
        assert_equal(res_sparse.nfev, res_dense.nfev)
        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
    def test_tr_options(self):
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            tr_options={'btol': 1e-10})
        assert_allclose(res.cost, 0, atol=1e-20)
    def test_wrong_parameters(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='best', method=self.method)
        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='lsmr', tr_options={'tol': 1e-10})
    def test_solver_selection(self):
        # The returned jac type should mirror the supplied Jacobian type.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                   method=self.method)
        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                                  method=self.method)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
        assert_(issparse(res_sparse.jac))
        assert_(isinstance(res_dense.jac, np.ndarray))
    def test_numerical_jac(self):
        # Finite-difference estimation with and without a sparsity pattern
        # must agree exactly.
        p = BroydenTridiagonal()
        for jac in ['2-point', '3-point', 'cs']:
            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
            res_sparse = least_squares(
                p.fun, p.x0, jac,method=self.method,
                jac_sparsity=p.sparsity)
            assert_equal(res_dense.nfev, res_sparse.nfev)
            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
            assert_allclose(res_dense.cost, 0, atol=1e-20)
            assert_allclose(res_sparse.cost, 0, atol=1e-20)
    def test_with_bounds(self):
        p = BroydenTridiagonal()
        for jac, jac_sparsity in product(
                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
            res_1 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method,jac_sparsity=jac_sparsity)
            res_2 = least_squares(
                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            res_3 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            assert_allclose(res_1.optimality, 0, atol=1e-10)
            assert_allclose(res_2.optimality, 0, atol=1e-10)
            assert_allclose(res_3.optimality, 0, atol=1e-10)
    def test_wrong_jac_sparsity(self):
        # A sparsity pattern with the wrong shape must be rejected.
        p = BroydenTridiagonal()
        sparsity = p.sparsity[:-1]
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      jac_sparsity=sparsity, method=self.method)
    def test_linear_operator(self):
        p = BroydenTridiagonal(mode='operator')
        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
        assert_allclose(res.cost, 0.0, atol=1e-20)
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, tr_solver='exact')
    def test_x_scale_jac_scale(self):
        # x_scale='jac' works for sparse but not for LinearOperator jacs.
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)
        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
class LossFunctionMixin:
    """Tests of robust loss functions; subclasses set ``self.method``."""
    def test_options(self):
        # Every implemented loss (plus the callable) converges; unknown
        # loss names are rejected.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)
    def test_fun(self):
        # Test that res.fun is actual residuals, and not modified by loss
        # function stuff.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_equal(res.fun, fun_trivial(res.x))
    def test_grad(self):
        # Test that res.grad is true gradient of loss function at the
        # solution. Use max_nfev = 1, to avoid reaching minimum.
        x = np.array([2.0])  # res.x will be this.
        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
    def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
        # of Hessian. This approximation is computed by doubly differentiating
        # the cost function and dropping the part containing second derivative
        # of f. For a scalar function it is computed as
        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
        # brackets is less than EPS it is replaced by EPS. Here, we check
        # against the root of H.
        x = 2.0  # res.x will be this.
        f = x**2 + 5  # res.fun will be this.
        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)
        # For `huber` loss the Jacobian correction is identically zero
        # in outlier region, in such cases it is modified to be equal EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x * EPS**0.5)
        # Now, let's apply `loss_scale` to turn the residual into an inlier.
        # The loss function becomes linear.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            f_scale=10, max_nfev=1)
        assert_equal(res.jac, 2 * x)
        # 'soft_l1' always gives a positive scaling.
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
        # For 'cauchy' the correction term turns out to be negative, and it
        # replaced by EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Now use scaling to turn the residual to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            f_scale=10, max_nfev=1, method=self.method)
        fs = f / 10
        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
        # 'arctan' gives an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            f_scale=20.0, max_nfev=1, method=self.method)
        fs = f / 20
        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
        # cubic_soft_l1 will give an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial,
                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
        fs = f / 6
        assert_allclose(res.jac,
                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
    def test_robustness(self):
        # On outlier-contaminated data every robust loss must land closer
        # to the true parameters than the plain least-squares fit.
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    """Run the shared test suites against the 'dogbox' solver."""
    method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    """Run the shared suites against the Trust Region Reflective solver."""
    method = 'trf'
    def test_lsmr_regularization(self):
        # TRF-specific: lsmr works with regularization on and off.
        p = BroydenTridiagonal()
        for regularize in [True, False]:
            res = least_squares(p.fun, p.x0, p.jac, method='trf',
                                tr_options={'regularize': regularize})
            assert_allclose(res.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
    """Run the base suite against the Levenberg-Marquardt (MINPACK) wrapper,
    plus checks that unsupported features are rejected with ValueError."""
    method = 'lm'
    def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm')
    def test_m_less_n_not_supported(self):
        x0 = [-2, 1]
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
                      method='lm')
    def test_sparse_not_supported(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')
    def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      jac_sparsity=[1], method='lm')
    def test_LinearOperator_not_supported(self):
        p = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')
    def test_loss(self):
        # Only the 'linear' loss is available for 'lm'.
        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(res.x, 0.0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method='lm', loss='huber')
def test_basic():
    # The 'method' argument is optional and must default sensibly.
    result = least_squares(fun_trivial, 2.0)
    assert_allclose(result.x, 0, atol=1e-10)
def test_small_tolerances_for_lm():
    # 'lm' cannot honor sub-eps tolerances: every combination where the
    # remaining enabled tolerances are below machine eps must be rejected.
    combos = [(None, 1e-13, 1e-13),
              (1e-13, None, 1e-13),
              (1e-13, 1e-13, None)]
    for ftol, xtol, gtol in combos:
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
                      ftol=ftol, gtol=gtol, method='lm')
def test_fp32_gh12991():
    """Regression test: float32 inputs must not stall the first iteration."""
    # checks that smaller FP sizes can be used in least_squares
    # this is the minimum working example reported for gh12991
    np.random.seed(1)
    x = np.linspace(0, 1, 100).astype("float32")
    y = np.random.random(100).astype("float32")
    def func(p, x):
        # Simple linear model p0 + p1 * x.
        return p[0] + p[1] * x
    def err(p, x, y):
        return func(p, x) - y
    res = least_squares(err, [-1.0, -1.0], args=(x, y))
    # previously the initial jacobian calculated for this would be all 0
    # and the minimize would terminate immediately, with nfev=1, would
    # report a successful minimization (it shouldn't have done), but be
    # unchanged from the initial solution.
    # It was terminating early because the underlying approx_derivative
    # used a step size for FP64 when the working space was FP32.
    assert res.nfev > 3
    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
| 39.069054 | 96 | 0.559931 | from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal, suppress_warnings)
from pytest import raises as assert_raises
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
return (x - a)**2 + 5.0
def jac_trivial(x, a=0.0):
return 2 * (x - a)
def fun_2d_trivial(x):
return np.array([x[0], x[1]])
def jac_2d_trivial(x):
return np.identity(2)
def fun_rosenbrock(x):
return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
def jac_rosenbrock(x):
return np.array([
[-20 * x[0], 10],
[-1, 0]
])
def jac_rosenbrock_bad_dim(x):
return np.array([
[-20 * x[0], 10],
[-1, 0],
[0.0, 0.0]
])
def fun_rosenbrock_cropped(x):
return fun_rosenbrock(x)[0]
def jac_rosenbrock_cropped(x):
return jac_rosenbrock(x)[0]
def fun_wrong_dimensions(x):
return np.array([x, x**2, x**3])
def jac_wrong_dimensions(x, a=0.0):
return np.atleast_3d(jac_trivial(x, a=a))
def fun_bvp(x):
n = int(np.sqrt(x.shape[0]))
u = np.zeros((n + 2, n + 2))
x = x.reshape((n, n))
u[1:-1, 1:-1] = x
y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
return y.ravel()
class BroydenTridiagonal:
def __init__(self, n=100, mode='sparse'):
np.random.seed(0)
self.n = n
self.x0 = -np.ones(n)
self.lb = np.linspace(-2, -1.5, n)
self.ub = np.linspace(-0.8, 0.0, n)
self.lb += 0.1 * np.random.randn(n)
self.ub += 0.1 * np.random.randn(n)
self.x0 += 0.1 * np.random.randn(n)
self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
if mode == 'sparse':
self.sparsity = lil_matrix((n, n), dtype=int)
i = np.arange(n)
self.sparsity[i, i] = 1
i = np.arange(1, n)
self.sparsity[i, i - 1] = 1
i = np.arange(n - 1)
self.sparsity[i, i + 1] = 1
self.jac = self._jac
elif mode == 'operator':
self.jac = lambda x: aslinearoperator(self._jac(x))
elif mode == 'dense':
self.sparsity = None
self.jac = lambda x: self._jac(x).toarray()
else:
assert_(False)
def fun(self, x):
f = (3 - x) * x + 1
f[1:] -= x[:-1]
f[:-1] -= 2 * x[1:]
return f
def _jac(self, x):
J = lil_matrix((self.n, self.n))
i = np.arange(self.n)
J[i, i] = 3 - 2 * x
i = np.arange(1, self.n)
J[i, i - 1] = -1
i = np.arange(self.n - 1)
J[i, i + 1] = -2
return J
class ExponentialFittingProblem:
def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
n_points=11, random_seed=None):
np.random.seed(random_seed)
self.m = n_points
self.n = 2
self.p0 = np.zeros(2)
self.x = np.linspace(x_range[0], x_range[1], n_points)
self.y = a + np.exp(b * self.x)
self.y += noise * np.random.randn(self.m)
outliers = np.random.randint(0, self.m, n_outliers)
self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
self.p_opt = np.array([a, b])
def fun(self, p):
return p[0] + np.exp(p[1] * self.x) - self.y
def jac(self, p):
J = np.empty((self.m, self.n))
J[:, 0] = 1
J[:, 1] = self.x * np.exp(p[1] * self.x)
return J
def cubic_soft_l1(z):
rho = np.empty((3, z.size))
t = 1 + z
rho[0] = 3 * (t**(1/3) - 1)
rho[1] = t ** (-2/3)
rho[2] = -2/3 * t**(-5/3)
return rho
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin:
    """Solver-agnostic tests; subclasses set ``method`` to a solver name."""

    def test_basic(self):
        # Basic sanity: the trivial residual has its minimum at x = 0.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))

    def test_args_kwargs(self):
        # Extra residual arguments can be passed positionally or by keyword;
        # unexpected ones must raise TypeError.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                     method=self.method)
            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method)

    def test_jac_options(self):
        # All finite-difference schemes and a callable Jacobian converge.
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)
        # An unknown jac option must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)

    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

    def test_x_scale_options(self):
        # NOTE(review): the positive cases below don't pass
        # method=self.method, so they always test the default solver —
        # confirm this is intentional.
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)

    def test_diff_step(self):
        # The sign of diff_step is irrelevant (its absolute value is used),
        # so res1 and res2 must be identical, while a different (default)
        # step in res3 changes the evaluation count.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)
        assert_(res2.nfev != res3.nfev)

    def test_incorrect_options_usage(self):
        # The 'options' dict is not an accepted least_squares parameter.
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})

    def test_full_result(self):
        # MINPACK doesn't converge as tightly on this problem,
        # thus using low 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)

    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what it is going to compute; skip 'lm'.
        if self.method == 'lm':
            return
        # With max_nfev=1 the solver must stop at the initial point and
        # report exactly the quantities evaluated there.
        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)

    def test_rosenbrock(self):
        # The classic Rosenbrock problem over the full jac/x_scale/tr_solver
        # option grid.
        x0 = [-2, 1]
        x_opt = [1, 1]
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)

    def test_rosenbrock_cropped(self):
        # Underdetermined variant (fewer residuals than parameters): 'lm'
        # must reject it, the other methods must still drive cost to zero.
        x0 = [-2, 1]
        if self.method == 'lm':
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)

    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)

    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)

    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)

    def test_x0_multidimensional(self):
        # x0 must be at most 1-D.
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_scalar(self):
        # Complex x0 is rejected even with zero imaginary part.
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_array(self):
        x0 = [1.0, 2.0 + 0.0*1j]
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_bvp(self):
        # Discrete boundary value problem with n**2 unknowns; a regression
        # check that the solver makes real progress within max_nfev.
        n = 10
        x0 = np.ones(n**2)
        if self.method == 'lm':
            max_nfev = 5000  # 'lm' estimates the Jacobian numerically here.
        else:
            max_nfev = 100
        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
                            max_nfev=max_nfev)
        assert_(res.nfev < max_nfev)
        assert_(res.cost < 0.5)

    def test_error_raised_when_all_tolerances_below_eps(self):
        # Disabling all three tolerances is not allowed.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method=self.method, ftol=None, xtol=None, gtol=None)

    def test_convergence_with_only_one_tolerance_enabled(self):
        # 'lm' requires all tolerances; the other methods converge with
        # any single one enabled.
        if self.method == 'lm':
            return
        x0 = [-2, 1]
        x_opt = [1, 1]
        for ftol, xtol, gtol in [(1e-8, None, None),
                                 (None, 1e-8, None),
                                 (None, None, 1e-8)]:
            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                                ftol=ftol, gtol=gtol, xtol=xtol,
                                method=self.method)
            assert_allclose(res.x, x_opt)
class BoundsMixin:
    """Bound-constraint handling tests; subclasses set ``method``."""

    def test_inconsistent(self):
        # Lower bound above upper bound must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)

    def test_infeasible(self):
        # x0 lying outside the feasible box must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)

    def test_wrong_number(self):
        # bounds must be a 2-tuple (lower, upper).
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)

    def test_inconsistent_shape(self):
        # Bound arrays must broadcast against x0.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)

    def test_in_bounds(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            # Unconstrained optimum inside the box: no active bounds.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(res.x, 0.0, atol=1e-4)
            assert_equal(res.active_mask, [0])
            assert_(-1 <= res.x <= 3)
            # Optimum outside the box: solution sticks to the lower bound
            # and active_mask marks it as -1.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(0.5, 3.0), method=self.method)
            assert_allclose(res.x, 0.5, atol=1e-4)
            assert_equal(res.active_mask, [-1])
            assert_(0.5 <= res.x <= 3)

    def test_bounds_shape(self):
        # Scalar and array bounds must broadcast against multi-dim x0.
        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            x0 = [1.0, 1.0]
            res = least_squares(fun_2d_trivial, x0, jac=jac)
            assert_allclose(res.x, [0.0, 0.0])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=(0.5, [2.0, 2.0]), method=self.method)
            assert_allclose(res.x, [0.5, 0.5])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=([0.3, 0.2], 3.0), method=self.method)
            assert_allclose(res.x, [0.3, 0.2])
            res = least_squares(
                fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
                method=self.method)
            assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

    def test_rosenbrock_bounds(self):
        # Rosenbrock with a variety of boxes and starting points; each run
        # must reach first-order optimality within the box.
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin:
    """Sparse-Jacobian support tests; subclasses set ``method``."""

    def test_exact_tr_solver(self):
        # tr_solver='exact' cannot be combined with a sparse Jacobian or
        # with jac_sparsity.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='exact', method=self.method)
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      tr_solver='exact', jac_sparsity=p.sparsity,
                      method=self.method)

    def test_equivalence(self):
        # Sparse and dense problem formulations must follow the exact same
        # iteration path and both reach zero cost.
        # NOTE(review): the dense run reuses sparse.jac — confirm intentional.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(
            sparse.fun, sparse.x0, jac=sparse.jac,
            method=self.method)
        res_dense = least_squares(
            dense.fun, dense.x0, jac=sparse.jac,
            method=self.method)
        assert_equal(res_sparse.nfev, res_dense.nfev)
        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)

    def test_tr_options(self):
        # lsmr options are forwarded via tr_options.
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            tr_options={'btol': 1e-10})
        assert_allclose(res.cost, 0, atol=1e-20)

    def test_wrong_parameters(self):
        # Unknown tr_solver name and tr_options keys are rejected.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='best', method=self.method)
        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='lsmr', tr_options={'tol': 1e-10})

    def test_solver_selection(self):
        # The result's jac type follows the Jacobian's type: sparse stays
        # sparse, dense stays a plain ndarray.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                   method=self.method)
        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                                  method=self.method)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
        assert_(issparse(res_sparse.jac))
        assert_(isinstance(res_dense.jac, np.ndarray))

    def test_numerical_jac(self):
        # Finite differences with and without a sparsity pattern must agree.
        p = BroydenTridiagonal()
        for jac in ['2-point', '3-point', 'cs']:
            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
            res_sparse = least_squares(
                p.fun, p.x0, jac, method=self.method,
                jac_sparsity=p.sparsity)
            assert_equal(res_dense.nfev, res_sparse.nfev)
            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
            assert_allclose(res_dense.cost, 0, atol=1e-20)
            assert_allclose(res_sparse.cost, 0, atol=1e-20)

    def test_with_bounds(self):
        # Bounds combined with every Jacobian / sparsity option.
        p = BroydenTridiagonal()
        for jac, jac_sparsity in product(
                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
            res_1 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method, jac_sparsity=jac_sparsity)
            res_2 = least_squares(
                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            res_3 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            assert_allclose(res_1.optimality, 0, atol=1e-10)
            assert_allclose(res_2.optimality, 0, atol=1e-10)
            assert_allclose(res_3.optimality, 0, atol=1e-10)

    def test_wrong_jac_sparsity(self):
        # A sparsity pattern whose shape doesn't match the residuals fails.
        p = BroydenTridiagonal()
        sparsity = p.sparsity[:-1]
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      jac_sparsity=sparsity, method=self.method)

    def test_linear_operator(self):
        # A LinearOperator Jacobian works with lsmr but not with 'exact'.
        p = BroydenTridiagonal(mode='operator')
        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
        assert_allclose(res.cost, 0.0, atol=1e-20)
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, tr_solver='exact')

    def test_x_scale_jac_scale(self):
        # x_scale='jac' works for matrix Jacobians but is impossible for
        # LinearOperator Jacobians (no column norms available).
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)
        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
class LossFunctionMixin:
    """Robust-loss tests; subclasses set ``method``.

    Previously several calls in ``test_jac`` omitted ``method=self.method``
    and therefore always exercised the default solver; they now consistently
    use the subclass's method like the rest of the suite.
    """

    def test_options(self):
        # Every loss (built-in names and a callable) converges; an unknown
        # loss name is rejected.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)

    def test_fun(self):
        # Test that res.fun is actual residuals, and not modified by loss
        # function stuff.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_equal(res.fun, fun_trivial(res.x))

    def test_grad(self):
        # Test that res.grad is true gradient of loss function at the
        # solution. Use max_nfev = 1, to avoid reaching minimum.
        x = np.array([2.0])  # res.x will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))

        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)

        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))

        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))

    def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
        # of Hessian. This approximation is computed by doubly differentiating
        # the cost function and dropping the part containing second derivative
        # of f. For a scalar function it is computed as
        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
        # brackets is less than EPS it is replaced by EPS. Here, we check
        # against the root of H.
        x = 2.0  # res.x will be this.
        f = x**2 + 5  # res.fun will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)

        # For `huber` loss the Jacobian correction is identically zero
        # in outlier region, in such cases it is modified to be equal EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x * EPS**0.5)

        # Now, let's apply `f_scale` to turn the residual into an inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            f_scale=10, max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)

        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)

        # In the outlier region 'cauchy' also degenerates to EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            f_scale=10, max_nfev=1, method=self.method)
        fs = f / 10
        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            f_scale=20.0, max_nfev=1, method=self.method)
        fs = f / 20
        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))

        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        res = least_squares(fun_trivial, x, jac_trivial,
                            loss=cubic_soft_l1, f_scale=6, max_nfev=1,
                            method=self.method)
        fs = f / 6
        assert_allclose(res.jac,
                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))

    def test_robustness(self):
        # With outliers present, every robust loss must land strictly closer
        # to the true parameters than the plain least-squares fit.
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    """Run the full shared suite with the dogleg ('dogbox') solver."""
    method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    """Run the full shared suite with the Trust Region Reflective solver."""
    method = 'trf'

    def test_lsmr_regularization(self):
        # The lsmr 'regularize' option must be accepted in both states and
        # not prevent convergence.
        p = BroydenTridiagonal()
        for regularize in [True, False]:
            res = least_squares(p.fun, p.x0, p.jac, method='trf',
                                tr_options={'regularize': regularize})
            assert_allclose(res.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
    """Levenberg-Marquardt: base suite plus checks that the features the
    MINPACK wrapper deliberately lacks are rejected with ValueError."""
    method = 'lm'

    def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm')

    def test_m_less_n_not_supported(self):
        # 'lm' requires at least as many residuals as parameters.
        x0 = [-2, 1]
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
                      method='lm')

    def test_sparse_not_supported(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')

    def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      jac_sparsity=[1], method='lm')

    def test_LinearOperator_not_supported(self):
        p = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')

    def test_loss(self):
        # Only the plain squared loss is available with 'lm'.
        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(res.x, 0.0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method='lm', loss='huber')
def test_basic():
    """Smoke test: the default method drives the trivial residual to zero."""
    result = least_squares(fun_trivial, 2.0)
    assert_allclose(result.x, 0, atol=1e-10)
def test_small_tolerances_for_lm():
    """'lm' must reject tolerances below machine epsilon (set to None)
    instead of silently running with them."""
    tol_combos = [(None, 1e-13, 1e-13),
                  (1e-13, None, 1e-13),
                  (1e-13, 1e-13, None)]
    for ftol, xtol, gtol in tol_combos:
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
                      ftol=ftol, gtol=gtol, method='lm')
def test_fp32_gh12991():
    """Regression test for gh-12991: float32 residuals must not stall.

    The solver used to pick a finite-difference step sized for float64 even
    when the working dtype was float32, so the fit terminated almost
    immediately with the solution unchanged from the initial guess.
    """
    np.random.seed(1)
    xdata = np.linspace(0, 1, 100).astype("float32")
    ydata = np.random.random(100).astype("float32")

    def model(params, t):
        return params[0] + params[1] * t

    def residuals(params, t, obs):
        return model(params, t) - obs

    res = least_squares(residuals, [-1.0, -1.0], args=(xdata, ydata))

    # More than a handful of evaluations proves real progress was made.
    assert res.nfev > 3
    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
| true | true |
1c3ad4d233fc232732890fb92f556dcbb8c87887 | 4,839 | py | Python | week_2_data_ingestion/airflow/dags/data_ingestion_yellowtaxi_gcs.py | meswaramoorthy/gcp-de-zoomcamp | 1b01ea425404c7c101839cf363dbf556b67d86a0 | [
"MIT"
] | null | null | null | week_2_data_ingestion/airflow/dags/data_ingestion_yellowtaxi_gcs.py | meswaramoorthy/gcp-de-zoomcamp | 1b01ea425404c7c101839cf363dbf556b67d86a0 | [
"MIT"
] | 2 | 2022-02-22T06:40:10.000Z | 2022-02-23T05:41:02.000Z | week_2_data_ingestion/airflow/dags/data_ingestion_yellowtaxi_gcs.py | meswaramoorthy/gcp-de-zoomcamp | 1b01ea425404c7c101839cf363dbf556b67d86a0 | [
"MIT"
] | null | null | null | # Imports
import os
import logging
from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator
from google.cloud import storage
import pyarrow.csv as pv
import pyarrow.parquet as pq

# Runtime configuration pulled from the environment (set in the Airflow
# deployment); BIGQUERY_DATASET falls back to the default dataset name.
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", '/opt/airflow/')
PROJECT_ID= os.environ.get("GCP_PROJECT_ID")
BUCKET= os.environ.get("GCP_GCS_BUCKET")
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')

# The '{{ ... }}' placeholders are Airflow Jinja templates rendered per DAG
# run, yielding one file per execution month (e.g. yellow_tripdata_2019-01.csv).
URL_PREFIX= 'https://s3.amazonaws.com/nyc-tlc/trip+data'
FILE_NAME= 'yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
URL_TEMPLATE= URL_PREFIX + '/' + FILE_NAME
OUTPUT_FILE_TEMPLATE= AIRFLOW_HOME + '/output_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
TABLE_NAME_TEMPLATE= 'yellow_taxi_trip_{{ execution_date.strftime(\'%Y_%m\') }}'

# Service-account key used by the gsutil upload task.
# path_to_creds = f"{AIRFLOW_HOME}/google_credentials.json"
path_to_creds = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
def format_to_parquet(src_file):
    """Convert a local CSV file to Parquet alongside it (same stem).

    Non-CSV inputs are logged and skipped (best-effort, no exception).
    """
    if not src_file.endswith('.csv'):
        logging.error("Can only accept source files in csv format.")
        return
    parquet_path = src_file.replace('.csv', '.parquet')
    csv_table = pv.read_csv(src_file)
    pq.write_table(csv_table, parquet_path)
def upload_to_gcs(bucket, object_name, local_file):
    """
    Upload ``local_file`` to ``gs://<bucket>/<object_name>``.

    Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
    :param bucket: GCS bucket name
    :param object_name: target path & file-name
    :param local_file: source path & file-name
    :return: None
    """
    # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed.
    # (Ref: https://github.com/googleapis/python-storage/issues/74)
    # storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024  # 5 MB
    # storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024  # 5 MB
    # End of Workaround

    # Lazy %-style args so the message is only rendered when INFO is enabled.
    logging.info("Uploading %s to gs://%s/%s", local_file, bucket, object_name)
    client = storage.Client()
    # Keep the bucket *name* (parameter) and the Bucket *object* distinct
    # instead of shadowing the parameter, which previously made the code
    # confusing and bug-prone.
    gcs_bucket = client.bucket(bucket)
    blob = gcs_bucket.blob(object_name)
    blob.upload_from_filename(local_file)
# Task-level defaults applied to every operator in the DAG below.
default_args= {
    "owner": "airflow",
    "depends_on_past": False,
    "retries": 1,
}
# DAG Declaration: one run per month of NYC yellow-taxi data, backfilled
# over the full 2019-01 .. 2021-07 range one month at a time.
with DAG(
    dag_id= "data_ingestion_gcs",
    schedule_interval= "@monthly",
    start_date= datetime(2019, 1, 1),
    end_date= datetime(2021, 7, 30),
    default_args= default_args,
    catchup= True,       # backfill every month in the date range
    max_active_runs= 1,  # process one month at a time
    tags= ["dtc-de-nyc-tripdata-yellow"],
) as dag:

    # Download one month of CSV data. curl -f makes the task fail on an
    # HTTP error (e.g. 404) instead of writing an error page to the file.
    download_dataset_task = BashOperator(
        task_id= "download_dataset_task",
        bash_command= f"curl -sSLf {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}"
    )

    # Convert the downloaded CSV to Parquet next to it (same stem).
    format_to_parquet_task= PythonOperator(
        task_id= "format_to_parquet_task",
        python_callable= format_to_parquet,
        op_kwargs= {
            "src_file": OUTPUT_FILE_TEMPLATE
        },
    )

    # Upload via gsutil rather than the python client (upload_to_gcs):
    # `gsutil -m` handles large files without the multipart-upload timeout.
    upload_to_gcs_bash_task = BashOperator(
        task_id = "upload_to_gcs_bash_task",
        bash_command = f"ls && gcloud auth activate-service-account --key-file={path_to_creds} && \
            gsutil -m cp {OUTPUT_FILE_TEMPLATE.replace('.csv', '.parquet')} gs://{BUCKET}/raw",
    )

    # Remove both local files once the upload succeeded.
    remove_local_file_task = BashOperator(
        task_id = "remove_local_file_task",
        bash_command = f"rm {OUTPUT_FILE_TEMPLATE} {OUTPUT_FILE_TEMPLATE.replace('.csv', '.parquet')}"
    )

    # download -> convert -> upload -> clean up
    download_dataset_task >> format_to_parquet_task >> upload_to_gcs_bash_task >> remove_local_file_task
import os
import logging
from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator
from google.cloud import storage
import pyarrow.csv as pv
import pyarrow.parquet as pq
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", '/opt/airflow/')
PROJECT_ID= os.environ.get("GCP_PROJECT_ID")
BUCKET= os.environ.get("GCP_GCS_BUCKET")
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')
URL_PREFIX= 'https://s3.amazonaws.com/nyc-tlc/trip+data'
FILE_NAME= 'yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
URL_TEMPLATE= URL_PREFIX + '/' + FILE_NAME
OUTPUT_FILE_TEMPLATE= AIRFLOW_HOME + '/output_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
TABLE_NAME_TEMPLATE= 'yellow_taxi_trip_{{ execution_date.strftime(\'%Y_%m\') }}'
path_to_creds = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
def format_to_parquet(src_file):
if not src_file.endswith('.csv'):
logging.error("Can only accept source files in csv format.")
return
table= pv.read_csv(src_file)
pq.write_table(table, src_file.replace('.csv', '.parquet'))
def upload_to_gcs(bucket, object_name, local_file):
logging.info(bucket + ':' + object_name + ':' + local_file)
print(PROJECT_ID , ':' , BIGQUERY_DATASET , ':' , FILE_NAME.replace('.csv', '.parquet'))
client= storage.Client()
bucket= client.bucket(bucket)
blob= bucket.blob(object_name)
blob.upload_from_filename(local_file)
default_args= {
"owner": "airflow",
"depends_on_past": False,
"retries": 1,
}
with DAG(
dag_id= "data_ingestion_gcs",
schedule_interval= "@monthly",
start_date= datetime(2019, 1, 1),
end_date= datetime(2021, 7, 30),
default_args= default_args,
catchup= True,
max_active_runs= 1,
tags= ["dtc-de-nyc-tripdata-yellow"],
) as dag:
download_dataset_task = BashOperator(
task_id= "download_dataset_task",
bash_command= f"curl -sSLf {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}"
)
format_to_parquet_task= PythonOperator(
task_id= "format_to_parquet_task",
python_callable= format_to_parquet,
op_kwargs= {
"src_file": OUTPUT_FILE_TEMPLATE
},
)
= "upload_to_gcs_bash_task",
bash_command = f"ls && gcloud auth activate-service-account --key-file={path_to_creds} && \
gsutil -m cp {OUTPUT_FILE_TEMPLATE.replace('.csv', '.parquet')} gs://{BUCKET}/raw",
)
remove_local_file_task = BashOperator(
task_id = "remove_local_file_task",
bash_command = f"rm {OUTPUT_FILE_TEMPLATE} {OUTPUT_FILE_TEMPLATE.replace('.csv', '.parquet')}"
)
download_dataset_task >> format_to_parquet_task >> upload_to_gcs_bash_task >> remove_local_file_task | true | true |
1c3ad52d3fc87c3182c23cc3f256734c7bb28c50 | 331 | py | Python | examples/design_search/setup.py | uoa-iai/RoboGrammar | 5837107b1588f126e162ba1e9b47f5c9d26c024b | [
"MIT"
] | null | null | null | examples/design_search/setup.py | uoa-iai/RoboGrammar | 5837107b1588f126e162ba1e9b47f5c9d26c024b | [
"MIT"
] | null | null | null | examples/design_search/setup.py | uoa-iai/RoboGrammar | 5837107b1588f126e162ba1e9b47f5c9d26c024b | [
"MIT"
] | 1 | 2021-05-09T21:51:11.000Z | 2021-05-09T21:51:11.000Z | import os
from setuptools import setup

# Change working directory to the one this file is in, so that the relative
# package_dir below resolves the same way regardless of the caller's cwd.
os.chdir(os.path.dirname(os.path.realpath(__file__)))

setup(
    name='design_search',
    version='0.1.0',
    packages=['design_search'],
    # The package's modules live one directory above this setup.py.
    package_dir={'design_search': '..'},
    install_requires=[
        'numpy >= 1.19'
    ]
)
| 20.6875 | 53 | 0.65861 | import os
from setuptools import setup
os.chdir(os.path.dirname(os.path.realpath(__file__)))
setup(
name='design_search',
version='0.1.0',
packages=['design_search'],
package_dir={'design_search': '..'},
install_requires=[
'numpy >= 1.19'
]
)
| true | true |
1c3ad68dce23f052ad3db0ec86ff7220479905d4 | 4,272 | py | Python | kolibri/core/tasks/main.py | kuboginichimaru/kolibri | 18b398f62baa1c60f8456f7f9c6d6c9447068f69 | [
"MIT"
] | null | null | null | kolibri/core/tasks/main.py | kuboginichimaru/kolibri | 18b398f62baa1c60f8456f7f9c6d6c9447068f69 | [
"MIT"
] | 8 | 2021-05-21T15:31:24.000Z | 2022-02-24T15:02:14.000Z | kolibri/core/tasks/main.py | kuboginichimaru/kolibri | 18b398f62baa1c60f8456f7f9c6d6c9447068f69 | [
"MIT"
] | null | null | null | import logging
import os
import sqlite3
from django.utils.functional import SimpleLazyObject
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import exc
from kolibri.core.sqlite.utils import check_sqlite_integrity
from kolibri.core.sqlite.utils import repair_sqlite_db
from kolibri.core.tasks.queue import Queue
from kolibri.core.tasks.scheduler import Scheduler
from kolibri.core.tasks.worker import Worker
from kolibri.utils import conf
logger = logging.getLogger(__name__)
def create_db_url(
    db_type, path=None, name=None, password=None, user=None, host=None, port=None
):
    """Build an SQLAlchemy database URL for the job-storage backend.

    :param db_type: either "sqlite" or "postgres"
    :param path: filesystem path of the database file (sqlite only)
    :param name: database name (postgres only)
    :param password: database password (postgres only)
    :param user: database user (postgres only)
    :param host: database host (postgres only)
    :param port: database port as a string; may be empty or None (postgres only)
    :return: a URL string suitable for ``sqlalchemy.create_engine``
    :raises ValueError: if ``db_type`` is not a recognized engine
    """
    if db_type == "sqlite":
        return "sqlite:///{path}".format(path=path)
    elif db_type == "postgres":
        from urllib.parse import quote_plus

        # User and password may contain characters such as '@' or ':' that
        # would corrupt the URL; percent-encode them as recommended by the
        # SQLAlchemy engine documentation.
        return "postgresql://{user}:{password}@{host}{port}/{name}".format(
            name=name,
            password=quote_plus(password) if password else password,
            user=quote_plus(user) if user else user,
            host=host,
            port=":" + port if port else "",
        )
    # Previously an unknown engine silently returned None, deferring the
    # failure to a confusing point downstream; fail fast instead.
    raise ValueError("Unknown database engine option: {}".format(db_type))
def make_connection(db_type, url):
    """Create a SQLAlchemy engine for ``url`` with engine-specific options
    and multiprocessing safeguards attached.

    :param db_type: "sqlite" or "postgres"; anything else raises.
    :param url: SQLAlchemy database URL (see ``create_db_url``).
    :return: a configured SQLAlchemy engine.
    """
    if db_type == "sqlite":
        # The engine is shared across worker threads, so sqlite's
        # same-thread check must be disabled.
        kwargs = dict(
            connect_args={"check_same_thread": False},
        )
    elif db_type == "postgres":
        kwargs = dict(
            pool_pre_ping=True,
            client_encoding="utf8",
        )
    else:
        raise Exception("Unknown database engine option: {}".format(db_type))
    connection = create_engine(url, **kwargs)
    # Add multiprocessing safeguards as recommended by:
    # https://docs.sqlalchemy.org/en/13/core/pooling.html#pooling-multiprocessing
    # Don't make a connection before we've added the multiprocessing guards
    # as otherwise we will have a connection that doesn't have the 'pid' attribute set.
    @event.listens_for(connection, "connect")
    def connect(dbapi_connection, connection_record):
        # Remember which process created this DBAPI connection.
        connection_record.info["pid"] = os.getpid()
    @event.listens_for(connection, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        # Refuse to hand a connection created in another process (e.g. the
        # parent before a fork) to the current one; invalidating it makes
        # the pool open a fresh connection on retry.
        pid = os.getpid()
        if connection_record.info["pid"] != pid:
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                "Connection record belongs to pid %s, attempting to check out in pid %s"
                % (connection_record.info["pid"], pid)
            )
    return connection
def __initialize_connection():
    """Build the engine for the job-storage database from Kolibri's options.

    Called lazily (see ``connection`` below) so the engine is created only
    when the task system is first used.
    """
    db_url = create_db_url(
        conf.OPTIONS["Database"]["DATABASE_ENGINE"],
        path=os.path.join(conf.KOLIBRI_HOME, "job_storage.sqlite3"),
        name=conf.OPTIONS["Database"]["DATABASE_NAME"],
        password=conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
        user=conf.OPTIONS["Database"]["DATABASE_USER"],
        host=conf.OPTIONS["Database"]["DATABASE_HOST"],
        port=conf.OPTIONS["Database"]["DATABASE_PORT"],
    )
    connection = make_connection(
        conf.OPTIONS["Database"]["DATABASE_ENGINE"],
        db_url,
    )
    # Check if the database is corrupted; if so, recreate it rather than
    # failing every subsequent task operation.
    try:
        check_sqlite_integrity(connection)
    except (exc.DatabaseError, sqlite3.DatabaseError):
        logger.warn("Job storage database has been corrupted, regenerating")
        repair_sqlite_db(connection)
    return connection
# Lazily built engine shared by all queues and schedulers in this process.
connection = SimpleLazyObject(__initialize_connection)

# Queue names: regular background tasks, high-priority tasks that must not
# wait behind long-running ones, and facility (sync) tasks.
task_queue_name = "kolibri"
priority_queue_name = "no_waiting"
facility_queue_name = "facility"
def __priority_queue():
    # Factory for the high-priority queue; evaluated lazily below.
    return Queue(priority_queue_name, connection=connection)


priority_queue = SimpleLazyObject(__priority_queue)


def __facility_queue():
    # Factory for the facility (sync) task queue.
    return Queue(facility_queue_name, connection=connection)


facility_queue = SimpleLazyObject(__facility_queue)


def __queue():
    # Factory for the default task queue.
    return Queue(task_queue_name, connection=connection)


queue = SimpleLazyObject(__queue)


def __scheduler():
    # Factory for the scheduler, which feeds the default queue.
    return Scheduler(queue=queue, connection=connection)


scheduler = SimpleLazyObject(__scheduler)
def initialize_workers():
    """Start the worker pools for all three queues.

    Returns the pools as a ``(regular, priority, facility)`` tuple.
    """
    logger.info("Starting scheduler workers.")
    pools = (
        Worker(task_queue_name, connection=connection, num_workers=1),
        Worker(priority_queue_name, connection=connection, num_workers=3),
        Worker(facility_queue_name, connection=connection, num_workers=1),
    )
    return pools
| 30.297872 | 88 | 0.704588 | import logging
import os
import sqlite3
from django.utils.functional import SimpleLazyObject
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import exc
from kolibri.core.sqlite.utils import check_sqlite_integrity
from kolibri.core.sqlite.utils import repair_sqlite_db
from kolibri.core.tasks.queue import Queue
from kolibri.core.tasks.scheduler import Scheduler
from kolibri.core.tasks.worker import Worker
from kolibri.utils import conf
logger = logging.getLogger(__name__)
def create_db_url(
db_type, path=None, name=None, password=None, user=None, host=None, port=None
):
if db_type == "sqlite":
return "sqlite:///{path}".format(path=path)
elif db_type == "postgres":
return "postgresql://{user}:{password}@{host}{port}/{name}".format(
name=name,
password=password,
user=user,
host=host,
port=":" + port if port else "",
)
def make_connection(db_type, url):
    """Create a SQLAlchemy engine for ``url`` that is safe across process forks.

    Args:
        db_type: ``"sqlite"`` or ``"postgres"``; anything else raises.
        url: SQLAlchemy database URL (see ``create_db_url``).

    Returns:
        A SQLAlchemy engine whose connections record the pid that created
        them and refuse to be checked out from a different process.

    Raises:
        Exception: if ``db_type`` is not a known engine option.
    """
    if db_type == "sqlite":
        kwargs = dict(
            connect_args={"check_same_thread": False},
        )
    elif db_type == "postgres":
        kwargs = dict(
            pool_pre_ping=True,
            client_encoding="utf8",
        )
    else:
        raise Exception("Unknown database engine option: {}".format(db_type))
    connection = create_engine(url, **kwargs)

    # BUG FIX: this line was truncated to a bare `stens_for(connection, ...)`
    # call, which raised NameError at import and never registered the
    # "connect" listener. Restore the @event.listens_for decorator.
    @event.listens_for(connection, "connect")
    def connect(dbapi_connection, connection_record):
        # Remember which process created this DBAPI connection.
        connection_record.info["pid"] = os.getpid()

    @event.listens_for(connection, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        # Prevent a forked child from reusing a parent's pooled connection.
        pid = os.getpid()
        if connection_record.info["pid"] != pid:
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                "Connection record belongs to pid %s, attempting to check out in pid %s"
                % (connection_record.info["pid"], pid)
            )
    return connection
def __initialize_connection():
    """Build (and integrity-check) the engine backing the job-storage DB.

    Invoked lazily via SimpleLazyObject so importing this module does not
    open a database connection. On detected SQLite corruption the database
    is repaired (regenerated) rather than raising.
    """
    db_url = create_db_url(
        conf.OPTIONS["Database"]["DATABASE_ENGINE"],
        path=os.path.join(conf.KOLIBRI_HOME, "job_storage.sqlite3"),
        name=conf.OPTIONS["Database"]["DATABASE_NAME"],
        password=conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
        user=conf.OPTIONS["Database"]["DATABASE_USER"],
        host=conf.OPTIONS["Database"]["DATABASE_HOST"],
        port=conf.OPTIONS["Database"]["DATABASE_PORT"],
    )
    connection = make_connection(
        conf.OPTIONS["Database"]["DATABASE_ENGINE"],
        db_url,
    )
    # Check if the database is corrupted
    try:
        check_sqlite_integrity(connection)
    except (exc.DatabaseError, sqlite3.DatabaseError):
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning("Job storage database has been corrupted, regenerating")
        repair_sqlite_db(connection)
    return connection
# Lazy engine for the job-storage DB; created on first attribute access.
connection = SimpleLazyObject(__initialize_connection)
# Queue names: default, high-priority, and facility (sync/import) work.
task_queue_name = "kolibri"
priority_queue_name = "no_waiting"
facility_queue_name = "facility"
def __priority_queue():
    # Factory for the high-priority queue, deferred until first use.
    return Queue(priority_queue_name, connection=connection)
priority_queue = SimpleLazyObject(__priority_queue)
def __facility_queue():
    # Factory for the facility queue, deferred until first use.
    return Queue(facility_queue_name, connection=connection)
facility_queue = SimpleLazyObject(__facility_queue)
def __queue():
    # Factory for the default queue, deferred until first use.
    return Queue(task_queue_name, connection=connection)
queue = SimpleLazyObject(__queue)
def __scheduler():
    # Factory for the scheduler feeding the default queue.
    return Scheduler(queue=queue, connection=connection)
scheduler = SimpleLazyObject(__scheduler)
def initialize_workers():
    """Create worker pools for the three queues.

    Returns:
        Tuple ``(regular_worker, priority_worker, facility_worker)``; the
        priority queue gets 3 workers vs 1 for the others.
    """
    logger.info("Starting scheduler workers.")
    regular_worker = Worker(task_queue_name, connection=connection, num_workers=1)
    priority_worker = Worker(priority_queue_name, connection=connection, num_workers=3)
    facility_worker = Worker(facility_queue_name, connection=connection, num_workers=1)
    return regular_worker, priority_worker, facility_worker
| true | true |
1c3ad6e626f17d7949669b590b2ab4b21b862670 | 1,491 | py | Python | python/verifair/benchmarks/fairsquare/M_BNc_F_NN_V3_H2_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-11-05T20:40:40.000Z | 2020-09-16T03:13:54.000Z | python/verifair/benchmarks/fairsquare/M_BNc_F_NN_V3_H2_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/verifair/benchmarks/fairsquare/M_BNc_F_NN_V3_H2_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from .helper import *
def sample(flag):
    """Draw one individual from the population model and classify it.

    Samples (sex, capital_gain, age, education_num) from a Bayes-net
    population model, records the sensitive attribute and qualification
    predicate, then runs a 2-hidden-unit neural network. Returns 1 when the
    network prefers output o2 over o1, else 0.

    NOTE(review): the trailing fairnessTarget(...) call is unreachable
    (after `return`); it is kept for provenance from the original
    FairSquare program. Do not reorder the random draws below — the
    sequence of step()/gaussian() calls defines the sampled distribution.
    """
    # sex < 1 means the protected group (probability 0.3307).
    sex = step([(0,1,0.3307), (1,2,0.6693)])
    if sex < 1:
        capital_gain = gaussian(568.4105, 24248365.5428)
        if capital_gain < 7298.0000:
            age = gaussian(38.4208, 184.9151)
            education_num = gaussian(10.0827, 6.5096)
        else:
            age = gaussian(38.8125, 193.4918)
            education_num = gaussian(10.1041, 6.1522)
    else:
        capital_gain = gaussian(1329.3700, 69327473.1006)
        if capital_gain < 5178.0000:
            age = gaussian(38.6361, 187.2435)
            education_num = gaussian(10.0817, 6.4841)
        else:
            age = gaussian(38.2668, 187.2747)
            education_num = gaussian(10.0974, 7.1793)
    # Clamp: age is at least education_num.
    if (education_num > age):
        age = education_num
    sensitiveAttribute(sex < 1, flag)
    qualified(age > 18)
    # Normalize features to roughly [-4.5, 5.5] before the network.
    N_age = ((age - 17.0) / 73.0 - 0.5) * 10 + 0.5
    N_education_num = ((education_num - 3.0) / 13.0 - 0.5) * 10 + 0.5
    N_capital_gain = ((capital_gain - 0.0) / 22040.0 - 0.5) * 10 + 0.5
    # Hidden layer with ReLU activations.
    h1 = -0.2277 * N_age + 0.6434 * N_education_num + 2.3643 * N_capital_gain + 3.7146
    if h1 < 0:
        h1 = 0
    h2 = -0.0236 * N_age + -3.3556 * N_education_num + -1.8183 * N_capital_gain + -1.7810
    if h2 < 0:
        h2 = 0
    # Output layer with ReLU activations.
    o1 = 0.4865 * h1 + 1.0685 * h2 + -1.8079
    if o1 < 0:
        o1 = 0
    o2 = 1.7044 * h1 + -1.3880 * h2 + 0.6830
    if o2 < 0:
        o2 = 0
    return int(o1 < o2)
    fairnessTarget(o1 < o2)
| 33.886364 | 89 | 0.539906 | from .helper import *
def sample(flag):
    """Sample an individual from the Bayes-net model and run the classifier.

    The step()/gaussian() call order defines the RNG stream, so statements
    must not be reordered. Returns int(o1 < o2); the final
    fairnessTarget(...) line is dead code retained from the FairSquare DSL
    source (it follows a `return`).
    """
    sex = step([(0,1,0.3307), (1,2,0.6693)])
    if sex < 1:
        capital_gain = gaussian(568.4105, 24248365.5428)
        if capital_gain < 7298.0000:
            age = gaussian(38.4208, 184.9151)
            education_num = gaussian(10.0827, 6.5096)
        else:
            age = gaussian(38.8125, 193.4918)
            education_num = gaussian(10.1041, 6.1522)
    else:
        capital_gain = gaussian(1329.3700, 69327473.1006)
        if capital_gain < 5178.0000:
            age = gaussian(38.6361, 187.2435)
            education_num = gaussian(10.0817, 6.4841)
        else:
            age = gaussian(38.2668, 187.2747)
            education_num = gaussian(10.0974, 7.1793)
    # age is clamped to be >= education_num.
    if (education_num > age):
        age = education_num
    sensitiveAttribute(sex < 1, flag)
    qualified(age > 18)
    # Feature normalization before the 2-hidden-unit network.
    N_age = ((age - 17.0) / 73.0 - 0.5) * 10 + 0.5
    N_education_num = ((education_num - 3.0) / 13.0 - 0.5) * 10 + 0.5
    N_capital_gain = ((capital_gain - 0.0) / 22040.0 - 0.5) * 10 + 0.5
    h1 = -0.2277 * N_age + 0.6434 * N_education_num + 2.3643 * N_capital_gain + 3.7146
    if h1 < 0:
        h1 = 0
    h2 = -0.0236 * N_age + -3.3556 * N_education_num + -1.8183 * N_capital_gain + -1.7810
    if h2 < 0:
        h2 = 0
    o1 = 0.4865 * h1 + 1.0685 * h2 + -1.8079
    if o1 < 0:
        o1 = 0
    o2 = 1.7044 * h1 + -1.3880 * h2 + 0.6830
    if o2 < 0:
        o2 = 0
    return int(o1 < o2)
    fairnessTarget(o1 < o2)
| true | true |
1c3ad6e7b9c7fbea3425c301501a21a9d703b98e | 1,408 | py | Python | Python/longest-happy-prefix.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/longest-happy-prefix.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/longest-happy-prefix.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(n)
# Space: O(n)
# kmp solution
class Solution(object):
    def longestPrefix(self, s):
        """Return the longest proper prefix of s that is also a suffix.

        Uses the KMP failure function: O(len(s)) time and space.
        :type s: str
        :rtype: str
        """
        def failure_function(text):
            # fail[i] = end index of the longest border of text[:i+1],
            # or -1 when no border exists.
            fail = [-1] * len(text)
            k = -1
            for idx in range(1, len(text)):
                while k >= 0 and text[k + 1] != text[idx]:
                    k = fail[k]
                if text[k + 1] == text[idx]:
                    k += 1
                fail[idx] = k
            return fail

        border_end = failure_function(s)[-1]
        return s[:border_end + 1]
# Time: O(n) on average
# Space: O(1)
# rolling-hash solution
class Solution2(object):
    def longestPrefix(self, s):
        """Return the longest proper prefix of s that is also a suffix.

        Rabin-Karp variant: in one pass, hash every proper prefix and the
        suffix of the same length, remembering the longest length at which
        the two hashes agree. Assumes the large prime modulus makes
        collisions negligible.
        :type s: str
        :rtype: str
        """
        mod = 10 ** 9 + 7
        base = 26
        a_code = ord('a')
        n = len(s)
        best = 0
        fwd_hash = rev_hash = 0
        weight = 1
        for length in range(1, n):
            # Extend the prefix hash on the right, the suffix hash on the left.
            fwd_hash = (fwd_hash * base + (ord(s[length - 1]) - a_code)) % mod
            rev_hash = (rev_hash + (ord(s[n - length]) - a_code) * weight) % mod
            weight = (weight * base) % mod
            if fwd_hash == rev_hash:
                best = length
        return s[:best]
| 27.076923 | 74 | 0.426136 |
class Solution(object):
    def longestPrefix(self, s):
        """Return the longest proper prefix of s that is also a suffix.

        KMP failure-function approach: O(len(s)) time and space.
        :type s: str
        :rtype: str
        """
        def getPrefix(pattern):
            # prefix[i] = end index of the longest border of pattern[:i+1],
            # or -1 if there is none.
            prefix = [-1]*len(pattern)
            j = -1
            for i in range(1, len(pattern)):
                while j != -1 and pattern[j+1] != pattern[i]:
                    j = prefix[j]
                if pattern[j+1] == pattern[i]:
                    j += 1
                prefix[i] = j
            return prefix
        # The last failure value gives the border length of the full string.
        return s[:getPrefix(s)[-1]+1]
class Solution2(object):
    def longestPrefix(self, s):
        """Return the longest proper prefix of s that is also a suffix.

        Rolling-hash (Rabin-Karp) variant: O(len(s)) average time, O(1)
        extra space. Assumes M is large enough that hash collisions are
        negligible (see the unused `check` helper for exact verification).
        :type s: str
        :rtype: str
        """
        M = 10**9+7
        D = 26
        def check(l, s):
            # Exact (non-hash) verification that s has a border of length l.
            # Unused in the hot path; kept for debugging.
            for i in range(l):
                if s[i] != s[len(s)-l+i]:
                    return False
            return True
        result, prefix, suffix, power = 0, 0, 0, 1
        for i in range(len(s)-1):
            # prefix hashes s[:i+1] left-to-right; suffix hashes s[-(i+1):].
            prefix = (prefix*D + (ord(s[i])-ord('a'))) % M
            suffix = (suffix + (ord(s[len(s)-(i+1)])-ord('a'))*power) % M
            power = (power*D)%M
            if prefix == suffix:
                result = i+1
        return s[:result]
| true | true |
1c3ad7c48b81c35fb3323f8e6ce0b41a1a4c8f05 | 973 | py | Python | shikshastudio/worklist/migrations/0001_initial.py | I-ArchanaDash/shikshastudio | 71c5631fa19c90d419a6f31335f07195dbccb124 | [
"Apache-2.0"
] | null | null | null | shikshastudio/worklist/migrations/0001_initial.py | I-ArchanaDash/shikshastudio | 71c5631fa19c90d419a6f31335f07195dbccb124 | [
"Apache-2.0"
] | null | null | null | shikshastudio/worklist/migrations/0001_initial.py | I-ArchanaDash/shikshastudio | 71c5631fa19c90d419a6f31335f07195dbccb124 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.9 on 2022-02-02 12:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.9: initial schema for the worklist app.
    # Do not edit operations by hand; create a new migration instead.
    initial = True
    dependencies = [
        # Resolve the (possibly swapped) user model before creating the FK.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                ('complete', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Optional owner; a task is deleted with its user (CASCADE).
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 33.551724 | 141 | 0.63001 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial auto-generated migration: creates the `Task` model.
    initial = True
    dependencies = [
        # The user FK below requires the swappable user model to exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                ('complete', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Nullable owner; deleting the user cascades to their tasks.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
1c3ad9b85e444c0be6a54d60a231c91985be3569 | 6,496 | py | Python | LearningSafeSets/Validation/Validation.py | alexliniger/AdversarialRoadModel | 14157760687c22acc8b91c39128875005ada7563 | [
"Apache-2.0"
] | 20 | 2020-07-17T06:32:32.000Z | 2022-03-27T03:24:26.000Z | LearningSafeSets/Validation/Validation.py | alexliniger/AdversarialRoadModel | 14157760687c22acc8b91c39128875005ada7563 | [
"Apache-2.0"
] | null | null | null | LearningSafeSets/Validation/Validation.py | alexliniger/AdversarialRoadModel | 14157760687c22acc8b91c39128875005ada7563 | [
"Apache-2.0"
] | 7 | 2020-07-19T07:16:01.000Z | 2022-01-22T22:58:02.000Z | ## Copyright 2020 Alexander Liniger
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
###########################################################################
###########################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
class Validation:
    """Evaluates a trained safe-set classifier and serializes results/weights.

    Predictions are thresholded at ``config["cut_off"]``; accuracy,
    under-approximation (predicted unsafe but actually safe) and
    over-approximation (predicted safe but actually unsafe) rates are
    accumulated into ``self.data`` and written to ``config["result_dir"]``.
    """
    def __init__(self,config):
        self.config = config
        self.cut_off = config["cut_off"]
        self.data = {}
        self.result_dir = config["result_dir"]

    def validate(self,model,data):
        """Evaluate `model` on all batches (train + val) of `data`."""
        criterion_bce = nn.BCELoss()
        criterion_mse = nn.MSELoss()
        model.eval()
        correct = 0
        under_approx = 0
        over_approx = 0
        total = 0
        metric_mse = []
        metric_bce = []
        for i in range(data.n_all_batches):
            state, safe = data.giveBatch(i)
            safe_model = model(state).view(-1)
            # Threshold the continuous prediction into a {0,1} safe label.
            safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)
            metric_mse.append(criterion_mse(safe_model, safe).item())
            metric_bce.append(criterion_bce(safe_model, safe).item())
            total += safe.size(0)
            correct += (safe_model_max == safe).sum().item()
            # under: predicted unsafe (0) but labeled safe (1); over: converse.
            under_approx += (safe_model_max < safe).sum().item()
            over_approx += (safe_model_max > safe).sum().item()
        print('\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'
              % (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))
        self.data['full_set'] = []
        self.data['full_set'].append({
            'acc': correct / total,
            'under': under_approx / total,
            'over': over_approx / total,
            'total': total,
            'correct': correct,
            'mse': np.mean(metric_mse),
            'bce': np.mean(metric_bce)
        })

    def validateTest(self,model,data):
        """Evaluate `model` on the held-out batches of `data` only."""
        criterion_bce = nn.BCELoss()
        criterion_mse = nn.MSELoss()
        model.eval()
        correct = 0
        under_approx = 0
        over_approx = 0
        total = 0
        metric_mse = []
        metric_bce = []
        for i in range(data.n_train_batches,data.n_all_batches):
            state, safe = data.giveBatch(i)
            safe_model = model(state).view(-1)
            safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)
            metric_mse.append(criterion_mse(safe_model, safe).item())
            metric_bce.append(criterion_bce(safe_model, safe).item())
            total += safe.size(0)
            correct += (safe_model_max == safe).sum().item()
            under_approx += (safe_model_max < safe).sum().item()
            over_approx += (safe_model_max > safe).sum().item()
        print('\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'
              % (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))
        self.data['val_set'] = []
        self.data['val_set'].append({
            'acc': correct / total,
            'under': under_approx / total,
            'over': over_approx / total,
            'total': total,
            'correct': correct,
            'mse': np.mean(metric_mse),
            'bce': np.mean(metric_bce)
        })

    def validateTestUnseen(self,model,data):
        """Evaluate on unseen test batches and dump per-batch prediction masks."""
        criterion_bce = nn.BCELoss()
        criterion_mse = nn.MSELoss()
        model.eval()
        correct = 0
        under_approx = 0
        over_approx = 0
        total = 0
        metric_mse = []
        metric_bce = []
        for i in range(self.config['NGKAPPA_T']):
            state, safe = data.giveTest(i)
            safe_model = model(state).view(-1)
            safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)
            metric_mse.append(criterion_mse(safe_model, safe).item())
            metric_bce.append(criterion_bce(safe_model, safe).item())
            total += safe.size(0)
            correct += (safe_model_max == safe).sum().item()
            under_approx += (safe_model_max < safe).sum().item()
            over_approx += (safe_model_max > safe).sum().item()
            name = self.result_dir+"/RobustInv-Pred-"+str(i)+".bin"
            # BUG FIX: file handle was opened and never closed; use a
            # context manager so the mask is flushed and the handle released.
            with open(name, "bw") as fh:
                safe_model_max.detach().numpy().astype(bool).tofile(fh)
        print('\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'
              % (np.mean(metric_mse), np.mean(metric_bce),correct/total,under_approx/total,over_approx/total))
        self.data['test_set'] = []
        self.data['test_set'].append({
            'acc': correct / total,
            'under': under_approx / total,
            'over': over_approx / total,
            'total': total,
            'correct': correct,
            'mse': np.mean(metric_mse),
            'bce': np.mean(metric_bce)
        })

    def save_val(self):
        """Write accumulated validation metrics to <result_dir>/val.txt as JSON."""
        with open(self.result_dir + '/val.txt', 'w') as outfile:
            json.dump(self.data, outfile, indent=4)

    def save_model(self,model):
        """Export the weights/biases of each parameterized layer to model.txt.

        Walks ``model.model`` (a torch Sequential, judging by the _modules
        access — TODO confirm) and records W/b per layer plus a "length" key.
        """
        model_dict = {}
        k = 0
        for i in range(len(model._modules['model']._modules)):
            if len(model._modules['model']._modules[str(i)]._parameters) > 0:
                W = model._modules['model']._modules[str(i)]._parameters['weight'].data.detach().numpy().tolist()
                b = model._modules['model']._modules[str(i)]._parameters['bias'].data.detach().numpy().tolist()
                model_dict[str(k)] = []
                model_dict[str(k)].append({
                    'W': W,
                    'b': b })
                k+=1
        model_dict["length"] = k
        with open(self.result_dir+'/model.txt', 'w') as outfile:
            json.dump(model_dict, outfile, indent=4)
| 35.113514 | 119 | 0.552032 | true | true | |
1c3ada5c5ef2fa17cd168410a01d8ba068d9d078 | 12,479 | py | Python | var/spack/repos/builtin/packages/ascent/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2018-08-20T06:55:11.000Z | 2018-08-20T06:55:11.000Z | var/spack/repos/builtin/packages/ascent/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/ascent/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-12T19:27:17.000Z | 2020-03-12T19:27:17.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import socket
import os
import llnl.util.tty as tty
from os import environ as env
def cmake_cache_entry(name, value):
    """Render one CMake cache assignment for a generated host-config file.

    Produces ``set(<name> "<value>" CACHE PATH "")`` followed by a blank line.
    """
    return f'set({name} "{value}" CACHE PATH "")\n\n'
class Ascent(Package):
    """Ascent is an open source many-core capable lightweight in situ
    visualization and analysis infrastructure for multi-physics HPC
    simulations."""
    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"
    maintainers = ['cyrush']
    version('develop', branch='develop', submodules=True)
    ###########################################################################
    # package variants
    ###########################################################################
    variant("shared", default=True, description="Build Conduit as shared libs")
    variant("cmake", default=True,
            description="Build CMake (if off, attempt to use cmake from PATH)")
    variant("mpi", default=True, description="Build Ascent MPI Support")
    # variants for python support
    variant("python", default=True, description="Build Conduit Python support")
    # variants for runtime features
    variant("vtkh", default=True,
            description="Build VTK-h filter and rendering support")
    variant("tbb", default=True, description="Build tbb support")
    variant("cuda", default=False, description="Build cuda support")
    variant("adios", default=False, description="Build Adios filter support")
    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Conduit's documentation")
    ###########################################################################
    # package dependencies
    ###########################################################################
    depends_on("cmake", when="+cmake")
    depends_on("conduit@master")
    #######################
    # Python
    #######################
    # we need a shared version of python b/c linking with static python lib
    # causes duplicate state issues when running compiled python modules.
    depends_on("python+shared")
    extends("python", when="+python")
    depends_on("py-numpy", when="+python", type=('build', 'run'))
    #######################
    # MPI
    #######################
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+python+mpi")
    #############################
    # TPLs for Runtime Features
    #############################
    depends_on("vtkh", when="+vtkh")
    depends_on("vtkh+cuda", when="+vtkh+cuda")
    depends_on("adios", when="+adios")
    #######################
    # Documentation related
    #######################
    depends_on("py-sphinx", when="+python+doc", type='build')
    def install(self, spec, prefix):
        """
        Build and install Ascent.

        Generates a 'host-config' CMake cache file first, then configures,
        builds, and installs out of a 'spack-build' directory. (The previous
        docstring said "Conduit" — copy-paste from the conduit package.)
        """
        with working_dir('spack-build', create=True):
            py_site_pkgs_dir = None
            if "+python" in spec:
                py_site_pkgs_dir = site_packages_dir
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix,
                                                     py_site_pkgs_dir)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            cmake(*cmake_args)
            make()
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)
    def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build ascent.
        For more details about 'host-config' files see:
            http://ascent.readthedocs.io/en/latest/BuildingAscent.html
        Note:
          The `py_site_pkgs_dir` arg exists to allow a package that
          subclasses this package provide a specific site packages
          dir when calling this function. `py_site_pkgs_dir` should
          be an absolute path or `None`.
          This is necessary because the spack `site_packages_dir`
          var will not exist in the base class. For more details
          on this issue see: https://github.com/spack/spack/issues/6261

        Returns:
          Absolute path of the generated host-config file.
        """
        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]
        f_compiler = None
        if self.compiler.fc:
            # even if this is set, it may not exist so do one more sanity check
            if os.path.isfile(env["SPACK_FC"]):
                f_compiler = env["SPACK_FC"]
        #######################################################################
        # By directly fetching the names of the actual compilers we appear
        # to doing something evil here, but this is necessary to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################
        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]
        ##############################################
        # Find and record what CMake is used
        ##############################################
        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path
        host_cfg_fname = "%s-%s-%s-ascent.cmake" % (socket.gethostname(),
                                                    sys_type,
                                                    spec.compiler)
        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")
        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        cfg.write("# c compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
        cfg.write("# cpp compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
        cfg.write("# fortran compiler used by spack\n")
        if f_compiler is not None:
            cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "ON"))
            cfg.write(cmake_cache_entry("CMAKE_Fortran_COMPILER", f_compiler))
        else:
            cfg.write("# no fortran compiler found\n\n")
            cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "OFF"))
        #######################################################################
        # Core Dependencies
        #######################################################################
        #######################
        # Conduit
        #######################
        cfg.write("# conduit from spack \n")
        cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
        #######################################################################
        # Optional Dependencies
        #######################################################################
        #######################
        # Python
        #######################
        cfg.write("# Python Support\n")
        if "+python" in spec:
            cfg.write("# Enable python module builds\n")
            cfg.write(cmake_cache_entry("ENABLE_PYTHON", "ON"))
            cfg.write("# python from spack \n")
            cfg.write(cmake_cache_entry("PYTHON_EXECUTABLE",
                      spec['python'].command.path))
            # only set dest python site packages dir if passed
            if py_site_pkgs_dir:
                cfg.write(cmake_cache_entry("PYTHON_MODULE_INSTALL_PREFIX",
                                            py_site_pkgs_dir))
        else:
            cfg.write(cmake_cache_entry("ENABLE_PYTHON", "OFF"))
        if "+doc" in spec:
            cfg.write(cmake_cache_entry("ENABLE_DOCS", "ON"))
            cfg.write("# sphinx from spack \n")
            sphinx_build_exe = join_path(spec['py-sphinx'].prefix.bin,
                                         "sphinx-build")
            cfg.write(cmake_cache_entry("SPHINX_EXECUTABLE", sphinx_build_exe))
        else:
            cfg.write(cmake_cache_entry("ENABLE_DOCS", "OFF"))
        #######################
        # MPI
        #######################
        cfg.write("# MPI Support\n")
        if "+mpi" in spec:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            cfg.write(cmake_cache_entry("MPI_C_COMPILER", spec['mpi'].mpicc))
            cfg.write(cmake_cache_entry("MPI_CXX_COMPILER",
                                        spec['mpi'].mpicxx))
            cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER",
                                        spec['mpi'].mpifc))
            mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
            if os.path.isfile(mpiexe_bin):
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if self.spec["cmake"].satisfies('@3.10:'):
                    cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
                                                mpiexe_bin))
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC",
                                                mpiexe_bin))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
        #######################
        # CUDA
        #######################
        cfg.write("# CUDA Support\n")
        if "+cuda" in spec:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
        #######################
        # VTK-h
        #######################
        cfg.write("# vtk-h support \n")
        if "+vtkh" in spec:
            # NOTE(review): spec['tbb'] / spec['vtkm'] are accessed here but
            # this package only declares depends_on("vtkh") — presumably they
            # arrive transitively via vtkh; verify against the vtkh package.
            cfg.write("# tbb from spack\n")
            cfg.write(cmake_cache_entry("TBB_DIR", spec['tbb'].prefix))
            cfg.write("# vtk-m from spack\n")
            cfg.write(cmake_cache_entry("VTKM_DIR", spec['vtkm'].prefix))
            cfg.write("# vtk-h from spack\n")
            cfg.write(cmake_cache_entry("VTKH_DIR", spec['vtkh'].prefix))
        else:
            cfg.write("# vtk-h not built by spack \n")
        #######################
        # Adios
        #######################
        cfg.write("# adios support\n")
        if "+adios" in spec:
            cfg.write(cmake_cache_entry("ADIOS_DIR", spec['adios'].prefix))
        else:
            cfg.write("# adios not built by spack \n")
        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()
        host_cfg_fname = os.path.abspath(host_cfg_fname)
        # NOTE(review): message says "conduit" but this is the ascent package.
        tty.info("spack generated conduit host-config file: " + host_cfg_fname)
        return host_cfg_fname
| 37.362275 | 79 | 0.494671 |
from spack import *
import socket
import os
import llnl.util.tty as tty
from os import environ as env
def cmake_cache_entry(name, value):
    """Return a CMake ``set(... CACHE PATH ...)`` line plus a blank line."""
    entry_template = 'set({0} "{1}" CACHE PATH "")\n\n'
    return entry_template.format(name, value)
class Ascent(Package):
    """Spack package for Ascent: a lightweight in situ visualization and
    analysis infrastructure for multi-physics HPC simulations."""
    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"
    # Spack GitHub handles of the package maintainers.
    maintainers = ['cyrush']
    # Track the develop branch, fetching git submodules as well.
    version('develop', branch='develop', submodules=True)
| true | true |
1c3ada6ac0274750c25dfe121fdcff9e5ebfe0aa | 8,426 | py | Python | linear_dynamical_systems/arma.py | kiss2u/google-research | 2cd66234656f9e2f4218ed90a2d8aa9cf3139093 | [
"Apache-2.0"
] | 7 | 2020-03-15T12:14:07.000Z | 2021-12-01T07:01:09.000Z | linear_dynamical_systems/arma.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | linear_dynamical_systems/arma.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2020-06-15T03:06:53.000Z | 2021-08-06T16:38:33.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils related to AR and ARMA models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import sm_exceptions
from statsmodels.tools.tools import add_constant
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.tsatools import lagmat
def fit_ar(outputs, inputs, guessed_dim):
  """Fits an AR model of order p = guessed_dim.

  Args:
    outputs: Array with the output values from the LDS, shape (T, out_dim).
    inputs: Array with exogenous inputs values, or None.
    guessed_dim: Guessed hidden dimension (the AR order p).

  Returns:
    Fitted AR coefficients, shape (guessed_dim,). On convergence failure of
    the ARMAX fit, warns and returns zeros.
  """
  if outputs.shape[1] > 1:
    # If there are multiple output dimensions, fit autoregressive params on
    # each dimension separately and average.
    # BUG FIX: xrange does not exist on Python 3; use range.
    params_list = [
        fit_ar(outputs[:, j:j+1], inputs, guessed_dim) \
        for j in range(outputs.shape[1])]
    return np.mean(
        np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)
  if inputs is None:
    # Plain AR fit with BIC lag selection; pad unused lags with zeros so the
    # returned vector always has length guessed_dim.
    model = AR(outputs).fit(ic='bic', trend='c', maxlag=guessed_dim, disp=0)
    arparams = np.zeros(guessed_dim)
    arparams[:model.k_ar] = model.params[model.k_trend:]
    return arparams
  else:
    model = ARMA(outputs, order=(guessed_dim, 0), exog=inputs)
    try:
      arma_model = model.fit(start_ar_lags=guessed_dim, trend='c', disp=0)
      return arma_model.arparams
    except (ValueError, np.linalg.LinAlgError) as e:
      warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
      return np.zeros(guessed_dim)
def _fit_arma_iter(outputs, inputs, p, q, r, l2_reg=0.0):
  """Iterative regression for estimating AR params in ARMAX(p, q, r) model.

  The iterative AR regression process provides consistent estimates for the
  AR parameters of an ARMAX(p, q, r) model after q iterative steps.
  It first fits an ARMAX(p, 0, r) model with least squares regression, then
  ARMAX(p, 1, r), and so on, til ARMAX(p, q, r). At the i-th step, it
  fits an ARMAX(p, i, r) model, according to estimated error terms from the
  previous step.
  For description of the iterative regression method, see Section 2 of
  `Consistent Estimates of Autoregressive Parameters and Extended Sample
  Autocorrelation Function for Stationary and Nonstationary ARMA Models` at
  https://www.jstor.org/stable/2288340.
  The implementation here is a generalization of the method mentioned in the
  paper. We adapt the method for multidimensional outputs, exogenous inputs,
  nan handling, and also add regularization on the MA parameters.

  Args:
    outputs: Array with the output values from the LDS, nans allowed.
    inputs: Array with exogenous inputs values, nans allowed. Could be None.
    p: AR order, i.e. max lag of the autoregressive part.
    q: MA order, i.e. max lag of the error terms.
    r: Max lag of the exogenous inputs.
    l2_reg: L2 regularization coefficient, to be applied on MA coefficients.

  Returns:
    Fitted AR coefficients.
  """
  if outputs.shape[1] > 1:
    # If there are multiple output dimensions, fit autoregressive params on
    # each dimension separately and average.
    # BUG FIX: xrange does not exist on Python 3; use range (three spots).
    params_list = [
        _fit_arma_iter(outputs[:, j:j+1], inputs, p, q, r, l2_reg=l2_reg) \
        for j in range(outputs.shape[1])]
    return np.mean(
        np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)
  # We include a constant term in regression.
  k_const = 1
  # Input dim. If inputs is None, then in_dim = 0.
  in_dim = 0
  if inputs is not None:
    in_dim = inputs.shape[1]
    # Lag the inputs to obtain [?, r], column j means series x_{t-j}.
    # Use trim to drop rows with unknown values both at beginning and end.
    lagged_in = np.concatenate(
        [lagmat(inputs[:, i], maxlag=r, trim='both') for i in range(in_dim)],
        axis=1)
    # Since we trim in beginning, the offset is r.
    lagged_in_offset = r
  # Lag the series itself to p-th order.
  lagged_out = lagmat(outputs, maxlag=p, trim='both')
  lagged_out_offset = p
  y = outputs
  y_offset = 0
  # Estimated residuals, initialized to 0.
  res = np.zeros_like(outputs)
  for i in range(q + 1):
    # Lag the residuals to i-th order in i-th iteration.
    lagged_res = lagmat(res, maxlag=i, trim='both')
    lagged_res_offset = y_offset + i
    # Compute offset in regression, since lagged_in, lagged_out, and lagged_res
    # have different offsets. Align them.
    if inputs is None:
      y_offset = max(lagged_out_offset, lagged_res_offset)
    else:
      y_offset = max(lagged_out_offset, lagged_res_offset, lagged_in_offset)
    y = outputs[y_offset:, :]
    # Concatenate all variables in regression.
    x = np.concatenate([
        lagged_out[y_offset - lagged_out_offset:, :],
        lagged_res[y_offset - lagged_res_offset:, :]
    ],
                       axis=1)
    if inputs is not None:
      x = np.concatenate([lagged_in[y_offset - lagged_in_offset:, :], x],
                         axis=1)
    # Add constant term as the first variable.
    x = add_constant(x, prepend=True)
    if x.shape[1] < k_const + in_dim * r + p + i:
      raise ValueError('Insufficient sequence length for model fitting.')
    # Drop rows with nans.
    arr = np.concatenate([y, x], axis=1)
    arr = arr[~np.isnan(arr).any(axis=1)]
    y_dropped_na = arr[:, 0:1]
    x_dropped_na = arr[:, 1:]
    # Only regularize the MA part.
    alpha = np.concatenate(
        [np.zeros(k_const + in_dim * r + p), l2_reg * np.ones(i)], axis=0)
    # When L1_wt = 0, it's ridge regression.
    olsfit = OLS(y_dropped_na, x_dropped_na).fit_regularized(
        alpha=alpha, L1_wt=0.0)
    # Update estimated residuals.
    res = y - np.matmul(x, olsfit.params.reshape(-1, 1))
  if len(olsfit.params) != k_const + in_dim * r + p + q:
    raise ValueError('Expected param len %d, got %d.' %
                     (k_const + in_dim * r + p + q, len(olsfit.params)))
  if q == 0:
    return olsfit.params[-p:]
  return olsfit.params[-(p + q):-q]
def fit_arma_iter(outputs, inputs, guessed_dim, l2_reg=0.0):
  """Iterative regression for an ARMAX(p, q, r) model.

  Convenience wrapper around _fit_arma_iter that derives the model orders
  from a single guessed hidden dimension: p = guessed_dim and
  q = r = guessed_dim - 1.

  Args:
    outputs: Array with the output values from the LDS.
    inputs: Array with exogenous input values, or None.
    guessed_dim: Guessed hidden dimension.
    l2_reg: L2 regularization coefficient.

  Returns:
    Fitted AR coefficients.
  """
  ar_order = guessed_dim
  ma_order = guessed_dim - 1
  return _fit_arma_iter(
      outputs,
      inputs,
      p=ar_order,
      q=ma_order,
      r=ma_order,
      l2_reg=l2_reg)
def fit_arma_mle(outputs, inputs, guessed_dim, method='css-mle'):
  """Fits an ARMA model of order (p=guessed_dim, q=guessed_dim) by MLE.

  Uses the statsmodels ARMA fit with its default start-parameter
  initialization (start_ar_lags=None). If the optimizer raises a
  ValueError or a linear-algebra error (e.g. SVD failure), a
  ConvergenceWarning is emitted and a zero coefficient vector of length
  guessed_dim is returned instead.

  Args:
    outputs: Array with the output values from the LDS.
    inputs: Array of exogenous inputs, or None.
    guessed_dim: Guessed hidden dimension.
    method: 'css-mle', 'css' or 'mle'; fit method in the statsmodels package.

  Returns:
    Fitted AR coefficients, or zeros of length guessed_dim on failure.
  """
  p = guessed_dim
  q = guessed_dim
  model = ARMA(outputs, order=(p, q), exog=inputs)
  try:
    arma_model = model.fit(start_ar_lags=None, trend='c', method=method, disp=0)
    return arma_model.arparams
  except (ValueError, np.linalg.LinAlgError) as e:
    # Best-effort: surface the failure as a warning rather than aborting.
    warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
    return np.zeros(p)
def get_eig_from_arparams(arparams):
  """Recovers eigenvalue estimates from fitted AR coefficients.

  The AR coefficients a_1..a_p define the characteristic polynomial
  z^p - a_1 * z^(p-1) - ... - a_p; its roots are the eigenvalue
  estimates of the underlying linear system.

  Args:
    arparams: Array of fitted AR coefficients.

  Returns:
    Roots of the characteristic polynomial, sorted by real part in
    descending order.
  """
  companion_roots = np.roots(np.r_[1, -arparams])
  descending = np.argsort(companion_roots.real)[::-1]
  return companion_roots[descending]
| 37.283186 | 80 | 0.693805 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import sm_exceptions
from statsmodels.tools.tools import add_constant
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.tsatools import lagmat
def fit_ar(outputs, inputs, guessed_dim):
if outputs.shape[1] > 1:
params_list = [
fit_ar(outputs[:, j:j+1], inputs, guessed_dim) \
for j in xrange(outputs.shape[1])]
return np.mean(
np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)
if inputs is None:
model = AR(outputs).fit(ic='bic', trend='c', maxlag=guessed_dim, disp=0)
arparams = np.zeros(guessed_dim)
arparams[:model.k_ar] = model.params[model.k_trend:]
return arparams
else:
model = ARMA(outputs, order=(guessed_dim, 0), exog=inputs)
try:
arma_model = model.fit(start_ar_lags=guessed_dim, trend='c', disp=0)
return arma_model.arparams
except (ValueError, np.linalg.LinAlgError) as e:
warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
return np.zeros(guessed_dim)
def _fit_arma_iter(outputs, inputs, p, q, r, l2_reg=0.0):
if outputs.shape[1] > 1:
params_list = [
_fit_arma_iter(outputs[:, j:j+1], inputs, p, q, r, l2_reg=l2_reg) \
for j in xrange(outputs.shape[1])]
return np.mean(
np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)
k_const = 1
in_dim = 0
if inputs is not None:
in_dim = inputs.shape[1]
lagged_in = np.concatenate(
[lagmat(inputs[:, i], maxlag=r, trim='both') for i in xrange(in_dim)],
axis=1)
lagged_in_offset = r
lagged_out = lagmat(outputs, maxlag=p, trim='both')
lagged_out_offset = p
y = outputs
y_offset = 0
res = np.zeros_like(outputs)
for i in xrange(q + 1):
lagged_res = lagmat(res, maxlag=i, trim='both')
lagged_res_offset = y_offset + i
if inputs is None:
y_offset = max(lagged_out_offset, lagged_res_offset)
else:
y_offset = max(lagged_out_offset, lagged_res_offset, lagged_in_offset)
y = outputs[y_offset:, :]
x = np.concatenate([
lagged_out[y_offset - lagged_out_offset:, :],
lagged_res[y_offset - lagged_res_offset:, :]
],
axis=1)
if inputs is not None:
x = np.concatenate([lagged_in[y_offset - lagged_in_offset:, :], x],
axis=1)
x = add_constant(x, prepend=True)
if x.shape[1] < k_const + in_dim * r + p + i:
raise ValueError('Insufficient sequence length for model fitting.')
arr = np.concatenate([y, x], axis=1)
arr = arr[~np.isnan(arr).any(axis=1)]
y_dropped_na = arr[:, 0:1]
x_dropped_na = arr[:, 1:]
alpha = np.concatenate(
[np.zeros(k_const + in_dim * r + p), l2_reg * np.ones(i)], axis=0)
olsfit = OLS(y_dropped_na, x_dropped_na).fit_regularized(
alpha=alpha, L1_wt=0.0)
# Update estimated residuals.
res = y - np.matmul(x, olsfit.params.reshape(-1, 1))
if len(olsfit.params) != k_const + in_dim * r + p + q:
raise ValueError('Expected param len %d, got %d.' %
(k_const + in_dim * r + p + q, len(olsfit.params)))
if q == 0:
return olsfit.params[-p:]
return olsfit.params[-(p + q):-q]
def fit_arma_iter(outputs, inputs, guessed_dim, l2_reg=0.0):
return _fit_arma_iter(
outputs,
inputs,
p=guessed_dim,
q=guessed_dim - 1,
r=guessed_dim - 1,
l2_reg=l2_reg)
def fit_arma_mle(outputs, inputs, guessed_dim, method='css-mle'):
p = guessed_dim
q = guessed_dim
model = ARMA(outputs, order=(p, q), exog=inputs)
try:
arma_model = model.fit(start_ar_lags=None, trend='c', method=method, disp=0)
return arma_model.arparams
except (ValueError, np.linalg.LinAlgError) as e:
warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
return np.zeros(p)
def get_eig_from_arparams(arparams):
eigs = np.roots(np.r_[1, -arparams])
return eigs[np.argsort(eigs.real)[::-1]]
| true | true |
1c3ada899ff98335f4fbfabd64f32248e2f9b294 | 13,121 | py | Python | lib/libsnmp/snmpmanager.py | tamihiro/libsnmp | 537bb916dcc27e9e94eeae29b210b3a327586dc1 | [
"MIT"
] | null | null | null | lib/libsnmp/snmpmanager.py | tamihiro/libsnmp | 537bb916dcc27e9e94eeae29b210b3a327586dc1 | [
"MIT"
] | null | null | null | lib/libsnmp/snmpmanager.py | tamihiro/libsnmp | 537bb916dcc27e9e94eeae29b210b3a327586dc1 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
# $Id$
# $Revision$
#
# libsnmp - a Python SNMP library
# Copyright (c) 2003 Justin Warren <daedalus@eigenmagic.com>
#
## An snmpmanager understands SNMPv1 and SNMPv2c messages
## and so it can encode and decode both.
from future import standard_library
from builtins import bytes
standard_library.install_aliases()
import socket
import select
import logging
import queue
import time
import os
import asyncore
import sys
from libsnmp import debug
from libsnmp import asynrole
from libsnmp import rfc1157
from libsnmp import rfc1905
from libsnmp import v1
from libsnmp import v2
log = logging.getLogger('snmp-manager')
## Used in typeSetter()
## Demo only, really
# Maps a single-character type mnemonic to the SNMP/BER tag byte used as a
# key into rfc1157/rfc1905 tagDecodeDict (see typeSetter and
# createSetRequestMessage). Demo convenience only.
typeValDict = {
    'i': 0x02, ## Integer
    's': 0x04, ## String (OctetString)
    'o': 0x06, ## ObjectID
    't': 0x43, ## TimeTicks
    'a': 0x40, ## IPAddress
    'c': 0x41, ## Counter
    'C': 0x46, ## Counter64
    }
class snmpManager(asynrole.manager):
    """An SNMP manager that understands both SNMPv1 and SNMPv2c.

    Outbound requests are queued on self.outbound and sent from run().
    Responses are dispatched to per-request callbacks stored in
    self.callbacks, keyed by the PDU requestID. Received traps are handed
    to the single trapCallback supplied at construction time.
    """

    nextRequestID = 0 # global counter of requestIDs

    def __init__(self, queueEmpty=None, trapCallback=None, interface=('0.0.0.0', 0), timeout=0.25):
        """ Create a new snmpManager bound to interface

            queueEmpty is a callback of what to do if I run out
            of stuff to do. Default is to wait for more stuff.
            trapCallback is invoked for every trap PDU received.
        """
        self.queueEmpty = queueEmpty
        self.outbound = queue.Queue()

        # requestID -> callback for responses we are still waiting on
        self.callbacks = {}

        # What to do if we get a trap
        self.trapCallback = trapCallback

        # initialise as an asynrole manager
        asynrole.manager.__init__(self, (self.receiveData, None), interface=interface, timeout=timeout)

    def assignRequestID(self):
        """ Assign a unique requestID """
        reqID = self.nextRequestID
        self.nextRequestID += 1
        return reqID

    def createGetRequestPDU(self, varbindlist, version=2):
        """ Build a version-specific Get PDU for varbindlist """
        reqID = self.assignRequestID()
        if version == 1:
            return rfc1157.Get(reqID, varBindList=varbindlist)
        elif version == 2:
            return rfc1905.Get(reqID, varBindList=varbindlist)
        raise ValueError('Unknown version %d' % version)

    def createGetNextRequestPDU(self, varbindlist, version=2):
        """ Build a version-specific GetNext PDU for varbindlist """
        reqID = self.assignRequestID()
        if version == 1:
            return rfc1157.GetNext(reqID, varBindList=varbindlist)
        elif version == 2:
            return rfc1905.GetNext(reqID, varBindList=varbindlist)
        raise ValueError('Unknown version %d' % version)

    def createSetRequestPDU(self, varbindlist, version=2):
        """ Build a version-specific Set PDU for varbindlist """
        reqID = self.assignRequestID()
        if version == 1:
            return rfc1157.Set(reqID, varBindList=varbindlist)
        elif version == 2:
            return rfc1905.Set(reqID, varBindList=varbindlist)
        raise ValueError('Unknown version %d' % version)

    def createGetRequestMessage(self, oid, community='public', version=2):
        """ Creates a Get request message from an OID string and a
            community string.
        """
        if version == 1:
            objID = rfc1157.ObjectID(oid)
            val = rfc1157.Null()
            varbindlist = rfc1157.VarBindList([rfc1157.VarBind(objID, val)])
            pdu = self.createGetRequestPDU(varbindlist, 1)
            message = rfc1157.Message(community=community, data=pdu)
        elif version == 2:
            objID = rfc1905.ObjectID(oid)
            val = rfc1905.Null()
            varbindlist = rfc1905.VarBindList([rfc1905.VarBind(objID, val)])
            pdu = self.createGetRequestPDU(varbindlist, 2)
            message = rfc1905.Message(community=community, data=pdu)
        else:
            raise ValueError('Unknown version %d' % version)
        return message

    def createGetNextRequestMessage(self, varbindlist, community='public', version=2):
        """ Creates a GetNext request message from a prebuilt varbindlist
            and a community string.
        """
        pdu = self.createGetNextRequestPDU(varbindlist, version)
        if version == 1:
            return rfc1157.Message(community=community, data=pdu)
        if version == 2:
            return rfc1905.Message(community=community, data=pdu)

    def createSetRequestMessage(self, oid, valtype, value, community='public', version=2):
        """ Creates a Set request message from an OID, a value type
            (a tagDecodeDict key), a value and a community string.
        """
        if version == 1:
            objID = rfc1157.ObjectID(oid)
            val = rfc1157.tagDecodeDict[valtype](value)
            varbindlist = rfc1157.VarBindList([rfc1157.VarBind(objID, val)])
            pdu = self.createSetRequestPDU(varbindlist, 1)
            message = rfc1157.Message(community=community, data=pdu)
        elif version == 2:
            objID = rfc1905.ObjectID(oid)
            val = rfc1905.tagDecodeDict[valtype](value)
            varbindlist = rfc1905.VarBindList([rfc1905.VarBind(objID, val)])
            # BUG FIX: this branch previously requested a version-1
            # (rfc1157) Set PDU, wrapping an SNMPv1 PDU in an SNMPv2 message.
            pdu = self.createSetRequestPDU(varbindlist, 2)
            message = rfc1905.Message(community=community, data=pdu)
        else:
            raise ValueError('Unknown version %d' % version)
        return message

    def createTrapMessage(self, pdu, community='public', version=2):
        """ Creates a trap message object from a pdu and a
            community string.
        """
        if version == 1:
            return v1.createTrapMessage(community=community, data=pdu)
        elif version == 2:
            return v2.createTrapMessage(community=community, data=pdu)

    def createTrapPDU(self, varbindlist, version=2, enterprise='.1.3.6.1.4', agentAddr=None, genericTrap=6, specificTrap=0):
        """ Creates a Trap PDU object from a list of strings and integers
            along with a varBindList to make it a bit easier to build a Trap.

            If agentAddr is None, the local socket address is used.
        """
        if agentAddr is None:
            agentAddr = self.getsockname()[0]
        if version == 1:
            ent = rfc1157.ObjectID(enterprise)
            agent = rfc1157.NetworkAddress(agentAddr)
            gTrap = rfc1157.GenericTrap(genericTrap)
            sTrap = rfc1157.Integer(specificTrap)
            ts = rfc1157.TimeTicks(self.getSysUptime())
            pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
        elif version == 2:
            ent = rfc1905.ObjectID(enterprise)
            agent = rfc1905.NetworkAddress(agentAddr)
            # NOTE(review): this branch still uses rfc1157.GenericTrap and
            # rfc1157.TrapPDU -- presumably because rfc1905 does not declare
            # those types itself; confirm against the rfc1905 module.
            gTrap = rfc1157.GenericTrap(genericTrap)
            sTrap = rfc1905.Integer(specificTrap)
            ts = rfc1905.TimeTicks(self.getSysUptime())
            pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
        return pdu

    def snmpGet(self, oid, remote, callback, community='public', version=2):
        """ snmpGet issues an SNMP Get Request to remote for
            the object ID oid

            remote is a tuple of (host, port)
            oid is a dotted string eg: .1.2.6.1.0.1.1.3.0
        """
        msg = self.createGetRequestMessage(oid, community, version)

        # add this message to the outbound queue as a tuple
        self.outbound.put((msg, remote))
        # Add the callback to my dictionary with the requestID
        # as the key for later retrieval
        self.callbacks[msg.data.requestID] = callback
        return msg.data.requestID

    def snmpGetNext(self, varbindlist, remote, callback, community='public', version=2):
        """ snmpGetNext issues an SNMP Get Next Request to remote for
            the varbindlist that is passed in. It is assumed that you
            have either built a varbindlist yourself or just pass
            one in that was previously returned by an snmpGet or snmpGetNext
        """
        msg = self.createGetNextRequestMessage(varbindlist, community, version)

        # add this message to the outbound queue as a tuple
        self.outbound.put((msg, remote))
        # Add the callback to my dictionary with the requestID
        # as the key for later retrieval
        self.callbacks[msg.data.requestID] = callback
        return msg.data.requestID

    def snmpSet(self, oid, valtype, value, remote, callback, community='public', version=2):
        """ snmpSet is slightly more complex in that you need to pass in
            a combination of oid and value in order to set a variable.
            Depending on the version, this will be built into the appropriate
            varbindlist for message creation.

            valtype should be a tagDecodeDict key
        """
        msg = self.createSetRequestMessage(oid, valtype, value, community, version)

        # add this message to the outbound queue as a tuple
        self.outbound.put((msg, remote))
        # Add the callback to my dictionary with the requestID
        # as the key for later retrieval
        self.callbacks[msg.data.requestID] = callback
        return msg.data.requestID

    def snmpTrap(self, remote, trapPDU, community='public', version=2):
        """ Queue up a trap for sending """
        msg = self.createTrapMessage(trapPDU, community, version)
        self.outbound.put((msg, remote))

    def receiveData(self, manager, cb_ctx, xxx_todo_changeme, xxx_todo_changeme1 ):
        """ This method should be called when data is received
            from a remote host.
        """
        (rawdata, src) = xxx_todo_changeme
        # On Python 2 the payload arrives as str and must be converted;
        # on Python 3 it is already bytes.
        data = bytes(rawdata, 'latin1') if sys.version_info[0] < 3 else rawdata
        (exc_type, exc_value, exc_traceback) = xxx_todo_changeme1
        if exc_type is not None:
            raise exc_type(exc_value)

        # Decode the data into a message
        msg = rfc1905.Message().decode(data)

        # Dispatch on the wire version of the message
        # (version field 0 == SNMPv1, 1 == SNMPv2c)
        if msg.version == 0:
            self.handleV1Message(msg)
        elif msg.version == 1:
            self.handleV2Message(msg)
        else:
            log.error('Unknown message version %d detected' % msg.version)
            log.error('version is a %s' % msg.version())
            raise ValueError('Unknown message version %d detected' % msg.version)

    def handleV1Message(self, msg):
        """ Handle reception of an SNMP version 1 message """
        if isinstance(msg.data, rfc1157.PDU):
            # Invoke the callback registered for this requestID,
            # passing it a reference to myself and the message.
            self.callbacks[msg.data.requestID](self, msg)
            ## remove the callback from my list once it's done
            del self.callbacks[msg.data.requestID]
        elif isinstance(msg.data, rfc1157.TrapPDU):
            self.trapCallback(self, msg)
        else:
            log.info('Unknown SNMPv1 Message type received')

    def handleV2Message(self, msg):
        """ Handle reception of an SNMP version 2c message """
        if isinstance(msg.data, rfc1905.PDU):
            self.callbacks[msg.data.requestID](self, msg)
            ## remove the callback from my list once it's done
            del self.callbacks[msg.data.requestID]
        elif isinstance(msg.data, rfc1905.TrapPDU):
            self.trapCallback(self, msg)
        else:
            log.info('Unknown SNMPv2 Message type received')

    def enterpriseOID(self, partialOID):
        """ A convenience method to automagically prepend the
            'enterprise' prefix to the partial OID

            NOTE(review): the prefix used is .1.3.6.1.2.1 (mib-2), not the
            enterprises arc .1.3.6.1.4.1 -- confirm the intended name.
        """
        return '.1.3.6.1.2.1.' + partialOID

    def run(self):
        """ Listen for incoming request thingies
            and send pending requests
        """
        while True:
            try:
                # check for inbound messages
                self.poll()

                # send any pending outbound messages (non-blocking get)
                msg, remote = self.outbound.get(block=False)
                self.send(msg.encode(), remote)
            except queue.Empty:
                # nothing to send right now; let the caller decide what to do
                if self.queueEmpty:
                    self.queueEmpty(self)

    def getSysUptime(self):
        """ Return the system uptime in hundredths of a second.

            This is a pain because of system dependence; each OS has a
            different way of doing this and there is no Python builtin.
            Returns 0 on platforms without /proc/uptime.
        """
        try:
            ## The linux way
            with open('/proc/uptime') as f:
                uptime = f.read().split()
            return int(float(uptime[0]) * 100)
        except Exception:
            # Deliberate best-effort fallback for non-Linux platforms.
            return 0

    def typeSetter(self, typestring):
        """ Used to figure out the right tag value key to use in
            snmpSet. This is really only used for a more user-friendly
            way of doing things from a frontend. Use the actual key
            values if you're calling snmpSet programmatically.
        """
        return typeValDict[typestring]
| 34.258486 | 124 | 0.604908 | from __future__ import unicode_literals
ses()
import socket
import select
import logging
import queue
import time
import os
import asyncore
import sys
from libsnmp import debug
from libsnmp import asynrole
from libsnmp import rfc1157
from libsnmp import rfc1905
from libsnmp import v1
from libsnmp import v2
log = logging.getLogger('snmp-manager')
: 0x06, 0x43, x40, 0x41, 0x46,
class snmpManager(asynrole.manager):
nextRequestID = 0
def __init__(self, queueEmpty=None, trapCallback=None, interface=('0.0.0.0', 0), timeout=0.25):
self.queueEmpty = queueEmpty
self.outbound = queue.Queue()
self.callbacks = {}
self.trapCallback = trapCallback
asynrole.manager.__init__(self, (self.receiveData, None), interface=interface, timeout=timeout )
try:
pass
except:
raise
def assignRequestID(self):
reqID = self.nextRequestID
self.nextRequestID += 1
return reqID
def createGetRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.Get( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.Get( reqID, varBindList=varbindlist )
return pdu
def createGetNextRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.GetNext( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.GetNext( reqID, varBindList=varbindlist )
return pdu
def createSetRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.Set( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.Set( reqID, varBindList=varbindlist )
return pdu
def createGetRequestMessage(self, oid, community='public', version=2):
if version == 1:
objID = rfc1157.ObjectID(oid)
val = rfc1157.Null()
varbindlist = rfc1157.VarBindList( [ rfc1157.VarBind(objID, val) ] )
pdu = self.createGetRequestPDU( varbindlist, 1 )
message = rfc1157.Message( community=community, data=pdu )
elif version == 2:
objID = rfc1905.ObjectID(oid)
val = rfc1905.Null()
varbindlist = rfc1905.VarBindList( [ rfc1905.VarBind(objID, val) ] )
pdu = self.createGetRequestPDU( varbindlist, 2 )
message = rfc1905.Message( community=community, data=pdu )
else:
raise ValueError('Unknown version %d' % version)
return message
def createGetNextRequestMessage(self, varbindlist, community='public', version=2):
pdu = self.createGetNextRequestPDU( varbindlist, version )
if version == 1:
return rfc1157.Message( community=community, data=pdu )
if version == 2:
return rfc1905.Message( community=community, data=pdu )
def createSetRequestMessage(self, oid, valtype, value, community='public', version=2):
if version == 1:
objID = rfc1157.ObjectID(oid)
val = rfc1157.tagDecodeDict[valtype](value)
varbindlist = rfc1157.VarBindList( [ rfc1157.VarBind(objID, val) ] )
pdu = self.createSetRequestPDU( varbindlist, 1 )
message = rfc1157.Message( community=community, data=pdu )
elif version == 2:
objID = rfc1905.ObjectID(oid)
val = rfc1905.tagDecodeDict[valtype](value)
varbindlist = rfc1905.VarBindList( [ rfc1905.VarBind(objID, val) ] )
pdu = self.createSetRequestPDU( varbindlist, 1 )
message = rfc1905.Message( community=community, data=pdu )
else:
raise ValueError('Unknown version %d' % version)
return message
def createTrapMessage(self, pdu, community='public', version=2):
if version == 1:
return v1.createTrapMessage( community=community, data=pdu )
elif version == 2:
return v2.createTrapMessage( community=community, data=pdu )
def createTrapPDU(self, varbindlist, version=2, enterprise='.1.3.6.1.4', agentAddr=None, genericTrap=6, specificTrap=0):
if agentAddr is None:
agentAddr = self.getsockname()[0]
pass
if version == 1:
ent = rfc1157.ObjectID(enterprise)
agent = rfc1157.NetworkAddress(agentAddr)
gTrap = rfc1157.GenericTrap(genericTrap)
sTrap = rfc1157.Integer(specificTrap)
ts = rfc1157.TimeTicks( self.getSysUptime() )
pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
elif version == 2:
ent = rfc1905.ObjectID(enterprise)
agent = rfc1905.NetworkAddress(agentAddr)
gTrap = rfc1157.GenericTrap(genericTrap)
sTrap = rfc1905.Integer(specificTrap)
ts = rfc1905.TimeTicks( self.getSysUptime() )
pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
pass
return pdu
def snmpGet(self, oid, remote, callback, community='public', version=2):
msg = self.createGetRequestMessage( oid, community, version )
self.outbound.put( (msg, remote) )
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpGetNext(self, varbindlist, remote, callback, community='public', version=2):
msg = self.createGetNextRequestMessage( varbindlist, community, version )
self.outbound.put( (msg, remote) )
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpSet(self, oid, valtype, value, remote, callback, community='public', version=2):
msg = self.createSetRequestMessage( oid, valtype, value, community, version )
self.outbound.put( (msg, remote) )
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpTrap(self, remote, trapPDU, community='public', version=2):
msg = self.createTrapMessage(trapPDU, community, version)
self.outbound.put( (msg, remote) )
def receiveData(self, manager, cb_ctx, xxx_todo_changeme, xxx_todo_changeme1 ):
(rawdata, src) = xxx_todo_changeme
data = sys.version_info[0]<3 and bytes(rawdata, 'latin1') or rawdata
(exc_type, exc_value, exc_traceback) = xxx_todo_changeme1
if exc_type is not None:
raise exc_type(exc_value)
msg = rfc1905.Message().decode(data)
if msg.version == 0:
self.handleV1Message(msg)
elif msg.version == 1:
self.handleV2Message(msg)
else:
log.error('Unknown message version %d detected' % msg.version)
log.error('version is a %s' % msg.version() )
raise ValueError('Unknown message version %d detected' % msg.version)
def handleV1Message(self, msg):
if isinstance(msg.data, rfc1157.PDU):
self.callbacks[msg.data.requestID](self, msg)
D]
elif isinstance(msg.data, rfc1157.TrapPDU):
self.trapCallback(self, msg)
else:
log.info('Unknown SNMPv1 Message type received')
pass
def handleV2Message(self, msg):
if isinstance(msg.data, rfc1905.PDU):
self.callbacks[msg.data.requestID](self, msg)
## remove the callback from my list once it's done
del self.callbacks[msg.data.requestID]
elif isinstance(msg.data, rfc1905.TrapPDU):
self.trapCallback(self, msg)
else:
log.info('Unknown SNMPv2 Message type received')
pass
def enterpriseOID(self, partialOID):
return '.1.3.6.1.2.1.' + partialOID
def run(self):
while 1:
try:
self.poll()
request = self.outbound.get(0)
self.send( request[0].encode(), request[1] )
except queue.Empty:
if self.queueEmpty:
self.queueEmpty(self)
pass
except:
raise
def getSysUptime(self):
try:
uptime = open('/proc/uptime').read().split()
upsecs = int(float(uptime[0]) * 100)
return upsecs
except:
return 0
def typeSetter(self, typestring):
return typeValDict[typestring]
| true | true |
1c3adc6658ccf330715b06712be972a88383fdc5 | 2,820 | py | Python | src/super_gradients/examples/cifar10_training_torch_objects/cifar10_training_torch_objects_example.py | Deci-AI/super-gradients | 658f638389654668a085e23c3b19622241fd9267 | [
"Apache-2.0"
] | 308 | 2021-12-30T10:14:30.000Z | 2022-03-30T19:05:31.000Z | src/super_gradients/examples/cifar10_training_torch_objects/cifar10_training_torch_objects_example.py | Deci-AI/super-gradients | 658f638389654668a085e23c3b19622241fd9267 | [
"Apache-2.0"
] | 24 | 2022-01-10T08:05:37.000Z | 2022-03-30T18:49:06.000Z | src/super_gradients/examples/cifar10_training_torch_objects/cifar10_training_torch_objects_example.py | Deci-AI/super-gradients | 658f638389654668a085e23c3b19622241fd9267 | [
"Apache-2.0"
] | 26 | 2021-12-31T06:04:07.000Z | 2022-03-21T09:51:44.000Z |
"""
Cifar10 training with SuperGradients training with the following initialized torch objects:
DataLoaders
Optimizers
Networks (nn.Module)
Schedulers
Loss functions
Main purpose is to demonstrate training in SG with minimal abstraction and maximal flexibility
"""
from super_gradients import SgModel
from super_gradients.training.metrics.classification_metrics import Accuracy, Top5
from super_gradients.training import MultiGPUMode
from torch.optim import ASGD
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
from torch.nn import CrossEntropyLoss
from super_gradients.training.utils.callbacks import Phase, LRSchedulerCallback
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchvision.models import resnet18
from torch.utils.data import DataLoader
# Define any torch DataLoaders, need at least train & valid loaders.
# SuperGradients accepts plain torch objects, so any torchvision dataset
# can be plugged in directly.
train_dataset = CIFAR10(root='data/', download=True, train=True, transform=ToTensor())
valid_dataset = CIFAR10(root='data/', download=True, train=False, transform=ToTensor())
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=16)
valid_loader = DataLoader(valid_dataset, batch_size=32)

# Define any network of type nn.Module; here a torchvision ResNet-18 with a
# classification head sized for the CIFAR10 classes.
net = resnet18(num_classes=len(train_dataset.classes))

# Define any optimizer of type torch.optim.Optimizer (and schedulers).
lr = 2.5e-4
optimizer = ASGD(net.parameters(), lr=lr, weight_decay=0.0001)

# Two schedulers: ReduceLROnPlateau watches the validation Accuracy metric
# (wired up via the callbacks below), MultiStepLR decays at fixed epochs.
rop_lr_scheduler = ReduceLROnPlateau(optimizer, mode="max", patience=10, verbose=True)
step_lr_scheduler = MultiStepLR(optimizer, milestones=[0, 150, 200], gamma=0.1)

# Define any loss function of type torch.nn.modules.loss._Loss
loss_fn = CrossEntropyLoss()

# Define phase callbacks: step the plateau scheduler after each validation
# epoch and the step scheduler after each training epoch.
phase_callbacks = [LRSchedulerCallback(scheduler=rop_lr_scheduler, phase=Phase.VALIDATION_EPOCH_END, metric_name="Accuracy"),
                   LRSchedulerCallback(scheduler=step_lr_scheduler, phase=Phase.TRAIN_EPOCH_END)]

# Bring everything together with SgModel and start training
model = SgModel("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF,
                train_loader=train_loader, valid_loader=valid_loader, classes=train_dataset.classes)

model.build_model(net)

train_params = {"max_epochs": 300,
                "phase_callbacks": phase_callbacks,
                "initial_lr": lr,
                "loss": loss_fn,
                "criterion_params": {},
                'optimizer': optimizer,
                "train_metrics_list": [Accuracy(), Top5()],
                "valid_metrics_list": [Accuracy(), Top5()],
                "loss_logging_items_names": ["Loss"], "metric_to_watch": "Accuracy",
                "greater_metric_to_watch_is_better": True,
                "lr_scheduler_step_type": "epoch"}

model.train(training_params=train_params)
| 40.869565 | 125 | 0.750709 |
from super_gradients import SgModel
from super_gradients.training.metrics.classification_metrics import Accuracy, Top5
from super_gradients.training import MultiGPUMode
from torch.optim import ASGD
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
from torch.nn import CrossEntropyLoss
from super_gradients.training.utils.callbacks import Phase, LRSchedulerCallback
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchvision.models import resnet18
from torch.utils.data import DataLoader
train_dataset = CIFAR10(root='data/', download=True, train=True, transform=ToTensor())
valid_dataset = CIFAR10(root='data/', download=True, train=False, transform=ToTensor())
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=16)
valid_loader = DataLoader(valid_dataset, batch_size=32)
net = resnet18(num_classes=len(train_dataset.classes))
lr = 2.5e-4
optimizer = ASGD(net.parameters(), lr=lr, weight_decay=0.0001)
rop_lr_scheduler = ReduceLROnPlateau(optimizer, mode="max", patience=10, verbose=True)
step_lr_scheduler = MultiStepLR(optimizer, milestones=[0, 150, 200], gamma=0.1)
loss_fn = CrossEntropyLoss()
phase_callbacks = [LRSchedulerCallback(scheduler=rop_lr_scheduler, phase=Phase.VALIDATION_EPOCH_END, metric_name="Accuracy"),
LRSchedulerCallback(scheduler=step_lr_scheduler, phase=Phase.TRAIN_EPOCH_END)]
model = SgModel("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF,
train_loader=train_loader, valid_loader=valid_loader, classes=train_dataset.classes)
model.build_model(net)
train_params = {"max_epochs": 300,
"phase_callbacks": phase_callbacks,
"initial_lr": lr,
"loss": loss_fn,
"criterion_params": {},
'optimizer': optimizer,
"train_metrics_list": [Accuracy(), Top5()],
"valid_metrics_list": [Accuracy(), Top5()],
"loss_logging_items_names": ["Loss"], "metric_to_watch": "Accuracy",
"greater_metric_to_watch_is_better": True,
"lr_scheduler_step_type": "epoch"}
model.train(training_params=train_params)
| true | true |
1c3ae01f7d275d8c9a138cfb38421b60adf5332c | 1,701 | py | Python | app/core/migrations/0001_initial.py | elieish/recipe-api-django | 19e3dff1111b2e570925c7b263363994258eaf22 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | elieish/recipe-api-django | 19e3dff1111b2e570925c7b263363994258eaf22 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | elieish/recipe-api-django | 19e3dff1111b2e570925c7b263363994258eaf22 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-08 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: creates the custom User
    # model (email as the unique login field instead of a username).
    # Applied migrations should not be hand-edited; add a new migration
    # for any schema change.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.029412 | 266 | 0.637272 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true | true |
1c3ae19661cdaf6cd25142e916f8ec09fd4b7223 | 4,289 | py | Python | experiments/compare_fuzzers/plot.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | 2 | 2019-10-15T11:28:12.000Z | 2021-01-28T15:14:09.000Z | experiments/compare_fuzzers/plot.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null | experiments/compare_fuzzers/plot.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null | from afl.utils import AFLUtils
from libfuzzer.utils import CorpusUtils
from suls.rerssoconnector import RERSSOConnector
from pathlib import Path
import re
import matplotlib.pyplot as plt
def check_reached_afl(problem, problemset, rers_basepath, afl_basepath):
rers_path = f"{rers_basepath}/{problemset}/{problem}/{problem}.so"
afl_dir = f'{afl_basepath}/{problemset}/{problem}'
bin_path = f'{afl_basepath}/{problemset}/{problem}/{problem}'
sul = RERSSOConnector(rers_path)
aflutils = AFLUtils(afl_dir,
bin_path,
[str(x) for x in sul.get_alphabet()],
sul)
reached = aflutils.gather_reached_errors(return_time_date=True)
# Filter reached so only the earliest of each error counts
time_error_reached = {}
for (error, time_cur_reached) in reached:
if error in time_error_reached:
if time_error_reached[error] > time_cur_reached:
time_error_reached[error] = time_cur_reached
else:
time_error_reached[error] = time_cur_reached
# Sort by time reached
sorted_time_reached = sorted(time_error_reached.items(), key=lambda x: x[1])
# Accumulate which errors were found by which time
acc_err_reached = {}
acc_errs = set()
for err, time in sorted_time_reached:
acc_errs.add(err)
acc_err_reached[time] = acc_errs.copy()
sorted_acc_reached = sorted(acc_err_reached.items(), key=lambda x: x[0])
sorted_acc_reached_count = [(time, len(errs)) for (time, errs) in sorted_acc_reached]
times, counts = list(zip(*sorted_acc_reached_count))
# Get some time info from the AFL directory
start_time = aflutils.get_start_date_time()
last_time = aflutils.get_last_date_time()
# Calculate some time stuff for plotting
# min_time = min(list(times))
min_time = start_time
rel_start_time = start_time - min_time
rel_times = [time - min_time for time in times]
rel_last_time = last_time - min_time
all_times = [rel_start_time] + rel_times + [rel_last_time]
all_counts = [0] + list(counts) + [max(counts)]
return all_counts, all_times
def check_reached_libfuzzer(problem, problemset, rers_basepath, fuzzer_basepath):
rers_path = f"{rers_basepath}/{problemset}/{problem}/{problem}.so"
fuzzer_dir = Path(f'{fuzzer_basepath}/{problemset}/{problem}')
assert fuzzer_dir.exists(), fuzzer_dir
sul = RERSSOConnector(rers_path)
cutils = CorpusUtils(
corpus_path=fuzzer_dir.joinpath('corpus_errors'),
fuzzer_path=fuzzer_dir.joinpath(f'{problem}_fuzz'),
sul=sul
)
return cutils.get_plot_data()
problem = "Problem11"
problemset = "TrainingSeqReachRers2019"
libfuzzer_basepath = "/home/tom/afl/thesis_benchmark_2/libFuzzer"
afl_basepath = "afl"
rers_basepath = "../../rers"
libfuzzer_reached = check_reached_libfuzzer(problem, problemset, rers_basepath, libfuzzer_basepath)
afl_reached = check_reached_afl(problem, problemset, rers_basepath, afl_basepath)
if max(libfuzzer_reached[1]) > max(afl_reached[1]):
afl_reached[0].append(afl_reached[0][-1])
afl_reached[1].append(libfuzzer_reached[1][-1])
elif max(libfuzzer_reached[1]) < max(afl_reached[1]):
libfuzzer_reached[0].append(libfuzzer_reached[0][-1])
libfuzzer_reached[1].append(afl_reached[1][-1])
print(libfuzzer_reached)
print(afl_reached)
plt.step(libfuzzer_reached[1], libfuzzer_reached[0], label="libFuzzer")
plt.step(afl_reached[1], afl_reached[0], label="AFL")
plt.legend()
plt.xlabel("time(s)")
plt.ylabel("Errors reached")
plt.title(f"Fuzzer comparison - {problem}")
plt.show()
# problem = "Problem13"
# problemset = "TrainingSeqReachRers2019"
# libfuzzer_basepath = "/home/tom/afl/thesis_benchmark_2/libFuzzer"
# #afl_basepath = "afl"
# rers_basepath = "../../rers"
#
# libfuzzer_reached = check_reached_libfuzzer(problem, problemset, rers_basepath, libfuzzer_basepath)
# #afl_reached = check_reached_afl(problem, problemset, rers_basepath, afl_basepath)
#
# print(libfuzzer_reached)
# #print(afl_reached)
#
# plt.step(libfuzzer_reached[1], libfuzzer_reached[0], label="libFuzzer")
# #plt.plot(afl_reached[1], afl_reached[0], label="AFL")
# plt.legend()
# plt.title(f"Fuzzer comparison - {problem}")
# plt.show()
| 34.869919 | 101 | 0.717183 | from afl.utils import AFLUtils
from libfuzzer.utils import CorpusUtils
from suls.rerssoconnector import RERSSOConnector
from pathlib import Path
import re
import matplotlib.pyplot as plt
def check_reached_afl(problem, problemset, rers_basepath, afl_basepath):
rers_path = f"{rers_basepath}/{problemset}/{problem}/{problem}.so"
afl_dir = f'{afl_basepath}/{problemset}/{problem}'
bin_path = f'{afl_basepath}/{problemset}/{problem}/{problem}'
sul = RERSSOConnector(rers_path)
aflutils = AFLUtils(afl_dir,
bin_path,
[str(x) for x in sul.get_alphabet()],
sul)
reached = aflutils.gather_reached_errors(return_time_date=True)
time_error_reached = {}
for (error, time_cur_reached) in reached:
if error in time_error_reached:
if time_error_reached[error] > time_cur_reached:
time_error_reached[error] = time_cur_reached
else:
time_error_reached[error] = time_cur_reached
sorted_time_reached = sorted(time_error_reached.items(), key=lambda x: x[1])
acc_err_reached = {}
acc_errs = set()
for err, time in sorted_time_reached:
acc_errs.add(err)
acc_err_reached[time] = acc_errs.copy()
sorted_acc_reached = sorted(acc_err_reached.items(), key=lambda x: x[0])
sorted_acc_reached_count = [(time, len(errs)) for (time, errs) in sorted_acc_reached]
times, counts = list(zip(*sorted_acc_reached_count))
start_time = aflutils.get_start_date_time()
last_time = aflutils.get_last_date_time()
min_time = start_time
rel_start_time = start_time - min_time
rel_times = [time - min_time for time in times]
rel_last_time = last_time - min_time
all_times = [rel_start_time] + rel_times + [rel_last_time]
all_counts = [0] + list(counts) + [max(counts)]
return all_counts, all_times
def check_reached_libfuzzer(problem, problemset, rers_basepath, fuzzer_basepath):
rers_path = f"{rers_basepath}/{problemset}/{problem}/{problem}.so"
fuzzer_dir = Path(f'{fuzzer_basepath}/{problemset}/{problem}')
assert fuzzer_dir.exists(), fuzzer_dir
sul = RERSSOConnector(rers_path)
cutils = CorpusUtils(
corpus_path=fuzzer_dir.joinpath('corpus_errors'),
fuzzer_path=fuzzer_dir.joinpath(f'{problem}_fuzz'),
sul=sul
)
return cutils.get_plot_data()
problem = "Problem11"
problemset = "TrainingSeqReachRers2019"
libfuzzer_basepath = "/home/tom/afl/thesis_benchmark_2/libFuzzer"
afl_basepath = "afl"
rers_basepath = "../../rers"
libfuzzer_reached = check_reached_libfuzzer(problem, problemset, rers_basepath, libfuzzer_basepath)
afl_reached = check_reached_afl(problem, problemset, rers_basepath, afl_basepath)
if max(libfuzzer_reached[1]) > max(afl_reached[1]):
afl_reached[0].append(afl_reached[0][-1])
afl_reached[1].append(libfuzzer_reached[1][-1])
elif max(libfuzzer_reached[1]) < max(afl_reached[1]):
libfuzzer_reached[0].append(libfuzzer_reached[0][-1])
libfuzzer_reached[1].append(afl_reached[1][-1])
print(libfuzzer_reached)
print(afl_reached)
plt.step(libfuzzer_reached[1], libfuzzer_reached[0], label="libFuzzer")
plt.step(afl_reached[1], afl_reached[0], label="AFL")
plt.legend()
plt.xlabel("time(s)")
plt.ylabel("Errors reached")
plt.title(f"Fuzzer comparison - {problem}")
plt.show()
| true | true |
1c3ae21e16af0fc8d249ed3c8d2c07ae69ce6afd | 12,402 | py | Python | openstackclient/tests/identity/v3/test_service.py | citrix-openstack-build/python-openstackclient | ad59b03be6af9da31230689af268139b12b548e7 | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/identity/v3/test_service.py | citrix-openstack-build/python-openstackclient | ad59b03be6af9da31230689af268139b12b548e7 | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/identity/v3/test_service.py | citrix-openstack-build/python-openstackclient | ad59b03be6af9da31230689af268139b12b548e7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from openstackclient.identity.v3 import service
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
from openstackclient.tests.identity.v3 import test_identity
class TestService(test_identity.TestIdentityv3):
def setUp(self):
super(TestService, self).setUp()
# Get a shortcut to the ServiceManager Mock
self.services_mock = self.app.client_manager.identity.services
self.services_mock.reset_mock()
class TestServiceCreate(TestService):
def setUp(self):
super(TestServiceCreate, self).setUp()
self.services_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
# Get the command object to test
self.cmd = service.CreateService(self.app, None)
def test_service_create_name(self):
arglist = [
'--name', identity_fakes.service_name,
identity_fakes.service_type,
]
verifylist = [
('name', identity_fakes.service_name),
('enable', False),
('disable', False),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServiceManager.create(name, type, enabled=, **kwargs)
self.services_mock.create.assert_called_with(
identity_fakes.service_name,
identity_fakes.service_type,
True,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
def test_service_create_enable(self):
arglist = [
'--enable',
identity_fakes.service_type,
]
verifylist = [
('name', None),
('enable', True),
('disable', False),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServiceManager.create(name, type, enabled=, **kwargs)
self.services_mock.create.assert_called_with(
None,
identity_fakes.service_type,
True,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
def test_service_create_disable(self):
arglist = [
'--disable',
identity_fakes.service_type,
]
verifylist = [
('name', None),
('enable', False),
('disable', True),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServiceManager.create(name, type, enabled=, **kwargs)
self.services_mock.create.assert_called_with(
None,
identity_fakes.service_type,
False,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
class TestServiceDelete(TestService):
def setUp(self):
super(TestServiceDelete, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.services_mock.delete.return_value = None
# Get the command object to test
self.cmd = service.DeleteService(self.app, None)
def test_service_delete_no_options(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
self.services_mock.delete.assert_called_with(
identity_fakes.service_id,
)
class TestServiceList(TestService):
def setUp(self):
super(TestServiceList, self).setUp()
self.services_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
),
]
# Get the command object to test
self.cmd = service.ListService(self.app, None)
def test_service_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.list.assert_called_with()
collist = ('ID', 'Name', 'Type', 'Enabled')
self.assertEqual(columns, collist)
datalist = ((
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
True,
), )
self.assertEqual(tuple(data), datalist)
class TestServiceSet(TestService):
def setUp(self):
super(TestServiceSet, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.services_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
# Get the command object to test
self.cmd = service.SetService(self.app, None)
def test_service_set_no_options(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
def test_service_set_type(self):
arglist = [
'--type', identity_fakes.service_type,
identity_fakes.service_name,
]
verifylist = [
('type', identity_fakes.service_type),
('name', None),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
# Set expected values
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
# ServiceManager.update(service, name=, type=, enabled=, **kwargs)
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_name(self):
arglist = [
'--name', identity_fakes.service_name,
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', identity_fakes.service_name),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
# Set expected values
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
# ServiceManager.update(service, name=, type=, enabled=, **kwargs)
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_enable(self):
arglist = [
'--enable',
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', True),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
# Set expected values
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
# ServiceManager.update(service, name=, type=, enabled=, **kwargs)
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_disable(self):
arglist = [
'--disable',
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', False),
('disable', True),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
# Set expected values
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': False,
}
# ServiceManager.update(service, name=, type=, enabled=, **kwargs)
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
class TestServiceShow(TestService):
def setUp(self):
super(TestServiceShow, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
# Get the command object to test
self.cmd = service.ShowService(self.app, None)
def test_service_show(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServiceManager.get(id)
self.services_mock.get.assert_called_with(
identity_fakes.service_name,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
| 30.322738 | 77 | 0.584261 |
import copy
from openstackclient.identity.v3 import service
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
from openstackclient.tests.identity.v3 import test_identity
class TestService(test_identity.TestIdentityv3):
def setUp(self):
super(TestService, self).setUp()
self.services_mock = self.app.client_manager.identity.services
self.services_mock.reset_mock()
class TestServiceCreate(TestService):
def setUp(self):
super(TestServiceCreate, self).setUp()
self.services_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.cmd = service.CreateService(self.app, None)
def test_service_create_name(self):
arglist = [
'--name', identity_fakes.service_name,
identity_fakes.service_type,
]
verifylist = [
('name', identity_fakes.service_name),
('enable', False),
('disable', False),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.create.assert_called_with(
identity_fakes.service_name,
identity_fakes.service_type,
True,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
def test_service_create_enable(self):
arglist = [
'--enable',
identity_fakes.service_type,
]
verifylist = [
('name', None),
('enable', True),
('disable', False),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.create.assert_called_with(
None,
identity_fakes.service_type,
True,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
def test_service_create_disable(self):
arglist = [
'--disable',
identity_fakes.service_type,
]
verifylist = [
('name', None),
('enable', False),
('disable', True),
('type', identity_fakes.service_type),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.create.assert_called_with(
None,
identity_fakes.service_type,
False,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
class TestServiceDelete(TestService):
def setUp(self):
super(TestServiceDelete, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.services_mock.delete.return_value = None
self.cmd = service.DeleteService(self.app, None)
def test_service_delete_no_options(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
self.services_mock.delete.assert_called_with(
identity_fakes.service_id,
)
class TestServiceList(TestService):
def setUp(self):
super(TestServiceList, self).setUp()
self.services_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
),
]
self.cmd = service.ListService(self.app, None)
def test_service_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.list.assert_called_with()
collist = ('ID', 'Name', 'Type', 'Enabled')
self.assertEqual(columns, collist)
datalist = ((
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
True,
), )
self.assertEqual(tuple(data), datalist)
class TestServiceSet(TestService):
def setUp(self):
super(TestServiceSet, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.services_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.cmd = service.SetService(self.app, None)
def test_service_set_no_options(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
def test_service_set_type(self):
arglist = [
'--type', identity_fakes.service_type,
identity_fakes.service_name,
]
verifylist = [
('type', identity_fakes.service_type),
('name', None),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_name(self):
arglist = [
'--name', identity_fakes.service_name,
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', identity_fakes.service_name),
('enable', False),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_enable(self):
arglist = [
'--enable',
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', True),
('disable', False),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': True,
}
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
def test_service_set_disable(self):
arglist = [
'--disable',
identity_fakes.service_name,
]
verifylist = [
('type', None),
('name', None),
('enable', False),
('disable', True),
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(result, 0)
kwargs = {
'name': identity_fakes.service_name,
'type': identity_fakes.service_type,
'enabled': False,
}
self.services_mock.update.assert_called_with(
identity_fakes.service_id,
**kwargs
)
class TestServiceShow(TestService):
def setUp(self):
super(TestServiceShow, self).setUp()
self.services_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.SERVICE),
loaded=True,
)
self.cmd = service.ShowService(self.app, None)
def test_service_show(self):
arglist = [
identity_fakes.service_name,
]
verifylist = [
('service', identity_fakes.service_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.services_mock.get.assert_called_with(
identity_fakes.service_name,
)
collist = ('enabled', 'id', 'name', 'type')
self.assertEqual(columns, collist)
datalist = (
True,
identity_fakes.service_id,
identity_fakes.service_name,
identity_fakes.service_type,
)
self.assertEqual(data, datalist)
| true | true |
1c3ae2fdc195c31175e0e67fa1831a524cc4bcbf | 1,634 | py | Python | test/test_driver.py | bunyk/pss | d903f187b69ea2282b79b730454a041dd0c5f007 | [
"Unlicense"
] | null | null | null | test/test_driver.py | bunyk/pss | d903f187b69ea2282b79b730454a041dd0c5f007 | [
"Unlicense"
] | null | null | null | test/test_driver.py | bunyk/pss | d903f187b69ea2282b79b730454a041dd0c5f007 | [
"Unlicense"
] | null | null | null | import os, sys
import unittest
sys.path.insert(0, '.')
sys.path.insert(0, '..')
from psslib.driver import pss_run
from test.utils import path_to_testdir, MockOutputFormatter
class TestDriver(unittest.TestCase):
# Just basic sanity tests for pss_run
# Do all the heavy testing in test_pssmain.py, because it also testse the
# cmdline argument parsing and combination logic.
#
testdir1 = path_to_testdir('testdir1')
def setUp(self):
self.of = MockOutputFormatter('testdir1')
def test_basic(self):
match_found = pss_run(
roots=[self.testdir1],
pattern='abc',
output_formatter=self.of,
include_types=['cc'])
self.assertEqual(sorted(self.of.output),
sorted(self._gen_outputs_in_file(
'testdir1/filea.c', [('MATCH', (2, [(4, 7)]))]) +
self._gen_outputs_in_file(
'testdir1/filea.h', [('MATCH', (1, [(8, 11)]))])))
self.assertEquals(match_found, True)
def _gen_outputs_in_file(self, filename, outputs):
""" Helper method for constructing a list of output pairs in the format
of MockOutputFormatter, delimited from both ends with START_MATCHES
and END_MATCHES for the given filename.
"""
seq = []
seq.append(('START_MATCHES', os.path.normpath(filename)))
seq.extend(outputs)
seq.append(('END_MATCHES', os.path.normpath(filename)))
return seq
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 32.68 | 79 | 0.591799 | import os, sys
import unittest
sys.path.insert(0, '.')
sys.path.insert(0, '..')
from psslib.driver import pss_run
from test.utils import path_to_testdir, MockOutputFormatter
class TestDriver(unittest.TestCase):
testdir1 = path_to_testdir('testdir1')
def setUp(self):
self.of = MockOutputFormatter('testdir1')
def test_basic(self):
match_found = pss_run(
roots=[self.testdir1],
pattern='abc',
output_formatter=self.of,
include_types=['cc'])
self.assertEqual(sorted(self.of.output),
sorted(self._gen_outputs_in_file(
'testdir1/filea.c', [('MATCH', (2, [(4, 7)]))]) +
self._gen_outputs_in_file(
'testdir1/filea.h', [('MATCH', (1, [(8, 11)]))])))
self.assertEquals(match_found, True)
def _gen_outputs_in_file(self, filename, outputs):
seq = []
seq.append(('START_MATCHES', os.path.normpath(filename)))
seq.extend(outputs)
seq.append(('END_MATCHES', os.path.normpath(filename)))
return seq
if __name__ == '__main__':
unittest.main()
| true | true |
1c3ae327a71e2763888b0d8ab69b22d08995a16e | 16,508 | py | Python | src/vulkan/util/gen_enum_to_str.py | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | src/vulkan/util/gen_enum_to_str.py | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | src/vulkan/util/gen_enum_to_str.py | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | # Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Create enum to string functions for vulkan using vk.xml."""
import argparse
import functools
import os
import re
import textwrap
import xml.etree.ElementTree as et
from mako.template import Template
COPYRIGHT = textwrap.dedent(u"""\
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.""")
C_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#include <string.h>
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_layer.h>
#include "util/macros.h"
#include "vk_enum_to_str.h"
% for enum in enums:
% if enum.guard:
#ifdef ${enum.guard}
% endif
const char *
vk_${enum.name[2:]}_to_str(${enum.name} input)
{
switch((int64_t)input) {
% for v in sorted(enum.values.keys()):
case ${v}:
return "${enum.values[v]}";
% endfor
case ${enum.max_enum_name}: return "${enum.max_enum_name}";
default:
return "Unknown ${enum.name} value.";
}
}
% if enum.guard:
#endif
% endif
%endfor
size_t vk_structure_type_size(const struct VkBaseInStructure *item)
{
switch((int)item->sType) {
% for struct in structs:
% if struct.extension is not None and struct.extension.define is not None:
#ifdef ${struct.extension.define}
case ${struct.stype}: return sizeof(${struct.name});
#endif
% else:
case ${struct.stype}: return sizeof(${struct.name});
% endif
%endfor
case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO: return sizeof(VkLayerInstanceCreateInfo);
case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO: return sizeof(VkLayerDeviceCreateInfo);
default:
unreachable("Undefined struct type.");
}
}
const char *
vk_ObjectType_to_ObjectName(VkObjectType type)
{
switch((int)type) {
% for object_type in sorted(object_types[0].enum_to_name.keys()):
case ${object_type}:
return "${object_types[0].enum_to_name[object_type]}";
% endfor
default:
return "Unknown VkObjectType value.";
}
}
"""))
H_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#ifndef MESA_VK_ENUM_TO_STR_H
#define MESA_VK_ENUM_TO_STR_H
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
% for enum in enums:
% if enum.guard:
#ifdef ${enum.guard}
% endif
const char * vk_${enum.name[2:]}_to_str(${enum.name} input);
% if enum.guard:
#endif
% endif
% endfor
size_t vk_structure_type_size(const struct VkBaseInStructure *item);
const char * vk_ObjectType_to_ObjectName(VkObjectType type);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif"""))
H_DEFINE_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#ifndef MESA_VK_ENUM_DEFINES_H
#define MESA_VK_ENUM_DEFINES_H
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
% for ext in extensions:
#define _${ext.name}_number (${ext.number})
% endfor
% for enum in bitmasks:
% if enum.bitwidth > 32:
<% continue %>
% endif
% if enum.guard:
#ifdef ${enum.guard}
% endif
#define ${enum.all_bits_name()} ${hex(enum.all_bits_value())}u
% if enum.guard:
#endif
% endif
% endfor
% for enum in bitmasks:
% if enum.bitwidth < 64:
<% continue %>
% endif
/* Redefine bitmask values of ${enum.name} */
% if enum.guard:
#ifdef ${enum.guard}
% endif
% for n, v in enum.name_to_value.items():
#define ${n} (${hex(v)}ULL)
% endfor
% if enum.guard:
#endif
% endif
% endfor
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif"""))
class NamedFactory(object):
    """Memoizing factory: constructs an object per unique name, caching it.

    Repeated calls with the same name return the identical cached instance.
    """
    def __init__(self, type_):
        # Map of name -> constructed instance.
        self.registry = {}
        self.type = type_
    def __call__(self, name, **kwargs):
        obj = self.registry.get(name)
        if obj is None:
            obj = self.type(name, **kwargs)
            self.registry[name] = obj
        return obj
    def get(self, name):
        """Return the cached instance for *name*, or None if never created."""
        return self.registry.get(name)
class VkExtension(object):
    """Simple struct-like class representing extensions"""
    def __init__(self, name, number=None, define=None):
        # Extension name, registry number, and optional platform #define guard.
        self.name, self.number, self.define = name, number, define
def CamelCase_to_SHOUT_CASE(s):
    """Convert a CamelCase identifier to SHOUT_CASE (e.g. VkResult -> VK_RESULT).

    An underscore is inserted before every capital that does not follow
    another capital (so runs like 'KHR' stay intact); the first character
    never gets a leading underscore.
    """
    body = re.sub(r'(?<![A-Z])([A-Z])', r'_\1', s[1:])
    return (s[:1] + body).upper()
def compute_max_enum_name(s):
    """Derive the *_MAX_ENUM sentinel constant name for the enum type *s*."""
    shout = CamelCase_to_SHOUT_CASE(s)
    suffix = shout.rsplit('_', 1)[-1]
    # Vendor suffixes must remain the very last component of the name.
    if suffix in ('AMD', 'EXT', 'INTEL', 'KHR', 'NV'):
        return '_'.join(shout.split('_')[:-1]) + '_MAX_ENUM_' + suffix
    return shout + '_MAX_ENUM'
class VkEnum(object):
    """Simple struct-like class representing a single Vulkan Enum.

    Tracks both directions of the mapping (value->name and name->value),
    plus any aliases whose target value has not been seen yet.
    """
    def __init__(self, name, bitwidth=32, values=None):
        self.name = name
        self.max_enum_name = compute_max_enum_name(name)
        self.bitwidth = bitwidth
        self.extension = None
        # Maps numbers to names
        self.values = values or dict()
        self.name_to_value = dict()
        # Platform #define guarding this enum, if any (see set_guard).
        self.guard = None
        # Aliases recorded before their target value was known:
        # target name -> list of alias names waiting for it.
        self.name_to_alias_list = {}
    def all_bits_name(self):
        # Name of the generated all-bits-set constant; only valid for
        # FlagBits-style bitmask enums.
        assert self.name.startswith('Vk')
        assert re.search(r'FlagBits[A-Z]*$', self.name)
        return 'VK_ALL_' + CamelCase_to_SHOUT_CASE(self.name[2:])
    def all_bits_value(self):
        # OR of every known bit value (0 if the enum is empty).
        return functools.reduce(lambda a,b: a | b, self.values.keys(), 0)
    def add_value(self, name, value=None,
                  extnum=None, offset=None, alias=None,
                  error=False):
        """Record one enumerant, given either an explicit value, an alias,
        or an (extension number, offset) pair per the Vulkan registry scheme."""
        if alias is not None:
            assert value is None and offset is None
            if alias not in self.name_to_value:
                # We don't have this alias yet. Just record the alias and
                # we'll deal with it later.
                alias_list = self.name_to_alias_list.setdefault(alias, [])
                alias_list.append(name);
                return
            # Use the value from the alias
            value = self.name_to_value[alias]
        assert value is not None or extnum is not None
        if value is None:
            # Extension enumerant formula from the Vulkan registry:
            # base 1e9 + (extension number - 1) * 1000 + offset.
            value = 1000000000 + (extnum - 1) * 1000 + offset
        if error:
            value = -value
        self.name_to_value[name] = value
        # Prefer the shortest name for a given value (canonical, non-aliased).
        if value not in self.values:
            self.values[value] = name
        elif len(self.values[value]) > len(name):
            self.values[value] = name
        # Now that the value has been fully added, resolve aliases, if any.
        if name in self.name_to_alias_list:
            for alias in self.name_to_alias_list[name]:
                self.add_value(alias, value)
            del self.name_to_alias_list[name]
    def add_value_from_xml(self, elem, extension=None):
        """Record one enumerant from a registry <enum> XML element."""
        self.extension = extension
        if 'value' in elem.attrib:
            self.add_value(elem.attrib['name'],
                           value=int(elem.attrib['value'], base=0))
        elif 'bitpos' in elem.attrib:
            self.add_value(elem.attrib['name'],
                           value=(1 << int(elem.attrib['bitpos'], base=0)))
        elif 'alias' in elem.attrib:
            self.add_value(elem.attrib['name'], alias=elem.attrib['alias'])
        else:
            # dir="-" marks negative (error) codes in the registry.
            error = 'dir' in elem.attrib and elem.attrib['dir'] == '-'
            if 'extnumber' in elem.attrib:
                extnum = int(elem.attrib['extnumber'])
            else:
                extnum = extension.number
            self.add_value(elem.attrib['name'],
                           extnum=extnum,
                           offset=int(elem.attrib['offset']),
                           error=error)
    def set_guard(self, g):
        # Wrap this enum's generated code in #ifdef g.
        self.guard = g
class VkChainStruct(object):
    """Simple struct-like class representing a single Vulkan struct identified with a VkStructureType"""
    def __init__(self, name, stype):
        # C struct name and its VK_STRUCTURE_TYPE_* enumerant.
        self.name, self.stype = name, stype
        self.extension = None
def struct_get_stype(xml_node):
    """Return the 'values' attribute of the sType member of a struct <type>
    registry node, or None if the struct has no sType member."""
    for member in xml_node.findall('./member'):
        names = member.findall('./name')
        if names and names[0].text == "sType":
            return member.get('values')
    return None
class VkObjectType(object):
    """Simple struct-like class representing a single Vulkan object type"""
    def __init__(self, name):
        self.name = name
        # Numeric VkObjectType enum value -> handle type name (e.g. VkImage).
        self.enum_to_name = {}
def parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
              obj_type_factory, filename):
    """Parse the XML file. Accumulate results into the factories.
    This parser is a memory efficient iterative XML parser that returns a list
    of VkEnum objects.
    """
    xml = et.parse(filename)
    # Core (non-bitmask) enums and their directly-listed values.
    for enum_type in xml.findall('./enums[@type="enum"]'):
        enum = enum_factory(enum_type.attrib['name'])
        for value in enum_type.findall('./enum'):
            enum.add_value_from_xml(value)
    # For bitmask we only add the Enum selected for convenience.
    for enum_type in xml.findall('./enums[@type="bitmask"]'):
        bitwidth = int(enum_type.attrib.get('bitwidth', 32))
        enum = bitmask_factory(enum_type.attrib['name'], bitwidth=bitwidth)
        for value in enum_type.findall('./enum'):
            enum.add_value_from_xml(value)
    # Values added to existing enums by core feature blocks (e.g. Vulkan 1.1+).
    for value in xml.findall('./feature/require/enum[@extends]'):
        extends = value.attrib['extends']
        enum = enum_factory.get(extends)
        if enum is not None:
            enum.add_value_from_xml(value)
        enum = bitmask_factory.get(extends)
        if enum is not None:
            enum.add_value_from_xml(value)
    # Structs that carry an sType member (chainable structs).
    for struct_type in xml.findall('./types/type[@category="struct"]'):
        name = struct_type.attrib['name']
        stype = struct_get_stype(struct_type)
        if stype is not None:
            struct_factory(name, stype=stype)
    # Map platform name -> its protecting #define (e.g. VK_USE_PLATFORM_*).
    platform_define = {}
    for platform in xml.findall('./platforms/platform'):
        name = platform.attrib['name']
        define = platform.attrib['protect']
        platform_define[name] = define
    for ext_elem in xml.findall('./extensions/extension[@supported="vulkan"]'):
        define = None
        if "platform" in ext_elem.attrib:
            define = platform_define[ext_elem.attrib['platform']]
        extension = ext_factory(ext_elem.attrib['name'],
                                number=int(ext_elem.attrib['number']),
                                define=define)
        # Values this extension adds to existing enums/bitmasks.
        for value in ext_elem.findall('./require/enum[@extends]'):
            extends = value.attrib['extends']
            enum = enum_factory.get(extends)
            if enum is not None:
                enum.add_value_from_xml(value, extension)
            enum = bitmask_factory.get(extends)
            if enum is not None:
                enum.add_value_from_xml(value, extension)
        # Structs introduced by this extension inherit its platform guard.
        for t in ext_elem.findall('./require/type'):
            struct = struct_factory.get(t.attrib['name'])
            if struct is not None:
                struct.extension = extension
        if define:
            for value in ext_elem.findall('./require/type[@name]'):
                enum = enum_factory.get(value.attrib['name'])
                if enum is not None:
                    enum.set_guard(define)
    obj_types = obj_type_factory("VkObjectType")
    for object_type in xml.findall('./types/type[@category="handle"]'):
        for object_name in object_type.findall('./name'):
            # Convert to int to avoid undefined enums
            enum = object_type.attrib['objtypeenum']
            enum_val = enum_factory.get("VkObjectType").name_to_value[enum]
            obj_types.enum_to_name[enum_val] = object_name.text
def main():
    """Parse the given Vulkan registry XML files and emit the generated
    vk_enum_to_str.c/.h and vk_enum_defines.h into --outdir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--xml', required=True,
                        help='Vulkan API XML files',
                        action='append',
                        dest='xml_files')
    parser.add_argument('--outdir',
                        help='Directory to put the generated files in',
                        required=True)
    args = parser.parse_args()
    enum_factory = NamedFactory(VkEnum)
    ext_factory = NamedFactory(VkExtension)
    struct_factory = NamedFactory(VkChainStruct)
    obj_type_factory = NamedFactory(VkObjectType)
    bitmask_factory = NamedFactory(VkEnum)
    # Multiple --xml files accumulate into the same factories.
    for filename in args.xml_files:
        parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
                  obj_type_factory, filename)
    # Sort by name so the generated output is deterministic across runs.
    enums = sorted(enum_factory.registry.values(), key=lambda e: e.name)
    extensions = sorted(ext_factory.registry.values(), key=lambda e: e.name)
    structs = sorted(struct_factory.registry.values(), key=lambda e: e.name)
    bitmasks = sorted(bitmask_factory.registry.values(), key=lambda e: e.name)
    object_types = sorted(obj_type_factory.registry.values(), key=lambda e: e.name)
    for template, file_ in [(C_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.c')),
                            (H_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.h')),
                            (H_DEFINE_TEMPLATE, os.path.join(args.outdir, 'vk_enum_defines.h'))]:
        with open(file_, 'w', encoding='utf-8') as f:
            f.write(template.render(
                file=os.path.basename(__file__),
                enums=enums,
                extensions=extensions,
                structs=structs,
                bitmasks=bitmasks,
                object_types=object_types,
                copyright=COPYRIGHT))
if __name__ == '__main__':
    main()
| 33.967078 | 104 | 0.62182 |
import argparse
import functools
import os
import re
import textwrap
import xml.etree.ElementTree as et
from mako.template import Template
COPYRIGHT = textwrap.dedent(u"""\
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.""")
C_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#include <string.h>
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_layer.h>
#include "util/macros.h"
#include "vk_enum_to_str.h"
% for enum in enums:
% if enum.guard:
#ifdef ${enum.guard}
% endif
const char *
vk_${enum.name[2:]}_to_str(${enum.name} input)
{
switch((int64_t)input) {
% for v in sorted(enum.values.keys()):
case ${v}:
return "${enum.values[v]}";
% endfor
case ${enum.max_enum_name}: return "${enum.max_enum_name}";
default:
return "Unknown ${enum.name} value.";
}
}
% if enum.guard:
#endif
% endif
%endfor
size_t vk_structure_type_size(const struct VkBaseInStructure *item)
{
switch((int)item->sType) {
% for struct in structs:
% if struct.extension is not None and struct.extension.define is not None:
#ifdef ${struct.extension.define}
case ${struct.stype}: return sizeof(${struct.name});
#endif
% else:
case ${struct.stype}: return sizeof(${struct.name});
% endif
%endfor
case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO: return sizeof(VkLayerInstanceCreateInfo);
case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO: return sizeof(VkLayerDeviceCreateInfo);
default:
unreachable("Undefined struct type.");
}
}
const char *
vk_ObjectType_to_ObjectName(VkObjectType type)
{
switch((int)type) {
% for object_type in sorted(object_types[0].enum_to_name.keys()):
case ${object_type}:
return "${object_types[0].enum_to_name[object_type]}";
% endfor
default:
return "Unknown VkObjectType value.";
}
}
"""))
H_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#ifndef MESA_VK_ENUM_TO_STR_H
#define MESA_VK_ENUM_TO_STR_H
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
% for enum in enums:
% if enum.guard:
#ifdef ${enum.guard}
% endif
const char * vk_${enum.name[2:]}_to_str(${enum.name} input);
% if enum.guard:
#endif
% endif
% endfor
size_t vk_structure_type_size(const struct VkBaseInStructure *item);
const char * vk_ObjectType_to_ObjectName(VkObjectType type);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif"""))
H_DEFINE_TEMPLATE = Template(textwrap.dedent(u"""\
/* Autogenerated file -- do not edit
* generated by ${file}
*
${copyright}
*/
#ifndef MESA_VK_ENUM_DEFINES_H
#define MESA_VK_ENUM_DEFINES_H
#include <vulkan/vulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
% for ext in extensions:
#define _${ext.name}_number (${ext.number})
% endfor
% for enum in bitmasks:
% if enum.bitwidth > 32:
<% continue %>
% endif
% if enum.guard:
#ifdef ${enum.guard}
% endif
#define ${enum.all_bits_name()} ${hex(enum.all_bits_value())}u
% if enum.guard:
#endif
% endif
% endfor
% for enum in bitmasks:
% if enum.bitwidth < 64:
<% continue %>
% endif
/* Redefine bitmask values of ${enum.name} */
% if enum.guard:
#ifdef ${enum.guard}
% endif
% for n, v in enum.name_to_value.items():
#define ${n} (${hex(v)}ULL)
% endfor
% if enum.guard:
#endif
% endif
% endfor
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif"""))
class NamedFactory(object):
def __init__(self, type_):
self.registry = {}
self.type = type_
def __call__(self, name, **kwargs):
try:
return self.registry[name]
except KeyError:
n = self.registry[name] = self.type(name, **kwargs)
return n
def get(self, name):
return self.registry.get(name)
class VkExtension(object):
def __init__(self, name, number=None, define=None):
self.name = name
self.number = number
self.define = define
def CamelCase_to_SHOUT_CASE(s):
return (s[:1] + re.sub(r'(?<![A-Z])([A-Z])', r'_\1', s[1:])).upper()
def compute_max_enum_name(s):
max_enum_name = CamelCase_to_SHOUT_CASE(s)
last_prefix = max_enum_name.rsplit('_', 1)[-1]
if last_prefix in ['AMD', 'EXT', 'INTEL', 'KHR', 'NV'] :
max_enum_name = "_".join(max_enum_name.split('_')[:-1])
max_enum_name = max_enum_name + "_MAX_ENUM_" + last_prefix
else:
max_enum_name = max_enum_name + "_MAX_ENUM"
return max_enum_name
class VkEnum(object):
def __init__(self, name, bitwidth=32, values=None):
self.name = name
self.max_enum_name = compute_max_enum_name(name)
self.bitwidth = bitwidth
self.extension = None
self.values = values or dict()
self.name_to_value = dict()
self.guard = None
self.name_to_alias_list = {}
def all_bits_name(self):
assert self.name.startswith('Vk')
assert re.search(r'FlagBits[A-Z]*$', self.name)
return 'VK_ALL_' + CamelCase_to_SHOUT_CASE(self.name[2:])
def all_bits_value(self):
return functools.reduce(lambda a,b: a | b, self.values.keys(), 0)
def add_value(self, name, value=None,
extnum=None, offset=None, alias=None,
error=False):
if alias is not None:
assert value is None and offset is None
if alias not in self.name_to_value:
# we'll deal with it later.
alias_list = self.name_to_alias_list.setdefault(alias, [])
alias_list.append(name);
return
value = self.name_to_value[alias]
assert value is not None or extnum is not None
if value is None:
value = 1000000000 + (extnum - 1) * 1000 + offset
if error:
value = -value
self.name_to_value[name] = value
if value not in self.values:
self.values[value] = name
elif len(self.values[value]) > len(name):
self.values[value] = name
if name in self.name_to_alias_list:
for alias in self.name_to_alias_list[name]:
self.add_value(alias, value)
del self.name_to_alias_list[name]
def add_value_from_xml(self, elem, extension=None):
self.extension = extension
if 'value' in elem.attrib:
self.add_value(elem.attrib['name'],
value=int(elem.attrib['value'], base=0))
elif 'bitpos' in elem.attrib:
self.add_value(elem.attrib['name'],
value=(1 << int(elem.attrib['bitpos'], base=0)))
elif 'alias' in elem.attrib:
self.add_value(elem.attrib['name'], alias=elem.attrib['alias'])
else:
error = 'dir' in elem.attrib and elem.attrib['dir'] == '-'
if 'extnumber' in elem.attrib:
extnum = int(elem.attrib['extnumber'])
else:
extnum = extension.number
self.add_value(elem.attrib['name'],
extnum=extnum,
offset=int(elem.attrib['offset']),
error=error)
def set_guard(self, g):
self.guard = g
class VkChainStruct(object):
def __init__(self, name, stype):
self.name = name
self.stype = stype
self.extension = None
def struct_get_stype(xml_node):
for member in xml_node.findall('./member'):
name = member.findall('./name')
if len(name) > 0 and name[0].text == "sType":
return member.get('values')
return None
class VkObjectType(object):
def __init__(self, name):
self.name = name
self.enum_to_name = dict()
def parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
obj_type_factory, filename):
xml = et.parse(filename)
for enum_type in xml.findall('./enums[@type="enum"]'):
enum = enum_factory(enum_type.attrib['name'])
for value in enum_type.findall('./enum'):
enum.add_value_from_xml(value)
for enum_type in xml.findall('./enums[@type="bitmask"]'):
bitwidth = int(enum_type.attrib.get('bitwidth', 32))
enum = bitmask_factory(enum_type.attrib['name'], bitwidth=bitwidth)
for value in enum_type.findall('./enum'):
enum.add_value_from_xml(value)
for value in xml.findall('./feature/require/enum[@extends]'):
extends = value.attrib['extends']
enum = enum_factory.get(extends)
if enum is not None:
enum.add_value_from_xml(value)
enum = bitmask_factory.get(extends)
if enum is not None:
enum.add_value_from_xml(value)
for struct_type in xml.findall('./types/type[@category="struct"]'):
name = struct_type.attrib['name']
stype = struct_get_stype(struct_type)
if stype is not None:
struct_factory(name, stype=stype)
platform_define = {}
for platform in xml.findall('./platforms/platform'):
name = platform.attrib['name']
define = platform.attrib['protect']
platform_define[name] = define
for ext_elem in xml.findall('./extensions/extension[@supported="vulkan"]'):
define = None
if "platform" in ext_elem.attrib:
define = platform_define[ext_elem.attrib['platform']]
extension = ext_factory(ext_elem.attrib['name'],
number=int(ext_elem.attrib['number']),
define=define)
for value in ext_elem.findall('./require/enum[@extends]'):
extends = value.attrib['extends']
enum = enum_factory.get(extends)
if enum is not None:
enum.add_value_from_xml(value, extension)
enum = bitmask_factory.get(extends)
if enum is not None:
enum.add_value_from_xml(value, extension)
for t in ext_elem.findall('./require/type'):
struct = struct_factory.get(t.attrib['name'])
if struct is not None:
struct.extension = extension
if define:
for value in ext_elem.findall('./require/type[@name]'):
enum = enum_factory.get(value.attrib['name'])
if enum is not None:
enum.set_guard(define)
obj_types = obj_type_factory("VkObjectType")
for object_type in xml.findall('./types/type[@category="handle"]'):
for object_name in object_type.findall('./name'):
enum = object_type.attrib['objtypeenum']
enum_val = enum_factory.get("VkObjectType").name_to_value[enum]
obj_types.enum_to_name[enum_val] = object_name.text
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--xml', required=True,
help='Vulkan API XML files',
action='append',
dest='xml_files')
parser.add_argument('--outdir',
help='Directory to put the generated files in',
required=True)
args = parser.parse_args()
enum_factory = NamedFactory(VkEnum)
ext_factory = NamedFactory(VkExtension)
struct_factory = NamedFactory(VkChainStruct)
obj_type_factory = NamedFactory(VkObjectType)
bitmask_factory = NamedFactory(VkEnum)
for filename in args.xml_files:
parse_xml(enum_factory, ext_factory, struct_factory, bitmask_factory,
obj_type_factory, filename)
enums = sorted(enum_factory.registry.values(), key=lambda e: e.name)
extensions = sorted(ext_factory.registry.values(), key=lambda e: e.name)
structs = sorted(struct_factory.registry.values(), key=lambda e: e.name)
bitmasks = sorted(bitmask_factory.registry.values(), key=lambda e: e.name)
object_types = sorted(obj_type_factory.registry.values(), key=lambda e: e.name)
for template, file_ in [(C_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.c')),
(H_TEMPLATE, os.path.join(args.outdir, 'vk_enum_to_str.h')),
(H_DEFINE_TEMPLATE, os.path.join(args.outdir, 'vk_enum_defines.h'))]:
with open(file_, 'w', encoding='utf-8') as f:
f.write(template.render(
file=os.path.basename(__file__),
enums=enums,
extensions=extensions,
structs=structs,
bitmasks=bitmasks,
object_types=object_types,
copyright=COPYRIGHT))
if __name__ == '__main__':
main()
| true | true |
1c3ae3292a9ef709a84f051cbd22878be2751be2 | 318 | py | Python | codewars/find_next_square.py | tarcisioallyson/python_exercise | be5257c5cce7c0c2b573ece2308e3b5b03c22fac | [
"Unlicense"
] | null | null | null | codewars/find_next_square.py | tarcisioallyson/python_exercise | be5257c5cce7c0c2b573ece2308e3b5b03c22fac | [
"Unlicense"
] | null | null | null | codewars/find_next_square.py | tarcisioallyson/python_exercise | be5257c5cce7c0c2b573ece2308e3b5b03c22fac | [
"Unlicense"
def find_next_square(sq):
    """Return the next perfect square after sq, or -1 if sq is not a perfect square.

    :param sq: non-negative integer candidate square
    :return: (sqrt(sq) + 1) ** 2 when sq is a perfect square, else -1
    """
    import math
    # math.isqrt is exact for arbitrarily large ints, unlike int(math.sqrt(sq))
    # which can be off by one for large values due to float rounding.
    root = math.isqrt(sq)
    if root * root == sq:
        # The next square is simply the square of the next root — no need to
        # scan upward one integer at a time as the original did.
        return (root + 1) ** 2
    return -1
print(find_next_square(81)) | 24.461538 | 61 | 0.528302 | def find_next_square(sq):
import math
root = int(math.sqrt(sq))
if root**2 == sq:
sq += 1
while int(math.sqrt(sq))**2 != sq:
sq+=1
return sq
else:
return -1
print(find_next_square(81)) | true | true |
1c3ae33aa7f08c8da33f8b3ce0bb608fedfe42c9 | 208 | py | Python | self_driving/data_collection/util.py | cclauss/self_driving_pi_car | 7dbb67b837b58ddc492ac5ea0822d69dfe6e2d34 | [
"MIT"
] | 724 | 2018-11-19T16:29:38.000Z | 2022-01-27T19:52:13.000Z | self_driving/data_collection/util.py | cclauss/self_driving_pi_car | 7dbb67b837b58ddc492ac5ea0822d69dfe6e2d34 | [
"MIT"
] | 5 | 2018-02-17T19:24:38.000Z | 2018-02-17T19:31:42.000Z | self_driving/data_collection/util.py | cclauss/self_driving_pi_car | 7dbb67b837b58ddc492ac5ea0822d69dfe6e2d34 | [
"MIT"
] | 69 | 2018-03-02T13:08:43.000Z | 2022-01-13T07:44:27.000Z | import time
def get_date():
    """
    Gives you the date in form:
    year-month-day-hours-minutes-seconds
    :return: current date
    :rtype: str
    """
    # The original used '%-M', a glibc-only (non-portable) directive that also
    # drops zero-padding; '%M' is the standard zero-padded minutes field and
    # matches the format documented above.
    return time.strftime('%Y-%m-%d-%H-%M-%S')
| 16 | 46 | 0.581731 | import time
def get_date():
return time.strftime('%Y-%m-%d-%H-%-M-%S')
| true | true |
1c3ae481e79889fb0480f81200581ce4ee128173 | 3,017 | py | Python | Supersegment/temp.py | XDZhelheim/TrafficDataAnalysis | a73dde10f91fb88af3a7b2edd7a04adaa5ea57f5 | [
"MIT"
] | null | null | null | Supersegment/temp.py | XDZhelheim/TrafficDataAnalysis | a73dde10f91fb88af3a7b2edd7a04adaa5ea57f5 | [
"MIT"
] | null | null | null | Supersegment/temp.py | XDZhelheim/TrafficDataAnalysis | a73dde10f91fb88af3a7b2edd7a04adaa5ea57f5 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import geopandas as gp
from shapely.geometry import Polygon, MultiLineString, Point
import shapely.wkt as wkt
import supersegment
# def randrange(n, vmin, vmax):
# '''
# Helper function to make an array of random numbers having shape (n, )
# with each number distributed Uniform(vmin, vmax).
# '''
# return (vmax - vmin)*np.random.rand(n) + vmin
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# n = 100
# # For each set of style and range settings, plot n random points in the box
# # defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].
# for m, zlow, zhigh in [('o', -50, -25), ('^', -30, -5)]:
# xs = randrange(n, 23, 32)
# ys = randrange(n, 0, 100)
# zs = randrange(n, zlow, zhigh)
# ax.scatter(xs, ys, zs, marker=m)
# print(type(zs[0]))
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
# plt.show()
# df=pd.read_table("./TrafficDataAnalysis/boundary.txt", nrows=10000)
# df['tti']=[0]*len(df)
# df['speed']=[0]*len(df)
# # print(df.loc[df['obj_id']==841])
# # 2018-1-1
# df2=pd.read_table("./TrafficDataAnalysis/city_district.txt", nrows=2736)
# # print(df2)
# for index, row in df2.iterrows():
# df.loc[df['obj_id']==row['obj_id'], 'tti']=row['tti']
# df.loc[df['obj_id']==row['obj_id'], 'speed']=row['speed']
# print(df)
# df1=pd.read_table("./TrafficDataAnalysis/res2.txt", header=None, sep=' ')
# print(df1[0])
# x = np.linspace(200, 3500, 2000)
# plt.plot(x, x, '-r')
# # plt.plot(x, 0.15*x+750, '-r')
# plt.scatter(df1[0], df1[1])
# plt.show()
# df=pd.read_csv("./TrafficDataAnalysis/chengdushi_1001_1010.csv", nrows=1, header=0, names=["track"], usecols=[2])
# track=[]
# for temp in df["track"]:
# temp=temp.lstrip("[").rstrip("]")
# # print(temp)
# # temp=temp.replace(", ", ";")
# temp=temp.split(", ")
# for i in range(len(temp)):
# temp[i]=temp[i].split(" ")
# for item in temp:
# item[0]=float(item[0])
# item[1]=float(item[1])
# item[2]=int(item[2])
# track.append(temp)
# print(track)
# with open("./TrafficDataAnalysis/chengdushi_1001_1010.csv") as f:
# temp=f.readline()
# print(temp)
# Load road-boundary records; 'geometry' arrives as WKT text.
df=pd.read_table("../boundary.txt", nrows=10000)
# Parse the WKT strings into shapely geometry objects so GeoPandas can use them.
df['geometry']=df['geometry'].apply(lambda z: wkt.loads(z))
df=gp.GeoDataFrame(df)
# Source coordinates are WGS84 lon/lat.
df.crs={'init':'epsg:4326'}
# Pick three specific road segments and buffer each centerline into a thin
# polygon (~0.0001 degrees wide).
roads=df.loc[(df["obj_id"]==283504) | (df["obj_id"]==283505) | (df["obj_id"]==283506), "geometry"].apply(lambda x: x.buffer(distance=0.0001))
# b=roads.iloc[0].bounds
# print(b)
# minx, miny, maxx, maxy=roads.total_bounds
# print(minx, miny, maxx, maxy)
# a=np.array([1, 2, 3, 4, 5, 6])
# b=np.array([0, 0, 1, 1, 2, 3])
# c=a[b==2]
# print(c)
# print(type(roads.iloc[0]))
# Re-project to a projected CRS so .length is in linear units rather than
# degrees — NOTE(review): presumably EPSG:2432 covers this study area; confirm.
roads=roads.to_crs("epsg:2432")
print(roads.length)
# temp=[]
# tracks=get_tracks(2000)
# for i in tracks:
# for j in i:
# temp.append(Point(j[0], j[1]))
# show_geom(gp.GeoSeries(temp), "black", "allpoints") | 28.196262 | 141 | 0.616175 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import geopandas as gp
from shapely.geometry import Polygon, MultiLineString, Point
import shapely.wkt as wkt
import supersegment
# Helper function to make an array of random numbers having shape (n, )
# with each number distributed Uniform(vmin, vmax).
# '''
d"]==283505) | (df["obj_id"]==283506), "geometry"].apply(lambda x: x.buffer(distance=0.0001))
roads=roads.to_crs("epsg:2432")
print(roads.length)
| true | true |
1c3ae5f91f3ac2db9f56eb3b957a29a5ef0be04c | 6,978 | py | Python | layers/output_utils.py | Priyashbhugra/yolact | ef871057f2768dcb13e6d9636d49402c9862fcd4 | [
"MIT"
] | null | null | null | layers/output_utils.py | Priyashbhugra/yolact | ef871057f2768dcb13e6d9636d49402c9862fcd4 | [
"MIT"
] | null | null | null | layers/output_utils.py | Priyashbhugra/yolact | ef871057f2768dcb13e6d9636d49402c9862fcd4 | [
"MIT"
] | null | null | null | """ Contains functions used to sanitize and prepare the output of Yolact. """
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from data import cfg, mask_type, MEANS, STD, activation_func
from utils.augmentations import Resize
from utils import timer
from .box_utils import crop, sanitize_coordinates
def postprocess(det_output, w, h, batch_idx=0, interpolation_mode='bilinear',
                visualize_lincomb=False, crop_masks=True, score_threshold=0):
    """
    Postprocesses the output of Yolact on testing mode into a format that makes sense,
    accounting for all the possible configuration settings.
    Args:
        - det_output: The list of dicts that Detect outputs.
        - w: The real width of the image.
        - h: The real height of the image.
        - batch_idx: If you have multiple images for this batch, the image's index in the batch.
        - interpolation_mode: Can be 'nearest' | 'area' | 'bilinear' (see torch.nn.functional.interpolate)
    Returns 4 torch Tensors (in the following order):
        - classes [num_det]: The class idx for each detection.
        - scores [num_det]: The confidence score for each detection.
        - boxes [num_det, 4]: The bounding box for each detection in absolute point form.
        - masks [num_det, h, w]: Full image masks for each detection.
    """
    dets = det_output[batch_idx]
    net = dets['net']
    dets = dets['detection']
    if dets is None:
        return [torch.Tensor()] * 4 # Warning, this is 4 copies of the same thing
    if score_threshold > 0:
        keep = dets['score'] > score_threshold
        # 'proto' holds shared prototype data, not per-detection rows, so it
        # must not be filtered by the keep mask.
        for k in dets:
            if k != 'proto':
                dets[k] = dets[k][keep]
        if dets['score'].size(0) == 0:
            return [torch.Tensor()] * 4
    # Actually extract everything from dets now
    classes = dets['class']
    boxes = dets['box']
    scores = dets['score']
    masks = dets['mask']
    # Assemble full masks from prototypes + coefficients (lincomb mask head).
    if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:
        # At this point masks holds only the per-detection coefficients.
        proto_data = dets['proto']
        # Test flag, do not upvote
        if cfg.mask_proto_debug:
            np.save('scripts/proto.npy', proto_data.cpu().numpy())
        if visualize_lincomb:
            display_lincomb(proto_data, masks)
        # Linear combination of prototypes, then the configured activation.
        masks = proto_data @ masks.t()
        masks = cfg.mask_proto_mask_activation(masks)
        # Crop to boxes before upsampling — cheaper at prototype resolution.
        if crop_masks:
            masks = crop(masks, boxes)
        # Permute into the correct output shape [num_dets, proto_h, proto_w]
        masks = masks.permute(2, 0, 1).contiguous()
        if cfg.use_maskiou:
            with timer.env('maskiou_net'):
                with torch.no_grad():
                    maskiou_p = net.maskiou_net(masks.unsqueeze(1))
                    # Keep only each detection's own-class mask-IoU prediction.
                    maskiou_p = torch.gather(maskiou_p, dim=1, index=classes.unsqueeze(1)).squeeze(1)
                    if cfg.rescore_mask:
                        if cfg.rescore_bbox:
                            scores = scores * maskiou_p
                        else:
                            scores = [scores, scores * maskiou_p]
        # Scale masks up to the full image
        masks = F.interpolate(masks.unsqueeze(0), (h, w), mode=interpolation_mode, align_corners=False).squeeze(0)
        # Binarize the masks
        masks.gt_(0.5)
    # Clamp boxes to the image and convert to absolute integer coordinates.
    boxes[:, 0], boxes[:, 2] = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, cast=False)
    boxes[:, 1], boxes[:, 3] = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, cast=False)
    boxes = boxes.long()
    if cfg.mask_type == mask_type.direct and cfg.eval_mask_branch:
        # Upscale masks
        full_masks = torch.zeros(masks.size(0), h, w)
        for jdx in range(masks.size(0)):
            x1, y1, x2, y2 = boxes[jdx, :]
            mask_w = x2 - x1
            mask_h = y2 - y1
            # Just in case
            if mask_w * mask_h <= 0 or mask_w < 0:
                continue
            mask = masks[jdx, :].view(1, 1, cfg.mask_size, cfg.mask_size)
            mask = F.interpolate(mask, (mask_h, mask_w), mode=interpolation_mode, align_corners=False)
            mask = mask.gt(0.5).float()
            full_masks[jdx, y1:y2, x1:x2] = mask
        masks = full_masks
    return classes, scores, boxes, masks
def undo_image_transformation(img, w, h):
    """
    Takes a transformed image tensor and returns a numpy ndarray that is untransformed.
    Arguments w and h are the original height and width of the image.
    """
    # CHW tensor -> HWC numpy, then reorder channels to BGR for un-normalizing.
    arr = img.permute(1, 2, 0).cpu().numpy()[:, :, (2, 1, 0)]
    if cfg.backbone.transform.normalize:
        arr = (arr * np.array(STD) + np.array(MEANS)) / 255.0
    elif cfg.backbone.transform.subtract_means:
        arr = (arr / 255.0 + np.array(MEANS) / 255.0).astype(np.float32)
    # Back to RGB and clamp into [0, 1] before resizing to the original size.
    arr = np.clip(arr[:, :, (2, 1, 0)], 0, 1)
    return cv2.resize(arr, (w, h))
def display_lincomb(proto_data, masks):
    """Visualize prototype masks and their linear combination for a detection.

    proto_data: [proto_h, proto_w, num_protos] prototype activations.
    masks: [num_dets, num_protos] per-detection mask coefficients.
    Shows a 4x8 grid of coefficient-scaled prototypes, then the combined mask.
    """
    out_masks = torch.matmul(proto_data, masks.t())
    # out_masks = cfg.mask_proto_mask_activation(out_masks)
    # Only visualizes the first detection (kdx/jdx left in for easy tweaking).
    for kdx in range(1):
        jdx = kdx + 0
        import matplotlib.pyplot as plt
        coeffs = masks[jdx, :].cpu().numpy()
        # Order prototypes by coefficient magnitude, largest first.
        idx = np.argsort(-np.abs(coeffs))
        # plt.bar(list(range(idx.shape[0])), coeffs[idx])
        # plt.show()
        coeffs_sort = coeffs[idx]
        arr_h, arr_w = (4,8)
        proto_h, proto_w, _ = proto_data.size()
        arr_img = np.zeros([proto_h*arr_h, proto_w*arr_w])
        arr_run = np.zeros([proto_h*arr_h, proto_w*arr_w])
        test = torch.sum(proto_data, -1).cpu().numpy()
        for y in range(arr_h):
            for x in range(arr_w):
                i = arr_w * y + x
                # Accumulate the weighted prototypes one at a time so each grid
                # tile of arr_run shows the partial (running) combination.
                if i == 0:
                    running_total = proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]
                else:
                    running_total += proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]
                running_total_nonlin = running_total
                if cfg.mask_proto_mask_activation == activation_func.sigmoid:
                    running_total_nonlin = (1/(1+np.exp(-running_total_nonlin)))
                # Tile i: prototype idx[i], max-normalized then coefficient-scaled.
                arr_img[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (proto_data[:, :, idx[i]] / torch.max(proto_data[:, :, idx[i]])).cpu().numpy() * coeffs_sort[i]
                arr_run[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (running_total_nonlin > 0.5).astype(np.float)
        plt.imshow(arr_img)
        plt.show()
        # plt.imshow(arr_run)
        # plt.show()
        # plt.imshow(test)
        # plt.show()
        plt.imshow(out_masks[:, :, jdx].cpu().numpy())
        plt.show()
| 36.34375 | 171 | 0.586271 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from data import cfg, mask_type, MEANS, STD, activation_func
from utils.augmentations import Resize
from utils import timer
from .box_utils import crop, sanitize_coordinates
def postprocess(det_output, w, h, batch_idx=0, interpolation_mode='bilinear',
                visualize_lincomb=False, crop_masks=True, score_threshold=0):
    """
    Turn the raw detector output for one image into final outputs.

    Args:
        det_output: batched detector output; det_output[batch_idx] is a dict
            holding the network handle ('net') and the raw 'detection' dict.
        w: width in pixels to scale boxes/masks to.
        h: height in pixels to scale boxes/masks to.
        batch_idx: index of the image within the batch.
        interpolation_mode: interpolation used when upscaling masks.
        visualize_lincomb: if True, plot the prototype linear combination (debug).
        crop_masks: if True, zero mask pixels outside each detection's box.
        score_threshold: detections with score <= this are dropped (0 keeps all).

    Returns:
        (classes, scores, boxes, masks); a list of 4 empty tensors when there
        are no detections left.
    """
    dets = det_output[batch_idx]
    net = dets['net']
    dets = dets['detection']
    if dets is None:
        return [torch.Tensor()] * 4
    # Filter out low-scoring detections; 'proto' is shared and left untouched.
    if score_threshold > 0:
        keep = dets['score'] > score_threshold
        for k in dets:
            if k != 'proto':
                dets[k] = dets[k][keep]
        if dets['score'].size(0) == 0:
            return [torch.Tensor()] * 4
    classes = dets['class']
    boxes = dets['box']
    scores = dets['score']
    masks = dets['mask']
    if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:
        # Masks are a linear combination of prototypes: proto_data @ coefficients.
        proto_data = dets['proto']
        if cfg.mask_proto_debug:
            np.save('scripts/proto.npy', proto_data.cpu().numpy())
        if visualize_lincomb:
            display_lincomb(proto_data, masks)
        masks = proto_data @ masks.t()
        masks = cfg.mask_proto_mask_activation(masks)
        if crop_masks:
            # Zero out everything outside each predicted box.
            masks = crop(masks, boxes)
        # Move the detection axis first: permute to (num_dets, H, W).
        masks = masks.permute(2, 0, 1).contiguous()
        if cfg.use_maskiou:
            # Rescore detections with the predicted mask IoU for their class.
            with timer.env('maskiou_net'):
                with torch.no_grad():
                    maskiou_p = net.maskiou_net(masks.unsqueeze(1))
                    maskiou_p = torch.gather(maskiou_p, dim=1, index=classes.unsqueeze(1)).squeeze(1)
                    if cfg.rescore_mask:
                        if cfg.rescore_bbox:
                            scores = scores * maskiou_p
                        else:
                            # Keep both the box scores and the mask-rescored scores.
                            scores = [scores, scores * maskiou_p]
        # Scale masks up to the full image, then binarize in place.
        masks = F.interpolate(masks.unsqueeze(0), (h, w), mode=interpolation_mode, align_corners=False).squeeze(0)
        masks.gt_(0.5)
    # Clamp boxes to the image and convert to integer pixel coordinates.
    boxes[:, 0], boxes[:, 2] = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, cast=False)
    boxes[:, 1], boxes[:, 3] = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, cast=False)
    boxes = boxes.long()
    if cfg.mask_type == mask_type.direct and cfg.eval_mask_branch:
        # Direct masks are fixed-size crops; paste each one into a full-size canvas.
        full_masks = torch.zeros(masks.size(0), h, w)
        for jdx in range(masks.size(0)):
            x1, y1, x2, y2 = boxes[jdx, :]
            mask_w = x2 - x1
            mask_h = y2 - y1
            # Skip degenerate boxes.
            if mask_w * mask_h <= 0 or mask_w < 0:
                continue
            mask = masks[jdx, :].view(1, 1, cfg.mask_size, cfg.mask_size)
            mask = F.interpolate(mask, (mask_h, mask_w), mode=interpolation_mode, align_corners=False)
            mask = mask.gt(0.5).float()
            full_masks[jdx, y1:y2, x1:x2] = mask
        masks = full_masks
    return classes, scores, boxes, masks
def undo_image_transformation(img, w, h):
    """Invert the backbone input transform on *img* and resize to (w, h).

    Returns an (h, w, 3) RGB float array clipped to [0, 1].
    """
    # Tensor (C, H, W) -> ndarray (H, W, C).
    restored = img.permute(1, 2, 0).cpu().numpy()
    restored = restored[:, :, (2, 1, 0)]
    transform = cfg.backbone.transform
    if transform.normalize:
        restored = (restored * np.array(STD) + np.array(MEANS)) / 255.0
    elif transform.subtract_means:
        restored = (restored / 255.0 + np.array(MEANS) / 255.0).astype(np.float32)
    restored = restored[:, :, (2, 1, 0)]
    return cv2.resize(np.clip(restored, 0, 1), (w, h))
def display_lincomb(proto_data, masks):
    """
    Debug visualization of the prototype / coefficient linear combination.

    Args:
        proto_data: (H, W, num_protos) prototype tensor.
        masks: (num_dets, num_protos) coefficient tensor.
    """
    out_masks = torch.matmul(proto_data, masks.t())
    for kdx in range(1):
        jdx = kdx + 0
        import matplotlib.pyplot as plt
        coeffs = masks[jdx, :].cpu().numpy()
        # Visit prototypes in order of decreasing coefficient magnitude.
        idx = np.argsort(-np.abs(coeffs))
        coeffs_sort = coeffs[idx]
        arr_h, arr_w = (4, 8)
        proto_h, proto_w, _ = proto_data.size()
        arr_img = np.zeros([proto_h * arr_h, proto_w * arr_w])
        arr_run = np.zeros([proto_h * arr_h, proto_w * arr_w])
        for y in range(arr_h):
            for x in range(arr_w):
                i = arr_w * y + x
                # Running sum of the weighted prototypes seen so far.
                if i == 0:
                    running_total = proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]
                else:
                    running_total += proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]
                running_total_nonlin = running_total
                if cfg.mask_proto_mask_activation == activation_func.sigmoid:
                    running_total_nonlin = (1 / (1 + np.exp(-running_total_nonlin)))
                arr_img[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (proto_data[:, :, idx[i]] / torch.max(proto_data[:, :, idx[i]])).cpu().numpy() * coeffs_sort[i]
                # Fix: np.float was removed in NumPy 1.24; builtin float is equivalent.
                arr_run[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (running_total_nonlin > 0.5).astype(float)
        plt.imshow(arr_img)
        plt.show()
        plt.imshow(out_masks[:, :, jdx].cpu().numpy())
        plt.show()
| true | true |
1c3ae671fe4a260122c3b70b182bc81814715e39 | 952 | py | Python | test/test_face_snap_with_registered_face_snaps.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | test/test_face_snap_with_registered_face_snaps.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | test/test_face_snap_with_registered_face_snaps.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import vtpl_api
from vtpl_api.models.face_snap_with_registered_face_snaps import FaceSnapWithRegisteredFaceSnaps # noqa: E501
from vtpl_api.rest import ApiException
class TestFaceSnapWithRegisteredFaceSnaps(unittest.TestCase):
    """Unit test stubs for the FaceSnapWithRegisteredFaceSnaps model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testFaceSnapWithRegisteredFaceSnaps(self):
        """Test FaceSnapWithRegisteredFaceSnaps"""
        # TODO: construct the model with mandatory attributes, e.g.
        # vtpl_api.models.face_snap_with_registered_face_snaps.FaceSnapWithRegisteredFaceSnaps()
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 23.8 | 118 | 0.737395 |
from __future__ import absolute_import
import unittest
import vtpl_api
from vtpl_api.models.face_snap_with_registered_face_snaps import FaceSnapWithRegisteredFaceSnaps
from vtpl_api.rest import ApiException
class TestFaceSnapWithRegisteredFaceSnaps(unittest.TestCase):
    """Unit test stubs for the FaceSnapWithRegisteredFaceSnaps model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testFaceSnapWithRegisteredFaceSnaps(self):
        # Fix: the body was a bare name `s` (a NameError when the test runs,
        # apparently a comment-stripping artifact); restore the stub with pass.
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| true | true |
1c3ae6bccaf5257f5a744f1daf9e4187c5f864e2 | 2,120 | py | Python | examples/clustering/profile.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | examples/clustering/profile.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | examples/clustering/profile.py | catalinpopc/openstacksdk | adaf758076b0c74cf4bb55e88fdee7072764f5f3 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from examples.connect import FLAVOR_NAME
from examples.connect import IMAGE_NAME
from examples.connect import NETWORK_NAME
from examples.connect import SERVER_NAME
"""
Managing profiles in the Cluster service.
For a full guide see
https://developer.openstack.org/sdks/python/openstacksdk/user/guides/cluster.html
"""
def list_profiles(conn):
    """Print every clustering profile, then print them again sorted by name ascending."""
    print("List Profiles:")
    for item in conn.clustering.profiles():
        print(item.to_dict())
    for item in conn.clustering.profiles(sort='name:asc'):
        print(item.to_dict())
def create_profile(conn):
    """Create an os.nova.server profile named 'os_server' and print the result."""
    print("Create Profile:")
    server_properties = {
        'name': SERVER_NAME,
        'flavor': FLAVOR_NAME,
        'image': IMAGE_NAME,
        'networks': {
            'network': NETWORK_NAME
        }
    }
    spec = {
        'profile': 'os.nova.server',
        'version': 1.0,
        'properties': server_properties
    }
    created = conn.clustering.create_profile('os_server', spec)
    print(created.to_dict())
def get_profile(conn):
    """Fetch the 'os_server' profile by id/name and print it."""
    print("Get Profile:")
    fetched = conn.clustering.get_profile('os_server')
    print(fetched.to_dict())
def find_profile(conn):
    """Look up the 'os_server' profile via find and print it."""
    print("Find Profile:")
    found = conn.clustering.find_profile('os_server')
    print(found.to_dict())
def update_profile(conn):
    """Rename the 'os_server' profile to 'old_server' and print the updated profile."""
    print("Update Profile:")
    updated = conn.clustering.update_profile('os_server', name='old_server')
    print(updated.to_dict())
def delete_profile(conn):
    """Delete the 'os_server' profile and confirm on stdout."""
    print("Delete Profile:")
    conn.clustering.delete_profile('os_server')
    print("Profile deleted.")
| 25.542169 | 81 | 0.686792 |
from examples.connect import FLAVOR_NAME
from examples.connect import IMAGE_NAME
from examples.connect import NETWORK_NAME
from examples.connect import SERVER_NAME
def list_profiles(conn):
    """Print all clustering profiles, then print them again sorted by name ascending."""
    print("List Profiles:")
    for profile in conn.clustering.profiles():
        print(profile.to_dict())
    for profile in conn.clustering.profiles(sort='name:asc'):
        print(profile.to_dict())
def create_profile(conn):
    """Create an os.nova.server profile named 'os_server' and print it."""
    print("Create Profile:")
    # Profile spec: a Nova server with the names taken from examples.connect.
    spec = {
        'profile': 'os.nova.server',
        'version': 1.0,
        'properties': {
            'name': SERVER_NAME,
            'flavor': FLAVOR_NAME,
            'image': IMAGE_NAME,
            'networks': {
                'network': NETWORK_NAME
            }
        }
    }
    profile = conn.clustering.create_profile('os_server', spec)
    print(profile.to_dict())
def get_profile(conn):
    """Fetch the 'os_server' profile and print it."""
    print("Get Profile:")
    profile = conn.clustering.get_profile('os_server')
    print(profile.to_dict())
def find_profile(conn):
    """Look up the 'os_server' profile via find and print it."""
    print("Find Profile:")
    profile = conn.clustering.find_profile('os_server')
    print(profile.to_dict())
def update_profile(conn):
    """Rename the 'os_server' profile to 'old_server' and print the updated profile."""
    print("Update Profile:")
    profile = conn.clustering.update_profile('os_server', name='old_server')
    print(profile.to_dict())
def delete_profile(conn):
    """Delete the 'os_server' profile and confirm on stdout."""
    print("Delete Profile:")
    conn.clustering.delete_profile('os_server')
    print("Profile deleted.")
| true | true |
1c3ae7fee60b1eade0c55933724255ed28cf901e | 695 | py | Python | PythonExercicios/ex003.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex003.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex003.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | # Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo primitivo
# e todas as informações possíveis sobre ele.
n = input('Digite algo: ')
print('O tipo primitivo é: ', type(n))
print('É alfanumérico? ', n.isalnum())
print('É alfabético? ', n.isalpha())
print('É decimal? ', n.isdecimal())
print('É um dígito? ', n.isdigit())
print('É um identificador? ', n.isidentifier())
print('É possível ser impresso? ', n.isprintable())
print('É somente espaços? ', n.isspace())
print('Está capitalizada? ', n.istitle())
print('Está em maiúsculo? ', n.isupper())
print('Está em minúsculo? ', n.islower())
print('É um número? ', n.isnumeric())
print('É um código binário? ', n.isascii())
| 38.611111 | 83 | 0.683453 |
# Read anything typed at the keyboard and report its primitive type plus
# every str classification predicate available for it.
algo = input('Digite algo: ')
print('O tipo primitivo é: ', type(algo))
print('É alfanumérico? ', algo.isalnum())
print('É alfabético? ', algo.isalpha())
print('É decimal? ', algo.isdecimal())
print('É um dígito? ', algo.isdigit())
print('É um identificador? ', algo.isidentifier())
print('É possível ser impresso? ', algo.isprintable())
print('É somente espaços? ', algo.isspace())
print('Está capitalizada? ', algo.istitle())
print('Está em maiúsculo? ', algo.isupper())
print('Está em minúsculo? ', algo.islower())
print('É um número? ', algo.isnumeric())
print('É um código binário? ', algo.isascii())
1c3ae8082e261353f40972c2e8e73000e45ed5c9 | 2,358 | py | Python | bacteria_archaea/marine/marine_prok_biomass_estimate.py | yinonbaron/biomass_distribution | 783a8d2f59754bde9b0ea802512b131abbe7d8a0 | [
"MIT"
] | 1 | 2021-05-17T13:55:48.000Z | 2021-05-17T13:55:48.000Z | bacteria_archaea/marine/marine_prok_biomass_estimate.py | yinonbaron/biomass_distribution | 783a8d2f59754bde9b0ea802512b131abbe7d8a0 | [
"MIT"
] | null | null | null | bacteria_archaea/marine/marine_prok_biomass_estimate.py | yinonbaron/biomass_distribution | 783a8d2f59754bde9b0ea802512b131abbe7d8a0 | [
"MIT"
] | 2 | 2018-01-10T08:53:35.000Z | 2021-05-17T13:55:50.000Z |
# coding: utf-8
# # Estimating the total biomass of marine archaea and bacteria
#
# We use our best estimates for the total number of marine prokaryotes, the carbon content of marine prokaryotes and the fraction of marine archaea and bacteria out of the total population of marine prokaryotes to estimate the total biomass of marine bacteria and archaea
# In[1]:
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:,.1e}'.format  # show values in scientific notation
import sys
sys.path.insert(0, '../../statistics_helper')
from CI_helper import *
# Best estimates (and uncertainties) for each parameter, one row per parameter.
results = pd.read_excel('marine_prok_biomass_estimate.xlsx')
# These are our best estimates for the different parameters required for the estimate, along with the associated uncertainties
# In[2]:
results.head()
# We multiply all the relevant parameters to arrive at our best estimate for the biomass of marine archaea and bacteria, and propagate the uncertainties associated with each parameter to calculate the uncertainty associated with the estimate for the total biomass
# In[3]:
# Calculate the total biomass of marine archaea and bacteria
# NOTE(review): 'Value' rows are addressed positionally ([0]..[4]); the 1e-15
# factor presumably converts fg C per cell to grams -- confirm row order
# against the spreadsheet before editing these expressions.
total_arch_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][2]
total_bac_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][3]
print('Our best estimate for the total biomass of marine archaea is %.1f Gt C' %(total_arch_biomass/1e15))
print('Our best estimate for the total biomass of marine bacteria is %.1f Gt C' %(total_bac_biomass/1e15))
# Propagate the uncertainty in the total biomass of bacteria and archaea
prok_biomass_CI = CI_sum_prop(estimates=np.array([results['Value'][0]*results['Value'][1], results['Value'][0]*results['Value'][1]*results['Value'][4]]), mul_CIs=np.array([CI_prod_prop(results['Uncertainty'][:2]),results['Uncertainty'][4]]))
# Propagate the uncertainty associated with each parameter to the final estimate
arch_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][2]]))
bac_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][3]]))
print('The uncertainty associated with the estimate for the biomass of archaea is %.1f-fold' %arch_biomass_uncertainty)
print('The uncertainty associated with the estimate for the biomass of bacteria is %.1f-fold' %bac_biomass_uncertainty)
| 50.170213 | 271 | 0.772265 |
ay.float_format = '{:,.1e}'.format
import sys
sys.path.insert(0, '../../statistics_helper')
from CI_helper import *
results = pd.read_excel('marine_prok_biomass_estimate.xlsx')
results.head()
total_arch_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][2]
total_bac_biomass = results['Value'][0]*results['Value'][1]*(1+results['Value'][4])*1e-15*results['Value'][3]
print('Our best estimate for the total biomass of marine archaea is %.1f Gt C' %(total_arch_biomass/1e15))
print('Our best estimate for the total biomass of marine bacteria is %.1f Gt C' %(total_bac_biomass/1e15))
prok_biomass_CI = CI_sum_prop(estimates=np.array([results['Value'][0]*results['Value'][1], results['Value'][0]*results['Value'][1]*results['Value'][4]]), mul_CIs=np.array([CI_prod_prop(results['Uncertainty'][:2]),results['Uncertainty'][4]]))
arch_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][2]]))
bac_biomass_uncertainty = CI_prod_prop(np.array([prok_biomass_CI,results['Uncertainty'][3]]))
print('The uncertainty associated with the estimate for the biomass of archaea is %.1f-fold' %arch_biomass_uncertainty)
print('The uncertainty associated with the estimate for the biomass of bacteria is %.1f-fold' %bac_biomass_uncertainty)
| true | true |
1c3ae8b8efd54b0337cb7ca549f48d80cbe99ef7 | 14,957 | py | Python | libs/groupdocs_conversion_cloud/models/image_convert_options.py | rocketbot-cl/pdf2word | e46f6f574f69aa744e300baf4802e426b71bf9b2 | [
"MIT"
] | null | null | null | libs/groupdocs_conversion_cloud/models/image_convert_options.py | rocketbot-cl/pdf2word | e46f6f574f69aa744e300baf4802e426b71bf9b2 | [
"MIT"
] | null | null | null | libs/groupdocs_conversion_cloud/models/image_convert_options.py | rocketbot-cl/pdf2word | e46f6f574f69aa744e300baf4802e426b71bf9b2 | [
"MIT"
] | null | null | null | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="ImageConvertOptions.py">
# Copyright (c) 2003-2019 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import ConvertOptions
class ImageConvertOptions(ConvertOptions):
    """
    Options for to Image conversion
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: both dicts below are class-level and shared across instances;
    # __init__ merges the base-class entries into them in place.
    swagger_types = {
        'width': 'int',
        'height': 'int',
        'horizontal_resolution': 'int',
        'vertical_resolution': 'int',
        'grayscale': 'bool',
        'rotate_angle': 'int',
        'use_pdf': 'bool',
        'watermark_options': 'WatermarkOptions',
        'brightness': 'int',
        'contrast': 'int',
        'gamma': 'float',
        'flip_mode': 'str'
    }

    attribute_map = {
        'width': 'Width',
        'height': 'Height',
        'horizontal_resolution': 'HorizontalResolution',
        'vertical_resolution': 'VerticalResolution',
        'grayscale': 'Grayscale',
        'rotate_angle': 'RotateAngle',
        'use_pdf': 'UsePdf',
        'watermark_options': 'WatermarkOptions',
        'brightness': 'Brightness',
        'contrast': 'Contrast',
        'gamma': 'Gamma',
        'flip_mode': 'FlipMode'
    }

    def __init__(self, width=None, height=None, horizontal_resolution=None, vertical_resolution=None, grayscale=None, rotate_angle=None, use_pdf=None, watermark_options=None, brightness=None, contrast=None, gamma=None, flip_mode=None, **kwargs):  # noqa: E501
        """Initializes new instance of ImageConvertOptions"""  # noqa: E501
        self._width = None
        self._height = None
        self._horizontal_resolution = None
        self._vertical_resolution = None
        self._grayscale = None
        self._rotate_angle = None
        self._use_pdf = None
        self._watermark_options = None
        self._brightness = None
        self._contrast = None
        self._gamma = None
        self._flip_mode = None

        # Only route non-None arguments through the property setters, because
        # every setter (except watermark_options) rejects None.
        if width is not None:
            self.width = width
        if height is not None:
            self.height = height
        if horizontal_resolution is not None:
            self.horizontal_resolution = horizontal_resolution
        if vertical_resolution is not None:
            self.vertical_resolution = vertical_resolution
        if grayscale is not None:
            self.grayscale = grayscale
        if rotate_angle is not None:
            self.rotate_angle = rotate_angle
        if use_pdf is not None:
            self.use_pdf = use_pdf
        if watermark_options is not None:
            self.watermark_options = watermark_options
        if brightness is not None:
            self.brightness = brightness
        if contrast is not None:
            self.contrast = contrast
        if gamma is not None:
            self.gamma = gamma
        if flip_mode is not None:
            self.flip_mode = flip_mode

        # Merge the base class field maps so serialization (to_dict) also
        # sees inherited fields. This mutates the class-level dicts.
        base = super(ImageConvertOptions, self)
        base.__init__(**kwargs)
        self.swagger_types.update(base.swagger_types)
        self.attribute_map.update(base.attribute_map)

    @property
    def width(self):
        """
        Gets the width. # noqa: E501

        Desired image width after conversion # noqa: E501

        :return: The width. # noqa: E501
        :rtype: int
        """
        return self._width

    @width.setter
    def width(self, width):
        """
        Sets the width.

        Desired image width after conversion # noqa: E501

        :param width: The width. # noqa: E501
        :type: int
        """
        if width is None:
            raise ValueError("Invalid value for `width`, must not be `None`")  # noqa: E501
        self._width = width

    @property
    def height(self):
        """
        Gets the height. # noqa: E501

        Desired image height after conversion # noqa: E501

        :return: The height. # noqa: E501
        :rtype: int
        """
        return self._height

    @height.setter
    def height(self, height):
        """
        Sets the height.

        Desired image height after conversion # noqa: E501

        :param height: The height. # noqa: E501
        :type: int
        """
        if height is None:
            raise ValueError("Invalid value for `height`, must not be `None`")  # noqa: E501
        self._height = height

    @property
    def horizontal_resolution(self):
        """
        Gets the horizontal_resolution. # noqa: E501

        Desired image horizontal resolution after conversion. The default resolution is the resolution of the input file or 96dpi # noqa: E501

        :return: The horizontal_resolution. # noqa: E501
        :rtype: int
        """
        return self._horizontal_resolution

    @horizontal_resolution.setter
    def horizontal_resolution(self, horizontal_resolution):
        """
        Sets the horizontal_resolution.

        Desired image horizontal resolution after conversion. The default resolution is the resolution of the input file or 96dpi # noqa: E501

        :param horizontal_resolution: The horizontal_resolution. # noqa: E501
        :type: int
        """
        if horizontal_resolution is None:
            raise ValueError("Invalid value for `horizontal_resolution`, must not be `None`")  # noqa: E501
        self._horizontal_resolution = horizontal_resolution

    @property
    def vertical_resolution(self):
        """
        Gets the vertical_resolution. # noqa: E501

        Desired image vertical resolution after conversion. The default resolution is the resolution of the input file or 96dpi # noqa: E501

        :return: The vertical_resolution. # noqa: E501
        :rtype: int
        """
        return self._vertical_resolution

    @vertical_resolution.setter
    def vertical_resolution(self, vertical_resolution):
        """
        Sets the vertical_resolution.

        Desired image vertical resolution after conversion. The default resolution is the resolution of the input file or 96dpi # noqa: E501

        :param vertical_resolution: The vertical_resolution. # noqa: E501
        :type: int
        """
        if vertical_resolution is None:
            raise ValueError("Invalid value for `vertical_resolution`, must not be `None`")  # noqa: E501
        self._vertical_resolution = vertical_resolution

    @property
    def grayscale(self):
        """
        Gets the grayscale. # noqa: E501

        Convert to grayscale image # noqa: E501

        :return: The grayscale. # noqa: E501
        :rtype: bool
        """
        return self._grayscale

    @grayscale.setter
    def grayscale(self, grayscale):
        """
        Sets the grayscale.

        Convert to grayscale image # noqa: E501

        :param grayscale: The grayscale. # noqa: E501
        :type: bool
        """
        if grayscale is None:
            raise ValueError("Invalid value for `grayscale`, must not be `None`")  # noqa: E501
        self._grayscale = grayscale

    @property
    def rotate_angle(self):
        """
        Gets the rotate_angle. # noqa: E501

        Image rotation angle # noqa: E501

        :return: The rotate_angle. # noqa: E501
        :rtype: int
        """
        return self._rotate_angle

    @rotate_angle.setter
    def rotate_angle(self, rotate_angle):
        """
        Sets the rotate_angle.

        Image rotation angle # noqa: E501

        :param rotate_angle: The rotate_angle. # noqa: E501
        :type: int
        """
        if rotate_angle is None:
            raise ValueError("Invalid value for `rotate_angle`, must not be `None`")  # noqa: E501
        self._rotate_angle = rotate_angle

    @property
    def use_pdf(self):
        """
        Gets the use_pdf. # noqa: E501

        If true, the input firstly is converted to PDF and after that to desired format # noqa: E501

        :return: The use_pdf. # noqa: E501
        :rtype: bool
        """
        return self._use_pdf

    @use_pdf.setter
    def use_pdf(self, use_pdf):
        """
        Sets the use_pdf.

        If true, the input firstly is converted to PDF and after that to desired format # noqa: E501

        :param use_pdf: The use_pdf. # noqa: E501
        :type: bool
        """
        if use_pdf is None:
            raise ValueError("Invalid value for `use_pdf`, must not be `None`")  # noqa: E501
        self._use_pdf = use_pdf

    @property
    def watermark_options(self):
        """
        Gets the watermark_options. # noqa: E501

        Watermark specific options # noqa: E501

        :return: The watermark_options. # noqa: E501
        :rtype: WatermarkOptions
        """
        return self._watermark_options

    @watermark_options.setter
    def watermark_options(self, watermark_options):
        """
        Sets the watermark_options.

        Watermark specific options # noqa: E501

        :param watermark_options: The watermark_options. # noqa: E501
        :type: WatermarkOptions
        """
        # NOTE(review): unlike the other setters, None is accepted here
        # (generated without a None guard).
        self._watermark_options = watermark_options

    @property
    def brightness(self):
        """
        Gets the brightness. # noqa: E501

        Adjust image brightness # noqa: E501

        :return: The brightness. # noqa: E501
        :rtype: int
        """
        return self._brightness

    @brightness.setter
    def brightness(self, brightness):
        """
        Sets the brightness.

        Adjust image brightness # noqa: E501

        :param brightness: The brightness. # noqa: E501
        :type: int
        """
        if brightness is None:
            raise ValueError("Invalid value for `brightness`, must not be `None`")  # noqa: E501
        self._brightness = brightness

    @property
    def contrast(self):
        """
        Gets the contrast. # noqa: E501

        Adjust image contrast # noqa: E501

        :return: The contrast. # noqa: E501
        :rtype: int
        """
        return self._contrast

    @contrast.setter
    def contrast(self, contrast):
        """
        Sets the contrast.

        Adjust image contrast # noqa: E501

        :param contrast: The contrast. # noqa: E501
        :type: int
        """
        if contrast is None:
            raise ValueError("Invalid value for `contrast`, must not be `None`")  # noqa: E501
        self._contrast = contrast

    @property
    def gamma(self):
        """
        Gets the gamma. # noqa: E501

        Adjust image gamma # noqa: E501

        :return: The gamma. # noqa: E501
        :rtype: float
        """
        return self._gamma

    @gamma.setter
    def gamma(self, gamma):
        """
        Sets the gamma.

        Adjust image gamma # noqa: E501

        :param gamma: The gamma. # noqa: E501
        :type: float
        """
        if gamma is None:
            raise ValueError("Invalid value for `gamma`, must not be `None`")  # noqa: E501
        self._gamma = gamma

    @property
    def flip_mode(self):
        """
        Gets the flip_mode. # noqa: E501

        Image flip mode # noqa: E501

        :return: The flip_mode. # noqa: E501
        :rtype: str
        """
        return self._flip_mode

    @flip_mode.setter
    def flip_mode(self, flip_mode):
        """
        Sets the flip_mode.

        Image flip mode # noqa: E501

        :param flip_mode: The flip_mode. # noqa: E501
        :type: str
        """
        if flip_mode is None:
            raise ValueError("Invalid value for `flip_mode`, must not be `None`")  # noqa: E501
        allowed_values = ["None", "FlipX", "FlipY", "FlipXY"]  # noqa: E501
        # Accept either the enum name or its numeric index as a string; a
        # numeric string is mapped back onto allowed_values by position.
        if not flip_mode.isdigit():
            if flip_mode not in allowed_values:
                raise ValueError(
                    "Invalid value for `flip_mode` ({0}), must be one of {1}"  # noqa: E501
                    .format(flip_mode, allowed_values))
            self._flip_mode = flip_mode
        else:
            # `long` exists only on Python 2; the six.PY3 guard keeps this
            # branch from evaluating it on Python 3.
            self._flip_mode = allowed_values[int(flip_mode) if six.PY3 else long(flip_mode)]

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ImageConvertOptions):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.902893 | 259 | 0.586949 |
import pprint
import re
import six
from groupdocs_conversion_cloud.models import ConvertOptions
class ImageConvertOptions(ConvertOptions):
swagger_types = {
'width': 'int',
'height': 'int',
'horizontal_resolution': 'int',
'vertical_resolution': 'int',
'grayscale': 'bool',
'rotate_angle': 'int',
'use_pdf': 'bool',
'watermark_options': 'WatermarkOptions',
'brightness': 'int',
'contrast': 'int',
'gamma': 'float',
'flip_mode': 'str'
}
attribute_map = {
'width': 'Width',
'height': 'Height',
'horizontal_resolution': 'HorizontalResolution',
'vertical_resolution': 'VerticalResolution',
'grayscale': 'Grayscale',
'rotate_angle': 'RotateAngle',
'use_pdf': 'UsePdf',
'watermark_options': 'WatermarkOptions',
'brightness': 'Brightness',
'contrast': 'Contrast',
'gamma': 'Gamma',
'flip_mode': 'FlipMode'
}
def __init__(self, width=None, height=None, horizontal_resolution=None, vertical_resolution=None, grayscale=None, rotate_angle=None, use_pdf=None, watermark_options=None, brightness=None, contrast=None, gamma=None, flip_mode=None, **kwargs):
self._width = None
self._height = None
self._horizontal_resolution = None
self._vertical_resolution = None
self._grayscale = None
self._rotate_angle = None
self._use_pdf = None
self._watermark_options = None
self._brightness = None
self._contrast = None
self._gamma = None
self._flip_mode = None
if width is not None:
self.width = width
if height is not None:
self.height = height
if horizontal_resolution is not None:
self.horizontal_resolution = horizontal_resolution
if vertical_resolution is not None:
self.vertical_resolution = vertical_resolution
if grayscale is not None:
self.grayscale = grayscale
if rotate_angle is not None:
self.rotate_angle = rotate_angle
if use_pdf is not None:
self.use_pdf = use_pdf
if watermark_options is not None:
self.watermark_options = watermark_options
if brightness is not None:
self.brightness = brightness
if contrast is not None:
self.contrast = contrast
if gamma is not None:
self.gamma = gamma
if flip_mode is not None:
self.flip_mode = flip_mode
base = super(ImageConvertOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
# Generated accessor pairs: each setter rejects an explicit ``None`` (the
# constructor bypasses the setter when the argument is omitted).
@property
def width(self):
    """Desired output width, in pixels."""
    return self._width

@width.setter
def width(self, width):
    if width is None:
        raise ValueError("Invalid value for `width`, must not be `None`")
    self._width = width

@property
def height(self):
    """Desired output height, in pixels."""
    return self._height

@height.setter
def height(self, height):
    if height is None:
        raise ValueError("Invalid value for `height`, must not be `None`")
    self._height = height

@property
def horizontal_resolution(self):
    """Horizontal resolution of the converted image."""
    return self._horizontal_resolution

@horizontal_resolution.setter
def horizontal_resolution(self, horizontal_resolution):
    if horizontal_resolution is None:
        raise ValueError("Invalid value for `horizontal_resolution`, must not be `None`")
    self._horizontal_resolution = horizontal_resolution

@property
def vertical_resolution(self):
    """Vertical resolution of the converted image."""
    return self._vertical_resolution

@vertical_resolution.setter
def vertical_resolution(self, vertical_resolution):
    if vertical_resolution is None:
        raise ValueError("Invalid value for `vertical_resolution`, must not be `None`")
    self._vertical_resolution = vertical_resolution

@property
def grayscale(self):
    """Whether to convert the image to grayscale."""
    return self._grayscale

@grayscale.setter
def grayscale(self, grayscale):
    if grayscale is None:
        raise ValueError("Invalid value for `grayscale`, must not be `None`")
    self._grayscale = grayscale

@property
def rotate_angle(self):
    """Rotation angle applied during conversion."""
    return self._rotate_angle

@rotate_angle.setter
def rotate_angle(self, rotate_angle):
    if rotate_angle is None:
        raise ValueError("Invalid value for `rotate_angle`, must not be `None`")
    self._rotate_angle = rotate_angle

@property
def use_pdf(self):
    """Whether to route the conversion through an intermediate PDF."""
    return self._use_pdf

@use_pdf.setter
def use_pdf(self, use_pdf):
    if use_pdf is None:
        raise ValueError("Invalid value for `use_pdf`, must not be `None`")
    self._use_pdf = use_pdf

@property
def watermark_options(self):
    """Watermark settings; optional, so ``None`` is accepted here."""
    return self._watermark_options

@watermark_options.setter
def watermark_options(self, watermark_options):
    self._watermark_options = watermark_options

@property
def brightness(self):
    """Brightness adjustment applied during conversion."""
    return self._brightness

@brightness.setter
def brightness(self, brightness):
    if brightness is None:
        raise ValueError("Invalid value for `brightness`, must not be `None`")
    self._brightness = brightness

@property
def contrast(self):
    """Contrast adjustment applied during conversion."""
    return self._contrast

@contrast.setter
def contrast(self, contrast):
    if contrast is None:
        raise ValueError("Invalid value for `contrast`, must not be `None`")
    self._contrast = contrast

@property
def gamma(self):
    """Gamma correction applied during conversion."""
    return self._gamma

@gamma.setter
def gamma(self, gamma):
    if gamma is None:
        raise ValueError("Invalid value for `gamma`, must not be `None`")
    self._gamma = gamma
@property
def flip_mode(self):
    """Flip mode applied during conversion.

    One of ``"None"``, ``"FlipX"``, ``"FlipY"`` or ``"FlipXY"``; the
    setter also accepts the numeric index into that list as a digit
    string (e.g. ``"2"`` for ``"FlipY"``).
    """
    return self._flip_mode

@flip_mode.setter
def flip_mode(self, flip_mode):
    if flip_mode is None:
        raise ValueError("Invalid value for `flip_mode`, must not be `None`")
    allowed_values = ["None", "FlipX", "FlipY", "FlipXY"]
    if not flip_mode.isdigit():
        if flip_mode not in allowed_values:
            raise ValueError(
                "Invalid value for `flip_mode` ({0}), must be one of {1}"
                .format(flip_mode, allowed_values))
        self._flip_mode = flip_mode
    else:
        # Fix: validate the numeric index so an out-of-range digit string
        # (e.g. "9") raises ValueError instead of an opaque IndexError.
        # int() indexes fine on both Python 2 and 3, so the former
        # six.PY3 / long() branch is unnecessary.
        index = int(flip_mode)
        if index >= len(allowed_values):
            raise ValueError(
                "Invalid value for `flip_mode` ({0}), must be one of {1}"
                .format(flip_mode, allowed_values))
        self._flip_mode = allowed_values[index]
def to_dict(self):
    """Return the model's properties as a plain dict.

    Nested model objects (anything exposing ``to_dict``) are serialized
    recursively, including inside lists and dict values.
    """
    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: (val.to_dict() if hasattr(val, "to_dict") else val)
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string of the model's dict form."""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint`."""
    return self.to_str()

def __eq__(self, other):
    """Two instances are equal iff they share type and all field values."""
    if not isinstance(other, ImageConvertOptions):
        return False
    return self.__dict__ == other.__dict__

def __ne__(self, other):
    """Defined explicitly for Python 2 compatibility."""
    return not self == other
| true | true |
1c3ae9c8efd453aecda98026cf65208b789b948b | 15,510 | py | Python | lib/click/parser.py | sangwo/dubhacks2019 | 0074ef9cdfaf25b84ce40bbb97872b07167ad83f | [
"MIT"
] | 6,140 | 2016-05-23T16:09:35.000Z | 2022-03-30T19:00:46.000Z | lib/click/parser.py | sangwo/dubhacks2019 | 0074ef9cdfaf25b84ce40bbb97872b07167ad83f | [
"MIT"
] | 1,384 | 2016-07-08T22:26:41.000Z | 2022-03-24T16:39:43.000Z | lib/click/parser.py | sangwo/dubhacks2019 | 0074ef9cdfaf25b84ce40bbb97872b07167ad83f | [
"MIT"
] | 5,110 | 2016-05-27T13:45:18.000Z | 2022-03-31T18:40:42.000Z | # -*- coding: utf-8 -*-
"""
click.parser
~~~~~~~~~~~~
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
"""
import re
from collections import deque
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
BadArgumentUsage
def _unpack_args(args, nargs_spec):
"""Given an iterable of arguments and an iterable of nargs specifications,
it returns a tuple with all the unpacked arguments at the first index
and all remaining arguments as the second.
The nargs specification is the number of arguments that should be consumed
or `-1` to indicate that this position should eat up all the remainders.
Missing items are filled with `None`.
"""
args = deque(args)
nargs_spec = deque(nargs_spec)
rv = []
spos = None
def _fetch(c):
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
# If we're reversed, we're pulling in the arguments in reverse,
# so we need to turn them around.
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError('Cannot have two nargs < 0')
spos = len(rv)
rv.append(None)
# spos is the position of the wildcard (star). If it's not `None`,
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1:] = reversed(rv[spos + 1:])
return tuple(rv), list(args)
def _error_opt_args(nargs, opt):
    """Raise :exc:`BadOptionUsage` describing how many values *opt* needs."""
    if nargs == 1:
        message = '%s option requires an argument' % opt
    else:
        message = '%s option requires %d arguments' % (opt, nargs)
    raise BadOptionUsage(opt, message)
def split_opt(opt):
    """Split an option string into ``(prefix, name)``.

    A leading alphanumeric character means there is no prefix at all; a
    doubled prefix character (``--``) yields a two-character prefix,
    anything else a one-character prefix.
    """
    head = opt[:1]
    if head.isalnum():
        return '', opt
    prefix_len = 2 if opt[1:2] == head else 1
    return opt[:prefix_len], opt[prefix_len:]
def normalize_opt(opt, ctx):
    """Apply the context's token normalization to the option's name part.

    The prefix (``-``/``--``/...) is preserved untouched; only the name
    after it is run through ``ctx.token_normalize_func``.  With no
    context or no normalizer the option is returned unchanged.
    """
    if ctx is None or ctx.token_normalize_func is None:
        return opt
    prefix, name = split_opt(opt)
    return prefix + ctx.token_normalize_func(name)
def split_arg_string(string):
    """Split a shell-like argument string into a list of tokens.

    Single- and double-quoted spans are kept together, their quotes are
    stripped and backslash escapes inside them are decoded.
    """
    tokens = []
    matches = re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                          r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
                          r'|\S+)\s*', string, re.S)
    for match in matches:
        token = match.group().strip()
        if token[:1] == token[-1:] and token[:1] in '"\'':
            # Quoted token: drop the quotes and resolve escape sequences.
            token = token[1:-1].encode('ascii', 'backslashreplace') \
                .decode('unicode-escape')
        try:
            # Coerce back to the input's string type (bytes vs text).
            token = type(string)(token)
        except UnicodeError:
            pass
        tokens.append(token)
    return tokens
class Option(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError('Invalid start character for option (%s)'
% opt)
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = 'store'
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ('store', 'append')
def process(self, value, state):
if self.action == 'store':
state.opts[self.dest] = value
elif self.action == 'store_const':
state.opts[self.dest] = self.const
elif self.action == 'append':
state.opts.setdefault(self.dest, []).append(value)
elif self.action == 'append_const':
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == 'count':
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError('unknown action %r' % self.action)
state.order.append(self.obj)
class Argument(object):
    """Internal representation of a positional command line argument."""

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        """Validate *value* and record it in *state*.

        For multi-value arguments, an entirely-empty tuple collapses to
        ``None`` (the argument was omitted), while a partially-filled one
        is an error.
        """
        if self.nargs > 1:
            missing = sum(1 for item in value if item is None)
            if missing == len(value):
                value = None
            elif missing != 0:
                raise BadArgumentUsage('argument %s takes %d values'
                                       % (self.dest, self.nargs))
        state.opts[self.dest] = value
        state.order.append(self.obj)
class ParsingState(object):
    """Mutable container accumulating the results of one parse run."""

    def __init__(self, rargs):
        self.opts = {}      # dest name -> parsed value
        self.largs = []     # leftover (interspersed) arguments
        self.rargs = rargs  # remaining arguments still to be processed
        self.order = []     # option/argument objs in encounter order
class OptionParser(object):
    """The option parser is an internal class that is ultimately used to
    parse options and arguments.  It's modelled after optparse and brings
    a similar but vastly simplified API.  It should generally not be used
    directly as the high level Click classes wrap it for you.

    It's not nearly as extensible as optparse or argparse as it does not
    implement features that are implemented on a higher level (such as
    types or defaults).

    :param ctx: optionally the :class:`~click.Context` where this parser
                should go with.
    """

    def __init__(self, ctx=None):
        #: The :class:`~click.Context` for this parser.  This might be
        #: `None` for some advanced use cases.
        self.ctx = ctx
        #: This controls how the parser deals with interspersed arguments.
        #: If this is set to `False`, the parser will stop on the first
        #: non-option.  Click uses this to implement nested subcommands
        #: safely.
        self.allow_interspersed_args = True
        #: This tells the parser how to deal with unknown options.  By
        #: default it will error out (which is sensible), but there is a
        #: second mode where it will ignore it and continue processing
        #: after shifting all the unknown options into the resulting args.
        self.ignore_unknown_options = False
        if ctx is not None:
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        # Lookup tables from option string ("-f" / "--foo") to Option
        # objects, the set of recognized prefixes, and the positional
        # argument definitions in declaration order.
        self._short_opt = {}
        self._long_opt = {}
        self._opt_prefixes = set(['-', '--'])
        self._args = []

    def add_option(self, opts, dest, action=None, nargs=1, const=None,
                   obj=None):
        """Adds a new option named `dest` to the parser.  The destination
        is not inferred (unlike with optparse) and needs to be explicitly
        provided.  Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs,
                        const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

    def add_argument(self, dest, nargs=1, obj=None):
        """Adds a positional argument named `dest` to the parser.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))

    def parse_args(self, args):
        """Parses positional arguments and returns ``(values, args, order)``
        for the parsed options and arguments as well as the leftover
        arguments if there are any.  The order is a list of objects as they
        appear on the command line.  If arguments appear multiple times they
        will be memorized multiple times as well.
        """
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            # In resilient parsing mode (e.g. shell completion) parse
            # errors are swallowed and whatever was parsed is returned.
            if self.ctx is None or not self.ctx.resilient_parsing:
                raise
        return state.opts, state.largs, state.order

    def _process_args_for_args(self, state):
        # Distribute everything that was not consumed as an option value
        # over the declared positional arguments.
        pargs, args = _unpack_args(state.largs + state.rargs,
                                   [x.nargs for x in self._args])
        for idx, arg in enumerate(self._args):
            arg.process(pargs[idx], state)
        state.largs = args
        state.rargs = []

    def _process_args_for_options(self, state):
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            # Double dashes always handled explicitly regardless of what
            # prefixes are valid.
            if arg == '--':
                return
            elif arg[:1] in self._opt_prefixes and arglen > 1:
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                state.rargs.insert(0, arg)
                return

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt(self, opt, explicit_value, state):
        if opt not in self._long_opt:
            # Unknown long option: suggest prefixes of known options.
            possibilities = [word for word in self._long_opt
                             if word.startswith(opt)]
            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)

        option = self._long_opt[opt]
        if option.takes_value:
            # At this point it's safe to modify rargs by injecting the
            # explicit value, because no exception is raised in this
            # branch.  This means that the inserted value will be fully
            # consumed.
            if explicit_value is not None:
                state.rargs.insert(0, explicit_value)

            nargs = option.nargs
            if len(state.rargs) < nargs:
                _error_opt_args(nargs, opt)
            elif nargs == 1:
                value = state.rargs.pop(0)
            else:
                value = tuple(state.rargs[:nargs])
                del state.rargs[:nargs]

        elif explicit_value is not None:
            raise BadOptionUsage(opt, '%s option does not take a value' % opt)

        else:
            value = None

        option.process(value, state)

    def _match_short_opt(self, arg, state):
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []
        for ch in arg[1:]:
            opt = normalize_opt(prefix + ch, self.ctx)
            option = self._short_opt.get(opt)
            i += 1
            if not option:
                if self.ignore_unknown_options:
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt, ctx=self.ctx)
            if option.takes_value:
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    state.rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(state.rargs) < nargs:
                    _error_opt_args(nargs, opt)
                elif nargs == 1:
                    value = state.rargs.pop(0)
                else:
                    value = tuple(state.rargs[:nargs])
                    del state.rargs[:nargs]

            else:
                value = None

            option.process(value, state)

            if stop:
                break

        # If we got any unknown options we re-combinate the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as new larg.  This way there is basic combinatorics
        # that can be achieved while still ignoring unknown arguments.
        if self.ignore_unknown_options and unknown_options:
            state.largs.append(prefix + ''.join(unknown_options))

    def _process_opts(self, arg, state):
        explicit_value = None
        # Long option handling happens in two parts.  The first part is
        # supporting explicitly attached values.  In any case, we will try
        # to long match the option first.
        if '=' in arg:
            long_opt, explicit_value = arg.split('=', 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)

        # At this point we will match the (assumed) long option through
        # the long option matching code.  Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options.  However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                raise
            state.largs.append(arg)
| 36.238318 | 79 | 0.573823 |
import re
from collections import deque
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
BadArgumentUsage
def _unpack_args(args, nargs_spec):
args = deque(args)
nargs_spec = deque(nargs_spec)
rv = []
spos = None
def _fetch(c):
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError('Cannot have two nargs < 0')
spos = len(rv)
rv.append(None)
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1:] = reversed(rv[spos + 1:])
return tuple(rv), list(args)
def _error_opt_args(nargs, opt):
if nargs == 1:
raise BadOptionUsage(opt, '%s option requires an argument' % opt)
raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs))
def split_opt(opt):
first = opt[:1]
if first.isalnum():
return '', opt
if opt[1:2] == first:
return opt[:2], opt[2:]
return first, opt[1:]
def normalize_opt(opt, ctx):
if ctx is None or ctx.token_normalize_func is None:
return opt
prefix, opt = split_opt(opt)
return prefix + ctx.token_normalize_func(opt)
def split_arg_string(string):
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv
class Option(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError('Invalid start character for option (%s)'
% opt)
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = 'store'
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ('store', 'append')
def process(self, value, state):
if self.action == 'store':
state.opts[self.dest] = value
elif self.action == 'store_const':
state.opts[self.dest] = self.const
elif self.action == 'append':
state.opts.setdefault(self.dest, []).append(value)
elif self.action == 'append_const':
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == 'count':
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError('unknown action %r' % self.action)
state.order.append(self.obj)
class Argument(object):
def __init__(self, dest, nargs=1, obj=None):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(self, value, state):
if self.nargs > 1:
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage('argument %s takes %d values'
% (self.dest, self.nargs))
state.opts[self.dest] = value
state.order.append(self.obj)
class ParsingState(object):
def __init__(self, rargs):
self.opts = {}
self.largs = []
self.rargs = rargs
self.order = []
class OptionParser(object):
def __init__(self, ctx=None):
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt = {}
self._long_opt = {}
self._opt_prefixes = set(['-', '--'])
self._args = []
def add_option(self, opts, dest, action=None, nargs=1, const=None,
obj=None):
if obj is None:
obj = dest
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(opts, dest, action=action, nargs=nargs,
const=const, obj=obj)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(self, dest, nargs=1, obj=None):
if obj is None:
obj = dest
self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
def parse_args(self, args):
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state):
pargs, args = _unpack_args(state.largs + state.rargs,
[x.nargs for x in self._args])
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state):
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == '--':
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt, explicit_value, state):
if opt not in self._long_opt:
possibilities = [word for word in self._long_opt
if word.startswith(opt)]
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
elif explicit_value is not None:
raise BadOptionUsage(opt, '%s option does not take a value' % opt)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg, state):
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(prefix + ch, self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt, ctx=self.ctx)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
else:
value = None
option.process(value, state)
if stop:
break
# If we got any unknown options we re-combinate the string of the
# remaining options and re-attach the prefix, then report that
# to the state as new larg. This way there is basic combinatorics
# that can be achieved while still ignoring unknown arguments.
if self.ignore_unknown_options and unknown_options:
state.largs.append(prefix + ''.join(unknown_options))
def _process_opts(self, arg, state):
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if '=' in arg:
long_opt, explicit_value = arg.split('=', 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
return self._match_short_opt(arg, state)
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
| true | true |
1c3aea34398d990c72f1835b9334fa867f54d7a3 | 778 | py | Python | resources/models/permissions.py | suutari-ai/respa | a944b1c13f855eaf5f883687b5fd025ece7c8176 | [
"MIT"
] | null | null | null | resources/models/permissions.py | suutari-ai/respa | a944b1c13f855eaf5f883687b5fd025ece7c8176 | [
"MIT"
] | 1 | 2020-06-12T08:58:58.000Z | 2020-06-12T08:58:58.000Z | resources/models/permissions.py | suutari-ai/respa | a944b1c13f855eaf5f883687b5fd025ece7c8176 | [
"MIT"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
# Per-resource permission choices as (codename, human-readable label)
# pairs; labels are lazily translated.  Codenames are referenced by the
# authorization checks elsewhere in the application.
RESOURCE_PERMISSIONS = (
    ('can_approve_reservation', _('Can approve reservation')),
    ('can_make_reservations', _('Can make reservations')),
    ('can_modify_reservations', _('Can modify reservations')),
    ('can_ignore_opening_hours', _('Can make reservations outside opening hours')),
    ('can_view_reservation_access_code', _('Can view reservation access code')),
    ('can_view_reservation_extra_fields', _('Can view reservation extra fields')),
    ('can_access_reservation_comments', _('Can access reservation comments')),
    ('can_view_reservation_catering_orders', _('Can view reservation catering orders')),
    ('can_modify_reservation_catering_orders', _('Can modify reservation catering orders')),
)
| 55.571429 | 92 | 0.755784 | from django.utils.translation import ugettext_lazy as _
RESOURCE_PERMISSIONS = (
('can_approve_reservation', _('Can approve reservation')),
('can_make_reservations', _('Can make reservations')),
('can_modify_reservations', _('Can modify reservations')),
('can_ignore_opening_hours', _('Can make reservations outside opening hours')),
('can_view_reservation_access_code', _('Can view reservation access code')),
('can_view_reservation_extra_fields', _('Can view reservation extra fields')),
('can_access_reservation_comments', _('Can access reservation comments')),
('can_view_reservation_catering_orders', _('Can view reservation catering orders')),
('can_modify_reservation_catering_orders', _('Can modify reservation catering orders')),
)
| true | true |
1c3aed124f5136bd609625cf41cf4529ddb81b12 | 7,619 | py | Python | lib/Config.py | limebox/sublime-sdf | a315a0226b9fa3df938434b5c67e2f47f2a388d3 | [
"MIT"
] | 3 | 2017-10-12T19:11:44.000Z | 2021-08-24T18:51:38.000Z | lib/Config.py | limebox/sublime-sdf | a315a0226b9fa3df938434b5c67e2f47f2a388d3 | [
"MIT"
] | 10 | 2018-04-20T06:31:58.000Z | 2021-07-10T02:46:24.000Z | lib/Config.py | limebox/sublime-sdf | a315a0226b9fa3df938434b5c67e2f47f2a388d3 | [
"MIT"
] | null | null | null | class Config():
config_object = {
"cli_arguments": {
"adddependencies": "-p \"[PROJECT_FOLDER]\" -all",
"deploy": "-np -p \"[PROJECT_FOLDER]\"",
"importbundle": "-p \"[PROJECT_FOLDER]\"",
"importconfiguration": "-p \"[PROJECT_FOLDER]\"", # FEATURES:ALL_FEATURES",
"importfiles": "-p \"[PROJECT_FOLDER]\" -excludeproperties",
"importobjects": "-p \"[PROJECT_FOLDER]\"",
"listbundles": "",
"listconfiguration": "",
"listfiles": "-folder \"/SuiteScripts\"",
"listmissingdependencies": "-p \"[PROJECT_FOLDER]\"",
"listobjects": "-p \"[PROJECT_FOLDER]\"",
"preview": "-p \"[PROJECT_FOLDER]\"",
"update": "-p \"[PROJECT_FOLDER]\"",
"updatecustomrecordwithinstances": "-p \"[PROJECT_FOLDER]\"",
# "uploadfiles": "-p \"[PROJECT_FOLDER]\"",
# "uploadfolders": "-p \"[PROJECT_FOLDER]\"",
"validate": "-p \"[PROJECT_FOLDER]\" -server",
"issuetoken": "",
"savetoken": "",
"revoketoken": ""
},
"custom_objects": [
[
"Bundle Installation Script",
"bundleinstallationscript",
"/Objects/Scripts/BundleInstallation",
"customscript"
],
[
"Centers",
"center",
"/Objects/CentersAndTabs/Center",
"custcenter"
],
[
"Center Categories",
"centercategory",
"/Objects/CentersAndTabs/Category",
"custcentercategory"
],
[
"Center Tabs",
"centertab",
"/Objects/CentersAndTabs/Tab",
"custcentertab"
],
[
"Client Scripts",
"clientscript",
"/Objects/Scripts/Client",
"customscript"
],
[
"CRM Custom Fields",
"crmcustomfield",
"/Objects/Fields/CRM",
"custevent"
],
[
"Custom Plugins",
"customglplugin",
"/Objects/Plugins/Custom",
"customscript"
],
[
"Custom Lists",
"customlist",
"/Objects/Lists",
"customlist"
],
[
"Custom Records",
"customrecordtype",
"/Objects/Records",
"customrecord"
],
[
"Email Capture Plugins",
"emailcaptureplugin",
"/Objects/Plugins/Email",
"customscript"
],
[
"Entity Custom Fields",
"entitycustomfield",
"/Objects/Fields/Entity",
"custentity"
],
[
"Entity Forms",
"entryForm",
"/Objects/Forms/Entry",
"custform"
],
[
"Transaction Forms",
"transactionForm",
"/Objects/Forms/Transaction",
"custform"
],
[
"Item Custom Fields",
"itemcustomfield",
"/Objects/Fields/Item",
"custitem"
],
[
"Item Number Custom Fields",
"itemnumbercustomfield",
"/Objects/Fields/ItemNumber",
"custitem"
],
[
"Item Option Custom Fields",
"itemoptioncustomfield",
"/Objects/Fields/ItemOption",
"custitemoption"
],
[
"Map Reduce Script",
"mapreducescript",
"/Objects/Scripts/MapReduce",
"customscript"
],
[
"Mass Update Script",
"massupdatescript",
"/Objects/Scripts/MassUpdate",
"customscript"
],
[
"Other Custom Field",
"othercustomfield",
"/Objects/Fields/Other",
"custrecord"
],
[
"Portlets",
"portlet",
"/Objects/Scripts/Portlet",
"customscript"
],
[
"Promotions Plugins",
"promotionsplugin",
"/Objects/Plugins/Promotions",
"customscript"
],
[
"Restlets",
"restlet",
"/Objects/Scripts/Restlet",
"customscript"
],
[
"Roles",
"role",
"/Objects/Roles",
"customrole"
],
[
"Saved Searches",
"savedsearch",
"/Objects/SavedSearches",
"customsearch"
],
[
"Scheduled Scripts",
"scheduledscript",
"/Objects/Scripts/Scheduled",
"customscript"
],
[
"Sub Tabs",
"subtab",
"/Objects/CentersAndTabs/SubTab",
"custtab"
],
[
"Suitelet",
"suitelet",
"/Objects/Scripts/Suitelet",
"customscript"
],
[
"Transaction Body Custom Field",
"transactionbodycustomfield",
"/Objects/Fields/TransactionBody",
"transactionbodycustomfield"
],
[
"Transaction Column Custom Field",
"transactioncolumncustomfield",
"/Objects/Fields/TransactionColumn",
"custcol"
],
[
"User Event Script",
"usereventscript",
"/Objects/Scripts/UserEvent",
"customscript"
],
[
"Workflows",
"workflow",
"/Objects/Workflows",
"customworkflow"
],
[
"Workflow Action Scripts",
"workflowactionscript",
"/Objects/Scripts/WorkflowAction",
"customscript"
]
],
"cli_commands": [
[
"Add Dependencies to Manifest",
"Adds missing dependencies to the manifest file.",
"adddependencies"
],
[
"Deploy to Account",
"Deploys the folder or zip file that contains the SuiteCloud project.",
"deploy"
],
[
"Import Bundle",
"Imports a customization bundle from your NetSuite account and\nconverts it to an account customization project.",
"importbundle"
],
[
"Import Configuration",
"Imports the feature configuration from your NetSuite account to the account customization project so that you can enable or disable features in the project.",
"importconfiguration"
],
[
"Import Files",
"Imports files from your NetSuite account to the account customization project.",
"importfiles"
],
[
"Import Objects",
"Imports custom objects from your NetSuite account to the SuiteCloud project.",
"importobjects"
],
[
"List Bundles",
"Lists the customization bundles that were created in your NetSuite account.",
"listbundles"
],
[
"List Configuration",
"List available account configuration.",
"listconfiguration"
],
[
"List Files",
"Lists the files in the File Cabinet of your NetSuite account.",
"listfiles"
],
[
"List Missing Dependencies",
"Lists the missing dependencies in the SuiteCloud project.",
"listmissingdependencies"
],
[
"List Objects",
"Lists the custom objects in your NetSuite account.",
"listobjects"
],
[
"Preview",
"Previews the deployment steps of a folder or zip file that contains the SuiteCloud project.",
"preview"
],
[
"Update",
"Updates existing custom objects in the SuiteCloud project folder with the custom objects in your NetSuite account.",
"update"
],
[
"Update Custom Record With Instances",
"Updates the custom record object and its instances in the SuiteCloud project.",
"updatecustomrecordwithinstances"
],
# [
# "Upload File to Account",
# "Either the current file if in a SuiteScript folder, or show you a list of files to upload.",
# "uploadfiles"
# ],
# [
# "Upload Folder to Account",
# "Select an entire folder to upload",
# "uploadfolders"
# ],
[
"Validate Project",
"Validates the folder or zip file that contains the SuiteCloud project.",
"validate"
],
[
"Issue Token",
"Issues a TBA token to authenticate against your NetSuite Production account.",
"issuetoken"
],
[
"Save Token",
"Saves the latest TBA token created from your NetSuite account to use with SDF CLI.",
"savetoken"
],
[
"Revoke Token",
"Revokes the latest TBA token that was issued to your NetSuite Production account using the issuetoken command.",
"revoketoken"
],
[
"Set Password",
"Sets the password for this environment and session",
"setpassword"
],
[
"Clear Password",
"Clears the password for this environment from this session",
"clearpassword"
]
]
}
def get( index_key ):
return Config.config_object[ index_key ] | 22.675595 | 163 | 0.60861 | class Config():
config_object = {
"cli_arguments": {
"adddependencies": "-p \"[PROJECT_FOLDER]\" -all",
"deploy": "-np -p \"[PROJECT_FOLDER]\"",
"importbundle": "-p \"[PROJECT_FOLDER]\"",
"importconfiguration": "-p \"[PROJECT_FOLDER]\"",
"importfiles": "-p \"[PROJECT_FOLDER]\" -excludeproperties",
"importobjects": "-p \"[PROJECT_FOLDER]\"",
"listbundles": "",
"listconfiguration": "",
"listfiles": "-folder \"/SuiteScripts\"",
"listmissingdependencies": "-p \"[PROJECT_FOLDER]\"",
"listobjects": "-p \"[PROJECT_FOLDER]\"",
"preview": "-p \"[PROJECT_FOLDER]\"",
"update": "-p \"[PROJECT_FOLDER]\"",
"updatecustomrecordwithinstances": "-p \"[PROJECT_FOLDER]\"",
# "uploadfiles": "-p \"[PROJECT_FOLDER]\"",
# "uploadfolders": "-p \"[PROJECT_FOLDER]\"",
"validate": "-p \"[PROJECT_FOLDER]\" -server",
"issuetoken": "",
"savetoken": "",
"revoketoken": ""
},
"custom_objects": [
[
"Bundle Installation Script",
"bundleinstallationscript",
"/Objects/Scripts/BundleInstallation",
"customscript"
],
[
"Centers",
"center",
"/Objects/CentersAndTabs/Center",
"custcenter"
],
[
"Center Categories",
"centercategory",
"/Objects/CentersAndTabs/Category",
"custcentercategory"
],
[
"Center Tabs",
"centertab",
"/Objects/CentersAndTabs/Tab",
"custcentertab"
],
[
"Client Scripts",
"clientscript",
"/Objects/Scripts/Client",
"customscript"
],
[
"CRM Custom Fields",
"crmcustomfield",
"/Objects/Fields/CRM",
"custevent"
],
[
"Custom Plugins",
"customglplugin",
"/Objects/Plugins/Custom",
"customscript"
],
[
"Custom Lists",
"customlist",
"/Objects/Lists",
"customlist"
],
[
"Custom Records",
"customrecordtype",
"/Objects/Records",
"customrecord"
],
[
"Email Capture Plugins",
"emailcaptureplugin",
"/Objects/Plugins/Email",
"customscript"
],
[
"Entity Custom Fields",
"entitycustomfield",
"/Objects/Fields/Entity",
"custentity"
],
[
"Entity Forms",
"entryForm",
"/Objects/Forms/Entry",
"custform"
],
[
"Transaction Forms",
"transactionForm",
"/Objects/Forms/Transaction",
"custform"
],
[
"Item Custom Fields",
"itemcustomfield",
"/Objects/Fields/Item",
"custitem"
],
[
"Item Number Custom Fields",
"itemnumbercustomfield",
"/Objects/Fields/ItemNumber",
"custitem"
],
[
"Item Option Custom Fields",
"itemoptioncustomfield",
"/Objects/Fields/ItemOption",
"custitemoption"
],
[
"Map Reduce Script",
"mapreducescript",
"/Objects/Scripts/MapReduce",
"customscript"
],
[
"Mass Update Script",
"massupdatescript",
"/Objects/Scripts/MassUpdate",
"customscript"
],
[
"Other Custom Field",
"othercustomfield",
"/Objects/Fields/Other",
"custrecord"
],
[
"Portlets",
"portlet",
"/Objects/Scripts/Portlet",
"customscript"
],
[
"Promotions Plugins",
"promotionsplugin",
"/Objects/Plugins/Promotions",
"customscript"
],
[
"Restlets",
"restlet",
"/Objects/Scripts/Restlet",
"customscript"
],
[
"Roles",
"role",
"/Objects/Roles",
"customrole"
],
[
"Saved Searches",
"savedsearch",
"/Objects/SavedSearches",
"customsearch"
],
[
"Scheduled Scripts",
"scheduledscript",
"/Objects/Scripts/Scheduled",
"customscript"
],
[
"Sub Tabs",
"subtab",
"/Objects/CentersAndTabs/SubTab",
"custtab"
],
[
"Suitelet",
"suitelet",
"/Objects/Scripts/Suitelet",
"customscript"
],
[
"Transaction Body Custom Field",
"transactionbodycustomfield",
"/Objects/Fields/TransactionBody",
"transactionbodycustomfield"
],
[
"Transaction Column Custom Field",
"transactioncolumncustomfield",
"/Objects/Fields/TransactionColumn",
"custcol"
],
[
"User Event Script",
"usereventscript",
"/Objects/Scripts/UserEvent",
"customscript"
],
[
"Workflows",
"workflow",
"/Objects/Workflows",
"customworkflow"
],
[
"Workflow Action Scripts",
"workflowactionscript",
"/Objects/Scripts/WorkflowAction",
"customscript"
]
],
"cli_commands": [
[
"Add Dependencies to Manifest",
"Adds missing dependencies to the manifest file.",
"adddependencies"
],
[
"Deploy to Account",
"Deploys the folder or zip file that contains the SuiteCloud project.",
"deploy"
],
[
"Import Bundle",
"Imports a customization bundle from your NetSuite account and\nconverts it to an account customization project.",
"importbundle"
],
[
"Import Configuration",
"Imports the feature configuration from your NetSuite account to the account customization project so that you can enable or disable features in the project.",
"importconfiguration"
],
[
"Import Files",
"Imports files from your NetSuite account to the account customization project.",
"importfiles"
],
[
"Import Objects",
"Imports custom objects from your NetSuite account to the SuiteCloud project.",
"importobjects"
],
[
"List Bundles",
"Lists the customization bundles that were created in your NetSuite account.",
"listbundles"
],
[
"List Configuration",
"List available account configuration.",
"listconfiguration"
],
[
"List Files",
"Lists the files in the File Cabinet of your NetSuite account.",
"listfiles"
],
[
"List Missing Dependencies",
"Lists the missing dependencies in the SuiteCloud project.",
"listmissingdependencies"
],
[
"List Objects",
"Lists the custom objects in your NetSuite account.",
"listobjects"
],
[
"Preview",
"Previews the deployment steps of a folder or zip file that contains the SuiteCloud project.",
"preview"
],
[
"Update",
"Updates existing custom objects in the SuiteCloud project folder with the custom objects in your NetSuite account.",
"update"
],
[
"Update Custom Record With Instances",
"Updates the custom record object and its instances in the SuiteCloud project.",
"updatecustomrecordwithinstances"
],
# [
# "Upload File to Account",
# "Either the current file if in a SuiteScript folder, or show you a list of files to upload.",
# "uploadfiles"
# ],
# [
# "Upload Folder to Account",
# "Select an entire folder to upload",
# "uploadfolders"
# ],
[
"Validate Project",
"Validates the folder or zip file that contains the SuiteCloud project.",
"validate"
],
[
"Issue Token",
"Issues a TBA token to authenticate against your NetSuite Production account.",
"issuetoken"
],
[
"Save Token",
"Saves the latest TBA token created from your NetSuite account to use with SDF CLI.",
"savetoken"
],
[
"Revoke Token",
"Revokes the latest TBA token that was issued to your NetSuite Production account using the issuetoken command.",
"revoketoken"
],
[
"Set Password",
"Sets the password for this environment and session",
"setpassword"
],
[
"Clear Password",
"Clears the password for this environment from this session",
"clearpassword"
]
]
}
def get( index_key ):
return Config.config_object[ index_key ] | true | true |
1c3aed1dcedbba4d6e31e8cb158888ac54b0db5e | 871 | py | Python | certbot-apache/certbot_apache/_internal/override_arch.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 5 | 2021-01-26T08:47:29.000Z | 2021-01-30T00:42:12.000Z | certbot-apache/certbot_apache/_internal/override_arch.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 2 | 2019-11-20T07:08:26.000Z | 2020-11-05T23:31:48.000Z | certbot-apache/certbot_apache/_internal/override_arch.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 1 | 2020-10-28T05:49:43.000Z | 2020-10-28T05:49:43.000Z | """ Distribution specific override class for Arch Linux """
import zope.interface
from certbot import interfaces
from certbot_apache._internal import configurator
@zope.interface.provider(interfaces.IPluginFactory)
class ArchConfigurator(configurator.ApacheConfigurator):
"""Arch Linux specific ApacheConfigurator override class"""
OS_DEFAULTS = dict(
server_root="/etc/httpd",
vhost_root="/etc/httpd/conf",
vhost_files="*.conf",
logs_root="/var/log/httpd",
ctl="apachectl",
version_cmd=['apachectl', '-v'],
restart_cmd=['apachectl', 'graceful'],
conftest_cmd=['apachectl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_modules=False,
handle_sites=False,
challenge_location="/etc/httpd/conf",
bin=None,
)
| 30.034483 | 63 | 0.661309 | import zope.interface
from certbot import interfaces
from certbot_apache._internal import configurator
@zope.interface.provider(interfaces.IPluginFactory)
class ArchConfigurator(configurator.ApacheConfigurator):
OS_DEFAULTS = dict(
server_root="/etc/httpd",
vhost_root="/etc/httpd/conf",
vhost_files="*.conf",
logs_root="/var/log/httpd",
ctl="apachectl",
version_cmd=['apachectl', '-v'],
restart_cmd=['apachectl', 'graceful'],
conftest_cmd=['apachectl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_modules=False,
handle_sites=False,
challenge_location="/etc/httpd/conf",
bin=None,
)
| true | true |
1c3aed720ff3c84f8575b631665ae385013da9c9 | 12,656 | py | Python | sdks/python/apache_beam/dataframe/expressions.py | bipinupd/beam | fffb85a35df6ae3bdb2934c077856f6b27559aa7 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/dataframe/expressions.py | bipinupd/beam | fffb85a35df6ae3bdb2934c077856f6b27559aa7 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/dataframe/expressions.py | bipinupd/beam | fffb85a35df6ae3bdb2934c077856f6b27559aa7 | [
"Apache-2.0"
] | 1 | 2020-04-29T20:09:40.000Z | 2020-04-29T20:09:40.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
"""A session represents a mapping of expressions to concrete values.
The bindings typically include required placeholders, but may be any
intermediate expression as well.
"""
def __init__(self, bindings=None):
self._bindings = dict(bindings or {})
def evaluate(self, expr): # type: (Expression) -> Any
if expr not in self._bindings:
self._bindings[expr] = expr.evaluate_at(self)
return self._bindings[expr]
def lookup(self, expr): # type: (Expression) -> Any
return self._bindings[expr]
class PartitioningSession(Session):
"""An extension of Session that enforces actual partitioning of inputs.
Each expression is evaluated multiple times for various supported
partitionings determined by its `requires_partition_by` specification. For
each tested partitioning, the input is partitioned and the expression is
evaluated on each partition separately, as if this were actually executed in
a parallel manner.
For each input partitioning, the results are verified to be partitioned
appropriately according to the expression's `preserves_partition_by`
specification.
For testing only.
"""
def evaluate(self, expr):
import pandas as pd
import collections
def is_scalar(expr):
return not isinstance(expr.proxy(), pd.core.generic.NDFrame)
def difficulty(partitioning):
"""Imposes an ordering on partitionings where the largest schemes are the
most likely to reveal an error. This order is different from the one
defined by is_subpartitioning_of:
Nothing() > Index() > ... > Index([i,j]) > Index([j]) > Singleton()
"""
if isinstance(partitioning, partitionings.Singleton):
return -float('inf')
elif isinstance(partitioning, partitionings.Index):
if partitioning._levels is None:
return 1_000_000
else:
return len(partitioning._levels)
elif isinstance(partitioning, partitionings.Nothing):
return float('inf')
if expr not in self._bindings:
if is_scalar(expr) or not expr.args():
result = super(PartitioningSession, self).evaluate(expr)
else:
scaler_args = [arg for arg in expr.args() if is_scalar(arg)]
def evaluate_with(input_partitioning):
parts = collections.defaultdict(
lambda: Session({arg: self.evaluate(arg)
for arg in scaler_args}))
for arg in expr.args():
if not is_scalar(arg):
input = self.evaluate(arg)
for key, part in input_partitioning.test_partition_fn(input):
parts[key]._bindings[arg] = part
if not parts:
parts[None] # Create at least one entry.
results = []
for session in parts.values():
if any(len(session.lookup(arg)) for arg in expr.args()
if not is_scalar(arg)):
results.append(session.evaluate(expr))
expected_output_partitioning = expr.preserves_partition_by(
) if input_partitioning.is_subpartitioning_of(
expr.preserves_partition_by()) else input_partitioning
if not expected_output_partitioning.check(results):
raise AssertionError(
f"""Expression does not preserve partitioning!
Expression: {expr}
Requires: {expr.requires_partition_by()}
Preserves: {expr.preserves_partition_by()}
Input partitioning: {input_partitioning}
Expected output partitioning: {expected_output_partitioning}
""")
if results:
return pd.concat(results)
else:
# Choose any single session.
return next(iter(parts.values())).evaluate(expr)
# Store random state so it can be re-used for each execution, in case
# the expression is part of a test that relies on the random seed.
random_state = random.getstate()
# Run with all supported partitionings in order of ascending
# "difficulty". This way the final result is computed with the
# most challenging partitioning. Avoids heisenbugs where sometimes
# the result is computed trivially with Singleton partitioning and
# passes.
for input_partitioning in sorted(set([expr.requires_partition_by(),
partitionings.Nothing(),
partitionings.Index(),
partitionings.Singleton()]),
key=difficulty):
if not input_partitioning.is_subpartitioning_of(
expr.requires_partition_by()):
continue
random.setstate(random_state)
result = evaluate_with(input_partitioning)
self._bindings[expr] = result
return self._bindings[expr]
# The return type of an Expression
T = TypeVar('T')
class Expression(object):
"""An expression is an operation bound to a set of arguments.
An expression represents a deferred tree of operations, which can be
evaluated at a specific bindings of root expressions to values.
"""
def __init__(
self,
name, # type: str
proxy, # type: T
_id=None # type: Optional[str]
):
self._name = name
self._proxy = proxy
# Store for preservation through pickling.
self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))
def proxy(self): # type: () -> T
return self._proxy
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
return self._id == other._id
def __ne__(self, other):
return not self == other
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self._id)
def placeholders(self):
"""Returns all the placeholders that self depends on."""
raise NotImplementedError(type(self))
def evaluate_at(self, session): # type: (Session) -> T
"""Returns the result of self with the bindings given in session."""
raise NotImplementedError(type(self))
def requires_partition_by(self): # type: () -> partitionings.Partitioning
"""Returns the partitioning, if any, require to evaluate this expression.
Returns partitioning.Nothing() to require no partitioning is required.
"""
raise NotImplementedError(type(self))
def preserves_partition_by(self): # type: () -> partitionings.Partitioning
"""Returns the partitioning, if any, preserved by this expression.
This gives an upper bound on the partitioning of its ouput. The actual
partitioning of the output may be less strict (e.g. if the input was
less partitioned).
"""
raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
"""An expression whose value must be explicitly bound in the session."""
def __init__(
self, # type: PlaceholderExpression
proxy, # type: T
reference=None, # type: Any
):
"""Initialize a placeholder expression.
Args:
proxy: A proxy object with the type expected to be bound to this
expression. Used for type checking at pipeline construction time.
"""
super(PlaceholderExpression, self).__init__('placeholder', proxy)
self._reference = reference
def placeholders(self):
return frozenset([self])
def args(self):
return ()
def evaluate_at(self, session):
return session.lookup(self)
def requires_partition_by(self):
return partitionings.Nothing()
def preserves_partition_by(self):
return partitionings.Nothing()
class ConstantExpression(Expression):
"""An expression whose value is known at pipeline construction time."""
def __init__(
self, # type: ConstantExpression
value, # type: T
proxy=None # type: Optional[T]
):
"""Initialize a constant expression.
Args:
value: The constant value to be produced by this expression.
proxy: (Optional) a proxy object with same type as `value` to use for
rapid type checking at pipeline construction time. If not provided,
`value` will be used directly.
"""
if proxy is None:
proxy = value
super(ConstantExpression, self).__init__('constant', proxy)
self._value = value
def placeholders(self):
return frozenset()
def args(self):
return ()
def evaluate_at(self, session):
return self._value
def requires_partition_by(self):
return partitionings.Nothing()
def preserves_partition_by(self):
return partitionings.Nothing()
class ComputedExpression(Expression):
"""An expression whose value must be computed at pipeline execution time."""
def __init__(
self, # type: ComputedExpression
name, # type: str
func, # type: Callable[...,T]
args, # type: Iterable[Expression]
proxy=None, # type: Optional[T]
_id=None, # type: Optional[str]
requires_partition_by=partitionings.Index(), # type: partitionings.Partitioning
preserves_partition_by=partitionings.Nothing(), # type: partitionings.Partitioning
):
"""Initialize a computed expression.
Args:
name: The name of this expression.
func: The function that will be used to compute the value of this
expression. Should accept arguments of the types returned when
evaluating the `args` expressions.
args: The list of expressions that will be used to produce inputs to
`func`.
proxy: (Optional) a proxy object with same type as the objects that this
ComputedExpression will produce at execution time. If not provided, a
proxy will be generated using `func` and the proxies of `args`.
_id: (Optional) a string to uniquely identify this expression.
requires_partition_by: The required (common) partitioning of the args.
preserves_partition_by: The level of partitioning preserved.
"""
if (not _get_allow_non_parallel() and
requires_partition_by == partitionings.Singleton()):
raise NonParallelOperation(
"Using non-parallel form of %s "
"outside of allow_non_parallel_operations block." % name)
args = tuple(args)
if proxy is None:
proxy = func(*(arg.proxy() for arg in args))
super(ComputedExpression, self).__init__(name, proxy, _id)
self._func = func
self._args = args
self._requires_partition_by = requires_partition_by
self._preserves_partition_by = preserves_partition_by
def placeholders(self):
return frozenset.union(
frozenset(), *[arg.placeholders() for arg in self.args()])
def args(self):
return self._args
def evaluate_at(self, session):
return self._func(*(session.evaluate(arg) for arg in self._args))
def requires_partition_by(self):
return self._requires_partition_by
def preserves_partition_by(self):
return self._preserves_partition_by
def elementwise_expression(name, func, args):
return ComputedExpression(
name,
func,
args,
requires_partition_by=partitionings.Nothing(),
preserves_partition_by=partitionings.Singleton())
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
if allow is None:
yield
else:
old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow
yield
_ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
pass
| 33.930295 | 89 | 0.680547 |
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
def __init__(self, bindings=None):
self._bindings = dict(bindings or {})
def evaluate(self, expr):
if expr not in self._bindings:
self._bindings[expr] = expr.evaluate_at(self)
return self._bindings[expr]
def lookup(self, expr):
return self._bindings[expr]
class PartitioningSession(Session):
def evaluate(self, expr):
import pandas as pd
import collections
def is_scalar(expr):
return not isinstance(expr.proxy(), pd.core.generic.NDFrame)
def difficulty(partitioning):
if isinstance(partitioning, partitionings.Singleton):
return -float('inf')
elif isinstance(partitioning, partitionings.Index):
if partitioning._levels is None:
return 1_000_000
else:
return len(partitioning._levels)
elif isinstance(partitioning, partitionings.Nothing):
return float('inf')
if expr not in self._bindings:
if is_scalar(expr) or not expr.args():
result = super(PartitioningSession, self).evaluate(expr)
else:
scaler_args = [arg for arg in expr.args() if is_scalar(arg)]
def evaluate_with(input_partitioning):
parts = collections.defaultdict(
lambda: Session({arg: self.evaluate(arg)
for arg in scaler_args}))
for arg in expr.args():
if not is_scalar(arg):
input = self.evaluate(arg)
for key, part in input_partitioning.test_partition_fn(input):
parts[key]._bindings[arg] = part
if not parts:
parts[None]
results = []
for session in parts.values():
if any(len(session.lookup(arg)) for arg in expr.args()
if not is_scalar(arg)):
results.append(session.evaluate(expr))
expected_output_partitioning = expr.preserves_partition_by(
) if input_partitioning.is_subpartitioning_of(
expr.preserves_partition_by()) else input_partitioning
if not expected_output_partitioning.check(results):
raise AssertionError(
f"""Expression does not preserve partitioning!
Expression: {expr}
Requires: {expr.requires_partition_by()}
Preserves: {expr.preserves_partition_by()}
Input partitioning: {input_partitioning}
Expected output partitioning: {expected_output_partitioning}
""")
if results:
return pd.concat(results)
else:
return next(iter(parts.values())).evaluate(expr)
random_state = random.getstate()
for input_partitioning in sorted(set([expr.requires_partition_by(),
partitionings.Nothing(),
partitionings.Index(),
partitionings.Singleton()]),
key=difficulty):
if not input_partitioning.is_subpartitioning_of(
expr.requires_partition_by()):
continue
random.setstate(random_state)
result = evaluate_with(input_partitioning)
self._bindings[expr] = result
return self._bindings[expr]
T = TypeVar('T')
class Expression(object):
def __init__(
self,
name,
proxy,
_id=None
):
self._name = name
self._proxy = proxy
self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))
def proxy(self):
return self._proxy
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
return self._id == other._id
def __ne__(self, other):
return not self == other
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self._id)
def placeholders(self):
raise NotImplementedError(type(self))
def evaluate_at(self, session):
raise NotImplementedError(type(self))
def requires_partition_by(self):
raise NotImplementedError(type(self))
def preserves_partition_by(self):
raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
def __init__(
self,
proxy,
reference=None,
):
super(PlaceholderExpression, self).__init__('placeholder', proxy)
self._reference = reference
def placeholders(self):
return frozenset([self])
def args(self):
return ()
def evaluate_at(self, session):
return session.lookup(self)
def requires_partition_by(self):
return partitionings.Nothing()
def preserves_partition_by(self):
return partitionings.Nothing()
class ConstantExpression(Expression):
def __init__(
self,
value,
proxy=None
):
if proxy is None:
proxy = value
super(ConstantExpression, self).__init__('constant', proxy)
self._value = value
def placeholders(self):
return frozenset()
def args(self):
return ()
def evaluate_at(self, session):
return self._value
def requires_partition_by(self):
return partitionings.Nothing()
def preserves_partition_by(self):
return partitionings.Nothing()
class ComputedExpression(Expression):
def __init__(
self,
name,
func,
args,
proxy=None,
_id=None,
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Nothing(),
):
if (not _get_allow_non_parallel() and
requires_partition_by == partitionings.Singleton()):
raise NonParallelOperation(
"Using non-parallel form of %s "
"outside of allow_non_parallel_operations block." % name)
args = tuple(args)
if proxy is None:
proxy = func(*(arg.proxy() for arg in args))
super(ComputedExpression, self).__init__(name, proxy, _id)
self._func = func
self._args = args
self._requires_partition_by = requires_partition_by
self._preserves_partition_by = preserves_partition_by
def placeholders(self):
return frozenset.union(
frozenset(), *[arg.placeholders() for arg in self.args()])
def args(self):
return self._args
def evaluate_at(self, session):
return self._func(*(session.evaluate(arg) for arg in self._args))
def requires_partition_by(self):
return self._requires_partition_by
def preserves_partition_by(self):
return self._preserves_partition_by
def elementwise_expression(name, func, args):
return ComputedExpression(
name,
func,
args,
requires_partition_by=partitionings.Nothing(),
preserves_partition_by=partitionings.Singleton())
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
if allow is None:
yield
else:
old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow
yield
_ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
pass
| true | true |
1c3aee424e7fc8070086b9ea865864c2f4fcc382 | 3,031 | py | Python | python/sfm_global.py | teguhkhg/3dv_tutorial_py | 5e7bc614c5a71cd9d125b1bd8767b0b502ef9241 | [
"Beerware"
] | 2 | 2020-12-15T06:33:45.000Z | 2021-02-16T22:53:48.000Z | python/sfm_global.py | teguhkhg/3dv_tutorial_py | 5e7bc614c5a71cd9d125b1bd8767b0b502ef9241 | [
"Beerware"
] | null | null | null | python/sfm_global.py | teguhkhg/3dv_tutorial_py | 5e7bc614c5a71cd9d125b1bd8767b0b502ef9241 | [
"Beerware"
] | null | null | null | import numpy as np
import g2o
import cv2
import glob
from bundle_adjustment import MonoBA
# def makeNoisyPoints(Xs, xs, )
class Frame(object):
def __init__(self):
pass
class Mappoint(object):
def __init__(self):
pass
class Measurement(object):
def __init__(self):
pass
class CovisibilityGraph(object):
def __init__(self):
pass
def main():
img_resize = 0.25
f_init = 500
cx_init = -1
cy_init = -1
Z_init = 2
Z_limit = 100
ba_loss_width = 9
min_inlier_num = 200
ba_inlier_num = 200
show_match = False
fdetector = cv2.BRISK_create()
img_keypoint = []
img_set = []
img_descriptor = []
files = sorted(glob.glob("../bin/data/relief/*.jpg"))
for filename in files:
image = cv2.imread(filename)
if img_resize != 1:
width = int(image.shape[1] * img_resize)
height = int(image.shape[0] * img_resize)
dim = (width, height)
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
keypoint, descriptor = fdetector.detectAndCompute(image, None)
img_set.append(image)
img_keypoint.append(keypoint)
img_descriptor.append(descriptor)
if len(img_set) < 2:
return
if cx_init < 0:
cx_init = int(img_set[0].shape[1]/2)
if cy_init < 0:
cy_init = int(img_set[0].shape[0]/2)
print(cx_init, cy_init)
fmatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
match_pair = []
match_inlier = []
for i in range(len(img_set)):
for j in range(i + 1, len(img_set)):
matches = fmatcher.match(img_descriptor[i], img_descriptor[j])
inlier = []
src = []
dst = []
for itr in matches:
src.append(img_keypoint[i][itr.queryIdx].pt)
dst.append(img_keypoint[j][itr.trainIdx].pt)
src = np.asarray(src)
dst = np.asarray(dst)
F, inlier_mask = cv2.findFundamentalMat(src, dst, cv2.RANSAC)
for k in range(len(inlier_mask)):
if inlier_mask[k]:
inlier.append(matches[k])
print("3DV Tutorial: Image %d - %d are matched (%d / %d).\n"
% (i, j, len(inlier), len(inlier_mask)))
if len(inlier) < min_inlier_num:
continue
print("3DV Tutorial: Image %d - %d are selected.\n" % (i, j))
match_pair.append((i, j))
match_inlier.append(inlier)
if show_match:
match_image = cv2.drawMatches(
img_set[i], img_keypoint[i], img_set[j], img_keypoint[j], matches, None, None, None, inlier_mask)
cv2.imshow("3DV Tutorial: Structure-from-Motion", match_image)
cv2.waitKey()
if len(match_pair) < 1:
return
ba = MonoBA()
ba.set_camera(float(f_init), np.array([cx_init, cy_init]).astype(float))
if __name__ == "__main__":
main() | 28.866667 | 117 | 0.571429 | import numpy as np
import g2o
import cv2
import glob
from bundle_adjustment import MonoBA
class Frame(object):
def __init__(self):
pass
class Mappoint(object):
def __init__(self):
pass
class Measurement(object):
def __init__(self):
pass
class CovisibilityGraph(object):
def __init__(self):
pass
def main():
img_resize = 0.25
f_init = 500
cx_init = -1
cy_init = -1
Z_init = 2
Z_limit = 100
ba_loss_width = 9
min_inlier_num = 200
ba_inlier_num = 200
show_match = False
fdetector = cv2.BRISK_create()
img_keypoint = []
img_set = []
img_descriptor = []
files = sorted(glob.glob("../bin/data/relief/*.jpg"))
for filename in files:
image = cv2.imread(filename)
if img_resize != 1:
width = int(image.shape[1] * img_resize)
height = int(image.shape[0] * img_resize)
dim = (width, height)
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
keypoint, descriptor = fdetector.detectAndCompute(image, None)
img_set.append(image)
img_keypoint.append(keypoint)
img_descriptor.append(descriptor)
if len(img_set) < 2:
return
if cx_init < 0:
cx_init = int(img_set[0].shape[1]/2)
if cy_init < 0:
cy_init = int(img_set[0].shape[0]/2)
print(cx_init, cy_init)
fmatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
match_pair = []
match_inlier = []
for i in range(len(img_set)):
for j in range(i + 1, len(img_set)):
matches = fmatcher.match(img_descriptor[i], img_descriptor[j])
inlier = []
src = []
dst = []
for itr in matches:
src.append(img_keypoint[i][itr.queryIdx].pt)
dst.append(img_keypoint[j][itr.trainIdx].pt)
src = np.asarray(src)
dst = np.asarray(dst)
F, inlier_mask = cv2.findFundamentalMat(src, dst, cv2.RANSAC)
for k in range(len(inlier_mask)):
if inlier_mask[k]:
inlier.append(matches[k])
print("3DV Tutorial: Image %d - %d are matched (%d / %d).\n"
% (i, j, len(inlier), len(inlier_mask)))
if len(inlier) < min_inlier_num:
continue
print("3DV Tutorial: Image %d - %d are selected.\n" % (i, j))
match_pair.append((i, j))
match_inlier.append(inlier)
if show_match:
match_image = cv2.drawMatches(
img_set[i], img_keypoint[i], img_set[j], img_keypoint[j], matches, None, None, None, inlier_mask)
cv2.imshow("3DV Tutorial: Structure-from-Motion", match_image)
cv2.waitKey()
if len(match_pair) < 1:
return
ba = MonoBA()
ba.set_camera(float(f_init), np.array([cx_init, cy_init]).astype(float))
if __name__ == "__main__":
main() | true | true |
1c3aef1a22edb2ebc2b9b274684f2b269d3324c2 | 4,634 | py | Python | chalice/app.py | viswanath-puttagunta/aws-cheatsheet | 2425eb15a6a1a22e9679c218d69b2f4e9c277a75 | [
"Apache-2.0"
] | null | null | null | chalice/app.py | viswanath-puttagunta/aws-cheatsheet | 2425eb15a6a1a22e9679c218d69b2f4e9c277a75 | [
"Apache-2.0"
] | null | null | null | chalice/app.py | viswanath-puttagunta/aws-cheatsheet | 2425eb15a6a1a22e9679c218d69b2f4e9c277a75 | [
"Apache-2.0"
] | null | null | null | from chalice import Chalice, BadRequestError
from chalice import NotFoundError
from chalice import CORSConfig
import json
import boto3
from botocore.exceptions import ClientError
'''
Commands that worked
http ${GATEWAY}/cities/seattle
http -v ${GATEWAY}/objects/mykey X-Api-Key:${APIKEY}
echo '{"name":"Viswanath"}' | http PUT ${GATEWAY}/files/vkey X-API-Key:${APIKEY}
http PUT ${GATEWAY}/files/doc2_shortened.pdf Content-Type:application/octet-stream X-API-Key:${APIKEY} < ${DATAFOLDER}/${DATAFILE}
echo '{"fname":"kavi", "lname":"Viswanath"}' | http PUT ${GATEWAY}/dynamo/vkey X-API-Key:${APIKEY}
echo '{"fname":"beku", "data": {"lname":"Vish","mname":"vell"}}' | http PUT ${GATEWAY}/dynamo/vkey X-API-Key:${APIKEY}
http ${GATEWAY}/dynamo/kavi X-API-Key:${APIKEY}
'''
# Chalice application; debug=True includes tracebacks in HTTP error responses.
app = Chalice(app_name='khelloworld')
app.debug = True

# Demo lookup table backing the /cities/{city} route.
CITIES_TO_STATE = {
    'seattle': 'WA',
    'portland': 'OR'
}

# AWS clients/resources shared by the route handlers below.
S3 = boto3.client('s3', region_name='us-east-2')
BUCKET = 'vbucket-test1234'  # JSON object storage (used by /objects)
BUCKET2 = 'vbucket-file1234'  # raw file storage (used by /files and /mdoc)
SQS_QUEUE_URL = 'sqsurlgoeshere'  # NOTE(review): placeholder -- must be a real queue URL
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('vdynamodb')  # hash key appears to be 'fname' -- confirm
sqs = boto3.client('sqs')
@app.route('/')
def index():
    """Root endpoint: always returns a fixed greeting payload."""
    greeting = dict(hello='vworld')
    return greeting
@app.route('/cities/{city}')
def state_of_city(city):
    """Map a known city name to its state; 400 for unknown cities."""
    if city in CITIES_TO_STATE:
        return {'state': CITIES_TO_STATE[city]}
    # Unknown city: report the valid choices in the error message.
    raise BadRequestError("Unknown city '%s', valid choices are: %s" % (
        city, ', '.join(CITIES_TO_STATE.keys())))
@app.route('/objects/{key}', methods=['GET','PUT'], api_key_required=True)
def myobject(key):
    """Store (PUT) or fetch (GET) a JSON document under *key* in S3.

    PUT additionally records the key in DynamoDB (attribute "fname").
    GET returns the parsed JSON document, or 404 if the S3 read fails.
    Requires a valid API key.
    """
    request = app.current_request
    if request.method == 'PUT':
        # Serialize the request's JSON body and store it verbatim in S3.
        S3.put_object(Bucket=BUCKET, Key=key,Body=json.dumps(request.json_body))
        # Mirror the key into DynamoDB; batch_writer flushes on context exit.
        with table.batch_writer() as batch:
            batch.put_item(Item = {"fname": key})
        # NOTE(review): the PUT branch falls through without a return, so the
        # client receives a null body -- confirm that is intended.
    elif request.method == 'GET':
        try:
            response = S3.get_object(Bucket=BUCKET, Key=key)
            # Body is a streaming object; read it fully, then decode as JSON.
            return json.loads(response['Body'].read())
        except ClientError as e:
            # Any S3 client error (including a missing key) is reported as 404.
            raise NotFoundError(key)
@app.route('/dynamo/{key}', methods=['GET','PUT'], api_key_required=True)
def dynamofn(key):
    """Write (PUT) or read (GET) an item in the DynamoDB table.

    PUT stores the JSON request body as-is (it must contain the table's
    'fname' hash key) and echoes it back to the caller.
    """
    request = app.current_request
    # NOTE(review): json_body is read for both methods; for GET there is no
    # body, so `data` is unused on that path.
    data = request.json_body
    if request.method == 'PUT':
        with table.batch_writer() as batch:
            batch.put_item(Item = data)
        return data
    elif request.method == 'GET':
        try:
            response = table.get_item(
                Key={'fname':key}
            )
        except ClientError as e:
            raise NotFoundError(key)
        # NOTE(review): this returns the raw boto3 response (metadata
        # included) rather than response['Item'].  Also, a missing key does
        # not raise ClientError in boto3, so the 404 path above may never
        # trigger -- confirm against callers.
        return response
@app.route('/files/{key}', methods=['GET','PUT'], api_key_required=True,content_types=['application/octet-stream'])
def myfile(key):
    """Upload a raw octet-stream request body to S3 under *key*.

    GET (and any other allowed method) is a no-op; the handler always
    responds with an empty JSON object.
    """
    req = app.current_request
    if req.method == 'PUT':
        S3.put_object(Bucket=BUCKET2, Key=key, Body=req.raw_body)
    return {}
@app.route('/mdoc/{key}', methods=['GET','PUT'], api_key_required=True,content_types=['application/octet-stream'])
def mymdoc(key):
    """PUT: store a raw document and announce it on SQS; GET is a stub.

    The PUT path writes the raw request body to S3 (BUCKET2), records the
    key/bucket pair in DynamoDB, then queues an SQS message carrying the
    document id and bucket as message attributes.  Always returns {}.
    """
    request = app.current_request
    if request.method == 'PUT':
        S3.put_object(Bucket=BUCKET2, Key=key,Body=request.raw_body)
        with table.batch_writer() as batch:
            batch.put_item(Item= {'fname':key, 'bucket':BUCKET2})
        # Notify downstream consumers; DelaySeconds=10 defers visibility.
        response = sqs.send_message(
            QueueUrl=SQS_QUEUE_URL,
            DelaySeconds=10,
            MessageAttributes={
                'docid': { 'DataType': 'String', 'StringValue':key},
                'bucket': {'DataType': 'String', 'StringValue':BUCKET2}
            },
            MessageBody=('Hello')
        )
        # NOTE(review): `response` (message id etc.) is discarded.
    else:
        pass
    return {}
# Per-route CORS policy: only the listed origin may call the route, the
# custom header is both allowed and exposed, preflight responses may be
# cached for 600 seconds, and credentials are permitted.
cors_config = CORSConfig(
    allow_origin='https://h876bx1hx2.execute-api.us-east-2.amazonaws.com',
    allow_headers=['X-Special-Header'],
    max_age=600,
    expose_headers=['X-Special-Header'],
    allow_credentials=True
)
@app.route('/custom_cors', methods=['GET'], cors=cors_config)
def supports_custom_cors():
    """Endpoint demonstrating a per-route custom CORS configuration."""
    return dict(cors=True)
# The view function above will return {"hello": "world"}
# whenever you make an HTTP GET request to '/'.
#
# Here are a few more examples:
#
# @app.route('/hello/{name}')
# def hello_name(name):
# # '/hello/james' -> {"hello": "james"}
# return {'hello': name}
#
# @app.route('/users', methods=['POST'])
# def create_user():
# # This is the JSON body the user sent in their POST request.
# user_as_json = app.current_request.json_body
# # We'll echo the json body back to the user in a 'user' key.
# return {'user': user_as_json}
#
# See the README documentation for more examples.
#
| 31.739726 | 131 | 0.630125 | from chalice import Chalice, BadRequestError
from chalice import NotFoundError
from chalice import CORSConfig
import json
import boto3
from botocore.exceptions import ClientError
app = Chalice(app_name='khelloworld')
app.debug = True
CITIES_TO_STATE = {
'seattle': 'WA',
'portland': 'OR'
}
S3 = boto3.client('s3', region_name='us-east-2')
BUCKET = 'vbucket-test1234'
BUCKET2 = 'vbucket-file1234'
SQS_QUEUE_URL = 'sqsurlgoeshere'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('vdynamodb')
sqs = boto3.client('sqs')
@app.route('/')
def index():
return {'hello': 'vworld'}
@app.route('/cities/{city}')
def state_of_city(city):
try:
return {'state': CITIES_TO_STATE[city]}
except KeyError:
raise BadRequestError("Unknown city '%s', valid choices are: %s" % (
city, ', '.join(CITIES_TO_STATE.keys())))
@app.route('/objects/{key}', methods=['GET','PUT'], api_key_required=True)
def myobject(key):
request = app.current_request
if request.method == 'PUT':
S3.put_object(Bucket=BUCKET, Key=key,Body=json.dumps(request.json_body))
with table.batch_writer() as batch:
batch.put_item(Item = {"fname": key})
elif request.method == 'GET':
try:
response = S3.get_object(Bucket=BUCKET, Key=key)
return json.loads(response['Body'].read())
except ClientError as e:
raise NotFoundError(key)
@app.route('/dynamo/{key}', methods=['GET','PUT'], api_key_required=True)
def dynamofn(key):
request = app.current_request
data = request.json_body
if request.method == 'PUT':
with table.batch_writer() as batch:
batch.put_item(Item = data)
return data
elif request.method == 'GET':
try:
response = table.get_item(
Key={'fname':key}
)
except ClientError as e:
raise NotFoundError(key)
return response
@app.route('/files/{key}', methods=['GET','PUT'], api_key_required=True,content_types=['application/octet-stream'])
def myfile(key):
request = app.current_request
if request.method == 'PUT':
S3.put_object(Bucket=BUCKET2, Key=key,Body=request.raw_body)
else:
pass
return {}
@app.route('/mdoc/{key}', methods=['GET','PUT'], api_key_required=True,content_types=['application/octet-stream'])
def mymdoc(key):
request = app.current_request
if request.method == 'PUT':
S3.put_object(Bucket=BUCKET2, Key=key,Body=request.raw_body)
with table.batch_writer() as batch:
batch.put_item(Item= {'fname':key, 'bucket':BUCKET2})
response = sqs.send_message(
QueueUrl=SQS_QUEUE_URL,
DelaySeconds=10,
MessageAttributes={
'docid': { 'DataType': 'String', 'StringValue':key},
'bucket': {'DataType': 'String', 'StringValue':BUCKET2}
},
MessageBody=('Hello')
)
else:
pass
return {}
cors_config = CORSConfig(
allow_origin='https://h876bx1hx2.execute-api.us-east-2.amazonaws.com',
allow_headers=['X-Special-Header'],
max_age=600,
expose_headers=['X-Special-Header'],
allow_credentials=True
)
@app.route('/custom_cors', methods=['GET'], cors=cors_config)
def supports_custom_cors():
return {'cors': True}
| true | true |
1c3aefa8b340ce2d1addc68b0b56285e80ee1ddd | 730 | py | Python | main/management/commands/strava_update.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | main/management/commands/strava_update.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | main/management/commands/strava_update.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | import logging
import sys
from django.core.management.base import BaseCommand
from main.models import Athlete
from main.strava import update_user_strava_activities
from stravalib.exc import RateLimitExceeded
logger = logging.getLogger()
class Command(BaseCommand):
    """Management command that pulls new athlete activities from Strava."""

    help = 'Gets any new athlete activities from Strava'

    def handle(self, *args, **options):
        """Fetch Strava activities for every athlete.

        Stops early on RateLimitExceeded; remaining athletes are picked up
        on the next scheduled run.
        """
        for athlete in Athlete.objects.all():
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.info("Got athlete %s", athlete)
            user = athlete.user
            try:
                update_user_strava_activities(user)
            except RateLimitExceeded:
                # Stop processing instead of sys.exit(0): raising SystemExit
                # here would also terminate callers that invoke this command
                # programmatically via call_command().  Exit status is 0
                # either way when run from manage.py.
                logger.warning("Strava rate limit exceeded. Updates will be picked up next time.")
                break
| 30.416667 | 99 | 0.680822 | import logging
import sys
from django.core.management.base import BaseCommand
from main.models import Athlete
from main.strava import update_user_strava_activities
from stravalib.exc import RateLimitExceeded
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Gets any new athlete activities from Strava'
def handle(self, *args, **options):
for athlete in Athlete.objects.all():
logger.info(f"Got athlete {athlete}")
user = athlete.user
try:
update_user_strava_activities(user)
except RateLimitExceeded:
logger.warning(f"Strava rate limit exceeded. Updates will be picked up next time.")
sys.exit(0)
| true | true |
1c3af011d6321136b8938fe7837075da6815aaca | 5,798 | py | Python | direct/src/showutil/Rope.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | direct/src/showutil/Rope.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/direct/showutil/Rope.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2021-04-09T00:02:59.000Z | 2021-04-09T00:02:59.000Z | from panda3d.core import *
class Rope(NodePath):
    """
    This class defines a NURBS curve whose control vertices are
    defined based on points relative to one or more nodes in space, so
    that the "rope" will animate as the nodes move around.  It uses
    the C++ RopeNode class to achieve fancy rendering effects like
    thick lines built from triangle strips.
    """

    # Global switch ("show-rope" config variable): when false, recompute()
    # becomes a no-op and nothing is drawn.
    showRope = ConfigVariableBool('show-rope', True, \
         "Set this to false to deactivate the display of ropes.")

    def __init__(self, name = 'Rope'):
        self.ropeNode = RopeNode(name)
        self.curve = NurbsCurveEvaluator()
        self.ropeNode.setCurve(self.curve)
        NodePath.__init__(self, self.ropeNode)
        self.name = name

        # Curve description; populated by setup().
        self.order = 0
        self.verts = []
        self.knots = None

    def setup(self, order, verts, knots = None):
        """This must be called to define the shape of the curve
        initially, and may be called again as needed to adjust the
        curve's properties.

        order must be either 1, 2, 3, or 4, and is one more than the
        degree of the curve; most NURBS curves are order 4.

        verts is a list of (NodePath, point) tuples, defining the
        control vertices of the curve.  For each control vertex, the
        NodePath may refer to an arbitrary node in the scene graph,
        indicating the point should be interpreted in the coordinate
        space of that node (and it will automatically move when the
        node is moved), or it may be the empty NodePath or None to
        indicate the point should be interpreted in the coordinate
        space of the Rope itself.  Each point value may be either a
        3-tuple or a 4-tuple (or a VBase3 or VBase4).  If it is a
        3-component vector, it represents a 3-d point in space; a
        4-component vector represents a point in 4-d homogeneous
        space; that is to say, a 3-d point and an additional weight
        factor (which should have been multiplied into the x y z
        components).

        verts may be a list of dictionaries instead of a list of
        tuples.  In this case, each vertex dictionary may have any of
        the following elements:

          'node' : the NodePath indicating the coordinate space
          'point' : the 3-D point relative to the node; default (0, 0, 0)
          'color' : the color of the vertex, default (1, 1, 1, 1)
          'thickness' : the thickness at the vertex, default 1

        In order to enable the per-vertex color or thickness, you must
        call rope.ropeNode.setUseVertexColor(1) or
        rope.ropeNode.setUseVertexThickness(1).

        knots is optional.  If specified, it should be a list of
        floats, and should be of length len(verts) + order.  If it
        is omitted, a default knot string is generated that consists
        of the first (order - 1) and last (order - 1) values the
        same, and the intermediate values incrementing by 1.
        """
        self.order = order
        self.verts = verts
        self.knots = knots
        self.recompute()

    def recompute(self):
        """Recomputes the curve after its properties have changed.
        Normally it is not necessary for the user to call this
        directly."""
        if not self.showRope:
            return
        numVerts = len(self.verts)
        self.curve.reset(numVerts)
        self.curve.setOrder(self.order)

        defaultNodePath = None
        defaultPoint = (0, 0, 0)
        defaultColor = (1, 1, 1, 1)
        defaultThickness = 1

        useVertexColor = self.ropeNode.getUseVertexColor()
        useVertexThickness = self.ropeNode.getUseVertexThickness()
        vcd = self.ropeNode.getVertexColorDimension()
        vtd = self.ropeNode.getVertexThicknessDimension()

        for i, v in enumerate(self.verts):
            if isinstance(v, tuple):
                # (nodePath, point) form: color/thickness take defaults.
                nodePath, point = v
                color = defaultColor
                thickness = defaultThickness
            else:
                # Dictionary form: every field is optional.
                nodePath = v.get('node', defaultNodePath)
                point = v.get('point', defaultPoint)
                color = v.get('color', defaultColor)
                thickness = v.get('thickness', defaultThickness)

            if isinstance(point, tuple):
                if len(point) >= 4:
                    # 4-d homogeneous point (weight pre-multiplied in).
                    self.curve.setVertex(i, VBase4(point[0], point[1], point[2], point[3]))
                else:
                    self.curve.setVertex(i, VBase3(point[0], point[1], point[2]))
            else:
                # Already a VBase3/VBase4 (or compatible); pass through.
                self.curve.setVertex(i, point)
            if nodePath:
                self.curve.setVertexSpace(i, nodePath)

            if useVertexColor:
                self.curve.setExtendedVertex(i, vcd + 0, color[0])
                self.curve.setExtendedVertex(i, vcd + 1, color[1])
                self.curve.setExtendedVertex(i, vcd + 2, color[2])
                self.curve.setExtendedVertex(i, vcd + 3, color[3])
            if useVertexThickness:
                self.curve.setExtendedVertex(i, vtd, thickness)

        # Identity comparison with None (was "!= None", a PEP 8 violation).
        if self.knots is not None:
            for i, knot in enumerate(self.knots):
                self.curve.setKnot(i, knot)

        self.ropeNode.resetBound(self)

    def getPoints(self, len):
        """Returns a list of len points, evenly distributed in
        parametric space on the rope, in the coordinate space of the
        Rope itself."""
        # The parameter is named "len" (shadowing the builtin); it is kept
        # for backward compatibility with existing callers.
        result = self.curve.evaluate(self)
        startT = result.getStartT()
        sizeT = result.getEndT() - startT

        numPts = len
        if numPts == 1:
            # Avoid the divide-by-zero in the even-spacing formula below:
            # a single sample sits at the start of the curve.
            pt = Point3()
            result.evalPoint(startT, pt)
            return [pt]
        ropePts = []
        for i in range(numPts):
            pt = Point3()
            result.evalPoint(sizeT * i / float(numPts - 1) + startT, pt)
            ropePts.append(pt)
        return ropePts
| 39.175676 | 91 | 0.606588 | from panda3d.core import *
class Rope(NodePath):
showRope = ConfigVariableBool('show-rope', True, \
"Set this to false to deactivate the display of ropes.")
def __init__(self, name = 'Rope'):
self.ropeNode = RopeNode(name)
self.curve = NurbsCurveEvaluator()
self.ropeNode.setCurve(self.curve)
NodePath.__init__(self, self.ropeNode)
self.name = name
self.order = 0
self.verts = []
self.knots = None
def setup(self, order, verts, knots = None):
self.order = order
self.verts = verts
self.knots = knots
self.recompute()
def recompute(self):
if not self.showRope:
return
numVerts = len(self.verts)
self.curve.reset(numVerts)
self.curve.setOrder(self.order)
defaultNodePath = None
defaultPoint = (0, 0, 0)
defaultColor = (1, 1, 1, 1)
defaultThickness = 1
useVertexColor = self.ropeNode.getUseVertexColor()
useVertexThickness = self.ropeNode.getUseVertexThickness()
vcd = self.ropeNode.getVertexColorDimension()
vtd = self.ropeNode.getVertexThicknessDimension()
for i in range(numVerts):
v = self.verts[i]
if isinstance(v, tuple):
nodePath, point = v
color = defaultColor
thickness = defaultThickness
else:
nodePath = v.get('node', defaultNodePath)
point = v.get('point', defaultPoint)
color = v.get('color', defaultColor)
thickness = v.get('thickness', defaultThickness)
if isinstance(point, tuple):
if (len(point) >= 4):
self.curve.setVertex(i, VBase4(point[0], point[1], point[2], point[3]))
else:
self.curve.setVertex(i, VBase3(point[0], point[1], point[2]))
else:
self.curve.setVertex(i, point)
if nodePath:
self.curve.setVertexSpace(i, nodePath)
if useVertexColor:
self.curve.setExtendedVertex(i, vcd + 0, color[0])
self.curve.setExtendedVertex(i, vcd + 1, color[1])
self.curve.setExtendedVertex(i, vcd + 2, color[2])
self.curve.setExtendedVertex(i, vcd + 3, color[3])
if useVertexThickness:
self.curve.setExtendedVertex(i, vtd, thickness)
if self.knots != None:
for i in range(len(self.knots)):
self.curve.setKnot(i, self.knots[i])
self.ropeNode.resetBound(self)
def getPoints(self, len):
result = self.curve.evaluate(self)
startT = result.getStartT()
sizeT = result.getEndT() - startT
numPts = len
ropePts = []
for i in range(numPts):
pt = Point3()
result.evalPoint(sizeT * i / float(numPts - 1) + startT, pt)
ropePts.append(pt)
return ropePts
| true | true |
1c3af0a9f97c74c950a5a9ea22723d4bcb881eca | 10,372 | py | Python | Jumpscale/core/PlatformTypes.py | Dinaamagdy/jumpscale_core | dec5d42e368ab2b5b2e40837b71d27a59cb501ab | [
"Apache-2.0"
] | null | null | null | Jumpscale/core/PlatformTypes.py | Dinaamagdy/jumpscale_core | dec5d42e368ab2b5b2e40837b71d27a59cb501ab | [
"Apache-2.0"
] | null | null | null | Jumpscale/core/PlatformTypes.py | Dinaamagdy/jumpscale_core | dec5d42e368ab2b5b2e40837b71d27a59cb501ab | [
"Apache-2.0"
] | null | null | null | from Jumpscale import j
import sys
import os
import platform
# import re
# def _useELFtrick(file):
# fd = os.open(file, os.O_RDONLY)
# out = os.read(fd, 5)
# if out[0:4] != "\x7fELF":
# result = 0 # ELF trick fails...
# elif out[4] == '\x01':
# result = 32
# elif out[4] == '\x02':
# result = 64
# else:
# result = 0
# os.close(fd)
# return result
JSBASE = j.application.jsbase_get_class()
class PlatformTypes(JSBASE):
    """Registry of platform names and their parent/child relationships.

    Exposes the local platform (``myplatform``) and per-executor
    ``PlatformType`` instances, cached by executor id.
    """

    def __init__(self):
        self.__jslocation__ = "j.core.platformtype"
        JSBASE.__init__(self)
        self._myplatform = None
        # Maps a platform name to the list of its direct parent platforms.
        self._platformParents = {}
        self._platformParents["unix"] = ["generic"]
        self._platformParents["linux"] = ["unix"]
        self._platformParents["linux32"] = ["linux", "unix32"]
        self._platformParents["linux64"] = ["linux", "unix64"]
        self._platformParents["unix32"] = ["unix"]
        self._platformParents["unix64"] = ["unix"]
        self._platformParents["alpine"] = ["linux"]
        self._platformParents["alpine64"] = ["alpine", "linux64"]
        self._platformParents["alpine32"] = ["alpine", "linux32"]
        self._platformParents["ubuntu"] = ["linux"]
        self._platformParents["ubuntu64"] = ["ubuntu", "linux64"]
        self._platformParents["ubuntu32"] = ["ubuntu", "linux32"]
        self._platformParents["mint64"] = ["mint", "ubuntu64"]
        self._platformParents["mint32"] = ["mint", "ubuntu32"]
        self._platformParents["win"] = ["generic"]
        self._platformParents["win32"] = ["win"]
        self._platformParents["win64"] = ["win"]
        self._platformParents["win7"] = ["win"]
        self._platformParents["win8"] = ["win"]
        self._platformParents["vista"] = ["win"]
        self._platformParents["cygwin32"] = ["cygwin"]
        self._platformParents["cygwin64"] = ["cygwin"]
        self._platformParents["win2008_64"] = ["win64"]
        self._platformParents["win2012_64"] = ["win64"]
        self._platformParents["cygwin_nt-10.064"] = ["win64", "cygwin64"]
        self._platformParents["cygwin_nt-10.032"] = ["win32", "cygwin32"]
        self._platformParents["arch"] = ["linux"]
        self._platformParents["arch32"] = ["arch", "linux32"]
        self._platformParents["arch64"] = ["arch", "linux64"]
        self._platformParents["redhat"] = ["linux"]
        self._platformParents["redhat32"] = ["redhat", "linux32"]
        self._platformParents["redhat64"] = ["redhat", "linux64"]
        # darwin is not really linux, but treated as close enough here.
        self._platformParents["darwin32"] = ["darwin"]
        self._platformParents["darwin64"] = ["darwin"]
        self._platformParents["osx64"] = ["darwin64", "osx"]
        self._platformParents["debian"] = ["ubuntu"]
        self._platformParents["debian32"] = ["debian", "linux32"]
        self._platformParents["debian64"] = ["debian", "linux64"]
        # Cache of PlatformType instances, keyed by executor id.
        self._cache = {}

    @property
    def myplatform(self):
        """Lazily-created PlatformType for the local machine."""
        if self._myplatform is None:
            self._myplatform = PlatformType()
        return self._myplatform

    def getParents(self, name):
        """Return *name* followed by all of its transitive parent platforms."""
        res = [name]
        res = self._getParents(name, res)
        return res

    def _getParents(self, name, res=None):
        # Depth-first accumulation of parent names into ``res``.
        # ``res`` defaults to None rather than a mutable [] so repeated
        # calls can never share (and silently grow) the same list object.
        if res is None:
            res = []
        if name in self._platformParents:
            for item in self._platformParents[name]:
                if item not in res:
                    res.append(item)
                    res = self._getParents(item, res)
        return res

    def get(self, executor):
        """Return a cached PlatformType for *executor* (keyed by its id).

        @param executor: an executor object, None or $hostname:$port or
            $ipaddr:$port or $hostname or $ipaddr
        """
        key = executor.id
        if key not in self._cache:
            self._cache[key] = PlatformType(executor=executor)
        return self._cache[key]
class PlatformType(JSBASE):
    """Describes the platform (OS family, bitness, version) of one executor.

    Properties are computed lazily from either os.uname()/platform (local
    executor) or the executor's cached ``stateOnSystem`` dict (remote).
    """

    def __init__(self, name="", executor=None):
        # print("INIT PLATFORMTYPE:%s" % executor)
        JSBASE.__init__(self)
        # Platform name, e.g. "ubuntu64"; detected below when not given.
        self.myplatform = name
        # Lazily-computed caches for the properties below.
        self._platformtypes = None
        self._is64bit = None
        self._osversion = None
        self._hostname = None
        self._uname = None
        if executor is None:
            self.executor = j.tools.executorLocal
        else:
            self.executor = executor
        # print("PLATFORMTYPE:%s"%self.executor)
        if name == "":
            self._getPlatform()

    @property
    def platformtypes(self):
        # This platform plus all its transitive parents (empty names removed).
        if self._platformtypes is None:
            platformtypes = j.core.platformtype.getParents(self.myplatform)
            self._platformtypes = [
                item for item in platformtypes if item != ""]
        return self._platformtypes

    @property
    def uname(self):
        # Populates _hostname, _osversion, _cpu and _platform as a side
        # effect on first access.
        if self._uname is None:
            if self.executor.type=="local":
                unn = os.uname()
                self._hostname = unn.nodename
                # platform.linux_distribution() is removed in Python 3.8+;
                # NOTE(review): this path will break on modern interpreters.
                distro_info = platform.linux_distribution()
                if 'Ubuntu' in distro_info:
                    self._osversion = distro_info[1]
                elif 'ubuntu' in j.tools.executorLocal.stateOnSystem['os_type'].lower():
                    version = self.executor.execute('lsb_release -r')[1]
                    # version should be something like: 'Release:\t16.04\n
                    self._osversion = version.split(':')[-1].strip()
                else:
                    self._osversion = unn.release
                self._cpu = unn.machine
                self._platform = unn.sysname
            else:
                _uname = self.executor.stateOnSystem["uname"]
                if _uname.find("warning: setlocale") != -1:
                    raise RuntimeError("run js_shell 'j.tools.bash.local.locale_check()'")
                _uname = _uname.split("\n")[0]
                # Expected shape: "<os> <hostname> <version> <cpu> <platform>".
                _tmp, self._hostname, _osversion, self._cpu, self._platform = _uname.split(
                    " ")
                if self.osname == "darwin":
                    self._osversion = _osversion
                else:
                    # is for ubuntu
                    if "version_id" in self.executor.stateOnSystem:
                        self._osversion = self.executor.stateOnSystem["version_id"]
            # NOTE(review): on the local branch above, the name ``_uname`` is
            # never assigned, so this line raises NameError for local
            # executors -- confirm and fix.
            self._uname = _uname
        return self._uname

    @property
    def hostname(self):
        # Trigger lazy uname parsing, then strip any domain suffix.
        self.uname
        return self._hostname.split(".")[0]

    @property
    def is64bit(self):
        self.uname
        self._is64bit = "64" in self._cpu
        return self._is64bit

    @property
    def is32bit(self):
        self.uname
        # NOTE(review): stores the 32-bit answer into _is64bit -- the two
        # properties clobber the same cache slot; looks like a naming slip.
        self._is64bit = "32" in self._cpu
        return self._is64bit

    @property
    def osversion(self):
        self.uname
        if self._osversion is None:
            raise RuntimeError("need to fix, osversion should not be none")
            # NOTE(review): everything below this raise is unreachable dead
            # code -- it was presumably the original fallback that computed
            # the version from /etc/*-release; decide whether to restore or
            # delete it.
            # print("####OSVERSION")
            # TELL KRISTOF YOU GOT HERE
            rc, lsbcontent, err = self.executor.execute(
                "cat /etc/*-release", replaceArgs=False, showout=False, die=False)
            if rc == 0:
                import re
                try:
                    self._osname = re.findall(
                        "DISTRIB_ID=(\w+)", lsbcontent)[0].lower()
                    self._osversion = re.findall(
                        "DISTRIB_RELEASE=([\w.]+)", lsbcontent)[0].lower()
                except IndexError as e:
                    self._osversion = self.uname
            else:
                self._osversion = self.uname
        return self._osversion

    @property
    def osname(self):
        # OS family string as recorded in the executor's cached state.
        if "os_type" not in self.executor.stateOnSystem:
            return "unknown"
        return self.executor.stateOnSystem["os_type"]

    def checkMatch(self, match):
        """
        match is in form of linux64,darwin
        if any of the items e.g. darwin is in getMyRelevantPlatforms then return True
        """
        tocheck = self.platformtypes
        matches = [item.strip().lower()
                   for item in match.split(",") if item.strip() != ""]
        for match in matches:
            if match in tocheck:
                return True
        return False

    def _getPlatform(self):
        # Derive the platform name as "<osname><bits>", e.g. "ubuntu64".
        if self.is32bit:
            name = "%s32" % (self.osname)
        else:
            name = "%s64" % (self.osname)
        self.myplatform = name

    def has_parent(self, name):
        # True if *name* is this platform or one of its ancestors.
        return name in self.platformtypes

    def dieIfNotPlatform(self, platform):
        # Hard failure when running on an unsupported platform.
        if not self.has_parent(platform):
            raise j.exceptions.RuntimeError(
                "Can not continue, supported platform is %s, this platform is %s" %
                (platform, self.myplatform))

    @property
    def isUbuntu(self):
        return self.has_parent("ubuntu")

    @property
    def isMac(self):
        return self.has_parent("darwin")

    @property
    def isAlpine(self):
        return self.has_parent("alpine")

    @property
    def isUnix(self):
        '''Checks whether the platform is Unix-based'''
        return self.has_parent("unix")

    @property
    def isWindows(self):
        '''Checks whether the platform is Windows-based'''
        return self.has_parent("win")

    @property
    def isLinux(self):
        '''Checks whether the platform is Linux-based'''
        return self.has_parent("linux")

    @property
    def isXen(self):
        '''Checks whether Xen support is enabled'''
        return j.sal.process.checkProcessRunning('xen') == 0

    @property
    def isVirtualBox(self):
        '''Check whether the system supports VirtualBox'''
        return self.executor.stateOnSystem.get('vboxdrv', False)

    # @property
    # def isHyperV(self):
    #     '''Check whether the system supports HyperV'''
    #     # TODO: should be moved to _getPlatform & proper parent definition
    #     if self.isWindows:
    #         import winreg as wr
    #         try:
    #             virt = wr.OpenKey(
    #                 wr.HKEY_LOCAL_MACHINE,
    #                 'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization',
    #                 0,
    #                 wr.KEY_READ | wr.KEY_WOW64_64KEY)
    #             wr.QueryValueEx(virt, 'Version')
    #         except WindowsError:
    #             return False
    #         return True
    #     return False

    def __str__(self):
        return str(self.myplatform)

    __repr__ = __str__
| 34.573333 | 111 | 0.56209 | from Jumpscale import j
import sys
import os
import platform
application.jsbase_get_class()
class PlatformTypes(JSBASE):
def __init__(self):
self.__jslocation__ = "j.core.platformtype"
JSBASE.__init__(self)
self._myplatform = None
self._platformParents = {}
self._platformParents["unix"] = ["generic"]
self._platformParents["linux"] = ["unix"]
self._platformParents["linux32"] = ["linux", "unix32"]
self._platformParents["linux64"] = ["linux", "unix64"]
self._platformParents["unix32"] = ["unix"]
self._platformParents["unix64"] = ["unix"]
self._platformParents["alpine"] = ["linux"]
self._platformParents["alpine64"] = ["alpine", "linux64"]
self._platformParents["alpine32"] = ["alpine", "linux32"]
self._platformParents["ubuntu"] = ["linux"]
self._platformParents["ubuntu64"] = ["ubuntu", "linux64"]
self._platformParents["ubuntu32"] = ["ubuntu", "linux32"]
self._platformParents["mint64"] = ["mint", "ubuntu64"]
self._platformParents["mint32"] = ["mint", "ubuntu32"]
self._platformParents["win"] = ["generic"]
self._platformParents["win32"] = ["win"]
self._platformParents["win64"] = ["win"]
self._platformParents["win7"] = ["win"]
self._platformParents["win8"] = ["win"]
self._platformParents["vista"] = ["win"]
self._platformParents["cygwin32"] = ["cygwin"]
self._platformParents["cygwin64"] = ["cygwin"]
self._platformParents["win2008_64"] = ["win64"]
self._platformParents["win2012_64"] = ["win64"]
self._platformParents["cygwin_nt-10.064"] = ["win64", "cygwin64"]
self._platformParents["cygwin_nt-10.032"] = ["win32", "cygwin32"]
self._platformParents["arch"] = ["linux"]
self._platformParents["arch32"] = ["arch", "linux32"]
self._platformParents["arch64"] = ["arch", "linux64"]
self._platformParents["redhat"] = ["linux"]
self._platformParents["redhat32"] = ["redhat", "linux32"]
self._platformParents["redhat64"] = ["redhat", "linux64"]
self._platformParents["darwin32"] = ["darwin"]
self._platformParents["darwin64"] = ["darwin"]
self._platformParents["osx64"] = ["darwin64", "osx"]
self._platformParents["debian"] = ["ubuntu"]
self._platformParents["debian32"] = ["debian", "linux32"]
self._platformParents["debian64"] = ["debian", "linux64"]
self._cache = {}
@property
def myplatform(self):
if self._myplatform is None:
self._myplatform = PlatformType()
return self._myplatform
def getParents(self, name):
res = [name]
res = self._getParents(name, res)
return res
def _getParents(self, name, res=[]):
if name in self._platformParents:
for item in self._platformParents[name]:
if item not in res:
res.append(item)
res = self._getParents(item, res)
return res
def get(self, executor):
key = executor.id
if not key in self._cache:
self._cache[key] = PlatformType(executor=executor)
return self._cache[key]
class PlatformType(JSBASE):
def __init__(self, name="", executor=None):
JSBASE.__init__(self)
self.myplatform = name
self._platformtypes = None
self._is64bit = None
self._osversion = None
self._hostname = None
self._uname = None
if executor is None:
self.executor = j.tools.executorLocal
else:
self.executor = executor
if name == "":
self._getPlatform()
@property
def platformtypes(self):
if self._platformtypes is None:
platformtypes = j.core.platformtype.getParents(self.myplatform)
self._platformtypes = [
item for item in platformtypes if item != ""]
return self._platformtypes
@property
def uname(self):
if self._uname is None:
if self.executor.type=="local":
unn = os.uname()
self._hostname = unn.nodename
distro_info = platform.linux_distribution()
if 'Ubuntu' in distro_info:
self._osversion = distro_info[1]
elif 'ubuntu' in j.tools.executorLocal.stateOnSystem['os_type'].lower():
version = self.executor.execute('lsb_release -r')[1]
self._osversion = version.split(':')[-1].strip()
else:
self._osversion = unn.release
self._cpu = unn.machine
self._platform = unn.sysname
else:
_uname = self.executor.stateOnSystem["uname"]
if _uname.find("warning: setlocale") != -1:
raise RuntimeError("run js_shell 'j.tools.bash.local.locale_check()'")
_uname = _uname.split("\n")[0]
_tmp, self._hostname, _osversion, self._cpu, self._platform = _uname.split(
" ")
if self.osname == "darwin":
self._osversion = _osversion
else:
# is for ubuntu
if "version_id" in self.executor.stateOnSystem:
self._osversion = self.executor.stateOnSystem["version_id"]
self._uname = _uname
return self._uname
@property
def hostname(self):
self.uname
return self._hostname.split(".")[0]
@property
def is64bit(self):
self.uname
self._is64bit = "64" in self._cpu
return self._is64bit
@property
def is32bit(self):
self.uname
self._is64bit = "32" in self._cpu
return self._is64bit
@property
def osversion(self):
self.uname
if self._osversion is None:
raise RuntimeError("need to fix, osversion should not be none")
# print("####OSVERSION")
# TELL KRISTOF YOU GOT HERE
rc, lsbcontent, err = self.executor.execute(
"cat /etc/*-release", replaceArgs=False, showout=False, die=False)
if rc == 0:
import re
try:
self._osname = re.findall(
"DISTRIB_ID=(\w+)", lsbcontent)[0].lower()
self._osversion = re.findall(
"DISTRIB_RELEASE=([\w.]+)", lsbcontent)[0].lower()
except IndexError as e:
self._osversion = self.uname
else:
self._osversion = self.uname
return self._osversion
@property
def osname(self):
if "os_type" not in self.executor.stateOnSystem:
return "unknown"
return self.executor.stateOnSystem["os_type"]
def checkMatch(self, match):
tocheck = self.platformtypes
matches = [item.strip().lower()
for item in match.split(",") if item.strip() != ""]
for match in matches:
if match in tocheck:
return True
return False
def _getPlatform(self):
if self.is32bit:
name = "%s32" % (self.osname)
else:
name = "%s64" % (self.osname)
self.myplatform = name
def has_parent(self, name):
return name in self.platformtypes
def dieIfNotPlatform(self, platform):
if not self.has_parent(platform):
raise j.exceptions.RuntimeError(
"Can not continue, supported platform is %s, this platform is %s" %
(platform, self.myplatform))
@property
def isUbuntu(self):
return self.has_parent("ubuntu")
@property
def isMac(self):
return self.has_parent("darwin")
@property
def isAlpine(self):
return self.has_parent("alpine")
@property
def isUnix(self):
return self.has_parent("unix")
@property
def isWindows(self):
return self.has_parent("win")
@property
def isLinux(self):
return self.has_parent("linux")
@property
def isXen(self):
return j.sal.process.checkProcessRunning('xen') == 0
@property
def isVirtualBox(self):
return self.executor.stateOnSystem.get('vboxdrv', False)
# @property
# def isHyperV(self):
# '''Check whether the system supports HyperV'''
# # TODO: should be moved to _getPlatform & proper parent definition
# if self.isWindows:
# import winreg as wr
# try:
# virt = wr.OpenKey(
# wr.HKEY_LOCAL_MACHINE,
# 'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization',
# 0,
# wr.KEY_READ | wr.KEY_WOW64_64KEY)
# wr.QueryValueEx(virt, 'Version')
# except WindowsError:
# return False
# return True
# return False
def __str__(self):
return str(self.myplatform)
__repr__ = __str__
| true | true |
1c3af165e3c85db39d0dd8c954af057b250efa94 | 40,450 | py | Python | chromewhip/protocol/debugger.py | Cosive/chromewhip | 3179aeeea898c20c8ba4fbb3926c7c696dfee165 | [
"MIT"
] | null | null | null | chromewhip/protocol/debugger.py | Cosive/chromewhip | 3179aeeea898c20c8ba4fbb3926c7c696dfee165 | [
"MIT"
] | 1 | 2022-01-26T02:10:57.000Z | 2022-02-07T21:07:18.000Z | chromewhip/protocol/debugger.py | Cosive/chromewhip | 3179aeeea898c20c8ba4fbb3926c7c696dfee165 | [
"MIT"
] | null | null | null | # noinspection PyPep8
# noinspection PyArgumentList
"""
AUTO-GENERATED BY `scripts/generate_protocol.py` using `data/browser_protocol.json`
and `data/js_protocol.json` as inputs! Please do not modify this file.
"""
import logging
from typing import Any, Optional, Union
from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase
log = logging.getLogger(__name__)
from chromewhip.protocol import runtime as Runtime
# BreakpointId: Breakpoint identifier.
BreakpointId = str
# CallFrameId: Call frame identifier.
CallFrameId = str
# Location: Location in the source code.
class Location(ChromeTypeBase):
    """Location in the source code: a script id plus line and optional column."""

    def __init__(self,
                 scriptId: Union['Runtime.ScriptId'],
                 lineNumber: Union['int'],
                 columnNumber: Optional['int'] = None,
                 ):
        # Store the protocol fields verbatim.
        self.scriptId, self.lineNumber, self.columnNumber = (
            scriptId, lineNumber, columnNumber)
class ScriptPosition(ChromeTypeBase):
    """Location in the source code, expressed as line and column numbers."""

    def __init__(self,
                 lineNumber: Union['int'],
                 columnNumber: Union['int'],
                 ):
        self.lineNumber, self.columnNumber = lineNumber, columnNumber
# LocationRange: Location range within one script.
class LocationRange(ChromeTypeBase):
    """Location range within one script, bounded by two ScriptPositions."""

    def __init__(self,
                 scriptId: Union['Runtime.ScriptId'],
                 start: Union['ScriptPosition'],
                 end: Union['ScriptPosition'],
                 ):
        self.scriptId = scriptId
        self.start, self.end = start, end
# CallFrame: JavaScript call frame. Array of call frames form the call stack.
class CallFrame(ChromeTypeBase):
    """JavaScript call frame; an array of these forms the call stack."""

    def __init__(self,
                 callFrameId: Union['CallFrameId'],
                 functionName: Union['str'],
                 location: Union['Location'],
                 url: Union['str'],
                 scopeChain: Union['[Scope]'],
                 this: Union['Runtime.RemoteObject'],
                 functionLocation: Optional['Location'] = None,
                 returnValue: Optional['Runtime.RemoteObject'] = None,
                 ):
        # Attributes are assigned in protocol declaration order, which differs
        # from the parameter order (optional params are listed last above).
        self.callFrameId = callFrameId
        self.functionName = functionName
        self.functionLocation = functionLocation
        self.location = location
        self.url = url
        self.scopeChain = scopeChain
        self.this = this
        self.returnValue = returnValue
# Scope: Scope description.
class Scope(ChromeTypeBase):
    """Scope description: the scope type plus the object holding its variables."""

    def __init__(self,
                 type: Union['str'],
                 object: Union['Runtime.RemoteObject'],
                 name: Optional['str'] = None,
                 startLocation: Optional['Location'] = None,
                 endLocation: Optional['Location'] = None,
                 ):
        self.type = type
        self.object = object
        self.name = name
        self.startLocation = startLocation
        self.endLocation = endLocation
# SearchMatch: Search match for resource.
class SearchMatch(ChromeTypeBase):
    """Search match for a resource: the matching line number and its content."""

    def __init__(self,
                 lineNumber: Union['float'],
                 lineContent: Union['str'],
                 ):
        self.lineNumber, self.lineContent = lineNumber, lineContent
# BreakLocation:
class BreakLocation(ChromeTypeBase):
    """A possible breakpoint location within a script."""

    def __init__(self,
                 scriptId: Union['Runtime.ScriptId'],
                 lineNumber: Union['int'],
                 columnNumber: Optional['int'] = None,
                 type: Optional['str'] = None,
                 ):
        (self.scriptId, self.lineNumber,
         self.columnNumber, self.type) = scriptId, lineNumber, columnNumber, type
# ScriptLanguage: Enum of possible script languages.
ScriptLanguage = str
# DebugSymbols: Debug symbols available for a wasm script.
class DebugSymbols(ChromeTypeBase):
    """Debug symbols available for a wasm script."""

    def __init__(self,
                 type: Union['str'],
                 externalURL: Optional['str'] = None,
                 ):
        self.type, self.externalURL = type, externalURL
class Debugger(PayloadMixin):
""" Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing
breakpoints, stepping through execution, exploring stack traces, etc.
"""
@classmethod
def continueToLocation(cls,
                       location: Union['Location'],
                       targetCallFrames: Optional['str'] = None,
                       ):
    """Continues execution until specific location is reached.

    :param location: Location to continue to.
    :type location: Location
    :param targetCallFrames:
    :type targetCallFrames: str
    """
    params = {
        "location": location,
        "targetCallFrames": targetCallFrames,
    }
    # This command carries no response payload, hence the None converter.
    return cls.build_send_payload("continueToLocation", params), None
@classmethod
def disable(cls):
    """Disables debugger for given page."""
    # No parameters and no response payload.
    return cls.build_send_payload("disable", {}), None
@classmethod
def enable(cls,
maxScriptsCacheSize: Optional['float'] = None,
):
"""Enables debugger for the given page. Clients should not assume that the debugging has been
enabled until the result for this command is received.
:param maxScriptsCacheSize: The maximum size in bytes of collected scripts (not referenced by other heap objects)
the debugger can hold. Puts no limit if paramter is omitted.
:type maxScriptsCacheSize: float
"""
return (
cls.build_send_payload("enable", {
"maxScriptsCacheSize": maxScriptsCacheSize,
}),
cls.convert_payload({
"debuggerId": {
"class": Runtime.UniqueDebuggerId,
"optional": False
},
})
)
@classmethod
def evaluateOnCallFrame(cls,
callFrameId: Union['CallFrameId'],
expression: Union['str'],
objectGroup: Optional['str'] = None,
includeCommandLineAPI: Optional['bool'] = None,
silent: Optional['bool'] = None,
returnByValue: Optional['bool'] = None,
generatePreview: Optional['bool'] = None,
throwOnSideEffect: Optional['bool'] = None,
timeout: Optional['Runtime.TimeDelta'] = None,
):
"""Evaluates expression on a given call frame.
:param callFrameId: Call frame identifier to evaluate on.
:type callFrameId: CallFrameId
:param expression: Expression to evaluate.
:type expression: str
:param objectGroup: String object group name to put result into (allows rapid releasing resulting object handles
using `releaseObjectGroup`).
:type objectGroup: str
:param includeCommandLineAPI: Specifies whether command line API should be available to the evaluated expression, defaults
to false.
:type includeCommandLineAPI: bool
:param silent: In silent mode exceptions thrown during evaluation are not reported and do not pause
execution. Overrides `setPauseOnException` state.
:type silent: bool
:param returnByValue: Whether the result is expected to be a JSON object that should be sent by value.
:type returnByValue: bool
:param generatePreview: Whether preview should be generated for the result.
:type generatePreview: bool
:param throwOnSideEffect: Whether to throw an exception if side effect cannot be ruled out during evaluation.
:type throwOnSideEffect: bool
:param timeout: Terminate execution after timing out (number of milliseconds).
:type timeout: Runtime.TimeDelta
"""
return (
cls.build_send_payload("evaluateOnCallFrame", {
"callFrameId": callFrameId,
"expression": expression,
"objectGroup": objectGroup,
"includeCommandLineAPI": includeCommandLineAPI,
"silent": silent,
"returnByValue": returnByValue,
"generatePreview": generatePreview,
"throwOnSideEffect": throwOnSideEffect,
"timeout": timeout,
}),
cls.convert_payload({
"result": {
"class": Runtime.RemoteObject,
"optional": False
},
"exceptionDetails": {
"class": Runtime.ExceptionDetails,
"optional": True
},
})
)
@classmethod
def getPossibleBreakpoints(cls,
start: Union['Location'],
end: Optional['Location'] = None,
restrictToFunction: Optional['bool'] = None,
):
"""Returns possible locations for breakpoint. scriptId in start and end range locations should be
the same.
:param start: Start of range to search possible breakpoint locations in.
:type start: Location
:param end: End of range to search possible breakpoint locations in (excluding). When not specified, end
of scripts is used as end of range.
:type end: Location
:param restrictToFunction: Only consider locations which are in the same (non-nested) function as start.
:type restrictToFunction: bool
"""
return (
cls.build_send_payload("getPossibleBreakpoints", {
"start": start,
"end": end,
"restrictToFunction": restrictToFunction,
}),
cls.convert_payload({
"locations": {
"class": [BreakLocation],
"optional": False
},
})
)
@classmethod
def getScriptSource(cls,
scriptId: Union['Runtime.ScriptId'],
):
"""Returns source for the script with given id.
:param scriptId: Id of the script to get source for.
:type scriptId: Runtime.ScriptId
"""
return (
cls.build_send_payload("getScriptSource", {
"scriptId": scriptId,
}),
cls.convert_payload({
"scriptSource": {
"class": str,
"optional": False
},
"bytecode": {
"class": str,
"optional": True
},
})
)
@classmethod
def getWasmBytecode(cls,
scriptId: Union['Runtime.ScriptId'],
):
"""This command is deprecated. Use getScriptSource instead.
:param scriptId: Id of the Wasm script to get source for.
:type scriptId: Runtime.ScriptId
"""
return (
cls.build_send_payload("getWasmBytecode", {
"scriptId": scriptId,
}),
cls.convert_payload({
"bytecode": {
"class": str,
"optional": False
},
})
)
@classmethod
def getStackTrace(cls,
stackTraceId: Union['Runtime.StackTraceId'],
):
"""Returns stack trace with given `stackTraceId`.
:param stackTraceId:
:type stackTraceId: Runtime.StackTraceId
"""
return (
cls.build_send_payload("getStackTrace", {
"stackTraceId": stackTraceId,
}),
cls.convert_payload({
"stackTrace": {
"class": Runtime.StackTrace,
"optional": False
},
})
)
@classmethod
def pause(cls):
"""Stops on the next JavaScript statement.
"""
return (
cls.build_send_payload("pause", {
}),
None
)
@classmethod
def pauseOnAsyncCall(cls,
parentStackTraceId: Union['Runtime.StackTraceId'],
):
"""
:param parentStackTraceId: Debugger will pause when async call with given stack trace is started.
:type parentStackTraceId: Runtime.StackTraceId
"""
return (
cls.build_send_payload("pauseOnAsyncCall", {
"parentStackTraceId": parentStackTraceId,
}),
None
)
@classmethod
def removeBreakpoint(cls,
breakpointId: Union['BreakpointId'],
):
"""Removes JavaScript breakpoint.
:param breakpointId:
:type breakpointId: BreakpointId
"""
return (
cls.build_send_payload("removeBreakpoint", {
"breakpointId": breakpointId,
}),
None
)
@classmethod
def restartFrame(cls,
callFrameId: Union['CallFrameId'],
):
"""Restarts particular call frame from the beginning.
:param callFrameId: Call frame identifier to evaluate on.
:type callFrameId: CallFrameId
"""
return (
cls.build_send_payload("restartFrame", {
"callFrameId": callFrameId,
}),
cls.convert_payload({
"callFrames": {
"class": [CallFrame],
"optional": False
},
"asyncStackTrace": {
"class": Runtime.StackTrace,
"optional": True
},
"asyncStackTraceId": {
"class": Runtime.StackTraceId,
"optional": True
},
})
)
@classmethod
def resume(cls,
terminateOnResume: Optional['bool'] = None,
):
"""Resumes JavaScript execution.
:param terminateOnResume: Set to true to terminate execution upon resuming execution. In contrast
to Runtime.terminateExecution, this will allows to execute further
JavaScript (i.e. via evaluation) until execution of the paused code
is actually resumed, at which point termination is triggered.
If execution is currently not paused, this parameter has no effect.
:type terminateOnResume: bool
"""
return (
cls.build_send_payload("resume", {
"terminateOnResume": terminateOnResume,
}),
None
)
@classmethod
def searchInContent(cls,
scriptId: Union['Runtime.ScriptId'],
query: Union['str'],
caseSensitive: Optional['bool'] = None,
isRegex: Optional['bool'] = None,
):
"""Searches for given string in script content.
:param scriptId: Id of the script to search in.
:type scriptId: Runtime.ScriptId
:param query: String to search for.
:type query: str
:param caseSensitive: If true, search is case sensitive.
:type caseSensitive: bool
:param isRegex: If true, treats string parameter as regex.
:type isRegex: bool
"""
return (
cls.build_send_payload("searchInContent", {
"scriptId": scriptId,
"query": query,
"caseSensitive": caseSensitive,
"isRegex": isRegex,
}),
cls.convert_payload({
"result": {
"class": [SearchMatch],
"optional": False
},
})
)
@classmethod
def setAsyncCallStackDepth(cls,
maxDepth: Union['int'],
):
"""Enables or disables async call stacks tracking.
:param maxDepth: Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
call stacks (default).
:type maxDepth: int
"""
return (
cls.build_send_payload("setAsyncCallStackDepth", {
"maxDepth": maxDepth,
}),
None
)
@classmethod
def setBlackboxPatterns(cls,
patterns: Union['[]'],
):
"""Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in
scripts with url matching one of the patterns. VM will try to leave blackboxed script by
performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
:param patterns: Array of regexps that will be used to check script url for blackbox state.
:type patterns: []
"""
return (
cls.build_send_payload("setBlackboxPatterns", {
"patterns": patterns,
}),
None
)
@classmethod
def setBlackboxedRanges(cls,
scriptId: Union['Runtime.ScriptId'],
positions: Union['[ScriptPosition]'],
):
"""Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted
scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
Positions array contains positions where blackbox state is changed. First interval isn't
blackboxed. Array should be sorted.
:param scriptId: Id of the script.
:type scriptId: Runtime.ScriptId
:param positions:
:type positions: [ScriptPosition]
"""
return (
cls.build_send_payload("setBlackboxedRanges", {
"scriptId": scriptId,
"positions": positions,
}),
None
)
@classmethod
def setBreakpoint(cls,
location: Union['Location'],
condition: Optional['str'] = None,
):
"""Sets JavaScript breakpoint at a given location.
:param location: Location to set breakpoint in.
:type location: Location
:param condition: Expression to use as a breakpoint condition. When specified, debugger will only stop on the
breakpoint if this expression evaluates to true.
:type condition: str
"""
return (
cls.build_send_payload("setBreakpoint", {
"location": location,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
"actualLocation": {
"class": Location,
"optional": False
},
})
)
@classmethod
def setInstrumentationBreakpoint(cls,
instrumentation: Union['str'],
):
"""Sets instrumentation breakpoint.
:param instrumentation: Instrumentation name.
:type instrumentation: str
"""
return (
cls.build_send_payload("setInstrumentationBreakpoint", {
"instrumentation": instrumentation,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
})
)
@classmethod
def setBreakpointByUrl(cls,
lineNumber: Union['int'],
url: Optional['str'] = None,
urlRegex: Optional['str'] = None,
scriptHash: Optional['str'] = None,
columnNumber: Optional['int'] = None,
condition: Optional['str'] = None,
):
"""Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this
command is issued, all existing parsed scripts will have breakpoints resolved and returned in
`locations` property. Further matching script parsing will result in subsequent
`breakpointResolved` events issued. This logical breakpoint will survive page reloads.
:param lineNumber: Line number to set breakpoint at.
:type lineNumber: int
:param url: URL of the resources to set breakpoint on.
:type url: str
:param urlRegex: Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or
`urlRegex` must be specified.
:type urlRegex: str
:param scriptHash: Script hash of the resources to set breakpoint on.
:type scriptHash: str
:param columnNumber: Offset in the line to set breakpoint at.
:type columnNumber: int
:param condition: Expression to use as a breakpoint condition. When specified, debugger will only stop on the
breakpoint if this expression evaluates to true.
:type condition: str
"""
return (
cls.build_send_payload("setBreakpointByUrl", {
"lineNumber": lineNumber,
"url": url,
"urlRegex": urlRegex,
"scriptHash": scriptHash,
"columnNumber": columnNumber,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
"locations": {
"class": [Location],
"optional": False
},
})
)
@classmethod
def setBreakpointOnFunctionCall(cls,
objectId: Union['Runtime.RemoteObjectId'],
condition: Optional['str'] = None,
):
"""Sets JavaScript breakpoint before each call to the given function.
If another function was created from the same source as a given one,
calling it will also trigger the breakpoint.
:param objectId: Function object id.
:type objectId: Runtime.RemoteObjectId
:param condition: Expression to use as a breakpoint condition. When specified, debugger will
stop on the breakpoint if this expression evaluates to true.
:type condition: str
"""
return (
cls.build_send_payload("setBreakpointOnFunctionCall", {
"objectId": objectId,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
})
)
@classmethod
def setBreakpointsActive(cls,
active: Union['bool'],
):
"""Activates / deactivates all breakpoints on the page.
:param active: New value for breakpoints active state.
:type active: bool
"""
return (
cls.build_send_payload("setBreakpointsActive", {
"active": active,
}),
None
)
@classmethod
def setPauseOnExceptions(cls,
state: Union['str'],
):
"""Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or
no exceptions. Initial pause on exceptions state is `none`.
:param state: Pause on exceptions mode.
:type state: str
"""
return (
cls.build_send_payload("setPauseOnExceptions", {
"state": state,
}),
None
)
@classmethod
def setReturnValue(cls,
newValue: Union['Runtime.CallArgument'],
):
"""Changes return value in top frame. Available only at return break position.
:param newValue: New return value.
:type newValue: Runtime.CallArgument
"""
return (
cls.build_send_payload("setReturnValue", {
"newValue": newValue,
}),
None
)
@classmethod
def setScriptSource(cls,
scriptId: Union['Runtime.ScriptId'],
scriptSource: Union['str'],
dryRun: Optional['bool'] = None,
):
"""Edits JavaScript source live.
:param scriptId: Id of the script to edit.
:type scriptId: Runtime.ScriptId
:param scriptSource: New content of the script.
:type scriptSource: str
:param dryRun: If true the change will not actually be applied. Dry run may be used to get result
description without actually modifying the code.
:type dryRun: bool
"""
return (
cls.build_send_payload("setScriptSource", {
"scriptId": scriptId,
"scriptSource": scriptSource,
"dryRun": dryRun,
}),
cls.convert_payload({
"callFrames": {
"class": [CallFrame],
"optional": True
},
"stackChanged": {
"class": bool,
"optional": True
},
"asyncStackTrace": {
"class": Runtime.StackTrace,
"optional": True
},
"asyncStackTraceId": {
"class": Runtime.StackTraceId,
"optional": True
},
"exceptionDetails": {
"class": Runtime.ExceptionDetails,
"optional": True
},
})
)
@classmethod
def setSkipAllPauses(cls,
skip: Union['bool'],
):
"""Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).
:param skip: New value for skip pauses state.
:type skip: bool
"""
return (
cls.build_send_payload("setSkipAllPauses", {
"skip": skip,
}),
None
)
@classmethod
def setVariableValue(cls,
scopeNumber: Union['int'],
variableName: Union['str'],
newValue: Union['Runtime.CallArgument'],
callFrameId: Union['CallFrameId'],
):
"""Changes value of variable in a callframe. Object-based scopes are not supported and must be
mutated manually.
:param scopeNumber: 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'
scope types are allowed. Other scopes could be manipulated manually.
:type scopeNumber: int
:param variableName: Variable name.
:type variableName: str
:param newValue: New variable value.
:type newValue: Runtime.CallArgument
:param callFrameId: Id of callframe that holds variable.
:type callFrameId: CallFrameId
"""
return (
cls.build_send_payload("setVariableValue", {
"scopeNumber": scopeNumber,
"variableName": variableName,
"newValue": newValue,
"callFrameId": callFrameId,
}),
None
)
@classmethod
def stepInto(cls,
breakOnAsyncCall: Optional['bool'] = None,
skipList: Optional['[LocationRange]'] = None,
):
"""Steps into the function call.
:param breakOnAsyncCall: Debugger will pause on the execution of the first async task which was scheduled
before next pause.
:type breakOnAsyncCall: bool
:param skipList: The skipList specifies location ranges that should be skipped on step into.
:type skipList: [LocationRange]
"""
return (
cls.build_send_payload("stepInto", {
"breakOnAsyncCall": breakOnAsyncCall,
"skipList": skipList,
}),
None
)
@classmethod
def stepOut(cls):
"""Steps out of the function call.
"""
return (
cls.build_send_payload("stepOut", {
}),
None
)
@classmethod
def stepOver(cls,
skipList: Optional['[LocationRange]'] = None,
):
"""Steps over the statement.
:param skipList: The skipList specifies location ranges that should be skipped on step over.
:type skipList: [LocationRange]
"""
return (
cls.build_send_payload("stepOver", {
"skipList": skipList,
}),
None
)
class BreakpointResolvedEvent(BaseEvent):
    """Event wrapper for the 'Debugger.breakpointResolved' CDP notification."""

    js_name = 'Debugger.breakpointResolved'
    # Events are keyed (hashed) on breakpointId so awaiters can match them.
    hashable = ['breakpointId']
    is_hashable = True

    def __init__(self,
                 breakpointId: Union['BreakpointId', dict],
                 location: Union['Location', dict],
                 ):
        # Raw protocol payloads arrive as dicts; coerce them to typed objects.
        # (breakpointId is a str alias, so in practice it arrives as a plain
        # string and this branch is generated boilerplate.)
        if isinstance(breakpointId, dict):
            breakpointId = BreakpointId(**breakpointId)
        self.breakpointId = breakpointId
        if isinstance(location, dict):
            location = Location(**location)
        self.location = location

    @classmethod
    def build_hash(cls, breakpointId):
        """Build the event hash string from the identifying parameters."""
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class PausedEvent(BaseEvent):
    """Event wrapper for the 'Debugger.paused' CDP notification."""

    js_name = 'Debugger.paused'
    hashable = ['asyncStackTraceId', 'asyncCallStackTraceId']
    is_hashable = True

    def __init__(self,
                 callFrames: Union['[CallFrame]', dict],
                 reason: Union['str', dict],
                 data: Union['dict', dict, None] = None,
                 hitBreakpoints: Union['[]', dict, None] = None,
                 asyncStackTrace: Union['Runtime.StackTrace', dict, None] = None,
                 asyncStackTraceId: Union['Runtime.StackTraceId', dict, None] = None,
                 asyncCallStackTraceId: Union['Runtime.StackTraceId', dict, None] = None,
                 ):
        # BUGFIX: the generated code did `[CallFrame](**callFrames)`, which
        # "calls" a list literal and raises TypeError if ever executed.  The
        # protocol delivers `callFrames` as an array of dicts, so convert
        # each element to a typed CallFrame instead.
        if isinstance(callFrames, list):
            callFrames = [CallFrame(**f) if isinstance(f, dict) else f
                          for f in callFrames]
        self.callFrames = callFrames
        # `reason` is a plain string; the previous `str(**reason)` branch was
        # uncallable generated boilerplate and has been dropped.
        self.reason = reason
        if isinstance(data, dict):
            data = dict(**data)
        self.data = data
        # BUGFIX: `[](**hitBreakpoints)` was likewise uncallable; the value is
        # an array of breakpoint id strings and is stored as received.
        self.hitBreakpoints = hitBreakpoints
        if isinstance(asyncStackTrace, dict):
            asyncStackTrace = Runtime.StackTrace(**asyncStackTrace)
        self.asyncStackTrace = asyncStackTrace
        if isinstance(asyncStackTraceId, dict):
            asyncStackTraceId = Runtime.StackTraceId(**asyncStackTraceId)
        self.asyncStackTraceId = asyncStackTraceId
        if isinstance(asyncCallStackTraceId, dict):
            asyncCallStackTraceId = Runtime.StackTraceId(**asyncCallStackTraceId)
        self.asyncCallStackTraceId = asyncCallStackTraceId

    @classmethod
    def build_hash(cls, asyncStackTraceId, asyncCallStackTraceId):
        """Build the event hash string from the identifying parameters."""
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class ResumedEvent(BaseEvent):
    """Event wrapper for the 'Debugger.resumed' CDP notification (no payload)."""

    js_name = 'Debugger.resumed'
    # No identifying fields, so instances cannot be keyed/awaited by hash.
    hashable = []
    is_hashable = False

    def __init__(self):
        pass

    @classmethod
    def build_hash(cls):
        raise ValueError('Unable to build hash for non-hashable type')
class ScriptFailedToParseEvent(BaseEvent):
js_name = 'Debugger.scriptFailedToParse'
hashable = ['executionContextId', 'scriptId']
is_hashable = True
def __init__(self,
scriptId: Union['Runtime.ScriptId', dict],
url: Union['str', dict],
startLine: Union['int', dict],
startColumn: Union['int', dict],
endLine: Union['int', dict],
endColumn: Union['int', dict],
executionContextId: Union['Runtime.ExecutionContextId', dict],
hash: Union['str', dict],
executionContextAuxData: Union['dict', dict, None] = None,
sourceMapURL: Union['str', dict, None] = None,
hasSourceURL: Union['bool', dict, None] = None,
isModule: Union['bool', dict, None] = None,
length: Union['int', dict, None] = None,
stackTrace: Union['Runtime.StackTrace', dict, None] = None,
codeOffset: Union['int', dict, None] = None,
scriptLanguage: Union['Debugger.ScriptLanguage', dict, None] = None,
embedderName: Union['str', dict, None] = None,
):
if isinstance(scriptId, dict):
scriptId = Runtime.ScriptId(**scriptId)
self.scriptId = scriptId
if isinstance(url, dict):
url = str(**url)
self.url = url
if isinstance(startLine, dict):
startLine = int(**startLine)
self.startLine = startLine
if isinstance(startColumn, dict):
startColumn = int(**startColumn)
self.startColumn = startColumn
if isinstance(endLine, dict):
endLine = int(**endLine)
self.endLine = endLine
if isinstance(endColumn, dict):
endColumn = int(**endColumn)
self.endColumn = endColumn
if isinstance(executionContextId, dict):
executionContextId = Runtime.ExecutionContextId(**executionContextId)
self.executionContextId = executionContextId
if isinstance(hash, dict):
hash = str(**hash)
self.hash = hash
if isinstance(executionContextAuxData, dict):
executionContextAuxData = dict(**executionContextAuxData)
self.executionContextAuxData = executionContextAuxData
if isinstance(sourceMapURL, dict):
sourceMapURL = str(**sourceMapURL)
self.sourceMapURL = sourceMapURL
if isinstance(hasSourceURL, dict):
hasSourceURL = bool(**hasSourceURL)
self.hasSourceURL = hasSourceURL
if isinstance(isModule, dict):
isModule = bool(**isModule)
self.isModule = isModule
if isinstance(length, dict):
length = int(**length)
self.length = length
if isinstance(stackTrace, dict):
stackTrace = Runtime.StackTrace(**stackTrace)
self.stackTrace = stackTrace
if isinstance(codeOffset, dict):
codeOffset = int(**codeOffset)
self.codeOffset = codeOffset
if isinstance(scriptLanguage, dict):
scriptLanguage = Debugger.ScriptLanguage(**scriptLanguage)
self.scriptLanguage = scriptLanguage
if isinstance(embedderName, dict):
embedderName = str(**embedderName)
self.embedderName = embedderName
@classmethod
def build_hash(cls, executionContextId, scriptId):
kwargs = locals()
kwargs.pop('cls')
serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
h = '{}:{}'.format(cls.js_name, serialized_id_params)
log.debug('generated hash = %s' % h)
return h
class ScriptParsedEvent(BaseEvent):
js_name = 'Debugger.scriptParsed'
hashable = ['executionContextId', 'scriptId']
is_hashable = True
def __init__(self,
scriptId: Union['Runtime.ScriptId', dict],
url: Union['str', dict],
startLine: Union['int', dict],
startColumn: Union['int', dict],
endLine: Union['int', dict],
endColumn: Union['int', dict],
executionContextId: Union['Runtime.ExecutionContextId', dict],
hash: Union['str', dict],
executionContextAuxData: Union['dict', dict, None] = None,
isLiveEdit: Union['bool', dict, None] = None,
sourceMapURL: Union['str', dict, None] = None,
hasSourceURL: Union['bool', dict, None] = None,
isModule: Union['bool', dict, None] = None,
length: Union['int', dict, None] = None,
stackTrace: Union['Runtime.StackTrace', dict, None] = None,
codeOffset: Union['int', dict, None] = None,
scriptLanguage: Union['Debugger.ScriptLanguage', dict, None] = None,
debugSymbols: Union['Debugger.DebugSymbols', dict, None] = None,
embedderName: Union['str', dict, None] = None,
):
if isinstance(scriptId, dict):
scriptId = Runtime.ScriptId(**scriptId)
self.scriptId = scriptId
if isinstance(url, dict):
url = str(**url)
self.url = url
if isinstance(startLine, dict):
startLine = int(**startLine)
self.startLine = startLine
if isinstance(startColumn, dict):
startColumn = int(**startColumn)
self.startColumn = startColumn
if isinstance(endLine, dict):
endLine = int(**endLine)
self.endLine = endLine
if isinstance(endColumn, dict):
endColumn = int(**endColumn)
self.endColumn = endColumn
if isinstance(executionContextId, dict):
executionContextId = Runtime.ExecutionContextId(**executionContextId)
self.executionContextId = executionContextId
if isinstance(hash, dict):
hash = str(**hash)
self.hash = hash
if isinstance(executionContextAuxData, dict):
executionContextAuxData = dict(**executionContextAuxData)
self.executionContextAuxData = executionContextAuxData
if isinstance(isLiveEdit, dict):
isLiveEdit = bool(**isLiveEdit)
self.isLiveEdit = isLiveEdit
if isinstance(sourceMapURL, dict):
sourceMapURL = str(**sourceMapURL)
self.sourceMapURL = sourceMapURL
if isinstance(hasSourceURL, dict):
hasSourceURL = bool(**hasSourceURL)
self.hasSourceURL = hasSourceURL
if isinstance(isModule, dict):
isModule = bool(**isModule)
self.isModule = isModule
if isinstance(length, dict):
length = int(**length)
self.length = length
if isinstance(stackTrace, dict):
stackTrace = Runtime.StackTrace(**stackTrace)
self.stackTrace = stackTrace
if isinstance(codeOffset, dict):
codeOffset = int(**codeOffset)
self.codeOffset = codeOffset
if isinstance(scriptLanguage, dict):
scriptLanguage = Debugger.ScriptLanguage(**scriptLanguage)
self.scriptLanguage = scriptLanguage
if isinstance(debugSymbols, dict):
debugSymbols = Debugger.DebugSymbols(**debugSymbols)
self.debugSymbols = debugSymbols
if isinstance(embedderName, dict):
embedderName = str(**embedderName)
self.embedderName = embedderName
@classmethod
def build_hash(cls, executionContextId, scriptId):
kwargs = locals()
kwargs.pop('cls')
serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
h = '{}:{}'.format(cls.js_name, serialized_id_params)
log.debug('generated hash = %s' % h)
return h
| 36.873291 | 130 | 0.548925 |
import logging
from typing import Any, Optional, Union
from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase
log = logging.getLogger(__name__)
from chromewhip.protocol import runtime as Runtime
BreakpointId = str
CallFrameId = str
class Location(ChromeTypeBase):
def __init__(self,
scriptId: Union['Runtime.ScriptId'],
lineNumber: Union['int'],
columnNumber: Optional['int'] = None,
):
self.scriptId = scriptId
self.lineNumber = lineNumber
self.columnNumber = columnNumber
class ScriptPosition(ChromeTypeBase):
def __init__(self,
lineNumber: Union['int'],
columnNumber: Union['int'],
):
self.lineNumber = lineNumber
self.columnNumber = columnNumber
class LocationRange(ChromeTypeBase):
def __init__(self,
scriptId: Union['Runtime.ScriptId'],
start: Union['ScriptPosition'],
end: Union['ScriptPosition'],
):
self.scriptId = scriptId
self.start = start
self.end = end
class CallFrame(ChromeTypeBase):
def __init__(self,
callFrameId: Union['CallFrameId'],
functionName: Union['str'],
location: Union['Location'],
url: Union['str'],
scopeChain: Union['[Scope]'],
this: Union['Runtime.RemoteObject'],
functionLocation: Optional['Location'] = None,
returnValue: Optional['Runtime.RemoteObject'] = None,
):
self.callFrameId = callFrameId
self.functionName = functionName
self.functionLocation = functionLocation
self.location = location
self.url = url
self.scopeChain = scopeChain
self.this = this
self.returnValue = returnValue
class Scope(ChromeTypeBase):
def __init__(self,
type: Union['str'],
object: Union['Runtime.RemoteObject'],
name: Optional['str'] = None,
startLocation: Optional['Location'] = None,
endLocation: Optional['Location'] = None,
):
self.type = type
self.object = object
self.name = name
self.startLocation = startLocation
self.endLocation = endLocation
class SearchMatch(ChromeTypeBase):
def __init__(self,
lineNumber: Union['float'],
lineContent: Union['str'],
):
self.lineNumber = lineNumber
self.lineContent = lineContent
class BreakLocation(ChromeTypeBase):
def __init__(self,
scriptId: Union['Runtime.ScriptId'],
lineNumber: Union['int'],
columnNumber: Optional['int'] = None,
type: Optional['str'] = None,
):
self.scriptId = scriptId
self.lineNumber = lineNumber
self.columnNumber = columnNumber
self.type = type
ScriptLanguage = str
class DebugSymbols(ChromeTypeBase):
def __init__(self,
type: Union['str'],
externalURL: Optional['str'] = None,
):
self.type = type
self.externalURL = externalURL
class Debugger(PayloadMixin):
@classmethod
def continueToLocation(cls,
location: Union['Location'],
targetCallFrames: Optional['str'] = None,
):
return (
cls.build_send_payload("continueToLocation", {
"location": location,
"targetCallFrames": targetCallFrames,
}),
None
)
@classmethod
def disable(cls):
return (
cls.build_send_payload("disable", {
}),
None
)
@classmethod
def enable(cls,
maxScriptsCacheSize: Optional['float'] = None,
):
return (
cls.build_send_payload("enable", {
"maxScriptsCacheSize": maxScriptsCacheSize,
}),
cls.convert_payload({
"debuggerId": {
"class": Runtime.UniqueDebuggerId,
"optional": False
},
})
)
@classmethod
def evaluateOnCallFrame(cls,
callFrameId: Union['CallFrameId'],
expression: Union['str'],
objectGroup: Optional['str'] = None,
includeCommandLineAPI: Optional['bool'] = None,
silent: Optional['bool'] = None,
returnByValue: Optional['bool'] = None,
generatePreview: Optional['bool'] = None,
throwOnSideEffect: Optional['bool'] = None,
timeout: Optional['Runtime.TimeDelta'] = None,
):
return (
cls.build_send_payload("evaluateOnCallFrame", {
"callFrameId": callFrameId,
"expression": expression,
"objectGroup": objectGroup,
"includeCommandLineAPI": includeCommandLineAPI,
"silent": silent,
"returnByValue": returnByValue,
"generatePreview": generatePreview,
"throwOnSideEffect": throwOnSideEffect,
"timeout": timeout,
}),
cls.convert_payload({
"result": {
"class": Runtime.RemoteObject,
"optional": False
},
"exceptionDetails": {
"class": Runtime.ExceptionDetails,
"optional": True
},
})
)
@classmethod
def getPossibleBreakpoints(cls,
start: Union['Location'],
end: Optional['Location'] = None,
restrictToFunction: Optional['bool'] = None,
):
return (
cls.build_send_payload("getPossibleBreakpoints", {
"start": start,
"end": end,
"restrictToFunction": restrictToFunction,
}),
cls.convert_payload({
"locations": {
"class": [BreakLocation],
"optional": False
},
})
)
@classmethod
def getScriptSource(cls,
scriptId: Union['Runtime.ScriptId'],
):
return (
cls.build_send_payload("getScriptSource", {
"scriptId": scriptId,
}),
cls.convert_payload({
"scriptSource": {
"class": str,
"optional": False
},
"bytecode": {
"class": str,
"optional": True
},
})
)
@classmethod
def getWasmBytecode(cls,
scriptId: Union['Runtime.ScriptId'],
):
return (
cls.build_send_payload("getWasmBytecode", {
"scriptId": scriptId,
}),
cls.convert_payload({
"bytecode": {
"class": str,
"optional": False
},
})
)
@classmethod
def getStackTrace(cls,
stackTraceId: Union['Runtime.StackTraceId'],
):
return (
cls.build_send_payload("getStackTrace", {
"stackTraceId": stackTraceId,
}),
cls.convert_payload({
"stackTrace": {
"class": Runtime.StackTrace,
"optional": False
},
})
)
@classmethod
def pause(cls):
return (
cls.build_send_payload("pause", {
}),
None
)
@classmethod
def pauseOnAsyncCall(cls,
parentStackTraceId: Union['Runtime.StackTraceId'],
):
return (
cls.build_send_payload("pauseOnAsyncCall", {
"parentStackTraceId": parentStackTraceId,
}),
None
)
@classmethod
def removeBreakpoint(cls,
breakpointId: Union['BreakpointId'],
):
return (
cls.build_send_payload("removeBreakpoint", {
"breakpointId": breakpointId,
}),
None
)
@classmethod
def restartFrame(cls,
callFrameId: Union['CallFrameId'],
):
return (
cls.build_send_payload("restartFrame", {
"callFrameId": callFrameId,
}),
cls.convert_payload({
"callFrames": {
"class": [CallFrame],
"optional": False
},
"asyncStackTrace": {
"class": Runtime.StackTrace,
"optional": True
},
"asyncStackTraceId": {
"class": Runtime.StackTraceId,
"optional": True
},
})
)
@classmethod
def resume(cls,
terminateOnResume: Optional['bool'] = None,
):
return (
cls.build_send_payload("resume", {
"terminateOnResume": terminateOnResume,
}),
None
)
@classmethod
def searchInContent(cls,
scriptId: Union['Runtime.ScriptId'],
query: Union['str'],
caseSensitive: Optional['bool'] = None,
isRegex: Optional['bool'] = None,
):
return (
cls.build_send_payload("searchInContent", {
"scriptId": scriptId,
"query": query,
"caseSensitive": caseSensitive,
"isRegex": isRegex,
}),
cls.convert_payload({
"result": {
"class": [SearchMatch],
"optional": False
},
})
)
@classmethod
def setAsyncCallStackDepth(cls,
maxDepth: Union['int'],
):
return (
cls.build_send_payload("setAsyncCallStackDepth", {
"maxDepth": maxDepth,
}),
None
)
@classmethod
def setBlackboxPatterns(cls,
patterns: Union['[]'],
):
return (
cls.build_send_payload("setBlackboxPatterns", {
"patterns": patterns,
}),
None
)
@classmethod
def setBlackboxedRanges(cls,
scriptId: Union['Runtime.ScriptId'],
positions: Union['[ScriptPosition]'],
):
return (
cls.build_send_payload("setBlackboxedRanges", {
"scriptId": scriptId,
"positions": positions,
}),
None
)
@classmethod
def setBreakpoint(cls,
location: Union['Location'],
condition: Optional['str'] = None,
):
return (
cls.build_send_payload("setBreakpoint", {
"location": location,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
"actualLocation": {
"class": Location,
"optional": False
},
})
)
@classmethod
def setInstrumentationBreakpoint(cls,
instrumentation: Union['str'],
):
return (
cls.build_send_payload("setInstrumentationBreakpoint", {
"instrumentation": instrumentation,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
})
)
@classmethod
def setBreakpointByUrl(cls,
lineNumber: Union['int'],
url: Optional['str'] = None,
urlRegex: Optional['str'] = None,
scriptHash: Optional['str'] = None,
columnNumber: Optional['int'] = None,
condition: Optional['str'] = None,
):
return (
cls.build_send_payload("setBreakpointByUrl", {
"lineNumber": lineNumber,
"url": url,
"urlRegex": urlRegex,
"scriptHash": scriptHash,
"columnNumber": columnNumber,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
"locations": {
"class": [Location],
"optional": False
},
})
)
@classmethod
def setBreakpointOnFunctionCall(cls,
objectId: Union['Runtime.RemoteObjectId'],
condition: Optional['str'] = None,
):
return (
cls.build_send_payload("setBreakpointOnFunctionCall", {
"objectId": objectId,
"condition": condition,
}),
cls.convert_payload({
"breakpointId": {
"class": BreakpointId,
"optional": False
},
})
)
@classmethod
def setBreakpointsActive(cls,
active: Union['bool'],
):
return (
cls.build_send_payload("setBreakpointsActive", {
"active": active,
}),
None
)
@classmethod
def setPauseOnExceptions(cls,
state: Union['str'],
):
return (
cls.build_send_payload("setPauseOnExceptions", {
"state": state,
}),
None
)
@classmethod
def setReturnValue(cls,
newValue: Union['Runtime.CallArgument'],
):
return (
cls.build_send_payload("setReturnValue", {
"newValue": newValue,
}),
None
)
@classmethod
def setScriptSource(cls,
scriptId: Union['Runtime.ScriptId'],
scriptSource: Union['str'],
dryRun: Optional['bool'] = None,
):
return (
cls.build_send_payload("setScriptSource", {
"scriptId": scriptId,
"scriptSource": scriptSource,
"dryRun": dryRun,
}),
cls.convert_payload({
"callFrames": {
"class": [CallFrame],
"optional": True
},
"stackChanged": {
"class": bool,
"optional": True
},
"asyncStackTrace": {
"class": Runtime.StackTrace,
"optional": True
},
"asyncStackTraceId": {
"class": Runtime.StackTraceId,
"optional": True
},
"exceptionDetails": {
"class": Runtime.ExceptionDetails,
"optional": True
},
})
)
@classmethod
def setSkipAllPauses(cls,
skip: Union['bool'],
):
return (
cls.build_send_payload("setSkipAllPauses", {
"skip": skip,
}),
None
)
@classmethod
def setVariableValue(cls,
scopeNumber: Union['int'],
variableName: Union['str'],
newValue: Union['Runtime.CallArgument'],
callFrameId: Union['CallFrameId'],
):
return (
cls.build_send_payload("setVariableValue", {
"scopeNumber": scopeNumber,
"variableName": variableName,
"newValue": newValue,
"callFrameId": callFrameId,
}),
None
)
@classmethod
def stepInto(cls,
breakOnAsyncCall: Optional['bool'] = None,
skipList: Optional['[LocationRange]'] = None,
):
return (
cls.build_send_payload("stepInto", {
"breakOnAsyncCall": breakOnAsyncCall,
"skipList": skipList,
}),
None
)
@classmethod
def stepOut(cls):
return (
cls.build_send_payload("stepOut", {
}),
None
)
@classmethod
def stepOver(cls,
skipList: Optional['[LocationRange]'] = None,
):
return (
cls.build_send_payload("stepOver", {
"skipList": skipList,
}),
None
)
class BreakpointResolvedEvent(BaseEvent):
    """Fired when a pending breakpoint is bound to an actual script location."""
    js_name = 'Debugger.breakpointResolved'
    hashable = ['breakpointId']
    is_hashable = True
    def __init__(self,
                 breakpointId: Union['BreakpointId', dict],
                 location: Union['Location', dict],
                 ):
        # Raw protocol payloads arrive as dicts; expand them into wrappers.
        if isinstance(breakpointId, dict):
            breakpointId = BreakpointId(**breakpointId)
        self.breakpointId = breakpointId
        if isinstance(location, dict):
            location = Location(**location)
        self.location = location
    @classmethod
    def build_hash(cls, breakpointId):
        # Serialize the identifying parameter(s) into the registration hash
        # used to match event subscriptions to incoming events.
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class PausedEvent(BaseEvent):
    """Fired when the debugger stops (breakpoint, exception, step, ...).

    ``reason`` tells why execution paused; ``callFrames`` is the stack at
    the pause point, innermost frame first.
    """
    js_name = 'Debugger.paused'
    hashable = ['asyncStackTraceId', 'asyncCallStackTraceId']
    is_hashable = True
    def __init__(self,
                 callFrames: Union['[CallFrame]', dict],
                 reason: Union['str', dict],
                 data: Union['dict', dict, None] = None,
                 hitBreakpoints: Union['[]', dict, None] = None,
                 asyncStackTrace: Union['Runtime.StackTrace', dict, None] = None,
                 asyncStackTraceId: Union['Runtime.StackTraceId', dict, None] = None,
                 asyncCallStackTraceId: Union['Runtime.StackTraceId', dict, None] = None,
                 ):
        # BUGFIX: the generated code did ``[CallFrame](**callFrames)`` and
        # ``[](**hitBreakpoints)`` -- calling a *list literal*, which raises
        # TypeError whenever its ``isinstance(..., dict)`` guard fired.
        # Instead, convert each raw frame dict of the incoming list.
        if isinstance(callFrames, list):
            callFrames = [CallFrame(**frame) if isinstance(frame, dict) else frame
                          for frame in callFrames]
        self.callFrames = callFrames
        # ``reason`` and ``hitBreakpoints`` are a plain string / list of
        # breakpoint-id strings on the wire; store them verbatim (the old
        # ``str(**reason)`` conversion could never succeed).
        self.reason = reason
        if isinstance(data, dict):
            data = dict(**data)  # shallow copy of the auxiliary payload
        self.data = data
        self.hitBreakpoints = hitBreakpoints
        if isinstance(asyncStackTrace, dict):
            asyncStackTrace = Runtime.StackTrace(**asyncStackTrace)
        self.asyncStackTrace = asyncStackTrace
        if isinstance(asyncStackTraceId, dict):
            asyncStackTraceId = Runtime.StackTraceId(**asyncStackTraceId)
        self.asyncStackTraceId = asyncStackTraceId
        if isinstance(asyncCallStackTraceId, dict):
            asyncCallStackTraceId = Runtime.StackTraceId(**asyncCallStackTraceId)
        self.asyncCallStackTraceId = asyncCallStackTraceId
    @classmethod
    def build_hash(cls, asyncStackTraceId, asyncCallStackTraceId):
        """Serialize the identifying params into this event's registration hash."""
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class ResumedEvent(BaseEvent):
    """Fired when the debugger resumes execution; carries no payload."""
    js_name = 'Debugger.resumed'
    hashable = []
    is_hashable = False
    def __init__(self):
        pass
    @classmethod
    def build_hash(cls):
        # No identifying parameters, so this event can never be hash-registered.
        raise ValueError('Unable to build hash for non-hashable type')
class ScriptFailedToParseEvent(BaseEvent):
    """Fired when the VM fails to compile a script (e.g. a syntax error).

    Carries the same positional/identity metadata as ``scriptParsed`` so
    tooling can still locate the broken source.
    """
    js_name = 'Debugger.scriptFailedToParse'
    hashable = ['executionContextId', 'scriptId']
    is_hashable = True
    def __init__(self,
                 scriptId: Union['Runtime.ScriptId', dict],
                 url: Union['str', dict],
                 startLine: Union['int', dict],
                 startColumn: Union['int', dict],
                 endLine: Union['int', dict],
                 endColumn: Union['int', dict],
                 executionContextId: Union['Runtime.ExecutionContextId', dict],
                 hash: Union['str', dict],
                 executionContextAuxData: Union['dict', dict, None] = None,
                 sourceMapURL: Union['str', dict, None] = None,
                 hasSourceURL: Union['bool', dict, None] = None,
                 isModule: Union['bool', dict, None] = None,
                 length: Union['int', dict, None] = None,
                 stackTrace: Union['Runtime.StackTrace', dict, None] = None,
                 codeOffset: Union['int', dict, None] = None,
                 scriptLanguage: Union['Debugger.ScriptLanguage', dict, None] = None,
                 embedderName: Union['str', dict, None] = None,
                 ):
        def coerce(value, factory):
            # Same conversion rule as the generated pattern: a raw dict
            # payload is expanded into the wrapper type; anything else
            # (already-converted objects, None) is stored verbatim.
            return factory(**value) if isinstance(value, dict) else value
        self.scriptId = coerce(scriptId, Runtime.ScriptId)
        self.url = coerce(url, str)
        self.startLine = coerce(startLine, int)
        self.startColumn = coerce(startColumn, int)
        self.endLine = coerce(endLine, int)
        self.endColumn = coerce(endColumn, int)
        self.executionContextId = coerce(executionContextId, Runtime.ExecutionContextId)
        self.hash = coerce(hash, str)
        self.executionContextAuxData = coerce(executionContextAuxData, dict)
        self.sourceMapURL = coerce(sourceMapURL, str)
        self.hasSourceURL = coerce(hasSourceURL, bool)
        self.isModule = coerce(isModule, bool)
        self.length = coerce(length, int)
        self.stackTrace = coerce(stackTrace, Runtime.StackTrace)
        self.codeOffset = coerce(codeOffset, int)
        self.scriptLanguage = coerce(scriptLanguage, Debugger.ScriptLanguage)
        self.embedderName = coerce(embedderName, str)
    @classmethod
    def build_hash(cls, executionContextId, scriptId):
        """Serialize the identifying params into this event's registration hash."""
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
class ScriptParsedEvent(BaseEvent):
    """Fired when the VM parses a script, including scripts parsed before
    the debugger was enabled (replayed on ``Debugger.enable``)."""
    js_name = 'Debugger.scriptParsed'
    hashable = ['executionContextId', 'scriptId']
    is_hashable = True
    def __init__(self,
                 scriptId: Union['Runtime.ScriptId', dict],
                 url: Union['str', dict],
                 startLine: Union['int', dict],
                 startColumn: Union['int', dict],
                 endLine: Union['int', dict],
                 endColumn: Union['int', dict],
                 executionContextId: Union['Runtime.ExecutionContextId', dict],
                 hash: Union['str', dict],
                 executionContextAuxData: Union['dict', dict, None] = None,
                 isLiveEdit: Union['bool', dict, None] = None,
                 sourceMapURL: Union['str', dict, None] = None,
                 hasSourceURL: Union['bool', dict, None] = None,
                 isModule: Union['bool', dict, None] = None,
                 length: Union['int', dict, None] = None,
                 stackTrace: Union['Runtime.StackTrace', dict, None] = None,
                 codeOffset: Union['int', dict, None] = None,
                 scriptLanguage: Union['Debugger.ScriptLanguage', dict, None] = None,
                 debugSymbols: Union['Debugger.DebugSymbols', dict, None] = None,
                 embedderName: Union['str', dict, None] = None,
                 ):
        def coerce(value, factory):
            # Same conversion rule as the generated pattern: a raw dict
            # payload is expanded into the wrapper type; anything else
            # (already-converted objects, None) is stored verbatim.
            return factory(**value) if isinstance(value, dict) else value
        self.scriptId = coerce(scriptId, Runtime.ScriptId)
        self.url = coerce(url, str)
        self.startLine = coerce(startLine, int)
        self.startColumn = coerce(startColumn, int)
        self.endLine = coerce(endLine, int)
        self.endColumn = coerce(endColumn, int)
        self.executionContextId = coerce(executionContextId, Runtime.ExecutionContextId)
        self.hash = coerce(hash, str)
        self.executionContextAuxData = coerce(executionContextAuxData, dict)
        self.isLiveEdit = coerce(isLiveEdit, bool)
        self.sourceMapURL = coerce(sourceMapURL, str)
        self.hasSourceURL = coerce(hasSourceURL, bool)
        self.isModule = coerce(isModule, bool)
        self.length = coerce(length, int)
        self.stackTrace = coerce(stackTrace, Runtime.StackTrace)
        self.codeOffset = coerce(codeOffset, int)
        self.scriptLanguage = coerce(scriptLanguage, Debugger.ScriptLanguage)
        self.debugSymbols = coerce(debugSymbols, Debugger.DebugSymbols)
        self.embedderName = coerce(embedderName, str)
    @classmethod
    def build_hash(cls, executionContextId, scriptId):
        """Serialize the identifying params into this event's registration hash."""
        kwargs = locals()
        kwargs.pop('cls')
        serialized_id_params = ','.join(['='.join([p, str(v)]) for p, v in kwargs.items()])
        h = '{}:{}'.format(cls.js_name, serialized_id_params)
        log.debug('generated hash = %s' % h)
        return h
| true | true |
1c3af1a5253375ac01aa65d1cf0b3648005c610f | 3,475 | py | Python | nuitka/nodes/FunctionAttributeNodes.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | nuitka/nodes/FunctionAttributeNodes.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | nuitka/nodes/FunctionAttributeNodes.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Function attribute nodes
The represent special values of the modules. The "__qualname__" value node
is intended and to be resolved later. And the function output for error
messages, is also dynamic.
These nodes are intended to allow for as much compile time optimization as
possible, despite this difficulty. In some modes these node become constants
quickly, in others they will present boundaries for optimization.
"""
from .ExpressionBases import (
CompileTimeConstantExpressionBase,
ExpressionChildHavingBase,
)
from .NodeBases import SideEffectsFromChildrenMixin
from .NodeMakingHelpers import makeConstantReplacementNode
class ExpressionFunctionQualnameRef(CompileTimeConstantExpressionBase):
    """Node for value __qualname__ of function or class.
    Notes:
        This is for Python 3.4 and higher only, where classes calculate the __qualname__
        value at runtime, then it's determined dynamically, while 3.3 set it more
        statically, and Python2 didn't have this feature at all.
    """
    kind = "EXPRESSION_FUNCTION_QUALNAME_REF"
    __slots__ = ("function_body",)
    def __init__(self, function_body, source_ref):
        CompileTimeConstantExpressionBase.__init__(self, source_ref=source_ref)
        self.function_body = function_body
    def finalize(self):
        # Drop back references so the node tree can be garbage collected.
        del self.parent
        del self.function_body
    def computeExpressionRaw(self, trace_collection):
        # The qualname is known at compile time here, so the whole node
        # folds into a plain constant string.
        result = makeConstantReplacementNode(
            node=self,
            constant=self.function_body.getFunctionQualname(),
            user_provided=True,
        )
        return (
            result,
            "new_constant",
            "Executed '__qualname__' resolution to '%s'."
            % self.function_body.getFunctionQualname(),
        )
    def getCompileTimeConstant(self):
        # Value used when the optimizer treats this node as a constant.
        return self.function_body.getFunctionQualname()
class ExpressionFunctionErrorStr(
    SideEffectsFromChildrenMixin, ExpressionChildHavingBase
):
    """Node for value "_PyObject_FunctionStr" C-API of function or callable in general.
    Notes:
        This is for Python 3.9 and higher only, where functions have their module
        added to the "__qualname__" value at runtime.
    """
    kind = "EXPRESSION_FUNCTION_ERROR_STR"
    named_child = "value"
    def __init__(self, value, source_ref):
        ExpressionChildHavingBase.__init__(self, value, source_ref=source_ref)
    def mayRaiseException(self, exception_type):
        # Only raises if producing the wrapped value itself can raise.
        return self.subnode_value.mayRaiseException(exception_type)
    def computeExpression(self, trace_collection):
        # TODO: Could compile time compute these for concrete functions.
        return self, None, None
| 34.405941 | 88 | 0.721439 |
from .ExpressionBases import (
CompileTimeConstantExpressionBase,
ExpressionChildHavingBase,
)
from .NodeBases import SideEffectsFromChildrenMixin
from .NodeMakingHelpers import makeConstantReplacementNode
class ExpressionFunctionQualnameRef(CompileTimeConstantExpressionBase):
kind = "EXPRESSION_FUNCTION_QUALNAME_REF"
__slots__ = ("function_body",)
def __init__(self, function_body, source_ref):
CompileTimeConstantExpressionBase.__init__(self, source_ref=source_ref)
self.function_body = function_body
def finalize(self):
del self.parent
del self.function_body
def computeExpressionRaw(self, trace_collection):
result = makeConstantReplacementNode(
node=self,
constant=self.function_body.getFunctionQualname(),
user_provided=True,
)
return (
result,
"new_constant",
"Executed '__qualname__' resolution to '%s'."
% self.function_body.getFunctionQualname(),
)
def getCompileTimeConstant(self):
return self.function_body.getFunctionQualname()
class ExpressionFunctionErrorStr(
SideEffectsFromChildrenMixin, ExpressionChildHavingBase
):
kind = "EXPRESSION_FUNCTION_ERROR_STR"
named_child = "value"
def __init__(self, value, source_ref):
ExpressionChildHavingBase.__init__(self, value, source_ref=source_ref)
def mayRaiseException(self, exception_type):
return self.subnode_value.mayRaiseException(exception_type)
def computeExpression(self, trace_collection):
return self, None, None
| true | true |
1c3af21cc1494b3e7e8c52b4d2bce3b10af9c0f5 | 1,308 | py | Python | darkknight/models.py | fusionbox/django-darkknight | ae3d06f8c92120c45c5e18f6dd31950262f661ed | [
"BSD-2-Clause"
] | 3 | 2016-05-23T20:43:40.000Z | 2021-07-08T22:11:57.000Z | darkknight/models.py | fusionbox/django-darkknight | ae3d06f8c92120c45c5e18f6dd31950262f661ed | [
"BSD-2-Clause"
] | 1 | 2018-04-13T01:12:01.000Z | 2018-04-13T17:43:45.000Z | darkknight/models.py | fusionbox/django-darkknight | ae3d06f8c92120c45c5e18f6dd31950262f661ed | [
"BSD-2-Clause"
] | 2 | 2016-11-27T22:35:03.000Z | 2017-09-16T03:55:50.000Z | import os.path
import uuid
from django.db import models
from django.core.signing import Signer
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from OpenSSL import crypto
pk_signer = Signer('CSR PK')
# must be a top-level function to be serializable in migrations
def generate_uuid():
    """Return a fresh random UUID4 as a 32-character lowercase hex string.

    Kept as a named module-level function (rather than a lambda or a bound
    method) so Django migrations can serialize it as a field default.
    """
    return str(uuid.uuid4()).replace('-', '')
class SSLKey(models.Model):
    """A generated private key, identified (and stored on disk) by a random uuid."""
    # The hex uuid doubles as the primary key and the on-disk file name.
    uuid = models.CharField(max_length=32, unique=True, default=generate_uuid, primary_key=True)
    @property
    def key_path(self):
        # Private key material is kept outside the database, under
        # settings.DARKKNIGHT_STORAGE.
        return os.path.join(settings.DARKKNIGHT_STORAGE, '%s.key' % self.uuid)
@python_2_unicode_compatible
class CertificateSigningRequest(models.Model):
    """A CSR generated from an :class:`SSLKey`, stored as PEM text."""
    # Denormalised out of the CSR content so requests can be searched by domain.
    domain = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    key = models.ForeignKey(SSLKey, related_name='csr_set')
    def __str__(self):
        return 'Certificate for %s' % self.domain
    @property
    def csr_path(self):
        # BUGFIX: this model has no ``uuid`` field of its own, so the old
        # ``self.uuid`` always raised AttributeError.  The CSR file lives
        # next to its key, named after the key's uuid -- ``key_id`` exposes
        # that primary-key value without an extra database query.
        return os.path.join(settings.DARKKNIGHT_STORAGE, '%s.csr' % self.key_id)
    @property
    def csr_obj(self):
        """Parsed pyOpenSSL request object for :attr:`content`."""
        return crypto.load_certificate_request(crypto.FILETYPE_PEM, self.content)
    @property
    def subject(self):
        """X509 subject of the CSR (CN, O, C, ...)."""
        return self.csr_obj.get_subject()
| 26.693878 | 96 | 0.733945 | import os.path
import uuid
from django.db import models
from django.core.signing import Signer
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from OpenSSL import crypto
pk_signer = Signer('CSR PK')
def generate_uuid():
return uuid.uuid4().hex
class SSLKey(models.Model):
uuid = models.CharField(max_length=32, unique=True, default=generate_uuid, primary_key=True)
@property
def key_path(self):
return os.path.join(settings.DARKKNIGHT_STORAGE, '%s.key' % self.uuid)
@python_2_unicode_compatible
class CertificateSigningRequest(models.Model):
domain = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
content = models.TextField()
key = models.ForeignKey(SSLKey, related_name='csr_set')
def __str__(self):
return 'Certificate for %s' % self.domain
@property
def csr_path(self):
return os.path.join(settings.DARKKNIGHT_STORAGE, '%s.csr' % self.uuid)
@property
def csr_obj(self):
return crypto.load_certificate_request(crypto.FILETYPE_PEM, self.content)
@property
def subject(self):
return self.csr_obj.get_subject()
| true | true |
1c3af43fb511853ff61c8fae924b0d3c70187d2e | 1,706 | py | Python | toontown/ai/DistributedAprilToonsMgr.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/ai/DistributedAprilToonsMgr.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/ai/DistributedAprilToonsMgr.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from direct.distributed.DistributedObject import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase.AprilToonsGlobals import *
from toontown.toonbase import ToontownGlobals
class DistributedAprilToonsMgr(DistributedObject):
    """Client-side manager mirroring the AI's list of active April Toons events."""
    notify = DirectNotifyGlobal.directNotify.newCategory('AprilToonsMgr')
    def __init__(self, cr):
        DistributedObject.__init__(self, cr)
        cr.aprilToonsMgr = self
        # Event ids currently active, as last reported by the AI.
        self.events = []
    def announceGenerate(self):
        DistributedObject.announceGenerate(self)
        self.d_requestEventsList()
    def d_requestEventsList(self):
        """Ask the AI for the authoritative list of active event ids."""
        self.notify.debug("Requesting events list from AI.")
        self.sendUpdate('requestEventsList', [])
    def requestEventsListResp(self, eventIds):
        """AI response handler: store the event ids and apply their effects."""
        self.events = eventIds
        self.checkActiveEvents()
    def isEventActive(self, eventId):
        # NOTE: Possible race condition where the client checks for if an event
        # is active *before* it gets a response from the AI.
        if not base.cr.config.GetBool('want-april-toons', False):
            # If this DO is generated but we don't want april toons, always
            # report inactive regardless.
            return False
        return eventId in self.events
    def setEventActive(self, eventId, active):
        """Locally mark one event as active/inactive."""
        if active and eventId not in self.events:
            self.events.append(eventId)
        elif not active and eventId in self.events:
            # BUGFIX: ``del self.events[eventId]`` treated the event id as a
            # list *index* (wrong element removed, or IndexError); remove the
            # id by value instead.  Membership was just checked, so this is safe.
            self.events.remove(eventId)
    def checkActiveEvents(self):
        # Apply side effects of the currently active events to the local toon.
        if self.isEventActive(EventGlobalGravity):
            base.localAvatar.controlManager.currentControls.setGravity(ToontownGlobals.GravityValue * 0.75)
| 38.772727 | 107 | 0.697538 | from direct.distributed.DistributedObject import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase.AprilToonsGlobals import *
from toontown.toonbase import ToontownGlobals
class DistributedAprilToonsMgr(DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('AprilToonsMgr')
def __init__(self, cr):
DistributedObject.__init__(self, cr)
cr.aprilToonsMgr = self
self.events = []
def announceGenerate(self):
DistributedObject.announceGenerate(self)
self.d_requestEventsList()
def d_requestEventsList(self):
self.notify.debug("Requesting events list from AI.")
self.sendUpdate('requestEventsList', [])
def requestEventsListResp(self, eventIds):
self.events = eventIds
self.checkActiveEvents()
def isEventActive(self, eventId):
if not base.cr.config.GetBool('want-april-toons', False):
# false regardless.
return False
return eventId in self.events
def setEventActive(self, eventId, active):
if active and eventId not in self.events:
self.events.append(eventId)
elif not active and eventId in self.events:
del self.events[eventId]
def checkActiveEvents(self):
if self.isEventActive(EventGlobalGravity):
base.localAvatar.controlManager.currentControls.setGravity(ToontownGlobals.GravityValue * 0.75)
| true | true |
1c3af4ceae6c70494adf33983ad96e4c321ed51d | 552 | py | Python | Tests/image_tests/renderpasses/test_GBufferRT.py | Nuclearfossil/Falcor | 667dc68a51bbaf87a2a063f4f0ef8928990ed203 | [
"BSD-3-Clause"
] | 62 | 2022-02-04T10:34:29.000Z | 2022-03-31T19:41:20.000Z | Tests/image_tests/renderpasses/test_GBufferRT.py | Nuclearfossil/Falcor | 667dc68a51bbaf87a2a063f4f0ef8928990ed203 | [
"BSD-3-Clause"
] | 1 | 2021-02-18T16:38:38.000Z | 2021-02-18T16:38:38.000Z | Tests/image_tests/renderpasses/test_GBufferRT.py | fromasmtodisasm/Falcor | 300aee1d7a9609e427f07e8887fd9bcb377426b0 | [
"BSD-3-Clause"
] | 4 | 2022-02-04T16:08:30.000Z | 2022-03-09T09:39:41.000Z | from helpers import render_frames
from graphs.GBufferRT import GBufferRT as g
from falcor import *
m.addGraph(g)
m.loadScene('Arcade/Arcade.fscene')
ctx = locals()
# default
render_frames(ctx, 'default', frames=[1,16,64])
# enable depth-of-field
m.scene.camera.focalDistance = 3.0
m.scene.camera.apertureRadius = 0.1
render_frames(ctx, 'dof', frames=[1,16,64])
# re-load scene with non-indexed vertices
m.loadScene('Arcade/Arcade.fscene', buildFlags=SceneBuilderFlags.NonIndexedVertices)
render_frames(ctx, 'non-indexed', frames=[1,16,64])
exit()
| 25.090909 | 84 | 0.762681 | from helpers import render_frames
from graphs.GBufferRT import GBufferRT as g
from falcor import *
m.addGraph(g)
m.loadScene('Arcade/Arcade.fscene')
ctx = locals()
render_frames(ctx, 'default', frames=[1,16,64])
m.scene.camera.focalDistance = 3.0
m.scene.camera.apertureRadius = 0.1
render_frames(ctx, 'dof', frames=[1,16,64])
m.loadScene('Arcade/Arcade.fscene', buildFlags=SceneBuilderFlags.NonIndexedVertices)
render_frames(ctx, 'non-indexed', frames=[1,16,64])
exit()
| true | true |
1c3af52ad959cebe5559db5b7752747028e20cf2 | 4,399 | py | Python | sdk/python/pulumi_azure_nextgen/recoveryservices/get_replication_recovery_services_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/recoveryservices/get_replication_recovery_services_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/recoveryservices/get_replication_recovery_services_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetReplicationRecoveryServicesProviderResult',
'AwaitableGetReplicationRecoveryServicesProviderResult',
'get_replication_recovery_services_provider',
]
@pulumi.output_type
class GetReplicationRecoveryServicesProviderResult:
    """
    Provider details.
    """
    def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
        # Codegen-style validation: reject wrongly-typed values early so a
        # malformed provider response fails loudly instead of propagating.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource Location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.RecoveryServicesProviderPropertiesResponse':
        """
        Provider properties.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource Type
        """
        return pulumi.get(self, "type")
class AwaitableGetReplicationRecoveryServicesProviderResult(GetReplicationRecoveryServicesProviderResult):
    """Awaitable shim so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # A generator function needs a ``yield`` in its body; the unreachable
        # ``if False: yield self`` satisfies that without ever suspending, so
        # awaiting this object returns the plain result immediately.
        if False:
            yield self
        return GetReplicationRecoveryServicesProviderResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_replication_recovery_services_provider(fabric_name: Optional[str] = None,
                                               provider_name: Optional[str] = None,
                                               resource_group_name: Optional[str] = None,
                                               resource_name: Optional[str] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationRecoveryServicesProviderResult:
    """
    Provider details.
    API Version: 2018-07-10.
    :param str fabric_name: Fabric name.
    :param str provider_name: Recovery services provider name
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    """
    # Wire-format argument names expected by the Azure NextGen provider.
    __args__ = {
        'fabricName': fabric_name,
        'providerName': provider_name,
        'resourceGroupName': resource_group_name,
        'resourceName': resource_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke('azure-nextgen:recoveryservices:getReplicationRecoveryServicesProvider',
                                          __args__, opts=opts, typ=GetReplicationRecoveryServicesProviderResult).value
    return AwaitableGetReplicationRecoveryServicesProviderResult(
        id=invoke_result.id,
        location=invoke_result.location,
        name=invoke_result.name,
        properties=invoke_result.properties,
        type=invoke_result.type)
| 34.912698 | 185 | 0.643555 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetReplicationRecoveryServicesProviderResult',
'AwaitableGetReplicationRecoveryServicesProviderResult',
'get_replication_recovery_services_provider',
]
@pulumi.output_type
class GetReplicationRecoveryServicesProviderResult:
    """
    Provider details.
    """
    def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
        # Validate each argument's type before storing it via pulumi.set().
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """Resource Id"""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource Location"""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource Name"""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.RecoveryServicesProviderPropertiesResponse':
        """Provider properties."""
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource Type"""
        return pulumi.get(self, "type")
class AwaitableGetReplicationRecoveryServicesProviderResult(GetReplicationRecoveryServicesProviderResult):
    """Awaitable shim so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Unreachable yield makes this a generator; awaiting returns the
        # already-resolved result immediately.
        if False:
            yield self
        return GetReplicationRecoveryServicesProviderResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_replication_recovery_services_provider(fabric_name: Optional[str] = None,
                                               provider_name: Optional[str] = None,
                                               resource_group_name: Optional[str] = None,
                                               resource_name: Optional[str] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationRecoveryServicesProviderResult:
    """
    Provider details.
    :param str fabric_name: Fabric name.
    :param str provider_name: Recovery services provider name
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    """
    __args__ = dict()
    __args__['fabricName'] = fabric_name
    __args__['providerName'] = provider_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure NextGen provider; .value unwraps it.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:recoveryservices:getReplicationRecoveryServicesProvider', __args__, opts=opts, typ=GetReplicationRecoveryServicesProviderResult).value
    return AwaitableGetReplicationRecoveryServicesProviderResult(
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
| true | true |
1c3af52ea76f81bce8055804eeaef5872fe35d37 | 30 | py | Python | pybot/plugins/api/__init__.py | harikrishnana2021/operationcode-pybot | 6e78e069c274281d50dcb71b98b9f485afb012fc | [
"MIT"
] | null | null | null | pybot/plugins/api/__init__.py | harikrishnana2021/operationcode-pybot | 6e78e069c274281d50dcb71b98b9f485afb012fc | [
"MIT"
] | null | null | null | pybot/plugins/api/__init__.py | harikrishnana2021/operationcode-pybot | 6e78e069c274281d50dcb71b98b9f485afb012fc | [
"MIT"
] | null | null | null | from .plugin import APIPlugin
| 15 | 29 | 0.833333 | from .plugin import APIPlugin
| true | true |
1c3af727a745fe5abf6fe52f3e5a1d0d832e4f48 | 11,756 | py | Python | tests/loggers/test_tensorboard.py | mathemusician/pytorch-lightning | 15fa5389387b3a220bc044dd30eb0be1e8f64944 | [
"Apache-2.0"
] | 3,469 | 2019-03-31T03:09:16.000Z | 2020-01-13T15:06:31.000Z | tests/loggers/test_tensorboard.py | mathemusician/pytorch-lightning | 15fa5389387b3a220bc044dd30eb0be1e8f64944 | [
"Apache-2.0"
] | 524 | 2019-04-02T12:33:39.000Z | 2020-01-14T02:53:33.000Z | tests/loggers/test_tensorboard.py | mathemusician/pytorch-lightning | 15fa5389387b3a220bc044dd30eb0be1e8f64944 | [
"Apache-2.0"
] | 365 | 2019-04-02T22:14:04.000Z | 2020-01-13T17:21:54.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import operator
import os
from argparse import Namespace
from unittest import mock
import numpy as np
import pytest
import torch
import yaml
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.imports import _compare_version, _OMEGACONF_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _OMEGACONF_AVAILABLE:
from omegaconf import OmegaConf
@pytest.mark.skipif(
    _compare_version("tensorboard", operator.ge, "2.6.0"), reason="cannot import EventAccumulator in >= 2.6.0"
)
def test_tensorboard_hparams_reload(tmpdir):
    """Hyperparameters must round-trip through both hparams.yaml and the
    TensorBoard event file written during a short training run."""
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    class CustomModel(BoringModel):
        def __init__(self, b1=0.5, b2=0.999):
            super().__init__()
            self.save_hyperparameters()
    trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
    model = CustomModel()
    assert trainer.log_dir == trainer.logger.log_dir
    trainer.fit(model)
    # log_dir must still match the logger's after fit() created the run folder
    assert trainer.log_dir == trainer.logger.log_dir
    folder_path = trainer.log_dir
    # make sure yaml is there
    with open(os.path.join(folder_path, "hparams.yaml")) as file:
        # safe_load converts YAML scalars to the Python dictionary format
        yaml_params = yaml.safe_load(file)
        assert yaml_params["b1"] == 0.5
        assert yaml_params["b2"] == 0.999
        assert len(yaml_params.keys()) == 2
    # verify artifacts: exactly one checkpoint from the single training step
    assert len(os.listdir(os.path.join(folder_path, "checkpoints"))) == 1
    # verify tb logs
    event_acc = EventAccumulator(folder_path)
    event_acc.Reload()
    # serialized HParams-plugin payload for b1/b2 plus the default hp_metric
    hparams_data = b'\x12\x1f"\x06\n\x02b1 \x03"\x06\n\x02b2 \x03*\r\n\x0b\x12\thp_metric'
    assert event_acc.summary_metadata["_hparams_/experiment"].plugin_data.plugin_name == "hparams"
    assert event_acc.summary_metadata["_hparams_/experiment"].plugin_data.content == hparams_data
def test_tensorboard_automatic_versioning(tmpdir):
    """Verify that automatic versioning works."""
    base = tmpdir / "tb_versioning"
    base.mkdir()
    for existing in ("version_0", "version_1"):
        (base / existing).mkdir()
    # with version_0 and version_1 already present, the next version is 2
    assert TensorBoardLogger(save_dir=tmpdir, name="tb_versioning").version == 2
def test_tensorboard_manual_versioning(tmpdir):
    """Verify that manual versioning works."""
    base = tmpdir / "tb_versioning"
    base.mkdir()
    for existing in ("version_0", "version_1", "version_2"):
        (base / existing).mkdir()
    # an explicitly requested version wins over the auto-detected next one
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
    assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
    """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402'."""
    name = "tb_versioning"
    (tmpdir / name).mkdir()
    expected_version = "2020-02-05-162402"
    logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)
    logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})  # Force data to be written
    assert logger.version == expected_version
    # the string version becomes the run directory name and must be non-empty
    assert os.listdir(tmpdir / name) == [expected_version]
    assert os.listdir(tmpdir / name / expected_version)
@pytest.mark.parametrize("name", ["", None])
def test_tensorboard_no_name(tmpdir, name):
    """Verify that None or empty name works."""
    logger = TensorBoardLogger(save_dir=tmpdir, name=name)
    logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})  # Force data to be written
    # without a name, runs are placed directly under save_dir
    assert os.path.normpath(logger.root_dir) == tmpdir  # use os.path.normpath to handle trailing /
    assert os.listdir(tmpdir / "version_0")
@mock.patch.dict(os.environ, {}, clear=True)
def test_tensorboard_log_sub_dir(tmpdir):
    """log_dir must compose save_dir/name/version[/sub_dir], expanding ``~``
    and environment variables in save_dir."""
    class TestLogger(TensorBoardLogger):
        # for reproducibility: pin name/version instead of auto-detecting
        @property
        def version(self):
            return "version"
        @property
        def name(self):
            return "name"
    trainer_args = dict(default_root_dir=tmpdir, max_steps=1)
    # no sub_dir specified
    save_dir = tmpdir / "logs"
    logger = TestLogger(save_dir)
    trainer = Trainer(**trainer_args, logger=logger)
    assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version")
    # sub_dir specified
    logger = TestLogger(save_dir, sub_dir="sub_dir")
    trainer = Trainer(**trainer_args, logger=logger)
    assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version", "sub_dir")
    # test home dir (`~`) handling
    save_dir = "~/tmp"
    explicit_save_dir = os.path.expanduser(save_dir)
    logger = TestLogger(save_dir, sub_dir="sub_dir")
    trainer = Trainer(**trainer_args, logger=logger)
    assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
    # test env var (`$`) handling; env is cleared by the decorator above
    test_env_dir = "some_directory"
    os.environ["test_env_dir"] = test_env_dir
    save_dir = "$test_env_dir/tmp"
    explicit_save_dir = f"{test_env_dir}/tmp"
    logger = TestLogger(save_dir, sub_dir="sub_dir")
    trainer = Trainer(**trainer_args, logger=logger)
    assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
    """Scalars of every supported type can be logged with or without a step."""
    scalar_metrics = {
        "float": 0.3,
        "int": 1,
        "FloatTensor": torch.tensor(0.1),
        "IntTensor": torch.tensor(1),
    }
    TensorBoardLogger(tmpdir).log_metrics(scalar_metrics, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
    """log_hyperparams must accept scalars, containers and exotic objects."""
    tb_logger = TensorBoardLogger(tmpdir)
    exotic_hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {"a": {"b": "c"}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar="buzz")),
        "layer": torch.nn.BatchNorm1d,
        "tensor": torch.empty(2, 2, 2),
        "array": np.empty([2, 2, 2]),
    }
    tb_logger.log_hyperparams(exotic_hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
    """Hyperparameters may be logged together with initial metric values when
    the automatic hp_metric is disabled."""
    logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {"a": {"b": "c"}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar="buzz")),
        "layer": torch.nn.BatchNorm1d,
        "tensor": torch.empty(2, 2, 2),
        "array": np.empty([2, 2, 2]),
    }
    metrics = {"abc": torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)
@RunIf(omegaconf=True)
def test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):
    """OmegaConf containers must be accepted by log_hyperparams just like
    plain dicts."""
    logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {"a": {"b": "c"}},
        "list": [1, 2, 3],
    }
    hparams = OmegaConf.create(hparams)
    metrics = {"abc": torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)
@pytest.mark.parametrize("example_input_array", [None, torch.rand(2, 32)])
def test_tensorboard_log_graph(tmpdir, example_input_array):
    """test that log graph works with both model.example_input_array and if array is passed externally."""
    model = BoringModel()
    if example_input_array is not None:
        # when an array is passed explicitly, clear the model attribute so the
        # logger must rely on the externally supplied input
        model.example_input_array = None
    logger = TensorBoardLogger(tmpdir, log_graph=True)
    logger.log_graph(model, example_input_array)
def test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):
    """test that log graph throws warning if model.example_input_array is None."""
    model = BoringModel()
    model.example_input_array = None
    logger = TensorBoardLogger(tmpdir, log_graph=True)
    # no input is available from either source, so a UserWarning is expected
    with pytest.warns(
        UserWarning,
        match="Could not log computational graph since the `model.example_input_array`"
        " attribute is not set or `input_array` was not given",
    ):
        logger.log_graph(model)
@mock.patch("pytorch_lightning.loggers.TensorBoardLogger.log_metrics")
def test_tensorboard_with_accummulated_gradients(mock_log_metrics, tmpdir):
    """Tests to ensure that tensorboard log properly when accumulated_gradients > 1."""
    # NOTE: "accummulated" (sic) is kept — the function name is the test id.
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.indexes = []
        def training_step(self, *args):
            self.log("foo", 1, on_step=True, on_epoch=True)
            # record the global step only when a logging-eligible optimizer
            # step happens (i.e. not during gradient accumulation)
            if not self.trainer.fit_loop._should_accumulate():
                if self.trainer._logger_connector.should_update_logs:
                    self.indexes.append(self.trainer.global_step)
            return super().training_step(*args)
    model = TestModel()
    model.training_epoch_end = None
    logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=12,
        limit_val_batches=0,
        max_epochs=3,
        accumulate_grad_batches=2,
        logger=[logger_0],
        log_every_n_steps=3,
    )
    trainer.fit(model)
    calls = [m[2] for m in mock_log_metrics.mock_calls]
    # 12 batches / 2 accumulation = 6 optimizer steps per epoch, so the
    # per-epoch aggregate is logged at global steps 5, 11 and 17
    count_epochs = [c["step"] for c in calls if "foo_epoch" in c["metrics"]]
    assert count_epochs == [5, 11, 17]
    count_steps = [c["step"] for c in calls if "foo_step" in c["metrics"]]
    assert count_steps == model.indexes
@mock.patch("pytorch_lightning.loggers.tensorboard.SummaryWriter")
def test_tensorboard_finalize(summary_writer, tmpdir):
    """Test that the SummaryWriter closes in finalize."""
    TensorBoardLogger(save_dir=tmpdir).finalize("any")
    writer = summary_writer()
    writer.flush.assert_called()
    writer.close.assert_called()
def test_tensorboard_save_hparams_to_yaml_once(tmpdir):
    """hparams.yaml is written into the run's log_dir, not the root dir."""
    logger = TensorBoardLogger(save_dir=tmpdir, default_hp_metric=False)
    trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=logger)
    assert trainer.log_dir == trainer.logger.log_dir
    trainer.fit(BoringModel())
    hparams_file = "hparams.yaml"
    assert os.path.isfile(os.path.join(trainer.log_dir, hparams_file))
    assert not os.path.isfile(os.path.join(tmpdir, hparams_file))
@mock.patch("pytorch_lightning.loggers.tensorboard.log")
def test_tensorboard_with_symlink(log, tmpdir):
    """Tests a specific failure case when tensorboard logger is used with empty name, symbolic link ``save_dir``,
    and relative paths."""
    os.chdir(tmpdir)  # need to use relative paths
    source = os.path.join(".", "lightning_logs")
    dest = os.path.join(".", "sym_lightning_logs")
    os.makedirs(source, exist_ok=True)
    os.symlink(source, dest)
    logger = TensorBoardLogger(save_dir=dest, name="")
    # accessing .version scans the (symlinked) folder; no warning may be logged
    _ = logger.version
    log.warning.assert_not_called()
def test_tensorboard_missing_folder_warning(tmpdir, caplog):
    """Verify that the logger throws a warning for invalid directory."""
    logger = TensorBoardLogger(save_dir=tmpdir, name="fake_dir")
    with caplog.at_level(logging.WARNING):
        # the version scan finds no folder and falls back to version 0
        assert logger.version == 0
    assert "Missing logger folder:" in caplog.text
| 34.884273 | 113 | 0.68986 |
import logging
import operator
import os
from argparse import Namespace
from unittest import mock
import numpy as np
import pytest
import torch
import yaml
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.imports import _compare_version, _OMEGACONF_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _OMEGACONF_AVAILABLE:
from omegaconf import OmegaConf
@pytest.mark.skipif(
_compare_version("tensorboard", operator.ge, "2.6.0"), reason="cannot import EventAccumulator in >= 2.6.0"
)
def test_tensorboard_hparams_reload(tmpdir):
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
class CustomModel(BoringModel):
def __init__(self, b1=0.5, b2=0.999):
super().__init__()
self.save_hyperparameters()
trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
model = CustomModel()
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
assert trainer.log_dir == trainer.logger.log_dir
folder_path = trainer.log_dir
with open(os.path.join(folder_path, "hparams.yaml")) as file:
yaml_params = yaml.safe_load(file)
assert yaml_params["b1"] == 0.5
assert yaml_params["b2"] == 0.999
assert len(yaml_params.keys()) == 2
assert len(os.listdir(os.path.join(folder_path, "checkpoints"))) == 1
event_acc = EventAccumulator(folder_path)
event_acc.Reload()
hparams_data = b'\x12\x1f"\x06\n\x02b1 \x03"\x06\n\x02b2 \x03*\r\n\x0b\x12\thp_metric'
assert event_acc.summary_metadata["_hparams_/experiment"].plugin_data.plugin_name == "hparams"
assert event_acc.summary_metadata["_hparams_/experiment"].plugin_data.content == hparams_data
def test_tensorboard_automatic_versioning(tmpdir):
    """Verify that automatic versioning works."""
    root_dir = tmpdir / "tb_versioning"
    root_dir.mkdir()
    (root_dir / "version_0").mkdir()
    (root_dir / "version_1").mkdir()
    # with version_0 and version_1 already present, the next version is 2
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
    assert logger.version == 2
def test_tensorboard_manual_versioning(tmpdir):
    """Verify that manual versioning works."""
    root_dir = tmpdir / "tb_versioning"
    root_dir.mkdir()
    (root_dir / "version_0").mkdir()
    (root_dir / "version_1").mkdir()
    (root_dir / "version_2").mkdir()
    # an explicitly requested version wins over the auto-detected next one
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
    assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
name = "tb_versioning"
(tmpdir / name).mkdir()
expected_version = "2020-02-05-162402"
logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})
assert logger.version == expected_version
assert os.listdir(tmpdir / name) == [expected_version]
assert os.listdir(tmpdir / name / expected_version)
@pytest.mark.parametrize("name", ["", None])
def test_tensorboard_no_name(tmpdir, name):
logger = TensorBoardLogger(save_dir=tmpdir, name=name)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})
assert os.path.normpath(logger.root_dir) == tmpdir
assert os.listdir(tmpdir / "version_0")
@mock.patch.dict(os.environ, {}, clear=True)
def test_tensorboard_log_sub_dir(tmpdir):
class TestLogger(TensorBoardLogger):
@property
def version(self):
return "version"
@property
def name(self):
return "name"
trainer_args = dict(default_root_dir=tmpdir, max_steps=1)
save_dir = tmpdir / "logs"
logger = TestLogger(save_dir)
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version")
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version", "sub_dir")
save_dir = "~/tmp"
explicit_save_dir = os.path.expanduser(save_dir)
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
test_env_dir = "some_directory"
os.environ["test_env_dir"] = test_env_dir
save_dir = "$test_env_dir/tmp"
explicit_save_dir = f"{test_env_dir}/tmp"
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
    """Scalars of every supported type can be logged with or without a step."""
    logger = TensorBoardLogger(tmpdir)
    metrics = {"float": 0.3, "int": 1, "FloatTensor": torch.tensor(0.1), "IntTensor": torch.tensor(1)}
    logger.log_metrics(metrics, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
logger = TensorBoardLogger(tmpdir)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {"a": {"b": "c"}},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
"tensor": torch.empty(2, 2, 2),
"array": np.empty([2, 2, 2]),
}
logger.log_hyperparams(hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {"a": {"b": "c"}},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
"tensor": torch.empty(2, 2, 2),
"array": np.empty([2, 2, 2]),
}
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
@RunIf(omegaconf=True)
def test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {"a": {"b": "c"}},
"list": [1, 2, 3],
}
hparams = OmegaConf.create(hparams)
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
@pytest.mark.parametrize("example_input_array", [None, torch.rand(2, 32)])
def test_tensorboard_log_graph(tmpdir, example_input_array):
model = BoringModel()
if example_input_array is not None:
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
logger.log_graph(model, example_input_array)
def test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):
model = BoringModel()
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
with pytest.warns(
UserWarning,
match="Could not log computational graph since the `model.example_input_array`"
" attribute is not set or `input_array` was not given",
):
logger.log_graph(model)
@mock.patch("pytorch_lightning.loggers.TensorBoardLogger.log_metrics")
def test_tensorboard_with_accummulated_gradients(mock_log_metrics, tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.indexes = []
def training_step(self, *args):
self.log("foo", 1, on_step=True, on_epoch=True)
if not self.trainer.fit_loop._should_accumulate():
if self.trainer._logger_connector.should_update_logs:
self.indexes.append(self.trainer.global_step)
return super().training_step(*args)
model = TestModel()
model.training_epoch_end = None
logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=12,
limit_val_batches=0,
max_epochs=3,
accumulate_grad_batches=2,
logger=[logger_0],
log_every_n_steps=3,
)
trainer.fit(model)
calls = [m[2] for m in mock_log_metrics.mock_calls]
count_epochs = [c["step"] for c in calls if "foo_epoch" in c["metrics"]]
assert count_epochs == [5, 11, 17]
count_steps = [c["step"] for c in calls if "foo_step" in c["metrics"]]
assert count_steps == model.indexes
@mock.patch("pytorch_lightning.loggers.tensorboard.SummaryWriter")
def test_tensorboard_finalize(summary_writer, tmpdir):
    """Test that the SummaryWriter flushes and closes in finalize."""
    logger = TensorBoardLogger(save_dir=tmpdir)
    logger.finalize("any")
    summary_writer().flush.assert_called()
    summary_writer().close.assert_called()
def test_tensorboard_save_hparams_to_yaml_once(tmpdir):
    """hparams.yaml must be written into the run's log_dir, not the root dir."""
    model = BoringModel()
    logger = TensorBoardLogger(save_dir=tmpdir, default_hp_metric=False)
    trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=logger)
    assert trainer.log_dir == trainer.logger.log_dir
    trainer.fit(model)
    hparams_file = "hparams.yaml"
    assert os.path.isfile(os.path.join(trainer.log_dir, hparams_file))
    assert not os.path.isfile(os.path.join(tmpdir, hparams_file))
@mock.patch("pytorch_lightning.loggers.tensorboard.log")
def test_tensorboard_with_symlink(log, tmpdir):
os.chdir(tmpdir)
source = os.path.join(".", "lightning_logs")
dest = os.path.join(".", "sym_lightning_logs")
os.makedirs(source, exist_ok=True)
os.symlink(source, dest)
logger = TensorBoardLogger(save_dir=dest, name="")
_ = logger.version
log.warning.assert_not_called()
def test_tensorboard_missing_folder_warning(tmpdir, caplog):
    """Verify that the logger throws a warning for invalid directory."""
    name = "fake_dir"
    logger = TensorBoardLogger(save_dir=tmpdir, name=name)
    with caplog.at_level(logging.WARNING):
        # the version scan finds no folder and falls back to version 0
        assert logger.version == 0
    assert "Missing logger folder:" in caplog.text
| true | true |
1c3af7863bed29b416a3e11a298c516cddaa8b47 | 407 | py | Python | nicos_virt_mlz/biodiff/setups/shutter.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_virt_mlz/biodiff/setups/shutter.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_virt_mlz/biodiff/setups/shutter.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
description = 'Shutter setup'
group = 'lowlevel'
devices = dict(
    # Both shutters are simulated two-state switches (no hardware attached).
    gammashutter = device('nicos.devices.generic.ManualSwitch',
        description = 'Gamma shutter (virtual)',
        states = ['open', 'closed'],
    ),
    photoshutter = device('nicos.devices.generic.ManualSwitch',
        description = 'Photo shutter (virtual)',
        states = ['open', 'closed'],
    ),
)
| 25.4375 | 63 | 0.60688 |
description = 'Shutter setup'
group = 'lowlevel'
devices = dict(
    # Both shutters are simulated two-state switches (no hardware attached).
    gammashutter = device('nicos.devices.generic.ManualSwitch',
        description = 'Gamma shutter (virtual)',
        states = ['open', 'closed'],
    ),
    photoshutter = device('nicos.devices.generic.ManualSwitch',
        description = 'Photo shutter (virtual)',
        states = ['open', 'closed'],
    ),
)
| true | true |
1c3af82d4aa6de435bc8f76fa7dbf08b70fa5380 | 5,756 | py | Python | inferbeddings/nli/disan/general.py | issca/inferbeddings | 80492a7aebcdcac21e758514c8af403d77e8594a | [
"MIT"
] | 33 | 2017-07-25T14:31:00.000Z | 2019-03-06T09:18:00.000Z | inferbeddings/nli/disan/general.py | issca/inferbeddings | 80492a7aebcdcac21e758514c8af403d77e8594a | [
"MIT"
] | 1 | 2017-08-22T13:49:30.000Z | 2017-08-22T13:49:30.000Z | inferbeddings/nli/disan/general.py | issca/inferbeddings | 80492a7aebcdcac21e758514c8af403d77e8594a | [
"MIT"
] | 9 | 2017-10-05T08:50:45.000Z | 2019-04-18T12:40:56.000Z | # -*- coding: utf-8 -*-
import tensorflow as tf
from functools import reduce
from operator import mul
# Sentinel magnitudes used when masking logits before a softmax: adding
# VERY_NEGATIVE_NUMBER to a position drives its probability to ~0.
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def get_last_state(rnn_out_put, mask):
    """Gather the RNN output at the final valid (mask==True) timestep.

    :param rnn_out_put: [d1,...,dn-1, max_len, d] RNN outputs
    :param mask: [d1,...,dn-1, max_len] validity mask
    :return: [d1,...,dn-1, d] output at each sequence's last valid position
    """
    flat_out = flatten(rnn_out_put, 2)   # [X, max_len, d]
    flat_mask = flatten(mask, 1)         # [X, max_len]
    # index of the last valid timestep per flattened row
    last_idx = tf.reduce_sum(tf.cast(flat_mask, tf.int32), -1) - 1   # [X]
    gather_idx = tf.stack([tf.range(tf.shape(last_idx)[0]), last_idx], axis=-1)  # [X, 2]
    picked = tf.expand_dims(tf.gather_nd(flat_out, gather_idx), -2)  # [X, 1, d]
    # restore leading batch dims, then drop the singleton time axis
    return tf.squeeze(reconstruct(picked, rnn_out_put, 2), -2)
def expand_tile(tensor, pattern, tile_num=None, scope=None):  # todo: add more func
    """Insert new axes per *pattern* and optionally tile the result.

    :param tensor: input tensor
    :param pattern: tuple/list; an 'x' entry inserts a new axis at that position
    :param tile_num: optional tuple/list of repetition counts (same length as pattern)
    :param scope: optional name scope
    :return: the expanded (and, if tile_num is given, tiled) tensor
    """
    with tf.name_scope(scope or 'expand_tile'):
        assert isinstance(pattern, (tuple, list))
        assert tile_num is None or isinstance(tile_num, (tuple, list))
        # BUGFIX: check `tile_num is None` first — the original evaluated
        # len(tile_num) before the short-circuit, raising TypeError for None.
        assert tile_num is None or len(pattern) == len(tile_num)
        idx_pattern = list([(dim, p) for dim, p in enumerate(pattern)])
        for dim, p in idx_pattern:
            if p == 'x':
                tensor = tf.expand_dims(tensor, dim)
        return tf.tile(tensor, tile_num) if tile_num is not None else tensor
def get_initializer(matrix):
    """Build a TF-style initializer that ignores its arguments and always
    returns *matrix* (useful for loading pretrained embeddings)."""
    def _initializer(shape, dtype=None, partition_info=None, **kwargs):
        return matrix
    return _initializer
def mask(val, mask, name=None):
    """Multiply *val* elementwise by *mask* (cast to float), zeroing
    masked-out entries."""
    if name is None:
        name = 'mask'
    float_mask = tf.cast(mask, 'float')
    return tf.multiply(val, float_mask, name=name)
def mask_for_high_rank(val, val_mask, name=None):
    """Like mask(), but for a mask with one fewer dimension than *val*: the
    mask is expanded on the last axis before multiplying."""
    val_mask = tf.expand_dims(val_mask, -1)
    return tf.multiply(val, tf.cast(val_mask, tf.float32), name=name or 'mask_for_high_rank')
def exp_mask(val, mask, name=None):
    """Give very negative number to unmasked elements in val.
    For example, [-3, -2, 10], [True, True, False] -> [-3, -2, -1e9].
    Typically, this effectively masks in exponential space (e.g. softmax)
    Args:
        val: values to be masked
        mask: masking boolean tensor, same shape as tensor
        name: name for output tensor
    Returns:
        Same shape as val, where some elements are very small (exponentially zero)
    """
    if name is None:
        name = "exp_mask"
    penalty = (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER
    return tf.add(val, penalty, name=name)
def exp_mask_for_high_rank(val, val_mask, name=None):
    """Like exp_mask(), but the mask has one fewer dimension than *val*: it is
    expanded on the last axis before adding the -inf-like penalty."""
    expanded_mask = tf.cast(tf.expand_dims(val_mask, -1), tf.float32)
    return tf.add(val, (1 - expanded_mask) * VERY_NEGATIVE_NUMBER,
                  name=name or 'exp_mask_for_high_rank')
def flatten(tensor, keep):
    """Collapse all but the last *keep* dimensions of *tensor* into one.

    e.g. keep=2 maps [d1, ..., dk, max_len, d] -> [d1*...*dk, max_len, d].
    Static dimension sizes are used where known; dynamic ones fall back to
    tf.shape().
    """
    fixed_shape = tensor.get_shape().as_list()
    start = len(fixed_shape) - keep
    left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
    out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
    flat = tf.reshape(tensor, out_shape)
    return flat
def reconstruct(tensor, ref, keep, dim_reduced_keep=None):
    """Inverse of flatten(): reshape *tensor* back to the leading dims of *ref*.

    The first len(ref.shape) - keep dimensions are taken from *ref*; the last
    *dim_reduced_keep* dimensions (defaults to *keep*) are taken from *tensor*.
    """
    dim_reduced_keep = dim_reduced_keep or keep
    ref_shape = ref.get_shape().as_list()  # original shape
    tensor_shape = tensor.get_shape().as_list()  # current shape
    ref_stop = len(ref_shape) - keep  # number of leading dims restored from ref
    tensor_start = len(tensor_shape) - dim_reduced_keep  # first trailing dim kept from tensor
    pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]  # leading dims (dynamic fallback)
    keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))]  # trailing dims
    target_shape = pre_shape + keep_shape
    out = tf.reshape(tensor, target_shape)
    return out
def add_wd(wd, scope=None):
    """Attach an L2 weight-decay loss (scaled by *wd*) for every trainable
    variable in *scope* to the 'losses' collection; returns the count."""
    scope = scope or tf.get_variable_scope().name
    trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    with tf.name_scope("weight_decay"):
        for var in trainable:
            decay_name = "{}-wd".format('-'.join(str(var.op.name).split('/')))
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(var), wd, name=decay_name))
    return len(trainable)
def add_wd_without_bias(wd, scope=None):
    """Like add_wd(), but skips rank<=1 variables (biases, scalars); returns
    the number of variables that received a decay term."""
    scope = scope or tf.get_variable_scope().name
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    counter = 0
    with tf.name_scope("weight_decay"):
        for var in variables:
            # rank<=1 => bias/scalar: no weight decay
            if len(var.get_shape().as_list()) <= 1: continue
            counter += 1
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                       name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
            tf.add_to_collection('losses', weight_decay)
    return counter
def add_reg_without_bias(scope=None):
    """Register every non-bias (rank>1) trainable variable in *scope* in the
    'reg_vars' collection; returns how many were registered."""
    scope = scope or tf.get_variable_scope().name
    counter = 0
    for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope):
        if len(var.get_shape().as_list()) > 1:
            tf.add_to_collection('reg_vars', var)
            counter += 1
    return counter
def add_var_reg(var):
    """Register a single variable in the 'reg_vars' collection for regularization."""
    tf.add_to_collection('reg_vars', var)
def add_wd_for_var(var, wd):
    """Add one L2 weight-decay term `wd * l2_loss(var)` to the 'losses' collection.

    The op is named after the variable with slashes replaced by dashes.
    """
    with tf.name_scope("weight_decay"):
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                   name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
        tf.add_to_collection('losses', weight_decay)
| 37.620915 | 107 | 0.653753 |
import tensorflow as tf
from functools import reduce
from operator import mul
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def get_last_state(rnn_out_put, mask):
    """Gather the last valid (unmasked) RNN output step for each sequence.

    `mask` marks valid timesteps; the index of the last truthy entry per
    sequence selects the corresponding output vector, which is reshaped
    back to the batch dims of `rnn_out_put` minus the time axis.

    Fix: the original last line was corrupted (comment stripping fused the
    assignment with `return tf.squeeze(...)`, dropping `return tf.` and
    leaving a syntax error); split back into two statements.
    """
    rnn_out_put_flatten = flatten(rnn_out_put, 2)  # collapse leading dims, keep last 2
    mask_flatten = flatten(mask, 1)  # collapse leading dims, keep last 1
    # Index of the last valid timestep per flattened sequence.
    idxs = tf.reduce_sum(tf.cast(mask_flatten, tf.int32), -1) - 1
    indices = tf.stack([tf.range(tf.shape(idxs)[0]), idxs], axis=-1)
    flatten_res = tf.expand_dims(tf.gather_nd(rnn_out_put_flatten, indices), -2)
    return tf.squeeze(reconstruct(flatten_res, rnn_out_put, 2), -2)
def expand_tile(tensor, pattern, tile_num=None, scope=None):
    """Expand dims of `tensor` where `pattern` has 'x', then optionally tile.

    `pattern` is a tuple/list in which 'x' marks axes to insert.  `tile_num`,
    if given, must have one multiplier per pattern entry and is forwarded to
    `tf.tile` after the expansion.
    """
    with tf.name_scope(scope or 'expand_tile'):
        assert isinstance(pattern, (tuple, list))
        assert isinstance(tile_num, (tuple, list)) or tile_num is None
        # Bug fix: test `tile_num is None` FIRST.  The original order
        # evaluated len(tile_num) before the short-circuit escape, so
        # passing tile_num=None raised TypeError instead of being allowed.
        assert tile_num is None or len(pattern) == len(tile_num)
        idx_pattern = list([(dim, p) for dim, p in enumerate(pattern)])
        for dim, p in idx_pattern:
            if p == 'x':
                tensor = tf.expand_dims(tensor, dim)
        return tf.tile(tensor, tile_num) if tile_num is not None else tensor
def get_initializer(matrix):
    """Return a TF-compatible initializer that always yields `matrix`.

    The returned callable accepts (and ignores) the usual initializer
    arguments: shape, dtype, partition_info and any extra keywords.
    """
    def _initializer(shape, dtype=None, partition_info=None, **kwargs):
        # All arguments are intentionally unused: constant initialization.
        return matrix
    return _initializer
def mask(val, mask, name=None):
    """Zero out entries of `val` where `mask` is falsy (elementwise multiply)."""
    op_name = 'mask' if name is None else name
    return tf.multiply(val, tf.cast(mask, 'float'), name=op_name)
def mask_for_high_rank(val, val_mask, name=None):
    """Apply a mask with one fewer dim than `val`, broadcasting over the last dim."""
    val_mask = tf.expand_dims(val_mask, -1)
    return tf.multiply(val, tf.cast(val_mask, tf.float32), name=name or 'mask_for_high_rank')
def exp_mask(val, mask, name=None):
    """Push masked-out entries of `val` towards -inf so a later softmax ignores them."""
    op_name = name if name is not None else "exp_mask"
    penalty = (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER
    return tf.add(val, penalty, name=op_name)
def exp_mask_for_high_rank(val, val_mask, name=None):
    """`exp_mask` variant for a mask with one fewer dim than `val` (broadcast last dim)."""
    val_mask = tf.expand_dims(val_mask, -1)
    return tf.add(val, (1 - tf.cast(val_mask, tf.float32)) * VERY_NEGATIVE_NUMBER,
                  name=name or 'exp_mask_for_high_rank')
def flatten(tensor, keep):
    """Collapse all but the last `keep` dims of `tensor` into one leading dim.

    Static dims are used where known; dynamic (None) dims fall back to
    `tf.shape` lookups at graph-build time.
    """
    fixed_shape = tensor.get_shape().as_list()
    start = len(fixed_shape) - keep
    # Product of the leading (collapsed) dimensions.
    left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
    out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
    flat = tf.reshape(tensor, out_shape)
    return flat
def reconstruct(tensor, ref, keep, dim_reduced_keep=None):
    """Inverse of `flatten`: restore the leading dims of `ref` onto `tensor`."""
    dim_reduced_keep = dim_reduced_keep or keep
    ref_shape = ref.get_shape().as_list()  # original (pre-flatten) shape
    tensor_shape = tensor.get_shape().as_list()  # current shape
    ref_stop = len(ref_shape) - keep  # number of leading dims to restore
    tensor_start = len(tensor_shape) - dim_reduced_keep  # first trailing dim kept
    pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
    keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))]
    target_shape = pre_shape + keep_shape
    out = tf.reshape(tensor, target_shape)
    return out
def add_wd(wd, scope=None):
    """Add an L2 weight-decay term to 'losses' for each trainable variable in `scope`.

    Returns the number of variables processed.
    """
    scope = scope or tf.get_variable_scope().name
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    counter = 0
    with tf.name_scope("weight_decay"):
        for var in variables:
            counter+=1
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                       name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
            tf.add_to_collection('losses', weight_decay)
    return counter
def add_wd_without_bias(wd, scope=None):
    """Like `add_wd`, but skip rank-0/rank-1 (bias-like) variables.

    Returns the number of variables a decay term was added for.
    """
    scope = scope or tf.get_variable_scope().name
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    counter = 0
    with tf.name_scope("weight_decay"):
        for var in variables:
            if len(var.get_shape().as_list()) <= 1: continue  # skip biases/scalars
            counter += 1
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                       name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
            tf.add_to_collection('losses', weight_decay)
    return counter
def add_reg_without_bias(scope=None):
    """Register trainable variables of rank > 1 in 'reg_vars'; return the count."""
    scope = scope or tf.get_variable_scope().name
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    counter = 0
    for var in variables:
        if len(var.get_shape().as_list()) <= 1: continue  # skip biases/scalars
        tf.add_to_collection('reg_vars', var)
        counter += 1
    return counter
def add_var_reg(var):
    """Register a single variable in the 'reg_vars' collection."""
    tf.add_to_collection('reg_vars', var)
def add_wd_for_var(var, wd):
    """Add one L2 weight-decay term `wd * l2_loss(var)` to the 'losses' collection."""
    with tf.name_scope("weight_decay"):
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                   name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
        tf.add_to_collection('losses', weight_decay)
| true | true |
1c3af8b980375785b361c0078e3e12131203e9fc | 5,121 | py | Python | detection/configs/sparse_rcnn_pvt_v2_b2_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py | cclauss/PVT | cceb465b7dfb2b7a48b39074a14a04dedab427e8 | [
"Apache-2.0"
] | 1,056 | 2021-02-24T03:26:56.000Z | 2022-03-31T01:57:48.000Z | detection/configs/sparse_rcnn_pvt_v2_b2_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py | youngwanLEE/PVT | 61d92c2a704bb35f6247e5fe957d7c9a08ed28f0 | [
"Apache-2.0"
] | 85 | 2021-02-26T02:58:29.000Z | 2022-03-15T08:32:00.000Z | detection/configs/sparse_rcnn_pvt_v2_b2_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py | youngwanLEE/PVT | 61d92c2a704bb35f6247e5fe957d7c9a08ed28f0 | [
"Apache-2.0"
] | 173 | 2021-02-25T02:21:41.000Z | 2022-03-19T04:36:22.000Z | _base_ = [
'_base_/datasets/coco_detection.py',
'_base_/schedules/schedule_1x.py',
'_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 300
model = dict(
type='SparseRCNN',
# pretrained='pretrained/pvt_v2_b2.pth',
pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth',
backbone=dict(
type='pvt_v2_b2',
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[64, 128, 320, 512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou',
weight=2.0)),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025 / 1.4, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(pipeline=train_pipeline))
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| 38.503759 | 85 | 0.507909 | _base_ = [
'_base_/datasets/coco_detection.py',
'_base_/schedules/schedule_1x.py',
'_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 300
model = dict(
type='SparseRCNN',
pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth',
backbone=dict(
type='pvt_v2_b2',
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[64, 128, 320, 512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou',
weight=2.0)),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# Optimizer: AdamW with a reduced learning rate and gradient clipping.
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025 / 1.4, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# One image per GPU, multi-scale crop-augmented training pipeline.
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(pipeline=train_pipeline))
# 3x schedule: step LR decay at epochs 27 and 33, 36 epochs total.
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| true | true |
1c3afa472f2951ad9c8d105d75fd9458ae8aeedd | 50,638 | py | Python | tests/helpers/test_template.py | dcramer/home-assistant | c1936f6fe4b28a3899c31524027c06e7b31a050b | [
"Apache-2.0"
] | 1 | 2020-01-03T03:35:49.000Z | 2020-01-03T03:35:49.000Z | tests/helpers/test_template.py | pickerin/home-assistant | 4bf15a07a34c24e9c4939cd05ea2c86d8a013b2f | [
"Apache-2.0"
] | 5 | 2021-02-08T20:54:44.000Z | 2022-03-12T00:48:52.000Z | tests/helpers/test_template.py | pickerin/home-assistant | 4bf15a07a34c24e9c4939cd05ea2c86d8a013b2f | [
"Apache-2.0"
] | null | null | null | """Test Home Assistant template helper methods."""
from datetime import datetime
import math
import random
from unittest.mock import patch
import pytest
import pytz
from homeassistant.components import group
from homeassistant.const import (
LENGTH_METERS,
MASS_GRAMS,
MATCH_ALL,
PRESSURE_PA,
TEMP_CELSIUS,
VOLUME_LITERS,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
def _set_up_units(hass):
    """Set up the tests with a fixed custom unit system.

    Pinning the units makes conversion-dependent template renders deterministic.
    """
    hass.config.units = UnitSystem(
        "custom", TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS, MASS_GRAMS, PRESSURE_PA
    )
def render_to_info(hass, template_str, variables=None):
    """Create render info from template."""
    return template.Template(template_str, hass).async_render_to_info(variables)
def extract_entities(hass, template_str, variables=None):
    """Extract entities from a template.

    Also asserts the render tracked no whole-domain dependencies before
    returning the tracked entity set.
    """
    info = render_to_info(hass, template_str, variables)
    # pylint: disable=protected-access
    assert not hasattr(info, "_domains")
    return info._entities
def assert_result_info(info, result, entities=None, domains=None, all_states=False):
    """Check result info.

    Verifies the rendered result plus the entity/domain dependency tracking
    recorded on the RenderInfo object.  Passing entities/domains as None
    asserts that nothing of that kind was tracked.
    """
    assert info.result == result
    # pylint: disable=protected-access
    assert info._all_states == all_states
    assert info.filter_lifecycle("invalid_entity_name.somewhere") == all_states
    if entities is not None:
        assert info._entities == frozenset(entities)
        # Generator instead of a materialized list (ruff C419): same result,
        # short-circuits on the first failing entity.
        assert all(info.filter(entity) for entity in entities)
        assert not info.filter("invalid_entity_name.somewhere")
    else:
        assert not info._entities
    if domains is not None:
        assert info._domains == frozenset(domains)
        assert all(info.filter_lifecycle(domain + ".entity") for domain in domains)
    else:
        assert not hasattr(info, "_domains")
def test_template_equality():
"""Test template comparison and hashing."""
template_one = template.Template("{{ template_one }}")
template_one_1 = template.Template("{{ template_" + "one }}")
template_two = template.Template("{{ template_two }}")
assert template_one == template_one_1
assert template_one != template_two
assert hash(template_one) == hash(template_one_1)
assert hash(template_one) != hash(template_two)
assert str(template_one_1) == 'Template("{{ template_one }}")'
with pytest.raises(TypeError):
template.Template(["{{ template_one }}"])
def test_invalid_template(hass):
"""Invalid template raises error."""
tmpl = template.Template("{{", hass)
with pytest.raises(TemplateError):
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
info = tmpl.async_render_to_info()
with pytest.raises(TemplateError):
assert info.result == "impossible"
tmpl = template.Template("{{states(keyword)}}", hass)
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
def test_referring_states_by_entity_id(hass):
"""Test referring states by entity id."""
hass.states.async_set("test.object", "happy")
assert (
template.Template("{{ states.test.object.state }}", hass).async_render()
== "happy"
)
assert (
template.Template('{{ states["test.object"].state }}', hass).async_render()
== "happy"
)
assert (
template.Template('{{ states("test.object") }}', hass).async_render() == "happy"
)
def test_invalid_entity_id(hass):
"""Test referring states by entity id."""
with pytest.raises(TemplateError):
template.Template('{{ states["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states.test["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states["invalid/domain"] }}', hass).async_render()
def test_raise_exception_on_error(hass):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template("{{ invalid_syntax").ensure_valid()
def test_iterating_all_states(hass):
"""Test iterating all states."""
tmpl_str = "{% for state in states %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", all_states=True)
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info, "10happy", entities=["test.object", "sensor.temperature"], all_states=True
)
def test_iterating_domain_states(hass):
"""Test iterating domain states."""
tmpl_str = "{% for state in states.sensor %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.back_door", "open")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info,
"open10",
entities=["sensor.back_door", "sensor.temperature"],
domains=["sensor"],
)
def test_float(hass):
"""Test float."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template(
"{{ float(states.sensor.temperature.state) }}", hass
).async_render()
== "12.0"
)
assert (
template.Template(
"{{ float(states.sensor.temperature.state) > 11 }}", hass
).async_render()
== "True"
)
assert (
template.Template("{{ float('forgiving') }}", hass).async_render()
== "forgiving"
)
def test_rounding_value(hass):
"""Test rounding value."""
hass.states.async_set("sensor.temperature", 12.78)
assert (
template.Template(
"{{ states.sensor.temperature.state | round(1) }}", hass
).async_render()
== "12.8"
)
assert (
template.Template(
"{{ states.sensor.temperature.state | multiply(10) | round }}", hass
).async_render()
== "128"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}', hass
).async_render()
== "12.7"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}', hass
).async_render()
== "12.8"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "half") }}', hass
).async_render()
== "13.0"
)
def test_rounding_value_get_original_value_on_error(hass):
"""Test rounding value get original value on error."""
assert template.Template("{{ None | round }}", hass).async_render() == "None"
assert (
template.Template('{{ "no_number" | round }}', hass).async_render()
== "no_number"
)
def test_multiply(hass):
    """Test multiply."""
    cases = {None: "None", 10: "100", '"abcd"': "abcd"}
    for raw, expected in cases.items():
        tpl = template.Template("{{ %s | multiply(10) | round }}" % raw, hass)
        assert tpl.async_render() == expected
def test_logarithm(hass):
"""Test logarithm."""
tests = [
(4, 2, "2.0"),
(1000, 10, "3.0"),
(math.e, "", "1.0"),
('"invalid"', "_", "invalid"),
(10, '"invalid"', "10.0"),
]
for value, base, expected in tests:
assert (
template.Template(
"{{ %s | log(%s) | round(1) }}" % (value, base), hass
).async_render()
== expected
)
assert (
template.Template(
"{{ log(%s, %s) | round(1) }}" % (value, base), hass
).async_render()
== expected
)
def test_sine(hass):
"""Test sine."""
tests = [
(0, "0.0"),
(math.pi / 2, "1.0"),
(math.pi, "0.0"),
(math.pi * 1.5, "-1.0"),
(math.pi / 10, "0.309"),
('"duck"', "duck"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sin | round(3) }}" % value, hass).async_render()
== expected
)
def test_cos(hass):
"""Test cosine."""
tests = [
(0, "1.0"),
(math.pi / 2, "0.0"),
(math.pi, "-1.0"),
(math.pi * 1.5, "-0.0"),
(math.pi / 10, "0.951"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | cos | round(3) }}" % value, hass).async_render()
== expected
)
def test_tan(hass):
"""Test tangent."""
tests = [
(0, "0.0"),
(math.pi, "-0.0"),
(math.pi / 180 * 45, "1.0"),
(math.pi / 180 * 90, "1.633123935319537e+16"),
(math.pi / 180 * 135, "-1.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | tan | round(3) }}" % value, hass).async_render()
== expected
)
def test_sqrt(hass):
"""Test square root."""
tests = [
(0, "0.0"),
(1, "1.0"),
(2, "1.414"),
(10, "3.162"),
(100, "10.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sqrt | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_sine(hass):
"""Test arcus sine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "-1.571"),
(-0.5, "-0.524"),
(0.0, "0.0"),
(0.5, "0.524"),
(1.0, "1.571"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | asin | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_cos(hass):
"""Test arcus cosine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "3.142"),
(-0.5, "2.094"),
(0.0, "1.571"),
(0.5, "1.047"),
(1.0, "0.0"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | acos | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan(hass):
"""Test arcus tangent."""
tests = [
(-10.0, "-1.471"),
(-2.0, "-1.107"),
(-1.0, "-0.785"),
(-0.5, "-0.464"),
(0.0, "0.0"),
(0.5, "0.464"),
(1.0, "0.785"),
(2.0, "1.107"),
(10.0, "1.471"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | atan | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan2(hass):
"""Test two parameter version of arcus tangent."""
tests = [
(-10.0, -10.0, "-2.356"),
(-10.0, 0.0, "-1.571"),
(-10.0, 10.0, "-0.785"),
(0.0, -10.0, "3.142"),
(0.0, 0.0, "0.0"),
(0.0, 10.0, "0.0"),
(10.0, -10.0, "2.356"),
(10.0, 0.0, "1.571"),
(10.0, 10.0, "0.785"),
(-4.0, 3.0, "-0.927"),
(-1.0, 2.0, "-0.464"),
(2.0, 1.0, "1.107"),
('"duck"', '"goose"', "('duck', 'goose')"),
]
for y, x, expected in tests:
assert (
template.Template(
"{{ (%s, %s) | atan2 | round(3) }}" % (y, x), hass
).async_render()
== expected
)
assert (
template.Template(
"{{ atan2(%s, %s) | round(3) }}" % (y, x), hass
).async_render()
== expected
)
def test_strptime(hass):
"""Test the parse timestamp method."""
tests = [
("2016-10-19 15:22:05.588122 UTC", "%Y-%m-%d %H:%M:%S.%f %Z", None),
("2016-10-19 15:22:05.588122+0100", "%Y-%m-%d %H:%M:%S.%f%z", None),
("2016-10-19 15:22:05.588122", "%Y-%m-%d %H:%M:%S.%f", None),
("2016-10-19", "%Y-%m-%d", None),
("2016", "%Y", None),
("15:22:05", "%H:%M:%S", None),
("1469119144", "%Y", "1469119144"),
("invalid", "%Y", "invalid"),
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = "{{ strptime('%s', '%s') }}" % (inp, fmt)
assert template.Template(temp, hass).async_render() == str(expected)
def test_timestamp_custom(hass):
    """Test the timestamps to custom filter.

    Fix: the original checked `if fmt:` before `elif fmt and local:`, making
    the two-argument branch unreachable dead code (any truthy fmt always took
    the first branch).  The branches are reordered so a (fmt, local) pair
    exercises the two-argument filter form; for the data below the rendered
    output is unchanged because local=True is the filter's default.
    """
    now = dt_util.utcnow()
    tests = [
        (None, None, None, "None"),
        (1469119144, None, True, "2016-07-21 16:39:04"),
        (1469119144, "%Y", True, "2016"),
        (1469119144, "invalid", True, "invalid"),
        (dt_util.as_timestamp(now), None, False, now.strftime("%Y-%m-%d %H:%M:%S")),
    ]
    for inp, fmt, local, out in tests:
        if fmt and local:
            fil = "timestamp_custom('{0}', {1})".format(fmt, local)
        elif fmt:
            fil = "timestamp_custom('{}')".format(fmt)
        else:
            fil = "timestamp_custom"
        assert (
            template.Template("{{ %s | %s }}" % (inp, fil), hass).async_render() == out
        )
def test_timestamp_local(hass):
"""Test the timestamps to local filter."""
tests = {None: "None", 1469119144: "2016-07-21 16:39:04"}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_local }}" % inp, hass).async_render()
== out
)
def test_to_json(hass):
"""Test the object to JSON string filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = '{"Foo": "Bar"}'
actual_result = template.Template(
"{{ {'Foo': 'Bar'} | to_json }}", hass
).async_render()
assert actual_result == expected_result
def test_from_json(hass):
"""Test the JSON string to object filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = "Bar"
actual_result = template.Template(
'{{ (\'{"Foo": "Bar"}\' | from_json).Foo }}', hass
).async_render()
assert actual_result == expected_result
def test_min(hass):
"""Test the min filter."""
assert template.Template("{{ [1, 2, 3] | min }}", hass).async_render() == "1"
def test_max(hass):
"""Test the max filter."""
assert template.Template("{{ [1, 2, 3] | max }}", hass).async_render() == "3"
def test_ord(hass):
"""Test the ord filter."""
assert template.Template('{{ "d" | ord }}', hass).async_render() == "100"
def test_base64_encode(hass):
"""Test the base64_encode filter."""
assert (
template.Template('{{ "homeassistant" | base64_encode }}', hass).async_render()
== "aG9tZWFzc2lzdGFudA=="
)
def test_base64_decode(hass):
"""Test the base64_decode filter."""
assert (
template.Template(
'{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}', hass
).async_render()
== "homeassistant"
)
def test_ordinal(hass):
"""Test the ordinal filter."""
tests = [
(1, "1st"),
(2, "2nd"),
(3, "3rd"),
(4, "4th"),
(5, "5th"),
(12, "12th"),
(100, "100th"),
(101, "101st"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | ordinal }}" % value, hass).async_render()
== expected
)
def test_timestamp_utc(hass):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: "None",
1469119144: "2016-07-21 16:39:04",
dt_util.as_timestamp(now): now.strftime("%Y-%m-%d %H:%M:%S"),
}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_utc }}" % inp, hass).async_render()
== out
)
def test_as_timestamp(hass):
"""Test the as_timestamp function."""
assert (
template.Template('{{ as_timestamp("invalid") }}', hass).async_render()
== "None"
)
hass.mock = None
assert (
template.Template("{{ as_timestamp(states.mock) }}", hass).async_render()
== "None"
)
tpl = (
'{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", '
'"%Y-%m-%dT%H:%M:%S%z")) }}'
)
assert template.Template(tpl, hass).async_render() == "1706951424.0"
@patch.object(random, "choice")
def test_random_every_time(test_choice, hass):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template("{{ [1,2] | random }}", hass)
test_choice.return_value = "foo"
assert tpl.async_render() == "foo"
test_choice.return_value = "bar"
assert tpl.async_render() == "bar"
def test_passing_vars_as_keywords(hass):
"""Test passing variables as keywords."""
assert template.Template("{{ hello }}", hass).async_render(hello=127) == "127"
def test_passing_vars_as_vars(hass):
"""Test passing variables as variables."""
assert template.Template("{{ hello }}", hass).async_render({"hello": 127}) == "127"
def test_passing_vars_as_list(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": ["foo", "bar"]}
)
== "['foo', 'bar']"
)
def test_passing_vars_as_list_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello[1] }}", hass), {"hello": ["foo", "bar"]}
)
== "bar"
)
def test_passing_vars_as_dict_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello.foo }}", hass), {"hello": {"foo": "bar"}}
)
== "bar"
)
def test_passing_vars_as_dict(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": {"foo": "bar"}}
)
== "{'foo': 'bar'}"
)
def test_render_with_possible_json_value_with_valid_json(hass):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template("{{ value_json.hello }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_with_invalid_json(hass):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template("{{ value_json }}", hass)
assert tpl.async_render_with_possible_json_value("{ I AM NOT JSON }") == ""
def test_render_with_possible_json_value_with_template_error_value(hass):
"""Render with possible JSON value with template error value."""
tpl = template.Template("{{ non_existing.variable }}", hass)
assert tpl.async_render_with_possible_json_value("hello", "-") == "-"
def test_render_with_possible_json_value_with_missing_json_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.goodbye }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == ""
def test_render_with_possible_json_value_valid_with_is_defined(hass):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template("{{ value_json.hello|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_undefined_json(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert (
tpl.async_render_with_possible_json_value('{"hello": "world"}')
== '{"hello": "world"}'
)
def test_render_with_possible_json_value_undefined_json_error_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}', "") == ""
def test_render_with_possible_json_value_non_string_value(hass):
"""Render with possible JSON value with non-string value."""
tpl = template.Template(
"""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""",
hass,
)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(pytz.utc.localize(value))
assert tpl.async_render_with_possible_json_value(value) == expected
def test_if_state_exists(hass):
"""Test if state exists works."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"{% if states.test.object %}exists{% else %}not exists{% endif %}", hass
)
assert tpl.async_render() == "exists"
def test_is_state(hass):
"""Test is_state method."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state("test.noobject", "available") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_is_state_attr(hass):
"""Test is_state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state_attr("test.noobject", "mode", "on") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_state_attr(hass):
"""Test state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ state_attr("test.noobject", "mode") == None }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_states_function(hass):
    """Test using states as a function."""
    hass.states.async_set("test.object", "available")
    existing = template.Template('{{ states("test.object") }}', hass)
    assert existing.async_render() == "available"
    # An entity that was never set renders as "unknown" rather than raising.
    missing = template.Template('{{ states("test.object2") }}', hass)
    assert missing.async_render() == "unknown"
# Patch is_safe_callable to always return True so the template
# environment permits calling now() inside the template.
@patch(
    "homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
    return_value=True,
)
def test_now(mock_is_safe, hass):
    """Test now method."""
    now = dt_util.now()
    # Freeze dt_util.now so the template render and the expected value
    # observe exactly the same instant.
    with patch("homeassistant.util.dt.now", return_value=now):
        assert (
            now.isoformat()
            == template.Template("{{ now().isoformat() }}", hass).async_render()
        )
# Patch is_safe_callable to always return True so the template
# environment permits calling utcnow() inside the template.
@patch(
    "homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
    return_value=True,
)
def test_utcnow(mock_is_safe, hass):
    """Test utcnow method."""
    now = dt_util.utcnow()
    # Freeze dt_util.utcnow so the render and the expected value agree.
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        assert (
            now.isoformat()
            == template.Template("{{ utcnow().isoformat() }}", hass).async_render()
        )
def test_regex_match(hass):
"""Test regex_match method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'home assistant test' | regex_match('Home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another home assistant test' | regex_match('home') }}
""",
hass,
)
assert tpl.async_render() == "False"
tpl = template.Template(
"""
{{ ['home assistant test'] | regex_match('.*assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_search(hass):
"""Test regex_search method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'home assistant test' | regex_search('Home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another home assistant test' | regex_search('home') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ ['home assistant test'] | regex_search('assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_replace(hass):
"""Test regex_replace method."""
tpl = template.Template(
r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""",
hass,
)
assert tpl.async_render() == "World"
tpl = template.Template(
"""
{{ ['home hinderant test'] | regex_replace('hinder', 'assist') }}
""",
hass,
)
assert tpl.async_render() == "['home assistant test']"
def test_regex_findall_index(hass):
"""Test regex_findall_index method."""
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""",
hass,
)
assert tpl.async_render() == "JFK"
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
tpl = template.Template(
"""
{{ ['JFK', 'LHR'] | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
def test_bitwise_and(hass):
"""Test bitwise_and method."""
tpl = template.Template(
"""
{{ 8 | bitwise_and(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 8)
tpl = template.Template(
"""
{{ 10 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 & 2)
tpl = template.Template(
"""
{{ 8 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 2)
def test_bitwise_or(hass):
"""Test bitwise_or method."""
tpl = template.Template(
"""
{{ 8 | bitwise_or(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 8)
tpl = template.Template(
"""
{{ 10 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 | 2)
tpl = template.Template(
"""
{{ 8 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 2)
def test_distance_function_with_1_state(hass):
    """Test distance function with 1 state."""
    _set_up_units(hass)
    location = {"latitude": 32.87336, "longitude": -117.22943}
    hass.states.async_set("test.object", "happy", location)
    rendered = template.Template(
        "{{ distance(states.test.object) | round }}", hass
    ).async_render()
    assert rendered == "187"
def test_distance_function_with_2_states(hass):
    """Test distance function with 2 states."""
    _set_up_units(hass)
    hass.states.async_set(
        "test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
    )
    home = {"latitude": hass.config.latitude, "longitude": hass.config.longitude}
    hass.states.async_set("test.object_2", "happy", home)
    tpl = template.Template(
        "{{ distance(states.test.object, states.test.object_2) | round }}", hass
    )
    assert tpl.async_render() == "187"
def test_distance_function_with_1_coord(hass):
    """Test distance function with 1 coord."""
    _set_up_units(hass)
    rendered = template.Template(
        '{{ distance("32.87336", "-117.22943") | round }}', hass
    ).async_render()
    assert rendered == "187"
def test_distance_function_with_2_coords(hass):
"""Test distance function with 2 coords."""
_set_up_units(hass)
assert (
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (hass.config.latitude, hass.config.longitude),
hass,
).async_render()
== "187"
)
def test_distance_function_with_1_state_1_coord(hass):
"""Test distance function with 1 state 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) ' "| round }}",
hass,
)
assert tpl.async_render() == "187"
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") ' "| round }}",
hass,
)
assert tpl2.async_render() == "187"
def test_distance_function_return_none_if_invalid_state(hass):
    """Test distance function return None if invalid state."""
    # A state with a latitude but no longitude has no usable location.
    hass.states.async_set("test.object_2", "happy", {"latitude": 10})
    rendered = template.Template(
        "{{ distance(states.test.object_2) | round }}", hass
    ).async_render()
    assert rendered == "None"
def test_distance_function_return_none_if_invalid_coord(hass):
    """Test distance function return None if invalid coord."""
    # Non-numeric coordinates and a missing second coordinate both fail.
    for expr in ('{{ distance("123", "abc") }}', '{{ distance("123") }}'):
        assert template.Template(expr, hass).async_render() == "None"
    hass.states.async_set(
        "test.object_2",
        "happy",
        {"latitude": hass.config.latitude, "longitude": hass.config.longitude},
    )
    tpl = template.Template('{{ distance("123", states.test_object_2) }}', hass)
    assert tpl.async_render() == "None"
def test_distance_function_with_2_entity_ids(hass):
    """Test distance function with 2 entity ids."""
    _set_up_units(hass)
    hass.states.async_set(
        "test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
    )
    home = {"latitude": hass.config.latitude, "longitude": hass.config.longitude}
    hass.states.async_set("test.object_2", "happy", home)
    rendered = template.Template(
        '{{ distance("test.object", "test.object_2") | round }}', hass
    ).async_render()
    assert rendered == "187"
def test_distance_function_with_1_entity_1_coord(hass):
"""Test distance function with 1 entity_id and 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}', hass
)
assert tpl.async_render() == "187"
def test_closest_function_home_vs_domain(hass):
"""Test closest function home vs domain."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_test_domain.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template(
"{{ closest(states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.object"
)
assert (
template.Template(
"{{ (states.test_domain | closest).entity_id }}", hass
).async_render()
== "test_domain.object"
)
def test_closest_function_home_vs_all_states(hass):
"""Test closest function home vs all states."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain_2.and_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
assert (
template.Template("{{ (states | closest).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
async def test_closest_function_home_vs_group_entity_id(hass):
"""Test closest function home vs group entity id."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_closest_function_home_vs_group_state(hass):
"""Test closest function home vs group state."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}")
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_expand(hass):
"""Test expand function."""
info = render_to_info(hass, "{{ expand('test.object') }}")
assert_result_info(info, "[]", ["test.object"])
info = render_to_info(hass, "{{ expand(56) }}")
assert_result_info(info, "[]")
hass.states.async_set("test.object", "happy")
info = render_to_info(
hass, "{{ expand('test.object') | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", [])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "", [], ["group"])
await group.Group.async_create_group(hass, "new group", ["test.object"])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", ["group.new_group"], ["group"])
info = render_to_info(
hass,
"{{ expand('group.new_group', 'test.object')"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass,
"{{ ['group.new_group', 'test.object'] | expand"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
def test_closest_function_to_coord(hass):
"""Test closest function to coord."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
tpl = template.Template(
'{{ (states.test_domain | closest("%s", %s)).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
def test_closest_function_to_entity_id(hass):
"""Test closest function to entity id."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
info = render_to_info(
hass,
"{{ closest(zone, states.test_domain).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
info = render_to_info(
hass,
"{{ ([states.test_domain, 'test_domain.closest_zone'] "
"| closest(zone)).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
def test_closest_function_to_state(hass):
"""Test closest function to state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
assert (
template.Template(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.closest_zone"
)
def test_closest_function_invalid_state(hass):
"""Test closest function invalid state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
for state in ("states.zone.non_existing", '"zone.non_existing"'):
assert (
template.Template("{{ closest(%s, states) }}" % state, hass).async_render()
== "None"
)
def test_closest_function_state_with_invalid_location(hass):
"""Test closest function state with invalid location."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{"latitude": "invalid latitude", "longitude": hass.config.longitude + 0.1},
)
assert (
template.Template(
"{{ closest(states.test_domain.closest_home, states) }}", hass
).async_render()
== "None"
)
def test_closest_function_invalid_coordinates(hass):
"""Test closest function invalid coordinates."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
assert (
template.Template(
'{{ closest("invalid", "coord", states) }}', hass
).async_render()
== "None"
)
assert (
template.Template(
'{{ states | closest("invalid", "coord") }}', hass
).async_render()
== "None"
)
def test_closest_function_no_location_states(hass):
    """Test closest function without location states."""
    # With no located entities, closest() has nothing to pick; the
    # entity_id attribute of the result renders as an empty string.
    tpl = template.Template("{{ closest(states).entity_id }}", hass)
    assert tpl.async_render() == ""
def test_extract_entities_none_exclude_stuff(hass):
"""Test extract entities function with none or exclude stuff."""
assert template.extract_entities(None) == []
assert template.extract_entities("mdi:water") == []
assert (
template.extract_entities(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}"
)
== MATCH_ALL
)
assert (
template.extract_entities('{{ distance("123", states.test_object_2) }}')
== MATCH_ALL
)
def test_extract_entities_no_match_entities(hass):
"""Test extract entities function with none entities stuff."""
assert (
template.extract_entities("{{ value_json.tst | timestamp_custom('%Y' True) }}")
== MATCH_ALL
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
def test_generate_filter_iterators(hass):
"""Test extract entities function with none entities stuff."""
info = render_to_info(
hass,
"""
{% for state in states %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", all_states=True)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
# Don't need the entity because the state is not accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor", domains=["sensor"])
# But we do here because the state gets accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=off,", ["sensor.test_sensor"], ["sensor"]
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.attributes.attr }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=value,", ["sensor.test_sensor"], ["sensor"]
)
def test_generate_select(hass):
"""Test extract entities function with none entities stuff."""
template_str = """
{{ states.sensor|selectattr("state","equalto","off")
|join(",", attribute="entity_id") }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "", [], ["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
hass.states.async_set("sensor.test_sensor_on", "on")
info = tmp.async_render_to_info()
assert_result_info(
info,
"sensor.test_sensor",
["sensor.test_sensor", "sensor.test_sensor_on"],
["sensor"],
)
def test_extract_entities_match_entities(hass):
"""Test extract entities function with entities stuff."""
assert (
template.extract_entities(
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
"""
)
== ["device_tracker.phone_1"]
)
assert (
template.extract_entities(
"""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
"""
)
== ["binary_sensor.garage_door"]
)
assert (
template.extract_entities(
"""
{{ states("binary_sensor.garage_door") }}
"""
)
== ["binary_sensor.garage_door"]
)
hass.states.async_set("device_tracker.phone_2", "not_home", {"battery": 20})
assert (
template.extract_entities(
"""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
"""
)
== ["device_tracker.phone_2"]
)
assert sorted(["device_tracker.phone_1", "device_tracker.phone_2"]) == sorted(
template.extract_entities(
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
"""
)
)
assert sorted(["sensor.pick_humidity", "sensor.pick_temperature"]) == sorted(
template.extract_entities(
"""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
"""
)
)
assert sorted(
["sensor.luftfeuchtigkeit_mean", "input_number.luftfeuchtigkeit"]
) == sorted(
template.extract_entities(
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}"
)
)
def test_extract_entities_with_variables(hass):
"""Test extract entities function with variables and entities stuff."""
hass.states.async_set("input_boolean.switch", "on")
assert {"input_boolean.switch"} == extract_entities(
hass, "{{ is_state('input_boolean.switch', 'off') }}", {}
)
assert {"input_boolean.switch"} == extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
assert {"no_state"} == extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "no_state"}
)
assert {"input_boolean.switch"} == extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "input_boolean.switch"}
)
assert {"input_boolean.switch"} == extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
hass.states.async_set("media_player.livingroom", "off")
assert {"media_player.livingroom"} == extract_entities(
hass,
"{{ is_state('media_player.' ~ where , 'playing') }}",
{"where": "livingroom"},
)
def test_jinja_namespace(hass):
    """Test Jinja's namespace command can be used."""
    template_str = (
        "{% set ns = namespace(a_key='') %}"
        "{% set ns.a_key = states.sensor.dummy.state %}"
        "{{ ns.a_key }}"
    )
    tpl = template.Template(template_str, hass)
    # The same compiled template reflects subsequent state changes.
    for state in ("a value", "another value"):
        hass.states.async_set("sensor.dummy", state)
        assert tpl.async_render() == state
def test_state_with_unit(hass):
    """Test the state_with_unit property helper."""
    hass.states.async_set("sensor.test", "23", {"unit_of_measurement": "beers"})
    hass.states.async_set("sensor.test2", "wow")
    cases = [
        # Unit attribute present: appended after the state.
        ("{{ states.sensor.test.state_with_unit }}", "23 beers"),
        # No unit attribute: bare state.
        ("{{ states.sensor.test2.state_with_unit }}", "wow"),
        # Works while iterating all states.
        ("{% for state in states %}{{ state.state_with_unit }} {% endfor %}", "23 beers wow"),
        # Missing entity renders empty.
        ("{{ states.sensor.non_existing.state_with_unit }}", ""),
    ]
    for template_str, expected in cases:
        assert template.Template(template_str, hass).async_render() == expected
def test_length_of_states(hass):
    """Test fetching the length of states."""
    for entity_id, state in (
        ("sensor.test", "23"),
        ("sensor.test2", "wow"),
        ("climate.test2", "cooling"),
    ):
        hass.states.async_set(entity_id, state)
    # All states vs. a single domain's states.
    assert template.Template("{{ states | length }}", hass).async_render() == "3"
    assert template.Template("{{ states.sensor | length }}", hass).async_render() == "2"
def test_render_complex_handling_non_template_values(hass):
    """Test that we can render non-template fields."""
    # Non-Template values pass through untouched; Template values render.
    source = {True: 1, False: template.Template("{{ hello }}", hass)}
    rendered = template.render_complex(source, {"hello": 2})
    assert rendered == {True: 1, False: "2"}
from datetime import datetime
import math
import random
from unittest.mock import patch
import pytest
import pytz
from homeassistant.components import group
from homeassistant.const import (
LENGTH_METERS,
MASS_GRAMS,
MATCH_ALL,
PRESSURE_PA,
TEMP_CELSIUS,
VOLUME_LITERS,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
def _set_up_units(hass):
    """Install a custom metric-style unit system on the hass config."""
    hass.config.units = UnitSystem(
        "custom", TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS, MASS_GRAMS, PRESSURE_PA
    )
def render_to_info(hass, template_str, variables=None):
    """Create a Template and render it, returning the render-info object."""
    tmp = template.Template(template_str, hass)
    return tmp.async_render_to_info(variables)
def extract_entities(hass, template_str, variables=None):
    """Render the template and return the set of entities it referenced."""
    info = render_to_info(hass, template_str, variables)
    # The render must not have fallen back to whole-domain tracking,
    # otherwise the entity set alone is not meaningful.
    assert not hasattr(info, "_domains")
    return info._entities
def assert_result_info(info, result, entities=None, domains=None, all_states=False):
    """Check a render-info object against the expected result and tracking data."""
    assert info.result == result
    assert info._all_states == all_states
    # When all states are tracked, even an unknown entity passes the
    # lifecycle filter; otherwise it must not.
    assert info.filter_lifecycle("invalid_entity_name.somewhere") == all_states
    if entities is not None:
        assert info._entities == frozenset(entities)
        # Every expected entity passes the filter; unknown ones do not.
        assert all([info.filter(entity) for entity in entities])
        assert not info.filter("invalid_entity_name.somewhere")
    else:
        assert not info._entities
    if domains is not None:
        assert info._domains == frozenset(domains)
        assert all([info.filter_lifecycle(domain + ".entity") for domain in domains])
    else:
        # No domain tracking at all when no domains were expected.
        assert not hasattr(info, "_domains")
def test_template_equality():
    """Template equality and hashing are determined by the template text."""
    template_one = template.Template("{{ template_one }}")
    # Same text built from different literals must still compare equal.
    template_one_1 = template.Template("{{ template_" + "one }}")
    template_two = template.Template("{{ template_two }}")
    assert template_one == template_one_1
    assert template_one != template_two
    assert hash(template_one) == hash(template_one_1)
    assert hash(template_one) != hash(template_two)
    assert str(template_one_1) == 'Template("{{ template_one }}")'
    # Only a string is accepted as the template source.
    with pytest.raises(TypeError):
        template.Template(["{{ template_one }}"])
def test_invalid_template(hass):
    """Malformed templates raise TemplateError at every entry point."""
    tmpl = template.Template("{{", hass)
    with pytest.raises(TemplateError):
        tmpl.ensure_valid()
    with pytest.raises(TemplateError):
        tmpl.async_render()
    # render_to_info defers the error until the result is accessed.
    info = tmpl.async_render_to_info()
    with pytest.raises(TemplateError):
        assert info.result == "impossible"
    # Syntactically valid, but fails at render time (undefined variable).
    tmpl = template.Template("{{states(keyword)}}", hass)
    tmpl.ensure_valid()
    with pytest.raises(TemplateError):
        tmpl.async_render()
def test_referring_states_by_entity_id(hass):
hass.states.async_set("test.object", "happy")
assert (
template.Template("{{ states.test.object.state }}", hass).async_render()
== "happy"
)
assert (
template.Template('{{ states["test.object"].state }}', hass).async_render()
== "happy"
)
assert (
template.Template('{{ states("test.object") }}', hass).async_render() == "happy"
)
def test_invalid_entity_id(hass):
with pytest.raises(TemplateError):
template.Template('{{ states["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states.test["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states["invalid/domain"] }}', hass).async_render()
def test_raise_exception_on_error(hass):
with pytest.raises(TemplateError):
template.Template("{{ invalid_syntax").ensure_valid()
def test_iterating_all_states(hass):
tmpl_str = "{% for state in states %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", all_states=True)
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info, "10happy", entities=["test.object", "sensor.temperature"], all_states=True
)
def test_iterating_domain_states(hass):
tmpl_str = "{% for state in states.sensor %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.back_door", "open")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info,
"open10",
entities=["sensor.back_door", "sensor.temperature"],
domains=["sensor"],
)
def test_float(hass):
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template(
"{{ float(states.sensor.temperature.state) }}", hass
).async_render()
== "12.0"
)
assert (
template.Template(
"{{ float(states.sensor.temperature.state) > 11 }}", hass
).async_render()
== "True"
)
assert (
template.Template("{{ float('forgiving') }}", hass).async_render()
== "forgiving"
)
def test_rounding_value(hass):
hass.states.async_set("sensor.temperature", 12.78)
assert (
template.Template(
"{{ states.sensor.temperature.state | round(1) }}", hass
).async_render()
== "12.8"
)
assert (
template.Template(
"{{ states.sensor.temperature.state | multiply(10) | round }}", hass
).async_render()
== "128"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}', hass
).async_render()
== "12.7"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}', hass
).async_render()
== "12.8"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "half") }}', hass
).async_render()
== "13.0"
)
def test_rounding_value_get_original_value_on_error(hass):
    """The round filter passes non-numeric input through unchanged."""
    assert template.Template("{{ None | round }}", hass).async_render() == "None"
    rendered = template.Template('{{ "no_number" | round }}', hass).async_render()
    assert rendered == "no_number"
def test_multiply(hass):
    """The multiply filter scales numbers and passes bad input through."""
    cases = [(None, "None"), (10, "100"), ('"abcd"', "abcd")]
    for value, expected in cases:
        tpl = template.Template("{{ %s | multiply(10) | round }}" % value, hass)
        assert tpl.async_render() == expected
def test_logarithm(hass):
tests = [
(4, 2, "2.0"),
(1000, 10, "3.0"),
(math.e, "", "1.0"),
('"invalid"', "_", "invalid"),
(10, '"invalid"', "10.0"),
]
for value, base, expected in tests:
assert (
template.Template(
"{{ %s | log(%s) | round(1) }}" % (value, base), hass
).async_render()
== expected
)
assert (
template.Template(
"{{ log(%s, %s) | round(1) }}" % (value, base), hass
).async_render()
== expected
)
def test_sine(hass):
tests = [
(0, "0.0"),
(math.pi / 2, "1.0"),
(math.pi, "0.0"),
(math.pi * 1.5, "-1.0"),
(math.pi / 10, "0.309"),
('"duck"', "duck"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sin | round(3) }}" % value, hass).async_render()
== expected
)
def test_cos(hass):
tests = [
(0, "1.0"),
(math.pi / 2, "0.0"),
(math.pi, "-1.0"),
(math.pi * 1.5, "-0.0"),
(math.pi / 10, "0.951"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | cos | round(3) }}" % value, hass).async_render()
== expected
)
def test_tan(hass):
tests = [
(0, "0.0"),
(math.pi, "-0.0"),
(math.pi / 180 * 45, "1.0"),
(math.pi / 180 * 90, "1.633123935319537e+16"),
(math.pi / 180 * 135, "-1.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | tan | round(3) }}" % value, hass).async_render()
== expected
)
def test_sqrt(hass):
tests = [
(0, "0.0"),
(1, "1.0"),
(2, "1.414"),
(10, "3.162"),
(100, "10.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sqrt | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_sine(hass):
tests = [
(-2.0, "-2.0"),
(-1.0, "-1.571"),
(-0.5, "-0.524"),
(0.0, "0.0"),
(0.5, "0.524"),
(1.0, "1.571"),
(2.0, "2.0"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | asin | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_cos(hass):
tests = [
(-2.0, "-2.0"),
(-1.0, "3.142"),
(-0.5, "2.094"),
(0.0, "1.571"),
(0.5, "1.047"),
(1.0, "0.0"),
(2.0, "2.0"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | acos | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan(hass):
tests = [
(-10.0, "-1.471"),
(-2.0, "-1.107"),
(-1.0, "-0.785"),
(-0.5, "-0.464"),
(0.0, "0.0"),
(0.5, "0.464"),
(1.0, "0.785"),
(2.0, "1.107"),
(10.0, "1.471"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | atan | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan2(hass):
tests = [
(-10.0, -10.0, "-2.356"),
(-10.0, 0.0, "-1.571"),
(-10.0, 10.0, "-0.785"),
(0.0, -10.0, "3.142"),
(0.0, 0.0, "0.0"),
(0.0, 10.0, "0.0"),
(10.0, -10.0, "2.356"),
(10.0, 0.0, "1.571"),
(10.0, 10.0, "0.785"),
(-4.0, 3.0, "-0.927"),
(-1.0, 2.0, "-0.464"),
(2.0, 1.0, "1.107"),
('"duck"', '"goose"', "('duck', 'goose')"),
]
for y, x, expected in tests:
assert (
template.Template(
"{{ (%s, %s) | atan2 | round(3) }}" % (y, x), hass
).async_render()
== expected
)
assert (
template.Template(
"{{ atan2(%s, %s) | round(3) }}" % (y, x), hass
).async_render()
== expected
)
def test_strptime(hass):
tests = [
("2016-10-19 15:22:05.588122 UTC", "%Y-%m-%d %H:%M:%S.%f %Z", None),
("2016-10-19 15:22:05.588122+0100", "%Y-%m-%d %H:%M:%S.%f%z", None),
("2016-10-19 15:22:05.588122", "%Y-%m-%d %H:%M:%S.%f", None),
("2016-10-19", "%Y-%m-%d", None),
("2016", "%Y", None),
("15:22:05", "%H:%M:%S", None),
("1469119144", "%Y", "1469119144"),
("invalid", "%Y", "invalid"),
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = "{{ strptime('%s', '%s') }}" % (inp, fmt)
assert template.Template(temp, hass).async_render() == str(expected)
def test_timestamp_custom(hass):
    """timestamp_custom renders a timestamp with an optional format and local flag.

    None input renders as "None"; an unparsable format is passed through
    unchanged.
    """
    now = dt_util.utcnow()
    # (input, format, local flag, expected rendering)
    tests = [
        (None, None, None, "None"),
        (1469119144, None, True, "2016-07-21 16:39:04"),
        (1469119144, "%Y", True, "2016"),
        (1469119144, "invalid", True, "invalid"),
        (dt_util.as_timestamp(now), None, False, now.strftime("%Y-%m-%d %H:%M:%S")),
    ]
    for inp, fmt, local, out in tests:
        # BUGFIX: the two-argument branch was unreachable before because the
        # bare `if fmt:` test came first; check the more specific condition
        # first so the explicit `local` argument is actually exercised.
        # (local=True matches the filter's default, so expected outputs are
        # unchanged.)
        if fmt and local is not None:
            fil = "timestamp_custom('{0}', {1})".format(fmt, local)
        elif fmt:
            fil = "timestamp_custom('{}')".format(fmt)
        else:
            fil = "timestamp_custom"
        assert (
            template.Template("{{ %s | %s }}" % (inp, fil), hass).async_render() == out
        )
def test_timestamp_local(hass):
    """timestamp_local renders epoch seconds as a local-time string; None stays None."""
    for source, rendered in [(None, "None"), (1469119144, "2016-07-21 16:39:04")]:
        tpl = template.Template("{{ %s | timestamp_local }}" % source, hass)
        assert tpl.async_render() == rendered
def test_to_json(hass):
    """The to_json filter serialises a mapping to its JSON string form."""
    # only the filters, so we don't need to be exhaustive with our sample JSON.
    rendered = template.Template(
        "{{ {'Foo': 'Bar'} | to_json }}", hass
    ).async_render()
    assert rendered == '{"Foo": "Bar"}'
def test_from_json(hass):
    """The from_json filter parses a JSON string; keys become attributes."""
    # only the filters, so we don't need to be exhaustive with our sample JSON.
    rendered = template.Template(
        '{{ (\'{"Foo": "Bar"}\' | from_json).Foo }}', hass
    ).async_render()
    assert rendered == "Bar"
def test_min(hass):
    """The min filter renders the smallest element of a list."""
    tpl = template.Template("{{ [1, 2, 3] | min }}", hass)
    assert tpl.async_render() == "1"
def test_max(hass):
    """The max filter renders the largest element of a list."""
    tpl = template.Template("{{ [1, 2, 3] | max }}", hass)
    assert tpl.async_render() == "3"
def test_ord(hass):
    """The ord filter renders the code point of a one-character string."""
    tpl = template.Template('{{ "d" | ord }}', hass)
    assert tpl.async_render() == "100"
def test_base64_encode(hass):
    """The base64_encode filter renders the base64 form of its input."""
    assert (
        template.Template('{{ "homeassistant" | base64_encode }}', hass).async_render()
        == "aG9tZWFzc2lzdGFudA=="
    )
def test_base64_decode(hass):
    """The base64_decode filter renders the decoded text of a base64 string."""
    assert (
        template.Template(
            '{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}', hass
        ).async_render()
        == "homeassistant"
    )
def test_ordinal(hass):
    """The ordinal filter appends the English ordinal suffix to an integer."""
    cases = [
        (1, "1st"),
        (2, "2nd"),
        (3, "3rd"),
        (4, "4th"),
        (5, "5th"),
        (12, "12th"),  # teens take "th", not "nd"
        (100, "100th"),
        (101, "101st"),
    ]
    for number, rendered in cases:
        tpl = template.Template("{{ %s | ordinal }}" % number, hass)
        assert tpl.async_render() == rendered
def test_timestamp_utc(hass):
    """timestamp_utc renders epoch seconds as a UTC time string; None stays None."""
    now = dt_util.utcnow()
    cases = {
        None: "None",
        1469119144: "2016-07-21 16:39:04",
        dt_util.as_timestamp(now): now.strftime("%Y-%m-%d %H:%M:%S"),
    }
    for source, rendered in cases.items():
        tpl = template.Template("{{ %s | timestamp_utc }}" % source, hass)
        assert tpl.async_render() == rendered
def test_as_timestamp(hass):
    """as_timestamp() renders "None" for unconvertible input and epoch seconds otherwise."""
    # A non-date string cannot be converted
    assert (
        template.Template('{{ as_timestamp("invalid") }}', hass).async_render()
        == "None"
    )
    # A missing state object also renders as "None"
    hass.mock = None
    assert (
        template.Template("{{ as_timestamp(states.mock) }}", hass).async_render()
        == "None"
    )
    # A tz-aware datetime produced by strptime converts to epoch seconds
    tpl = (
        '{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", '
        '"%Y-%m-%dT%H:%M:%S%z")) }}'
    )
    assert template.Template(tpl, hass).async_render() == "1706951424.0"
@patch.object(random, "choice")
def test_random_every_time(test_choice, hass):
    """The random filter re-evaluates on every render (the choice is not cached)."""
    tpl = template.Template("{{ [1,2] | random }}", hass)
    test_choice.return_value = "foo"
    assert tpl.async_render() == "foo"
    # Changing the mocked choice changes the next render -> no caching
    test_choice.return_value = "bar"
    assert tpl.async_render() == "bar"
def test_passing_vars_as_keywords(hass):
    """Variables passed as keyword arguments are visible to the template."""
    assert template.Template("{{ hello }}", hass).async_render(hello=127) == "127"
def test_passing_vars_as_vars(hass):
    """Variables passed as a dict positional argument are visible to the template."""
    assert template.Template("{{ hello }}", hass).async_render({"hello": 127}) == "127"
def test_passing_vars_as_list(hass):
    """A list-valued variable renders as its repr via render_complex."""
    assert (
        template.render_complex(
            template.Template("{{ hello }}", hass), {"hello": ["foo", "bar"]}
        )
        == "['foo', 'bar']"
    )
def test_passing_vars_as_list_element(hass):
    """A list-valued variable supports index access inside the template."""
    assert (
        template.render_complex(
            template.Template("{{ hello[1] }}", hass), {"hello": ["foo", "bar"]}
        )
        == "bar"
    )
def test_passing_vars_as_dict_element(hass):
    """A dict-valued variable supports attribute-style key access."""
    assert (
        template.render_complex(
            template.Template("{{ hello.foo }}", hass), {"hello": {"foo": "bar"}}
        )
        == "bar"
    )
def test_passing_vars_as_dict(hass):
    """A dict-valued variable renders as its repr via render_complex."""
    assert (
        template.render_complex(
            template.Template("{{ hello }}", hass), {"hello": {"foo": "bar"}}
        )
        == "{'foo': 'bar'}"
    )
def test_render_with_possible_json_value_with_valid_json(hass):
    """Valid JSON input is exposed to the template as value_json."""
    tpl = template.Template("{{ value_json.hello }}", hass)
    assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_with_invalid_json(hass):
    """Input that is not valid JSON leaves value_json undefined -> empty render."""
    tpl = template.Template("{{ value_json }}", hass)
    assert tpl.async_render_with_possible_json_value("{ I AM NOT JSON }") == ""
def test_render_with_possible_json_value_with_template_error_value(hass):
    """A template error falls back to the supplied error value argument."""
    tpl = template.Template("{{ non_existing.variable }}", hass)
    assert tpl.async_render_with_possible_json_value("hello", "-") == "-"
def test_render_with_possible_json_value_with_missing_json_value(hass):
    """A key missing from the parsed JSON renders as an empty string."""
    tpl = template.Template("{{ value_json.goodbye }}", hass)
    assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == ""
def test_render_with_possible_json_value_valid_with_is_defined(hass):
    """is_defined passes through a value that exists in the JSON payload."""
    tpl = template.Template("{{ value_json.hello|is_defined }}", hass)
    assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_undefined_json(hass):
    """is_defined on a missing key returns the original raw payload unchanged."""
    tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
    assert (
        tpl.async_render_with_possible_json_value('{"hello": "world"}')
        == '{"hello": "world"}'
    )
def test_render_with_possible_json_value_undefined_json_error_value(hass):
    """is_defined on a missing key falls back to the supplied error value."""
    tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
    assert tpl.async_render_with_possible_json_value('{"hello": "world"}', "") == ""
def test_render_with_possible_json_value_non_string_value(hass):
    """Non-string values (here a datetime) are usable directly as `value`."""
    tpl = template.Template(
        """
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
        """,
        hass,
    )
    value = datetime(2019, 1, 18, 12, 13, 14)
    # Appending '+0000' and parsing with %z yields the UTC-localised datetime
    expected = str(pytz.utc.localize(value))
    assert tpl.async_render_with_possible_json_value(value) == expected
def test_if_state_exists(hass):
    """A set entity is truthy when accessed via the states object."""
    hass.states.async_set("test.object", "available")
    tpl = template.Template(
        "{% if states.test.object %}exists{% else %}not exists{% endif %}", hass
    )
    assert tpl.async_render() == "exists"
def test_is_state(hass):
    """is_state() compares an entity's state; unknown entities compare False."""
    hass.states.async_set("test.object", "available")
    tpl = template.Template(
        """
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
        """,
        hass,
    )
    assert tpl.async_render() == "yes"
    # Unknown entity: is_state returns False rather than raising
    tpl = template.Template(
        """
{{ is_state("test.noobject", "available") }}
        """,
        hass,
    )
    assert tpl.async_render() == "False"
def test_is_state_attr(hass):
    """is_state_attr() compares an entity attribute; unknown entities compare False."""
    hass.states.async_set("test.object", "available", {"mode": "on"})
    tpl = template.Template(
        """
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
        """,
        hass,
    )
    assert tpl.async_render() == "yes"
    # Unknown entity: is_state_attr returns False rather than raising
    tpl = template.Template(
        """
{{ is_state_attr("test.noobject", "mode", "on") }}
        """,
        hass,
    )
    assert tpl.async_render() == "False"
def test_state_attr(hass):
    """state_attr() returns an attribute value; None for unknown entities."""
    hass.states.async_set("test.object", "available", {"mode": "on"})
    tpl = template.Template(
        """
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
        """,
        hass,
    )
    assert tpl.async_render() == "yes"
    # Unknown entity: state_attr returns None rather than raising
    tpl = template.Template(
        """
{{ state_attr("test.noobject", "mode") == None }}
        """,
        hass,
    )
    assert tpl.async_render() == "True"
def test_states_function(hass):
    """states() returns the entity state, or "unknown" for a missing entity."""
    hass.states.async_set("test.object", "available")
    tpl = template.Template('{{ states("test.object") }}', hass)
    assert tpl.async_render() == "available"
    tpl2 = template.Template('{{ states("test.object2") }}', hass)
    assert tpl2.async_render() == "unknown"
@patch(
    "homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
    return_value=True,
)
def test_now(mock_is_safe, hass):
    """now() renders the current local time (dt.now patched for determinism)."""
    now = dt_util.now()
    with patch("homeassistant.util.dt.now", return_value=now):
        assert (
            now.isoformat()
            == template.Template("{{ now().isoformat() }}", hass).async_render()
        )
@patch(
    "homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
    return_value=True,
)
def test_utcnow(mock_is_safe, hass):
    """utcnow() renders the current UTC time (dt.utcnow patched for determinism)."""
    now = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        assert (
            now.isoformat()
            == template.Template("{{ utcnow().isoformat() }}", hass).async_render()
        )
def test_regex_match(hass):
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'home assistant test' | regex_match('Home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another home assistant test' | regex_match('home') }}
""",
hass,
)
assert tpl.async_render() == "False"
tpl = template.Template(
"""
{{ ['home assistant test'] | regex_match('.*assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_search(hass):
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'home assistant test' | regex_search('Home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another home assistant test' | regex_search('home') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ ['home assistant test'] | regex_search('assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_replace(hass):
tpl = template.Template(
r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""",
hass,
)
assert tpl.async_render() == "World"
tpl = template.Template(
"""
{{ ['home hinderant test'] | regex_replace('hinder', 'assist') }}
""",
hass,
)
assert tpl.async_render() == "['home assistant test']"
def test_regex_findall_index(hass):
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""",
hass,
)
assert tpl.async_render() == "JFK"
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
tpl = template.Template(
"""
{{ ['JFK', 'LHR'] | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
def test_bitwise_and(hass):
tpl = template.Template(
"""
{{ 8 | bitwise_and(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 8)
tpl = template.Template(
"""
{{ 10 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 & 2)
tpl = template.Template(
"""
{{ 8 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 2)
def test_bitwise_or(hass):
tpl = template.Template(
"""
{{ 8 | bitwise_or(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 8)
tpl = template.Template(
"""
{{ 10 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 | 2)
tpl = template.Template(
"""
{{ 8 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 2)
def test_distance_function_with_1_state(hass):
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
tpl = template.Template("{{ distance(states.test.object) | round }}", hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_states(hass):
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
"{{ distance(states.test.object, states.test.object_2) | round }}", hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_coord(hass):
_set_up_units(hass)
tpl = template.Template('{{ distance("32.87336", "-117.22943") | round }}', hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_coords(hass):
_set_up_units(hass)
assert (
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (hass.config.latitude, hass.config.longitude),
hass,
).async_render()
== "187"
)
def test_distance_function_with_1_state_1_coord(hass):
_set_up_units(hass)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) ' "| round }}",
hass,
)
assert tpl.async_render() == "187"
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") ' "| round }}",
hass,
)
assert tpl2.async_render() == "187"
def test_distance_function_return_none_if_invalid_state(hass):
hass.states.async_set("test.object_2", "happy", {"latitude": 10})
tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass)
assert tpl.async_render() == "None"
def test_distance_function_return_none_if_invalid_coord(hass):
assert (
template.Template('{{ distance("123", "abc") }}', hass).async_render() == "None"
)
assert template.Template('{{ distance("123") }}', hass).async_render() == "None"
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template('{{ distance("123", states.test_object_2) }}', hass)
assert tpl.async_render() == "None"
def test_distance_function_with_2_entity_ids(hass):
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}', hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_entity_1_coord(hass):
_set_up_units(hass)
hass.states.async_set(
"test.object",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}', hass
)
assert tpl.async_render() == "187"
def test_closest_function_home_vs_domain(hass):
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_test_domain.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template(
"{{ closest(states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.object"
)
assert (
template.Template(
"{{ (states.test_domain | closest).entity_id }}", hass
).async_render()
== "test_domain.object"
)
def test_closest_function_home_vs_all_states(hass):
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain_2.and_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
assert (
template.Template("{{ (states | closest).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
async def test_closest_function_home_vs_group_entity_id(hass):
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_closest_function_home_vs_group_state(hass):
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}")
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_expand(hass):
info = render_to_info(hass, "{{ expand('test.object') }}")
assert_result_info(info, "[]", ["test.object"])
info = render_to_info(hass, "{{ expand(56) }}")
assert_result_info(info, "[]")
hass.states.async_set("test.object", "happy")
info = render_to_info(
hass, "{{ expand('test.object') | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", [])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "", [], ["group"])
await group.Group.async_create_group(hass, "new group", ["test.object"])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", ["group.new_group"], ["group"])
info = render_to_info(
hass,
"{{ expand('group.new_group', 'test.object')"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass,
"{{ ['group.new_group', 'test.object'] | expand"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
def test_closest_function_to_coord(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
tpl = template.Template(
'{{ (states.test_domain | closest("%s", %s)).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
def test_closest_function_to_entity_id(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
info = render_to_info(
hass,
"{{ closest(zone, states.test_domain).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
info = render_to_info(
hass,
"{{ ([states.test_domain, 'test_domain.closest_zone'] "
"| closest(zone)).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
def test_closest_function_to_state(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
assert (
template.Template(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.closest_zone"
)
def test_closest_function_invalid_state(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
for state in ("states.zone.non_existing", '"zone.non_existing"'):
assert (
template.Template("{{ closest(%s, states) }}" % state, hass).async_render()
== "None"
)
def test_closest_function_state_with_invalid_location(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{"latitude": "invalid latitude", "longitude": hass.config.longitude + 0.1},
)
assert (
template.Template(
"{{ closest(states.test_domain.closest_home, states) }}", hass
).async_render()
== "None"
)
def test_closest_function_invalid_coordinates(hass):
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
assert (
template.Template(
'{{ closest("invalid", "coord", states) }}', hass
).async_render()
== "None"
)
assert (
template.Template(
'{{ states | closest("invalid", "coord") }}', hass
).async_render()
== "None"
)
def test_closest_function_no_location_states(hass):
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render() == ""
)
def test_extract_entities_none_exclude_stuff(hass):
assert template.extract_entities(None) == []
assert template.extract_entities("mdi:water") == []
assert (
template.extract_entities(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}"
)
== MATCH_ALL
)
assert (
template.extract_entities('{{ distance("123", states.test_object_2) }}')
== MATCH_ALL
)
def test_extract_entities_no_match_entities(hass):
assert (
template.extract_entities("{{ value_json.tst | timestamp_custom('%Y' True) }}")
== MATCH_ALL
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
def test_generate_filter_iterators(hass):
info = render_to_info(
hass,
"""
{% for state in states %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", all_states=True)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor", domains=["sensor"])
# But we do here because the state gets accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=off,", ["sensor.test_sensor"], ["sensor"]
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.attributes.attr }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=value,", ["sensor.test_sensor"], ["sensor"]
)
def test_generate_select(hass):
template_str = """
{{ states.sensor|selectattr("state","equalto","off")
|join(",", attribute="entity_id") }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "", [], ["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
hass.states.async_set("sensor.test_sensor_on", "on")
info = tmp.async_render_to_info()
assert_result_info(
info,
"sensor.test_sensor",
["sensor.test_sensor", "sensor.test_sensor_on"],
["sensor"],
)
def test_extract_entities_match_entities(hass):
assert (
template.extract_entities(
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
"""
)
== ["device_tracker.phone_1"]
)
assert (
template.extract_entities(
"""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
"""
)
== ["binary_sensor.garage_door"]
)
assert (
template.extract_entities(
"""
{{ states("binary_sensor.garage_door") }}
"""
)
== ["binary_sensor.garage_door"]
)
hass.states.async_set("device_tracker.phone_2", "not_home", {"battery": 20})
assert (
template.extract_entities(
"""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
"""
)
== ["device_tracker.phone_2"]
)
assert sorted(["device_tracker.phone_1", "device_tracker.phone_2"]) == sorted(
template.extract_entities(
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
"""
)
)
assert sorted(["sensor.pick_humidity", "sensor.pick_temperature"]) == sorted(
template.extract_entities(
"""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
"""
)
)
assert sorted(
["sensor.luftfeuchtigkeit_mean", "input_number.luftfeuchtigkeit"]
) == sorted(
template.extract_entities(
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}"
)
)
def test_extract_entities_with_variables(hass):
hass.states.async_set("input_boolean.switch", "on")
assert {"input_boolean.switch"} == extract_entities(
hass, "{{ is_state('input_boolean.switch', 'off') }}", {}
)
assert {"input_boolean.switch"} == extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
assert {"no_state"} == extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "no_state"}
)
assert {"input_boolean.switch"} == extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "input_boolean.switch"}
)
assert {"input_boolean.switch"} == extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
hass.states.async_set("media_player.livingroom", "off")
assert {"media_player.livingroom"} == extract_entities(
hass,
"{{ is_state('media_player.' ~ where , 'playing') }}",
{"where": "livingroom"},
)
def test_jinja_namespace(hass):
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
hass,
)
hass.states.async_set("sensor.dummy", "a value")
assert test_template.async_render() == "a value"
hass.states.async_set("sensor.dummy", "another value")
assert test_template.async_render() == "another value"
def test_state_with_unit(hass):
hass.states.async_set("sensor.test", "23", {"unit_of_measurement": "beers"})
hass.states.async_set("sensor.test2", "wow")
tpl = template.Template("{{ states.sensor.test.state_with_unit }}", hass)
assert tpl.async_render() == "23 beers"
tpl = template.Template("{{ states.sensor.test2.state_with_unit }}", hass)
assert tpl.async_render() == "wow"
tpl = template.Template(
"{% for state in states %}{{ state.state_with_unit }} {% endfor %}", hass
)
assert tpl.async_render() == "23 beers wow"
tpl = template.Template("{{ states.sensor.non_existing.state_with_unit }}", hass)
assert tpl.async_render() == ""
def test_length_of_states(hass):
hass.states.async_set("sensor.test", "23")
hass.states.async_set("sensor.test2", "wow")
hass.states.async_set("climate.test2", "cooling")
tpl = template.Template("{{ states | length }}", hass)
assert tpl.async_render() == "3"
tpl = template.Template("{{ states.sensor | length }}", hass)
assert tpl.async_render() == "2"
def test_render_complex_handling_non_template_values(hass):
assert template.render_complex(
{True: 1, False: template.Template("{{ hello }}", hass)}, {"hello": 2}
) == {True: 1, False: "2"}
| true | true |
1c3afb5ff9d4981f9d2a60625c92fcfd2930c52c | 327 | py | Python | controller/recipe/service.py | StykMartin/poc-controller | 6deb6be12e72837a9f4e40d13899a267f1c6a088 | [
"MIT"
] | 1 | 2021-12-17T16:58:39.000Z | 2021-12-17T16:58:39.000Z | controller/recipe/service.py | StykMartin/poc-controller | 6deb6be12e72837a9f4e40d13899a267f1c6a088 | [
"MIT"
] | 3 | 2021-11-28T08:58:39.000Z | 2021-12-20T19:01:36.000Z | controller/recipe/service.py | StykMartin/poc-controller | 6deb6be12e72837a9f4e40d13899a267f1c6a088 | [
"MIT"
] | 1 | 2021-12-19T05:10:41.000Z | 2021-12-19T05:10:41.000Z | # Proxy to Beaker Server
from typing import Any, Dict, Tuple
def get_recipe(recipe_id: int) -> Dict[str, Any]:
    """Return a stub recipe payload containing only the given id."""
    return dict(recipe_id=recipe_id)
def get_recipe_watchdog(recipe_id: int) -> int:
    """Return the watchdog value for *recipe_id* (placeholder: echoes the id)."""
    watchdog = recipe_id
    return watchdog
def post_recipe_watchdog(recipe_id: int, seconds: int) -> Tuple[int, int]:
    """Acknowledge a watchdog update (placeholder: echoes both arguments)."""
    return (recipe_id, seconds)
| 21.8 | 74 | 0.721713 |
from typing import Any, Dict, Tuple
def get_recipe(recipe_id: int) -> Dict[str, Any]:
return {"recipe_id": recipe_id}
def get_recipe_watchdog(recipe_id: int) -> int:
return recipe_id
def post_recipe_watchdog(recipe_id: int, seconds: int) -> Tuple[int, int]:
return recipe_id, seconds
| true | true |
1c3afc82812398389454280db5bc7d765bef91ec | 5,745 | py | Python | funowl/general_definitions.py | hsolbrig/funowl | 9345591e6c6cdf246fb8f4b0fcdae0b904c2a45d | [
"CC0-1.0"
] | 23 | 2019-05-24T05:27:25.000Z | 2022-02-18T16:37:17.000Z | funowl/general_definitions.py | cmungall/funowl | 67ccb3ece369b1c889341403430120c33cb5a572 | [
"CC0-1.0"
] | 22 | 2019-11-04T21:03:33.000Z | 2022-03-11T19:38:15.000Z | funowl/general_definitions.py | cmungall/funowl | 67ccb3ece369b1c889341403430120c33cb5a572 | [
"CC0-1.0"
] | 5 | 2019-10-07T13:28:14.000Z | 2021-12-20T08:28:58.000Z | """
nonNegativeInteger := a nonempty finite sequence of digits between 0 and 9
quotedString := a finite sequence of characters in which " (U+22) and \\ (U+5C) occur only in pairs of the form \\" (U+5C, U+22) and \\ (U+5C, U+5C), enclosed in a pair of " (U+22) characters
languageTag := @ (U+40) followed a nonempty sequence of characters matching the langtag production from [BCP 47]
nodeID := a finite sequence of characters matching the BLANK_NODE_LABEL production of [SPARQL]
fullIRI := an IRI as defined in [RFC3987], enclosed in a pair of < (U+3C) and > (U+3E) characters
prefixName := a finite sequence of characters matching the as PNAME_NS production of [SPARQL]
abbreviatedIRI := a finite sequence of characters matching the PNAME_LN production of [SPARQL]
"""
from typing import Optional, ClassVar, Set
import bcp47
import rdflib
import rfc3987
from rdflib import BNode, URIRef, Graph, Literal
from rdflib.namespace import is_ncname, XSD
from funowl.base.fun_owl_base import FunOwlBase, FunOwlRoot
from funowl.terminals.Terminals import PNAME_NS, PNAME_LN, BLANK_NODE_LABEL, QUOTED_STRING, OPT_PNAME_NS
from funowl.writers.FunctionalWriter import FunctionalWriter
# Note - super class warning is ok
class NonNegativeInteger(int, FunOwlBase):
    """ a nonempty finite sequence of digits between 0 and 9 """
    def __init__(self, v: int) -> None:
        # int.__new__ has already run; reject anything that is not recognised
        # as a valid instance of this class (e.g. negative values).
        if not isinstance(v, type(self)):
            raise TypeError(f"{v}: invalid non-negative integer")

    def _is_valid(self, instance) -> bool:
        """Return True iff *instance* is interpretable as a non-negative integer."""
        try:
            return int(instance) >= 0
        except (TypeError, ValueError):
            # Not numeric / not coercible to int -> not valid
            return False

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> Literal:
        """Render this value as an xsd:nonNegativeInteger RDF literal."""
        return Literal(self, datatype=XSD.nonNegativeInteger)
class QuotedString(QUOTED_STRING, FunOwlRoot):
    """ finite sequence of characters in which " (U+22) and \\ (U+5C) occur only in pairs of the form
        "\\ (U+5C, U+22) and \\ (U+5C, U+5C), enclosed in a pair of " (U+22) characters
    """
    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit the string with backslashes and quotes escaped, wrapped in double quotes."""
        # Escape backslashes first so quote escaping does not double-escape
        escaped = self.replace('\\', '\\\\').replace('"', '\\"')
        return w + f'"{escaped}"'
class LanguageTag(str, FunOwlBase):
    """ @ (U+40) followed a nonempty sequence of characters matching the langtag production from [BCP 47] """
    # Previously this docstring sat after CASE_SENSITIVE as a stray string
    # statement, so the class had no __doc__; it is now the class docstring.

    # A map from lower case tags to the case defined in BCP 47.
    languages: ClassVar[Set[str]] = set(bcp47.languages.values())
    lc_languages: ClassVar[Set[str]] = {l.lower() for l in languages}
    # When True, tags must match the registered case exactly; the default is
    # case-insensitive matching.
    CASE_SENSITIVE: ClassVar[bool] = False

    def __init__(self, v: str) -> None:
        if not isinstance(v, type(self)):
            raise TypeError(f"{v}: invalid language tag")

    def _is_valid(self, instance) -> bool:
        """Return True iff *instance* is a registered BCP 47 language tag."""
        if LanguageTag.CASE_SENSITIVE:
            return instance in LanguageTag.languages
        return instance.lower() in LanguageTag.lc_languages

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        """Emit the tag in functional syntax: '@' followed by the tag text."""
        return w + ('@' + str(self))
class NodeID(BLANK_NODE_LABEL, FunOwlRoot):
    """ a finite sequence of characters matching the BLANK_NODE_LABEL production of [SPARQL] """

    def __new__(cls, v: Optional[str] = None) -> object:
        # No label supplied: mint a fresh blank node label via rdflib.
        if v is None:
            return BLANK_NODE_LABEL.__new__(cls, BNode().n3())
        # NOTE(review): isinstance here presumably goes through a
        # pattern-validating metaclass on BLANK_NODE_LABEL -- TODO confirm.
        elif not isinstance(v, BLANK_NODE_LABEL):
            raise TypeError(f"{v} is not a valid {cls}")
        return BLANK_NODE_LABEL.__new__(cls, v)

    def __init__(self, v: Optional[str] = None) -> None:
        # NOTE(review): this checks `self` (the constructed value), not `v`.
        # With a metaclass-backed __instancecheck__ that re-validates the
        # final string this is plausible; otherwise it is a tautology --
        # confirm against the terminal metaclass behaviour.
        if not isinstance(self, type(self)):
            raise TypeError(f"{v} is not a valid {type(self)}")
        BLANK_NODE_LABEL.__init__(self, self)

    def to_rdf(self, _: Graph) -> BNode:
        # NOTE(review): returns a brand-new anonymous BNode on every call,
        # discarding this node's own label -- confirm that is intended.
        return BNode()
class FullIRI(str, FunOwlBase):
    """fullIRI := an IRI as defined in [RFC3987], enclosed in a pair of
    < (U+3C) and > (U+3E) characters.
    """

    def __init__(self, v: str) -> None:
        if v is None or not self._is_valid(v):
            raise TypeError(f"{v} is not a valid {type(self)}")

    def _is_valid(self, instance) -> bool:
        # Never valid: None, or anything that is an RDF literal -- literals
        # and IRIs must not be conflated.
        if instance is None or isinstance(instance, (rdflib.Literal, Literal)):
            return False
        # Anything already typed as a URIRef is accepted as-is; otherwise the
        # text has to match the RFC 3987 IRI grammar.
        return isinstance(instance, URIRef) or rfc3987.match(str(instance), 'IRI')

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        # Let the writer's namespace manager abbreviate the IRI if it can.
        shortened = w.g.namespace_manager.normalizeUri(str(self))
        return w + shortened

    def to_rdf(self, _: Graph) -> URIRef:
        return URIRef(str(self))
class PrefixName(OPT_PNAME_NS, FunOwlRoot):
    """ a finite sequence of characters matching the PNAME_NS production of [SPARQL] """

    def __new__(cls, v: Optional[str] = None) -> object:
        # The empty string represents the default (un-named) prefix.
        if v is None:
            v = ''
        elif not isinstance(v, OPT_PNAME_NS):
            raise TypeError(f"{v} is not a valid {cls}")
        # NOTE(review): delegates to PNAME_NS.__new__ rather than
        # OPT_PNAME_NS.__new__ -- presumably OPT_PNAME_NS derives from
        # PNAME_NS, so this skips the optional-form validation; confirm.
        return PNAME_NS.__new__(cls, v)

    def __init__(self, v: Optional[str] = None) -> None:
        super().__init__(v)
        # Additionally validate against rdflib's NCNAME rules; the empty
        # (default) prefix is allowed through.
        if v and not is_ncname(v):
            raise ValueError(f"{v} is not a valid NCNAME according to rdflib")

    def __str__(self) -> str:
        # NOTE(review): `self is None` can never be true for a bound method
        # receiver; the fallback branch looks unreachable -- confirm.
        return '' if self is None else super().__str__()

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        return w.concat(str(self), sep='')
class AbbreviatedIRI(PNAME_LN, FunOwlRoot):
    """abbreviatedIRI := a finite sequence of characters matching the
    PNAME_LN production of [SPARQL].
    """

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> URIRef:
        # Split "prefix:local" once; an empty prefix maps to the default
        # namespace registered on the graph's store.
        ns_prefix, local_name = self.split(':', 1)
        base = g.namespace_manager.store.namespace(ns_prefix or "")
        return URIRef(base + local_name)
| 42.555556 | 191 | 0.662489 | from typing import Optional, ClassVar, Set
import bcp47
import rdflib
import rfc3987
from rdflib import BNode, URIRef, Graph, Literal
from rdflib.namespace import is_ncname, XSD
from funowl.base.fun_owl_base import FunOwlBase, FunOwlRoot
from funowl.terminals.Terminals import PNAME_NS, PNAME_LN, BLANK_NODE_LABEL, QUOTED_STRING, OPT_PNAME_NS
from funowl.writers.FunctionalWriter import FunctionalWriter
class NonNegativeInteger(int, FunOwlBase):
def __init__(self, v: int) -> None:
if not isinstance(v, type(self)):
raise TypeError(f"{v}: invalid non-negative integer")
def _is_valid(self, instance) -> bool:
try:
return int(instance) >= 0
except TypeError:
return False
except ValueError:
return False
def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> Literal:
return Literal(self, datatype=XSD.nonNegativeInteger)
class QuotedString(QUOTED_STRING, FunOwlRoot):
def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
return w + ('"' + self.replace('\\', '\\\\').replace('"', '\\"') + '"')
class LanguageTag(str, FunOwlBase):
languages: ClassVar[Set[str]] = set(bcp47.languages.values())
lc_languages: ClassVar[Set[str]] = {l.lower() for l in languages}
CASE_SENSITIVE: bool = False
def __init__(self, v: str) -> None:
if not isinstance(v, type(self)):
raise TypeError(f"{v}: invalid language tag")
def _is_valid(self, instance) -> bool:
return instance in LanguageTag.languages if LanguageTag.CASE_SENSITIVE else\
instance.lower() in LanguageTag.lc_languages
def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
return w + ('@' + str(self))
class NodeID(BLANK_NODE_LABEL, FunOwlRoot):
def __new__(cls, v: Optional[str] = None) -> object:
if v is None:
return BLANK_NODE_LABEL.__new__(cls, BNode().n3())
elif not isinstance(v, BLANK_NODE_LABEL):
raise TypeError(f"{v} is not a valid {cls}")
return BLANK_NODE_LABEL.__new__(cls, v)
def __init__(self, v: Optional[str] = None) -> None:
if not isinstance(self, type(self)):
raise TypeError(f"{v} is not a valid {type(self)}")
BLANK_NODE_LABEL.__init__(self, self)
def to_rdf(self, _: Graph) -> BNode:
return BNode()
class FullIRI(str, FunOwlBase):
def __init__(self, v: str) -> None:
if v is None or not self._is_valid(v):
raise TypeError(f"{v} is not a valid {type(self)}")
def _is_valid(self, instance) -> bool:
# It is already declated to be a URIRef no matter what it looks like
# It looks like an IRI
return instance is not None and not isinstance(instance, (rdflib.Literal, Literal)) \
and (isinstance(instance, URIRef) or rfc3987.match(str(instance), 'IRI'))
def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
return w + w.g.namespace_manager.normalizeUri(str(self))
def to_rdf(self, _: Graph) -> URIRef:
return URIRef(str(self))
class PrefixName(OPT_PNAME_NS, FunOwlRoot):
def __new__(cls, v: Optional[str] = None) -> object:
if v is None:
v = ''
elif not isinstance(v, OPT_PNAME_NS):
raise TypeError(f"{v} is not a valid {cls}")
return PNAME_NS.__new__(cls, v)
def __init__(self, v: Optional[str] = None) -> None:
super().__init__(v)
if v and not is_ncname(v):
raise ValueError(f"{v} is not a valid NCNAME according to rdflib")
def __str__(self) -> str:
return '' if self is None else super().__str__()
def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
return w.concat(str(self), sep='')
class AbbreviatedIRI(PNAME_LN, FunOwlRoot):
def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> URIRef:
prefix, name = self.split(':', 1)
return URIRef(g.namespace_manager.store.namespace(prefix or "") + name)
| true | true |
1c3afd51a20bfdb1b868bd870ca6f23018628fae | 18,995 | py | Python | dautils/nb.py | diabloxenon/dautils | 064307b0fd9bbca2adcc7df5c6a0289954c74d58 | [
"MIT"
] | null | null | null | dautils/nb.py | diabloxenon/dautils | 064307b0fd9bbca2adcc7df5c6a0289954c74d58 | [
"MIT"
] | null | null | null | dautils/nb.py | diabloxenon/dautils | 064307b0fd9bbca2adcc7df5c6a0289954c74d58 | [
"MIT"
] | null | null | null | """ IPython/Jupyter notebook widgets and utilities. """
from IPython.display import display
from IPython.display import Math
# from IPython.html import widgets : DEPRECATED OPTION
import ipywidgets as widgets
from dautils import collect
from dautils import conf
from dautils import ts
from dautils import log_api
import matplotlib as mpl
from matplotlib.colors import rgb2hex
import pprint
from matplotlib.colors import ColorConverter
def create_month_widget(month, *args, **kwargs):
    """Build a dropdown whose options are the short month names.

    :param month: label of the month selected by default.
    :returns: the configured ``Dropdown`` widget.
    """
    month_names = ts.short_months()
    return widgets.Dropdown(options=month_names,
                            selected_label=month, *args, **kwargs)
class WidgetFactory():
    ''' A factory for IPython widgets part of the \
    `RcWidget` GUI.

    :ivar rc_widget: A `RcWidget` instance that receives value updates.
    '''
    def __init__(self, rcw):
        self.rc_widget = rcw

    # TODO allow ‘rgbcmykw’
    # TODO allow standard names like 'aqua'
    def color_chooser(self, property):
        """ Creates a box with widgets related to choosing a color.

        :param property: A color related key in matplotlib.rcParams.
        :returns: A box with widgets (three RGB sliders plus a hex preview).
        """
        cc = ColorConverter()
        # Current color of this rcParams key as an (r, g, b) triple in [0, 1].
        rgb = cc.to_rgb(mpl.rcParams[property])
        logger = log_api.env_logger()
        logger.debug('{0} {1}'.format(property, rgb))
        # One slider per channel; the border color hints at the channel.
        r = widgets.FloatSlider(min=0, max=1, value=rgb[0], description='Red')
        r.border_color = 'red'
        g = widgets.FloatSlider(min=0, max=1, value=rgb[1],
                                description='Green')
        g.border_color = 'green'
        b = widgets.FloatSlider(min=0, max=1, value=rgb[2], description='Blue')
        b.border_color = 'blue'
        # HTML swatch previewing the current hex value.
        h = widgets.widget_string.HTML(property)
        # TODO put this in a func
        hex = rgb2hex((rgb[0], rgb[1], rgb[2]))
        h.value = '<p style="background-color: {0};">{0}</p>'.format(hex)

        def update(name, value):
            # Recompute the hex swatch from the sliders and propagate the
            # new color to the owning RcWidget.
            hex = rgb2hex((r.value, g.value, b.value))
            h.value = '<p style="background-color: {0};">{0}</p>'.format(hex)
            self.rc_widget.process(property, hex)

        r.on_trait_change(update, 'value')
        g.on_trait_change(update, 'value')
        b.on_trait_change(update, 'value')
        box = widgets.VBox(children=(r, g, b, h))
        box.border_style = 'dotted'
        box.description = property
        return box
class PageBuilder():
    """Assembles one `RcWidget` tab page out of the rcParams keys that share
    a common prefix (e.g. ``'axes.'``).

    :ivar widgets: description -> widget mapping for the page.
    :ivar prefix: the key prefix this page covers.
    :ivar factory: a `WidgetFactory` used to build color choosers.
    :ivar keys: the matplotlib rcParams keys matching the prefix.
    """
    def __init__(self, prefix, factory):
        self.prefix = prefix
        self.factory = factory
        self.widgets = {}
        self.keys = collect.filter_dict_keys(
            lambda key: key.startswith(prefix), mpl.rcParams)
        self.add_color_choosers()

    def add(self, widget):
        """Register a widget on this page, keyed by its description."""
        self.widgets[widget.description] = widget

    def add_color_choosers(self):
        """Create a color chooser for every ``*color`` key of this page."""
        logger = log_api.env_logger()
        logger.debug('self.keys {}'.format(self.keys))
        color_keys = collect.filter_list(lambda key: key.endswith('color'),
                                         self.keys)
        logger.debug('Color keys {}'.format(color_keys))
        self.widgets.update(
            {key: self.factory.color_chooser(key) for key in color_keys})

    def build(self):
        """Return an `Accordion` holding this page's widgets, sorted by key."""
        self.widgets = collect.sort_dict_by_keys(self.widgets)
        accordion = widgets.Accordion()
        accordion.children = list(self.widgets.values())
        for index, key in enumerate(self.widgets):
            accordion.set_title(index, key)
        return accordion
# TODO mix DIY widgets with WidgetFactory calls
class RcWidget():
    """ This widget configures the
    `matplotlib.rcParams` global settings.

    :ivar context: A `Context` instance used to persist/restore settings.
    :ivar factory: A `WidgetFactory` instance building the color choosers.
    """
    def __init__(self, context=None):
        self.context = context
        self.factory = WidgetFactory(self)

        # Restore any rcParams previously saved for this context.
        if self.context:
            rc = self.context.read_rc()

            if rc:
                mpl.rcParams.update(rc)

        # Snapshot used later to compute which params the user changed.
        self.old = mpl.rcParams.copy()
        tab = widgets.Tab(children=[self.axes_page(), self.figure_page(),
                                    self.font_page(), self.grid_page(),
                                    self.lines_page()])
        tab.set_title(0, 'Axes')
        tab.set_title(1, 'Figure')
        tab.set_title(2, 'Font')
        tab.set_title(3, 'Grid')
        tab.set_title(4, 'Lines')
        display(tab)

        # Feedback areas: applied updates and (on demand) the full rcParams.
        self.updates_text = widgets.HTML()
        display(self.updates_text)

        self.params_text = widgets.HTML()
        display(self.params_text)

        self.show_params = widgets.widget_button.Button()
        self.show_params.description = 'Show rcParams'
        self.show_params.on_click(self.print_params)
        display(self.show_params)

    def print_params(self, button_instance):
        """ Prints the current matplotlib.rcParams in a textarea.

        :param button_instance: The button to click on.
        """
        html = '<textarea rows="5" cols="50" readonly>{}</textarea>'
        self.params_text.value = html.format(pprint.pformat(mpl.rcParams))

    def process(self, param, value):
        """ Processes changes to the GUI and updates `matplotlib.rcParams`.

        :param param: A key in the `matplotlib.rcParams` dictionary.
        :param value: A value in the `matplotlib.rcParams` dictionary.
        """
        logger = log_api.env_logger()
        logger.debug('name={0}, value={1}'.format(param, value))
        # Clear the (possibly stale) full-params dump.
        self.params_text.value = ''
        mpl.rcParams[param] = value
        # Diff against the startup snapshot, persist, and show the result.
        updates = collect.dict_updates(self.old, mpl.rcParams)

        if self.context:
            self.context.update_rc(updates)

        self.updates_text.value = ('<p>mpl.RcParams updates {}</p>'.
                                   format(updates))

    def axes_page(self):
        """ Creates a tab page for the `matplotlib.rcParams`
        keys which start with **axes.**"""
        linewidth = create_linewidth_slider('axes.linewidth')
        titlesize = create_size_slider('axes.titlesize')

        def update_axes_linewidth(name, value):
            self.process(linewidth.description, value)

        def update_titlesize(name, value):
            self.process(titlesize.description, value)

        linewidth.on_trait_change(update_axes_linewidth, 'value')
        titlesize.on_trait_change(update_titlesize, 'value')
        page = PageBuilder('axes.', self.factory)
        page.add(linewidth)
        page.add(titlesize)
        return page.build()

    def font_page(self):
        """ Creates a tab page for the `matplotlib.rcParams`
        keys which start with **font.**"""
        size = create_size_slider('font.size')

        def update_font_size(name, value):
            self.process(size.description, value)

        size.on_trait_change(update_font_size, 'value')
        page = PageBuilder('font.', self.factory)
        page.add(size)
        return page.build()

    def figure_page(self):
        """ Creates a tab page for the `matplotlib.rcParams`
        keys which start with **figure.**"""
        figsize = widgets.Box()
        figsize.description = 'figure.figsize'
        figsize_val = mpl.rcParams[figsize.description]
        # NOTE(review): matplotlib's figure.figsize is (width, height), but
        # index 0 is labelled 'Height' here (and written back first by
        # update_fig_size) -- confirm the labels aren't swapped.
        height = widgets.FloatSlider(min=0, max=16,
                                     value=figsize_val[0],
                                     description='Height')
        width = widgets.FloatSlider(
            min=0, max=12, value=figsize_val[1],
            description='Width')
        figsize.children = [height, width]

        def update_fig_size(name, value):
            self.process(figsize.description, (height.value, width.value))

        height.on_trait_change(update_fig_size, 'value')
        width.on_trait_change(update_fig_size, 'value')
        page = PageBuilder('figure.', self.factory)
        page.add(figsize)
        return page.build()

    def grid_page(self):
        """ Creates a tab page for the `matplotlib.rcParams`
        keys which start with **grid.**"""
        logger = log_api.env_logger()
        logger.debug('Created grid page')
        linewidth = create_linewidth_slider('grid.linewidth')

        def update_linewidth(name, value):
            self.process(linewidth.description, value)

        linewidth.on_trait_change(update_linewidth, 'value')
        page = PageBuilder('grid.', self.factory)
        page.add(linewidth)
        return page.build()

    def lines_page(self):
        """ Creates a tab page for the `matplotlib.rcParams`
        keys which start with **lines.**"""
        linewidth = create_linewidth_slider('lines.linewidth')

        def update_linewidth(name, value):
            self.process(linewidth.description, value)

        linewidth.on_trait_change(update_linewidth, 'value')
        page = PageBuilder('lines.', self.factory)
        page.add(linewidth)
        return page.build()
def create_linewidth_slider(desc, *args, **kwargs):
    """Build an integer slider (0-9) for a linewidth-type rcParams key.

    :param desc: the matplotlib.rcParams key, also used as the label.
    :returns: the configured ``IntSlider``.
    """
    current = mpl.rcParams[desc]
    # rcParams occasionally stores a string here; fall back to 0 then.
    # TODO deal with strings
    val = 0 if isinstance(current, str) else current
    return widgets.IntSlider(min=0, max=9, value=val,
                             description=desc, *args, **kwargs)
def create_size_slider(desc, *args, **kwargs):
    """Build a float slider for a size-type rcParams key.

    :param desc: the matplotlib.rcParams key, also used as the label.
    :returns: the configured ``FloatSlider``.
    """
    current = mpl.rcParams[desc]
    # rcParams occasionally stores a string here; fall back to 0 then.
    # TODO deal with strings
    val = 0 if isinstance(current, str) else current
    return widgets.FloatSlider(min=0, value=val,
                               description=desc, *args, **kwargs)
class LatexRenderer():
    r""" Utility class which numbers and renders Latex equations
    in an IPython/Jupyter notebook.

    :ivar chapter: Chapter number (omitted from the label when falsy).
    :ivar curr: Current equation number.
    :ivar numbers: List of used equation numbers.
    :ivar context: A `Context` instance used to persist equation numbers.

    .. code-block:: python

        import dautils as dl

        lr = dl.nb.LatexRenderer(chapter=6, start=6, context=context)
        lr.render(r'Y_j= \sum _{i=-(m-1)/2}')
    """
    def __init__(self, chapter=None, start=1, context=None):
        self.chapter = chapter
        self.curr = start
        self.numbers = []
        self.context = context

        if self.context:
            from_context = self.context.read_latex()

            if from_context:
                log_api.Printer().print(from_context)
                eqn_list = list(from_context.values())
                # Guard against reusing an equation number already taken by
                # another notebook sharing this configuration.
                assert start not in collect.flatten(eqn_list), from_context

    # DIY numbering because IPython doesn't
    # support numbering
    def number_equation(self):
        """ Creates a Latex string relating
        to the numbering of equations.

        :returns: A Latex string with the correct equation number, e.g.
            ``(6.3)\\hspace{1cm}``.
        """
        number = '('

        if self.chapter:
            number += str(self.chapter) + '.'

        # FIX: use a raw string -- '\h' is not a valid escape sequence, so
        # the previous non-raw literal produced the same text but triggered
        # a DeprecationWarning (SyntaxWarning on newer Pythons).
        number += str(self.curr) + r')\hspace{1cm}'
        return number

    def render(self, equation):
        """ Renders an equation and advances the equation counter.

        :param equation: A string containing the equation.
        """
        number = self.number_equation()
        self.numbers.append(self.curr)
        logger = log_api.env_logger()

        if self.context:
            logger.debug(self.numbers)
            self.context.update_latex(self.numbers)

        display(Math(r'%s' % (number + equation)))
        self.curr += 1
# TODO store key/id information in sql lite db in CONF_DIR
# with key mnemonic, creation time etc
class Context():
    """Mediates storing and retrieving per-notebook configuration settings.

    :ivar fname: unique name of the context, e.g. the notebook name.
    """
    def __init__(self, fname):
        self.fname = fname
        # Derived section names for the label and Latex settings.
        self.labels = fname + '.labels'
        self.latex = fname + '.latex'

    def read_rc(self):
        """Read the `matplotlib.rcParams` settings used by `RcWidget`.

        :returns: the stored settings or an empty dict.
        """
        settings = conf.read_rc()

        if settings:
            settings = settings.get(self.fname, {})

        log_api.env_logger().debug('config %s', settings)
        return settings

    def update_rc(self, updates):
        """Persist `matplotlib.rcParams` changes made through `RcWidget`.

        :param updates: changes to the configuration.
        """
        conf.update_rc(self.fname, updates)

    def read_labels(self):
        """Read the subplot label settings used by `LabelWidget`.

        :returns: the stored settings or None when nothing is stored.
        """
        settings = conf.read_rc()

        if settings:
            settings = settings.get(self.labels, None)

        return settings

    def update_labels(self, updates):
        """Persist subplot label changes made through `LabelWidget`.

        :param updates: changes to the configuration.
        """
        conf.update_rc(self.labels, updates)

    def read_latex(self):
        """Read the equation numbers recorded by `LatexRenderer` instances.

        :returns: a dict of all '*.latex' sections excluding this context's
            own section, or None when no configuration exists.
        """
        settings = conf.read_rc()

        if settings:
            latex_keys = collect.filter_dict_keys(
                lambda key: key.endswith('.latex'), settings)
            settings = collect.dict_from_keys(settings, latex_keys)
            # Our own numbers are irrelevant for collision checks.
            settings.pop(self.latex, None)

        return settings

    def update_latex(self, updates):
        """Persist the equation numbers used by `LatexRenderer`.

        :param updates: changes to the configuration.
        """
        conf.update_rc(self.latex, updates)
class NullContext(Context):
    """Do-nothing `Context` following the Null Object pattern.

    Instances are falsy, so ``if context:`` guards skip all persistence.
    """
    def __init__(self):
        # Deliberately skip Context.__init__: there is nothing to name.
        pass

    def __bool__(self):
        return False

    def read_rc(self):
        return None

    def read_labels(self):
        return None

    def read_latex(self):
        return None

    def update_rc(self, updates):
        return None

    def update_labels(self, updates):
        return None

    def update_latex(self, updates):
        return None
class LabelWidget():
    """ A widget you can use to easily fill
    in strings for titles, xlabels and ylabels
    of matplotlib subplots.

    :ivar context: A `Context` instance.
    :ivar labels: A grid of labels.

    .. code-block:: python

        import dautils as dl

        dl.nb.LabelWidget(2, 2, context)
    """
    def __init__(self, nrows=1, ncols=1, context=NullContext()):
        # The NullContext default is falsy, so omitting a real context
        # makes this assert fail on purpose: callers MUST supply one.
        # (NOTE: assert is stripped under python -O.)
        assert context, 'Define context'
        self.context = context
        self.labels = collect.GridList(nrows, ncols, {})
        self.read_old_labels()

        # One HBox per subplot row, each holding one label box per column.
        for i in range(nrows):
            children = []

            for j in range(ncols):
                labels_box = self.create_mpl_labels_box(i, j,
                                                        self.labels.grid[i][j])
                children.append(labels_box)

            display(widgets.HBox(children=children))

        display(widgets.HTML('<br/>'))

    def read_old_labels(self):
        """ Reads the labels from a configuration file. """
        old = self.context.read_labels()

        if old:
            self.labels.fill(old)

    def update(self, name, value, row, col):
        """ Updates an internal data structure and
        related configuration file.

        :param name: title, xlabel, legend or ylabel.
        :param value: A string representing a label. \
            If needed use curly braces as used by the Python \
            format() string method.
        :param row: The number of the row.
        :param col: The number of the col.
        """
        self.labels.update(row, col, {name: value})
        self.context.update_labels(self.labels.grid)

    def create_mpl_labels_box(self, row, col, old):
        """ Creates a box with the widgets for a single
        subplot (cell).

        :param row: The row number of the subplot.
        :param col: The column number of the subplot.
        :param old: The setting for this subplot from a configuration file.
        :returns: The box with widgets.
        """
        box = widgets.VBox()
        box.border_color = 'red'
        # Coordinate suffix so each field shows which subplot it targets.
        coord = ' [{0}][{1}]'.format(row, col)
        title = widgets.widget_string.Text(old.get('title', ''),
                                           description='title' + coord)
        xlabel = widgets.widget_string.Text(old.get('xlabel', ''),
                                            description='xlabel' + coord)
        ylabel = widgets.widget_string.Text(old.get('ylabel', ''),
                                            description='ylabel' + coord)
        legend = widgets.Dropdown(options=['No Legend', 'loc=best',
                                           'loc=upper right',
                                           'loc=upper left'],
                                  selected_label=old.get('legend',
                                                         'No Legend'))
        box.children = [title, xlabel, ylabel, legend]

        # Each field persists its own change via update(); row/col are
        # captured by the closures.
        def update_title(name, value):
            self.update('title', value, row, col)

        title.on_trait_change(update_title, 'value')

        def update_xlabel(name, value):
            self.update('xlabel', value, row, col)

        xlabel.on_trait_change(update_xlabel, 'value')

        def update_ylabel(name, value):
            self.update('ylabel', value, row, col)

        ylabel.on_trait_change(update_ylabel, 'value')

        def update_legend(name, value):
            self.update('legend', value, row, col)

        legend.on_trait_change(update_legend, 'value')
        return box
| 31.08838 | 79 | 0.59621 | from IPython.display import display
from IPython.display import Math
import ipywidgets as widgets
from dautils import collect
from dautils import conf
from dautils import ts
from dautils import log_api
import matplotlib as mpl
from matplotlib.colors import rgb2hex
import pprint
from matplotlib.colors import ColorConverter
def create_month_widget(month, *args, **kwargs):
return widgets.Dropdown(options=ts.short_months(),
selected_label=month, *args, **kwargs)
class WidgetFactory():
def __init__(self, rcw):
self.rc_widget = rcw
def color_chooser(self, property):
cc = ColorConverter()
rgb = cc.to_rgb(mpl.rcParams[property])
logger = log_api.env_logger()
logger.debug('{0} {1}'.format(property, rgb))
r = widgets.FloatSlider(min=0, max=1, value=rgb[0], description='Red')
r.border_color = 'red'
g = widgets.FloatSlider(min=0, max=1, value=rgb[1],
description='Green')
g.border_color = 'green'
b = widgets.FloatSlider(min=0, max=1, value=rgb[2], description='Blue')
b.border_color = 'blue'
h = widgets.widget_string.HTML(property)
hex = rgb2hex((rgb[0], rgb[1], rgb[2]))
h.value = '<p style="background-color: {0};">{0}</p>'.format(hex)
def update(name, value):
hex = rgb2hex((r.value, g.value, b.value))
h.value = '<p style="background-color: {0};">{0}</p>'.format(hex)
self.rc_widget.process(property, hex)
r.on_trait_change(update, 'value')
g.on_trait_change(update, 'value')
b.on_trait_change(update, 'value')
box = widgets.VBox(children=(r, g, b, h))
box.border_style = 'dotted'
box.description = property
return box
class PageBuilder():
def __init__(self, prefix, factory):
self.widgets = {}
self.prefix = prefix
self.factory = factory
self.keys = collect.filter_dict_keys(
lambda x: x.startswith(self.prefix), mpl.rcParams)
self.add_color_choosers()
def add(self, widget):
self.widgets[widget.description] = widget
def add_color_choosers(self):
logger = log_api.env_logger()
logger.debug('self.keys {}'.format(self.keys))
color_keys = collect.filter_list(lambda x: x.endswith('color'),
self.keys)
logger.debug('Color keys {}'.format(color_keys))
for key in color_keys:
self.widgets[key] = self.factory.color_chooser(key)
def build(self):
self.widgets = collect.sort_dict_by_keys(self.widgets)
box = widgets.Accordion()
box.children = [self.widgets[k] for k in self.widgets.keys()]
for i, k in enumerate(self.widgets.keys()):
box.set_title(i, k)
return box
class RcWidget():
def __init__(self, context=None):
self.context = context
self.factory = WidgetFactory(self)
if self.context:
rc = self.context.read_rc()
if rc:
mpl.rcParams.update(rc)
self.old = mpl.rcParams.copy()
tab = widgets.Tab(children=[self.axes_page(), self.figure_page(),
self.font_page(), self.grid_page(),
self.lines_page()])
tab.set_title(0, 'Axes')
tab.set_title(1, 'Figure')
tab.set_title(2, 'Font')
tab.set_title(3, 'Grid')
tab.set_title(4, 'Lines')
display(tab)
self.updates_text = widgets.HTML()
display(self.updates_text)
self.params_text = widgets.HTML()
display(self.params_text)
self.show_params = widgets.widget_button.Button()
self.show_params.description = 'Show rcParams'
self.show_params.on_click(self.print_params)
display(self.show_params)
def print_params(self, button_instance):
html = '<textarea rows="5" cols="50" readonly>{}</textarea>'
self.params_text.value = html.format(pprint.pformat(mpl.rcParams))
def process(self, param, value):
logger = log_api.env_logger()
logger.debug('name={0}, value={1}'.format(param, value))
self.params_text.value = ''
mpl.rcParams[param] = value
updates = collect.dict_updates(self.old, mpl.rcParams)
if self.context:
self.context.update_rc(updates)
self.updates_text.value = ('<p>mpl.RcParams updates {}</p>'.
format(updates))
def axes_page(self):
linewidth = create_linewidth_slider('axes.linewidth')
titlesize = create_size_slider('axes.titlesize')
def update_axes_linewidth(name, value):
self.process(linewidth.description, value)
def update_titlesize(name, value):
self.process(titlesize.description, value)
linewidth.on_trait_change(update_axes_linewidth, 'value')
titlesize.on_trait_change(update_titlesize, 'value')
page = PageBuilder('axes.', self.factory)
page.add(linewidth)
page.add(titlesize)
return page.build()
def font_page(self):
size = create_size_slider('font.size')
def update_font_size(name, value):
self.process(size.description, value)
size.on_trait_change(update_font_size, 'value')
page = PageBuilder('font.', self.factory)
page.add(size)
return page.build()
def figure_page(self):
figsize = widgets.Box()
figsize.description = 'figure.figsize'
figsize_val = mpl.rcParams[figsize.description]
height = widgets.FloatSlider(min=0, max=16,
value=figsize_val[0],
description='Height')
width = widgets.FloatSlider(
min=0, max=12, value=figsize_val[1],
description='Width')
figsize.children = [height, width]
def update_fig_size(name, value):
self.process(figsize.description, (height.value, width.value))
height.on_trait_change(update_fig_size, 'value')
width.on_trait_change(update_fig_size, 'value')
page = PageBuilder('figure.', self.factory)
page.add(figsize)
return page.build()
def grid_page(self):
logger = log_api.env_logger()
logger.debug('Created grid page')
linewidth = create_linewidth_slider('grid.linewidth')
def update_linewidth(name, value):
self.process(linewidth.description, value)
linewidth.on_trait_change(update_linewidth, 'value')
page = PageBuilder('grid.', self.factory)
page.add(linewidth)
return page.build()
def lines_page(self):
linewidth = create_linewidth_slider('lines.linewidth')
def update_linewidth(name, value):
self.process(linewidth.description, value)
linewidth.on_trait_change(update_linewidth, 'value')
page = PageBuilder('lines.', self.factory)
page.add(linewidth)
return page.build()
def create_linewidth_slider(desc, *args, **kwargs):
from_rc = mpl.rcParams[desc]
val = 0
if not isinstance(from_rc, str):
val = from_rc
return widgets.IntSlider(min=0, max=9, value=val,
description=desc, *args, **kwargs)
def create_size_slider(desc, *args, **kwargs):
from_rc = mpl.rcParams[desc]
val = 0
if not isinstance(from_rc, str):
val = from_rc
return widgets.FloatSlider(min=0, value=val,
description=desc, *args, **kwargs)
class LatexRenderer():
def __init__(self, chapter=None, start=1, context=None):
self.chapter = chapter
self.curr = start
self.numbers = []
self.context = context
if self.context:
from_context = self.context.read_latex()
if from_context:
log_api.Printer().print(from_context)
eqn_list = list(from_context.values())
assert start not in collect.flatten(eqn_list), from_context
# support numbering
def number_equation(self):
number = '('
if self.chapter:
number += str(self.chapter) + '.'
number += str(self.curr) + ')\hspace{1cm}'
return number
def render(self, equation):
number = self.number_equation()
self.numbers.append(self.curr)
logger = log_api.env_logger()
if self.context:
logger.debug(self.numbers)
self.context.update_latex(self.numbers)
display(Math(r'%s' % (number + equation)))
self.curr += 1
# TODO store key/id information in sql lite db in CONF_DIR
# with key mnemonic, creation time etc
class Context():
def __init__(self, fname):
self.fname = fname
self.labels = fname + '.labels'
self.latex = fname + '.latex'
def read_rc(self):
config = conf.read_rc()
if config:
config = config.get(self.fname, {})
logger = log_api.env_logger()
logger.debug('config %s', config)
return config
def update_rc(self, updates):
conf.update_rc(self.fname, updates)
def read_labels(self):
config = conf.read_rc()
if config:
config = config.get(self.labels, None)
return config
def update_labels(self, updates):
conf.update_rc(self.labels, updates)
def read_latex(self):
config = conf.read_rc()
if config:
keys = collect.filter_dict_keys(lambda x: x.endswith('.latex'),
config)
config = collect.dict_from_keys(config, keys)
config.pop(self.latex, None)
return config
def update_latex(self, updates):
conf.update_rc(self.latex, updates)
class NullContext(Context):
def __init__(self):
pass
def __bool__(self):
return False
def read_rc(self):
pass
def update_rc(self, updates):
pass
def read_labels(self):
pass
def update_labels(self, updates):
pass
def read_latex(self):
pass
def update_latex(self, updates):
pass
class LabelWidget():
def __init__(self, nrows=1, ncols=1, context=NullContext()):
assert context, 'Define context'
self.context = context
self.labels = collect.GridList(nrows, ncols, {})
self.read_old_labels()
for i in range(nrows):
children = []
for j in range(ncols):
labels_box = self.create_mpl_labels_box(i, j,
self.labels.grid[i][j])
children.append(labels_box)
display(widgets.HBox(children=children))
display(widgets.HTML('<br/>'))
def read_old_labels(self):
old = self.context.read_labels()
if old:
self.labels.fill(old)
def update(self, name, value, row, col):
self.labels.update(row, col, {name: value})
self.context.update_labels(self.labels.grid)
def create_mpl_labels_box(self, row, col, old):
box = widgets.VBox()
box.border_color = 'red'
coord = ' [{0}][{1}]'.format(row, col)
title = widgets.widget_string.Text(old.get('title', ''),
description='title' + coord)
xlabel = widgets.widget_string.Text(old.get('xlabel', ''),
description='xlabel' + coord)
ylabel = widgets.widget_string.Text(old.get('ylabel', ''),
description='ylabel' + coord)
legend = widgets.Dropdown(options=['No Legend', 'loc=best',
'loc=upper right',
'loc=upper left'],
selected_label=old.get('legend',
'No Legend'))
box.children = [title, xlabel, ylabel, legend]
def update_title(name, value):
self.update('title', value, row, col)
title.on_trait_change(update_title, 'value')
def update_xlabel(name, value):
self.update('xlabel', value, row, col)
xlabel.on_trait_change(update_xlabel, 'value')
def update_ylabel(name, value):
self.update('ylabel', value, row, col)
ylabel.on_trait_change(update_ylabel, 'value')
def update_legend(name, value):
self.update('legend', value, row, col)
legend.on_trait_change(update_legend, 'value')
return box
| true | true |
1c3afe002948062303b2fdb2338c03553fc6ecf1 | 2,763 | py | Python | mqtt_pi0_client/list_control.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | [
"BSD-3-Clause"
] | null | null | null | mqtt_pi0_client/list_control.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | [
"BSD-3-Clause"
] | null | null | null | mqtt_pi0_client/list_control.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | [
"BSD-3-Clause"
] | null | null | null |
from layer import Layer
from button_enum import *
from event import *
BIAS_Y = -2
class ListControl(Layer):
def __init__(self, holder, name, labels):
super().__init__(holder, name)
self._labels = labels
self._current_item = 0
self._top_item = 0
def handle_event(self, event):
super().handle_event(event)
if type(event) == ButtonDownEvent:
if event.button_type == ButtonType.STICK_UP:
self._current_item -= 1
if self._current_item < self._top_item:
self._top_item = self._current_item
if self._current_item < 0:
self._current_item = len(self._labels) - 1
self._top_item = self._current_item - self._holder._row_count + 1
if self._top_item < 0:
self._top_item = 0
if event.button_type == ButtonType.STICK_DOWN:
self._current_item += 1
if self._current_item >= self._top_item + self._holder._row_count:
self._top_item = self._current_item - self._holder._row_count + 1
if self._current_item >= len(self._labels):
self._current_item = 0
self._top_item = self._current_item - self._holder._row_count + 1
if self._top_item < 0:
self._top_item = 0
if event.button_type == ButtonType.STICK_LEFT or \
event.button_type == ButtonType.BUTTON_1:
self._holder.add_event(ListBack(self._name))
if event.button_type == ButtonType.STICK_RIGHT or \
event.button_type == ButtonType.STICK_PRESS or \
event.button_type == ButtonType.BUTTON_3:
self._holder.add_event(ListItemSelected(self._name, self._current_item))
if event.button_type == ButtonType.BUTTON_2:
pass
def paint(self, image_draw):
items_blow = len(self._labels) - self._top_item
if items_blow > self._holder.row_count:
items_blow = self._holder.row_count
for i in range(items_blow):
if i == self._current_item - self._top_item:
image_draw.text((0, self._holder.row_height * i + BIAS_Y),
'>{}'.format(self._labels[self._top_item + i]),
font=self._holder.font16, fill=self._holder.foreground_color)
else:
image_draw.text((0, self._holder.row_height * i + BIAS_Y),
' {}'.format(self._labels[self._top_item + i]),
font=self._holder.font16, fill=self._holder.foreground_color)
| 44.564516 | 93 | 0.564966 |
from layer import Layer
from button_enum import *
from event import *
BIAS_Y = -2
class ListControl(Layer):
def __init__(self, holder, name, labels):
super().__init__(holder, name)
self._labels = labels
self._current_item = 0
self._top_item = 0
def handle_event(self, event):
super().handle_event(event)
if type(event) == ButtonDownEvent:
if event.button_type == ButtonType.STICK_UP:
self._current_item -= 1
if self._current_item < self._top_item:
self._top_item = self._current_item
if self._current_item < 0:
self._current_item = len(self._labels) - 1
self._top_item = self._current_item - self._holder._row_count + 1
if self._top_item < 0:
self._top_item = 0
if event.button_type == ButtonType.STICK_DOWN:
self._current_item += 1
if self._current_item >= self._top_item + self._holder._row_count:
self._top_item = self._current_item - self._holder._row_count + 1
if self._current_item >= len(self._labels):
self._current_item = 0
self._top_item = self._current_item - self._holder._row_count + 1
if self._top_item < 0:
self._top_item = 0
if event.button_type == ButtonType.STICK_LEFT or \
event.button_type == ButtonType.BUTTON_1:
self._holder.add_event(ListBack(self._name))
if event.button_type == ButtonType.STICK_RIGHT or \
event.button_type == ButtonType.STICK_PRESS or \
event.button_type == ButtonType.BUTTON_3:
self._holder.add_event(ListItemSelected(self._name, self._current_item))
if event.button_type == ButtonType.BUTTON_2:
pass
def paint(self, image_draw):
items_blow = len(self._labels) - self._top_item
if items_blow > self._holder.row_count:
items_blow = self._holder.row_count
for i in range(items_blow):
if i == self._current_item - self._top_item:
image_draw.text((0, self._holder.row_height * i + BIAS_Y),
'>{}'.format(self._labels[self._top_item + i]),
font=self._holder.font16, fill=self._holder.foreground_color)
else:
image_draw.text((0, self._holder.row_height * i + BIAS_Y),
' {}'.format(self._labels[self._top_item + i]),
font=self._holder.font16, fill=self._holder.foreground_color)
| true | true |
1c3afe5cfc45a414c8c4351a3aecf2ce63bcb80e | 751 | py | Python | 200. Number of Islands/solution.py | alexwhyy/leetcode | 41664aa48137677d2f98817b9c512d76f13c525f | [
"MIT"
] | null | null | null | 200. Number of Islands/solution.py | alexwhyy/leetcode | 41664aa48137677d2f98817b9c512d76f13c525f | [
"MIT"
] | null | null | null | 200. Number of Islands/solution.py | alexwhyy/leetcode | 41664aa48137677d2f98817b9c512d76f13c525f | [
"MIT"
] | null | null | null | class Solution:
def traverse(self, grid, y, x):
grid[y][x] = "0"
if y - 1 >= 0 and grid[y - 1][x] == "1":
self.traverse(grid, y - 1, x)
if x + 1 < len(grid[0]) and grid[y][x + 1] == "1":
self.traverse(grid, y, x + 1)
if y + 1 < len(grid) and grid[y + 1][x] == "1":
self.traverse(grid, y + 1, x)
if x - 1 >= 0 and grid[y][x - 1] == "1":
self.traverse(grid, y, x - 1)
def numIslands(self, grid: List[List[str]]) -> int:
count = 0
for y in range(0, len(grid)):
for x in range(0, len(grid[y])):
if grid[y][x] == "1":
self.traverse(grid, y, x)
count += 1
return count | 37.55 | 58 | 0.423435 | class Solution:
def traverse(self, grid, y, x):
grid[y][x] = "0"
if y - 1 >= 0 and grid[y - 1][x] == "1":
self.traverse(grid, y - 1, x)
if x + 1 < len(grid[0]) and grid[y][x + 1] == "1":
self.traverse(grid, y, x + 1)
if y + 1 < len(grid) and grid[y + 1][x] == "1":
self.traverse(grid, y + 1, x)
if x - 1 >= 0 and grid[y][x - 1] == "1":
self.traverse(grid, y, x - 1)
def numIslands(self, grid: List[List[str]]) -> int:
count = 0
for y in range(0, len(grid)):
for x in range(0, len(grid[y])):
if grid[y][x] == "1":
self.traverse(grid, y, x)
count += 1
return count | true | true |
1c3afea626267efe957e1bdf8d89b5b62b7b2996 | 11,343 | py | Python | myven/lib/python3.8/site-packages/ansible/modules/network/a10/a10_virtual_server.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/modules/network/a10/a10_virtual_server.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/modules/network/a10/a10_virtual_server.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>,
# Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
default: null
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
required: false
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: string
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port field")
# validate the port protocol is present, and convert it to
# the internal API integer value (and validate it)
if 'protocol' in item:
protocol = axapi_get_vport_protocol(item['protocol'])
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
# ensure the service_group field is at least present
if 'service_group' not in item:
item['service_group'] = ''
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
partition=dict(type='str', default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
slb_virtual_ip = module.params['virtual_server_ip']
slb_virtual_status = module.params['virtual_server_status']
slb_virtual_ports = module.params['virtual_server_ports']
if slb_virtual is None:
module.fail_json(msg='virtual_server is required')
validate_ports(module, slb_virtual_ports)
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
changed = False
if state == 'present':
json_post = {
'virtual_server': {
'name': slb_virtual,
'address': slb_virtual_ip,
'status': axapi_enabled_disabled(slb_virtual_status),
'vport_list': slb_virtual_ports,
}
}
# before creating/updating we need to validate that any
# service groups defined in the ports list exist since
# since the API will still create port definitions for
# them while indicating a failure occurred
checked_service_groups = []
for port in slb_virtual_ports:
if 'service_group' in port and port['service_group'] not in checked_service_groups:
# skip blank service group entries
if port['service_group'] == '':
continue
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
if axapi_failure(result):
module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
checked_service_groups.append(port['service_group'])
if not slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
else:
def needs_update(src_ports, dst_ports):
'''
Checks to determine if the port definitions of the src_ports
array are in or different from those in dst_ports. If there is
a difference, this function returns true, otherwise false.
'''
for src_port in src_ports:
found = False
different = False
for dst_port in dst_ports:
if src_port['port'] == dst_port['port']:
found = True
for valid_field in VALID_PORT_FIELDS:
if src_port[valid_field] != dst_port[valid_field]:
different = True
break
if found or different:
break
if not found or different:
return True
# every port from the src exists in the dst, and none of them were different
return False
defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
# we check for a needed update both ways, in case ports
# are missing from either the ones specified by the user
# or from those on the device
if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
else:
result = slb_virtual_data
elif state == 'absent':
if slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
changed = True
else:
result = dict(msg="the virtual server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
| 38.979381 | 138 | 0.627876 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
default: null
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
required: false
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: string
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port field")
# validate the port protocol is present, and convert it to
# the internal API integer value (and validate it)
if 'protocol' in item:
protocol = axapi_get_vport_protocol(item['protocol'])
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
# ensure the service_group field is at least present
if 'service_group' not in item:
item['service_group'] = ''
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
partition=dict(type='str', default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
slb_virtual_ip = module.params['virtual_server_ip']
slb_virtual_status = module.params['virtual_server_status']
slb_virtual_ports = module.params['virtual_server_ports']
if slb_virtual is None:
module.fail_json(msg='virtual_server is required')
validate_ports(module, slb_virtual_ports)
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
changed = False
if state == 'present':
json_post = {
'virtual_server': {
'name': slb_virtual,
'address': slb_virtual_ip,
'status': axapi_enabled_disabled(slb_virtual_status),
'vport_list': slb_virtual_ports,
}
}
# before creating/updating we need to validate that any
# service groups defined in the ports list exist since
# since the API will still create port definitions for
# them while indicating a failure occurred
checked_service_groups = []
for port in slb_virtual_ports:
if 'service_group' in port and port['service_group'] not in checked_service_groups:
# skip blank service group entries
if port['service_group'] == '':
continue
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
if axapi_failure(result):
module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
checked_service_groups.append(port['service_group'])
if not slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
else:
def needs_update(src_ports, dst_ports):
'''
Checks to determine if the port definitions of the src_ports
array are in or different from those in dst_ports. If there is
a difference, this function returns true, otherwise false.
'''
for src_port in src_ports:
found = False
different = False
for dst_port in dst_ports:
if src_port['port'] == dst_port['port']:
found = True
for valid_field in VALID_PORT_FIELDS:
if src_port[valid_field] != dst_port[valid_field]:
different = True
break
if found or different:
break
if not found or different:
return True
# every port from the src exists in the dst, and none of them were different
return False
defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
# we check for a needed update both ways, in case ports
# are missing from either the ones specified by the user
# or from those on the device
if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
else:
result = slb_virtual_data
elif state == 'absent':
if slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
changed = True
else:
result = dict(msg="the virtual server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
| true | true |
1c3afeb4753bda4fbff9b2dcc854e5d06db45765 | 1,054 | py | Python | python36/dxa/sn_random_numbers.py | luckyzflQ/py4fix | 03c17eeab201f9510789b328609273205db75d41 | [
"CNRI-Python"
] | 1 | 2018-09-10T18:51:14.000Z | 2018-09-10T18:51:14.000Z | python3/dxa/sn_random_numbers.py | ioancw/py4fi | bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec | [
"CNRI-Python"
] | 2 | 2020-10-27T19:44:15.000Z | 2020-11-03T23:55:36.000Z | python3/dxa/sn_random_numbers.py | ioancw/py4fi | bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec | [
"CNRI-Python"
] | 2 | 2019-03-05T15:03:15.000Z | 2019-12-27T19:09:41.000Z | import numpy as np
def sn_random_numbers(shape, antithetic=True, moment_matching=True,
fixed_seed=False):
''' Returns an array of shape shape with (pseudo)random numbers
that are standard normally distributed.
Parameters
==========
shape : tuple (o, n, m)
generation of array with shape (o, n, m)
antithetic : Boolean
generation of antithetic variates
moment_matching : Boolean
matching of first and second moments
fixed_seed : Boolean
flag to fix the seed
Results
=======
ran : (o, n, m) array of (pseudo)random numbers
'''
if fixed_seed:
np.random.seed(1000)
if antithetic:
ran = np.random.standard_normal((shape[0], shape[1], int(shape[2] / 2)))
ran = np.concatenate((ran, -ran), axis=2)
else:
ran = np.random.standard_normal(shape)
if moment_matching:
ran = ran - np.mean(ran)
ran = ran / np.std(ran)
if shape[0] == 1:
return ran[0]
else:
return ran | 29.277778 | 80 | 0.591082 | import numpy as np
def sn_random_numbers(shape, antithetic=True, moment_matching=True,
fixed_seed=False):
if fixed_seed:
np.random.seed(1000)
if antithetic:
ran = np.random.standard_normal((shape[0], shape[1], int(shape[2] / 2)))
ran = np.concatenate((ran, -ran), axis=2)
else:
ran = np.random.standard_normal(shape)
if moment_matching:
ran = ran - np.mean(ran)
ran = ran / np.std(ran)
if shape[0] == 1:
return ran[0]
else:
return ran | true | true |
1c3b00055fe866bbbd99dcd71c94fc7e0edbc356 | 396 | py | Python | src/blog/migrations/0009_auto_20200804_1509.py | SleepNoMore/django_blog_site | d23397e1595c488c424ed7eb46d1f844afd8178e | [
"MIT"
] | null | null | null | src/blog/migrations/0009_auto_20200804_1509.py | SleepNoMore/django_blog_site | d23397e1595c488c424ed7eb46d1f844afd8178e | [
"MIT"
] | null | null | null | src/blog/migrations/0009_auto_20200804_1509.py | SleepNoMore/django_blog_site | d23397e1595c488c424ed7eb46d1f844afd8178e | [
"MIT"
] | null | null | null | # Generated by Django 3.0.9 on 2020-08-04 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20200804_1508'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.CharField(blank=True, max_length=255),
),
]
| 20.842105 | 63 | 0.60101 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20200804_1508'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.CharField(blank=True, max_length=255),
),
]
| true | true |
1c3b0039b51ce776822a9b608112c4ba7e849670 | 7,997 | py | Python | django/utils/http.py | blaze33/django | 2f6d887bd0a110e3a662ac1d056d6cdabf38632b | [
"BSD-3-Clause"
] | null | null | null | django/utils/http.py | blaze33/django | 2f6d887bd0a110e3a662ac1d056d6cdabf38632b | [
"BSD-3-Clause"
] | 1 | 2016-02-19T00:22:18.000Z | 2016-02-19T00:22:18.000Z | django/utils/http.py | blaze33/django | 2f6d887bd0a110e3a662ac1d056d6cdabf38632b | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError: # Python 2
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
# Matches a quoted HTTP ETag value, optionally prefixed with "W/" for a weak
# validator; group 1 captures the tag contents (backslash escapes allowed).
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
# Lower-cased month abbreviations; list index + 1 yields the month number.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Named-group regex fragments shared by the three HTTP date formats below.
__D = r'(?P<day>\d{2})'  # two-digit day
__D2 = r'(?P<day>[ \d]\d)'  # day, possibly space-padded (asctime style)
__M = r'(?P<mon>\w{3})'  # three-letter month abbreviation
__Y = r'(?P<year>\d{4})'  # four-digit year
__Y2 = r'(?P<year>\d{2})'  # two-digit year (RFC 850)
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'  # HH:MM:SS time
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is UTF-8 encoded before quoting, so the returned string
    can safely be used as part of an argument to a subsequent iri_to_uri()
    call without double-quoting occurring.
    """
    quoted = urllib_parse.quote(force_str(url), force_str(safe))
    return force_text(quoted)
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is UTF-8 encoded before quoting, so the returned
    string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    quoted = urllib_parse.quote_plus(force_str(url), force_str(safe))
    return force_text(quoted)
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    unquoted = urllib_parse.unquote(force_str(quoted_url))
    return force_text(unquoted)
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    unquoted = urllib_parse.unquote_plus(force_str(quoted_url))
    return force_text(unquoted)
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        pairs = query.lists()
    elif hasattr(query, 'items'):
        pairs = query.items()
    else:
        # Assume an iterable of (key, value) two-tuples.
        pairs = query
    encoded = []
    for key, value in pairs:
        if isinstance(value, (list, tuple)):
            value = [force_str(item) for item in value]
        else:
            value = force_str(value)
        encoded.append((force_str(key), value))
    return urllib_parse.urlencode(encoded, doseq)
def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    # Splice dashes into the RFC 1123 style date and force a GMT zone label.
    return '{0}-{1}-{2} GMT'.format(rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    # Keep the date/time portion and replace the numeric zone with 'GMT'.
    return '{0} GMT'.format(formatdate(epoch_seconds)[:25])
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.
    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.
    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    # Try the three allowed formats in order; the for/else fires only when
    # none of the regexes matched.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        # RFC850 dates carry a two-digit year: pivot 00-69 to 20xx and
        # 70-99 to 19xx.
        if year < 100:
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        # NOTE(review): 'min' shadows the builtin within this scope only.
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        # timegm() interprets the struct_time as UTC, unlike time.mktime().
        return calendar.timegm(result.utctimetuple())
    except Exception:
        raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
    """Like parse_http_date(), but return None instead of raising on bad input."""
    try:
        result = parse_http_date(date)
    except Exception:
        return None
    return result
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if not six.PY3 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Converts a non-negative integer to a base36 string.
    Raises ValueError for negative (or, on Python 2, oversized) input and
    TypeError for non-integer input on Python 2.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    # Python 3's int is unbounded and type errors surface naturally, so these
    # explicit checks are only needed on Python 2.
    if not six.PY3:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find starting factor: the largest power of 36 not exceeding i.
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Construct base36 representation, most significant digit first.
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)
def parse_etags(etag_str):
    r"""
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # Undo backslash-escaping inside the quoted etag values; the ascii
    # round-trip lets unicode_escape operate on text input.
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags
def quote_etag(etag):
    """Wrap *etag* in double quotes, escaping backslashes and quotes as necessary."""
    escaped = etag.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return '"{0}"'.format(escaped)
def same_origin(url1, url2):
    """
    Checks if two URLs are 'same-origin'
    """
    # Compare (scheme, host, port) triples. The port is None when absent
    # from the URL, so e.g. 'http://x' and 'http://x:80' do NOT compare as
    # same-origin here -- default ports are not normalized.
    p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
    return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
| 34.769565 | 84 | 0.654996 | from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError:
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib_parse.urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
# To prevent overconsumption of server resources, reject any
# base36 string that is long than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if not six.PY3 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if not six.PY3:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
def parse_etags(etag_str):
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
| true | true |
1c3b00ee71b0bffd223e0576b49eb93ff93b66a5 | 1,194 | py | Python | email/app.py | vivekbhanu18/MASK_RCNN | 0fe43b5638a062f6fc42cd1a9598f45cb06b99b3 | [
"MIT"
] | null | null | null | email/app.py | vivekbhanu18/MASK_RCNN | 0fe43b5638a062f6fc42cd1a9598f45cb06b99b3 | [
"MIT"
] | null | null | null | email/app.py | vivekbhanu18/MASK_RCNN | 0fe43b5638a062f6fc42cd1a9598f45cb06b99b3 | [
"MIT"
] | null | null | null | # importing libraries
from flask import Flask
from flask_mail import Mail, Message
from flask import Flask, render_template, request,url_for, redirect
app = Flask(__name__)
mail = Mail(app) # instantiate the mail class (rebound below after config; this first instance is unused)
# configuration of mail
# SECURITY NOTE(review): plaintext credentials are committed in source; load
# MAIL_USERNAME / MAIL_PASSWORD from environment variables instead.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'bhanuvivek9705@gmail.com'
app.config['MAIL_PASSWORD'] = '9029861004'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True  # implicit SSL on port 465 (STARTTLS upgrade disabled above)
mail = Mail(app)
@app.route("/")
def display():
    """Render the landing page."""
    # NOTE(review): a second view below also registers the "/" rule; which
    # handler serves GET / depends on registration order -- confirm intent.
    return render_template("example.html")
# message object mapped to a particular URL '/'
@app.route("/", methods=["GET","POST"])
def email_sent():
    """On POST, send the fixed alert email; always re-render the page."""
    if request.method == "POST":
        # Recipient and sender are hardcoded; every POST triggers one email.
        msg = Message(
            'WASTE MANAGEMENT',
            sender ='bhanuvivek9705@gmail.com',
            recipients = ['jigarmange.jm@gmail.com']
        )
        msg.body = 'Hello, We have found that there is high usage of plastic in your locality. Please take care of it.'
        mail.send(msg)
    return render_template("example.html")
if __name__ == '__main__':
    # NOTE(review): debug=True must not be left enabled in production.
    app.run(debug = True)
| 30.615385 | 119 | 0.643216 |
from flask import Flask
from flask_mail import Mail, Message
from flask import Flask, render_template, request,url_for, redirect
app = Flask(__name__)
mail = Mail(app)
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'bhanuvivek9705@gmail.com'
app.config['MAIL_PASSWORD'] = '9029861004'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
@app.route("/")
def display():
return render_template("example.html")
@app.route("/", methods=["GET","POST"])
def email_sent():
if request.method == "POST":
msg = Message(
'WASTE MANAGEMENT',
sender ='bhanuvivek9705@gmail.com',
recipients = ['jigarmange.jm@gmail.com']
)
msg.body = 'Hello, We have found that there is high usage of plastic in your locality. Please take care of it.'
mail.send(msg)
return render_template("example.html")
if __name__ == '__main__':
app.run(debug = True)
| true | true |
1c3b0189e79fc7f49f9e191f628922ea18e6c04e | 1,367 | py | Python | maize/types/unfinished_header_block.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 14 | 2021-07-21T19:45:05.000Z | 2022-02-09T04:29:51.000Z | maize/types/unfinished_header_block.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 9 | 2021-07-24T09:30:46.000Z | 2021-12-05T19:51:29.000Z | maize/types/unfinished_header_block.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 5 | 2021-10-04T17:33:47.000Z | 2022-03-15T08:37:51.000Z | from dataclasses import dataclass
from typing import List, Optional
from maize.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock
from maize.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from maize.types.blockchain_format.vdf import VDFProof
from maize.types.end_of_slot_bundle import EndOfSubSlotBundle
from maize.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class UnfinishedHeaderBlock(Streamable):
    """Same as a FullBlock but without TransactionInfo and Generator.

    Used by light clients that only need header-level data.
    """
    finished_sub_slots: List[EndOfSubSlotBundle]  # If first sb
    reward_chain_block: RewardChainBlockUnfinished  # Reward chain trunk data
    challenge_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    reward_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    foliage: Foliage  # Reward chain foliage data
    foliage_transaction_block: Optional[FoliageTransactionBlock]  # Reward chain foliage data (tx block)
    transactions_filter: bytes  # Filter for block transactions
    @property
    def prev_header_hash(self):
        """Header hash of the previous block, taken from the foliage."""
        return self.foliage.prev_block_hash
    @property
    def header_hash(self):
        """Hash of this block's foliage (used as the header hash)."""
        return self.foliage.get_hash()
    @property
    def total_iters(self):
        """Total VDF iterations, forwarded from the reward chain block."""
        return self.reward_chain_block.total_iters
| 40.205882 | 104 | 0.792977 | from dataclasses import dataclass
from typing import List, Optional
from maize.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock
from maize.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from maize.types.blockchain_format.vdf import VDFProof
from maize.types.end_of_slot_bundle import EndOfSubSlotBundle
from maize.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class UnfinishedHeaderBlock(Streamable):
finished_sub_slots: List[EndOfSubSlotBundle]
reward_chain_block: RewardChainBlockUnfinished
challenge_chain_sp_proof: Optional[VDFProof]
reward_chain_sp_proof: Optional[VDFProof]
foliage: Foliage
foliage_transaction_block: Optional[FoliageTransactionBlock]
transactions_filter: bytes
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def header_hash(self):
return self.foliage.get_hash()
@property
def total_iters(self):
return self.reward_chain_block.total_iters
| true | true |
1c3b01a302a11cf5e166c2f86c0e9660fd8d7353 | 1,693 | py | Python | tests/web/test_flask.py | mikkoi/py-healthcheck | ca86da28c26d7b1a4adda90dfff870f0326bc9ff | [
"MIT"
] | 85 | 2017-02-09T18:11:38.000Z | 2022-01-30T17:19:09.000Z | tests/web/test_flask.py | mikkoi/py-healthcheck | ca86da28c26d7b1a4adda90dfff870f0326bc9ff | [
"MIT"
] | 16 | 2017-03-29T21:41:08.000Z | 2021-07-06T08:24:57.000Z | tests/web/test_flask.py | mikkoi/py-healthcheck | ca86da28c26d7b1a4adda90dfff870f0326bc9ff | [
"MIT"
] | 26 | 2017-02-07T22:49:26.000Z | 2022-02-18T11:01:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import flask
from healthcheck import HealthCheck, EnvironmentDump
class BasicHealthCheckTest(unittest.TestCase):
    """Exercise the HealthCheck view through a Flask test client."""
    def setUp(self):
        """Mount a HealthCheck instance at self.path on a throwaway Flask app."""
        self.path = '/h'
        self.app = flask.Flask(__name__)
        self.hc = self._hc()
        self.client = self.app.test_client()
        self.app.add_url_rule(self.path, view_func=lambda: self.hc.run())
    def _hc(self):
        """Factory hook so variants can supply a differently-configured checker."""
        return HealthCheck()
    def test_basic_check(self):
        """With no registered checks the endpoint reports healthy (200)."""
        response = self.client.get(self.path)
        self.assertEqual(200, response.status_code)
    def test_failing_check(self):
        """A failing check turns the endpoint into a 500 with status 'failure'."""
        def fail_check():
            return False, "FAIL"
        self.hc.add_check(fail_check)
        response = self.client.get(self.path)
        self.assertEqual(500, response.status_code)
        jr = flask.json.loads(response.data)
        self.assertEqual("failure", jr["status"])
class BasicEnvironmentDumpTest(unittest.TestCase):
    """Exercise the EnvironmentDump view through a Flask test client."""
    def setUp(self):
        """Mount an EnvironmentDump instance at self.path on a throwaway Flask app."""
        self.path = '/e'
        self.app = flask.Flask(__name__)
        self.hc = self._hc()
        self.client = self.app.test_client()
        self.app.add_url_rule(self.path, view_func=lambda: self.hc.run())
    def _hc(self):
        """Factory hook for the environment-dump object under test."""
        return EnvironmentDump()
    def test_basic_check(self):
        """Registered sections appear in the JSON dump with a 200 status."""
        def test_ok():
            return "OK"
        self.hc.add_section("test_func", test_ok)
        self.hc.add_section("config", self.app.config)
        response = self.client.get(self.path)
        self.assertEqual(200, response.status_code)
        jr = flask.json.loads(response.data)
        self.assertEqual("OK", jr["test_func"])
if __name__ == '__main__':
    unittest.main()
| 25.268657 | 73 | 0.632014 |
import unittest
import flask
from healthcheck import HealthCheck, EnvironmentDump
class BasicHealthCheckTest(unittest.TestCase):
def setUp(self):
self.path = '/h'
self.app = flask.Flask(__name__)
self.hc = self._hc()
self.client = self.app.test_client()
self.app.add_url_rule(self.path, view_func=lambda: self.hc.run())
def _hc(self):
return HealthCheck()
def test_basic_check(self):
response = self.client.get(self.path)
self.assertEqual(200, response.status_code)
def test_failing_check(self):
def fail_check():
return False, "FAIL"
self.hc.add_check(fail_check)
response = self.client.get(self.path)
self.assertEqual(500, response.status_code)
jr = flask.json.loads(response.data)
self.assertEqual("failure", jr["status"])
class BasicEnvironmentDumpTest(unittest.TestCase):
def setUp(self):
self.path = '/e'
self.app = flask.Flask(__name__)
self.hc = self._hc()
self.client = self.app.test_client()
self.app.add_url_rule(self.path, view_func=lambda: self.hc.run())
def _hc(self):
return EnvironmentDump()
def test_basic_check(self):
def test_ok():
return "OK"
self.hc.add_section("test_func", test_ok)
self.hc.add_section("config", self.app.config)
response = self.client.get(self.path)
self.assertEqual(200, response.status_code)
jr = flask.json.loads(response.data)
self.assertEqual("OK", jr["test_func"])
if __name__ == '__main__':
unittest.main()
| true | true |
1c3b0308475be21dbb6cbfba0ce3411666d1d13f | 6,312 | py | Python | python/raspberrypi/DFRobot_4DIO_S.py | cdjq/DFRobot_4DIO_S | d278e935ab5f217198e3d2ff3964454e9aa3d89b | [
"MIT"
] | null | null | null | python/raspberrypi/DFRobot_4DIO_S.py | cdjq/DFRobot_4DIO_S | d278e935ab5f217198e3d2ff3964454e9aa3d89b | [
"MIT"
] | null | null | null | python/raspberrypi/DFRobot_4DIO_S.py | cdjq/DFRobot_4DIO_S | d278e935ab5f217198e3d2ff3964454e9aa3d89b | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
'''
@file DFRobot_4DIO_S.py
@brief DFRobot_4DIO_S libary for raspberry.
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@licence The MIT License (MIT)
@author [fengli](li.feng@dfrobot.com)
@version V1.0
@date 2021-07-21
@https://github.com/DFRobot/DFRobot_RTU
'''
from DFRobot_RTU import *
class DFRobot_4DIO_S(DFRobot_RTU):
  '''Modbus-RTU driver for the DFRobot 4DIO_S module (4 relay outputs,
  4 discrete inputs and a buzzer), layered on DFRobot_RTU.
  '''

  # Holding-register addresses.
  REG_4DIO_DEVICE_PID   = 0x00  # product ID
  REG_4DIO_DEVICE_VID   = 0x01  # vendor ID
  REG_4DIO_DEVICE_ADRR  = 0x02  # device (slave) address register
  REG_4DIO_UART_CONFIG1 = 0x03  # serial configuration register 1 (baud rate)
  REG_4DIO_UART_CONFIG2 = 0x04  # serial configuration register 2

  # Coil numbers.
  BEEP  = 0X00  # buzzer
  DIO_1 = 0X01  # relay #1
  DIO_2 = 0X02  # relay #2
  DIO_3 = 0X03  # relay #3
  DIO_4 = 0X04  # relay #4

  # Discrete-input numbers.
  INT_1 = 0X01  # discrete input 1
  INT_2 = 0X02  # discrete input 2
  INT_3 = 0X03  # discrete input 3
  INT_4 = 0X04  # discrete input 4

  def __init__(self, addr, Baud):
    '''Init communication port.

    :param addr: Modbus slave address of the module
    :param Baud: serial baud rate used for the link (8 data bits, no
                 parity, 1 stop bit)
    '''
    self.__Baud = Baud
    self.__Addr = addr
    super(DFRobot_4DIO_S, self).__init__(Baud, 8, 'N', 1)

  def read_pid(self):
    '''Read the device product ID.

    :return: 16-bit PID
    '''
    return self.read_holding_register(self.__Addr, self.REG_4DIO_DEVICE_PID)

  def read_vid(self):
    '''Read the device vendor ID.

    :return: 16-bit VID
    '''
    return self.read_holding_register(self.__Addr, self.REG_4DIO_DEVICE_VID)

  def set_addr(self, addr):
    '''Set the device (slave) address.

    :param addr: new Modbus address to assign
    :return: exception code, 0 on success (non-zero values are the
             DFRobot_RTU error codes, e.g. illegal address or CRC error)
    '''
    return self.write_holding_register(self.__Addr, self.REG_4DIO_DEVICE_ADRR, addr)

  def set_baudr_rate(self, rate):
    '''Set the slave's serial baud rate (takes effect after the slave resets).

    :param rate: baud-rate code:
                 0x0001: 2400    0x0002: 4800    0x0003: 9600
                 0x0004: 14400   0x0005: 19200   0x0006: 38400
                 0x0007: 57600   0x0008: 115200  (other values: 115200)
    :return: exception code, 0 on success (see set_addr)
    '''
    return self.write_holding_register(self.__Addr, self.REG_4DIO_UART_CONFIG1, rate)

  def set_dio(self, number):
    '''Close (switch on) a single relay.

    :param number: relay number (DIO_1..DIO_4)
    :return: True on success, False on failure
    '''
    # 0xff00 is the Modbus "write single coil ON" value.
    return self.write_coils_register(self.__Addr, number, 0xff00)

  def reset_dio(self, number):
    '''Open (switch off) a single relay.

    :param number: relay number (DIO_1..DIO_4)
    :return: True on success, False on failure
    '''
    return self.write_coils_register(self.__Addr, number, 0x00)

  def config_dio(self, dio1, dio2, dio3, dio4):
    '''Set the state of all four relays in one transaction.

    :param dio1: True to close relay 1, False to open it
    :param dio2: True to close relay 2, False to open it
    :param dio3: True to close relay 3, False to open it
    :param dio4: True to close relay 4, False to open it
    :return: True on success, False on failure
    '''
    # Pack the four switch states into bits 0..3.
    data = int(dio1)
    data |= int(dio2) << 1
    data |= int(dio3) << 2
    data |= int(dio4) << 3
    return self.write_coils_registers(self.__Addr, 1, data)

  def read_dio(self, number):
    '''Read the on/off state of a single relay.

    :param number: relay number (DIO_1..DIO_4)
    :return: 1 if closed, 0 if open
    '''
    return self.read_coils_register(self.__Addr, number)

  def read_mult_dio(self):
    '''Read the on/off state of all four relays.

    :return: tuple (dio1, dio2, dio3, dio4); each element is 1 (closed)
             or 0 (open)
    '''
    # Local renamed from 'list' to avoid shadowing the builtin.
    regs = self.read_coils_registers(self.__Addr, 1, 4)
    dio1 = regs[1] & 0x01
    dio2 = (regs[1] & 0x02) >> 1
    dio3 = (regs[1] & 0x04) >> 2
    dio4 = (regs[1] & 0x08) >> 3
    return dio1, dio2, dio3, dio4

  def read_int(self, intN):
    '''Read the level of a single discrete input.

    :param intN: input number (INT_1..INT_4)
    :return: 1 for high level, 0 for low level
    '''
    return self.read_discrete_inputs_register(self.__Addr, intN)

  def read_mult_int(self):
    '''Read the level of all four discrete inputs.

    :return: tuple (int1, int2, int3, int4); each element is 1 (high)
             or 0 (low)
    '''
    regs = self.read_discrete_inputs_registers(self.__Addr, 1, 4)
    int1 = regs[1] & 0x01
    int2 = (regs[1] & 0x02) >> 1
    int3 = (regs[1] & 0x04) >> 2
    int4 = (regs[1] & 0x08) >> 3
    return int1, int2, int3, int4
| 31.718593 | 84 | 0.637516 |
from DFRobot_RTU import *
class DFRobot_4DIO_S(DFRobot_RTU):
REG_4DIO_DEVICE_PID = 0x00
REG_4DIO_DEVICE_VID = 0x01
REG_4DIO_DEVICE_ADRR = 0x02
REG_4DIO_UART_CONFIG1 = 0x03
REG_4DIO_UART_CONFIG2 = 0x04
BEEP = 0X00
DIO_1 = 0X01
DIO_2 = 0X02
DIO_3 = 0X03
DIO_4 = 0X04
INT_1 = 0X01
INT_2 = 0X02
INT_3 = 0X03
INT_4 = 0X04
def __init__(self ,addr,Baud):
self.__Baud = Baud
self.__Addr = addr
super(DFRobot_4DIO_S, self).__init__(Baud, 8, 'N', 1)
def read_pid(self):
return self.read_holding_register(self.__Addr,self.REG_4DIO_DEVICE_PID)
def read_vid(self):
return self.read_holding_register(self.__Addr,self.REG_4DIO_DEVICE_VID)
def set_addr(self,addr):
return self.write_holding_register(self.__Addr,self.REG_4DIO_DEVICE_ADRR,addr)
def set_baudr_rate(self,rate):
return self.write_holding_register(self.__Addr,self.REG_4DIO_UART_CONFIG1,rate)
def set_dio(self,number):
return self.write_coils_register(self.__Addr,number,0xff00)
def reset_dio(self,number):
return self.write_coils_register(self.__Addr,number,0x00)
def config_dio(self,dio1,dio2,dio3,dio4):
data = 0
data = int(dio1)
data = data | int(dio2 << 1);
data = data | int(dio3 << 2);
data = data | int(dio4 << 3);
return self.write_coils_registers(self.__Addr,1,data)
def read_dio(self,number):
return self.read_coils_register(self.__Addr,number)
def read_mult_dio(self):
list = self.read_coils_registers(self.__Addr,1,4)
dio1 = list[1] & 0x01
dio2 = (list[1] & 0x02)>>1
dio3 = (list[1] & 0x04)>>2
dio4 = (list[1] & 0x08)>>3
return dio1,dio2,dio3,dio4
def read_int(self,intN):
return self.read_discrete_inputs_register(self.__Addr,intN)
def read_mult_int(self):
list = self.read_discrete_inputs_registers(self.__Addr,1,4)
int1 = list[1] & 0x01
int2 = (list[1] & 0x02)>>1
int3 = (list[1] & 0x04)>>2
int4 = (list[1] & 0x08)>>3
return int1,int2,int3,int4
| true | true |
1c3b03e56fa1ffe917a51782c729d303e6974a4a | 3,496 | py | Python | docs/conf.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 23 | 2020-11-01T21:55:30.000Z | 2021-12-05T14:03:05.000Z | docs/conf.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 25 | 2020-11-25T23:24:23.000Z | 2022-03-30T04:40:45.000Z | docs/conf.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 7 | 2020-11-27T06:46:47.000Z | 2021-09-25T03:26:07.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'dokdo'
copyright = '2020, Seung-been "Steven" Lee'
author = 'Seung-been "Steven" Lee'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.linkcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosectionlabel',
    'sphinx_rtd_theme',
    'sphinx_issues'
]
# Modules autodoc should mock instead of importing, so the docs can be built
# without the heavy scientific stack installed.
autodoc_mock_imports = [
    'numpy',
    'pandas',
    'qiime2',
    'scipy',
    'matplotlib',
    'seaborn',
    'skbio'
]
# Repository used by sphinx-issues to resolve issue/PR roles.
issues_github_path = 'sbslee/dokdo'
napoleon_use_param = False
# Include the example source for plots in API docs.
plot_include_source = True
plot_formats = [('png', 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
autosectionlabel_prefix_document = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Add external links to source code with sphinx.ext.linkcode --------------
import inspect
def linkcode_resolve(domain, info):
    """Return the GitHub source URL for the object described by *info*.

    Called by sphinx.ext.linkcode with a ``domain`` (e.g. 'py') and an
    ``info`` dict containing 'module' and 'fullname'. Returns None whenever
    a source location cannot be determined, which tells Sphinx to omit the
    external link.
    """
    if domain != 'py':
        return None
    modname = info['module']
    if not modname:
        return None
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    fullname = info['fullname']
    # Walk the dotted attribute path (e.g. 'Class.method') down from the module.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return None
    try:
        fn = inspect.getsourcefile(inspect.unwrap(obj))
    except TypeError:
        # Builtins and C extensions have no Python source file.
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except OSError:
        lineno = None
    if lineno:
        linespec = f'#L{lineno}-L{lineno + len(source) - 1}'
    else:
        linespec = ''
    # Only objects whose source lives under .../api/ can be linked; the old
    # unguarded split raised IndexError for anything else (e.g. re-exports).
    parts = fn.split('/api/')
    if len(parts) < 2:
        return None
    fn = parts[1]
    # No slash before the fragment: '<file>#L1-L2', not '<file>/#L1-L2'.
    return f'https://github.com/sbslee/dokdo/tree/master/dokdo/api/{fn}{linespec}'
| 27.100775 | 83 | 0.641876 |
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
project = 'dokdo'
copyright = '2020, Seung-been "Steven" Lee'
author = 'Seung-been "Steven" Lee'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosectionlabel',
'sphinx_rtd_theme',
'sphinx_issues'
]
autodoc_mock_imports = [
'numpy',
'pandas',
'qiime2',
'scipy',
'matplotlib',
'seaborn',
'skbio'
]
issues_github_path = 'sbslee/dokdo'
napoleon_use_param = False
plot_include_source = True
plot_formats = [('png', 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
autosectionlabel_prefix_document = True
html_theme = 'sphinx_rtd_theme'
html_static_path = []
import inspect
def linkcode_resolve(domain, info):
if domain != 'py':
return None
modname = info['module']
if not modname:
return None
submod = sys.modules.get(modname)
if submod is None:
return None
fullname = info['fullname']
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except AttributeError:
return None
try:
fn = inspect.getsourcefile(inspect.unwrap(obj))
except TypeError:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
lineno = None
if lineno:
linespec = f'#L{lineno}-L{lineno + len(source) - 1}'
else:
linespec = ''
fn = fn.split('/api/')[1]
return f'https://github.com/sbslee/dokdo/tree/master/dokdo/api/{fn}/{linespec}'
| true | true |
1c3b064445f3e0a09a6d3b4b622259737fb43e8d | 5,717 | py | Python | ceilometer/tests/functional/api/v2/test_list_events_scenarios.py | redhat-openstack/ceilometer | 9e503d7068889e52e9144079de331ed51676e535 | [
"Apache-2.0"
] | 1 | 2016-03-10T06:55:45.000Z | 2016-03-10T06:55:45.000Z | ceilometer/tests/functional/api/v2/test_list_events_scenarios.py | redhat-openstack/ceilometer | 9e503d7068889e52e9144079de331ed51676e535 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/functional/api/v2/test_list_events_scenarios.py | redhat-openstack/ceilometer | 9e503d7068889e52e9144079de331ed51676e535 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing raw events.
"""
import datetime
import mock
from oslo_utils import timeutils
import six
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
class TestListEvents(v2.FunctionalTest,
                     tests_db.MixinTestsWithBackendScenarios):
    """API tests for listing raw samples of the 'instance' meter.

    setUp() freezes timeutils.utcnow() and records two samples that differ
    in user, project, resource and source, so every filter test has exactly
    one matching and one non-matching sample.
    """
    def setUp(self):
        """Record two 'instance' samples against a frozen utcnow()."""
        super(TestListEvents, self).setUp()
        # Freeze utcnow() so the 'recorded_at' field is deterministic.
        patcher = mock.patch.object(timeutils, 'utcnow')
        self.addCleanup(patcher.stop)
        self.mock_utcnow = patcher.start()
        self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42)
        self.sample1 = sample.Sample(
            'instance',
            'cumulative',
            '',
            1,
            'user-id',
            'project1',
            'resource-id',
            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
            resource_metadata={'display_name': 'test-server',
                               'tag': 'self.sample',
                               'dict_properties': {'key': 'value'},
                               'not_ignored_list': ['returned'],
                               },
            source='test_source',
        )
        msg = utils.meter_message_from_counter(
            self.sample1, self.CONF.publisher.telemetry_secret,
        )
        self.conn.record_metering_data(msg)
        self.sample2 = sample.Sample(
            'instance',
            'cumulative',
            '',
            1,
            'user-id2',
            'project2',
            'resource-id-alternate',
            timestamp=datetime.datetime(2012, 7, 2, 10, 41),
            resource_metadata={'display_name': 'test-server',
                               'tag': 'self.sample2',
                               },
            source='source2',
        )
        msg2 = utils.meter_message_from_counter(
            self.sample2, self.CONF.publisher.telemetry_secret,
        )
        self.conn.record_metering_data(msg2)
    def test_all(self):
        """Both samples are returned, stamped with the frozen recorded_at."""
        data = self.get_json('/meters/instance')
        self.assertEqual(2, len(data))
        for s in data:
            self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at'])
    def test_all_trailing_slash(self):
        """A trailing slash on the meter URL behaves identically."""
        data = self.get_json('/meters/instance/')
        self.assertEqual(2, len(data))
    def test_empty_project(self):
        """Filtering on an unknown project yields no samples."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'project_id',
                                 'value': 'no-such-project',
                                 }])
        self.assertEqual([], data)
    def test_by_project(self):
        """Filtering on project1 matches exactly one sample."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'project_id',
                                 'value': 'project1',
                                 }])
        self.assertEqual(1, len(data))
    def test_empty_resource(self):
        """Filtering on an unknown resource yields no samples."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'resource_id',
                                 'value': 'no-such-resource',
                                 }])
        self.assertEqual([], data)
    def test_by_resource(self):
        """Filtering on resource-id matches exactly one sample."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'resource_id',
                                 'value': 'resource-id',
                                 }])
        self.assertEqual(1, len(data))
    def test_empty_source(self):
        """Filtering on an unknown source yields no samples."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'source',
                                 'value': 'no-such-source',
                                 }])
        self.assertEqual(0, len(data))
    def test_by_source(self):
        """Filtering on test_source matches exactly one sample."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'source',
                                 'value': 'test_source',
                                 }])
        self.assertEqual(1, len(data))
    def test_empty_user(self):
        """Filtering on an unknown user yields no samples."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'user_id',
                                 'value': 'no-such-user',
                                 }])
        self.assertEqual([], data)
    def test_by_user(self):
        """Filtering on user-id matches exactly one sample."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'user_id',
                                 'value': 'user-id',
                                 }])
        self.assertEqual(1, len(data))
    def test_metadata(self):
        """Nested resource_metadata is flattened into dotted string keys."""
        data = self.get_json('/meters/instance',
                             q=[{'field': 'resource_id',
                                 'value': 'resource-id',
                                 }])
        sample = data[0]
        self.assertIn('resource_metadata', sample)
        self.assertEqual(
            [('dict_properties.key', 'value'),
             ('display_name', 'test-server'),
             ('not_ignored_list', "['returned']"),
             ('tag', 'self.sample'),
             ],
            list(sorted(six.iteritems(sample['resource_metadata']))))
| 35.955975 | 78 | 0.500437 |
import datetime
import mock
from oslo_utils import timeutils
import six
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
class TestListEvents(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
super(TestListEvents, self).setUp()
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42)
self.sample1 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 7, 2, 10, 40),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample',
'dict_properties': {'key': 'value'},
'not_ignored_list': ['returned'],
},
source='test_source',
)
msg = utils.meter_message_from_counter(
self.sample1, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
self.sample2 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id2',
'project2',
'resource-id-alternate',
timestamp=datetime.datetime(2012, 7, 2, 10, 41),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample2',
},
source='source2',
)
msg2 = utils.meter_message_from_counter(
self.sample2, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg2)
def test_all(self):
data = self.get_json('/meters/instance')
self.assertEqual(2, len(data))
for s in data:
self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at'])
def test_all_trailing_slash(self):
data = self.get_json('/meters/instance/')
self.assertEqual(2, len(data))
def test_empty_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'no-such-project',
}])
self.assertEqual([], data)
def test_by_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(1, len(data))
def test_empty_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'no-such-resource',
}])
self.assertEqual([], data)
def test_by_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
self.assertEqual(1, len(data))
def test_empty_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'no-such-source',
}])
self.assertEqual(0, len(data))
def test_by_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'test_source',
}])
self.assertEqual(1, len(data))
def test_empty_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'no-such-user',
}])
self.assertEqual([], data)
def test_by_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'user-id',
}])
self.assertEqual(1, len(data))
def test_metadata(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
sample = data[0]
self.assertIn('resource_metadata', sample)
self.assertEqual(
[('dict_properties.key', 'value'),
('display_name', 'test-server'),
('not_ignored_list', "['returned']"),
('tag', 'self.sample'),
],
list(sorted(six.iteritems(sample['resource_metadata']))))
| true | true |
1c3b0668bca9636afcc439e2353bcb34470133fc | 81 | py | Python | demo/bug/apps.py | valbendan/django-ninja-demo | 806071b82e55b5034a13cc1674520a9743f94963 | [
"MIT"
] | null | null | null | demo/bug/apps.py | valbendan/django-ninja-demo | 806071b82e55b5034a13cc1674520a9743f94963 | [
"MIT"
] | 3 | 2021-03-30T13:43:20.000Z | 2021-06-10T19:39:51.000Z | demo/bug/apps.py | valbendan/django-ninja-demo | 806071b82e55b5034a13cc1674520a9743f94963 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BugConfig(AppConfig):
    """Django application configuration for the ``bug`` app."""
    name = 'bug'
| 13.5 | 33 | 0.728395 | from django.apps import AppConfig
class BugConfig(AppConfig):
    """Django application configuration for the ``bug`` app."""
    name = 'bug'
| true | true |
1c3b067e5ceed6737730ff58e1ac4790a027212a | 444 | py | Python | arc/arc017/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | arc/arc017/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | arc/arc017/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)  # headroom for deep recursion in contest code
stdin = sys.stdin
INF = float('inf')
# Input helpers: ni -> one int, na -> list of ints, ns -> stripped line.
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
def is_prime(x):
    """Return True if ``x`` is a prime number, False otherwise.

    Trial division up to floor(sqrt(x)). Values below 2 (0, 1, negatives)
    are never prime; the original version wrongly reported them as prime
    because the loop body never executed.
    """
    if x < 2:
        return False
    # math.isqrt avoids float rounding errors for very large x.
    for i in range(2, math.isqrt(x) + 1):
        if x % i == 0:
            return False
    return True
# Read N from stdin and report whether it is prime.
N = ni()
if is_prime(N):
    print("YES")
else:
    print("NO")
| 18.5 | 53 | 0.617117 | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)  # headroom for deep recursion in contest code
stdin = sys.stdin
INF = float('inf')
# Input helpers: ni -> one int, na -> list of ints, ns -> stripped line.
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
def is_prime(x):
    """Return True if ``x`` is a prime number, False otherwise.

    Trial division up to floor(sqrt(x)). Values below 2 (0, 1, negatives)
    are never prime; the original version wrongly reported them as prime
    because the loop body never executed.
    """
    if x < 2:
        return False
    # math.isqrt avoids float rounding errors for very large x.
    for i in range(2, math.isqrt(x) + 1):
        if x % i == 0:
            return False
    return True
# Read N from stdin and report whether it is prime.
N = ni()
if is_prime(N):
    print("YES")
else:
    print("NO")
| true | true |
1c3b06f168230558a536abf144a7eeef74c0d92d | 68 | py | Python | Practice/Python/EyeAndIdentity.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | 1 | 2018-07-08T15:44:15.000Z | 2018-07-08T15:44:15.000Z | Practice/Python/EyeAndIdentity.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | null | null | null | Practice/Python/EyeAndIdentity.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | 2 | 2018-08-10T06:49:34.000Z | 2020-10-01T04:50:59.000Z | import numpy
N,M=map(int,input().split())
print(numpy.eye(N,M,k=0))
| 17 | 28 | 0.676471 | import numpy
N,M=map(int,input().split())
print(numpy.eye(N,M,k=0))
| true | true |
1c3b087880b70328bf06a6fb67b73e768fcfddb8 | 11,821 | py | Python | resolwe_bio/tools/samplehcluster.py | dblenkus/resolwe-bio | 5077a162f454576dbe1bc41e97923bde49420261 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tools/samplehcluster.py | dblenkus/resolwe-bio | 5077a162f454576dbe1bc41e97923bde49420261 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tools/samplehcluster.py | dblenkus/resolwe-bio | 5077a162f454576dbe1bc41e97923bde49420261 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Hierarchical clustering of samples."""
import argparse
import json
import numpy as np
import pandas as pd
import resdk
from resolwe_runtime_utils import error, warning
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import spearmanr, zscore
def parse_args():
    """Build the CLI parser for sample clustering and parse sys.argv."""
    ap = argparse.ArgumentParser(description="Hierarchical clustering of samples")
    # Required inputs: parallel lists of files / IDs / names plus KB metadata.
    ap.add_argument("-f", "--sample-files", nargs="+", required=True,
                    help="Sample files")
    ap.add_argument("-i", "--sample-ids", nargs="+", type=int, required=True,
                    help="Sample IDs")
    ap.add_argument("-n", "--sample-names", nargs="+", required=True,
                    help="Sample names")
    ap.add_argument("-s", "--source", required=True, help="Source")
    ap.add_argument("-p", "--species", required=True, help="Species")
    # Optional preprocessing and clustering knobs.
    ap.add_argument("-g", "--gene-labels", nargs="+", default=[],
                    help="Subset of gene labels")
    ap.add_argument("-t", "--log2", action="store_true",
                    help="Log2 transformation")
    ap.add_argument("-z", "--z-score", action="store_true",
                    help="Z-score normalization")
    ap.add_argument("-r", "--remove-const", action="store_true",
                    help="Remove samples with constant expression")
    ap.add_argument("-d", "--distance-metric", default="euclidean",
                    help="Distance metric")
    ap.add_argument("-l", "--linkage-method", default="average",
                    help="Linkage method")
    ap.add_argument("-o", "--order", action="store_true",
                    help="Optimal ordering")
    ap.add_argument("--output", help="Output JSON filename")
    return ap.parse_args()
def get_expression(fname, sep="\t", gene_set=None):
    """Read one gzipped expression table, optionally restricted to a gene set.

    Parameters:
    - fname: path to a gzip-compressed, two-column (ID, expression) table
    - sep: column separator
    - gene_set: optional iterable of gene IDs; when given, only those genes
      are returned, in gene_set order (genes absent from the file are
      silently dropped)

    NOTE: the default used to be a mutable ``[]``; ``None`` is the safe,
    behaviorally identical replacement (both are falsy).
    """
    df = pd.read_csv(
        filepath_or_buffer=fname,
        sep=sep,
        header=0,
        index_col=0,
        compression="gzip",
        dtype={0: str, 1: float},
        keep_default_na=False,
    )
    # Feature IDs are compared as strings throughout the pipeline.
    df.index = df.index.map(str)
    if not gene_set:
        return df
    intersection = [gene for gene in gene_set if gene in df.index]
    return df.loc[intersection]
def get_expressions(fnames, sep="\t", gene_set=None):
    """Read and align expression tables from several files.

    Returns ``(inner, excluded)``: ``inner`` holds only genes present in
    every sample (and, if given, in ``gene_set``); ``excluded`` is the
    sorted list of gene IDs that had to be dropped because they were
    missing from at least one sample.

    NOTE: the default used to be a mutable ``[]``; ``None`` is the safe,
    behaviorally identical replacement (both are falsy).
    """
    dfs = [get_expression(fname, sep=sep, gene_set=gene_set) for fname in fnames]
    inner = pd.concat(dfs, axis=1, join="inner")
    outer = pd.concat(dfs, axis=1, join="outer", sort=True)
    if gene_set:
        # Report requested genes that did not survive the intersection.
        excluded = sorted(set(gene_set).difference(set(inner.index)))
    else:
        # Report genes present in some, but not all, samples.
        excluded = sorted(outer.index.difference(inner.index))
    return inner, excluded
def transform(expressions, log2=False, const=1.0, z_score=False, ddof=1):
    """Optionally log2-transform and Z-score-normalize an expression table.

    Parameters:
    - log2: apply log2(x + const) elementwise
    - const: additive constant used in the log2 transform
    - z_score: standardize each gene (row) to zero mean / unit variance
    - ddof: delta degrees of freedom for the Z-score standard deviation
    """
    if log2:
        # np.log2 broadcasts over the whole frame; non-positive inputs
        # produce NaN, which is treated as a fatal error below.
        expressions = np.log2(expressions + const)
        if expressions.isnull().values.any():
            set_error("Cannot apply log2 to expression values.")
    if z_score:
        def standardize(row):
            return zscore(row, ddof=ddof)
        expressions = expressions.apply(
            standardize, axis=1, result_type="broadcast"
        )
        # Constant rows have zero std and standardize to NaN; map to zero.
        expressions = expressions.fillna(value=0.0)
    return expressions
def get_distance_metric(distance_metric):
    """Translate a metric name into what scipy's linkage() accepts.

    'spearman' becomes a callable distance (1 - rank correlation),
    'pearson' maps to scipy's built-in 'correlation' metric, and any
    other name is passed through unchanged.
    """
    if distance_metric == "spearman":
        def spearman_distance(x, y):
            return 1.0 - spearmanr(x, y).correlation
        return spearman_distance
    if distance_metric == "pearson":
        return "correlation"
    return distance_metric
def is_const(values):
    """Return True when all values are approximately equal.

    Uses the relative spread (max - min) / |max + min| with a 1e-6
    tolerance; when max + min is exactly zero (values symmetric around
    zero), the sequence is constant only if both extremes coincide.
    """
    lo, hi = np.min(values), np.max(values)
    if lo + hi == 0.0:
        return lo == hi
    return (hi - lo) / abs(hi + lo) < 1.0e-6
def remove_const_samples(expressions):
    """Drop sample columns whose expression is constant across all genes.

    Returns the filtered frame together with a per-column boolean list in
    which True marks a retained (non-constant) sample.
    """
    keep = expressions.apply(lambda column: not is_const(column), axis=0)
    return expressions.loc[:, keep], keep.values.tolist()
def get_clustering(
    expressions, distance_metric="euclidean", linkage_method="average", order=False
):
    """Hierarchically cluster the sample columns of ``expressions``.

    Returns the scipy linkage matrix and the dendrogram computed from it
    (no plotting). Failures in either step abort via set_error().
    """
    try:
        matrix = linkage(
            y=expressions.transpose(),
            method=linkage_method,
            metric=distance_metric,
            optimal_ordering=order,
        )
    except Exception:
        set_error("Cannot compute linkage.")
    try:
        tree = dendrogram(matrix, no_plot=True)
    except Exception:
        set_error("Cannot compute dendrogram.")
    return matrix, tree
def output_json(result=None, fname=None):
    """Serialize ``result`` as JSON.

    With ``fname`` the bare result is written to that file; otherwise it
    is printed to stdout wrapped in a ``{"cluster": ...}`` envelope with
    compact separators.

    NOTE: the default used to be a shared mutable ``dict()``; ``None``
    plus an explicit fallback is the safe, behaviorally identical fix.
    """
    if result is None:
        result = {}
    if fname:
        with open(fname, "w") as f:
            json.dump(result, f)
    else:
        print(json.dumps({"cluster": result}, separators=(",", ":")))
def set_error(msg):
    """Report *msg* as a process error and abort.

    Prints the serialized error annotation (``error`` comes from
    resolwe_runtime_utils and is picked up by the Resolwe executor) and
    then raises ValueError to stop the computation.
    """
    print(error(msg))
    raise ValueError(msg)
def get_gene_names(feature_ids, source, species):
    """Map knowledge-base feature IDs to gene names via the Resolwe API.

    Performs a network query (resdk picks up credentials/endpoint from its
    environment). NOTE(review): the order of the returned names is
    whatever the API yields and may not match the order of
    ``feature_ids`` — confirm before relying on positional pairing.
    """
    res = resdk.Resolwe()
    features = res.feature.filter(
        feature_id__in=feature_ids, source=source, species=species
    )
    return [feature.name for feature in features]
def main():
    """Compute sample hierarchical clustering.

    Parses the command line, reads and validates the expression files,
    optionally log2-transforms / Z-scores the data and removes constant
    samples, clusters the sample columns and writes the linkage matrix
    and dendrogram leaf order as JSON.
    """
    args = parse_args()
    # --- basic argument consistency --------------------------------------
    if len(args.sample_files) != len(args.sample_ids):
        msg = "The number of sample files does not match the number of sample IDs."
        set_error(msg)
    if len(args.sample_files) != len(args.sample_names):
        msg = "The number of sample files does not match the number of sample names."
        set_error(msg)
    if len(args.sample_files) < 2:
        msg = (
            "Select at least two samples to compute hierarchical clustering of samples."
        )
        set_error(msg)
    if len(args.gene_labels) == 1 and args.distance_metric != "euclidean":
        # Correlation-based distances are undefined for a single gene.
        msg = (
            "Select at least two genes to compute hierarchical clustering of samples with "
            "correlation distance metric or use Euclidean distance metric."
        )
        set_error(msg)
    expressions, excluded = get_expressions(
        fnames=args.sample_files, gene_set=args.gene_labels
    )
    if len(expressions.index) == 0:
        if not args.gene_labels:
            msg = "The selected samples do not have any common genes."
        else:
            msg = "None of the selected genes are present in all samples."
        set_error(msg)
    if len(expressions.index) == 1 and args.distance_metric != "euclidean":
        if not args.gene_labels:
            msg = (
                "The selected samples contain only one common gene ({}). At least two common "
                "genes are required to compute hierarchical clustering of samples with "
                "correlation distance metric. Select a different set of samples or use Euclidean "
                "distance metric.".format(
                    get_gene_names(list(expressions.index), args.source, args.species)[
                        0
                    ]
                )
            )
        else:
            msg = (
                "Only one of the selected genes ({}) is present in all samples but at least two "
                "such genes are required to compute hierarchical clustering of samples with "
                "correlation distance metric. Select more genes or use Euclidean distance "
                "metric.".format(
                    get_gene_names(list(expressions.index), args.source, args.species)[
                        0
                    ]
                )
            )
        set_error(msg)
    expressions = transform(expressions, log2=args.log2, z_score=args.z_score)
    # --- optionally drop constant samples --------------------------------
    if args.remove_const:
        expressions, matches = remove_const_samples(expressions)
        if len(expressions.columns) == 0:
            msg = (
                "All of the selected samples have constant expression across genes. Hierarchical "
                "clustering of samples cannot be computed."
            )
            set_error(msg)
        if len(expressions.columns) == 1:
            # Renamed the loop variable: the original shadowed builtin `id`.
            sample_name = [
                name for i, name in enumerate(args.sample_names) if matches[i]
            ][0]
            msg = (
                "Only one of the selected samples ({}) has a non-constant expression across "
                "genes. However, hierarchical clustering of samples cannot be computed with "
                "just one sample.".format(sample_name)
            )
            set_error(msg)
        removed = [name for i, name in enumerate(args.sample_names) if not matches[i]]
        suffix = "" if len(removed) <= 3 else ", ..."
        if removed:
            msg = (
                "{} of the selected samples ({}) have constant expression across genes. "
                "Those samples are excluded from the computation of hierarchical clustering of "
                "samples with correlation distance "
                "metric.".format(len(removed), ", ".join(removed[:3]) + suffix)
            )
            print(warning(msg))
    else:
        matches = [True] * len(args.sample_files)
    # --- warn about genes excluded during alignment ----------------------
    suffix = "" if len(excluded) <= 3 else ", ..."
    if excluded:
        excluded_names = get_gene_names(excluded[:3], args.source, args.species)
        if len(excluded) == 1:
            if not args.gene_labels:
                # BUG FIX: the single placeholder used to receive
                # len(excluded) (always 1) instead of the gene name.
                msg = (
                    "Gene {} is present in some but not all of the selected samples. This "
                    "gene is excluded from the computation of hierarchical clustering of "
                    "samples.".format(", ".join(excluded_names))
                )
            else:
                msg = (
                    "{} of the selected genes ({}) is missing in at least one of the selected "
                    "samples. This gene is excluded from the computation of hierarchical "
                    "clustering of samples.".format(
                        len(excluded), ", ".join(excluded_names)
                    )
                )
            print(warning(msg))
        if len(excluded) > 1:
            if not args.gene_labels:
                # BUG FIX: append the ", ..." suffix computed above (it was
                # never used), mirroring the removed-samples message.
                msg = (
                    "{} genes ({}) are present in some but not all of the selected samples. Those "
                    "genes are excluded from the computation of hierarchical clustering of "
                    "samples.".format(len(excluded), ", ".join(excluded_names) + suffix)
                )
            else:
                msg = (
                    "{} of the selected genes ({}) are missing in at least one of the selected "
                    "samples. Those genes are excluded from the computation of hierarchical "
                    "clustering of samples.".format(
                        len(excluded), ", ".join(excluded_names) + suffix
                    )
                )
            print(warning(msg))
    # Local names renamed so they no longer shadow the imported scipy
    # ``linkage``/``dendrogram`` functions.
    link, dend = get_clustering(
        expressions,
        distance_metric=get_distance_metric(args.distance_metric),
        linkage_method=args.linkage_method,
        order=args.order,
    )
    sample_ids = [
        sample_id for i, sample_id in enumerate(args.sample_ids) if matches[i]
    ]
    result = {
        "sample_ids": {i: {"id": sample_id} for i, sample_id in enumerate(sample_ids)},
        "linkage": link.tolist(),
        "order": dend["leaves"],
    }
    output_json(result, args.output)
# Guard the entry point so the module can be imported (e.g. by tests)
# without immediately parsing sys.argv and running the pipeline.
if __name__ == "__main__":
    main()
| 35.821212 | 98 | 0.602825 |
import argparse
import json
import numpy as np
import pandas as pd
import resdk
from resolwe_runtime_utils import error, warning
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import spearmanr, zscore
def parse_args():
parser = argparse.ArgumentParser(description="Hierarchical clustering of samples")
parser.add_argument(
"-f", "--sample-files", nargs="+", help="Sample files", required=True
)
parser.add_argument(
"-i", "--sample-ids", nargs="+", help="Sample IDs", type=int, required=True
)
parser.add_argument(
"-n", "--sample-names", nargs="+", help="Sample names", required=True
)
parser.add_argument("-s", "--source", help="Source", required=True)
parser.add_argument("-p", "--species", help="Species", required=True)
parser.add_argument(
"-g", "--gene-labels", nargs="+", default=[], help="Subset of gene labels"
)
parser.add_argument("-t", "--log2", action="store_true", help="Log2 transformation")
parser.add_argument(
"-z", "--z-score", action="store_true", help="Z-score normalization"
)
parser.add_argument(
"-r",
"--remove-const",
action="store_true",
help="Remove samples with constant expression",
)
parser.add_argument(
"-d", "--distance-metric", default="euclidean", help="Distance metric"
)
parser.add_argument(
"-l", "--linkage-method", default="average", help="Linkage method"
)
parser.add_argument("-o", "--order", action="store_true", help="Optimal ordering")
parser.add_argument("--output", help="Output JSON filename")
return parser.parse_args()
def get_expression(fname, sep="\t", gene_set=[]):
df = pd.read_csv(
filepath_or_buffer=fname,
sep=sep,
header=0,
index_col=0,
compression="gzip",
dtype={0: str, 1: float,},
keep_default_na=False,
)
df.index = df.index.map(str)
if not gene_set:
return df
intersection = [gene for gene in gene_set if gene in df.index]
return df.loc[intersection]
def get_expressions(fnames, sep="\t", gene_set=[]):
dfs = [get_expression(fname, sep=sep, gene_set=gene_set) for fname in fnames]
inner = pd.concat(dfs, axis=1, join="inner")
outer = pd.concat(dfs, axis=1, join="outer", sort=True)
if gene_set:
excluded = sorted(set(gene_set).difference(set(inner.index)))
else:
excluded = sorted(outer.index.difference(inner.index))
return inner, excluded
def transform(expressions, log2=False, const=1.0, z_score=False, ddof=1):
if log2:
expressions = expressions.applymap(lambda x: np.log2(x + const))
if expressions.isnull().values.any():
msg = "Cannot apply log2 to expression values."
set_error(msg)
if z_score:
expressions = expressions.apply(
lambda x: zscore(x, ddof=ddof), axis=1, result_type="broadcast"
)
expressions.fillna(value=0.0, inplace=True)
return expressions
def get_distance_metric(distance_metric):
if distance_metric == "spearman":
return lambda x, y: 1.0 - spearmanr(x, y).correlation
elif distance_metric == "pearson":
return "correlation"
return distance_metric
def is_const(values):
mn = np.min(values)
mx = np.max(values)
if mn + mx == 0.0:
return mn == mx
else:
return (mx - mn) / abs(mx + mn) < 1.0e-6
def remove_const_samples(expressions):
matches = expressions.apply(lambda x: not is_const(x), axis=0)
return expressions.loc[:, matches], matches.values.tolist()
def get_clustering(
expressions, distance_metric="euclidean", linkage_method="average", order=False
):
try:
link = linkage(
y=expressions.transpose(),
method=linkage_method,
metric=distance_metric,
optimal_ordering=order,
)
except Exception:
msg = "Cannot compute linkage."
set_error(msg)
try:
dend = dendrogram(link, no_plot=True)
except Exception:
msg = "Cannot compute dendrogram."
set_error(msg)
return link, dend
def output_json(result=dict(), fname=None):
if fname:
with open(fname, "w") as f:
json.dump(result, f)
else:
print(json.dumps({"cluster": result}, separators=(",", ":")))
def set_error(msg):
print(error(msg))
raise ValueError(msg)
def get_gene_names(feature_ids, source, species):
res = resdk.Resolwe()
features = res.feature.filter(
feature_id__in=feature_ids, source=source, species=species
)
return [feature.name for feature in features]
def main():
args = parse_args()
if len(args.sample_files) != len(args.sample_ids):
msg = "The number of sample files does not match the number of sample IDs."
set_error(msg)
if len(args.sample_files) != len(args.sample_names):
msg = "The number of sample files does not match the number of sample names."
set_error(msg)
if len(args.sample_files) < 2:
msg = (
"Select at least two samples to compute hierarchical clustering of samples."
)
set_error(msg)
if len(args.gene_labels) == 1 and args.distance_metric != "euclidean":
msg = (
"Select at least two genes to compute hierarchical clustering of samples with "
"correlation distance metric or use Euclidean distance metric."
)
set_error(msg)
expressions, excluded = get_expressions(
fnames=args.sample_files, gene_set=args.gene_labels
)
if len(expressions.index) == 0:
if not args.gene_labels:
msg = "The selected samples do not have any common genes."
else:
msg = "None of the selected genes are present in all samples."
set_error(msg)
if len(expressions.index) == 1 and args.distance_metric != "euclidean":
if not args.gene_labels:
msg = (
"The selected samples contain only one common gene ({}). At least two common "
"genes are required to compute hierarchical clustering of samples with "
"correlation distance metric. Select a different set of samples or use Euclidean "
"distance metric.".format(
get_gene_names(list(expressions.index), args.source, args.species)[
0
]
)
)
else:
msg = (
"Only one of the selected genes ({}) is present in all samples but at least two "
"such genes are required to compute hierarchical clustering of samples with "
"correlation distance metric. Select more genes or use Euclidean distance "
"metric.".format(
get_gene_names(list(expressions.index), args.source, args.species)[
0
]
)
)
set_error(msg)
expressions = transform(expressions, log2=args.log2, z_score=args.z_score)
if args.remove_const:
expressions, matches = remove_const_samples(expressions)
if len(expressions.columns) == 0:
msg = (
"All of the selected samples have constant expression across genes. Hierarchical "
"clustering of samples cannot be computed."
)
set_error(msg)
if len(expressions.columns) == 1:
sample_name = [id for i, id in enumerate(args.sample_names) if matches[i]][
0
]
msg = (
"Only one of the selected samples ({}) has a non-constant expression across "
"genes. However, hierarchical clustering of samples cannot be computed with "
"just one sample.".format(sample_name)
)
set_error(msg)
removed = [name for i, name in enumerate(args.sample_names) if not matches[i]]
suffix = "" if len(removed) <= 3 else ", ..."
if removed:
msg = (
"{} of the selected samples ({}) have constant expression across genes. "
"Those samples are excluded from the computation of hierarchical clustering of "
"samples with correlation distance "
"metric.".format(len(removed), ", ".join(removed[:3]) + suffix)
)
print(warning(msg))
else:
matches = [True] * len(args.sample_files)
suffix = "" if len(excluded) <= 3 else ", ..."
if excluded:
excluded_names = get_gene_names(excluded[:3], args.source, args.species)
if len(excluded) == 1:
if not args.gene_labels:
msg = (
"Gene {} is present in some but not all of the selected samples. This "
"gene is excluded from the computation of hierarchical clustering of "
"samples.".format(len(excluded), ", ".join(excluded_names))
)
else:
msg = (
"{} of the selected genes ({}) is missing in at least one of the selected "
"samples. This gene is excluded from the computation of hierarchical "
"clustering of samples.".format(
len(excluded), ", ".join(excluded_names)
)
)
print(warning(msg))
if len(excluded) > 1:
if not args.gene_labels:
msg = (
"{} genes ({}) are present in some but not all of the selected samples. Those "
"genes are excluded from the computation of hierarchical clustering of "
"samples.".format(len(excluded), ", ".join(excluded_names))
)
else:
msg = (
"{} of the selected genes ({}) are missing in at least one of the selected "
"samples. Those genes are excluded from the computation of hierarchical "
"clustering of samples.".format(
len(excluded), ", ".join(excluded_names)
)
)
print(warning(msg))
linkage, dendrogram = get_clustering(
expressions,
distance_metric=get_distance_metric(args.distance_metric),
linkage_method=args.linkage_method,
order=args.order,
)
sample_ids = [
sample_id for i, sample_id in enumerate(args.sample_ids) if matches[i]
]
result = {
"sample_ids": {i: {"id": sample_id} for i, sample_id in enumerate(sample_ids)},
"linkage": linkage.tolist(),
"order": dendrogram["leaves"],
}
output_json(result, args.output)
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.