text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python
import rospy
from moveit_msgs.srv import GetPositionIK, GetPositionIKRequest, GetPositionIKResponse
from geometry_msgs.msg import PoseStamped
from moveit_commander import MoveGroupCommander
import numpy as np
from numpy import linalg
import sys
def make0(robo, arm, xi, upper):
    """Build an IK request for one point on the ellipse used to draw '0'.

    The zero is traced as an ellipse centred at (h, k) with semi-axis a
    along x and b along y; ``upper`` selects the top or bottom arc.

    Args:
        robo: robot name; 'sawyer' selects the '<arm>_gripper_tip' link
            (if a Sawyer has no gripper, use '_wrist' instead).
        arm: 'left' or 'right'; also names the MoveIt group "<arm>_arm".
        xi: x coordinate along the ellipse, metres in the "base" frame.
        upper: True for the upper arc, False for the lower arc.

    Returns:
        GetPositionIKRequest with the pose filled in; orientation
        quaternion (0, 1, 0, 0) points the gripper straight down.
    """
    request = GetPositionIKRequest()
    request.ik_request.group_name = arm + "_arm"
    link = arm + "_gripper"
    if robo == 'sawyer':
        link += '_tip'
    request.ik_request.ik_link_name = link
    request.ik_request.attempts = 20
    request.ik_request.pose_stamped.header.frame_id = "base"
    # Ellipse parameters: semi-axes b (y) and a (x), centre (h, k).
    b = 0.048
    a = 0.068
    k = 0.193
    h = 0.765
    xi = np.round(xi, 3)
    # Solve the ellipse for y: y = k +/- b*sqrt(1 - ((xi-h)/a)^2).
    sign = 1.0 if upper else -1.0
    y = np.round(sign * np.sqrt((b**2) - ((b**2 / a**2) * ((xi - h)**2))) + k, 3)
    if np.isnan(y):
        # Rounding can push xi just outside the ellipse; fall back to the
        # centre height so the request stays usable.
        y = 0.193
    if xi == 0.765:
        # Hand-tuned waypoints at the apex x. NOTE(review): 0.293/0.093
        # differ from k +/- b = 0.241/0.145 — presumably calibrated on the
        # real robot; confirm before changing.
        y = 0.293 if upper else 0.093
    request.ik_request.pose_stamped.pose.position.x = xi
    request.ik_request.pose_stamped.pose.position.y = y
    request.ik_request.pose_stamped.pose.position.z = -0.1
    request.ik_request.pose_stamped.pose.orientation.x = 0.0
    request.ik_request.pose_stamped.pose.orientation.y = 1.0
    request.ik_request.pose_stamped.pose.orientation.z = 0.0
    request.ik_request.pose_stamped.pose.orientation.w = 0.0
    return request
def make1(robo, arm, xi, xc, yc):
    """Build an IK request for one waypoint of the single stroke of '1'.

    The stroke runs along x at a fixed y offset above yc; the gripper is
    oriented straight down (quaternion (0, 1, 0, 0)).
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = yc + 0.059
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make2(robo, arm, xi, upper, mid, center_x = 0.691, center_y = 0.259):
    """Build an IK request for one waypoint of the digit '2'.

    The (upper, mid) flags select which stroke the waypoint belongs to;
    the final stroke swaps axes, so there ``xi`` is treated as a y value.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    if upper and mid:
        # Left half of the top hook: lower branch of the sideways parabola.
        y = -np.sqrt((-xi + center_x) / -20) + center_y
    elif upper:
        # Right half of the top hook: upper branch of the same parabola.
        y = np.sqrt((-xi + center_x) / -20) + center_y
    elif mid:
        # Diagonal stroke down toward the baseline.
        y = -xi + center_y + .05 + center_x + .05
    else:
        # Bottom stroke: axes swapped — caller sweeps y values through xi.
        y = xi
        xi = center_x + .15
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make3(robo, arm, xi, upper, xc=0.691, yc=0.259):
    """Build an IK request for one waypoint of '3'.

    The digit is drawn as two downward parabolic bumps; the second bump's
    vertex is shifted right by 10% of xc.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    xi = round(xi, 3)
    # Both bumps share the same curvature; only the vertex x differs.
    vertex_x = xc if upper else xc * 1.1
    y = round(-30 * ((xi - vertex_x) ** 2) + yc, 3)
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make4(robo, arm, xi, xc= 0.691,yc= 0.259):
    """Build an IK request for waypoint ``xi`` (0-6) of the digit '4'.

    The digit is drawn as straight segments between seven hand-placed
    corner points, expressed as (dx, dy) offsets from the reference
    corner (xc, yc).

    Args:
        robo: robot name; 'sawyer' selects the '<arm>_gripper_tip' link.
        arm: 'left' or 'right'; also names the MoveIt group "<arm>_arm".
        xi: integer waypoint index 0..6.  Any other value leaves the
            pose at its defaults (all zeros), matching the original
            if-chain behavior.
        xc, yc: reference corner coordinates in the "base" frame.

    Returns:
        GetPositionIKRequest with the pose for the selected waypoint.
    """
    request = GetPositionIKRequest()
    request.ik_request.group_name = arm + "_arm"
    # If a Sawyer does not have a gripper, replace '_gripper_tip' with '_wrist' instead
    link = arm + "_gripper"
    if robo == 'sawyer':
        link += '_tip'
    request.ik_request.ik_link_name = link
    request.ik_request.attempts = 20
    request.ik_request.pose_stamped.header.frame_id = "base"
    # (dx, dy) offsets from (xc, yc) for each of the seven waypoints.
    offsets = {
        0: (0.0, 0.0),
        1: (0.0788 / 2.0, 0.0),
        2: (0.0788, 0.0),
        3: (0.0788, 0.059),
        4: (0.0, 0.059),
        5: (0.1577 / 2.0, 0.059),
        6: (0.1577, 0.059),
    }
    if xi in offsets:
        dx, dy = offsets[xi]
        pose = request.ik_request.pose_stamped.pose
        pose.position.x = xc + dx
        pose.position.y = yc + dy
        pose.position.z = -0.1
        # Gripper pointing straight down.
        pose.orientation.x = 0.0
        pose.orientation.y = 1.0
        pose.orientation.z = 0.0
        pose.orientation.w = 0.0
    return request
def make5(robo, arm, xi, upper, mid, xc=0.6467, yc=0.2):
    """Build an IK request for one waypoint of the digit '5'.

    upper=True: top stroke (axes swapped — caller sweeps y values).
    mid=True:   vertical stroke at constant y.
    otherwise:  bottom bowl, a downward parabola shifted right.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    xi = round(xi, 3)
    if upper:
        # Caller passes a y value through xi; x stays pinned at xc.
        y, xi = xi, xc
    elif mid:
        y = yc
    else:
        y = round(-30 * ((xi - ((xc + 0.0443) * 1.1)) ** 2) + yc + 0.059, 3)
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make6(robo, arm, xi, upper, center_x = 0.6566, center_y = 0.2235):
    """Build an IK request for segment ``upper`` (an int, 0-4) of '6'.

    Each segment is either a flat stroke or a parabolic arc around one of
    two vertex positions.  NOTE(review): values of ``upper`` outside 0-4
    leave y unbound and raise NameError, exactly as the original did.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    cx = center_x + 0.0344
    if upper == 0:
        y = 30 * (xi - cx) ** 2 + center_y - 0.0355
    elif upper == 1:
        # Flat middle stroke.
        y = center_y - 0.0355
    elif upper == 2 or upper == 4:
        # Segments 2 and 4 trace the same upward parabola (closing the loop).
        y = 30 * (xi - cx * 1.1) ** 2 + center_y - 0.0355
    elif upper == 3:
        y = -30 * (xi - cx * 1.1) ** 2 + center_y + 0.0355
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make7(robo, arm, xi, upper, xc=0.6467, yc=0.2):
    """Build an IK request for one waypoint of the digit '7'.

    upper=True: top stroke with axes swapped (caller sweeps y values
    through ``xi`` while x stays at xc); otherwise the diagonal stroke
    at a fixed y.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    xi = round(xi, 3)
    if upper:
        y, xi = xi, xc
    else:
        y = yc + 0.059
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make8(robo, arm, xi, upper, mid, center_x = 0.6566, center_y = 0.2235):
    """Build an IK request for one waypoint of the digit '8'.

    The four (upper, mid) combinations map to four parabolic arcs:
    ``upper`` picks the vertex x (left loop vs. right-shifted loop) and
    ``mid`` picks the arc direction (opening down vs. opening up).
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    cx = center_x + 0.0344
    vertex_x = cx if upper else cx * 1.1
    if mid:
        # Downward-opening arc above the centerline.
        y = -30 * ((xi - vertex_x) ** 2) + center_y + 0.0355
    else:
        # Upward-opening arc below the centerline.
        y = 30 * ((xi - vertex_x) ** 2) + center_y - 0.0355
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def make9(robo, arm, xi, upper, mid, center_x = 0.6566, center_y = 0.2235):
    """Build an IK request for one waypoint of the digit '9'.

    The original branches on (upper, mid): both arc branches with
    mid=True share the same downward parabola, upper-only is a flat top
    stroke, and neither-flag is the upward parabola below the centerline.
    """
    req = GetPositionIKRequest()
    req.ik_request.group_name = arm + "_arm"
    # Sawyer grippers expose a '_tip' frame; Baxter uses the bare gripper.
    ee_link = arm + "_gripper"
    if robo == 'sawyer':
        ee_link = ee_link + '_tip'
    req.ik_request.ik_link_name = ee_link
    req.ik_request.attempts = 20
    req.ik_request.pose_stamped.header.frame_id = "base"
    cx = center_x + 0.0344
    if upper and not mid:
        # Flat stroke at constant y.
        y = center_y + 0.0355
    elif mid:
        # Downward-opening arc (used for both upper and lower mid passes).
        y = -30 * ((xi - cx) ** 2) + center_y + 0.0355
    else:
        # Upward-opening arc below the centerline.
        y = 30 * ((xi - cx) ** 2) + center_y - 0.0355
    pose = req.ik_request.pose_stamped.pose
    pose.position.x = xi
    pose.position.y = y
    pose.position.z = -0.1
    pose.orientation.x = 0.0
    pose.orientation.y = 1.0
    pose.orientation.z = 0.0
    pose.orientation.w = 0.0
    return req
def _execute_request(compute_ik, arm, request):
    """Send one IK request to the service, then plan and execute the move.

    Service errors are reported and swallowed so a single failed waypoint
    does not abort the rest of the drawing (same policy as the original
    per-waypoint try/except blocks).
    """
    try:
        compute_ik(request)  # response unused; MoveIt plans from the pose below
        group = MoveGroupCommander(arm + "_arm")
        group.set_pose_target(request.ik_request.pose_stamped)
        group.go()
        rospy.sleep(1.0)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)


def main(robo):
    """Step the arm through waypoints to draw the digit selected by ``number``.

    Args:
        robo: 'sawyer' (uses the right arm and '_tip' gripper link) or
            anything else for a Baxter-style robot (left arm).
    """
    # Wait for the IK service to become available.
    rospy.wait_for_service('compute_ik')
    rospy.init_node('service_query')
    # Set up and calibrate the right gripper (other gripper commands
    # won't work unless calibration runs first).
    right_gripper = robot_gripper.Gripper('right')
    print('Calibrating...')
    right_gripper.calibrate()
    rospy.sleep(2.0)
    arm = 'left'
    # Proxy used to call the IK service for every waypoint.
    compute_ik = rospy.ServiceProxy('compute_ik', GetPositionIK)
    if robo == 'sawyer':
        arm = 'right'
    number = 0  # change this to change the number drawn
    while not rospy.is_shutdown():
        raw_input('Press [ Enter ]: ')
        if number == 9:
            # Computer vision determines start point.
            center_x = 0.6566
            center_y = 0.2235
            # NOTE(review): this first waypoint passes the hard-coded
            # centers 0.691/0.259 instead of the locals above — preserved
            # from the original; confirm whether that was intentional.
            _execute_request(compute_ik, arm, make9(
                robo, arm, center_x + 0.0344, upper=True, mid=True,
                center_x=0.691, center_y=0.259))
            for xi in np.linspace(center_x + 0.0688, center_x, 3):
                _execute_request(compute_ik, arm,
                                 make9(robo, arm, xi, False, False, center_x, center_y))
            for xi in np.linspace(center_x, center_x + 0.0344, 3):
                _execute_request(compute_ik, arm,
                                 make9(robo, arm, xi, False, True, center_x, center_y))
            for xi in np.linspace(center_x + 0.0344, center_x + 0.1379, 3):
                _execute_request(compute_ik, arm,
                                 make9(robo, arm, xi, True, False, center_x, center_y))
        if number == 8:
            # Computer vision determines start point.
            center_x = 0.691
            center_y = 0.259
            for xi in np.linspace(center_x, center_x + 0.0688, 3):
                _execute_request(compute_ik, arm,
                                 make8(robo, arm, xi, True, True, center_x, center_y))
            for xi in np.linspace(center_x + 0.0688, center_x + 0.1379, 3):
                _execute_request(compute_ik, arm,
                                 make8(robo, arm, xi, False, False, center_x, center_y))
            for xi in np.linspace(center_x + 0.1379, center_x + 0.0688, 3):
                _execute_request(compute_ik, arm,
                                 make8(robo, arm, xi, False, True, center_x, center_y))
            for xi in np.linspace(center_x + 0.0688, center_x, 3):
                _execute_request(compute_ik, arm,
                                 make8(robo, arm, xi, True, False, center_x, center_y))
        if number == 7:
            center_x = 0.6467
            center_y = 0.2
            # Top stroke: make7 swaps axes when upper=True, so sweep y values.
            for xi in np.linspace(center_y, center_y + 0.059, 3):
                _execute_request(compute_ik, arm,
                                 make7(robo, arm, xi, True, center_x, center_y))
            for xi in np.linspace(center_x, center_x + 0.1577, 3):
                _execute_request(compute_ik, arm,
                                 make7(robo, arm, xi, False, center_x, center_y))
        if number == 6:
            # Computer vision determines start point.
            center_x = 0.691
            center_y = 0.259
            # (start, stop, steps, segment id) for each of make6's 5 segments.
            segments = [
                (center_x, center_x + 0.0344, 3, 0),
                (center_x + 0.0344, center_x + 0.1035, 3, 1),
                (center_x + 0.1035, center_x + 0.1379, 3, 2),
                (center_x + 0.1379, center_x + 0.0688, 3, 3),
                (center_x + 0.0688, center_x + 0.1035, 3, 4),
            ]
            for start, stop, steps, seg in segments:
                for xi in np.linspace(start, stop, steps):
                    _execute_request(compute_ik, arm,
                                     make6(robo, arm, xi, seg, center_x, center_y))
        if number == 5:
            center_x = 0.6467
            center_y = 0.2
            # Top stroke: make5 swaps axes when upper=True, so sweep y values.
            for xi in np.linspace(center_y + 0.059, center_y, 3):
                _execute_request(compute_ik, arm,
                                 make5(robo, arm, xi, True, False, center_x, center_y))
            for xi in np.linspace(center_x, center_x + 0.0691, 3):
                _execute_request(compute_ik, arm,
                                 make5(robo, arm, xi, False, True, center_x, center_y))
            for xi in np.linspace(center_x + 0.0691, center_x + 0.1577, 5):
                _execute_request(compute_ik, arm,
                                 make5(robo, arm, xi, False, False, center_x, center_y))
        if number == 4:
            center_x = 0.6467
            center_y = 0.2
            # make4 takes a waypoint index, not a coordinate.
            for xi in range(7):
                _execute_request(compute_ik, arm,
                                 make4(robo, arm, xi, center_x, center_y))
        if number == 3:
            center_x = 0.691
            center_y = 0.259
            for xi in np.linspace(center_x - 0.0443, center_x + 0.0345, 3):
                _execute_request(compute_ik, arm,
                                 make3(robo, arm, xi, True, center_x, center_y))
            for xi in np.linspace(center_x + 0.0345, center_x + 0.0691, 3):
                _execute_request(compute_ik, arm,
                                 make3(robo, arm, xi, False, center_x, center_y))
            for xi in np.linspace(center_x + 0.0691, center_x + 0.1134, 3):
                _execute_request(compute_ik, arm,
                                 make3(robo, arm, xi, False, center_x, center_y))
        if number == 2:
            # Computer vision determines start point.
            center_x = 0.691
            center_y = 0.259
            for xi in np.linspace(center_x + 0.05, center_x, 3):
                _execute_request(compute_ik, arm,
                                 make2(robo, arm, xi, True, True, center_x, center_y))
            for xi in np.linspace(center_x, center_x + 0.05, 3):
                _execute_request(compute_ik, arm,
                                 make2(robo, arm, xi, True, False, center_x, center_y))
            for xi in np.linspace(center_x + 0.05, center_x + 0.15, 3):
                _execute_request(compute_ik, arm,
                                 make2(robo, arm, xi, False, True, center_x, center_y))
            # Bottom stroke: make2 swaps axes in this branch, so sweep y values.
            for xi in np.linspace(center_y - 0.05, center_y + 0.05, 3):
                _execute_request(compute_ik, arm,
                                 make2(robo, arm, xi, False, False, center_x, center_y))
        if number == 1:
            center_x = 0.6467
            center_y = 0.2
            for xi in np.linspace(center_x, center_x + 0.1577, 4):
                _execute_request(compute_ik, arm,
                                 make1(robo, arm, xi, center_x, center_y))
        if number == 0:
            # Upper arc left -> apex -> right, then lower arc back.
            for xi in np.linspace(0.692, 0.765, 3):
                _execute_request(compute_ik, arm, make0(robo, arm, xi, True))
            for xi in np.linspace(0.765, 0.838, 3):
                _execute_request(compute_ik, arm, make0(robo, arm, xi, True))
            for xi in np.linspace(0.838, 0.765, 3):
                _execute_request(compute_ik, arm, make0(robo, arm, xi, False))
            for xi in np.linspace(0.765, 0.692, 3):
                _execute_request(compute_ik, arm, make0(robo, arm, xi, False))
# Script entry point: the robot name passed on the command line selects
# which vendor gripper interface module to load.
if __name__ == '__main__':
    robot = sys.argv[1]
    if robot == 'sawyer':
        from intera_interface import gripper as robot_gripper
    else:
        from baxter_interface import gripper as robot_gripper
    main(robot)
| 42,030 | 13,432 |
import tkinter as tk

# Build a tiny window showing three name labels placed diagonally on the
# grid (row index equals column index for each label).
root = tk.Tk()
for slot, person in ((0, "Aman"), (1, "Rahul"), (3, "Manoj")):
    tk.Label(root, text=person).grid(row=slot, column=slot)
root.mainloop()
| 269 | 120 |
# -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-04-11
算法思想:链表重排序
"""
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly-linked list: a value plus a next pointer."""

    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None until linked
class Solution(object):
    """Reorder a singly-linked list L0->L1->...->Ln in place so it becomes
    L0->Ln->L1->Ln-1->... (LeetCode 143).

    Strategy: find the end of the first half, reverse the second half,
    then weave the two halves together.
    """

    def reorderList(self, head):
        """
        :type head: ListNode
        :rtype: void Do not return anything, modify head in-place instead.
        """
        if not head or not head.next:
            return
        mid = self.midOfList(head)
        second = self.reverseList(mid.next)
        mid.next = None  # terminate the first half before merging
        self.mergeList(head, second)

    def midOfList(self, head):
        """Return the last node of the first half (slow/fast pointers).

        For an even-length list this is node n/2 - 1; for odd length the
        exact middle node.
        """
        if not head:
            return head
        slow, fast = head, head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        return slow

    def reverseList(self, head):
        """Reverse the list iteratively and return the new head.

        ``prev`` always points at the reversed prefix, so no separate
        new-head bookkeeping is needed (the original tracked a redundant
        ``nhead`` variable).
        """
        prev = None
        cur = head
        while cur:
            cur.next, prev, cur = prev, cur, cur.next
        return prev

    def mergeList(self, head1, head2):
        """Alternately link nodes from head1 and head2; return the merged head.

        head1 may be at most one node longer than head2 (guaranteed by the
        midpoint split in reorderList).
        """
        if not head2:
            return head1
        if not head1:
            return head2
        dummy = ListNode(0)
        tail = dummy
        while head1 and head2:
            tail.next = head1
            head1 = head1.next
            tail = tail.next
            tail.next = head2
            head2 = head2.next
            tail = tail.next
        # Append whichever half still has nodes (at most one extra node).
        if head1:
            tail.next = head1
        if head2:
            tail.next = head2
        return dummy.next
# ANALOG COLORS
# NOTE(review): this is a NodeBox-style script — ximport, size, stroke, random,
# choice, WIDTH, HEIGHT, beginpath, curveto etc. are builtins provided by the
# drawing environment; the file is not runnable under plain Python.
# Import the library
try:
    # This is the statement you normally use.
    colors = ximport("colors")
except ImportError:
    # But since these examples are "inside" the library
    # we may need to try something different when
    # the library is not located in /Application Support
    colors = ximport("__init__")
    reload(colors)
size(600, 600)
nofill()
stroke(0.4, 0.5, 0)
strokewidth(0.1)
autoclosepath(False)
# Base color for the whole composition; strands are analog variations of it.
clr = colors.color(0.6, 0.4, 0)
# Get a very dark variation of the color for the background.
background(colors.dark(clr).darken(0.1))
clr.alpha = 0.5
# Each curve has a shadow and there are a lot of them,
# so we have to use a very subtle shadow:
# very transparent and thin (little blur).
colors.shadow(alpha=0.05, blur=0.2)
for i in range(50):
    # Each strand of curves has an analogous color
    # (i.e. hues that are next to each other on the color wheel).
    # This yields a very natural effect.
    stroke(clr.analog(angle=10, d=0.3))
    # Start drawing strands of curves from the center.
    x0 = WIDTH/2
    y0 = HEIGHT/2
    # Each strand of curves bends in a certain way.
    vx0 = random(-200, 200)
    vy0 = random(-200, 200)
    vx1 = random(-200, 200)
    vy1 = random(-200, 200)
    # A strand ends up either left or right outside the screen.
    # Each curve in a strand ends up at the same place
    # (identical x1 and y1).
    x1 = choice((-10, WIDTH))
    y1 = random(HEIGHT)
    # This code gives interesting effects as well:
    #from math import radians, sin, cos
    #angle = random(360)
    #x1 = x0 + cos(radians(angle)) * 100
    #y1 = y0 + sin(radians(angle)) * 100
    # 100 curves per strand, each drawn as its own single-segment path.
    for j in range(100):
        beginpath(x0, y0)
        curveto(
            # The bend of each curve in a strand differs slightly
            # at the start, so the strand looks thicker at the start
            # and then all the curves come together at x1 and y1.
            x0+vx0+random(80),
            y0+vy0+random(80),
            x1+vx1,
            y1+vy1,
            x1,
            y1
        )
        endpath()
"""
# Some type, with a heart symbol!
heart = u"\u2665"
s1 = "strands of analogous curves "+heart
s2 = "gratuitous type always looks cool on these things"
fill(1, 1, 1, 0.85)
fontsize(18)
text(s1, 65, HEIGHT/2)
fontsize(9)
text(s2.upper(), 65, HEIGHT/2+12)
stroke(1)
strokewidth(1)
line(0, HEIGHT/2, 60, HEIGHT/2)
"""
from .main import cyclegan
| 27 | 10 |
from typing import Callable
from typing import List
from telegram import MessageEntity
from telegram import Update
from telegram.ext import Handler
class CustomCommandHandle(Handler):
    """A command handler whose accepted command names are supplied lazily.

    Instead of a fixed list, `commands_callback` is queried on every update,
    so the set of recognized commands can change at runtime.
    """

    def __init__(
        self,
        commands_callback: Callable[[], List[str]],
        callback: Callable,
        pass_update_queue=False,
        pass_job_queue=False,
        pass_user_data=False,
        pass_chat_data=False,
    ):
        super(CustomCommandHandle, self).__init__(
            callback,
            pass_update_queue=pass_update_queue,
            pass_job_queue=pass_job_queue,
            pass_user_data=pass_user_data,
            pass_chat_data=pass_chat_data,
        )
        # Zero-argument callable returning the currently valid command names.
        self.commands_callback = commands_callback

    def check_update(self, update):
        """Determines whether an update should be passed to this handlers :attr:`callback`.

        Args:
            update (:class:`telegram.Update`): Incoming telegram update.

        Returns:
            :obj:`list`: The list of args for the handler
        """
        # Guard clauses: only messages starting with a bot-command entity.
        if not (isinstance(update, Update) and update.effective_message):
            return None
        message = update.effective_message
        entities = message.entities
        if not (entities and entities[0].type == MessageEntity.BOT_COMMAND and entities[0].offset == 0):
            return None
        # Strip the leading "/" and split off an optional "@botname" suffix;
        # appending our own username guarantees parts[1] always exists.
        command_text = message.text[1 : entities[0].length]
        args = message.text.split()[1:]
        parts = command_text.split("@")
        parts.append(message.bot.username)
        if parts[0].lower() not in self.list_commands() or parts[1].lower() != message.bot.username.lower():
            return None
        return args

    def list_commands(self) -> List[str]:
        """Fetch the currently accepted command names from the callback."""
        return self.commands_callback()
| 1,875 | 487 |
from .spectrum import Spectrum
from .blank import Blank
from .thermal import Thermal
from .sun import Sun
from .lightbulbs import * # , LED, CFL
# from .PHOENIX import Star
| 175 | 57 |
import csv
from io import StringIO
import datetime
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import EncryptedType
import pytest
from acondbs import create_app
from acondbs.db.ops import convert_data_type_for_insert
##__________________________________________________________________||
sa = SQLAlchemy()
class SampleTable(sa.Model):
    """A table covering the SQLAlchemy generic column types round-tripped by the tests."""
    __tablename__ = "sample_table"
    id_ = sa.Column(sa.Integer(), primary_key=True)
    # https://docs.sqlalchemy.org/en/14/core/type_basics.html#generic-types
    text = sa.Column(sa.Text())
    unicode_text = sa.Column(sa.UnicodeText())
    boolean = sa.Column(sa.Boolean())
    integer = sa.Column(sa.Integer())
    float = sa.Column(sa.Float())
    date = sa.Column(sa.Date())
    date_time = sa.Column(sa.DateTime())
    time = sa.Column(sa.Time())
    # Stored encrypted at rest with a hard-coded, test-only key.
    encrypted = sa.Column(EncryptedType(sa.Text(), "8b5d3d25b3e5"))
##__________________________________________________________________||
@pytest.fixture
def app_with_empty_db():
    """Yield a Flask app bound to a fresh in-memory SQLite database."""
    uri = "sqlite:///:memory:"
    application = create_app(SQLALCHEMY_DATABASE_URI=uri)
    yield application
@pytest.fixture
def app_with_empty_tables(app_with_empty_db):
    """Yield the app after dropping any existing tables and creating the model schema."""
    application = app_with_empty_db
    with application.app_context():
        engine = sa.engine
        # Reflect whatever is already in the database so it can be dropped.
        existing = MetaData()
        existing.reflect(bind=engine)
        existing.drop_all(bind=engine)
        # Recreate all tables declared on the models.
        sa.Model.metadata.create_all(engine)
    yield application
##__________________________________________________________________||
# Row payloads exercised by test_convert: a fully-populated row, a row with a
# single True boolean, and a row of empty strings / NULLs.
params = [
    pytest.param(
        dict(
            text="abcde",
            unicode_text="絵文字😀 😃 😄 😁 😆",
            boolean=False,
            integer=512,
            float=2.34556234,
            date=datetime.date(2021, 10, 7),
            date_time=datetime.datetime(2021, 10, 7, 15, 4, 20),
            time=datetime.time(15, 4, 20),
            encrypted="secret string",
        ),
        id="one",
    ),
    pytest.param(
        dict(
            boolean=True,
        ),
        id="bool-true",
    ),
    pytest.param(
        dict(
            text="",
            unicode_text="",
            boolean=None,
            integer=None,
            float=None,
            date=None,
            date_time=None,
            time=None,
            encrypted=None,
        ),
        id="none",
    ),
]
@pytest.mark.parametrize("data", params)
def test_convert(app_with_empty_tables, data):
    """Round-trip rows through CSV export/import, exercising convert_data_type_for_insert()."""
    app = app_with_empty_tables
    tbl_name = "sample_table"
    expected = list(data.items())  # e.g., [('text', 'abcde'), ...]
    fields = list(data.keys())  # e.g., ['text', 'unicode_text', ...]
    # delete all rows from the table
    # The table is not empty! Not clear why!
    with app.app_context():
        SampleTable.query.delete()
        sa.session.commit()
    # enter data
    with app.app_context():
        row = SampleTable(**data)
        sa.session.add(row)
        sa.session.commit()
    # assert the data are committed as they entered
    with app.app_context():
        row = SampleTable.query.one()
        actual = [(f, getattr(row, f)) for f in fields]
        assert actual == expected
    # export to csv as string
    with app.app_context():
        csv_str = _export_tbl_to_csv(tbl_name)
        # empty the table
        SampleTable.query.delete()
        sa.session.commit()
    # import from the csv
    with app.app_context():
        # confirm the table is empty
        assert SampleTable.query.count() == 0
        _import_tbl_from_csv(tbl_name, csv_str)
    # assert: the re-imported row equals the original data
    with app.app_context():
        row = SampleTable.query.one()
        actual = [(f, getattr(row, f)) for f in fields]
        assert actual == expected
def _export_tbl_to_csv(tbl_name):
    """Dump every row of `tbl_name` to a CSV string, header row first."""
    result = sa.session.execute(f"select * from {tbl_name}")
    buffer = StringIO()
    writer = csv.writer(buffer, lineterminator="\n")
    # Column names first, then all rows straight from the result proxy.
    writer.writerow(result.keys())
    writer.writerows(result)
    csv_text = buffer.getvalue()
    buffer.close()
    return csv_text
def _import_tbl_from_csv(tbl_name, csv_str):
    """Insert rows from a CSV string into `tbl_name`, converting each value to its column type."""
    engine = sa.engine
    meta = MetaData()
    meta.reflect(bind=engine)
    table = meta.tables[tbl_name]
    reader = csv.reader(StringIO(csv_str))
    # First CSV row is the header; map each column name to its SQL type.
    header = next(reader)
    column_types = [table.columns[name].type for name in header]
    payload = []
    for record in reader:
        payload.append(
            {
                name: convert_data_type_for_insert(value, col_type)
                for name, col_type, value in zip(header, column_types, record)
            }
        )
    sa.session.execute(table.insert(), payload)
##__________________________________________________________________||
| 4,704 | 1,541 |
import pandas as pd
from sklearn.decomposition import PCA
import DataPreprocessing as dp
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from scipy.stats import pearsonr
#################################################################################################
#################################################################################################
#################################################################################################
def implementClustering(principal_df):
    """Standardize the data, run k-means with k=3, and plot the clusters."""
    frame = pd.DataFrame(principal_df)
    scaled = StandardScaler().fit_transform(frame)
    model = KMeans(n_clusters=3, init='k-means++')
    labels = model.fit_predict(scaled)
    print(labels)
    print(model.cluster_centers_)
    # Scatter the first two columns, colored by cluster assignment.
    plt.figure(figsize=(10, 7))
    points = np.array(scaled)
    plt.scatter(points[:, 0], points[:, 1], c=labels, cmap='rainbow')
    plt.title('K_means_clustering')
    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.show()
def loading_matrix(pca_model):
    """Print the PCA loading matrix, one row per original feature."""
    feature_names = ['V60','Vmn','Vsd','Asd','A+mn','A+sd','Br_mn','Br_sd','W']
    loadings = pd.DataFrame(pca_model.components_, columns=feature_names)
    # Transpose so features are rows and components are columns.
    print(np.transpose(loadings))
def plot_principalComponents(pca_train):
    """Scatter-plot the first two principal components of `pca_train`.

    pca_train: array-like of shape (n_samples, >=2) — PCA-transformed data.
    """
    plt.figure(figsize=(8,6))
    plt.title("PCA for Drivability")
    # NOTE(review): `cmap` has no effect here because no color array `c` is given.
    plt.scatter(pca_train[:,0],pca_train[:,1],cmap='rainbow')
    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.show()
def correlation(X, Y):
    """Return the Pearson correlation coefficient between X and Y."""
    coefficient, _p_value = pearsonr(X, Y)
    return coefficient
if __name__=='__main__':
    dataset = pd.DataFrame(dp.X_norm)
    #print(dataset)
    pca_obd = PCA(n_components=2)
    # NOTE(review): the fit() result is immediately overwritten below;
    # the fit_transform() call alone would suffice.
    principal_comp = pca_obd.fit(dp.X_norm)
    principal_comp = pca_obd.fit_transform(dp.X_norm)
    ############# PRINTING THE TYPE ##########################################
    print(type(principal_comp))
    principal_df = pd.DataFrame(data=principal_comp,columns=['PC1','PC2'])
    print(principal_df)
    X = dp.X
    ###################################################################################
    ############### CALCULATING CORRELATION MATRIX ####################################
    ###################################################################################
    # Pearson correlation of every original feature against each principal component.
    corr_matrix = []
    for i in range(X.shape[1]):
        temp = []
        for j in range(principal_comp.shape[1]):
            temp.append(correlation(X[:,i],principal_comp[:,j]))
        corr_matrix.append(temp)
    corr_matrix = np.array(corr_matrix)
    print(pd.DataFrame(corr_matrix,index= ['V60','Vmn','Vsd','Asd','A+mn','A+sd','Br_mn','Br_sd','W'],columns=['PC1','PC2']))
    ###################################################################################
    ############## CALCULATING VARIANCE RETAINED ####################################
    ###################################################################################
    print("Amount of data held after Dimensionality Reduction")
    print(sum(pca_obd.explained_variance_ratio_)*100)
    #RCA(principal_comp)
    #plot_principalComponents(principal_comp)
    #loading_matrix(pca_model)
    implementClustering(principal_df)
| 3,041 | 1,065 |
ORDERED_STREAM_NAMES = ['facilities', 'customers', 'bookings', 'checkins', 'invoices'] | 86 | 32 |
# Native modules
from enum import IntEnum
class Teams(IntEnum):
    """The two sides in the game: the human player and the computer."""
    PLAYER = 0
    COMPUTER = 1
import networkx as nx
def remove_self_loops_from_graph(g):
    """Remove all self-loop edges from `g` in place.

    Args:
        g: a networkx graph (or any object exposing the same edge API).

    Returns:
        list: the removed (u, v) self-loop edges.
    """
    try:
        # networkx < 2.4 exposed selfloop_edges() as a graph method.
        self_loops = list(g.selfloop_edges())
    except AttributeError:
        # The method was removed in networkx 2.4; use the module-level function.
        self_loops = list(nx.selfloop_edges(g))
    g.remove_edges_from(self_loops)
    return self_loops
def remove_self_loops_from_edges_file(graph_file):
    """Load a directed graph from an edge-list file and strip its self-loops.

    Args:
        graph_file: path to an edge-list file with integer node ids.

    Returns:
        list: the removed self-loop edges.
    """
    # Bug fix: the original read `args.original_graph` (an undefined global
    # left over from an argparse script) and ignored the `graph_file` argument.
    g = nx.read_edgelist(graph_file, nodetype=int, create_using=nx.DiGraph())
    return remove_self_loops_from_graph(g)
| 331 | 136 |
"""
[2015-04-22] Challenge #211 [Intermediate] Ogre Maze
https://www.reddit.com/r/dailyprogrammer/comments/33hwwf/20150422_challenge_211_intermediate_ogre_maze/
#Description:
Today we are going to solve a maze. What? Again? Come on, Simpsons did it. Yah okay so we always pick a hero to walk a
maze. This time our hero is an Ogre.
An ogre is large. Your run of the mill hero "@" takes up a 1x1 spot. Easy. But our beloved hero today is an ogre.
@@
@@
Ogres take up a 2x2 space instead of a 1x1. This makes navigating a maze tougher as you have to handle the bigger ogre.
So I will give you a layout of a swamp. (Ogres navigate swamps while puny heroes navigate caves. That's the unwritten
rules of maze challenges) You will find the path (if possible) for
the ogre to walk to his gold.
#Input:
You will read in a swamp. The swamp is laid out in 10x10 spaces. Each space can be the following:
* . - empty spot
* @ - 1/4th of the 2x2 ogre
* $ - the ogre's gold
* O - sink hole - the ogre cannot touch these. All 2x2 of the Ogre manages to fall down one of these (even if it is a
1x1 spot too. Don't be bothered by this - think of it as a "wall" but in a swamp we call them sink holes)
#Output:
You will navigate the swamp. If you find a path you will display the solution of all the spaces the ogre will occupy to
get to his gold. Use a "&" symbol to show the muddy path created by the ogre to reach his gold. If there is no path at
all then you will output "No Path"
#Example Input 1:
@@........
@@O.......
.....O.O..
..........
..O.O.....
..O....O.O
.O........
..........
.....OO...
.........$
#Example Output 1:
&&.&&&&&&&
&&O&&&&&&&
&&&&&O.O&&
&&&&&&&&&&
..O.O&&&&&
..O..&&O.O
.O...&&&&.
.....&&&&.
.....OO&&&
.......&&&
#Example Input 2:
@@........
@@O.......
.....O.O..
..........
..O.O.....
..O....O.O
.O........
..........
.....OO.O.
.........$
#Example Output 2:
No Path
#FAQ (Will update with answers here)
* Q: Does path have to be shortest Path.
* A: No.
### -
* Q: There could be a few different paths. Which one do I output?
* A: The first one that works. Answers will vary based on how people solve it.
### -
* Q: My output should show all the spots the Ogre moves too or just the optimal path?
* A: The ogre will hit dead ends. But only show the optimal path and not all his dead ends. Think of this as a GPS
Tom-Tom guide for the Ogre so he uses the program to find his gold. TIL Ogres subscribe to /r/dailyprogrammer. (And use
the internet....)
#Challenge Input 1:
$.O...O...
...O......
..........
O..O..O...
..........
O..O..O...
..........
......OO..
O..O....@@
........@@
#Challenge Input 2:
.@@.....O.
.@@.......
..O..O....
.......O..
...O......
..........
.......O.O
...O.O....
.......O..
.........$
#Bonus:
For those seeking more challenge. Instead of using input swamps you will generate a swamp. Place the Ogre randomly.
Place his gold randomly. Generate sinkholes based on the size of the swamp.
For example you are given N for a NxN swamp to generate. Generate a random swamp and apply your solution to it. The
exact design/algorithm for random generation I leave it for you to tinker with. I suggest start with like 15% of the
swamp spots are sinkholes and go up or down based on your results. (So you get paths and not always No Path)
"""
import sys
from collections import deque


def solve_ogre_maze(rows):
    """Find a path for a 2x2 ogre ('@') through the swamp to the gold ('$').

    Args:
        rows: list of equal-length strings. '@' marks the ogre's 2x2 start,
              'O' marks sink holes the ogre may not touch, '$' the gold.

    Returns:
        str: the swamp with every cell the ogre occupied along one valid path
             marked '&' (start cells included, 'O' cells preserved), or the
             string "No Path" when the gold is unreachable.
    """
    grid = [list(row) for row in rows]
    height = len(grid)
    width = len(grid[0]) if height else 0
    start = goal = None
    for r in range(height):
        for c in range(width):
            if grid[r][c] == '@' and start is None:
                start = (r, c)  # top-left corner of the 2x2 ogre
            elif grid[r][c] == '$':
                goal = (r, c)
    if start is None or goal is None:
        return "No Path"

    def fits(r, c):
        # The ogre's 2x2 body must stay on the board and avoid sink holes.
        if r < 0 or c < 0 or r + 1 >= height or c + 1 >= width:
            return False
        return all(grid[r + dr][c + dc] != 'O' for dr in (0, 1) for dc in (0, 1))

    def reaches_gold(r, c):
        # The ogre "has" the gold when any of its four cells covers it.
        return goal in ((r, c), (r + 1, c), (r, c + 1), (r + 1, c + 1))

    # Breadth-first search over top-left ogre positions.
    came_from = {start: None}
    queue = deque([start])
    end = None
    while queue:
        position = queue.popleft()
        if reaches_gold(*position):
            end = position
            break
        r, c = position
        for nxt in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if nxt not in came_from and fits(*nxt):
                came_from[nxt] = position
                queue.append(nxt)
    if end is None:
        return "No Path"

    # Walk the path backwards, painting every cell the ogre occupied.
    out = [['O' if ch == 'O' else '.' for ch in row] for row in grid]
    position = end
    while position is not None:
        r, c = position
        for dr in (0, 1):
            for dc in (0, 1):
                out[r + dr][c + dc] = '&'
        position = came_from[position]
    return "\n".join("".join(row) for row in out)


def main():
    """Read a swamp (one row per line) from stdin and print the solution."""
    rows = [line.rstrip("\n") for line in sys.stdin if line.strip()]
    print(solve_ogre_maze(rows))


if __name__ == "__main__":
    main()
| 3,438 | 1,212 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import copy
import time
from test.intelliflow.core.signal_processing.dimension_constructs.test_dimension_spec import TestDimensionSpec
from test.intelliflow.core.signal_processing.routing_runtime_constructs import create_incoming_signal
from test.intelliflow.core.signal_processing.signal.test_signal_link_node import signal_dimension_tuple
import pytest
from intelliflow.core.platform.constructs import RoutingHookInterface
from intelliflow.core.serialization import dumps, loads
from intelliflow.core.signal_processing.definitions.dimension_defs import Type
from intelliflow.core.signal_processing.routing_runtime_constructs import *
from intelliflow.core.signal_processing.signal import *
from intelliflow.core.signal_processing.signal_source import InternalDatasetSignalSourceAccessSpec
from intelliflow.core.signal_processing.slot import SlotType
def _create_hook(code: str = "pass") -> Slot:
    """Wrap `code` in an inline synchronous Slot, the form routes expect for hook callables."""
    return Slot(SlotType.SYNC_INLINED, dumps(code), None, None, None, None)
class TestRoute:
    @classmethod
    def _route_1_basic(cls):
        """Build the simplest route: one internal input and one LONG output dimension."""
        from test.intelliflow.core.signal_processing.test_slot import TestSlot
        from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
        from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
        signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_1)
        output_spec = DimensionSpec.load_from_pretty({"output_dim": {type: Type.LONG}})
        # Identity-map the input's 'dim_1_1' onto the output's 'output_dim'.
        output_dim_link_matrix = [
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim"), lambda x: x, signal_dimension_tuple(TestSignal.signal_internal_1, "dim_1_1")
            )
        ]
        output_filter = signal_link_node.get_output_filter(
            output_spec,
            # Logical equivalent -> output_dim = (signal_internal_1('dim_1_1')
            output_dim_link_matrix,
        )
        output_signal = Signal(
            TestSignal.signal_internal_1.type,
            InternalDatasetSignalSourceAccessSpec("sample_data", output_spec, **{}),
            SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_1.domain_spec.integrity_check_protocol),
            "sample_data",
        )
        return Route(
            f"InternalDataNode-{output_signal.alias}",
            signal_link_node,
            output_signal,
            output_dim_link_matrix,
            [TestSlot.slot_batch_compute_basic],
            False,
        )
    @classmethod
    def _route_2_two_inputs_linked(cls):
        """Build a route over the two-input, linked signal link node ('sample_data_2')."""
        from test.intelliflow.core.signal_processing.test_slot import TestSlot
        from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
        from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
        signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_2)
        output_spec = DimensionSpec.load_from_pretty({"output_dim": {type: Type.LONG}})
        # Identity-map the first input's 'dim_1_1' onto the output dimension.
        output_dim_link_matrix = [
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim"), lambda x: x, signal_dimension_tuple(TestSignal.signal_internal_1, "dim_1_1")
            )
        ]
        output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
        output_signal = Signal(
            TestSignal.signal_internal_1.type,
            InternalDatasetSignalSourceAccessSpec("sample_data_2", output_spec, **{}),
            SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_1.domain_spec.integrity_check_protocol),
            "sample_data_2",
        )
        return Route(
            f"InternalDataNode-{output_signal.alias}",
            signal_link_node,
            output_signal,
            output_dim_link_matrix,
            [TestSlot.slot_batch_compute_basic],
            False,
        )
    @classmethod
    def _route_3_three_inputs_unlinked(cls):
        """Build a route over three unlinked inputs with a three-level output spec."""
        from test.intelliflow.core.signal_processing.test_slot import TestSlot
        from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
        from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
        signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_3_complex)
        # create sample expected output
        output_spec = DimensionSpec.load_from_pretty(
            {"output_dim_1": {type: Type.LONG, "output_dim_2": {type: Type.LONG, "output_dim_3": {type: Type.LONG}}}}
        )
        output_dim_link_matrix = [
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim_1"),
                lambda x: x,
                signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_1"),
            ),
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim_2"),
                # input's sub dimension is of type String, convert it.
                # because output spec expects it to be of type Long.
                lambda x: ord(x),
                signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_2"),
            ),
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim_3"),
                # and this one is from the 3rd input (which has only one dim 'dim_1_1')
                lambda x: x,
                signal_dimension_tuple(TestSignal.signal_s3_1, "dim_1_1"),
            ),
        ]
        output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
        output_signal = Signal(
            SignalType.INTERNAL_PARTITION_CREATION,
            InternalDatasetSignalSourceAccessSpec("sample_data_3", output_spec, **{}),
            SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_complex_1.domain_spec.integrity_check_protocol),
            "sample_data_3",
        )
        return Route(
            f"InternalDataNode-{output_signal.alias}",
            signal_link_node,
            output_signal,
            output_dim_link_matrix,
            [TestSlot.slot_batch_compute_basic],
            False,
        )
    @classmethod
    def _route_3_three_inputs_linked(cls):
        """Build a route over three inputs auto-linked on their shared dimension names."""
        from test.intelliflow.core.signal_processing.test_slot import TestSlot
        from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
        from test.intelliflow.core.signal_processing.signal.test_signal_link_node import TestSignalLinkNode
        signal_link_node = copy.deepcopy(TestSignalLinkNode.signal_link_node_3_complex)
        # add links (since the dimension names on same, use the auto-linking of dimensions,
        # so that;
        #   signal_internal_complex_1['dim_1_1'] == signal_s3_1['dim_1_1'], etc
        signal_link_node.compensate_missing_links()
        # create sample expected output
        output_spec = DimensionSpec.load_from_pretty(
            {
                "output_dim_1": {
                    type: Type.LONG,
                    "output_dim_2": {
                        type: Type.LONG,
                    },
                }
            }
        )
        output_dim_link_matrix = [
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim_1"),
                # from the second dimension of the first/second inputs (convert to Long)
                lambda x: ord(x),
                signal_dimension_tuple(TestSignal.signal_internal_complex_1, "dim_1_2"),
            ),
            SignalDimensionLink(
                signal_dimension_tuple(None, "output_dim_2"),
                # and this one is from the 3rd input (which has only one dim 'dim_1_1')
                lambda x: x,
                signal_dimension_tuple(TestSignal.signal_s3_1, "dim_1_1"),
            ),
        ]
        output_filter = signal_link_node.get_output_filter(output_spec, output_dim_link_matrix)
        output_signal = Signal(
            SignalType.INTERNAL_PARTITION_CREATION,
            InternalDatasetSignalSourceAccessSpec("sample_data_4", output_spec, **{}),
            SignalDomainSpec(output_spec, output_filter, TestSignal.signal_internal_complex_1.domain_spec.integrity_check_protocol),
            "sample_data_4",
        )
        return Route(
            f"InternalDataNode-{output_signal.alias}",
            signal_link_node,
            output_signal,
            output_dim_link_matrix,
            [TestSlot.slot_batch_compute_basic],
            False,
        )
    def test_route_init(self):
        """A basic route can be constructed."""
        assert self._route_1_basic()
    def test_route_init_with_hooks(self):
        """Routes accept execution/pending-node hooks; pending checkpoints come out sorted."""
        route = self._route_1_basic()
        Route(
            route.route_id,
            route.link_node,
            route.output,
            route._output_dim_matrix,
            route.slots,
            False,
            RouteExecutionHook(
                on_exec_begin=_create_hook(),
                on_exec_skipped=_create_hook(),
                on_compute_success=_create_hook(),
                on_compute_failure=_create_hook(),
                on_success=_create_hook(),
                on_failure=_create_hook(),
                checkpoints=[RouteCheckpoint(5, _create_hook())],
            ),
            30 * 24 * 60 * 60,
            RoutePendingNodeHook(on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=None),
        )
        # check another instantiation case + checkpoint sorting
        assert (
            Route(
                route.route_id,
                route.link_node,
                route.output,
                route._output_dim_matrix,
                route.slots,
                False,
                RouteExecutionHook(
                    on_exec_begin=_create_hook(),
                    on_exec_skipped=_create_hook(),
                    on_compute_success=_create_hook(),
                    on_compute_failure=_create_hook(),
                    on_success=_create_hook(),
                    on_failure=_create_hook(),
                    checkpoints=[],
                ),
                None,
                RoutePendingNodeHook(
                    on_pending_node_created=_create_hook(),
                    on_expiration=None,
                    # checkpoints passed out of order (2 before 1) on purpose
                    checkpoints=[RouteCheckpoint(2, _create_hook()), RouteCheckpoint(1, _create_hook())],
                ),
            )
            .pending_node_hook.checkpoints[0]
            .checkpoint_in_secs
            == 1
        )
    def test_route_init_with_hook_chain(self):
        """Chained hooks merge checkpoints and invoke every chained callback."""
        route = self._route_1_basic()
        # Sentinels flipped by the callbacks to prove the chain dispatched both.
        callback1_var = None
        callback1_var_expected = 1
        def _callback1(*args, **kwargs):
            nonlocal callback1_var
            callback1_var = callback1_var_expected
        callback2_var = None
        callback2_var_expected = 2
        def _callback2(*args, **kwargs):
            nonlocal callback2_var
            callback2_var = callback2_var_expected
        hook1 = RouteExecutionHook(
            on_exec_begin=_create_hook(),
            on_exec_skipped=_callback1,
            on_compute_success=_create_hook(),
            on_compute_failure=_create_hook(),
            on_success=_create_hook(),
            on_failure=_create_hook(),
            checkpoints=[RouteCheckpoint(5, _create_hook())],
        )
        hook2 = RouteExecutionHook(
            on_exec_begin=_create_hook(),
            on_exec_skipped=_callback2,
            on_compute_success=_create_hook(),
            on_compute_failure=_create_hook(),
            on_success=_create_hook(),
            on_failure=_create_hook(),
            checkpoints=[RouteCheckpoint(10, _create_hook())],
        )
        exec_hook_chain = hook1.chain(hook2)
        pending_hook1 = RoutePendingNodeHook(
            on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(5, _create_hook())]
        )
        pending_hook2 = RoutePendingNodeHook(
            on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(10, _create_hook())]
        )
        pending_hook3 = RoutePendingNodeHook(
            on_pending_node_created=_create_hook(), on_expiration=_create_hook(), checkpoints=[RouteCheckpoint(13, _create_hook())]
        )
        # Chaining three at once vs. chaining pairwise should be equivalent.
        pending_hook_chain = pending_hook1.chain(pending_hook2, pending_hook3)
        pending_hook_chain_2 = pending_hook1.chain(pending_hook2).chain(pending_hook3)
        Route(
            route.route_id,
            route.link_node,
            route.output,
            route._output_dim_matrix,
            route.slots,
            False,
            exec_hook_chain,
            24 * 60 * 60,
            pending_hook_chain,
        )
        assert len(exec_hook_chain.checkpoints) == 2
        assert len(pending_hook_chain.checkpoints) == 3
        assert len(pending_hook_chain_2.checkpoints) == 3
        exec_hook_chain.on_exec_begin()
        pending_hook_chain.on_pending_node_created()
        pending_hook_chain_2.on_expiration()
        exec_hook_chain.on_exec_skipped()
        # Both local callbacks in the chain must have fired.
        assert callback1_var == callback1_var_expected
        assert callback2_var == callback2_var_expected
    def test_route_equality(self):
        """Route equality is id-based; clones compare equal to their source."""
        assert self._route_1_basic() == self._route_1_basic()
        assert Route("test", None, None, [], [], False) == Route("test", None, None, [], [], False)
        assert Route("test", None, None, [], [], False) != Route("test2", None, None, [], [], False)
        assert self._route_1_basic() == self._route_1_basic().clone()
    def test_route_check_integrity(self):
        """check_integrity detects any change in link node, output, matrix or slots."""
        route = self._route_1_basic()
        assert route.check_integrity(self._route_1_basic())
        route2 = self._route_2_two_inputs_linked()
        # Route is very sensitive about an integrity check against a different Route. This is very critical
        # for whole Routing module. It should not occur! A safe-guard against a high-level (e.g RoutingTable) bug.
        with pytest.raises(ValueError):
            assert route.check_integrity(route2)
        # make id equal so that check move on to other fields
        route2._id = route.route_id
        assert not route.check_integrity(route2)
        assert route.check_integrity(Route(route.route_id, route.link_node, route.output, route._output_dim_matrix, route.slots, False))
        # Swapping in any foreign component (or emptying one) must fail the check.
        assert not route.check_integrity(
            Route(route.route_id, route2.link_node, route.output, route._output_dim_matrix, route.slots, False)
        )
        assert not route.check_integrity(
            Route(route.route_id, route.link_node, route2.output, route._output_dim_matrix, route.slots, False)
        )
        assert not route.check_integrity(Route(route.route_id, route.link_node, route.output, [], route.slots, False))
        assert not route.check_integrity(Route(route.route_id, route.link_node, route.output, route._output_dim_matrix, [], False))
    def test_route_check_integrity_noops(self):
        """show that some type of changes in route should not invalidate the integrity"""
        route = self._route_3_three_inputs_linked()
        # dim matrix ordering should not alter the semantics of route
        new_route = copy.deepcopy(route)
        new_route.link_node.link_matrix.reverse()
        new_route.output_dim_matrix.reverse()
        # TODO evaluate slots order? currently impacting integrity but not as critical as dim matrice
        assert route.check_integrity(new_route)
    # Pairs of (execution hook, pending-node TTL, pending hook) with the
    # expected outcome of comparing them via check_auxiliary_data_integrity.
    @pytest.mark.parametrize(
        "execution_hook_1, pending_node_ttl_1, pending_hook_1, execution_hook_2, pending_node_ttl_2, pending_hook_2, result",
        [
            # differing TTLs alone break auxiliary integrity
            (None, 30 * 24 * 60 * 60, None, None, 24 * 60 * 60, None, False),
            (
                RouteExecutionHook(
                    on_exec_begin=_create_hook(),
                    on_exec_skipped=_create_hook(),
                    on_compute_success=_create_hook(),
                    on_compute_failure=_create_hook(),
                    on_success=_create_hook(),
                    on_failure=_create_hook(),
                    checkpoints=[RouteCheckpoint(checkpoint_in_secs=5, slot=_create_hook())],
                ),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(
                    on_pending_node_created=_create_hook(),
                    on_expiration=_create_hook(),
                    checkpoints=[RouteCheckpoint(checkpoint_in_secs=1, slot=_create_hook()), RouteCheckpoint(2, _create_hook())],
                ),
                RouteExecutionHook(
                    on_exec_begin=_create_hook(),
                    on_exec_skipped=_create_hook(),
                    on_compute_success=_create_hook(),
                    on_compute_failure=_create_hook(),
                    on_success=_create_hook(),
                    on_failure=_create_hook(),
                    checkpoints=[RouteCheckpoint(5, _create_hook())],
                ),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(
                    on_pending_node_created=_create_hook(),
                    on_expiration=_create_hook(),
                    # also test that checkpoint other should not matter as long as values are same
                    checkpoints=[RouteCheckpoint(2, _create_hook()), RouteCheckpoint(1, _create_hook())],
                ),
                True,
            ),
            (
                RouteExecutionHook(on_exec_begin=_create_hook()),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(),
                RouteExecutionHook(on_exec_begin=_create_hook()),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(),
                True,
            ),
            # hook bodies with different code must not be considered equal
            (
                RouteExecutionHook(on_exec_begin=_create_hook("print('diff')")),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(),
                RouteExecutionHook(on_exec_begin=_create_hook()),
                30 * 24 * 60 * 60,
                RoutePendingNodeHook(),
                False,
            ),
            (None, None, None, None, None, None, True),
            # a hook present on only one side (either side) breaks integrity
            (
                RouteExecutionHook(on_exec_begin=None, on_exec_skipped=None),
                None,
                None,
                RouteExecutionHook(on_exec_begin=None, on_exec_skipped=_create_hook()),
                None,
                None,
                False,
            ),
            (
                RouteExecutionHook(on_exec_begin=None, on_exec_skipped=_create_hook()),
                None,
                None,
                RouteExecutionHook(on_exec_begin=None, on_exec_skipped=None),
                None,
                None,
                False,
            ),
            (
                RouteExecutionHook(
                    on_exec_begin=None,
                    on_exec_skipped=None,
                    on_compute_success=None,
                    on_compute_failure=None,
                    on_success=None,
                    on_failure=None,
                    checkpoints=[RouteCheckpoint(1, _create_hook())],
                ),
                None,
                RoutePendingNodeHook(),
                RouteExecutionHook(
                    on_exec_begin=None,
                    on_exec_skipped=None,
                    on_compute_success=None,
                    on_compute_failure=None,
                    on_success=None,
                    on_failure=None,
                    # change the value of first checkpoint
                    checkpoints=[RouteCheckpoint(5, _create_hook())],
                ),
                None,
                RoutePendingNodeHook(),
                False,
            ),
            (
                RouteExecutionHook(),
                None,
                RoutePendingNodeHook(
                    on_pending_node_created=_create_hook(), on_expiration=None, checkpoints=[RouteCheckpoint(2, _create_hook())]
                ),
                RouteExecutionHook(),
                None,
                RoutePendingNodeHook(
                    on_pending_node_created=_create_hook(),
                    on_expiration=None,
                    # also test that checkpoint other should not matter as long as values are same
                    checkpoints=[RouteCheckpoint(1, _create_hook())],
                ),
                False,
            ),
            (
                None,
                None,
                RoutePendingNodeHook(on_pending_node_created=None, on_expiration=None, checkpoints=[RouteCheckpoint(1, _create_hook())]),
                None,
                None,
                RoutePendingNodeHook(
                    on_pending_node_created=None,
                    on_expiration=None,
                    # also test that checkpoint other should not matter as long as values are same
                    checkpoints=[RouteCheckpoint(1, _create_hook("print('diff 2')"))],
                ),
                False,
            ),
        ],
    )
    def test_route_check_auxiliary_integrity(
        self, execution_hook_1, pending_node_ttl_1, pending_hook_1, execution_hook_2, pending_node_ttl_2, pending_hook_2, result
    ):
        """check_auxiliary_data_integrity compares hooks/TTL on otherwise identical routes."""
        route = self._route_1_basic()
        assert (
            Route(
                route.route_id,
                route.link_node,
                route.output,
                route._output_dim_matrix,
                route.slots,
                False,
                execution_hook_1,
                pending_node_ttl_1,
                pending_hook_1,
            ).check_auxiliary_data_integrity(
                Route(
                    route.route_id,
                    route.link_node,
                    route.output,
                    route._output_dim_matrix,
                    route.slots,
                    False,
                    execution_hook_2,
                    pending_node_ttl_2,
                    pending_hook_2,
                )
            )
            == result
        )
    def test_route_serialization(self):
        """A route survives a serialization round-trip unchanged."""
        route = self._route_1_basic()
        assert route == loads(dumps(route))
    def test_route_receive_basic(self):
        """A single-input route triggers immediately on each compatible signal."""
        from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal
        route = self._route_1_basic()
        # route will reject incompatible signal
        assert not route.receive(create_incoming_signal(TestSignal.signal_s3_1, [1]))
        assert not route._pending_nodes
        # successful trigger # 1
        response: Optional[Route.Response] = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
        assert response
        assert len(response.new_execution_contexts) == 1
        assert response.new_execution_contexts[0].slots
        assert DimensionFilter.check_equivalence(
            response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({1: {}})
        )
        # since the node completed immediately (since it has only one input),
        # also removed from the internal pending nodes.
        assert not route._pending_nodes
        # successful trigger # 2
        response: Optional[Route.Response] = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
        assert DimensionFilter.check_equivalence(
            response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({2: {}})
        )
        # since the node completed immediately (since it has only one input),
        # also removed from the internal pending nodes.
        assert not route._pending_nodes
def test_route_receive_two_inputs_linked(self):
    """Two linked inputs: a pending node completes only when both inputs arrive with the same dim value."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_2_two_inputs_linked()
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 1
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 2  # please note that it is 2 now!
    # will consume again with no internal effect
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
    assert not response.new_execution_contexts
    assert not response.new_pending_nodes
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    assert not response.new_execution_contexts
    assert not response.new_pending_nodes
    assert len(route._pending_nodes) == 2  # please note that it is 2 still
    # send in a Signal that belongs to the second input but with different dim value
    # will create another pending node since it is neither '1' nor '2' (linking is active).
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [3]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 3  # please note that it is 3 now!
    # Completions
    # unleash the third pending node (which is pending on its first input with dim value 3)
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [3]))
    assert len(response.new_execution_contexts) == 1
    assert not response.new_pending_nodes
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({3: {}})
    )
    assert len(route._pending_nodes) == 2  # please note that it got back to 2!
    # unleash the first node
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [1]))
    assert len(response.new_execution_contexts) == 1
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({1: {}})
    )
    assert len(route._pending_nodes) == 1
    # and finally the second node
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [2]))
    assert len(response.new_execution_contexts) == 1
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({2: {}})
    )
    assert not route._pending_nodes
def test_route_receive_three_inputs_unlinked(self):
    """Without links, a single incoming signal can satisfy every pending node (N-N semantics)."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_3_three_inputs_unlinked()
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [1, "y"]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 1
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [2, "y"]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 2  # please note that it is 2 now!
    # will consume again with no internal effect
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [2, "y"]))
    assert not response.new_execution_contexts
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [1, "y"]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 2  # please note that it is 2 still
    # EFFECT of missing linking (N-N logic)
    # incoming signal will satisfy all of the pending nodes
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [3]))
    assert len(response.new_execution_contexts) == 2
    assert not route._pending_nodes  # please note that it got back to 0 now!
    # we have to compare this way since the order is not guaranteed
    if DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec,
        DimensionFilter.load_raw(
            {
                2: {  # from the 1st dim of the 1st input signal
                    121: {3: {}}  # ord('y') from the second dim of the 1st input signal # from the 3rd input
                }
            }
        ),
    ):
        assert DimensionFilter.check_equivalence(
            response.new_execution_contexts[1].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({1: {121: {3: {}}}})
        )
    else:
        assert DimensionFilter.check_equivalence(
            response.new_execution_contexts[1].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({2: {121: {3: {}}}})
        )
def test_route_receive_three_inputs_linked(self):
    """With links active, each pending node only completes on its own linked dim value."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_3_three_inputs_linked()
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [1, "y"]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 1
    # will consume the event, create a new pending node but return no 'new_execution_contexts'
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [2, "y"]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 2
    # EFFECT of linking
    # incoming signal will not satisfy dimensional linking and will just create another node.
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [3]))
    assert not response.new_execution_contexts
    assert len(route._pending_nodes) == 3  # please note that it is 3 now!
    # unleash the most recent node
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_complex_1, [3, "y"]))
    assert len(response.new_execution_contexts) == 1
    assert len(route._pending_nodes) == 2
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({121: {3: {}}})
    )
    # unleash the node that created first
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [1]))
    assert len(response.new_execution_contexts) == 1
    assert len(route._pending_nodes) == 1
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({121: {1: {}}})
    )
    # unleash the node that created second
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [2]))
    assert len(response.new_execution_contexts) == 1
    assert not route._pending_nodes  # no remaining pending nodes!
    assert DimensionFilter.check_equivalence(
        response.new_execution_contexts[0].output.domain_spec.dimension_filter_spec, DimensionFilter.load_raw({121: {2: {}}})
    )
def test_route_check_expired_nodes(self):
    """Pending nodes older than the route's TTL are reported and purged by check_expired_nodes()."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_2_two_inputs_linked()
    # rebuild the route with a short (5 second) pending-node TTL
    route = Route(
        route.route_id,
        route.link_node,
        route.output,
        route._output_dim_matrix,
        route.slots,
        False,
        RouteExecutionHook(),
        5,  # seconds
        RoutePendingNodeHook(),
    )
    route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
    assert len(route._pending_nodes) == 2
    # send in a Signal that belongs to the second input but with different dim value
    # will create another pending node since it is neither '1' nor '2' (linking is active).
    response = route.receive(create_incoming_signal(TestSignal.signal_s3_1, [3]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 3  # please note that it is 3 now!
    # Completions
    # unleash the third pending node (which is pending on its first input with dim value 3)
    route.receive(create_incoming_signal(TestSignal.signal_internal_1, [3]))
    assert len(route._pending_nodes) == 2  # please note that it got back to 2!
    # just make sure that it has been at least 5 seconds after the creation of those pending nodes.
    time.sleep(5)
    expired_nodes = route.check_expired_nodes()
    assert len(expired_nodes) == 2
    assert len(route._pending_nodes) == 0
def test_route_zombie_node_on_other_input_already_materialized(self):
    """When the linked second input is pre-materialized on a value, mismatching events yield zombie nodes."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_2_two_inputs_linked()
    # create new route to make sure that the second input is already materialized on value 3 [for dim_1_1]!
    new_signal_link_node = SignalLinkNode(
        [TestSignal.signal_internal_1, create_incoming_signal(TestSignal.signal_s3_1.clone("test_signal_from_S3"), [3])]
    )
    new_signal_link_node.compensate_missing_links()
    route = Route(
        route.route_id,
        new_signal_link_node,
        route.output,
        route._output_dim_matrix,
        route.slots,
        False,
        route.execution_hook,
        route.pending_node_ttl_in_secs,  # seconds
        route.pending_node_hook,
    )
    # since second input is locked on 3, this event would yield a zombie node
    # 1 != 3
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 1
    assert next(iter(response.new_pending_nodes)).is_zombie
    # same again 2 != 3
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [2]))
    # since second input is locked on 3, this event would yield a zombie node
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 2
    assert next(iter(response.new_pending_nodes)).is_zombie
    # new pending node! 3 == 3
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [3]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 3
    # new node should NOT be a zombie, waiting for TestSignal.signal_s3_1[3] to come in
    assert not next(iter(response.new_pending_nodes)).is_zombie
def test_route_zombie_node_not_possible_when_inputs_unlinked(self):
    """Unlinked inputs cannot produce zombie nodes even if the other input is already materialized."""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_2_two_inputs_linked()
    # create new route to make sure that the second input is already materialized on value 3 [for dim_1_1]!
    new_signal_link_node = SignalLinkNode(
        [TestSignal.signal_internal_1, create_incoming_signal(TestSignal.signal_s3_1.clone("test_signal_from_S3"), [3])]
    )
    # UNLINKED !
    # new_signal_link_node.compensate_missing_links()
    route = Route(
        route.route_id,
        new_signal_link_node,
        route.output,
        route._output_dim_matrix,
        route.slots,
        False,
        route.execution_hook,
        route.pending_node_ttl_in_secs,  # seconds
        route.pending_node_hook,
    )
    # since second input is locked on 3, this event can NOT yield a zombie node since they are unlinked
    # 1 != 3
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    assert not response.new_execution_contexts
    assert len(response.new_pending_nodes) == 1
    assert len(route._pending_nodes) == 1
    assert not next(iter(response.new_pending_nodes)).is_zombie
def test_route_zombie_node_not_possible_when_other_is_a_materialized_reference_even_if_inputs_linked(self):
    """Actually yields execution immediately since the second input is a materialized reference"""
    from test.intelliflow.core.signal_processing.signal.test_signal import TestSignal

    route = self._route_2_two_inputs_linked()
    # create new route to make sure that the second input is already materialized on value 3 [for dim_1_1]!
    new_signal_link_node = SignalLinkNode(
        [
            TestSignal.signal_internal_1,
            # materialized reference input
            create_incoming_signal(TestSignal.signal_s3_1.clone("test_signal_from_S3").as_reference(), [3]),
        ]
    )
    # LINK !
    new_signal_link_node.compensate_missing_links()
    route = Route(
        route.route_id,
        new_signal_link_node,
        route.output,
        route._output_dim_matrix,
        route.slots,
        False,
        route.execution_hook,
        route.pending_node_ttl_in_secs,
        route.pending_node_hook,
    )
    # although second input is locked on 3, this event can NOT yield a zombie node since it is a material reference.
    # 1 != 3
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    # yields execution !
    assert response.new_execution_contexts
    assert len(response.new_pending_nodes) == 0
    assert len(route._pending_nodes) == 0
    # DONE
    # We are actually done but let's show that even if they are unlinked, the result would not change.
    new_signal_link_node = SignalLinkNode(
        [
            TestSignal.signal_internal_1,
            # materialized reference input
            create_incoming_signal(TestSignal.signal_s3_1.clone("test_signal_from_S3").as_reference(), [3]),
        ]
    )
    # UNLINK !
    # new_signal_link_node.compensate_missing_links()
    route = Route(
        route.route_id,
        new_signal_link_node,
        route.output,
        route._output_dim_matrix,
        route.slots,
        False,
        route.execution_hook,
        route.pending_node_ttl_in_secs,
        route.pending_node_hook,
    )
    response = route.receive(create_incoming_signal(TestSignal.signal_internal_1, [1]))
    # yields execution again!
    assert response.new_execution_contexts
    assert len(response.new_pending_nodes) == 0
    assert len(route._pending_nodes) == 0
| 39,967 | 11,581 |
from django.urls import path, include
from . import views
from django.views.generic import TemplateView

# URL configuration for the forum app (reversed under the 'forum' namespace).
app_name = 'forum'

urlpatterns = [
    # /forum/
    path('about/', TemplateView.as_view(template_name='forum/about.html'), name='about'),
    path('', views.IndexView.as_view(), name='index'),
    path('top/', views.IndexView.as_view(), name='top'),
    path('new/', views.IndexView.as_view(), name='new'),
    # NOTE(review): this pattern matches ANY '<string>/<int>/' pair, so it would
    # also capture e.g. 'user/123/' before the 'user/<str:username>/' rule below
    # is considered — confirm usernames can never be purely numeric.
    path('<str:mode>/<int:page>/', views.PageView.as_view(), name='page'),
    # /forum/edit/
    path('edit/', views.EditView.as_view(), name='new_post'),
    path('<int:post_id>/edit/', views.EditView.as_view(), name='edit'),
    path('<int:post_id>/edit/delete/', views.delete, name='delete'),
    # /forum/<post_id>/
    path('<int:post_id>/', views.ContentView.as_view(), name='content'),
    path('<int:post_id>/clickup/', views.ClickUpView.as_view(), name='clickup'),
    # /forum/<post_id>/comment/
    path('<int:post_id>/comment/', views.CommentView.as_view(), name='new_comment'),
    path('<int:post_id>/comment/<int:comment_id>/', views.CommentView.as_view(), name='comment'),
    path('sign_up/', views.SignUpView.as_view(), name='sign_up'),
    path('login/', views.LoginView.as_view(template_name='forum/login.html',
                                           extra_context={'next': '/forum/'}), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    # /forum/user/
    path('user/<str:username>/', views.UserView.as_view(), name='user'),
]
from leer.core.primitives.header import Header, PoPoW, VoteData
from leer.core.storage.txos_storage import TXOsStorage
from leer.core.chains.headers_manager import HeadersManager
from leer.core.storage.excesses_storage import ExcessesStorage
from leer.core.storage.headers_storage import HeadersStorage
from leer.core.primitives.transaction_skeleton import TransactionSkeleton
from leer.core.lubbadubdub.transaction import Transaction
from leer.core.lubbadubdub.ioput import IOput
from leer.core.lubbadubdub.offset_utils import sum_offset
from time import time
from leer.core.parameters.dynamic import next_reward, next_target
from leer.core.parameters.constants import initial_target
import functools
class Block():
    """A full block: a Header plus a TransactionSkeleton, bound to a storage space.

    The full Transaction (`self.tx`) is only materialized on demand, during
    verification or rich serialization.
    """

    def __init__(self, storage_space, header=None, transaction_skeleton=None):
        self._header = header if header else Header()
        self.transaction_skeleton = transaction_skeleton if transaction_skeleton else TransactionSkeleton()
        self.tx = None  # lazily built full Transaction (set by non_context_verify)
        self.storage_space = storage_space

    @property
    def header(self):
        # Instances created through unusual deserialization paths may lack
        # `_header`; create an empty one on first access.
        try:
            return self._header
        except AttributeError:  # FIX: narrowed from bare `except:` so unrelated errors propagate
            self._header = Header()
            return self._header

    @property
    def hash(self):
        return self.header.hash

    @property
    def partial_hash(self):
        return self.header.partial_hash

    def serialize(self, rtx, rich_block_format=False, max_size=40000):
        """Serialize header + skeleton; in rich format embed the full tx (bounded by max_size)."""
        serialized = b""
        serialized += self.header.serialize()
        serialized += self.transaction_skeleton.serialize(
            rich_format=rich_block_format, max_size=max_size,
            full_tx=build_tx_from_skeleton(self.transaction_skeleton,
                                           self.storage_space.txos_storage,
                                           self.storage_space.excesses_storage,
                                           self.header.height, self.header.version, rtx=rtx,
                                           historical=True) if rich_block_format else None)
        return serialized

    @classmethod
    @functools.lru_cache(maxsize=40)
    def from_serialized(cls, serialized_block, storage_space):
        # NOTE: the cache keys on (cls, serialized_block, storage_space) and
        # keeps strong references to both for the cache's lifetime.
        b = cls(storage_space=storage_space)
        b.deserialize(serialized_block)
        return b

    def deserialize(self, serialized):
        self.deserialize_raw(serialized)

    def deserialize_raw(self, serialized):
        """Consume header bytes, then skeleton bytes; return the unconsumed remainder."""
        serialized = self.header.deserialize_raw(serialized)
        serialized = self.transaction_skeleton.deserialize_raw(serialized, storage_space=self.storage_space)
        return serialized

    def non_context_verify(self, rtx):
        '''
        While this check is called 'non_context', it actually uses context since it needs:
          a) fully validated headers chain up to this block
          b) downloaded outputs
          c) blocks which create inputs spent in checked(self) block should be applied
        Currently if those conditions are not satisfied block is marked as not_downloaded and thus can not be validated.
        To verify block we need to
          0) check that header is known and valid
          1) verify transaction
          2) check that transaction can be applied
          3) check reward size (actually it can be checked on headers level)

        NOTE: relies on `assert`, which is stripped under `python -O`.
        '''
        # stage 0: header must be known and contextually valid
        assert self.storage_space.headers_storage.has(self.header.hash, rtx=rtx), "Block's header is unknown"
        #self.storage_space.headers_storage.context_validation(self.header.hash)
        assert not self.storage_space.headers_storage.get(self.header.hash, rtx=rtx).invalid, "Block's header is invalid. Reason: `%s`"%self.storage_space.headers_storage.get(self.header.hash, rtx=rtx).reason
        # currently during building we automatically check that tx can be applied and tx is valid
        self.tx = build_tx_from_skeleton(self.transaction_skeleton, txos_storage=self.storage_space.txos_storage,
                                         excesses_storage=self.storage_space.excesses_storage,
                                         block_height=self.header.height, block_version=self.header.version,
                                         rtx=rtx, non_context=True)
        # stage 3 => should be moved to blockchain
        #commitment_root, txos_root = self.storage_space.txos_storage.apply_block_tx_get_merkles_and_rollback(tx)
        #excesses_root = self.storage_space.excesses_storage.apply_block_tx_get_merkles_and_rollback(tx)
        #assert [commitment_root, txos_root, excesses_root]==self.header.merkles
        # This is context validation too??? TODO
        miner_subsidy, dev_reward = next_reward(self.header.prev, self.storage_space.headers_storage, rtx=rtx)
        assert self.tx.coinbase.value == (miner_subsidy + self.transaction_skeleton.relay_fee), "Wrong miner subsidy"
        if dev_reward:
            # FIX: this assertion message previously read "Wrong miner subsidy" (copy-paste error).
            assert self.tx.dev_reward.value == dev_reward, "Wrong dev reward"
        return True

    def __str__(self):
        return "Block< hash: %s..., height: %d, inputs: %d, outputs %d>" % (
            self.header.hash[:6], self.header.height,
            len(self.transaction_skeleton.input_indexes), len(self.transaction_skeleton.output_indexes))
def build_tx_from_skeleton(tx_skeleton, txos_storage, excesses_storage, block_height, block_version, rtx, historical=False, non_context=False):
    '''
    Build a full Transaction from `tx_skeleton` using `txos_storage`.

    Raises if the transaction is invalid or any input/output is not available.
    Optionally, if `historical` (or `non_context`) is True, output indexes are
    looked up both in the confirmed storage and in the mempool.
    '''
    tx = Transaction(txos_storage=txos_storage, excesses_storage=excesses_storage)
    for _i in tx_skeleton.input_indexes:
        if historical or non_context:
            tx.inputs.append(txos_storage.confirmed.find(_i, rtx=rtx))
        else:
            tx.inputs.append(txos_storage.confirmed.get(_i, rtx=rtx))
    for _o in tx_skeleton.output_indexes:
        if historical or non_context:
            # About non_context: if we are on one branch and build a block from another one
            # and this block contains an output which is already committed on our branch (tx is
            # confirmed on both branches) we should get the txo from confirmed storage.
            try:
                tx.outputs.append(txos_storage.confirmed.find(_o, rtx=rtx))
            except Exception:
                # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed; a missing output still falls back to mempool.
                tx.outputs.append(txos_storage.mempool[_o])
        else:
            tx.outputs.append(txos_storage.mempool[_o])
    tx.additional_excesses = tx_skeleton.additional_excesses.copy()
    tx.updated_excesses = tx_skeleton.updated_excesses.copy()
    tx.mixer_offset = tx_skeleton.mixer_offset
    # NOTE: `assert` is stripped under `python -O`; verification relies on it.
    if historical or non_context:
        assert tx.non_context_verify(block_height=block_height)
    else:
        assert tx.verify(block_height=block_height, block_version=block_version, rtx=rtx)
    return tx
#To setup utils
def generate_genesis(tx, storage_space, wtx):
    '''
    Build the genesis block from a coinbase transaction:
      1. spend inputs and add outputs and excesses from tx to storage
      2. calc new merkles
      3. generate header
      4. rollback outputs
    '''
    storage = storage_space.txos_storage
    excesses = storage_space.excesses_storage
    exc_merkle = excesses.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx)  # it should be calculated first, since we need to calc address_excess_num_index
    merkles = storage.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx) + [exc_merkle]
    popow = PoPoW([])  # genesis has no predecessors
    votedata = VoteData()
    target = initial_target
    full_offset = tx.mixer_offset
    header = Header(height=0, supply=tx.coinbase.value, full_offset=full_offset, merkles=merkles, popow=popow, votedata=votedata, timestamp=int(time()), target=target, version=int(1), nonce=b"\x00"*16)
    tx_skeleton = TransactionSkeleton(tx=tx)
    new_block = Block(storage_space, header, tx_skeleton)
    return new_block
def generate_block_template(tx, storage_space, wtx, get_tx_from_mempool=True, timestamp=None, dev_reward_vote=b"\x00"):
    '''
    Generate block template: block is correct but nonce (by default) is equal to zero.
    Thus difficulty target (almost always) isn't met.

    arguments:
      tx [mandatory]: transaction which contains coinbase output. It also may contain other inputs and outputs.
      storage_space [mandatory]: -
      get_tx_from_mempool [optional, default True]: if set, the transaction from the mempool will be
        merged into the block transaction. If the merge would produce an invalid tx (for instance
        the mempool tx spends the same inputs as the tx with coinbase), the mempool tx is discarded.

    Inner logic:
      1. apply block_tx to txos_storage and excesses_storage
      2. calc new merkles
      3. generate header with new merkles
      4. generate block by appending tx_skeleton and new header
      5. rollback block_tx
    '''
    storage = storage_space.txos_storage
    excesses = storage_space.excesses_storage
    current_block = storage_space.blocks_storage.get(storage_space.blockchain.current_tip(rtx=wtx), rtx=wtx)
    if get_tx_from_mempool:
        try:
            tx = tx.merge(storage_space.mempool_tx.give_tx(), rtx=wtx)
        except Exception:
            # Best-effort merge: an incompatible mempool tx is simply skipped.
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
            pass
    exc_merkle = excesses.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx)  # it should be calculated first, since we need to calc address_excess_num_index
    merkles = storage.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx) + [exc_merkle]
    popow = current_block.header.next_popow()
    supply = current_block.header.supply + tx.minted_value - tx.calc_new_outputs_fee()
    height = current_block.header.height + 1
    votedata = VoteData()
    target = next_target(current_block.hash, storage_space.headers_storage, rtx=wtx)
    full_offset = sum_offset(current_block.header.full_offset, tx.mixer_offset)
    if not timestamp:
        # Timestamps must be strictly newer than the current tip's header.
        timestamp = max(int(time()), storage_space.headers_storage.get(storage_space.blockchain.current_tip(rtx=wtx), rtx=wtx).timestamp + 1)
    header = Header(height=height, supply=supply, full_offset=full_offset, merkles=merkles, popow=popow, votedata=votedata, timestamp=timestamp, target=target, version=int(1), nonce=b"\x00"*16)
    tx_skeleton = TransactionSkeleton(tx=tx)
    new_block = Block(storage_space, header, tx_skeleton)
    return new_block
class ContextBlock(Block):
    # TODO consider removing ContextBlock. For now we store all information about validity in ContextHeader
    # (it allows headers_manager to provide less useless paths).
    '''
    Wrapper of Block for inner storage. It contains contextual info about block: for instance is it valid in chain or not.
    '''

    def __init__(self, storage_space=None, block=None):
        # Either wrap an existing Block (reusing its header/skeleton/tx) or
        # start empty from a storage_space.
        if block:
            Block.__init__(self, storage_space=block.storage_space, header=block.header, transaction_skeleton=block.transaction_skeleton)
            if block.tx:
                self.tx = block.tx
        else:
            if not storage_space:
                raise TypeError("ContextBlock initialized without context")
            Block.__init__(self, storage_space)
        self.invalid = False  # set True once contextual validation fails
        self.reason = None  # human-readable reason for invalidity

    def serialize_with_context(self):
        # Base serialization followed by a context trailer:
        # 1-byte invalid flag, 2-byte big-endian reason length, utf-8 reason.
        ser = super(ContextBlock, self).serialize(rtx=None)  # We can pass None as rtx, since rtx is required for rich block serialization
        ser += int(self.invalid).to_bytes(1, 'big')
        reason = self.reason if self.reason else ""
        ser += int(len(reason)).to_bytes(2, 'big')
        ser += reason.encode('utf-8')
        return ser

    @classmethod
    @functools.lru_cache(maxsize=10)
    def from_serialized(cls, serialized_block, storage_space):
        # NOTE: cached on (cls, serialized_block, storage_space).
        b = cls(storage_space=storage_space)
        b.deserialize(serialized_block)
        return b

    def deserialize(self, serialized):
        self.deserialize_raw(serialized)

    def deserialize_raw(self, serialized):
        # Consume the base block bytes, then the context trailer written by
        # serialize_with_context(); return the unconsumed remainder.
        ser = super(ContextBlock, self).deserialize_raw(serialized)
        self.invalid, ser = bool(ser[0]), ser[1:]
        reason_len, ser = int.from_bytes(ser[:2], 'big'), ser[2:]
        self.reason, ser = ser[:reason_len].decode('utf-8'), ser[reason_len:]
        return ser

    def __str__(self):
        return "ContextBlock< hash: %s..., height: %d, inputs: %d, outputs %d, valid: %s, reason %s>"%(self.header.hash[:6], self.header.height
               , len(self.transaction_skeleton.input_indexes),len(self.transaction_skeleton.output_indexes),
               ("-" if self.invalid else '+'), self.reason )
| 12,085 | 3,773 |
from datetime import datetime
from gpiozero import LED
from threading import Thread
from time import sleep
class GpioService:
    """Controls gpiozero LED pins and watches an optional client heartbeat.

    When heartbeats are enabled and none arrives within
    `heartbeat_timeout_seconds`, `loss_action` is invoked once (by default all
    known pins are switched off).
    """

    def __init__(self):
        self.pins = {}  # pin id -> LED
        self.heartbeat_last = datetime.now()
        self.heartbeat_enabled = False
        self.have_heartbeat = False
        self.heartbeat_timeout_seconds = 2
        # FIX: `loss_action` and `run` must be assigned BEFORE the watchdog
        # thread starts. Previously the thread was started first, so it could
        # read `loss_action` before assignment (AttributeError), and the loop
        # set `run = True` itself, which could undo a close() issued before
        # the loop's first iteration.
        self.loss_action = self.default_heartbeat_loss_action
        self.run = True
        self.heartbeat_thread = Thread(target=self.service_heartbeat)
        self.heartbeat_thread.start()

    def get_pin(self, id):
        """Return the current value of the pin, creating it on first use."""
        self.maybe_add(id)
        return self.pins[id].value

    def get_all_pins(self):
        """Return a list of (id, value) pairs for every pin created so far."""
        return [(id, led.value) for id, led in self.pins.items()]

    def pin_on(self, id):
        """Switch the pin on (creating it on first use)."""
        self.maybe_add(id)
        return self.pins[id].on()

    def pin_off(self, id):
        """Switch the pin off (creating it on first use)."""
        self.maybe_add(id)
        return self.pins[id].off()

    def heartbeat(self):
        """Record a heartbeat from the client."""
        self.heartbeat_last = datetime.now()
        self.have_heartbeat = True

    def heartbeat_enable(self):
        self.heartbeat_enabled = True

    def heartbeat_disable(self):
        self.heartbeat_enabled = False

    def maybe_add(self, id):
        # Lazily construct the LED the first time a pin id is used.
        if id not in self.pins:
            self.pins[id] = LED(id)

    def heartbeat_ok(self):
        """True if heartbeats are disabled or the last one is recent enough."""
        if not self.heartbeat_enabled:
            return True
        return (datetime.now() - self.heartbeat_last).total_seconds() \
            < self.heartbeat_timeout_seconds

    def service_heartbeat(self):
        # Watchdog loop: fires loss_action once per heartbeat loss.
        while self.run:
            if self.have_heartbeat and not self.heartbeat_ok():
                self.have_heartbeat = False
                print("Heartbeat lost")
                self.loss_action()
            sleep(1)

    def default_heartbeat_loss_action(self):
        # Fail safe: turn every known pin off.
        for id, led in self.pins.items():
            led.off()

    def close(self):
        """Stop the watchdog thread and wait for it to finish."""
        self.run = False
        self.heartbeat_thread.join()
| 1,982 | 672 |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# Load the dataset; `path` is injected by the execution environment.
bank = pd.read_csv(path)

# Split columns by dtype for inspection.
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)
numerical_var = bank.select_dtypes(include='number')
print(numerical_var)
# code starts here
# code ends here

# --------------
# code starts here
# Drop the identifier column and impute missing values with each column's mode.
banks = bank.drop('Loan_ID', axis=1)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
print(type(bank_mode))
print(bank_mode)
banks.fillna(bank_mode, inplace=True)
print(banks.isnull().sum())
# code ends here

# --------------
# Code starts here
# Mean loan amount per (Gender, Married, Self_Employed) group.
avg_loan_amount = banks.pivot_table(index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc='mean')
print(avg_loan_amount)
# code ends here

# --------------
# code starts here
loan_approved_se = len(banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')])
loan_approved_nse = len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')])
# FIX: use the actual row count instead of the hard-coded 614 so the
# percentages stay correct if the dataset ever changes size.
total_applicants = banks.shape[0]
percentage_se = loan_approved_se * 100 / total_applicants
percentage_nse = loan_approved_nse * 100 / total_applicants
# code ends here

# --------------
# code starts here
# Loan term in years; select applicants with terms of 25 years or more.
loan_term = banks['Loan_Amount_Term'].apply(lambda x: x / 12)
big_loan_term = len(banks[loan_term >= 25])
print(big_loan_term)
print(banks[loan_term >= 25])
# code ends here

# --------------
# code starts here
# Mean income and credit history per loan status.
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
| 1,465 | 593 |
import unittest
from sim.battle import Battle
from data import dex
class TestMega(unittest.TestCase):
    """Mega-evolution battle tests."""

    def test_pidgeot(self):
        """Pidgeot holding Pidgeotite mega-evolves and takes Mew's tackle damage."""
        battle = Battle(debug=False, rng=False)
        battle.join(0, [{'species': 'pidgeot',
                         'item': 'pidgeotite',
                         'moves': ['tackle', 'protect']}])
        battle.join(1, [{'species': 'mew', 'moves': ['tackle']}])
        battle.choose(0, dex.Decision('move', 0, mega=True))
        battle.choose(1, dex.Decision('move', 0, mega=True))
        battle.do_turn()
        pidgeot = battle.sides[0].pokemon[0]
        self.assertEqual(pidgeot.species, 'pidgeotmega')
        self.assertEqual(pidgeot.hp, pidgeot.maxhp - 23)

    def test_mewtwo_x(self):
        """Mewtwo mega-evolves with Mewtwonite X while the opponent declines to mega."""
        battle = Battle(debug=False, rng=False)
        battle.join(0, [{'species': 'mewtwo',
                         'item': 'mewtwonitex',
                         'moves': ['tackle', 'protect']
                         }])
        battle.join(1, [{'species': 'charizard',
                         'item': 'charizarditex',
                         'moves': ['tackle']
                         }])
        battle.choose(0, dex.Decision('move', 0, mega=True))
        battle.choose(1, dex.Decision('move', 0, mega=False))
        battle.do_turn()
        mewtwo = battle.sides[0].pokemon[0]
        self.assertEqual(mewtwo.species, 'mewtwomegax')
        self.assertEqual(mewtwo.hp, mewtwo.maxhp - 17)

    def runTest(self):
        self.test_pidgeot()
        # FIX: `self.test_mewtwo_x` was previously referenced without calling it,
        # so the second test silently never ran via runTest().
        self.test_mewtwo_x()
| 1,568 | 549 |
# Copyright 2014-2015 Yandex LLC and contributors <https://yandex.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from rep.test.test_estimators import check_classifier, check_regression, check_params, \
generate_classification_data, check_classification_reproducibility
from rep.estimators.pybrain import PyBrainClassifier, PyBrainRegressor
from sklearn.ensemble import BaggingClassifier
from rep.estimators import SklearnClassifier
__author__ = 'Artem Zhirokhov'

# Capability flags handed to the generic estimator checks: PyBrain estimators
# provide no staged predictions, expose no feature importances, and do not
# support sample weights.
classifier_params = {
    'has_staged_pp': False,
    'has_importances': False,
    'supports_weight': False
}
regressor_params = {
    'has_staged_predictions': False,
    'has_importances': False,
    'supports_weight': False
}
def test_pybrain_params():
    """Smoke-check that constructor parameters are accepted and stored for both estimators."""
    clf_kwargs = dict(layers=[1, 2], epochs=5, use_rprop=True, hiddenclass=['LinearLayer'])
    check_params(PyBrainClassifier, **clf_kwargs)
    reg_kwargs = dict(layers=[1, 2], epochs=5, etaplus=1.3, hiddenclass=['LinearLayer'], learningrate=0.1)
    check_params(PyBrainRegressor, **reg_kwargs)
def test_pybrain_classification():
    """Run the generic classifier checks over several network configurations."""
    configurations = (
        PyBrainClassifier(epochs=2),
        PyBrainClassifier(epochs=-1, continue_epochs=1, layers=[]),
        PyBrainClassifier(epochs=2, layers=[5, 2]),
    )
    for clf in configurations:
        check_classifier(clf, **classifier_params)
def test_pybrain_reproducibility():
    """PyBrain training is not reproducible; run the check but tolerate failure."""
    try:
        import numpy
        X, y, _ = generate_classification_data()
        clf1 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
        clf2 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
        print(clf1.predict_proba(X) - clf2.predict_proba(X))
        assert numpy.allclose(clf1.predict_proba(X), clf2.predict_proba(X)), 'different predicitons'
        check_classification_reproducibility(clf1, X, y)
    except Exception:
        # This test fails because PyBrain can't reproduce training.
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
def test_pybrain_Linear_MDLSTM():
    """Contract checks for networks mixing Linear and MDLSTM hidden layers."""
    clf = PyBrainClassifier(epochs=2, layers=[10, 2],
                            hiddenclass=['LinearLayer', 'MDLSTMLayer'])
    check_classifier(clf, **classifier_params)
    reg = PyBrainRegressor(epochs=3, layers=[10, 2],
                           hiddenclass=['LinearLayer', 'MDLSTMLayer'])
    check_regression(reg, **regressor_params)
def test_pybrain_SoftMax_Tanh():
    """Contract checks for three-hidden-layer Softmax/Tanh networks."""
    clf = PyBrainClassifier(epochs=2, layers=[10, 5, 2],
                            hiddenclass=['SoftmaxLayer', 'SoftmaxLayer', 'TanhLayer'],
                            use_rprop=True)
    check_classifier(clf, **classifier_params)
    reg = PyBrainRegressor(epochs=2, layers=[10, 5, 2],
                           hiddenclass=['SoftmaxLayer', 'TanhLayer', 'TanhLayer'])
    check_regression(reg, **regressor_params)
def pybrain_test_partial_fit():
    """Fit a classifier incrementally via two partial_fit calls."""
    # NOTE(review): the name does not start with "test_", so pytest will not
    # collect this function — possibly intentional (slow/flaky), but confirm;
    # rename to test_* if it is supposed to run in CI.
    clf = PyBrainClassifier(layers=[4], epochs=2)
    X, y, _ = generate_classification_data()
    clf.partial_fit(X, y)
    # Second call with a 2-row slice exercises continued training on new data.
    clf.partial_fit(X[:2], y[:2])
def test_pybrain_multi_classification():
    """Classifier contract checks with four target classes."""
    estimator = PyBrainClassifier()
    check_classifier(estimator, n_classes=4, **classifier_params)
def test_pybrain_regression():
    """Regressor contract checks with default settings."""
    estimator = PyBrainRegressor()
    check_regression(estimator, **regressor_params)
def test_pybrain_multi_regression():
    """Regressor contract checks with four simultaneous targets."""
    estimator = PyBrainRegressor()
    check_regression(estimator, n_targets=4, **regressor_params)
def test_simple_stacking_pybrain():
    """PyBrain classifier wrapped in sklearn Bagging, re-wrapped for REP checks."""
    ensemble = BaggingClassifier(base_estimator=PyBrainClassifier(),
                                 n_estimators=3)
    check_classifier(SklearnClassifier(clf=ensemble), **classifier_params)
| 3,872 | 1,341 |
from weaviate import Client
from uuid import uuid1
class TestConsecutiveCreateAndUpdate:
    """Stress test against a Weaviate instance: create ~28k blob objects,
    update them (with and without explicit batching, with different vectors)
    and verify after each pass that every object is still retrievable."""

    # Weaviate client shared by all operations; injected via the constructor.
    client: Client

    # Small base64-encoded PNG used as the initial blob payload.
    img = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAABhGlDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV/TSou0ONhBxCFD62RBVMRRq1CECqFWaNXB5NIvaNKSpLg4Cq4FBz8Wqw4uzro6uAqC4AeIm5uToouU+L+k0CLGg+N+vLv3uHsHCK0q08zAOKDplpFJJcVcflUMviKACEKIwy8zsz4nSWl4jq97+Ph6l+BZ3uf+HBG1YDLAJxLPsrphEW8QT29adc77xFFWllXic+Ixgy5I/Mh1xeU3ziWHBZ4ZNbKZeeIosVjqYaWHWdnQiKeIY6qmU76Qc1nlvMVZqzZY5578heGCvrLMdZojSGERS5AgQkEDFVRhIUGrToqJDO0nPfzDjl8il0KuChg5FlCDBtnxg//B727N4uSEmxROAn0vtv0RB4K7QLtp29/Htt0+AfzPwJXe9ddawMwn6c2uFjsCBraBi+uupuwBlzvA0FNdNmRH8tMUikXg/Yy+KQ8M3gL9a25vnX2cPgBZ6ip9AxwcAqMlyl73eHeot7d/z3T6+wEPO3J/B8olWgAAAAlwSFlzAAAuIwAALiMBeKU/dgAAAAd0SU1FB+UEDQgmFS2naPsAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADElEQVQI12NgYGAAAAAEAAEnNCcKAAAAAElFTkSuQmCC"
    # Larger base64-encoded JPEG used for the first round of updates.
    # NOTE(review): in the original paste this literal was broken across
    # several physical lines; rejoined here into a single string literal.
    img2 = "/9j/4AAQSkZJRgABAQEASABIAAD/4QpKRXhpZgAASUkqAAgAAAAGABoBBQABAAAAVgAAABsBBQABAAAAXgAAACgBAwABAAAAAgAAADEBAgANAAAAZgAAADIBAgAUAAAAdAAAAGmHBAABAAAAiAAAAJoAAABIAAAAAQAAAEgAAAABAAAAR0lNUCAyLjEwLjE0AAAyMDIxOjAzOjI1IDE2OjI5OjQ3AAEAAaADAAEAAAABAAAAAAAAAAgAAAEEAAEAAAAAAQAAAQEEAAEAAADXAAAAAgEDAAMAAAAAAQAAAwEDAAEAAAAGAAAABgEDAAEAAAAGAAAAFQEDAAEAAAADAAAAAQIEAAEAAAAGAQAAAgIEAAEAAAA7CQAAAAAAAAgACAAIAP/Y/+AAEEpGSUYAAQEAAAEAAQAA/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8AAEQgA1wEAAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A9/ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK+YP+GjvGH/QN0P8A78Tf/Ha+n6+AKAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD2D/ho7xh/0DdD/AO/E3/x2j/ho7xh/0DdD/wC/E3/x2vH6KAPYP+GjvGH/AEDdD/78Tf8Ax2j/AIaO8Yf9A3Q/+/E3/wAdrx+igD7/AKKKKACiiigAooooAKKKKACvgCvv+vgCgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPv8AooooAKKKKACiiigAooooAK+AK+/6+AKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA+/wCiiigAooooAKKKKACiiigAr4Ar7/r4AoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD7/AKKKKACiiigAooooAKKKKACvgCvv+vgCgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPv8AooooAKKKKACiiigAooooAK+AK+/6+AKACiiigAooooAKKKKACiiigAr0jwt8FPEni7w5aa5YXulR2t1v2JPLIHG12Q5AjI6qe9eb19f/AAS/5JDoX/bx/wClElAHkH/DOPjD/oJaH/3/AJv/AI1R/wAM4+MP+glof/f+b/41X0/RQB8wf8M4+MP+glof/f8Am/8AjVH/AAzj4w/6CWh/9/5v/jVfT9FAHzB/wzj4w/6CWh/9/wCb/wCNUf8ADOPjD/oJaH/3/m/+NV9P0UAfMH/DOPjD/oJaH/3/AJv/AI1R/wAM4+MP+glof/f+b/41X0/RQB8wf8M4+MP+glof/f8Am/8AjVH/AAzj4w/6CWh/9/5v/jVfT9FAHzB/wzj4w/6CWh/9/wCb/wCNUf8ADOPjD/oJaH/3/m/+NV9P0UAFFFFABRRRQAUUUUAFFFFABXwBX3/XwBQAUUUUAFFFFABRRRQAUUUUAFfX/wAEv+SQ6F/28f8ApRJXyBX1/wDBL/kkOhf9vH/pRJQB6BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV8AV9/wBfAFABRRRQAUUUUAFFFFABRRRQAV9f/BL/AJJDoX/bx/6USV8gV9f/AAS/5JDoX/bx/wClElAHoFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwBX3/XwBQAUUUUAFFFFABRRRQAUUUUAFfX/wS/5JDoX/AG8f+lElfIFfX/wS/wCSQ6F/28f+lElAHoFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwBX3/XwBQAUUUUAFFFFABRRRQAUUUUAFfX/AMEv+SQ6F/28f+lElfIFfX/wS/5JDoX/AG8f+lElAHoFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwBRRQAUUUUAFFFFABRRRQAUUUUAFfX/wAEv+SQ6F/28f8ApRJRRQB6BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH//ZAP/iArBJQ0NfUFJPRklMRQABAQAAAqBsY21zBDAAAG1udHJSR0IgWFlaIAflAAMAGQAPABwAMmFjc3BBUFBMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD21gABAAAAANMtbGNtcwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADWRlc2MAAAEgAAAAQGNwcnQAAAFgAAAANnd0cHQAAAGYAAAAFGNoYWQAAAGsAAAALHJYWVoAAAHYAAAAFGJYWVoAAAHsAAAAFGdYWVoAAAIAAAAAFHJUUkMAAAIUAAAAIGdUUkMAAAIUAAAAIGJUUkMAAAIUAAAAIGNocm0AAAI0AAAAJGRtbmQAAAJYAAAAJGRtZGQAAAJ8AAAAJG1sdWMAAAAAAAAAAQAAAAxlblVTAAAAJAAAABwARwBJAE0AUAAgAGIAdQBpAGwAdAAtAGkAbgAgAHMAUgBHAEJtbHVjAAAAAAAAAAEAAAAMZW5VUwAAABoAAAAcAFAAdQBiAGwAaQBjACAARABvAG0AYQBpAG4AAFhZWiAAAAAAAAD21gABAAAAANMtc2YzMgAAAAAAAQxCAAAF3v//8yUAAAeTAAD9kP//+6H///2iAAAD3AAAwG5YWVogAAAAAAAAb6AAADj1AAADkFhZWiAAAAAAAAAknwAAD4QAALbEWFlaIAAAAAAAAGKXAAC3hwAAGNlwYXJhAAAAAAADAAAAAmZmAADypwAADVkAABPQAAAKW2Nocm0AAAAAAAMAAAAAo9cAAFR8AABMzQAAmZoAACZnAAAPXG1sdWMAAAAAAAAAAQAAAAxlblVTAAAACAAAABwARwBJAE0AUG1sdWMAAAAAAAAAAQAAAAxlblVTAAAACAAAABwAcwBSAEcAQv/bAEMAAwICAwICAwMDAwQDAwQFCAUFBAQFCgcHBggMCgwMCwoLCw0OEhANDhEOCwsQFhARExQVFRUMDxcYFhQYEhQVFP/bAEMBAwQEBQQFCQUFCRQNCw0UFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFP/CABEIABUAGQMBEQACEQEDEQH/xAAXAAADAQAAAAAAAAAAAAAAAAAABwgJ/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/aAAwDAQACEAMQAAABpkSAANUzlHgBVRABpSB//8QAGxAAAQUBAQAAAAAAAAAAAAAABQAEBhc2AhD/2gAIAQEAAQUCIPeBrC6giuoIrqCKWZYey7JP6VNqlTalmWiep8//xAAUEQEAAAAAAAAAAAAAAAAAAAAw/9oACAEDAQE/AU//xAAUEQEAAAAAAAAAAAAAAAAAAAAw/9oACAECAQE/AU//xAAhEAAABQQDAQEAAAAAAAAAAAAAAQIDBQQ0k9IRdLIxEP/aAAgBAQAGPwKpq3CUbbDanVEn7wRci1kMaNxayGNG4tZDGjcTPTe8GKakbNJOPuJaSavnJnwLqPyL0F1H5F6CZ6b3gxDdxn2X7//EABcQAQEBAQAAAAAAAAAAAAAAAAERICH/2gAIAQEAAT8hLs8otSCp2GcWLErbs8qEQWDyu8WJWr//2gAMAwEAAgADAAAAEIABAAJP/8QAFBEBAAAAAAAAAAAAAAAAAAAAMP/aAAgBAwEBPxBP/8QAFBEBAAAAAAAAAAAAAAAAAAAAMP/aAAgBAgEBPxBP/8QAFxABAQEBAAAAAAAAAAAAAAAAAREgMf/aAAgBAQABPxAT69h7QKUBQsqdz06dNin17D2oAKoLLB5vp02bP//Z"

    def __init__(self, client):
        # Store the externally-constructed Weaviate client.
        self.client = client

    def batch_callback_result(self, results: dict) -> None:
        """
        Check batch results for errors.

        Prints the first error found and raises; returns nothing otherwise.

        Parameters
        ----------
        results : dict
            The Weaviate batch creation return value.
        """
        if results is not None:
            for result in results:
                if 'result' in result and 'errors' in result['result']:
                    if 'error' in result['result']['errors']:
                        print(f"error: {result['result']['errors']}")
                        raise Exception("Some batch items failed!")

    def deleteTestClass(self, schemas, cls_name):
        # Drop the test class if it is already present (idempotent setup/teardown).
        if self.client.schema.contains(schemas):
            self.client.schema.delete_class(cls_name)

    def checkIfObjectsExist(self, uuids):
        # Fetch every object (including its vector) and fail loudly on a miss.
        for _id in uuids:
            # assert self.client.data_object.exists(_id)
            resp = self.client.data_object.get_by_id(_id, with_vector=True)
            if resp is None:
                print(f"ERROR!!! Object with ID: {_id} doesn't exist!!!")
                # NOTE(review): bare `raise` with no active exception produces
                # RuntimeError("No active exception to re-raise") — confirm a
                # specific exception type wasn't intended here.
                raise

    def consecutive_create_and_update_operations(self):
        """Create 28k objects, update them twice (auto-batched and explicitly
        batched, with different vectors) and verify existence after each pass."""
        print("Test started")
        cls_name = 'Test123'
        # Minimal schema: one uninverted blob property, no vectorizer.
        schemas = {
            'classes': [
                {
                    'class': cls_name,
                    "vectorizer": "none",
                    'vectorIndexConfig': {'skip': False},
                    'properties': [
                        {
                            'dataType': ['blob'],
                            'name': 'a',
                            'indexInverted': False,
                        }
                    ],
                },
            ]
        }
        self.deleteTestClass(schemas, cls_name)
        uuids = [str(uuid1()) for _ in range(28000)]
        assert len(list(set(uuids))) == len(uuids), 'uuids contain duplicates'
        # extend
        print(f"Create objects in batch of 50 items...")
        with self.client.batch(batch_size=50, callback=self.batch_callback_result) as batch:
            for _id in uuids:
                batch.add_data_object(data_object={'a': self.img}, class_name=cls_name, uuid=_id)
        self.client.batch.flush()
        print(f"Update objects with vector started...")
        x = 1
        # embed: one-by-one updates via the client's internal batch (no context manager).
        for _id in uuids:
            self.client.batch.add_data_object(data_object={'a': self.img2}, class_name=cls_name, uuid=_id, vector=[3,2,1])
            if x % 1000 == 0:
                print(f"updated {x} objects...")
            x += 1
        print("Check if objects exist...")
        # check
        self.checkIfObjectsExist(uuids)
        print(f"Update objects with new vector in batch of 50 items...")
        x = 1
        # update vectors
        with self.client.batch(batch_size=50, callback=self.batch_callback_result) as batch:
            for _id in uuids:
                batch.add_data_object(data_object={'a': self.img}, class_name=cls_name, uuid=_id, vector=[1,2,3])
                if x % 1000 == 0:
                    print(f"updated {x} objects...")
                x += 1
        self.client.batch.flush()
        print("Check if objects exist...")
        # check
        self.checkIfObjectsExist(uuids)
        self.deleteTestClass(schemas, cls_name)
        print("Test done")
# Entry point: run the stress test against a locally running Weaviate instance.
c = Client('http://localhost:8080')
test = TestConsecutiveCreateAndUpdate(c)
test.consecutive_create_and_update_operations()
from flask import Flask
from rest_server import rest_api
from socket_server import socket_api
# Wire the REST and Socket.IO extensions onto a single Flask application.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'  # TODO: load from environment in production
rest_api.init_app(app)
socket_api.init_app(app)

if __name__ == '__main__':
    # Fix: debug was the string 'true' — any non-empty string (even 'false')
    # is truthy, so pass a real boolean. Port should be an int, not a string.
    socket_api.run(app, host="127.0.0.1", port=5000, debug=True)
| 301 | 119 |
#
#
# This source file is part of ELINA (ETH LIbrary for Numerical Analysis).
# ELINA is Copyright © 2019 Department of Computer Science, ETH Zurich
# This software is distributed under GNU Lesser General Public License Version 3.0.
# For more information, see the ELINA project website at:
# http://elina.ethz.ch
#
# THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT ANY WARRANTY OF ANY KIND, EITHER
# EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO ANY WARRANTY
# THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS OR BE ERROR-FREE AND ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# TITLE, OR NON-INFRINGEMENT. IN NO EVENT SHALL ETH ZURICH BE LIABLE FOR ANY
# DAMAGES, INCLUDING BUT NOT LIMITED TO DIRECT, INDIRECT,
# SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN
# ANY WAY CONNECTED WITH THIS SOFTWARE (WHETHER OR NOT BASED UPON WARRANTY,
# CONTRACT, TORT OR OTHERWISE).
#
#
from elina_coeff_h import *
# ====================================================================== #
# Basics
# ====================================================================== #
def elina_coeff_alloc(discr):
    """
    Allocate a new ElinaCoeff whose core type is selected by ``discr``.

    Parameters
    ----------
    discr : c_int
        Discriminant specifying the type of the core of the ElinaCoeff.

    Returns
    -------
    coeff : ElinaCoeffPtr
        Pointer to the newly allocated ElinaCoeff, or None on failure.
    """
    coeff = None
    try:
        c_fun = elina_auxiliary_api.elina_coeff_alloc
        c_fun.argtypes = [c_uint]
        c_fun.restype = ElinaCoeffPtr
        coeff = c_fun(discr)
    except BaseException:
        # Library-wide convention: report the problem and fall through.
        print('Problem with loading/calling "elina_coeff_alloc" from "libelinaux.so"')
        print('Make sure you are passing c_uint to the function')
    return coeff
def elina_coeff_reinit(coeff, coeff_discr, scalar_discr):
    """
    Reinitialise an ElinaCoeff according to the provided discriminants.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be reinitiliased.
    coeff_discr : c_uint
        ElinaCoeffDiscr enum selecting the core of the ElinaCoeff.
    scalar_discr : c_uint
        ElinaScalarDiscr enum selecting the ElinaScalar core
        (0 = double, 1 = mpq, 2 = mpfr).

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_reinit
        c_fun.argtypes = [ElinaCoeffPtr, c_uint, c_uint]
        c_fun.restype = None
        c_fun(coeff, coeff_discr, scalar_discr)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_reinit" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_uint, c_uint to the function')
def elina_coeff_free(coeff):
    """
    Release the memory held by an ElinaCoeff.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be freed.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_free
        c_fun.argtypes = [ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(coeff)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_free" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr to the function')
def elina_coeff_fprint(stream, coeff):
    """
    Print an ElinaCoeff onto the given C stream.

    Parameters
    ----------
    stream : c_void_p
        Stream on which to print.
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be printed.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_fprint
        c_fun.argtypes = [c_void_p, ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(stream, coeff)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_fprint" from "libelinaux.so"')
        print('Make sure you are passing c_void_p, ElinaCoeffPtr to the function')
def elina_coeff_reduce(coeff):
    """
    Reduce an ElinaCoeff with an ElinaInterval core [a, a] to an ElinaScalar.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be reduced.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_reduce
        c_fun.argtypes = [ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(coeff)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_reduce" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr to the function')
# ====================================================================== #
# Assignments
# ====================================================================== #
def elina_coeff_set(coeff1, coeff2):
    """
    Copy the value of one ElinaCoeff into another.

    Parameters
    ----------
    coeff1 : ElinaCoeffPtr
        Destination.
    coeff2 : ElinaCoeffPtr
        Source.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set
        c_fun.argtypes = [ElinaCoeffPtr, ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(coeff1, coeff2)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaCoeffPtr to the function')
def elina_coeff_set_scalar(coeff, scalar):
    """
    Set an ElinaCoeff with ElinaScalar core from an ElinaScalar.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    scalar : ElinaScalarPtr
        Source.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_scalar
        c_fun.argtypes = [ElinaCoeffPtr, ElinaScalarPtr]
        c_fun.restype = None
        c_fun(coeff, scalar)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_scalar" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaScalarPtr to the function')
def elina_coeff_set_scalar_mpq(coeff, mpq_t):
    """
    Set the value of an ElinaCoeff with core ElinaScalar by using a Mpq_t.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    mpq_t : Mpq_t
        Source.

    Returns
    -------
    None
    """
    try:
        elina_coeff_set_scalar_mpq_c = elina_auxiliary_api.elina_coeff_set_scalar_mpq
        elina_coeff_set_scalar_mpq_c.restype = None
        # Fix: was misspelled ".argypes", so the argument types were never registered.
        elina_coeff_set_scalar_mpq_c.argtypes = [ElinaCoeffPtr, Mpq_t]
        elina_coeff_set_scalar_mpq_c(coeff, mpq_t)
    except Exception:
        # Fix: error message named the local alias instead of the C symbol.
        print('Problem with loading/calling "elina_coeff_set_scalar_mpq" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, Mpq_t to the function')
def elina_coeff_set_scalar_int(coeff, num):
    """
    Set the value of an ElinaCoeff with core ElinaScalar by using a long integer.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    num : c_long
        Source.

    Returns
    -------
    None
    """
    try:
        elina_coeff_set_scalar_int_c = elina_auxiliary_api.elina_coeff_set_scalar_int
        elina_coeff_set_scalar_int_c.restype = None
        # Fix: was misspelled ".argypes", so the argument types were never registered.
        elina_coeff_set_scalar_int_c.argtypes = [ElinaCoeffPtr, c_long]
        elina_coeff_set_scalar_int_c(coeff, num)
    except Exception:
        print('Problem with loading/calling "elina_coeff_set_scalar_int" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_long to the function')
def elina_coeff_set_scalar_frac(coeff, num, den):
    """
    Set the value of an ElinaCoeff with core ElinaScalar by using a fraction of two long integers.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    num : c_long
        Numerator.
    den : c_ulong
        Denominator.

    Returns
    -------
    None
    """
    try:
        elina_coeff_set_scalar_frac_c = elina_auxiliary_api.elina_coeff_set_scalar_frac
        elina_coeff_set_scalar_frac_c.restype = None
        # Fix: was misspelled ".argypes", so the argument types were never registered.
        elina_coeff_set_scalar_frac_c.argtypes = [ElinaCoeffPtr, c_long, c_ulong]
        elina_coeff_set_scalar_frac_c(coeff, num, den)
    except Exception:
        print('Problem with loading/calling "elina_coeff_set_scalar_frac" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_long, c_ulong to the function')
def elina_coeff_set_scalar_double(coeff, num):
    """
    Set the value of an ElinaCoeff with core ElinaScalar by using a double.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    num : c_double
        Source.

    Returns
    -------
    None
    """
    try:
        elina_coeff_set_scalar_double_c = elina_auxiliary_api.elina_coeff_set_scalar_double
        elina_coeff_set_scalar_double_c.restype = None
        # Fix: was misspelled ".argypes", so the argument types were never registered.
        elina_coeff_set_scalar_double_c.argtypes = [ElinaCoeffPtr, c_double]
        elina_coeff_set_scalar_double_c(coeff, num)
    except Exception:
        print('Problem with loading/calling "elina_coeff_set_scalar_double" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_double to the function')
def elina_coeff_set_scalar_mpfr(coeff, mpfr_t):
    """
    Set an ElinaCoeff with ElinaScalar core from a Mpfr_t.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    mpfr_t : Mpfr_t
        Source.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_scalar_mpfr
        c_fun.argtypes = [ElinaCoeffPtr, Mpfr_t]
        c_fun.restype = None
        c_fun(coeff, mpfr_t)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_scalar_mpfr" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, Mpfr_t to the function')
def elina_coeff_set_interval(coeff, interval):
    """
    Set an ElinaCoeff with ElinaInterval core from an ElinaInterval.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    interval : ElinaIntervalPtr
        Source.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval
        c_fun.argtypes = [ElinaCoeffPtr, ElinaIntervalPtr]
        c_fun.restype = None
        c_fun(coeff, interval)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaIntervalPtr to the function')
def elina_coeff_set_interval_scalar(coeff, inf, sup):
    """
    Set an ElinaCoeff with ElinaInterval core from two ElinaScalar bounds.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    inf : ElinaScalarPtr
        Lower bound.
    sup : ElinaScalarPtr
        Upper bound.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_scalar
        c_fun.argtypes = [ElinaCoeffPtr, ElinaScalarPtr, ElinaScalarPtr]
        c_fun.restype = None
        c_fun(coeff, inf, sup)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_scalar" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaScalarPtr, ElinaScalarPtr to the function')
def elina_coeff_set_interval_mpq(coeff, inf, sup):
    """
    Set the value of an ElinaCoeff with core ElinaInterval by using two Mpq_t-s.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    inf : Mpq_t
        Lower bound.
    sup : Mpq_t
        Upper bound.

    Returns
    -------
    None
    """
    try:
        elina_coeff_set_interval_mpq_c = elina_auxiliary_api.elina_coeff_set_interval_mpq
        elina_coeff_set_interval_mpq_c.restype = None
        # Fix: was misspelled ".argypes", so the argument types were never registered.
        elina_coeff_set_interval_mpq_c.argtypes = [ElinaCoeffPtr, Mpq_t, Mpq_t]
        elina_coeff_set_interval_mpq_c(coeff, inf, sup)
    except Exception:
        print('Problem with loading/calling "elina_coeff_set_interval_mpq" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, Mpq_t, Mpq_t to the function')
def elina_coeff_set_interval_int(coeff, inf, sup):
    """
    Set an ElinaCoeff with ElinaInterval core from two long integer bounds.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    inf : c_long
        Lower bound.
    sup : c_long
        Upper bound.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_int
        c_fun.argtypes = [ElinaCoeffPtr, c_long, c_long]
        c_fun.restype = None
        c_fun(coeff, inf, sup)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_int" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_long, c_long to the function')
def elina_coeff_set_interval_frac(coeff, numinf, deninf, numsup, densup):
    """
    Set an ElinaCoeff with ElinaInterval core from two fractions of long integers.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    numinf : c_long
        Numerator of the lower bound.
    deninf : c_ulong
        Denominator of the lower bound.
    numsup : c_long
        Numerator of the upper bound.
    densup : c_ulong
        Denominator of the upper bound.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_frac
        c_fun.argtypes = [ElinaCoeffPtr, c_long, c_ulong, c_long, c_ulong]
        c_fun.restype = None
        c_fun(coeff, numinf, deninf, numsup, densup)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_frac" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_long, c_ulong, c_long, c_ulong to the function')
def elina_coeff_set_interval_double(coeff, inf, sup):
    """
    Set an ElinaCoeff with ElinaInterval core from two double bounds.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    inf : c_double
        Lower bound.
    sup : c_double
        Upper bound.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_double
        c_fun.argtypes = [ElinaCoeffPtr, c_double, c_double]
        c_fun.restype = None
        c_fun(coeff, inf, sup)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_double" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_double, c_double to the function')
def elina_coeff_set_interval_top(coeff):
    """
    Set an ElinaCoeff with ElinaInterval core to the universe interval [-oo, +oo].

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_top
        c_fun.argtypes = [ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(coeff)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_top" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr to the function')
def elina_coeff_set_interval_mpfr(coeff, inf, sup):
    """
    Set an ElinaCoeff with ElinaInterval core from two Mpfr_t bounds.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Destination.
    inf : Mpfr_t
        Lower bound.
    sup : Mpfr_t
        Upper bound.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_set_interval_mpfr
        c_fun.argtypes = [ElinaCoeffPtr, Mpfr_t, Mpfr_t]
        c_fun.restype = None
        c_fun(coeff, inf, sup)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_set_interval_mpfr" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, Mpfr_t, Mpfr_t to the function')
# ====================================================================== #
# Combined allocation and assignment
# ====================================================================== #
def elina_coeff_alloc_set(coeff2):
    """
    Allocate a new ElinaCoeff initialised from another ElinaCoeff.

    Parameters
    ----------
    coeff2 : ElinaCoeffPtr
        Pointer to the ElinaCoeff used for initialisation.

    Returns
    -------
    coeff1 : ElinaCoeffPtr
        Pointer to the newly allocated and initialised ElinaCoeff,
        or None on failure.
    """
    coeff1 = None
    try:
        c_fun = elina_auxiliary_api.elina_coeff_alloc_set
        c_fun.argtypes = [ElinaCoeffPtr]
        c_fun.restype = ElinaCoeffPtr
        coeff1 = c_fun(coeff2)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_alloc_set" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr to the function')
    return coeff1
def elina_coeff_alloc_set_scalar(scalar):
    """
    Allocate a new ElinaCoeff and initialise it with an ElinaScalar.

    Parameters
    ----------
    scalar : ElinaScalarPtr
        Pointer to the ElinaScalar used for initialisation.

    Returns
    -------
    coeff : ElinaCoeffPtr
        Pointer to the newly allocated and initialised ElinaCoeff,
        or None on failure.
    """
    coeff = None
    try:
        elina_coeff_alloc_set_scalar_c = elina_auxiliary_api.elina_coeff_alloc_set_scalar
        # Fix: restype was None, which discarded the returned pointer; the
        # function allocates and returns an ElinaCoeff (cf. elina_coeff_alloc_set).
        elina_coeff_alloc_set_scalar_c.restype = ElinaCoeffPtr
        elina_coeff_alloc_set_scalar_c.argtypes = [ElinaScalarPtr]
        coeff = elina_coeff_alloc_set_scalar_c(scalar)
    except Exception:
        print('Problem with loading/calling "elina_coeff_alloc_set_scalar" from "libelinaux.so"')
        print('Make sure you are passing ElinaScalarPtr to the function')
    return coeff
def elina_coeff_alloc_set_interval(interval):
    """
    Allocate a new ElinaCoeff and initialise it with an ElinaInterval.

    Parameters
    ----------
    interval : ElinaIntervalPtr
        Pointer to the ElinaInterval used for initialisation.

    Returns
    -------
    coeff : ElinaCoeffPtr
        Pointer to the newly allocated and initialised ElinaCoeff,
        or None on failure.
    """
    coeff = None
    try:
        elina_coeff_alloc_set_interval_c = elina_auxiliary_api.elina_coeff_alloc_set_interval
        elina_coeff_alloc_set_interval_c.restype = ElinaCoeffPtr
        elina_coeff_alloc_set_interval_c.argtypes = [ElinaIntervalPtr]
        # Fix: the call's result was discarded, so the function always returned None.
        coeff = elina_coeff_alloc_set_interval_c(interval)
    except Exception:
        print('Problem with loading/calling "elina_coeff_alloc_set_interval" from "libelinaux.so"')
        print('Make sure you are passing ElinaIntervalPtr to the function')
    return coeff
# ====================================================================== #
# Tests
# ====================================================================== #
def elina_coeff_cmp(coeff1, coeff2):
    """
    Compare two ElinaCoeff-s.

    Parameters
    ----------
    coeff1 : ElinaCoeffPtr
        First operand.
    coeff2 : ElinaCoeffPtr
        Second operand.

    Returns
    -------
    result : c_int
        The result of the comparison:
        elina_scalar_cmp semantics when both have an ElinaScalar core,
        elina_interval_cmp semantics when both have an ElinaInterval core,
        -3 when only the first has an ElinaScalar core,
        +3 when only the second has an ElinaScalar core.
        None on failure.
    """
    result = None
    try:
        c_fun = elina_auxiliary_api.elina_coeff_cmp
        c_fun.argtypes = [ElinaCoeffPtr, ElinaCoeffPtr]
        c_fun.restype = c_int
        result = c_fun(coeff1, coeff2)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_cmp" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaCoeffPtr to the function')
    return result
def elina_coeff_equal(coeff1, coeff2):
    """
    Test two ElinaCoeff-s for equality.

    Parameters
    ----------
    coeff1 : ElinaCoeffPtr
        First operand.
    coeff2 : ElinaCoeffPtr
        Second operand.

    Returns
    -------
    result : c_bool
        Result of the equality test, or None on failure.
    """
    result = None
    try:
        c_fun = elina_auxiliary_api.elina_coeff_equal
        c_fun.argtypes = [ElinaCoeffPtr, ElinaCoeffPtr]
        c_fun.restype = c_bool
        result = c_fun(coeff1, coeff2)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_equal" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaCoeffPtr to the function')
    return result
def elina_coeff_zero(coeff):
    """
    Test if an ElinaCoeff is a zero ElinaScalar or an ElinaInterval with zero bounds.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be tested.

    Returns
    -------
    result : c_bool
        Result of the zero test, or None on failure.
    """
    result = None
    try:
        elina_coeff_zero_c = elina_auxiliary_api.elina_coeff_zero
        # Fix: restype was c_int although the documented contract and the
        # sibling predicates (elina_coeff_equal, elina_coeff_equal_int) use c_bool.
        elina_coeff_zero_c.restype = c_bool
        elina_coeff_zero_c.argtypes = [ElinaCoeffPtr]
        result = elina_coeff_zero_c(coeff)
    except Exception:
        print('Problem with loading/calling "elina_coeff_zero" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr to the function')
    return result
def elina_coeff_equal_int(coeff, i):
    """
    Test if an ElinaCoeff equals an integer.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be tested for equality.
    i : c_int
        Integer that needs to be tested for equality.

    Returns
    -------
    result : c_bool
        Result of the equality test, or None on failure.
    """
    result = None
    try:
        c_fun = elina_auxiliary_api.elina_coeff_equal_int
        c_fun.argtypes = [ElinaCoeffPtr, c_int]
        c_fun.restype = c_bool
        result = c_fun(coeff, i)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_equal_int" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, c_int to the function')
    return result
# ====================================================================== #
# Other operations
# ====================================================================== #
def elina_coeff_neg(coeff1, coeff2):
    """
    Set one ElinaCoeff to the negation of another.

    Parameters
    ----------
    coeff1 : ElinaCoeffPtr
        Destination.
    coeff2 : ElinaCoeffPtr
        Source.

    Returns
    -------
    None
    """
    try:
        c_fun = elina_auxiliary_api.elina_coeff_neg
        c_fun.argtypes = [ElinaCoeffPtr, ElinaCoeffPtr]
        c_fun.restype = None
        c_fun(coeff1, coeff2)
    except BaseException:
        print('Problem with loading/calling "elina_coeff_neg" from "libelinaux.so"')
        print('Make sure you are passing ElinaCoeffPtr, ElinaCoeffPtr to the function')
def elina_coeff_hash(coeff):
    """
    Calculate the hash code of an ElinaCoeff.

    Parameters
    ----------
    coeff : ElinaCoeffPtr
        Pointer to the ElinaCoeff that needs to be hashed.

    Returns
    -------
    result : c_long
        The resulting hash, or None on failure.
    """
    result = None
    try:
        elina_coeff_hash_c = elina_auxiliary_api.elina_coeff_hash
        elina_coeff_hash_c.restype = c_long
        elina_coeff_hash_c.argtypes = [ElinaCoeffPtr]
        result = elina_coeff_hash_c(coeff)
    except Exception:
        print('Problem with loading/calling "elina_coeff_hash" from "libelinaux.so"')
        # Fix: the hint previously named the return type (c_long) instead of
        # the expected argument type.
        print('Make sure you are passing ElinaCoeffPtr to the function')
    return result
| 25,385 | 8,993 |
from flask import current_app, request
from functools import wraps
from typing import Set
def allowed_params(params: Set[str]):
    """Decorator factory: reject JSON request bodies with keys outside *params*."""
    def decorator(endpoint):
        @wraps(endpoint)
        def verify_fields(*args, **kwargs):
            # Any key not in the allowed set makes the whole request invalid.
            if request.json.keys() - params:
                return invalid_field_error(params)
            return endpoint(*args, **kwargs)
        return verify_fields
    return decorator
def invalid_field_error(field_set):
    """Generic 400 response in production; a loud exception everywhere else."""
    if current_app.env != "production":
        raise GatekeeperError(field_set)
    return {"message": "Invalid request field"}, 400
class GatekeeperError(Exception):
    """Raised (outside production) when a request body contains unexpected fields."""

    def __init__(self, field_set, message="Invalid request field"):
        self.field_set = field_set
        self.message = message
        super().__init__(message)

    def __str__(self):
        return "{0}: request fields must be one or more of {1}".format(
            self.message, self.field_set)
| 936 | 263 |
import os
import pandas as pd
from brats_toolkit.preprocessor import Preprocessor
# Instantiate the BraTS toolkit preprocessor (each call below runs Docker).
prep = Preprocessor()
## convert mapping info
## survival
# NOTE(review): the two CSVs below are loaded but never used afterwards in
# this script — confirm whether the mapping/survival data is still needed.
name_mapping = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData\name_mapping.csv"
survival_info = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData\survival_info.csv"
df_name_mapping = pd.read_csv(name_mapping)
df_survival_info = pd.read_csv(survival_info)
root_path_train = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData"
outputDir = r"E:\Datasets\BraTS challenge\Output\Output_training"
list_of_dir = os.listdir(root_path_train)
for name_of_file in list_of_dir:
    # Skip the CSV metadata files; everything else is a per-case folder.
    if name_of_file.endswith('.csv'):
        continue
    # Path to the case folder; list the modality files inside it.
    readable_path = os.path.join(root_path_train , name_of_file)
    list_of_zips = os.listdir(readable_path)
    # Collect absolute paths and sort so the alphabetical modality order
    # (flair, seg, t1, t1ce, t2) is deterministic for the indexing below.
    list_sort = []
    outpath = os.path.join(outputDir, name_of_file)
    # NOTE(review): `outpath` is computed but never used — the call below
    # passes `outputDir` as outputFolder; confirm which destination is meant.
    for zips in list_of_zips:
        readable_path_2nd = os.path.join(readable_path, zips)
        list_sort.append(readable_path_2nd)
    list_sort = sorted(list_sort)
    ## index 1 (the *_seg.nii.gz segmentation) is intentionally skipped here
    examName = name_of_file
    flaFile = list_sort[0]  # ..._flair.nii.gz
    t1File = list_sort[2]  # ..._t1.nii.gz
    t1cFile = list_sort[3]  # ..._t1ce.nii.gz
    t2File = list_sort[4]  # ..._t2.nii.gz
    ## this code calls docker!
    ##dcm2niix conversion
    prep.single_preprocess(t1File=t1File,
                           t1cFile=t1cFile,
                           t2File=t2File,
                           flaFile=flaFile,
                           outputFolder=outputDir,
                           mode="cpu",
                           confirm=True,
                           skipUpdate=False,
                           gpuid='0')
# start_docker(exam_import_folder=exam_import_folder, exam_export_folder=exam_export_folder,
#              dicom_import_folder=dicom_import_folder, nifti_export_folder=nifti_export_folder, mode=self.mode, gpuid=self.gpuid)
## expected outputs (subfolders created under outputDir):
#hdbet_brats-space
#hdbet_native-space
#mask_hdbet_brats-space
#masks_hdbet-space
#niftis_brats-space
#png_slices
#registrations
| 2,756 | 1,108 |
import copy
import numbers
from gbkfit.params.interpreter import (
Interpreter,
load_exprs_file,
dump_exprs_file)
from gbkfit.params.utils import parse_param_values_strict
from gbkfit.utils import parseutils
def load_params_info_common(cls, info):
    """Parse a params ``info`` dict against ``cls.__init__`` and load any expressions file."""
    description = parseutils.make_basic_desc(cls, 'params')
    options = parseutils.parse_options_for_callable(
        info, description, cls.__init__, fun_ignore_args=['descs'])
    # An 'expressions' entry refers to an external file; load it eagerly.
    if 'expressions' in options:
        options['expressions'] = load_exprs_file(options['expressions'])
    return options
def dump_params_info_common(params, exprs_file):
    """Serialise *params* to an info dict, dumping user-written expression functions to *exprs_file*."""
    interp = params.interpreter()
    func_gen = interp.exprs_func_gen()
    func_obj = interp.exprs_func_obj()
    func_src = interp.exprs_func_src()
    info = dict(parameters=params.parameters())
    # Only dump expressions that were user-supplied (not auto-generated)
    # and whose source is actually available.
    if func_src and not func_gen:
        info['expressions'] = dump_exprs_file(exprs_file, func_obj, func_src)
    return info
class EvalParams(parseutils.BasicParserSupport):
    """Parameter container for evaluation mode: fixed values plus expressions."""

    @classmethod
    def load(cls, info, descs):
        # Parse the info dict (loading any external expressions file) first.
        opts = load_params_info_common(cls, info)
        return cls(descs, **opts)

    def dump(self, exprs_file):
        # Serialise back to an info dict; expression sources go to exprs_file.
        return dump_params_info_common(self, exprs_file)

    def __init__(self, descs, parameters, expressions=None):
        super().__init__()
        # Plain parameter values must be real numbers; None marks "missing".
        value_type = (type(None), numbers.Real)
        values, exprs = parse_param_values_strict(descs, parameters, value_type)
        # Deep copies keep this object independent of the caller's mutable data.
        self._descs = copy.deepcopy(descs)
        self._infos = values
        self._parameters = copy.deepcopy(parameters)
        # The interpreter evaluates both fixed values and expression strings.
        self._interpreter = Interpreter(descs, values | exprs, expressions)

    def descs(self):
        # Parameter descriptions (deep-copied at construction).
        return self._descs

    def infos(self):
        # Parsed plain values keyed by parameter name.
        return self._infos

    def parameters(self):
        # The raw `parameters` mapping as supplied by the caller.
        return self._parameters

    def interpreter(self):
        # Expression interpreter built over values and expressions.
        return self._interpreter
def load_eval_params(info, descs):
    """Construct an ``EvalParams`` from a parsed configuration info dict."""
    return EvalParams.load(info=info, descs=descs)
def dump_eval_params(params, exprs_file='gbkfit_config_expressions.py'):
    """Serialise *params* to an info dict, writing expression sources to *exprs_file*."""
    return params.dump(exprs_file)
| 2,081 | 686 |
import argparse
import os
import subprocess
import traceback
# Identifier and help text for this capture module, shown by the infer CLI.
MODULE_NAME = 'make/cc/clang/gcc'
MODULE_DESCRIPTION = '''Run analysis of code built with commands like:
make [target]
clang [compiler_options] <filename>
gcc [compiler_options] <filename>
cc [compiler_options] <filename>
Analysis examples:
infer -- make all
infer -- clang -c srcfile.m
infer -- gcc -c srcfile.c'''
def gen_instance(*args):
    """Factory hook invoked by the infer top-level module."""
    return MakeCapture(*args)
def mkdir_if_not_exists(path):
    """Create directory *path* if it does not exist (single level, like os.mkdir).

    Uses EAFP instead of an exists()/mkdir() pair so that a concurrent
    creation of the same directory does not raise (TOCTOU race in the
    original implementation).
    """
    try:
        os.mkdir(path)
    except OSError:
        # Swallow only "already exists as a directory"; anything else
        # (permissions, missing parent, path is a file) is re-raised.
        if not os.path.isdir(path):
            raise
def create_argparser(group_name=MODULE_NAME):
    """Build the argparse fragment this module contributes to the infer CLI.

    Do not use this function directly; it is invoked by the infer
    top-level module.
    """
    parser = argparse.ArgumentParser(add_help=False)
    module_group = parser.add_argument_group(
        "{grp} module".format(grp=MODULE_NAME),
        description=MODULE_DESCRIPTION,
    )
    # (flags, keyword arguments) pairs, in the order shown by --help.
    flag_specs = [
        (('-hd', '--headers'),
         dict(action='store_true',
              help='Analyze code in header files')),
        (('--models_mode',),
         dict(action='store_true', dest='models_mode',
              help='Mode for computing the models')),
        (('--no_failures_allowed',),
         dict(action='store_true', dest='no_failures_allowed',
              help='Fail if at least one of the translations fails')),
        (('-tm', '--testing_mode'),
         dict(action='store_true', dest='testing_mode',
              help='Testing mode for the translation: Do not translate libraries'
                   ' (including enums)')),
        (('-fs', '--frontend-stats'),
         dict(action='store_true', dest='frontend_stats',
              help='Output statistics about the capture phase to *.o.astlog')),
        (('-fd', '--frontend-debug'),
         dict(action='store_true', dest='frontend_debug',
              help='Output debugging information to *.o.astlog during capture')),
    ]
    for flags, kwargs in flag_specs:
        module_group.add_argument(*flags, **kwargs)
    return parser
class MakeCapture:
    """Runs a make/cc/clang/gcc build command under infer's compiler wrappers."""

    def __init__(self, args, cmd):
        self.args = args
        # Keep only the basename of the build tool so the PATH-prepended
        # wrapper with the same name intercepts it.
        self.cmd = [os.path.basename(cmd[0])] + cmd[1:]

    def create_results_dir(self):
        """Create the infer output directory layout (idempotent)."""
        results_dir = self.args.infer_out
        mkdir_if_not_exists(results_dir)
        for subdir in ('specs', 'captured', 'sources'):
            mkdir_if_not_exists(os.path.join(results_dir, subdir))

    def get_envvars(self):
        """Return a build environment with the wrappers dir prepended to PATH."""
        env_vars = dict(os.environ)
        env_vars['INFER_RESULTS_DIR'] = self.args.infer_out
        wrappers_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'wrappers')
        # Preserve the original PATH so wrappers can delegate to real tools.
        env_vars['INFER_OLD_PATH'] = env_vars['PATH']
        env_vars['PATH'] = '{wrappers}{sep}{path}'.format(
            wrappers=wrappers_path,
            sep=os.pathsep,
            path=env_vars['PATH'],
        )
        return env_vars

    def capture(self):
        """Run the wrapped build command; return its exit code (os.EX_OK on success)."""
        self.create_results_dir()
        env_vars = self.get_envvars()
        frontend_args = []
        if self.args.headers:
            frontend_args.append('-headers')
        if self.args.models_mode:
            frontend_args.append('-models_mode')
        if self.args.project_root:
            frontend_args.extend(['-project_root', self.args.project_root])
        if self.args.testing_mode:
            frontend_args.append('-testing_mode')
        if self.args.frontend_debug:
            frontend_args.append('-debug')
            env_vars['FCP_DEBUG_MODE'] = '1'
        if self.args.frontend_stats:
            frontend_args.append('-stats')
            env_vars['FCP_DEBUG_MODE'] = '1'
        if self.args.no_failures_allowed:
            env_vars['FCP_REPORT_FRONTEND_FAILURE'] = '1'
        # export an env variable with all the arguments to pass to InferClang
        env_vars['FCP_INFER_FRONTEND_ARGS'] = ' '.join(frontend_args)
        try:
            subprocess.check_call(self.cmd, env=env_vars)
            return os.EX_OK
        except subprocess.CalledProcessError as exc:
            if self.args.debug:
                traceback.print_exc()
            return exc.returncode
| 4,263 | 1,342 |
from common import *
from torch.autograd import Variable
def to_var(x, volatile=False):
    """Wrap tensor *x* in an autograd Variable, moved to GPU when one is available."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x, volatile=volatile)
def softmax_cross_entropy_criterion(logit, truth, is_average=True):
    """Cross-entropy loss over raw logits.

    Parameters
    ----------
    logit : FloatTensor of shape (batch, num_classes) — unnormalised scores.
    truth : LongTensor of shape (batch,) — gold class indices.
    is_average : bool — if True return the mean scalar loss, otherwise the
        per-sample loss vector of shape (batch,).

    Returns
    -------
    Tensor — scalar when ``is_average`` is True, else shape (batch,).
    """
    # The legacy `reduce=` keyword is deprecated in PyTorch; `reduction=`
    # expresses the same mean/none behaviour explicitly.
    reduction = 'mean' if is_average else 'none'
    return F.cross_entropy(logit, truth, reduction=reduction)
def metric(logit, truth, is_average=True):
    """Top-3 hit statistics for classification logits.

    When ``is_average`` is True, returns ``(precision, top)`` where ``top``
    holds cumulative top-1/2/3 accuracies and ``precision`` is the
    MAP@3-style score; otherwise returns the raw (batch, 3) hit mask.
    """
    probabilities = F.softmax(logit, 1)
    _, top3 = probabilities.topk(3, dim=1, largest=True, sorted=True)
    hits = top3.eq(truth.view(-1, 1).expand_as(top3))
    if is_average != True:  # noqa: E712 -- preserve the original's exact comparison
        return hits
    # Fraction of samples whose gold label sits at rank 1, 2 and 3.
    per_rank = hits.float().sum(0, keepdim=False) / len(truth)
    top = [per_rank[0],
           per_rank[0] + per_rank[1],
           per_rank[0] + per_rank[1] + per_rank[2]]
    precision = per_rank[0] / 1 + per_rank[1] / 2 + per_rank[2] / 3
    return precision, top
def do_valid(net, valid_loader, criterion):
    """Evaluate *net* over *valid_loader*.

    Returns a numpy array ``[mean_loss, top1_acc, top3_acc, precision]``
    where precision is the MAP@3-style score from ``metric``.
    """
    seen = 0
    prob_batches, truth_batches, loss_batches, hit_batches = [], [], [], []
    for batch_input, batch_truth, _ in valid_loader:
        batch_input = to_var(batch_input.cuda())
        batch_truth = to_var(batch_truth.cuda())
        logit = net(batch_input)
        prob = F.softmax(logit, 1)
        loss = criterion(logit, batch_truth, False)
        hits = metric(logit, batch_truth, False)
        seen += len(batch_input)
        prob_batches.append(prob.data.cpu().numpy())
        loss_batches.append(loss.data.cpu().numpy())
        hit_batches.append(hits.data.cpu().numpy())
        truth_batches.append(batch_truth.data.cpu().numpy())
    # Every sample of the sampler must have been visited exactly once.
    assert(seen == len(valid_loader.sampler))
    # ------------------------------------------------------
    prob = np.concatenate(prob_batches)  # kept for parity; unused below
    truth = np.concatenate(truth_batches).astype(np.int32).reshape(-1, 1)  # unused below
    correct = np.concatenate(hit_batches)
    loss = np.concatenate(loss_batches).mean()
    correct = correct.mean(0)
    # Cumulative top-k accuracies plus the MAP@3-style precision.
    top = [correct[0], correct[0] + correct[1], correct[0] + correct[1] + correct[2]]
    precision = correct[0] / 1 + correct[1] / 2 + correct[2] / 3
    return np.array([loss, top[0], top[2], precision])
import os
from collections import defaultdict
with open(os.path.join(os.path.dirname(__file__), "input")) as f:
    data = f.read().split('\n\n')
template, rules = data
rules = dict(x.split(' -> ') for x in rules.splitlines())

# pair -> number of occurrences of that adjacent pair in the polymer.
pair_counts = defaultdict(int)
for first, second in zip(template, template[1:]):
    pair_counts[first + second] += 1

# Each rule "AB -> C" rewrites pair "AB" into the two pairs "AC" and "CB".
rules2 = {pair: (pair[0] + ins, ins + pair[1]) for pair, ins in rules.items()}


def _polymerize(counts, steps):
    """Apply *steps* insertion rounds in place on the pair-count table."""
    for _ in range(steps):
        # Snapshot the items: the dict is mutated inside the loop.
        for pair, count in tuple(counts.items()):
            if pair in rules2 and count:
                for new_pair in rules2[pair]:
                    counts[new_pair] += count
                counts[pair] -= count


def _score(counts):
    """Most-common minus least-common element count.

    NOTE(review): like the original, only the second character of each pair
    is counted, so the polymer's first character is never tallied.
    """
    freq = defaultdict(int)
    for pair, count in counts.items():
        freq[pair[1]] += count
    return max(freq.values()) - min(freq.values())


_polymerize(pair_counts, 10)
print(_score(pair_counts))   # part 1: after 10 insertion rounds
_polymerize(pair_counts, 30)  # 30 more rounds -> 40 in total
print(_score(pair_counts))   # part 2
# -*- coding: utf-8 -*-
import os
import sys
import six
import argparse
import app
import app.taski as taski
def check_positive_int(val):
    """argparse type callable: parse *val* as an int and require it to be > 0."""
    parsed = int(val)
    if parsed > 0:
        return parsed
    raise argparse.ArgumentTypeError("%s is not a positive integer" % val)
def str2unicode(val):
    """
    Return *val* as unicode text.

    Python 2 hands argparse `bytes` from the command line while Python 3
    already provides unicode, so only Python 2 needs the decode step.
    https://stackoverflow.com/questions/22947181/dont-argparse-read-unicode-from-commandline
    """
    # Version check via the stdlib instead of the third-party `six` module
    # (`six.PY2` is defined as exactly this comparison).
    if sys.version_info[0] == 2:
        return val.decode(sys.getfilesystemencoding())
    return val
def parse(cmd=None):
    """Parse CLI arguments into a Namespace.

    Parameters
    ----------
    cmd : list of str, optional
        Explicit argument list; when None, sys.argv is used.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', help="config file path")
    # Default config file lives in the user's home directory.
    parser.set_defaults(config=os.path.expanduser("~") + "/.taski.yaml")
    parser.add_argument('-d', '--dryrun', help="dryrun", action='store_true')
    parser.add_argument('-v', '--verbose',
                        help="enable debugging", action='store_true')
    subparsers = parser.add_subparsers(help='available commands')
    # `plan`: schedule upcoming tasks.
    plan_parser = subparsers.add_parser('plan', help='plan tasks')
    plan_parser.add_argument('-v', '--verbose',
                             help="enable debugging", action='store_true')
    plan_parser.add_argument('-l', '--limit',
                             help='limit number of tasks to plan',
                             type=check_positive_int, default=30)
    plan_parser.add_argument('-n', '--daily-goal',
                             help='number of tasks scheduled per day',
                             type=check_positive_int, default=10)
    plan_parser.set_defaults(func=taski.plan)
    # `rank`: order tasks, optionally via a terminal UI.
    rank_parser = subparsers.add_parser('rank', help='rank tasks')
    rank_parser.add_argument('-v', '--verbose',
                             help="enable debugging", action='store_true')
    rank_parser.add_argument('-p', '--project', help='project name',
                             type=str2unicode)
    rank_parser.add_argument('-t', '--tui', help='Use terminal UI for ranking',
                             default=False, action='store_true')
    rank_parser.set_defaults(func=taski.rank)
    # `show`: inspect tokens, stats, config and task history.
    show_parser = subparsers.add_parser('show', help='show things')
    show_parser.add_argument('show_cmd', help='show things',
                             choices=["api_token", "stats", "config", "old_tasks", "completed_tasks"])
    show_parser.add_argument(
        '--since', help='show completed task since this date. Format "2007-4-29T10:13"')
    show_parser.add_argument(
        '--until', help='show completed task until this date. Format "2007-4-29T10:13"')
    show_parser.set_defaults(since=None)
    show_parser.set_defaults(until=None)
    show_parser.set_defaults(func=taski.show)
    # `dump`: export tasks to CSV.
    dump_parser = subparsers.add_parser('dump', help='dump tasks to csv file: todoist.csv')
    dump_parser.add_argument('-f', '--file', help="output file name",
                             default="taski.csv")
    dump_parser.add_argument('-c', '--completed', help="include completed tasks",
                             action='store_true', default=False)
    dump_parser.add_argument('-v', '--verbose',
                             help="enable debugging", action='store_true')
    dump_parser.set_defaults(func=taski.dump)
    # `version`: print and exit; uses quick_func so no config is loaded.
    version_parser = subparsers.add_parser(
        'version', help='print version number')
    version_parser.set_defaults(
        quick_func=lambda args: sys.stdout.write(app.VERSION + "\n"))
    test_parser = subparsers.add_parser('test', help="¯\_(ツ)_/¯")
    test_parser.set_defaults(func=taski.test)
    if cmd:
        args = parser.parse_args(cmd)
    else:
        args = parser.parse_args()
    return args
| 3,793 | 1,184 |
from django.conf.urls import patterns
# Legacy-style URLconf: string view paths via django.conf.urls.patterns()
# (this API was removed in Django 1.10; kept for the version this app targets).
urlpatterns = patterns('',
    (r'^$', 'tardis.apps.equipment.views.index'),
    (r'^search/$', 'tardis.apps.equipment.views.search'),
    # Numeric ids must be matched before the \w+ key pattern below,
    # since \w+ also matches digit-only strings.
    (r'^(?P<object_id>\d+)/$',
        'tardis.apps.equipment.views.view_id'),
    (r'^(?P<object_key>\w+)/$',
        'tardis.apps.equipment.views.view_key'),
)
| 467 | 150 |
"""The set of functions that enable Anvil to work in the given DCC."""
import api_proxy
import dcc_plugin
__all__ = ['api_proxy',
'dcc_plugin']
| 156 | 50 |
from typing import Dict, Optional, List, Any
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2VecEncoder, TimeDistributed, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules import FeedForward
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.attention import DotProductAttention
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, RegularizerApplicator
import allennlp.nn.util as util
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from nlp_uncertainty_ssl.metrics.jaccard_index import JaccardIndex
@Model.register("emotion_classifier")
class EmotionClassifier(Model):
    """
    The ``emotion_classifier`` is a multi label classifier (predict 0-N labels per
    sample).

    Parameters
    ----------
    vocab : ``Vocabulary``, required
        A Vocabulary, required in order to compute sizes for input/output projections.
    text_field_embedder : ``TextFieldEmbedder``, required
        Used to embed the tokens ``TextField`` we get as input to the model.
    encoder : ``Seq2SeqEncoder``, optional (default=None)
        The encoder that we will use in between embedding tokens and predicting output tags.
    label_namespace : ``str``, optional (default=``labels``)
        This is needed to compute the SpanBasedF1Measure metric.
        Unless you did something unusual, the default value should be what you want.
    feedforward : ``FeedForward``, optional, (default = None).
        An optional feedforward layer to apply after the encoder.
    label_encoding : ``str``, optional (default=``None``)
        Label encoding to use when calculating span f1.
        Valid options are "BIO", "BIOUL", "IOB1", "BMES".
        Required if ``calculate_span_f1`` is true.
    calculate_span_f1 : ``bool``, optional (default=``None``)
        Calculate span-level F1 metrics during training. If this is ``True``, then
        ``label_encoding`` is required. If ``None`` and
        label_encoding is specified, this is set to ``True``.
        If ``None`` and label_encoding is not specified, it defaults
        to ``False``.
    dropout: ``float``, optional (default=``None``). Use `Variational Dropout
        <https://arxiv.org/abs/1512.05287>`_ for sequence and normal
        dropout for non sequences.
    verbose_metrics : ``bool``, optional (default = False)
        If true, metrics will be returned per label class in addition
        to the overall statistics.
    initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
        Used to initialize the model parameters.
    regularizer : ``RegularizerApplicator``, optional (default=``None``)
        If provided, will be used to calculate the regularization penalty during training.
    """
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 label_namespace: str = "labels",
                 encoder: Optional[Seq2VecEncoder] = None,
                 seq_encoder: Optional[Seq2SeqEncoder] = None,
                 feedforward: Optional[FeedForward] = None,
                 dropout: Optional[float] = None,
                 incl_neutral: Optional[bool] = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_labels = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        self.seq_encoder = seq_encoder
        # With a Seq2Seq encoder, token states are pooled into one vector
        # via a learnt dot-product attention query (see reset_parameters).
        if self.seq_encoder is not None:
            self.attention_vector = Parameter(torch.Tensor(self.seq_encoder.get_output_dim()))
            self.attention_layer = DotProductAttention(normalize=True)
        embedding_output_dim = self.text_field_embedder.get_output_dim()
        if dropout is not None:
            self.dropout = torch.nn.Dropout(dropout)
            self.variational_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            # NOTE(review): forward's seq_encoder branch calls self.dropout /
            # self.variational_dropout unconditionally, so combining
            # seq_encoder with dropout=None would raise — confirm intended.
        self._feedforward = feedforward
        # Output dimension of the last representation layer that feeds the
        # per-label projection heads.
        if feedforward is not None:
            output_dim = feedforward.get_output_dim()
        elif encoder is not None:
            output_dim = self.encoder.get_output_dim()
        elif seq_encoder is not None:
            output_dim = self.seq_encoder.get_output_dim()
        else:
            output_dim = embedding_output_dim
        # Have to create a tag projection layer for each label in the
        # multi label classifier
        self._tag_projection_layers: Any = []
        for k in range(self.num_labels):
            tag_projection_layer = Linear(output_dim, 1)
            self.add_module(f'tag_projection_layer_{k}', tag_projection_layer)
            self._tag_projection_layers.append(tag_projection_layer)
        self.output_activation = torch.nn.Sigmoid()
        self.loss_criterion = torch.nn.BCEWithLogitsLoss(reduction='mean')
        self.incl_neutral = incl_neutral
        self.metrics = {"jaccard_index": JaccardIndex(self.incl_neutral)}
        if encoder is not None:
            check_dimensions_match(embedding_output_dim, encoder.get_input_dim(),
                                   "text field embedding dim", "encoder input dim")
        if feedforward is not None and encoder is not None:
            check_dimensions_match(encoder.get_output_dim(), feedforward.get_input_dim(),
                                   "encoder output dim", "feedforward input dim")
        elif feedforward is not None and encoder is None:
            check_dimensions_match(embedding_output_dim, feedforward.get_input_dim(),
                                   "text field output dim", "feedforward input dim")
        if self.seq_encoder is not None:
            self.reset_parameters()
        initializer(self)

    def reset_parameters(self):
        '''
        Intitalises the attention vector used to pool Seq2Seq encoder output.
        '''
        torch.nn.init.uniform_(self.attention_vector, -0.01, 0.01)

    @overrides
    def forward(self,  # type: ignore
                tokens: Dict[str, torch.LongTensor],
                labels: torch.LongTensor = None,
                metadata: List[Dict[str, Any]] = None
                ) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Parameters
        ----------
        tokens : ``Dict[str, torch.LongTensor]``, required
            The output of ``TextField.as_array()``, which should typically be passed directly to a
            ``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
            tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
            Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
            for the ``TokenIndexers`` when you created the ``TextField`` representing your
            sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
            which knows how to combine different word representations into a single vector per
            token in your input.
        labels : ``torch.LongTensor``, optional (default = ``None``)
            A torch tensor representing the multiple labels that the sample
            can be as a one hot vector where each True label is 1 and the
            rest 0.
            ``(batch_size, num_labels)``.
        metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            metadata containg:
            1. ``text`` - Original sentence
            2. ``words`` - Tokenised words from the sentence
            3. ``ID`` - Optionally the ID of the sample

        Returns
        -------
        An output dictionary consisting of:
        logits : ``torch.FloatTensor``
            The logits that are the output of the ``N`` tag projection layers
            where each projection layer represents a different tag.
        probs: ``torch.FloatTensor``
            A tensor of shape ``(batch_size, num_labels)``
            The probability that the sample is one of those labels. > 0.5
            suggests that a label is associated to that sample.
        labels : ``List[List[int]]``
            The predicted labels where the inner list represents the multi label
            classification.
        loss : ``torch.FloatTensor``, optional
            A scalar loss to be optimised. Only computed if gold label ``labels`` are provided.
        words : ``List[List[str]]``
            The tokens that were given as input
        text: ``List[str]``
            The text that was given to the tokeniser.
        ID: ``List[str]``
            The ID that is associated to the training example. Only returned if the ``ID`` are provided.
        """
        embedded_text_input = self.text_field_embedder(tokens)
        mask = util.get_text_field_mask(tokens)
        encoded_text = embedded_text_input
        batch_size = embedded_text_input.shape[0]
        if self.dropout is not None:
            encoded_text = self.variational_dropout(encoded_text)
        if self.seq_encoder is not None:
            encoded_text = self.seq_encoder(encoded_text, mask)
            encoded_text = self.variational_dropout(encoded_text)
            # Pool the token states into one vector per sample with the
            # learnt attention query.
            attention_vector = self.attention_vector.unsqueeze(0).expand(batch_size, -1)
            attention_weights = self.attention_layer(attention_vector,
                                                     encoded_text,
                                                     mask)
            attention_weights = attention_weights.unsqueeze(-1)
            weighted_encoded_text_seq = encoded_text * attention_weights
            weighted_encoded_text_vec = weighted_encoded_text_seq.sum(1)
            encoded_text = self.dropout(weighted_encoded_text_vec)
        if self.encoder is not None:
            encoded_text = self.encoder(encoded_text, mask)
            if self.dropout is not None:
                encoded_text = self.dropout(encoded_text)
        # Dropout is applied after each layer for feed forward if specified
        # in the config.
        if self._feedforward is not None:
            encoded_text = self._feedforward(encoded_text)
        # NOTE(review): allocated on the default (CPU) device; running the
        # model on GPU would need `device=encoded_text.device` here and for
        # the labels below — confirm this model is used CPU-only.
        all_label_logits = torch.empty(batch_size, self.num_labels)
        for i in range(len(self._tag_projection_layers)):
            tag_projection = getattr(self, f'tag_projection_layer_{i}')
            i_tag_predictions = tag_projection(encoded_text).reshape(-1)
            all_label_logits[:, i] = i_tag_predictions
        probs = self.output_activation(all_label_logits)
        # Threshold each independent sigmoid at 0.5 for the multi-label call.
        predicted_labels = probs > 0.5
        output = {'probs': probs, 'logits': all_label_logits,
                  'labels': predicted_labels}
        if labels is not None:
            # BCEWithLogitsLoss requires float targets.
            labels = labels.type(torch.FloatTensor)
            loss = self.loss_criterion(all_label_logits, labels)
            output["loss"] = loss
            for metric in self.metrics.values():
                metric(predicted_labels, labels)
        if metadata is not None:
            words, texts, ids = [], [], []
            for sample in metadata:
                words.append(sample['words'])
                texts.append(sample['text'])
                if 'ID' in sample:
                    ids.append(sample['ID'])
            output["words"] = words
            output["text"] = texts
            if ids:
                output['ID'] = ids
        return output

    @overrides
    def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Converts the labels to the actual labels. ``output_dict["readable_labels"]``
        is a list of lists which will contain zero or more readable labels.
        The type associated to the value of ``output_dict["readable_labels"]`` is
        List[List[str]].
        """
        readable_labels: List[List[str]] = []
        for sample in output_dict['labels']:
            sample_labels: List[str] = []
            sample: List[int]
            # This should be a list of 0's and 1's
            for index, multi_label in enumerate(sample):
                if multi_label:
                    word_label = self.vocab.get_token_from_index(index, namespace=self.label_namespace)
                    sample_labels.append(word_label)
            readable_labels.append(sample_labels)
        output_dict['readable_labels'] = readable_labels
        return output_dict

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        # Collect each configured metric's current value (optionally resetting).
        metrics_to_return = {metric_name: metric.get_metric(reset) for
                             metric_name, metric in self.metrics.items()}
        return metrics_to_return
import numpy as np
import math
from statistics import median
from scipy.stats import skew
import weightedstats as ws
from statsmodels.stats.stattools import medcouple
class Med_couple:
    """Medcouple: a robust, median-based measure of distribution skewness.

    Provides both a naive O(p*q) reference implementation and the fast
    kth-pair selection algorithm (Johnson & Mizoguchi style) over the
    kernel matrix H.
    """
    def __init__(self,data):
        self.data = np.sort(data,axis = None)[::-1] # sorted decreasing
        self.med = np.median(self.data)
        # Scale factor: twice the largest magnitude, so all z values lie in [-0.5, 0.5].
        self.scale = 2*np.amax(np.absolute(self.data))
        # Median-centred, rescaled points above (Zplus) and below (Zminus)
        # the median; points equal to the median appear in both lists.
        self.Zplus = [(x-self.med)/self.scale for x in self.data if x>=self.med]
        self.Zminus = [(x-self.med)/self.scale for x in self.data if x<=self.med]
        self.p = len(self.Zplus)
        self.q = len(self.Zminus)

    def H(self,i,j):
        # Medcouple kernel h(z+_i, z-_j); the tie case (both equal the
        # median, so a == b == 0) uses the sign convention below.
        a = self.Zplus[i]
        b = self.Zminus[j]
        if a==b:
            return np.sign(self.p - 1 - i - j)
        else:
            return (a+b)/(a-b)

    def greater_h(self,u):
        # For each row i, P[i] = index of the last column j with H(i, j) > u.
        # Exploits that H is monotone along rows/columns, so j only advances.
        P = [0]*self.p
        j = 0
        for i in range(self.p-1,-1,-1):
            while j < self.q and self.H(i,j)>u:
                j+=1
            P[i]=j-1
        return P

    def less_h(self,u):
        # For each row i, Q[i] = index of the first column j with H(i, j) >= u.
        Q = [0]*self.p
        j = self.q - 1
        for i in range(self.p):
            while j>=0 and self.H(i,j) < u:
                j=j-1
            Q[i]=j+1
        return Q

    #Kth pair algorithm (Johnson & Mizoguchi)
    def kth_pair_algorithm(self):
        """Select the median of all H(i, j) values without materialising the
        p*q kernel matrix, by repeatedly narrowing per-row [L, R] windows
        around the weighted median of row medians."""
        L = [0]*self.p
        R = [self.q-1]*self.p
        Ltotal = 0
        Rtotal = self.p*self.q
        medcouple_index = math.floor(Rtotal / 2)
        while Rtotal - Ltotal > self.p:
            # Rows whose candidate window is non-empty.
            middle_idx = [i for i in range(self.p) if L[i]<=R[i]]
            row_medians = [self.H(i,math.floor((L[i]+R[i])/2)) for i in middle_idx]
            weight = [R[i]-L[i] + 1 for i in middle_idx]
            WM = ws.weighted_median(row_medians,weights = weight)
            # Partition the remaining candidates around the pivot WM.
            P = self.greater_h(WM)
            Q = self.less_h(WM)
            Ptotal = np.sum(P)+len(P)
            Qtotal = np.sum(Q)
            if medcouple_index <= Ptotal-1:
                R = P.copy()
                Rtotal = Ptotal
            else:
                if medcouple_index > Qtotal - 1:
                    L = Q.copy()
                    Ltotal = Qtotal
                else:
                    # The pivot itself is the sought order statistic.
                    return WM
        # Few enough candidates left: gather them and select directly.
        remaining = np.array([])
        for i in range(self.p):
            for j in range(L[i],R[i]+1):
                remaining = np.append(remaining,self.H(i,j))
        find_index = medcouple_index-Ltotal
        k_minimum_element = remaining[np.argpartition(remaining,find_index)]
        # print(find_index,'tim trong mang ',sorted(remaining))
        return k_minimum_element[find_index]

    def naive_algorithm_testing(self):
        # O(p*q) reference: median over the full kernel matrix.
        result = [self.H(i,j) for i in range(self.p) for j in range(self.q)]
        return np.median(result)
if __name__ == '__main__':
    # Compare this implementation against statsmodels' medcouple on random
    # integer samples and report the accumulated absolute deviation
    # (should be ~0 if the fast algorithm is correct).
    total_abs_error = 0  # renamed: `sum` shadowed the builtin in the original
    for i in range(1000):
        data = np.random.randint(low=0, high=200000, size=1000)
        A = Med_couple(data)
        total_abs_error += abs(medcouple(data) - A.kth_pair_algorithm())
        # print(skew(data))
        # print("kth", A.kth_pair_algorithm())
        # print("naive my code", A.naive_algorithm_testing())
        # print("naive", medcouple(data))
    print(total_abs_error)
from .feedback_view import feedback_view # noqa
| 49 | 16 |
from sqlalchemy.orm import relationship
from sqlalchemy import and_
def create_attribute_associator(entity_id_col, eav_cls, eav_entity_id_col, eav_attr_col, eav_value_col):
    '''
    Returns a class method that allows one to associate attributes in an Entity-Attribute-Value table
    with a sqlalchemy class and then access those attributes as properties of the entity class.
    Example usage:
    >>> from sqlalchemy import Column, ForeignKey, Index, Integer, String
    >>> from sqlalchemy.orm import relationship
    >>> from sqlalchemy.ext.declarative import declarative_base
    >>> Base = declarative_base()
    >>> metadata = Base.metadata
    >>>
    >>> class Eav(Base):
    ...     __tablename__ = 'eav'
    ...     __table_args__ = (
    ...         Index('e_a_uq', 'entity_id', 'attribute', unique=True),
    ...     )
    ...     id = Column(Integer, primary_key=True)
    ...     entity_id = Column(ForeignKey('entity.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
    ...     attribute = Column(String(255), nullable=False)
    ...     value = Column(String(255))
    ...
    >>>
    >>> class Entity(Base):
    ...     __tablename__ = 'entity'
    ...     id = Column(Integer, primary_key=True)
    ...     name = Column(String(255), nullable=False)
    ...     _add_attribute = create_attribute_associator(id, Eav, Eav.entity_id, Eav.attribute, Eav.value)
    ...
    >>> Entity._add_attribute('foo')
    >>> Entity._add_attribute('bar')
    >>>
    >>> dir(Entity)
    ['__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__',
    '__mapper__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
    '__str__', '__subclasshook__', '__table__', '__tablename__', '__weakref__', '_add_attribute', '_bar_get',
    '_bar_obj', '_bar_set', '_decl_class_registry', '_foo_get', '_foo_obj', '_foo_set', '_sa_class_manager',
    'bar', 'foo', 'id', 'metadata', 'name']
    :param entity_id_col: The id column of your entity
    :param eav_cls: The sqlalchemy class of the entity attribute value (EAV) table
    :param eav_entity_id_col: The foreign key column from the EAV table to the entity table
    :param eav_attr_col: The EAV table column that stores the attribute name
    :param eav_value_col: The EAV table column that stores the attribute value
    :return: class method to with signature like add_attribute(cls, attr_name, lazy='joined')
    '''
    # Column attribute names, used to build/read EAV instances generically.
    attr_col_name = eav_attr_col.key
    value_col_name = eav_value_col.key
    @classmethod
    def add_attribute(cls, attr_name, lazy='joined'):
        # Names of the generated relationship and accessor class attributes.
        obj_name = '_%s_obj' % attr_name
        getter_name = '_%s_get' % attr_name
        setter_name = '_%s_set' % attr_name
        # One-to-one relationship restricted to the EAV rows holding
        # exactly this attribute for this entity.
        rel = relationship(eav_cls,
                           primaryjoin=and_(entity_id_col == eav_entity_id_col,
                                            eav_attr_col == attr_name),
                           uselist=False, lazy=lazy)
        def getter(self):
            # NOTE(review): when no EAV row exists yet, `obj` is None and the
            # second getattr raises AttributeError — confirm callers expect
            # that rather than a None return.
            obj = getattr(self, obj_name)
            return getattr(obj, value_col_name)
        def setter(self, value):
            obj = getattr(self, obj_name)
            if obj is None:
                # First write: create the EAV row for this attribute.
                obj = eav_cls(**{attr_col_name: attr_name, value_col_name: value})
                setattr(self, obj_name, obj)
            else:
                setattr(obj, value_col_name, value)
        prop = property(getter, setter)
        setattr(cls, obj_name, rel)
        setattr(cls, getter_name, getter)
        setattr(cls, setter_name, setter)
        setattr(cls, attr_name, prop)
    return add_attribute
"""A script that defines a simple FC model for function solving"""
import torch.nn as nn
import numpy as np
import torch
class Net(nn.Module):
    """Small fully-connected network mapping a feature vector to clamped outputs.

    NOTE(review): `__init__` and `generate_noise` call `.cuda()` directly,
    so this model requires a GPU as written.
    """
    def __init__(self, model_params):
        super(Net, self).__init__()
        model_params = self.ingest_params_lvl1(model_params)
        ins = model_params['in features']
        outs = model_params['number of outputs']
        self.out_size = outs
        self.fc1 = nn.Linear(ins, 512)
        self.fc2 = nn.Linear(512, 16)
        # NOTE(review): fc3 expects 64 inputs but fc2 outputs 16; fc3 is only
        # referenced in the commented-out forward path, so its input size
        # would need fixing before re-enabling it.
        self.fc3 = nn.Linear(64, 32)
        self.fc4 = nn.Linear(16, outs)
        self.drop = nn.Dropout(0.1)
        self.act = nn.ReLU()
        #self.act = nn.Tanh()
        # State for the (currently unused) repeat/reset mechanism below.
        self.reps = 20
        self.rep = 0
        self.step = 0
        self.val = torch.zeros(outs).half().cuda()

    def ingest_params_lvl1(self, model_params):
        """Merge user-supplied params over the defaults; returns the full dict."""
        assert type(model_params) is dict
        default_params = {
            "in features": 128,
            "number of outputs": 18
        }
        default_params.update(model_params)  # Update with user selections
        return default_params

    def generate_noise(self, x):
        """Gaussian noise (std 0.3) with the same shape as *x*, on GPU."""
        n = torch.empty_like(x)
        n.normal_(mean=0., std=0.3)
        return n.cuda()

    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        # NOTE(review): returns a detached numpy array, so no gradients flow
        # through this forward — presumably used with a gradient-free
        # optimiser; confirm before using with backprop.
        x = self.fc1(x)
        x = self.act(x)
        #x = self.drop(x)
        x = self.fc2(x)
        x = self.act(x)
        #x = self.drop(x)
        #x = self.fc3(x)
        #x = self.act(x)
        #x = self.drop(x)
        # Clamp outputs in place to the [-1, 1] action range.
        x = self.fc4(x).squeeze().clamp_(-1., 1.)
        #self.repeat(x)
        return x.cpu().detach().numpy()

    def repeat(self, x):
        # Hold `self.val` for `self.reps` steps, then re-sample it.
        if self.rep > self.reps:
            self.reset(x)
            self.rep=0
        else:
            self.rep +=1
        print(self.val, self.rep)

    def reset(self, x):
        # With probability 0.5 keep zeros, otherwise latch the current output.
        default = torch.zeros(self.out_size).cuda()
        choice = np.random.choice([0, 1], p=[0.5, 0.5])
        if choice == 0:
            self.val = default
        else:
            self.val = x.clone()
| 2,168 | 759 |
"""
Twitch OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/goodgame.html
"""
from social_core.backends import oauth
class GoodGameOAuth2(oauth.BaseOAuth2):
    """GoodGame OAuth authentication backend"""
    name = 'goodgame'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'https://api2.goodgame.ru/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://api2.goodgame.ru/oauth'
    ACCESS_TOKEN_METHOD = 'POST'
    #: TODO await when GG provide email user retrieve through scope
    #: https://goodgame.ru/topic/67865#comment427
    DEFAULT_SCOPE = ['channel.subscribers']
    REDIRECT_STATE = False

    def get_user_id(self, details, response):
        # the id lives inside the nested "user" object of the payload
        user = response["user"]
        return user.get(self.ID_KEY)

    def get_user_details(self, response):
        """Map the provider payload onto the fields social-auth expects."""
        user = response['user']
        return {
            'username': user.get('username'),
            #: currently there's no email
            'email': user.get('email'),
            'first_name': '',
            'last_name': ''
        }

    def user_data(self, access_token, *args, **kwargs):
        """Load extra profile data from the /info endpoint."""
        #: treat this as hacky as far as simple info does not return
        #: email address, but we can retrieve it from another endpoint
        query = {'access_token': access_token}
        return self.get_json('https://api2.goodgame.ru/info', params=query)
| 1,346 | 428 |
def recursive(func):
    """Publish *func* under its own name in its module globals.

    This lets bodies that have no stable name binding (e.g. functions that
    will be wrapped by ``staticmethod``) call themselves via a global lookup.

    Note: ``func.func_globals`` was the Python 2 spelling and raises
    AttributeError on Python 3; the portable attribute is ``__globals__``.

    :param func: the function to register
    :return: ``func`` unchanged, so this works as a decorator
    """
    func.__globals__[func.__name__] = func
    return func
class Test:
    """Demo of three ways a method body may (or may not) reference itself."""

    def method(self, x=False):
        # plain instance method: recursion goes through ``self``
        if not x:
            self.method("I'm method")
            return
        print(x)

    @staticmethod
    def smethod(x=False):
        # broken on purpose: ``method`` is not a global name, so the
        # recursive branch raises NameError when called without an argument
        if not x:
            method("I'm static method")
            return
        print(x)

    @staticmethod
    @recursive
    def rmethod(x=False):
        # works: @recursive injected ``rmethod`` into the module globals
        # before staticmethod wrapped it
        if not x:
            rmethod("I'm recursive method")
            return
        print(x)
# Demo: the instance method and the @recursive variant can self-recurse;
# the plain @staticmethod cannot, so the last call raises (uncaught).
test = Test()
test.method() # I'm method
test.rmethod() # I'm recursive method
test.smethod() # raises NameError: global name 'method' is not defined
| 666 | 208 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSearchWidgetWrapper.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-05'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
from qgis.gui import (QgsSearchWidgetWrapper,
QgsDefaultSearchWidgetWrapper,
QgsValueMapSearchWidgetWrapper,
QgsValueRelationSearchWidgetWrapper,
QgsCheckboxSearchWidgetWrapper,
QgsDateTimeSearchWidgetWrapper)
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsProject,
)
from qgis.PyQt.QtCore import QDateTime, QDate, QTime
from qgis.PyQt.QtWidgets import QWidget
from qgis.testing import start_app, unittest
# create the QGIS application instance once, before any test classes run
start_app()
class PyQgsSearchWidgetWrapper(unittest.TestCase):

    def testFlagToString(self):
        """Every FilterFlag must render to a non-empty string."""
        flags = (QgsSearchWidgetWrapper.EqualTo,
                 QgsSearchWidgetWrapper.NotEqualTo,
                 QgsSearchWidgetWrapper.GreaterThan,
                 QgsSearchWidgetWrapper.LessThan,
                 QgsSearchWidgetWrapper.GreaterThanOrEqualTo,
                 QgsSearchWidgetWrapper.LessThanOrEqualTo,
                 QgsSearchWidgetWrapper.Between,
                 QgsSearchWidgetWrapper.CaseInsensitive,
                 QgsSearchWidgetWrapper.Contains,
                 QgsSearchWidgetWrapper.DoesNotContain,
                 QgsSearchWidgetWrapper.IsNull,
                 QgsSearchWidgetWrapper.IsNotNull,
                 QgsSearchWidgetWrapper.IsNotBetween)
        for flag in flags:
            self.assertTrue(len(QgsSearchWidgetWrapper.toString(flag)) > 0)

    def testExclusiveFlags(self):
        """No flag may be both exclusive and non-exclusive."""
        non_exclusive = QgsSearchWidgetWrapper.nonExclusiveFilterFlags()
        for flag in QgsSearchWidgetWrapper.exclusiveFilterFlags():
            self.assertFalse(flag in non_exclusive)
class PyQgsDefaultSearchWidgetWrapper(unittest.TestCase):

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer&field=flddate:datetime",
                               "test", "memory")
        parent = QWidget()
        # string field (index 0)
        w = QgsDefaultSearchWidgetWrapper(layer, 0)
        w.initWidget(parent)
        line_edit = w.lineEdit()
        line_edit.setText('test')
        case_sensitive = w.caseSensitiveCheckBox()
        case_sensitive.setChecked(False)
        # NULL checks are independent of the entered text
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        # case-insensitive equality wraps both sides in lower()
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), 'lower("fldtxt")=lower(\'test\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), 'lower("fldtxt")<>lower(\'test\')')
        case_sensitive.setChecked(True)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'test\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'test\'')
        case_sensitive.setChecked(False)
        # case-insensitive pattern matches use ILIKE ...
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" ILIKE \'%test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" ILIKE \'%test%\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" ILIKE \'test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" ILIKE \'%test\'')
        case_sensitive.setChecked(True)
        # ... while case-sensitive ones use plain LIKE
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" LIKE \'%test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" LIKE \'%test%\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" LIKE \'test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" LIKE \'%test\'')
        case_sensitive.setChecked(False)
        # numeric field: values are emitted unquoted
        parent = QWidget()
        w = QgsDefaultSearchWidgetWrapper(layer, 1)
        w.initWidget(parent)
        # may need updating if widget layout changes:
        line_edit = w.lineEdit()
        line_edit.setText('5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"fldint">5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"fldint"<5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"fldint">=5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"fldint"<=5.5')
        # date/time/datetime: values are quoted like strings
        parent = QWidget()
        w = QgsDefaultSearchWidgetWrapper(layer, 2)
        w.initWidget(parent)
        # may need updating if widget layout changes:
        line_edit = w.lineEdit()
        line_edit.setText('2015-06-03')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"flddate"=\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"flddate"<>\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"flddate">\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"flddate"<\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"flddate">=\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"flddate"<=\'2015-06-03\'')
class PyQgsValueMapSearchWidgetWrapper(unittest.TestCase):

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        w = QgsValueMapSearchWidgetWrapper(layer, 0)
        w.setConfig({"map": [{"val1": 1},
                             {"val2": 200}]})
        combo = w.widget()
        # index 0 is the "select value" placeholder, so equality filters are empty
        cases = [(0, '', ''),
                 (1, '"fldtxt"=\'1\'', '"fldtxt"<>\'1\''),
                 (2, '"fldtxt"=\'200\'', '"fldtxt"<>\'200\'')]
        for index, equal_expr, not_equal_expr in cases:
            combo.setCurrentIndex(index)
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), equal_expr)
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), not_equal_expr)
        # numeric field: values are emitted unquoted
        w = QgsValueMapSearchWidgetWrapper(layer, 1)
        w.setConfig({"map": [{"val1": 1},
                             {"val2": 200}]})
        combo = w.widget()
        combo.setCurrentIndex(1)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=1')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>1')
class PyQgsValueRelationSearchWidgetWrapper(unittest.TestCase):

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        # setup value relation: a parent layer mapping string/int keys to display text
        parent_layer = QgsVectorLayer("Point?field=stringkey:string&field=intkey:integer&field=display:string", "parent", "memory")
        f1 = QgsFeature(parent_layer.fields(), 1)
        f1.setAttributes(['a', 1, 'value a'])
        f2 = QgsFeature(parent_layer.fields(), 2)
        f2.setAttributes(['b', 2, 'value b'])
        f3 = QgsFeature(parent_layer.fields(), 3)
        f3.setAttributes(['c', 3, 'value c'])
        parent_layer.dataProvider().addFeatures([f1, f2, f3])
        # both layers must be registered for the relation to resolve
        QgsProject.instance().addMapLayers([layer, parent_layer])
        config = {"Layer": parent_layer.id(),
                  "Key": 'stringkey',
                  "Value": 'display'}
        w = QgsValueRelationSearchWidgetWrapper(layer, 0)
        w.setConfig(config)
        c = w.widget()
        # first, set it to the "select value" item
        c.setCurrentIndex(0)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        # placeholder selection produces empty equality expressions
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '')
        c.setCurrentIndex(1)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'a\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'a\'')
        c.setCurrentIndex(2)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'b\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'b\'')
        # try with numeric field: keys are emitted unquoted
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['Key'] = 'intkey'
        w.setConfig(config)
        c = w.widget()
        c.setCurrentIndex(c.findText('value c'))
        self.assertEqual(c.currentIndex(), 3)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
        # try with allow null set: expressions should be unchanged
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['AllowNull'] = True
        w.setConfig(config)
        c = w.widget()
        c.setCurrentIndex(c.findText('value c'))
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
        # try with line edit (completer mode): the widget is a QLineEdit here
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['UseCompleter'] = True
        w.setConfig(config)
        l = w.widget()
        l.setText('value b')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=2')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>2')
class PyQgsCheckboxSearchWidgetWrapper(unittest.TestCase):

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        settings = {"CheckedState": 5,
                    "UncheckedState": 9}
        # string field: checkbox states map to quoted string values
        w = QgsCheckboxSearchWidgetWrapper(layer, 0)
        w.setConfig(settings)
        checkbox = w.widget()
        for state, expected in ((True, '"fldtxt"=\'5\''), (False, '"fldtxt"=\'9\'')):
            checkbox.setChecked(state)
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), expected)
        # numeric field: values are emitted unquoted
        w = QgsCheckboxSearchWidgetWrapper(layer, 1)
        w.setConfig(settings)
        checkbox = w.widget()
        for state, expected in ((True, '"fldint"=5'), (False, '"fldint"=9')):
            checkbox.setChecked(state)
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), expected)
class PyQgsDateTimeSearchWidgetWrapper(unittest.TestCase):

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=date:date&field=time:time&field=datetime:datetime", "test", "memory")
        comparisons = [(QgsSearchWidgetWrapper.EqualTo, '='),
                       (QgsSearchWidgetWrapper.NotEqualTo, '<>'),
                       (QgsSearchWidgetWrapper.GreaterThan, '>'),
                       (QgsSearchWidgetWrapper.LessThan, '<'),
                       (QgsSearchWidgetWrapper.GreaterThanOrEqualTo, '>='),
                       (QgsSearchWidgetWrapper.LessThanOrEqualTo, '<=')]

        def check_field(field_index, field_name, fmt, value, literal):
            # one wrapper per field, formatted with the given field/display format
            w = QgsDateTimeSearchWidgetWrapper(layer, field_index)
            w.setConfig({"field_format": fmt,
                         "display_format": fmt})
            w.widget().setDateTime(value)
            ref = '"%s"' % field_name
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), ref + ' IS NULL')
            self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), ref + ' IS NOT NULL')
            for flag, op in comparisons:
                self.assertEqual(w.createExpression(flag), ref + op + "'" + literal + "'")

        # date field type
        check_field(0, 'date', 'yyyy-MM-dd',
                    QDateTime(QDate(2013, 4, 5), QTime()), '2013-04-05')
        # time field type
        check_field(1, 'time', 'HH:mm:ss',
                    QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)), '13:14:15')
        # datetime field type
        check_field(2, 'datetime', 'yyyy-MM-dd HH:mm:ss',
                    QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)), '2013-04-05 13:14:15')
# allow running this test module directly with the unittest runner
if __name__ == '__main__':
    unittest.main()
| 18,717 | 5,976 |
import os
import pytest
# enable pytest's plugin-testing plugin, which provides the `testdir` fixture
pytest_plugins = [
    'pytester',
]
@pytest.fixture(autouse=True)
def copy_fixtures(testdir):
    """Copy the shared fixture files into every test's temporary directory."""
    fixtures_path = os.path.join(os.path.dirname(__file__), 'tests', 'fixtures')
    testdir.copy_example(fixtures_path)
    yield
| 220 | 87 |
import pytest
import json
from unittest import mock
from gitlabber import gitlab_tree
URL = "http://gitlab.my.com/"
TOKEN = "MOCK_TOKEN"
GROUP_URL = "http://gitlab.my.com/group"
GROUP_NAME = "group"
SUBGROUP_URL = "http://gitlab.my.com/group/subgroup"
SUBGROUP_NAME = "subgroup"
PROJECT_URL = "http://gitlab.my.com/group/subgroup/project/project.git"
PROJECT_NAME = "project"
YAML_TEST_INPUT_FILE = "tests/test-input.yaml"
YAML_TEST_OUTPUT_FILE = "tests/test-output.yaml"
JSON_TEST_OUTPUT_FILE = "tests/test-output.json"
TREE_TEST_OUTPUT_FILE = "tests/test-output.tree"
class MockNode:
    """Stand-in for a python-gitlab group/project object.

    Exposes the attribute surface the tests read: name/path, the various
    URL fields, the ``subgroups``/``projects`` managers and ``parent_id``.
    """

    def __init__(self, id, name, url, subgroups=None, projects=None, parent_id=None):
        # BUGFIX: the previous signature used ``subgroups=mock.MagicMock()``
        # (and the same for ``projects``) as defaults. Default arguments are
        # evaluated once, so every instance created without an explicit value
        # shared ONE mock object. Create a fresh mock per node instead.
        self.id = id
        self.name = name
        self.path = name
        self.url = url
        self.web_url = url
        self.ssh_url_to_repo = url
        self.http_url_to_repo = url
        self.subgroups = subgroups if subgroups is not None else mock.MagicMock()
        self.projects = projects if projects is not None else mock.MagicMock()
        self.parent_id = parent_id
class Listable:
    """Mimics the ``list()``/``get()`` API of python-gitlab manager objects."""

    def __init__(self, list_result, get_result=None, archive_result=None):
        self.list_result = list_result
        self.get_result = get_result
        self.archive_result = archive_result

    def list(self, as_list=False, archived=None):
        # archived=True  -> only the archived entry
        # archived=False -> only the live entry
        # archived=None  -> everything this listing holds
        if archived is True:
            return [self.archive_result]
        if archived is None and self.archive_result is not None:
            return [self.list_result, self.archive_result]
        return [self.list_result]

    def get(self, id):
        # fall back to the list result when no dedicated get result was set
        result = self.get_result
        return result if result is not None else self.list_result
def validate_root(node):
    # synthetic root: unnamed, carries the base URL, one group child, depth 3
    assert node.is_leaf is False
    assert node.name == ""
    assert node.url == "http://gitlab.my.com/"
    assert len(node.children) == 1
    assert node.height == 3
def validate_group(node):
    # the single group under the root: one subgroup child, height 2
    assert node.name == GROUP_NAME
    assert node.url == GROUP_URL
    assert node.is_leaf is False
    assert len(node.children) == 1
    assert node.height == 2
def validate_subgroup(node):
    # the subgroup level: one project child, height 1
    assert node.name == SUBGROUP_NAME
    assert node.url == SUBGROUP_URL
    assert node.is_leaf is False
    assert len(node.children) == 1
    assert node.height == 1
def validate_project(node):
    # leaf project node: no children at all
    assert node.name == PROJECT_NAME
    assert node.url == PROJECT_URL
    assert node.is_leaf is True
    assert len(node.children) == 0
def validate_tree(root):
    # walk root -> group -> subgroup -> project, validating each level in turn
    validate_root(root)
    group = root.children[0]
    validate_group(group)
    subgroup = group.children[0]
    validate_subgroup(subgroup)
    validate_project(subgroup.children[0])
def create_test_gitlab(monkeypatch, includes=None, excludes=None, in_file=None):
    """Build a GitlabTree wired to one mocked group -> subgroup -> project chain."""
    tree = gitlab_tree.GitlabTree(
        URL, TOKEN, "ssh", "name", includes=includes, excludes=excludes, in_file=in_file)
    subgroup = MockNode(2, SUBGROUP_NAME, SUBGROUP_URL,
                        projects=Listable(MockNode(2, PROJECT_NAME, PROJECT_URL)))
    top_group = MockNode(2, GROUP_NAME, GROUP_URL, subgroups=Listable(subgroup))
    # groups.get() resolves to the subgroup node (Listable's second positional arg)
    monkeypatch.setattr(tree.gitlab, "groups", Listable(top_group, subgroup))
    return tree
def create_test_gitlab_with_toplevel_subgroups(monkeypatch):
    """GitlabTree whose group listing mixes a top-level group and a nested one."""
    tree = gitlab_tree.GitlabTree(URL, TOKEN, "ssh", "path")
    toplevel_group = MockNode(2, GROUP_NAME, GROUP_URL)
    nested_group = MockNode(2, GROUP_NAME, GROUP_URL, parent_id=1)
    monkeypatch.setattr(tree.gitlab, "groups", Listable([toplevel_group, nested_group]))
    return tree
def create_test_gitlab_with_archived(monkeypatch, includes=None, excludes=None, in_file=None, archived=None):
    """Build a GitlabTree with parallel live and archived entities.

    Archived groups/subgroups/projects carry an ``_archived_`` prefix on both
    name and URL so tests can tell which listing an entity came from.
    (Cleanup: an unused ``Listable(subgroup_node, ...)`` local and a dead
    commented-out ``gl.print_tree()`` call were removed.)
    """
    gl = gitlab_tree.GitlabTree(
        URL, TOKEN, "ssh", "name", includes=includes, excludes=excludes, in_file=in_file, archived=archived)
    project_node = MockNode(1, PROJECT_NAME, PROJECT_URL)
    archived_project_node = MockNode(
        2, "_archived_" + PROJECT_NAME, "_archived_" + PROJECT_URL)
    projects = Listable(project_node, archive_result=archived_project_node)
    subgroup_node = MockNode(2, SUBGROUP_NAME, SUBGROUP_URL, projects=projects)
    archived_subgroup_node = MockNode(
        2, "_archived_" + SUBGROUP_NAME, "_archived_" + SUBGROUP_URL, projects=projects)
    archived_subgroups = Listable(archived_subgroup_node, archive_result=archived_subgroup_node)
    group_node = MockNode(2, GROUP_NAME, GROUP_URL, subgroups=archived_subgroups)
    archived_group_node = MockNode(2, "_archived_" + GROUP_NAME, "_archived_" + GROUP_URL,
                                   subgroups=archived_subgroups)
    # get() returns the live subgroup; archived listings return the archived group
    groups = Listable(group_node, get_result=subgroup_node, archive_result=archived_group_node)
    monkeypatch.setattr(gl.gitlab, "groups", groups)
    return gl
| 4,858 | 1,607 |
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simulator."""
import numpy as np
from mindquantum import mqbackend as mb
from mindquantum.core.circuit import Circuit
from mindquantum.core.gates import BarrierGate, Measure, MeasureResult
from mindquantum.core.gates.basic import BasicGate
from mindquantum.core.operators import Hamiltonian
from mindquantum.core.operators.hamiltonian import MODE
from mindquantum.core.parameterresolver import ParameterResolver
from mindquantum.utils import ket_string
from mindquantum.utils.type_value_check import (
_check_and_generate_pr_type,
_check_input_type,
_check_int_type,
_check_seed,
_check_value_should_not_less,
)
SUPPORTED_SIMULATOR = ['projectq']


def get_supported_simulator():
    """
    Get the simulator names supported by MindQuantum.

    Returns:
        list, the supported simulator list.
    """
    # return a copy so callers cannot mutate the module-level registry
    return list(SUPPORTED_SIMULATOR)
class Simulator:
"""
Quantum simulator that simulate quantum circuit.
Args:
backend (str): which backend you want. The supported backend can be found
in SUPPORTED_SIMULATOR
n_qubits (int): number of quantum simulator.
seed (int): the random seed for this simulator, if None, seed will generate
by `numpy.random.randint`. Default: None.
Raises:
TypeError: if `backend` is not str.
TypeError: if `n_qubits` is not int.
TypeError: if `seed` is not int.
ValueError: if `backend` is not supported.
ValueError: if `n_qubits` is negative.
ValueError: if `seed` is less than 0 or great than 2**23 - 1.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import qft
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.get_qs()
array([0.5+0.j, 0.5+0.j, 0.5+0.j, 0.5+0.j])
"""
def __init__(self, backend, n_qubits, seed=None):
    """Initialize a Simulator object."""
    # validate user input before touching any state; order determines which
    # error a caller sees first
    _check_input_type('backend', str, backend)
    _check_int_type('n_qubits', n_qubits)
    _check_value_should_not_less('n_qubits', 0, n_qubits)
    if seed is None:
        # draw a fresh seed so the run remains reproducible via self.seed
        seed = np.random.randint(1, 2**23)
    _check_seed(seed)
    if backend not in SUPPORTED_SIMULATOR:
        raise ValueError(f"backend {backend} not supported, now we support {SUPPORTED_SIMULATOR}!")
    self.backend = backend
    self.seed = seed
    self.n_qubits = n_qubits
    if backend == 'projectq':
        # C++ projectq-based backend object
        self.sim = mb.projectq(seed, n_qubits)
def copy(self):
    """
    Copy this simulator.

    Returns:
        Simulator, a copy of this simulator (backend state included).

    Examples:
        >>> from mindquantum import RX, Simulator
        >>> sim = Simulator('projectq', 1)
        >>> sim.apply_gate(RX(1).on(0))
        >>> sim.flush()
        >>> sim2 = sim.copy()
        >>> sim2.apply_gate(RX(-1).on(0))
        >>> sim2
        projectq simulator with 1 qubit (little endian).
        Current quantum state:
        1¦0⟩
    """
    duplicate = Simulator(self.backend, self.n_qubits, self.seed)
    # clone the backend state as well so nothing is shared
    duplicate.sim = self.sim.copy()
    return duplicate
def __str__(self):
    """Readable summary of the simulator plus its current quantum state."""
    state = self.get_qs()
    plural = 's' if self.n_qubits > 1 else ''
    text = f"{self.backend} simulator with {self.n_qubits} qubit{plural} (little endian)."
    text += "\nCurrent quantum state:\n"
    # small registers are printed in ket notation, larger ones as raw amplitudes
    if self.n_qubits < 4:
        text += '\n'.join(ket_string(state))
    else:
        text += state.__str__()
    return text
def __repr__(self):
    """Mirror __str__ so the interactive display also shows the state."""
    return str(self)
def reset(self):
    """
    Reset simulator to zero state.

    Examples:
        >>> from mindquantum import Simulator
        >>> from mindquantum import qft
        >>> sim = Simulator('projectq', 2)
        >>> sim.apply_circuit(qft(range(2)))
        >>> sim.reset()
        >>> sim.get_qs()
        array([1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j])
    """
    # delegate to the backend, which clears its state vector back to |0...0>
    self.sim.reset()
def flush(self):
    """
    Flush gates; only meaningful for the projectq simulator.

    The projectq simulator caches several gates and fuses them into a bigger
    gate before acting on the quantum state. The flush command asks the
    simulator to fuse the currently stored gates and apply them to the
    quantum state immediately.

    Examples:
        >>> from mindquantum import Simulator
        >>> from mindquantum import H
        >>> sim = Simulator('projectq', 1)
        >>> sim.apply_gate(H.on(0))
        >>> sim.flush()
    """
    if self.backend == 'projectq':
        # `run` forces the cached/fused gates to be applied to the state
        self.sim.run()
def apply_gate(self, gate, pr=None, diff=False):
    """
    Apply a gate on this simulator, can be a quantum gate or a measurement operator.

    Args:
        gate (BasicGate): The gate you want to apply.
        pr (Union[numbers.Number, numpy.ndarray, ParameterResolver, list]): The
            parameter for parameterized gate. Default: None.
        diff (bool): Whether to apply the derivative gate on this simulator. Default: False.

    Returns:
        int or None, if the gate is a measure gate, then return a collapsed state, otherwise
        return None.

    Raises:
        TypeError: if `gate` is not a BasicGate.
        ValueError: if any qubit of `gate` is higher than simulator qubits.
        ValueError: if `gate` is parameterized, but no parameter supplied.
        TypeError: the `pr` is not a ParameterResolver if `gate` is parameterized.

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Simulator
        >>> from mindquantum import RY, Measure
        >>> sim = Simulator('projectq', 1)
        >>> sim.apply_gate(RY('a').on(0), np.pi/2)
        >>> sim.get_qs()
        array([0.70710678+0.j, 0.70710678+0.j])
        >>> sim.apply_gate(Measure().on(0))
        1
        >>> sim.get_qs()
        array([0.+0.j, 1.+0.j])
    """
    _check_input_type('gate', BasicGate, gate)
    # BarrierGate has no effect on the state, so it is skipped entirely
    if not isinstance(gate, BarrierGate):
        # BUGFIX: the previous max(max(obj_qubits, ctrl_qubits)) compared the
        # two qubit LISTS lexicographically, so e.g. obj=[2], ctrl=[1, 5]
        # yielded 2 instead of the true maximum 5. Take the max over the
        # combined indices instead (obj_qubits is always non-empty).
        gate_max = max([*gate.obj_qubits, *gate.ctrl_qubits])
        # qubit indices are 0-based, so index == n_qubits is also out of range
        if gate_max >= self.n_qubits:
            raise ValueError(f"qubits of gate {gate} is higher than simulator qubits.")
        if isinstance(gate, Measure):
            # measurement collapses the state and returns the sampled bit
            return self.sim.apply_measure(gate.get_cpp_obj())
        if pr is None:
            if gate.parameterized:
                raise ValueError("apply a parameterized gate needs a parameter_resolver")
            self.sim.apply_gate(gate.get_cpp_obj())
        else:
            # normalize pr into a ParameterResolver matching the gate's params
            pr = _check_and_generate_pr_type(pr, gate.coeff.params_name)
            self.sim.apply_gate(gate.get_cpp_obj(), pr.get_cpp_obj(), diff)
    return None
def apply_circuit(self, circuit, pr=None):
    """
    Apply a circuit on this simulator.

    Args:
        circuit (Circuit): The quantum circuit you want to apply on this simulator.
        pr (Union[ParameterResolver, dict, numpy.ndarray, list, numbers.Number]): The
            parameter resolver for this circuit. If the circuit is not parameterized,
            this arg should be None. Default: None.

    Returns:
        MeasureResult or None, if the circuit has measure gate, then return a MeasureResult,
        otherwise return None.

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Circuit, H
        >>> from mindquantum import Simulator
        >>> sim = Simulator('projectq', 2)
        >>> sim.apply_circuit(Circuit().un(H, 2))
        >>> sim.apply_circuit(Circuit().ry('a', 0).ry('b', 1), np.array([1.1, 2.2]))
        >>> sim
        projectq simulator with 2 qubits (little endian).
        Current quantum state:
        -0.0721702531972066¦00⟩
        -0.30090405886869676¦01⟩
        0.22178317006196263¦10⟩
        0.9246947752567126¦11⟩
        >>> sim.apply_circuit(Circuit().measure(0).measure(1))
        shots: 1
        Keys: q1 q0│0.00   0.2         0.4         0.6         0.8         1.0
        ───────────┼───────────┴───────────┴───────────┴───────────┴───────────┴
                 11│▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
                   │
        {'11': 1}
    """
    _check_input_type('circuit', Circuit, circuit)
    if self.n_qubits < circuit.n_qubits:
        raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
    if circuit.has_measure_gate:
        # collect the measurement keys up front so results can be keyed by name
        res = MeasureResult()
        res.add_measure(circuit.all_measures.keys())
    if circuit.params_name:
        if pr is None:
            raise ValueError("Applying a parameterized circuit needs a parameter_resolver")
        # normalize user input (dict/array/number) into a ParameterResolver
        pr = _check_and_generate_pr_type(pr, circuit.params_name)
    else:
        # unparameterized circuits still pass an (empty) resolver to the backend
        pr = ParameterResolver()
    if circuit.has_measure_gate:
        # backend returns one sample per measure key; reshape to (1, n_keys)
        samples = np.array(
            self.sim.apply_circuit_with_measure(circuit.get_cpp_obj(), pr.get_cpp_obj(), res.keys_map)
        )
        samples = samples.reshape((1, -1))
        res.collect_data(samples)
        return res
    if circuit.params_name:
        self.sim.apply_circuit(circuit.get_cpp_obj(), pr.get_cpp_obj())
    else:
        self.sim.apply_circuit(circuit.get_cpp_obj())
    return None
def sampling(self, circuit, pr=None, shots=1, seed=None):
    """
    Sample the measurement gates of a circuit. Sampling does not change the
    current quantum state of this simulator.

    Args:
        circuit (Circuit): The circuit to evolve and sample; must contain at
            least one measurement gate.
        pr (Union[None, dict, ParameterResolver]): Parameter values for a
            parameterized circuit. Default: None.
        shots (int): How many shots to sample. Default: 1.
        seed (int): Random seed for sampling; a random int when None.
            Default: None.

    Returns:
        MeasureResult, the measurement statistics of the sampling.

    Raises:
        TypeError: if circuit is not a Circuit or pr has a wrong type.
        ValueError: if the circuit has no measurement gate, is too large for
            this simulator, or is parameterized but pr is None.
    """
    # BUG FIX: validate the type first — previously `circuit.all_measures`
    # was accessed before the type check, so a non-Circuit argument raised
    # AttributeError instead of the intended TypeError.
    _check_input_type("circuit", Circuit, circuit)
    if not circuit.all_measures.map:
        raise ValueError("circuit must have at least one measurement gate.")
    if self.n_qubits < circuit.n_qubits:
        raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
    _check_int_type("sampling shots", shots)
    _check_value_should_not_less("sampling shots", 1, shots)
    if circuit.parameterized:
        if pr is None:
            raise ValueError("Sampling a parameterized circuit need a ParameterResolver")
        if not isinstance(pr, (dict, ParameterResolver)):
            raise TypeError("pr requires a dict or a ParameterResolver, but get {}!".format(type(pr)))
        pr = ParameterResolver(pr)
    else:
        pr = ParameterResolver()
    if seed is None:
        seed = int(np.random.randint(1, 2 << 20))
    else:
        _check_seed(seed)
    res = MeasureResult()
    res.add_measure(circuit.all_measures.keys())
    sim = self
    # Fast path: when every measurement sits at the end of a noiseless circuit,
    # evolve once on a scratch simulator and sample only the measurement gates,
    # instead of re-evolving the whole circuit for each shot.
    if circuit.is_measure_end and not circuit.is_noise_circuit:
        sim = Simulator(self.backend, self.n_qubits, self.seed)
        sim.set_qs(self.get_qs())
        sim.apply_circuit(circuit.remove_measure(), pr)
        circuit = Circuit(circuit.all_measures.keys())
    samples = np.array(
        sim.sim.sampling(circuit.get_cpp_obj(), pr.get_cpp_obj(), shots, res.keys_map, seed)
    ).reshape((shots, -1))
    res.collect_data(samples)
    return res
def apply_hamiltonian(self, hamiltonian: Hamiltonian):
    """
    Apply a hamiltonian (hermitian or non hermitian) to the simulator state.

    Note:
        The quantum state is in general no longer normalized after applying
        a hamiltonian.

    Args:
        hamiltonian (Hamiltonian): the hamiltonian you want to apply.
    """
    _check_input_type('hamiltonian', Hamiltonian, hamiltonian)
    _check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
    ham_cpp = hamiltonian.get_cpp_obj()
    self.sim.apply_hamiltonian(ham_cpp)
def get_expectation(self, hamiltonian):
    r"""
    Get expectation of the given hamiltonian. The hamiltonian could be non hermitian.

    .. math::

        E = \left<\psi\right|H\left|\psi\right>

    Args:
        hamiltonian (Hamiltonian): The hamiltonian you want to get expectation.

    Returns:
        numbers.Number, the expectation value.

    Raises:
        TypeError: if hamiltonian is not a Hamiltonian.
    """
    # Consistency fix: use the shared validation helper that every sibling
    # method (apply_circuit, apply_hamiltonian, sampling) already uses,
    # instead of a hand-rolled isinstance check.
    _check_input_type('hamiltonian', Hamiltonian, hamiltonian)
    _check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
    return self.sim.get_expectation(hamiltonian.get_cpp_obj())
def get_qs(self, ket=False):
    """
    Get the current quantum state of this simulator.

    Args:
        ket (bool): Return the state as a ket-notation string when True,
            as a numpy array when False. Default: False.

    Returns:
        numpy.ndarray or str, the current quantum state.

    Raises:
        TypeError: if ket is not a bool.
    """
    if not isinstance(ket, bool):
        raise TypeError(f"ket requires a bool, but get {type(ket)}")
    state = np.array(self.sim.get_qs())
    return '\n'.join(ket_string(state)) if ket else state
def set_qs(self, quantum_state):
    """
    Set the quantum state of this simulator. The input is normalized before
    being handed to the backend.

    Args:
        quantum_state (numpy.ndarray): a 1-D state vector whose length is a
            power of two matching this simulator's qubit count.

    Raises:
        TypeError: if quantum_state is not a numpy array.
        ValueError: if the vector is not 1-D, its length is not a power of
            two, its qubit count mismatches the simulator, or it has zero
            norm (a zero vector cannot be normalized).
    """
    if not isinstance(quantum_state, np.ndarray):
        raise TypeError(f"quantum state must be a ndarray, but get {type(quantum_state)}")
    if len(quantum_state.shape) != 1:
        raise ValueError(f"vec requires a 1-dimensional array, but get {quantum_state.shape}")
    n_qubits = np.log2(quantum_state.shape[0])
    if n_qubits % 1 != 0:
        raise ValueError(f"vec size {quantum_state.shape[0]} is not power of 2")
    n_qubits = int(n_qubits)
    if self.n_qubits != n_qubits:
        raise ValueError(f"{n_qubits} qubits vec does not match with simulation qubits ({self.n_qubits})")
    # BUG FIX: a zero vector previously caused a division by zero and
    # silently produced a NaN-filled state; reject it explicitly instead.
    norm = np.sqrt(np.sum(np.abs(quantum_state) ** 2))
    if norm == 0:
        raise ValueError("quantum state must have non-zero norm")
    self.sim.set_qs(quantum_state / norm)
def get_expectation_with_grad(
    self,
    hams,
    circ_right,
    circ_left=None,
    simulator_left=None,
    encoder_params_name=None,
    ansatz_params_name=None,
    parallel_worker=None,
):
    r"""
    Get a function that return the forward value and gradient w.r.t circuit parameters.
    This method is designed to calculate the expectation and its gradient shown as below.

    .. math::

        E = \left<\varphi\right|U_l^\dagger H U_r \left|\psi\right>

    where :math:`U_l` is circ_left, :math:`U_r` is circ_right, :math:`H` is hams
    and :math:`\left|\psi\right>` is the current quantum state of this simulator,
    and :math:`\left|\varphi\right>` is the quantum state of `simulator_left`.

    Args:
        hams (Union[Hamiltonian, list[Hamiltonian]]): The hamiltonian(s) that need to get expectation.
        circ_right (Circuit): The :math:`U_r` circuit described above.
        circ_left (Circuit): The :math:`U_l` circuit described above. When None,
            :math:`U_l` equals :math:`U_r`. Default: None.
        simulator_left (Simulator): The simulator that contains :math:`\left|\varphi\right>`.
            When None, :math:`\left|\varphi\right>` is assumed equal to
            :math:`\left|\psi\right>`. Default: None.
        encoder_params_name (list[str]): Which parameters belong to the encoder that
            encodes input data into a quantum state (may be batched). Default: None.
        ansatz_params_name (list[str]): Which parameters belong to the ansatz that
            is trained during training. Default: None.
        parallel_worker (int): Number of parallel workers used to process a batch
            in parallel threads. Default: None.

    Returns:
        GradOpsWrapper, a grad ops wrapper that contains the information used to
        generate this grad ops.
    """
    # Normalize `hams` into a validated list of Hamiltonian objects.
    if isinstance(hams, Hamiltonian):
        hams = [hams]
    elif not isinstance(hams, list):
        raise TypeError(f"hams requires a Hamiltonian or a list of Hamiltonian, but get {type(hams)}")
    for h_tmp in hams:
        _check_input_type("hams's element", Hamiltonian, h_tmp)
        _check_hamiltonian_qubits_number(h_tmp, self.n_qubits)
    _check_input_type("circ_right", Circuit, circ_right)
    if circ_right.is_noise_circuit:
        raise ValueError("noise circuit not support yet.")
    # The problem becomes "non hermitian" as soon as an explicit left circuit
    # or an explicit left state is supplied.
    non_hermitian = False
    if circ_left is not None:
        _check_input_type("circ_left", Circuit, circ_left)
        if circ_left.is_noise_circuit:
            raise ValueError("noise circuit not support yet.")
        non_hermitian = True
    if simulator_left is not None:
        _check_input_type("simulator_left", Simulator, simulator_left)
        if self.backend != simulator_left.backend:
            raise ValueError(
                f"simulator_left should have the same backend as this simulator, \
which is {self.backend}, but get {simulator_left.backend}"
            )
        if self.n_qubits != simulator_left.n_qubits:
            raise ValueError(
                f"simulator_left should have the same n_qubits as this simulator, \
which is {self.n_qubits}, but get {simulator_left.n_qubits}"
            )
        non_hermitian = True
    # Defaults for the non hermitian case: the bra state falls back to this
    # simulator's state, the left circuit falls back to the right circuit.
    if non_hermitian and simulator_left is None:
        simulator_left = self
    if circ_left is None:
        circ_left = circ_right
    if circ_left.has_measure_gate or circ_right.has_measure_gate:
        raise ValueError("circuit for variational algorithm cannot have measure gate")
    if parallel_worker is not None:
        _check_int_type("parallel_worker", parallel_worker)
    # When the caller supplied no split at all, treat every circuit parameter
    # as an ansatz parameter.
    if encoder_params_name is None and ansatz_params_name is None:
        encoder_params_name = []
        ansatz_params_name = list(circ_right.params_name)
        for i in circ_left.params_name:
            if i not in ansatz_params_name:
                ansatz_params_name.append(i)
    if encoder_params_name is None:
        encoder_params_name = []
    if ansatz_params_name is None:
        ansatz_params_name = []
    _check_input_type("encoder_params_name", list, encoder_params_name)
    _check_input_type("ansatz_params_name", list, ansatz_params_name)
    for i in encoder_params_name:
        _check_input_type("Element of encoder_params_name", str, i)
    for i in ansatz_params_name:
        _check_input_type("Element of ansatz_params_name", str, i)
    # encoder + ansatz names must exactly cover the circuit parameters.
    s1 = set(circ_right.params_name) | set(circ_left.params_name)
    s2 = set(encoder_params_name) | set(ansatz_params_name)
    if s1 - s2 or s2 - s1:
        raise ValueError("encoder_params_name and ansatz_params_name are different with circuit parameters")
    circ_n_qubits = max(circ_left.n_qubits, circ_right.n_qubits)
    if self.n_qubits < circ_n_qubits:
        raise ValueError(f"Simulator has {self.n_qubits} qubits, but circuit has {circ_n_qubits} qubits.")
    # `version` selects the calling convention of the returned grad op:
    # "both"    -> grad_ops(encoder_batch, ansatz_params)
    # "encoder" -> grad_ops(encoder_batch)
    # "ansatz"  -> grad_ops(ansatz_params)
    version = "both"
    if not ansatz_params_name:
        version = "encoder"
    if not encoder_params_name:
        version = "ansatz"

    def grad_ops(*inputs):
        # Closure over the validated circuits/hamiltonians; checks the inputs
        # according to `version` and dispatches to the C++ gradient kernel.
        if version == "both" and len(inputs) != 2:
            raise ValueError("Need two inputs!")
        if version in ("encoder", "ansatz") and len(inputs) != 1:
            raise ValueError("Need one input!")
        if version == "both":
            _check_encoder(inputs[0], len(encoder_params_name))
            _check_ansatz(inputs[1], len(ansatz_params_name))
            batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
            inputs0 = inputs[0]
            inputs1 = inputs[1]
        if version == "encoder":
            _check_encoder(inputs[0], len(encoder_params_name))
            batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
            inputs0 = inputs[0]
            inputs1 = np.array([])
        if version == "ansatz":
            _check_ansatz(inputs[0], len(ansatz_params_name))
            batch_threads, mea_threads = _thread_balance(1, len(hams), parallel_worker)
            inputs0 = np.array([[]])
            inputs1 = inputs[0]
        if non_hermitian:
            f_g1_g2 = self.sim.non_hermitian_measure_with_grad(
                [i.get_cpp_obj() for i in hams],
                [i.get_cpp_obj(hermitian=True) for i in hams],
                circ_left.get_cpp_obj(),
                circ_left.get_cpp_obj(hermitian=True),
                circ_right.get_cpp_obj(),
                circ_right.get_cpp_obj(hermitian=True),
                inputs0,
                inputs1,
                encoder_params_name,
                ansatz_params_name,
                batch_threads,
                mea_threads,
                simulator_left.sim,
            )
        else:
            f_g1_g2 = self.sim.hermitian_measure_with_grad(
                [i.get_cpp_obj() for i in hams],
                circ_right.get_cpp_obj(),
                circ_right.get_cpp_obj(hermitian=True),
                inputs0,
                inputs1,
                encoder_params_name,
                ansatz_params_name,
                batch_threads,
                mea_threads,
            )
        # Kernel output layout along the last axis: [0] expectation value,
        # then the encoder gradients, then the ansatz gradients.
        res = np.array(f_g1_g2)
        if version == 'both':
            f = res[:, :, 0]
            g1 = res[:, :, 1 : 1 + len(encoder_params_name)]  # noqa:E203
            g2 = res[:, :, 1 + len(encoder_params_name) :]  # noqa:E203
            return f, g1, g2
        f = res[:, :, 0]
        g = res[:, :, 1:]
        return f, g

    grad_wrapper = GradOpsWrapper(
        grad_ops, hams, circ_right, circ_left, encoder_params_name, ansatz_params_name, parallel_worker
    )
    s = f'{self.n_qubits} qubit' + ('' if self.n_qubits == 1 else 's')
    s += f' {self.backend} VQA Operator'
    grad_wrapper.set_str(s)
    return grad_wrapper
def _check_encoder(data, encoder_params_size):
if not isinstance(data, np.ndarray):
raise ValueError(f"encoder parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 2:
raise ValueError("encoder data requires a two dimension numpy array")
if data_shape[1] != encoder_params_size:
raise ValueError(
f"encoder parameters size do not match with encoder parameters name,\
need {encoder_params_size} but get {data_shape[1]}."
)
def _check_ansatz(data, ansatz_params_size):
"""Check ansatz."""
if not isinstance(data, np.ndarray):
raise ValueError(f"ansatz parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 1:
raise ValueError("ansatz data requires a one dimension numpy array")
if data_shape[0] != ansatz_params_size:
raise ValueError(
f"ansatz parameters size do not match with ansatz parameters name,\
need {ansatz_params_size} but get {data_shape[0]}"
)
def _thread_balance(n_prs, n_meas, parallel_worker):
"""Thread balance."""
if parallel_worker is None:
parallel_worker = n_meas * n_prs
if n_meas * n_prs <= parallel_worker:
batch_threads = n_prs
mea_threads = n_meas
else:
if n_meas < n_prs:
batch_threads = min(n_prs, parallel_worker)
mea_threads = min(n_meas, max(1, parallel_worker // batch_threads))
else:
mea_threads = min(n_meas, parallel_worker)
batch_threads = min(n_prs, max(1, parallel_worker // mea_threads))
return batch_threads, mea_threads
def _check_hamiltonian_qubits_number(hamiltonian, sim_qubits):
    """Check that a hamiltonian's qubit count is compatible with a simulator.

    An 'origin' mode hamiltonian may use fewer qubits than the simulator;
    any other mode must match the simulator's qubit count exactly.
    """
    if hamiltonian.how_to == MODE['origin']:
        if hamiltonian.n_qubits > sim_qubits:
            raise ValueError(f"Hamiltonian qubits is {hamiltonian.n_qubits}, which is bigger than simulator qubits.")
    elif hamiltonian.n_qubits != sim_qubits:
        raise ValueError(
            f"Hamiltonian qubits is {hamiltonian.n_qubits}, not match \
with simulator qubits number {sim_qubits}"
        )
class GradOpsWrapper:
    """
    Callable wrapper that bundles a gradient operator together with the
    objects used to build it.

    Args:
        grad_ops (Union[FunctionType, MethodType]): A function or method that
            returns the forward value and the gradient w.r.t parameters.
        hams (Hamiltonian): The hamiltonian that generated this grad ops.
        circ_right (Circuit): The right circuit that generated this grad ops.
        circ_left (Circuit): The left circuit that generated this grad ops.
        encoder_params_name (list[str]): The encoder parameters name.
        ansatz_params_name (list[str]): The ansatz parameters name.
        parallel_worker (int): The number of parallel workers for batches.
    """

    def __init__(self, grad_ops, hams, circ_right, circ_left, encoder_params_name, ansatz_params_name, parallel_worker):
        """Record the gradient operator and all of its provenance."""
        self.str = ''
        self.grad_ops = grad_ops
        self.parallel_worker = parallel_worker
        self.hams = hams
        self.circ_right = circ_right
        self.circ_left = circ_left
        self.encoder_params_name = encoder_params_name
        self.ansatz_params_name = ansatz_params_name

    def __call__(self, *args):
        """Forward the call straight to the wrapped gradient operator."""
        return self.grad_ops(*args)

    def set_str(self, s):
        """
        Set a human readable description of this operator.

        Args:
            s (str): The string of QNN operator.
        """
        self.str = s
def inner_product(bra_simulator: Simulator, ket_simulator: Simulator):
    """
    Compute the inner product <bra|ket> of the states held by two simulators.

    Args:
        bra_simulator (Simulator): The simulator that serves as the bra state.
        ket_simulator (Simulator): The simulator that serves as the ket state.

    Returns:
        numbers.Number, the inner product of the two quantum states.

    Raises:
        ValueError: if qubit counts or backends differ, or the backend has no
            inner-product implementation.
    """
    _check_input_type('bra_simulator', Simulator, bra_simulator)
    _check_input_type('ket_simulator', Simulator, ket_simulator)
    if bra_simulator.n_qubits != ket_simulator.n_qubits:
        raise ValueError(
            f"Two simulator should have same quantum state, \
but get {bra_simulator.n_qubits} and {ket_simulator.n_qubits}."
        )
    if bra_simulator.backend != ket_simulator.backend:
        raise ValueError("The backend of two simulator should be same.")
    # Backends are known equal here, so checking one of them is enough.
    if bra_simulator.backend == 'projectq':
        bra_simulator.flush()
        ket_simulator.flush()
        return mb.cpu_projectq_inner_product(bra_simulator.sim, ket_simulator.sim)
    raise ValueError(f"backend for {bra_simulator.backend} not implement.")
# Public API of this module.
__all__ = ['Simulator', 'get_supported_simulator', 'GradOpsWrapper', 'inner_product']
| 33,512 | 10,315 |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-08-25 23:22:27
# @Last Modified by: KevinMidboe
# @Last Modified time: 2019-02-02 01:04:25
from guessit import guessit
from babelfish import Language, LanguageReverseError
import hashlib
import os, errno
import shutil
import re
import tvdb_api
import click
from pprint import pprint
from titlecase import titlecase
import langdetect
from exceptions import InsufficientNameError
import logging
logger = logging.getLogger('seasonedParser')
from video import VIDEO_EXTENSIONS, Episode, Movie, Video
from subtitle import SUBTITLE_EXTENSIONS, Subtitle, get_subtitle_path
from utils import sanitize, refine
def search_external_subtitles(path, directory=None):
    """Find subtitle files that sit next to a video and guess their language.

    :param str path: path of the video whose directory is scanned.
    :param str directory: optional directory to scan instead of the video's own.
    :return: mapping of subtitle path -> detected language.
    :rtype: dict
    """
    dirpath, filename = os.path.split(path)
    dirpath = dirpath or '.'
    fileroot, fileext = os.path.splitext(filename)
    subtitles = {}
    for p in os.listdir(directory or dirpath):
        if not p.endswith(SUBTITLE_EXTENSIONS):
            continue
        language = Language('und')
        # Language code embedded in the filename, e.g. "movie.en.srt" -> "en".
        language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:]
        if language_code:
            try:
                language = Language.fromietf(language_code)
            except (ValueError, LanguageReverseError):
                logger.error('Cannot parse language code %r', language_code)
        # NOTE(review): the filename-derived language above is always
        # overwritten by content detection below — confirm that is intended.
        # BUG FIX: use a context manager so the file is closed even if
        # reading or detection raises; pattern is a raw string (`\,` was an
        # invalid escape in a plain string).
        with open(os.path.join(dirpath, p), 'r', encoding='ISO-8859-15') as f:
            pattern = re.compile(r'[0-9:\,-<>]+')  # strip timestamps/markup before detecting
            filecontent = pattern.sub('', f.read())
        filecontent = filecontent[0:1000]
        language = langdetect.detect(filecontent)
        subtitles[os.path.join(dirpath, p)] = language
    logger.debug('Found subtitles %r', subtitles)
    return subtitles
def find_file_size(video):
    """Return the on-disk size in bytes of the video's file."""
    size_bytes = os.path.getsize(video.name)
    return size_bytes
def scan_video(path):
    """Scan a video from a `path`.

    :param str path: existing path to the video.
    :return: the scanned video.
    :rtype: :class:`~subliminal.video.Video`
    """
    # Reject paths that do not exist or do not look like a video file.
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    if not path.endswith(VIDEO_EXTENSIONS):
        raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])
    dirpath, filename = os.path.split(path)
    logger.info('Scanning video %r in %r', filename, dirpath)
    # Build the video object from the guessed filename metadata, attach any
    # external subtitles found alongside it, then refine with extra sources.
    video = Video.fromguess(path, guessit(filename))
    video.subtitles |= set(search_external_subtitles(video.name))
    refine(video)
    return video
def scan_subtitle(path):
    """Scan a subtitle file from `path`.

    :param str path: existing path to the subtitle.
    :return: the scanned subtitle.
    :rtype: Subtitle
    """
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    dirpath, filename = os.path.split(path)
    logger.info('Scanning subtitle %r in %r', filename, dirpath)
    # BUG FIX: `path.strip(filename)` stripped *characters* found in the
    # filename from both ends of the path, mangling the parent directory.
    # Use the real directory component computed above instead.
    subtitle = Subtitle.fromguess(dirpath, guessit(path))
    return subtitle
def subtitle_path(sibling, subtitle):
    """Return the subtitle's filename relocated into `sibling`'s directory."""
    return os.path.join(os.path.dirname(sibling), os.path.basename(subtitle))
def scan_videos(path):
    """Scan `path` for videos and their subtitles.

    See :func:`refine` to find additional information for the video.

    :param str path: existing directory path to scan.
    :return: (videos, paths with insufficient names, paths that errored).
    :rtype: tuple(list, list, list)
    """
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    if not os.path.isdir(path):
        raise ValueError('Path is not a directory')
    # The progress bar advances once per directory visited by os.walk.
    path_children = sum(1 for _ in os.walk(path))
    with click.progressbar(length=path_children, show_pos=True, label='Collecting videos') as bar:
        videos = []
        insufficient_name = []
        errors_path = []
        for dirpath, dirnames, filenames in os.walk(path):
            logger.debug('Walking directory %r', dirpath)
            # Remove hidden directories in place so os.walk skips them.
            for dirname in list(dirnames):
                if dirname.startswith('.'):
                    logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
                    dirnames.remove(dirname)
            for filename in filenames:
                if not filename.endswith(VIDEO_EXTENSIONS):
                    logger.debug('Skipping non-video file %s', filename)
                    continue
                if filename.startswith('.'):
                    logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
                    continue
                filepath = os.path.join(dirpath, filename)
                if os.path.islink(filepath):
                    logger.debug('Skipping link %r in %r', filename, dirpath)
                    continue
                # BUG FIX: the original re-checked the video extension here
                # with an unreachable `else: raise` — the filter above already
                # guarantees it, so the dead branch was removed.
                try:
                    video = scan_video(filepath)
                except InsufficientNameError as e:
                    logger.info(e)
                    insufficient_name.append(filepath)
                    continue
                except ValueError:  # pragma: no cover
                    logger.exception('Error scanning video')
                    errors_path.append(filepath)
                    continue
                videos.append(video)
            bar.update(1)
    return videos, insufficient_name, errors_path
def organize_files(path):
    """Group the media files found under `path` by their hash.

    Files that hash to the same value end up in the same bucket, so the result
    can be used to spot duplicates or related files.

    NOTE(review): `scan_files` is neither defined nor imported in this module —
    confirm where it comes from; possibly this should call `scan_videos`.
    """
    hashList = {}
    mediafiles = scan_files(path)
    # print(mediafiles)
    for file in mediafiles:
        # Bucket every file under its hash; a hash may map to several files.
        hashList.setdefault(file.__hash__(),[]).append(file)
        # hashList[file.__hash__()] = file
    return hashList
def save_subtitles(files, single=False, directory=None, encoding=None):
    """Rename episode media/subtitle files to '<series> SxxEyy <title>' form,
    creating a per-episode folder when one does not already exist.

    :param files: one episode-like object or a list of them; each needs
        `series`, `season`, `episode`, `parent_path`, `name`, `container`
        (plus `getLanguage`/`sdh` for subtitle files).
    :param single: unused, kept for interface compatibility.
    :param directory: unused, kept for interface compatibility.
    :param encoding: unused, kept for interface compatibility.
    """
    t = tvdb_api.Tvdb()
    if not isinstance(files, list):
        files = [files]
    for file in files:
        # TODO this should not be done in the loop
        dirname = "%s S%sE%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode))
        createParentfolder = dirname not in file.parent_path
        if createParentfolder:
            dirname = os.path.join(file.parent_path, dirname)
            print('Created: %s' % dirname)
            try:
                os.makedirs(dirname)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        # Look up the episode title; fall back to '' when TVDB has no entry.
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt.
        try:
            tvdb_episode = t[file.series][file.season][file.episode]
            episode_title = tvdb_episode['episodename']
        except Exception:
            episode_title = ''
        old = os.path.join(file.parent_path, file.name)
        if file.name.endswith(SUBTITLE_EXTENSIONS):
            lang = file.getLanguage()
            sdh = '.sdh' if file.sdh else ''
            filename = "%s S%sE%s %s%s.%s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, sdh, lang, file.container)
        else:
            filename = "%s S%sE%s %s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, file.container)
        if createParentfolder:
            newname = os.path.join(dirname, filename)
        else:
            newname = os.path.join(file.parent_path, filename)
        print('Moved: %s ---> %s' % (old, newname))
        os.rename(old, newname)
def scan_folder(path):
    """Collect videos from `path` (a file or a directory) and echo a summary.

    :param str path: path to a video file or a directory of videos.
    :return: (videos, paths with insufficient names).
    :rtype: tuple(list, list)
    """
    videos = []
    insufficient_name = []
    errored_paths = []
    logger.debug('Collecting path %s', path)
    # non-existing path
    if not os.path.exists(path):
        errored_paths.append(path)
        logger.exception("The path '{}' does not exist".format(path))
    # single file
    if os.path.isfile(path):
        logger.info('Path is a file')
        try:
            video = scan_video(path)
            videos.append(video)
        except InsufficientNameError as e:
            logger.info(e)
            insufficient_name.append(path)
    # directory (BUG FIX: bare `except:` narrowed to `except Exception` so
    # SystemExit/KeyboardInterrupt propagate; unused `scanned_videos` removed)
    if os.path.isdir(path):
        logger.info('Path is a directory')
        try:
            videos, insufficient_name, errored_paths = scan_videos(path)
        except Exception:
            logger.exception('Unexpected error while collecting directory path %s', path)
            errored_paths.append(path)
    click.echo('%s video%s collected / %s file%s with insufficient name / %s error%s' % (
        click.style(str(len(videos)), bold=True, fg='green' if videos else None),
        's' if len(videos) > 1 else '',
        click.style(str(len(insufficient_name)), bold=True, fg='yellow' if insufficient_name else None),
        's' if len(insufficient_name) > 1 else '',
        click.style(str(len(errored_paths)), bold=True, fg='red' if errored_paths else None),
        's' if len(errored_paths) > 1 else '',
    ))
    return videos, insufficient_name
def pickforgirlscouts(video):
    """Move the video to its proper location when it carries enough metadata.

    :return: True when the video was moved, False otherwise.
    :rtype: Boolean
    """
    if not video.sufficientInfo():
        return False
    video.moveLocation()
    return True
def moveHome(video):
    """Relocate a video and its subtitle files to the video's wanted path,
    creating the destination directory when missing."""
    destination = video.wantedFilePath()
    dest_dir = os.path.dirname(destination)
    if not os.path.exists(dest_dir):
        logger.info('Creating directory {}'.format(dest_dir))
        os.makedirs(dest_dir)
    logger.info("Moving video file from: '{}' to: '{}'".format(video.name, destination))
    shutil.move(video.name, destination)
    # Carry each existing subtitle file along with the video.
    for sub in video.subtitles:
        if not os.path.isfile(sub):
            continue
        newpath = subtitle_path(destination, sub)
        logger.info("Moving subtitle file from: '{}' to: '{}'".format(sub, newpath))
        shutil.move(sub, newpath)
# Give feedback before delete ?
def empthDirectory(paths):
    """Remove empty directories in `paths` — not implemented yet.

    NOTE(review): the name looks like a typo of `emptyDirectory`; renaming
    would break callers, so it is only flagged here.
    """
    pass
| 10,853 | 3,391 |
# Copyright 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db.models import agent as agents_db
from neutron_lib.db import api as db_api
from networking_vsphere.common import constants
def get_agent_by_host(agent_host):
    """Return the active, admin-enabled DVS L2 agent on `agent_host`, or None.

    :param agent_host: hostname to look the agent up by.
    :returns: the matching `Agent` row when it exists and is active,
        otherwise None.
    """
    # IMPROVEMENT: this is a read-only query, so a reader session is
    # sufficient — the original needlessly grabbed a writer session.
    session = db_api.get_reader_session()
    with session.begin(subtransactions=True):
        query = session.query(agents_db.Agent)
        agent = query.filter(
            agents_db.Agent.host == agent_host,
            agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,
            agents_db.Agent.admin_state_up.is_(True)).first()
        # Only report agents that are still heart-beating.
        if agent and agent.is_active:
            return agent
    return None
| 1,266 | 379 |
import json
# TODO: Make symlinks between the Server and Client slide, message, and ConfigParser classes
class Slide(object):
    """
    Class for Slide Show Slide Objects

    @ivar url: The Slide's URL (overridden by the `dds_external_url` meta
        entry when meta is a dictionary with a non-empty first URL)
    @type url: String
    @ivar duration: The Duration to show the slide (in seconds)
    @type duration: Integer
    @ivar id: The Slide's ID
    @type id: Integer
    @ivar meta: The Slide's meta content
    @type meta: Dictionary
    @copyright: Northeastern University Crew 2014
    """

    @staticmethod
    def makeSlide(url, duration, id, meta):
        """
        Slide Constructor based on the given input (instead of a dictionary)

        @param url: The Slide's URL
        @type url: String
        @param duration: The Duration to show the slide (in seconds)
        @type duration: Integer
        @param id: The Slide's ID
        @type id: Integer
        @param meta: The Slide's meta content
        @type meta: Dictionary
        @return: The constructed Slide
        @rtype: Slide
        """
        return Slide({"permalink": url, "duration": duration, "ID": id, "meta": meta})

    def __init__(self, infoDict):
        """
        Slide Constructor

        @param infoDict: The relevant information for the slide
            (keys: "permalink", "duration", "ID", "meta")
        @type infoDict: Dictionary
        @return: The constructed Slide
        @rtype: Slide
        """
        self.__type__ = "slide"
        # BUG FIX: was a Python-2 `print` statement (a syntax error on
        # Python 3); this form produces identical output on both versions.
        print("Got meta: {0}".format(infoDict["meta"]))
        self.url = infoDict["permalink"]
        # NOTE(review): assumes a dict-shaped meta always carries a non-empty
        # `dds_external_url` list — confirm the upstream feed guarantees it.
        if (not isinstance(infoDict["meta"], str) and
                infoDict["meta"]["dds_external_url"][0] != ""):
            self.url = infoDict["meta"]["dds_external_url"][0]
        self.duration = infoDict["duration"]
        self.id = infoDict["ID"]
        self.meta = infoDict["meta"]

    def toJSON(self):
        """
        @return: A JSON Representation of the slide (all instance attributes)
        @rtype: String
        """
        return json.dumps(self.__dict__)

    def sameID(self, id):
        """
        Predicate method which checks if the given id is equal to the slide's

        @param id: The id to check
        @type id: Integer
        @return: Whether the id matches the slide's
        @rtype: Boolean
        """
        return self.id == id

    def __str__(self):
        return "Slide[url=" + str(self.url) + ", duration=" + str(self.duration) + ", id=" + str(
            self.id) + ", meta=" + str(self.meta) + "]"

    def __repr__(self):
        # Simplified output (shown when arrays of Slides are printed)
        return "Slide(" + str(self.url) + "," + str(self.duration) + ")"
| 2,645 | 789 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from utils import prepare_cifar
from fgsm_attack import FGSMAttack
from tqdm import tqdm, trange
from pgd20 import pgd20_attack
from model import get_model_for_attack
def eval_model(model, test_loader, device):
    """Evaluate the clean (natural) accuracy of `model` over `test_loader`.

    Args:
        model: classifier returning logits of shape (batch, classes).
        test_loader: iterable of (images, labels) batches.
        device: torch device to move inputs to.

    Returns:
        (natural_acc, distance): accuracy as a 0-dim tensor, and an
        always-empty list kept so callers can unpack the same shape as the
        adversarial evaluation helpers below.
    """
    # Cleanup: removed unused locals (`correct_adv`, `num`, unpacked shape).
    correct = []
    distance = []  # intentionally left empty; see docstring
    with trange(10000) as pbar:  # NOTE(review): assumes a 10000-sample test set — confirm
        for x, label in test_loader:
            x, label = x.to(device), label.to(device)
            model.eval()
            with torch.no_grad():
                output = model(x)
            pred = output.argmax(dim=1)
            correct.append(pred == label)
            pbar.set_description(f"Acc: {torch.cat(correct).float().mean():.5f}")
            pbar.update(x.shape[0])
    natural_acc = torch.cat(correct).float().mean()
    return natural_acc, distance
def eval_model_pgd(model, test_loader, device, step_size, epsilon, perturb_steps):
    """Evaluate clean and PGD-20 robust accuracy of `model` on `test_loader`.

    Args:
        model: classifier returning logits of shape (batch, classes).
        test_loader: iterable of (images, labels) batches.
        device: torch device to move inputs to.
        step_size, epsilon, perturb_steps: PGD attack hyper-parameters.

    Returns:
        (natural_acc, robust_acc, distance): clean/robust accuracies as 0-dim
        tensors and the maximum observed L-inf perturbation.
    """
    # Cleanup: removed the unused `num` accumulator and unused shape unpack.
    correct_adv, correct = [], []
    distance = []
    with trange(10000) as pbar:  # NOTE(review): assumes a 10000-sample test set — confirm
        for x, label in test_loader:
            x, label = x.to(device), label.to(device)
            batch = x.shape[0]
            # Craft adversarial examples first, then switch to eval mode —
            # same order as the original evaluation.
            x_adv = pgd20_attack(model, x.clone(), label.clone(), step_size, epsilon, perturb_steps)
            x_adv = x_adv.to(device)
            model.eval()
            with torch.no_grad():
                output = model(x)
                output_adv = model(x_adv)
            # Track the largest per-sample L-inf distortion for sanity checking.
            distance.append(torch.max((x - x_adv).reshape(batch, -1).abs(), dim=1)[0])
            pred = output.argmax(dim=1)
            pred_adv = output_adv.argmax(dim=1)
            correct.append(pred == label)
            correct_adv.append(pred_adv == label)
            pbar.set_description(
                f"Acc: {torch.cat(correct).float().mean():.5f}, Robust Acc:{torch.cat(correct_adv).float().mean():.5f}")
            pbar.update(x.shape[0])
    natural_acc = torch.cat(correct).float().mean()
    robust_acc = torch.cat(correct_adv).float().mean()
    distance = torch.cat(distance).max()
    return natural_acc, robust_acc, distance
def eval_model_with_attack(model, test_loader, attack, epsilon, device):
    """Evaluate natural and adversarial accuracy under an arbitrary attack.

    :param attack: callable `attack(model, x, label) -> x_adv`
    :return: (natural_acc, robust_acc, max L-inf distance over the set)
    """
    clean_hits, adv_hits, max_dists = [], [], []
    with trange(10000) as pbar:
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            n_batch = images.shape[0]
            adv = attack(model, images.clone(), labels.clone())
            # Keep adversarial examples in the valid pixel range.
            adv = adv.clamp(0, 1).to(device)
            model.eval()
            with torch.no_grad():
                clean_out = model(images)
                adv_out = model(adv)
            max_dists.append((images - adv).reshape(n_batch, -1).abs().max(dim=1)[0])
            clean_hits.append(clean_out.argmax(dim=1) == labels)
            adv_hits.append(adv_out.argmax(dim=1) == labels)
            pbar.set_description(
                f"Acc: {torch.cat(clean_hits).float().mean():.5f}, Robust Acc:{torch.cat(adv_hits).float().mean():.5f}")
            pbar.update(n_batch)
    natural_acc = torch.cat(clean_hits).float().mean()
    robust_acc = torch.cat(adv_hits).float().mean()
    return natural_acc, robust_acc, torch.cat(max_dists).max()
| 3,692 | 1,229 |
from fastapi import FastAPI, File, UploadFile, BackgroundTasks, Depends, HTTPException,status,Query
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer,OAuth2AuthorizationCodeBearer,HTTPBasicCredentials
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from dotenv import load_dotenv
from typing import List,Optional
import os
import sys
from services.serveUploadedFiles import handle_upload_image_file, handle_multiple_image_file_uploads, handle_upload_video_file
from services.serveQrcode import handle_qr_code
from services.security.customBearerCheck import validate_token
from services.storage.local import response_image_file
from services.serveDataFromUrl import handle_download_data_from_url, handle_multiple_image_file_downloads
# Application bootstrap: env-driven FastAPI app with optional docs, static files and CORS.
load_dotenv()
# docs/redoc endpoints can be disabled by setting the env vars to the literal string 'None'.
app = FastAPI(docs_url=None if os.environ.get('docs_url') == 'None' else '/docs', redoc_url=None if os.environ.get('redoc_url') == 'None' else '/redoc')
# If you want to serve files from local server you need to mount your static file directory
# (skipped under pytest so tests do not require a ./static directory).
if os.environ.get('PREFERED_STORAGE') == 'local' and 'pytest' not in sys.modules.keys():
    app.mount("/static", StaticFiles(directory="static"), name="static")
# If you want cors configuration also possible thanks to fast-api
# NOTE(review): os.environ.get('CORS_ORIGINS') returns None when the variable is
# unset, so .split(',') would crash at import — confirm deployment always sets it.
origins = os.environ.get('CORS_ORIGINS').split(',')
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/", tags=["main"])
def root(
    cpu_load: Optional[str] = Query(
        False,
        description='True/False depending your needs, gets average CPU load value',
        regex='^(True|False)$'
    ),
    token: str = Depends(validate_token)):
    """Landing endpoint; echoes the auth token and, on demand, the CPU load averages."""
    result = {"Hello": f"Token is {token}"}
    if cpu_load == 'True':
        result['cpu_average_load'] = os.getloadavg()
    return result
# File size validates NGINX
@app.post("/image", tags=["image"])
async def upload_image_file(
    thumbnail: Optional[str] = Query(
        os.environ.get('IMAGE_THUMBNAIL'),
        description='True/False depending your needs',
        regex='^(True|False)$'
    ),
    file: UploadFile = File(...),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Upload a single image, optionally generating a thumbnail."""
    make_thumbnail = thumbnail == 'True'
    return handle_upload_image_file(make_thumbnail, file)
@app.post("/images", tags=["image"])
async def upload_image_files(
    thumbnail: Optional[str] = Query(
        os.environ.get('IMAGE_THUMBNAIL'),
        description='True/False depending your needs',
        regex='^(True|False)$'
    ),
    files: List[UploadFile] = File(...),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
    """Upload several images at once, capped by MULTIPLE_FILE_UPLOAD_LIMIT."""
    file_count = len(files)
    if file_count > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail='Amount of files must not be more than {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
        )
    return handle_multiple_image_file_uploads(files, file_count, thumbnail == 'True')
@app.get("/image", tags=["image"])
async def get_image(
    image: str = Query(...,
        description='uploaded image name',
        max_length=50
    ),
    image_type: str = Query(
        ...,
        description='Should provide verision of image you want from localStorage original, thumbnail or qrImage',
        regex='^(original|thumbnail|qrImage)$'
    ),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
    """Serve a previously uploaded image in the requested variant."""
    response = response_image_file(image, image_type)
    return response
@app.post("/qrImage", tags=["image"])
async def text_to_generate_qr_image(
    qr_text: str = Query(
        ...,
        description='Provide text to generate qr image',
    ),
    with_logo: Optional[str] = Query(
        os.environ.get('QR_IMAGE_WITH_LOGO'),
        description='True/False depending your needs default is {}'.format(os.environ.get('QR_IMAGE_WITH_LOGO')),
        regex='^(True|False)$'
    ),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Generate a QR-code image for `qr_text`, optionally overlaying a logo."""
    include_logo = with_logo == 'True'
    return handle_qr_code(qr_text, include_logo)
@app.post("/video", tags=["video"])
async def upload_video_file(
    optimize: Optional[str] = Query(
        os.environ.get('VIDEO_OPTIMIZE'),
        description='True/False depending your needs default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
        regex='^(True|False)$'
    ),
    file: UploadFile = File(..., description='Allows mov, mp4, m4a, 3gp, 3g2, mj2'),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Upload a video file, optionally running the optimizer pass."""
    run_optimizer = optimize == 'True'
    return handle_upload_video_file(run_optimizer, file)
@app.get("/imageUrl", tags=["from url"])
async def image_from_url(
    image_url: str = Query(
        None,
        description = "Pass valid image url to upload",
        min_length = 5
    ),
    thumbnail: Optional[str] = Query(
        os.environ.get('IMAGE_THUMBNAIL'),
        description='True/False depending your needs',
        regex='^(True|False)$'
    ),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Fetch an image from a remote URL and store it, optionally with a thumbnail."""
    make_thumbnail = thumbnail == 'True'
    return handle_download_data_from_url(image_url, make_thumbnail, file_type='image')
@app.get("/imageUrls", tags=["from url"])
async def images_from_urls(
    image_urls: List[str] = Query(
        None,
        description = "Pass valid image urls to upload",
        min_length = 5
    ),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Fetch several remote images at once, capped by MULTIPLE_FILE_UPLOAD_LIMIT."""
    url_count = len(image_urls)
    if url_count > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail='Amount of files must not be more than {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
        )
    return handle_multiple_image_file_downloads(image_urls, url_count)
@app.get("/videoUrl", tags=["from url"])
async def video_from_url(
    video_url: str = Query(
        None,
        description = "Pass valid video url to upload",
        min_length = 5
    ),
    optimize: Optional[str] = Query(
        os.environ.get('VIDEO_OPTIMIZE'),
        description='True/False depending your needs default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
        regex='^(True|False)$'
    ),
    OAuth2AuthorizationCodeBearer = Depends(validate_token)):
    """Fetch a video from a remote URL and store it, optionally optimized."""
    run_optimizer = optimize == 'True'
    return handle_download_data_from_url(video_url, False, run_optimizer, file_type='video')
import os
from tkinter import *
# `from tkinter import *` does NOT import the filedialog submodule; it is used below.
from tkinter import filedialog

from modules.LoadAccel import *
from modules.LoadOmega import *
defaultdir = "../data"
def LoadDataSet(dirpath=None):
    """Load every consecutively numbered run directory under `dirpath`.

    When `dirpath` is None a Tk directory chooser is shown. Directories are
    expected to be named run1, run2, ...; scanning stops at the first gap.

    :return: list with one LoadRun() result per run directory, in order.
    """
    if dirpath is None:  # `is None` instead of `== None`
        root = Tk()
        root.withdraw()  # hide the empty Tk root window behind the dialog
        dirpath = filedialog.askdirectory(parent=root, initialdir=defaultdir, title='Please select a dataset')
    files = os.listdir(dirpath)
    print("-------Found " + str(len(files)) + " files-------")
    for name in files:
        print("Found: " + name)
    print("----------------------------")
    # Collect run1, run2, ... until the first index with no matching entry.
    run_index = 1
    runs_files = []
    while True:
        matches = [f for f in files if f == "run" + str(run_index)]
        if not matches:
            break
        runs_files += matches
        run_index += 1
    print("Found " + str(len(runs_files)) + " runs")
    runs_data = []
    for run in runs_files:
        print("\n\n-----------------" + run + "-----------------")
        runs_data.append(LoadRun(dirpath + "/" + run + "/"))
    return runs_data
# load a single AccelData object and RotaryData object
# simpler front-end for LoadRun()
def LoadSingleRun(dirpath=None):
    """Load one run and unwrap its single AccelData/RotaryData pair."""
    data = LoadRun(dirpath)
    return {"accel": data["accel"][0], "omega": data["omega"][0]}
# deprecated:
def LoadRun(dirpath=None):
    """Deprecated alias for LoadMultiRun(); kept for backward compatibility."""
    return LoadMultiRun(dirpath)
# Load multiple runs as a list of AccelData objects and list of RotaryData objects
def LoadMultiRun(dirpath=None):
    """Load all accel/omega CSV files in a run directory.

    File names are expected to have four dot-separated parts with the type
    token ("accel" or "omega") at position `type_index` and a .csv suffix.
    Returns {"accel": [AccelData, ...], "omega": [RotaryData, ...]}.
    Raises FileNotFoundError when no usable file is found.

    NOTE(review): `type_index` is not defined in this module — presumably it
    is exported by the `modules.LoadAccel` wildcard import; confirm.
    """
    if(dirpath==None):
        # No directory supplied: ask the user via a (hidden-root) Tk dialog.
        root = Tk()
        root.withdraw()
        dirpath = filedialog.askdirectory(parent=root,initialdir=defaultdir,title='Please select a run')
    found_files = os.listdir(dirpath)
    print("-------Found "+str(len(found_files))+ " files-------")
    for i in found_files:
        print("Found: "+i)
    print("The Following Files Will be Ignored:")
    # Reject anything that is not <name>.<type>.<index>.csv with a known type.
    not_file = list(filter(lambda x: ((x.split(".")[type_index]!="accel" and
        x.split(".")[type_index]!="omega") or
        x.split(".")[-1].lower()!="csv" or
        len(x.split(".")) != 4
        ),
        found_files))
    for i in not_file:
        # Report the first reason (in priority order) each file was rejected.
        print("- "+i+("(Wrong File Structure)" if len(i.split(".")) != 4
            else "(Wrong File Format)" if i.split(".")[-1].lower()!="csv"
            else "(Unsupported Type)" if i.split(".")[type_index]!="accel" and i.split(".")[type_index]!="omega"
            else ""
            ))
    if(not_file == []):
        print("--None--")
    print("----------------------------")
    # Keep only files that were not rejected above.
    files = list(filter(lambda x: not_file.count(x) == 0,
        found_files))
    accels_files = list(filter(lambda x: x.split(".")[type_index]=="accel", files))
    accels_data = []
    for file in accels_files:
        print("processing "+file+"...")
        data = LoadAccelFile(dirpath+"/"+file)
        # LoadAccelFile signals an unsupported device by returning this string.
        if(data != "Model is not currently supported"):
            accels_data.append(data)
        else:
            print("Failed to Load: "+file+" (Model not supported)")
    omega_files = list(filter(lambda x: x.split(".")[type_index]=="omega", files))
    omega_data = []
    for file in omega_files:
        print("processing "+file+"...")
        omega_data.append(Load_Omega(filepath=str(dirpath+"/"+file)))
    if accels_data == [] and omega_data == []:
        raise FileNotFoundError('No files were found.')
    return {"accel": accels_data, "omega": omega_data}
| 3,570 | 1,148 |
# Dynamical systems
from abc import ABC as _ABC, abstractmethod as _abstractmethod
import numpy as _np
from ._tu import TU as _TU
class DynamicalSystems(_ABC):
    """Abstract base class for a dynamical system integrated step by step.

    Subclasses must implement `equation`, `_solve`, and set `self.dim` in
    `parameter()`. State is a time `t` and a vector `u`; solved states are
    accumulated in `_t_seq`/`_u_seq`.
    """
    def __init__(self, t=None, u=None, **params):
        # `parameter()` is called once without args so the subclass can set dim,
        # and again with **params to apply caller overrides.
        self.dim = 0
        self.parameter()
        if self.dim == 0:
            class_name = self.__class__.__name__
            msg = (f"need to set {class_name}'s dimension "
                   f"({class_name}.dim=? in parameter())")
            raise NotImplementedError(msg)
        self.t = t or 0
        self.u = u if u is not None else _np.zeros(self.dim)
        self._t_seq, self._u_seq = [], []
        self.parameter(**params)
    @property
    def inf(self):
        """True when any component of the state has diverged to infinity."""
        return any(_np.isinf(self.u))
    @property
    def internal_state(self):
        """Current (t, u) pair as a _TU tuple."""
        return _TU(self.t, self.u)
    @property
    def t_seq(self):
        """Recorded time points as a numpy array."""
        return _np.array(self._t_seq)
    @property
    def u_seq(self):
        """Recorded state vectors as a numpy array."""
        return _np.array(self._u_seq)
    @classmethod
    def on_attractor(cls, t0=None, u0=None, h=0.01, *, T_0=5000, **params):
        """Construct an instance and run T_0 transient steps to reach the attractor."""
        c = cls(t0, u0)
        c.parameter(**params)
        c.settle_on_attractor(t0, u0, h=h, T_0=T_0)
        return c
    @classmethod
    def get_u_seq(cls, n, *args, **kwargs):
        """Convenience: settle on the attractor, solve n steps, return u_seq."""
        c = cls.on_attractor(*args, **kwargs)
        c.solve_n_times(n)
        return c.u_seq
    @_abstractmethod
    def equation(self, t, u):
        """equation"""
    def j(self, **params):
        """Jacobian as a numpy array (wrapper around `jacobian`)."""
        return _np.array(self.jacobian(**params))
    def jacobian(self):
        """jacobian"""
        return None
    def make_inital(self):
        # Random initial state in [0, 1)^dim. (Name kept as-is: public API.)
        return _np.random.rand(self.dim)
    def parameter(self):
        """set parameter for equation"""
    def reset_u_seq(self):
        """Drop the recorded state history (time history is kept)."""
        self._u_seq = []
    def settle_on_attractor(self, t0=None, u0=None,
                            *, T_0=5000, notsave=True, **params):
        """Run T_0 transient solve steps so the state lands on the attractor.

        With notsave=True (default) the transient history is discarded.
        Extra **params (e.g. step size h) are forwarded to `solve`.
        """
        self.u = self.make_inital() if u0 is None else u0
        self.t = t0 or 0
        for _ in range(T_0):
            self.solve(*self.internal_state, **params)
        if notsave:
            self._u_seq, self._t_seq = [], []
        if t0 is None:
            self.t = 0
        self.t, self.u = self.internal_state
        return self.internal_state
    def solve(self, *args, **kwargs):
        """Advance one step via the subclass `_solve`, recording unless save=False."""
        tu = self._solve(*args, **kwargs)
        if kwargs.get('save', True):
            self._u_seq.append(tu.u)
            self._t_seq.append(tu.t)
        return tu
    def solve_n_times(self, n):
        """Advance n steps from the current state; return (t_seq, u_seq)."""
        for _ in range(n):
            self.solve(*self.internal_state)
        return self.t_seq, self.u_seq
    def __call__(self, t, u):
        # Evaluate the right-hand side of the system as a numpy array.
        return _np.array(self.equation(t, u))
    def __repr__(self):
        # Show public scalar parameters only (skip t, u and private attrs).
        v = vars(self)
        p = ', '.join(f'{key}={_np.round(v[key], 3)}'
                      for key in v.keys()
                      if ('_' not in key) and (key not in ['t', 'u']))
        name = self.__class__.__name__
        return f'{name}({p})'
    @_abstractmethod
    def _solve(self, t, u):
        """One integration step; subclasses override, may call super() to store state."""
        self.t, self.u = t, u
        return _TU(self.t, self.u)
| 3,069 | 1,116 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the TrackingType and Tracking tables."""
    def forwards(self, orm):
        # Adding model 'TrackingType'
        db.create_table('statistics_trackingtype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('statistics', ['TrackingType'])
        # Adding model 'Tracking' (generic relation via content_type/object_id)
        db.create_table('statistics_tracking', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tracking_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['statistics.TrackingType'])),
            ('message', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('statistics', ['Tracking'])
    def backwards(self, orm):
        # Deleting model 'TrackingType'
        db.delete_table('statistics_trackingtype')
        # Deleting model 'Tracking'
        db.delete_table('statistics_tracking')
    # Frozen ORM snapshot used by South at migration time; do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'statistics.tracking': {
            'Meta': {'ordering': "('-created',)", 'object_name': 'Tracking'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tracking_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['statistics.TrackingType']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'statistics.trackingtype': {
            'Meta': {'object_name': 'TrackingType'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['statistics']
# Read three segment lengths and report whether they can form a triangle
# (triangle inequality: each side must be shorter than the sum of the other two).
r1 = float(input('Primeiro seguimento: '))
r2 = float(input('Segundo seguimento: '))
r3 = float(input('Terceiro seguimento: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('Os segmentos acima PODEM FORMAR UM TRIÂNGULO!')
else:
    # Fixed typo in the user-facing message: "TRIÂGULO" -> "TRIÂNGULO".
    print('Os segmentos acima NÃO PODEM FORMAR UM TRIÂNGULO!')
| 306 | 134 |
# Read a full name and print the first and last names separately.
# Example: "Ana Maria de Souza" -> first = Ana, last = Souza
nome = input('Digite um nome completo: ').strip()
n = nome.split()
print(f'O seu primeiro nome é {n[0]}')
# Negative indexing is the idiomatic way to take the last element
# (the original used n[len(n)-1]).
print(f'Já o seu ultimo nome é {n[-1]}')
import random
class Card(object):
    """A single playing card identified by its pip (rank) and suit."""
    suits = ("Clubs", "Hearts", "Spades", "Diamonds")
    pips = ("2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King", "Ace")

    def __init__(self, pip, suit):
        self.pip = pip
        self.suit = suit

    def __str__(self):
        # e.g. "Ace Spades"
        return "{} {}".format(self.pip, self.suit)
class Deck(object):
    """A full 52-card deck that re-shuffles itself before every deal."""

    def __init__(self):
        self.deck = [Card(pip, suit) for suit in Card.suits for pip in Card.pips]

    def __str__(self):
        cards = ", ".join(str(card) for card in self.deck)
        return "[%s]" % cards

    def shuffle(self):
        random.shuffle(self.deck)

    def deal(self):
        # Shuffle on every deal so the next card cannot be predicted.
        self.shuffle()
        return self.deck.pop(0)
| 705 | 256 |
import json
import pytest
from bddcli import Given, given, when, stdout, stderr, Application
from itsdangerous import TimedJSONWebSignatureSerializer
from itsdangerous.exc import SignatureExpired
from nanohttp import settings
from restfulpy import Application as RestfulpyApplication
# Minimal restfulpy application under test; empty configuration string.
foo = RestfulpyApplication(name='jwt')
foo.__configuration__ = ''
def foo_main():
    """Entry point bddcli launches as a subprocess; runs the app's CLI."""
    return foo.cli_main()
# bddcli wrapper pointing at the entry point above by dotted path.
app = Application('foo', 'restfulpy.tests.test_jwt_cli:foo_main')
def test_jwt():
    """End-to-end check of the `jwt create` CLI subcommand."""
    foo.configure(force=True)
    serializer = TimedJSONWebSignatureSerializer(
        settings.jwt.secret,
        algorithm_name=settings.jwt.algorithm
    )
    with Given(app, ['jwt', 'create']):
        assert stderr == ''
        # Strip the trailing newline from the CLI output.
        token = f'{stdout}'[:-1]
        assert serializer.loads(token) == {}

        # A token that carries an explicit JSON payload.
        payload = dict(a=1)
        when(given + f'\'{json.dumps(payload)}\'')
        assert stderr == ''
        token = f'{stdout}'[:-1]
        assert serializer.loads(token) == payload

        # An already-expired token (-e -1) must fail to deserialize.
        when(given + '-e -1')
        assert stderr == ''
        token = f'{stdout}'[:-1]
        with pytest.raises(SignatureExpired):
            serializer.loads(token)
if __name__ == '__main__':
    # Manual smoke-run: create one JWT via the CLI.
    foo.cli_main(['jwt', 'create'])
| 1,300 | 411 |
class BinaryNode:
    """One node of a binary tree: a value plus left/right child links."""

    def __init__(self, value):
        self.value = value
        # Children start out unset; callers attach them directly.
        self.left = None
        self.right = None
class BinaryTree:
    """Binary tree wrapper exposing traversal utilities over `root`."""

    def __init__(self):
        self.root = None

    def tree_max(self):
        """
        Return the maximum node value in the tree.

        Raises:
            Exception: if the tree is empty.
        """
        # `is None` instead of `== None` (identity check for the sentinel).
        if self.root is None:
            raise Exception("Empty Tree")
        if self.root.left is None and self.root.right is None:
            return self.root.value
        # Renamed from `max` to avoid shadowing the builtin.
        best = self.root.value

        def search(current):
            nonlocal best
            if current.value > best:
                best = current.value
            if current.left:
                search(current.left)
            if current.right:
                search(current.right)

        search(self.root)
        return best
if __name__ == "__main__":
    # No demo code yet; the module is import-only.
    pass
| 913 | 249 |
class Solution:
    # @param {string} s A string
    # @return {int} the length of last word
    def lengthOfLastWord(self, s):
        """Return the length of the last whitespace-separated word in `s`.

        Returns 0 for an empty or whitespace-only string. The original
        raised IndexError on whitespace-only input (e.g. "   "), because
        "   ".split() is [] yet the s == '' guard did not trigger.
        """
        words = s.split()
        return len(words[-1]) if words else 0
| 234 | 72 |
from launch import LaunchDescription
from launch_ros.actions import LifecycleNode
# from launch_ros.actions import Node
import sys
def generate_launch_description():
    """Build the ROS 2 launch description starting the marvelmind_nav lifecycle node.

    NOTE(review): `node_executable`/`node_name` are the pre-Foxy launch_ros
    argument names (renamed to `executable`/`name` later) — confirm the
    target ROS 2 distribution.
    """
    return LaunchDescription([
        LifecycleNode(package='marvelmind_nav', node_executable='marvelmind_nav',
            node_name='lc_marvel2', output='screen'),
    ])
def main(argv=None):
    """Script entry point.

    `argv` defaults to sys.argv[1:] resolved at call time; the original
    evaluated it in the signature, freezing the value at import time.
    """
    if argv is None:
        argv = sys.argv[1:]
    print("Running main")
"""
Initializes settings for elastic.py.
To make the index dynamic (which also allows to switch it out for tests), the value must be explicitly initialized
by some other module. If this does not happen, and somebody attempts to load elastic.py, 'ready_or_die' will get
executed which will shut down the application, thus preventing any shenanigans with the wrong parameters being used.
At least in normal circumstances :)
"""
from datetime import timezone, datetime
from typing import Optional, List, Union
from elasticsearch_dsl import Document, Date, integer_types, ValidationException, Search
from elasticsearch_dsl.query import Query
from sahyun_bot.the_danger_zone import nuke_from_orbit
from sahyun_bot.utils import NON_EXISTENT
from sahyun_bot.utils_settings import read_config, parse_bool, parse_list
# Fallback values used when the [elastic] config section omits a key.
DEFAULT_HOST = 'localhost'
DEFAULT_CUSTOMSFORGE_INDEX = 'cdlcs'
DEFAULT_USER_INDEX = 'users'
DEFAULT_FUZZINESS = 'auto:5,11'
DEFAULT_SHINGLE_CEILING = 3
DEFAULT_PLATFORMS = ['pc']
DEFAULT_PARTS = ['lead', 'rhythm']
DEFAULT_OFFICIAL = False
# Index names reserved for the test suite; init() refuses to use them.
TEST_CUSTOMSFORGE_INDEX = DEFAULT_CUSTOMSFORGE_INDEX + '_test'
TEST_USER_INDEX = DEFAULT_USER_INDEX + '_test'
TEST_ONLY_VALUES = frozenset([
    TEST_CUSTOMSFORGE_INDEX,
    TEST_USER_INDEX,
])
# Module state; NON_EXISTENT marks values that init()/init_test() must set
# before elastic.py may be imported (see ready_or_die()).
e_host = NON_EXISTENT
e_cf_index = NON_EXISTENT
e_rank_index = NON_EXISTENT
e_fuzzy = NON_EXISTENT
e_shingle = NON_EXISTENT
e_explain = NON_EXISTENT
e_refresh = False
e_platforms = NON_EXISTENT
e_parts = NON_EXISTENT
e_allow_official = NON_EXISTENT
def important_values() -> List:
    """Settings that must be initialized before the elastic module may load."""
    return [e_cf_index, e_rank_index]
def ready_or_die():
    """
    Immediately shuts down the application if the module is not properly configured.
    Make the call immediately after imports in every module that depends on this configuration to be loaded.
    """
    # NON_EXISTENT is the sentinel left by the module-level defaults above.
    if NON_EXISTENT in important_values():
        nuke_from_orbit('programming error - elastic module imported before elastic_settings is ready!')
def init():
    """Populate module-level elastic settings from the [elastic] config section.

    NOTE(review): e_refresh is deliberately left at its module default (False)
    here, unlike init_test() — confirm that is intended.
    """
    global e_host
    global e_cf_index
    global e_rank_index
    global e_fuzzy
    global e_shingle
    global e_explain
    global e_platforms
    global e_parts
    global e_allow_official
    e_host = read_config('elastic', 'Host', fallback=DEFAULT_HOST)
    e_cf_index = read_config('elastic', 'CustomsforgeIndex', fallback=DEFAULT_CUSTOMSFORGE_INDEX)
    e_rank_index = read_config('elastic', 'RankIndex', fallback=DEFAULT_USER_INDEX)
    e_fuzzy = read_config('elastic', 'Fuzziness', fallback=DEFAULT_FUZZINESS)
    e_shingle = read_config('elastic', 'ShingleCeiling', convert=int, fallback=DEFAULT_SHINGLE_CEILING)
    e_explain = read_config('elastic', 'Explain', convert=parse_bool, fallback=False)
    # noinspection PyTypeChecker
    e_platforms = read_config('elastic', 'Platforms', convert=parse_list, fallback=DEFAULT_PLATFORMS)
    # noinspection PyTypeChecker
    e_parts = read_config('elastic', 'Parts', convert=parse_list, fallback=DEFAULT_PARTS)
    e_allow_official = read_config('elastic', 'RandomOfficial', convert=parse_bool, fallback=DEFAULT_OFFICIAL)
    # Shingles of size < 2 are meaningless; clamp to a sane floor.
    e_shingle = max(2, e_shingle)
    # Refuse to run a real initialization against test-only index names.
    for value in important_values():
        if value in TEST_ONLY_VALUES:
            nuke_from_orbit('configuration error - cannot use TEST values for REAL initialization')
def init_test():
    """Configure the module for tests: dedicated indexes, explain + refresh on."""
    global e_host
    global e_cf_index
    global e_rank_index
    global e_fuzzy
    global e_shingle
    global e_explain
    global e_refresh
    global e_platforms
    global e_parts
    global e_allow_official
    e_host = DEFAULT_HOST
    e_cf_index = TEST_CUSTOMSFORGE_INDEX
    e_rank_index = TEST_USER_INDEX
    e_fuzzy = DEFAULT_FUZZINESS
    e_shingle = DEFAULT_SHINGLE_CEILING
    # Explain + immediate refresh make test assertions deterministic.
    e_explain = True
    e_refresh = True
    e_platforms = DEFAULT_PLATFORMS
    e_parts = DEFAULT_PARTS
    e_allow_official = DEFAULT_OFFICIAL
# Elasticsearch sort clause that orders hits randomly (script-based sort).
RANDOM_SORT = {
    '_script': {
        'script': 'Math.random()',
        'type': 'number',
    },
}
class BaseDoc(Document):
    """Shared base for this project's elasticsearch_dsl documents.

    Adds debugging helpers (explain, term inspection) and applies the
    module-level e_explain / e_refresh settings to searches and writes.
    """
    @classmethod
    def index_name(cls) -> Optional[str]:
        """Name of the index this document class is bound to, if any."""
        return cls._index._name if cls._index else None
    @classmethod
    def mapping(cls) -> Optional[dict]:
        """The document's field mapping as a plain dict."""
        return cls._doc_type.mapping.to_dict()
    @classmethod
    def search(cls, **kwargs) -> Search:
        """Like Document.search, but honoring the module-wide explain flag."""
        return super().search(**kwargs).extra(explain=e_explain)
    @classmethod
    def as_lucine(cls, query: Union[Query, dict], **kwargs) -> str:
        """
        :returns given query as it will be interpreted by the index of this document in Lucine format
        """
        kwargs['explain'] = True
        kwargs['rewrite'] = True
        es = cls._get_connection()
        body = query if isinstance(query, dict) else {'query': query.to_dict()}
        result = es.indices.validate_query(body, cls._default_index(), **kwargs)
        if 'error' in result:
            raise ValueError(result['error'])
        return result['explanations'][0]['explanation']
    def explain(self, query: Query, **kwargs) -> dict:
        """
        :returns lucine query, whether it matches this document & basic explanation why or why not
        """
        es = self._get_connection()
        body = {'query': query.to_dict()}
        response = es.explain(self._get_index(), self.meta.id, body=body, **kwargs)
        return {
            'search': self.as_lucine(body),
            'match': response['matched'],
            'reason': response['explanation'],
        }
    def terms(self, *fields: str, **kwargs) -> dict:
        """
        :returns for every field, the terms that have been analyzed for this particular document
        """
        vectors = self.term_vectors(*fields, **kwargs)
        return {field_name: list(data['terms'].keys()) for field_name, data in vectors.items()}
    def term_vectors(self, *fields: str, **kwargs) -> dict:
        """
        :returns for every field, information about the terms that have been analyzed for this particular document
        """
        es = self._get_connection()
        response = es.termvectors(index=self._get_index(), id=self.meta.id, fields=fields, **kwargs)
        return response['term_vectors']
    def delete(self, **kwargs):
        """Delete, defaulting `refresh` to the module-wide e_refresh flag."""
        kwargs.setdefault('refresh', e_refresh)
        super().delete(**kwargs)
    def update(self, **kwargs):
        """Update, defaulting `refresh` to the module-wide e_refresh flag."""
        kwargs.setdefault('refresh', e_refresh)
        return super().update(**kwargs)
    def save(self, **kwargs):
        """Save, defaulting `refresh` to the module-wide e_refresh flag."""
        kwargs.setdefault('refresh', e_refresh)
        return super().save(**kwargs)
class EpochSecond(Date):
    """elasticsearch_dsl Date field stored as UNIX epoch seconds, always UTC."""

    def __init__(self, *args, **kwargs):
        # Force epoch_second format and UTC regardless of what the caller passed.
        kwargs.pop('default_timezone', None)
        kwargs['format'] = 'epoch_second'
        super().__init__(default_timezone=timezone.utc, *args, **kwargs)

    def _deserialize(self, data):
        if isinstance(data, integer_types):
            return datetime.fromtimestamp(data, tz=timezone.utc)
        raise ValidationException(f'Could not parse epoch second from the value <{data}>')
| 6,928 | 2,211 |
import copy
import logging
import numpy as np
from ipec.cnn.evaluator import Evaluator, CNNEvaluator, initialise_cnn_evaluator
from ipec.cnn.layers import ConvLayer
from ipec.cnn.layers import DisabledLayer
from ipec.cnn.layers import FullyConnectedLayer
from ipec.cnn.layers import PoolingLayer
from ipec.ip.decoder import Decoder
from .chromosome import Chromosome, CNNChromosome
# Defaults for GA population construction. The small values are debug
# settings; the commented numbers are the intended production values.
POPULATION_DEFAULT_PARAMS = {
    'pop_size': 3, #50,
    'chromosome_length': 5, #15,
    'max_full': 2, #5,
    'elitism_rate': 0.5,
    # [rate for interfaces in a chromosome, rate for bits in an interface]
    'mutation_rate': np.asarray([0.1, 0.2]),
    'layers': {
        'conv': ConvLayer(),
        'pooling': PoolingLayer(),
        'full': FullyConnectedLayer(),
        'disabled': DisabledLayer()
    },
    'max_generation': 3, #50
}
def initialise_cnn_population(pop_size=None, chromosome_length=None, max_fully_connected_length=None, elitism_rate=None, mutation_rate=None, layers=None, evaluator=None, max_generation=None):
    """Build and initialise a CNNPopulation, filling omitted arguments from defaults.

    :param pop_size: population size
    :param chromosome_length: length/dimension of each chromosome
    :param max_fully_connected_length: max number of fully-connected layers
    :param elitism_rate: elitism rate
    :param mutation_rate: [interface mutation rate, bit mutation rate]
    :param layers: dict of layer prototypes keyed by conv/pooling/full/disabled
    :param evaluator: fitness evaluator
    :param max_generation: max GA generation
    :return: an initialised CNNPopulation
    """
    defaults = POPULATION_DEFAULT_PARAMS
    pop_size = defaults['pop_size'] if pop_size is None else pop_size
    chromosome_length = defaults['chromosome_length'] if chromosome_length is None else chromosome_length
    max_fully_connected_length = defaults['max_full'] if max_fully_connected_length is None else max_fully_connected_length
    mutation_rate = defaults['mutation_rate'] if mutation_rate is None else mutation_rate
    elitism_rate = defaults['elitism_rate'] if elitism_rate is None else elitism_rate
    max_generation = defaults['max_generation'] if max_generation is None else max_generation
    layers = defaults['layers'] if layers is None else layers
    logging.info('===initialise the PSO population with the following parameters===')
    logging.info('population size: %d, chromosome length: %d, max fully-connected length: %d, max generation: %d', pop_size, chromosome_length, max_fully_connected_length, max_generation)
    return CNNPopulation(pop_size, chromosome_length, max_fully_connected_length, elitism_rate, mutation_rate, layers, evaluator, max_generation).initialise()
class Population:
"""
Population class
"""
def __init__(self, pop_size, chromosome_length, elitism_rate, mutation_rate, layers, evaluator=None, max_generation=None):
    """
    constructor
    :param pop_size: population size
    :type pop_size: int
    :param chromosome_length: the length/dimension of the chromosome
    :type chromosome_length: int
    :param elitism_rate: elitism rate
    :type elitism_rate: float
    :param mutation_rate: mutation rate. [mutation rate for interfaces in a chromosome, mutation rate for bits in an interface]
    :type mutation_rate: numpy.array
    :param layers: a dict of (layer_name, layer) pairs; keys: conv, pooling, full, disabled
    :type layers: dict
    :param evaluator: evaluator to calculate the fitness
    :type evaluator: Evaluator
    :param max_generation: max generation; None or non-positive falls back to the default
    :type max_generation: int
    """
    self.pop_size = pop_size
    self.pop = np.empty(pop_size, dtype=Chromosome)
    self.chromosome_length = chromosome_length
    self.elitism_rate = elitism_rate
    self.mutation_rate = mutation_rate
    self.layers = layers
    # Fixed: the parameter defaults to None, and `None > 0` raises TypeError
    # in Python 3; guard for None before comparing.
    if max_generation is None or max_generation <= 0:
        max_generation = POPULATION_DEFAULT_PARAMS['max_generation']
    self.max_generation = max_generation
    self.evaluator = evaluator
    self.decoder = Decoder()
    self.best_chromosome = None
    self.roulette_proportions = None
def evolve(self):
    """
    evolve the population for self.max_generation generations
    :return: the best chromosome found
    :rtype: Chromosome
    """
    for g in range(self.max_generation):
        logging.info('===start updating population at step-%d===', g)
        # evaluate the first generation as the chromosomes are not evaluated during initialisation
        if g == 0:
            for chromosome in self.pop:
                eval_result = self.evaluator.eval(chromosome)
                # use minus standard deviation which is the less the better
                # use minus number of connections which is the less the better
                chromosome.fitness = (eval_result[0], -eval_result[1], -eval_result[2])
        # generate new pop
        new_pop = np.empty(self.pop_size, dtype=Chromosome)
        new_pop_index = 0
        # add elite chromosomes in the new generation
        elite_chromosomes = self.elitism()
        if elite_chromosomes is not None:
            for chromosome in elite_chromosomes:
                new_chromosome = copy.deepcopy(chromosome)
                new_chromosome.id = new_pop_index
                new_pop[new_pop_index] = new_chromosome
                new_pop_index = new_pop_index + 1
        # generate children (after doing selection, crossover, mutation) in the population
        while new_pop_index < self.pop_size:
            chromosome_1, chromosome_2 = self.select()
            candidate_chromosome = self.crossover(chromosome_1, chromosome_2)
            candidate_chromosome = self.mutate(candidate_chromosome)
            candidate_chromosome.id = new_pop_index
            # Fixed: the candidate itself must be evaluated — the original
            # evaluated the stale loop variable `chromosome` instead.
            eval_result = self.evaluator.eval(candidate_chromosome)
            # use minus standard deviation which is the less the better
            # use minus number of connections which is the less the better
            candidate_chromosome.fitness = (eval_result[0], -eval_result[1], -eval_result[2])
            # update best chromosome against the freshly evaluated candidate
            if self.best_chromosome is None or self.best_chromosome.compare_with(candidate_chromosome) < 0:
                self.best_chromosome = copy.deepcopy(candidate_chromosome)
            logging.info('===fitness of Chromosome-%d at generation-%d: %s===', new_pop_index, g, str(candidate_chromosome.fitness))
            new_pop[new_pop_index] = candidate_chromosome
            new_pop_index = new_pop_index + 1
        # Fixed: adopt the new generation (the original built new_pop but
        # never replaced self.pop) and invalidate the cached roulette wheel.
        self.pop = new_pop
        self.roulette_proportions = None
        logging.info('===fitness of best chromosome at generation-%d: %s===', g, str(self.best_chromosome.fitness))
        logging.info('===finish updating population at generation-%d===', g)
    return self.best_chromosome
def elitism(self):
    """
    GA elitism: pick the top elitism_rate fraction of the population by
    lexicographic fitness (s_0, s_1, s_2), highest values last after sort.
    :return: elitism array of chromosome, or None when the elite count is 0
    :type: numpy.array
    """
    elitism_pop = None
    elitism_amount = int(self.elitism_rate * self.pop_size)
    if elitism_amount > 0:
        # construct a sortable structured array pairing each chromosome
        # with its three fitness components
        dtype = [('chromosome', Chromosome), ('s_0', float), ('s_1', float), ('s_2', float)]
        sortable_pop = np.empty(self.pop_size, dtype=dtype)
        for i in range(self.pop_size):
            fitness = self.pop[i].fitness
            sortable_pop[i] = (self.pop[i], fitness[0], fitness[1], fitness[2])
        # ascending sort: the best individuals end up at the tail
        sorted_pop = np.sort(sortable_pop, order=['s_0', 's_1', 's_2'])
        elitism_pop = np.empty(elitism_amount, dtype=Chromosome)
        # copy the last `elitism_amount` (best) entries into the result
        for i in range(self.pop_size-elitism_amount, self.pop_size):
            elitism_pop[i-(self.pop_size-elitism_amount)] = sorted_pop[i][0]
    return elitism_pop
def select(self):
    """
    select two chromosomes for crossover and mutation

    Spins the roulette twice, re-spinning until the second pick differs
    from the first, so the returned pair is always distinct.

    :return: two unique chromosomes
    :rtype: tuple
    """
    first_index = self.spin_roulette()
    second_index = self.spin_roulette()
    while second_index == first_index:
        second_index = self.spin_roulette()
    return (self.pop[first_index], self.pop[second_index])
def spin_roulette(self):
    """
    Spin the roulette wheel once.

    Lazily builds the cumulative-proportion table, draws a uniform sample
    and returns the first slot whose boundary exceeds the draw; when no
    boundary does, the last chromosome is selected.

    :return: index of the selected chromosome
    :rtype: int
    """
    if self.roulette_proportions is None:
        self.roulette_proportions = self.calculate_roulette_proportions()
    draw = np.random.uniform(0, 1)
    for slot, boundary in enumerate(self.roulette_proportions):
        if draw < boundary:
            return slot
    # Fell past every stored boundary: the draw lands on the last slot.
    return self.pop_size - 1
def calculate_roulette_proportions(self):
    """
    calculate roulette proportions for selection

    Entry i holds the cumulative fitness share of the first i+1
    chromosomes; the last chromosome is selected implicitly by
    spin_roulette when a draw exceeds every stored boundary.

    :return: cumulative selection proportions, one slot per chromosome
    :rtype: numpy.ndarray
    """
    # calculate the accumulated fitness (first objective only)
    accumulated_fitness = 0
    for chromosome in self.pop:
        accumulated_fitness += chromosome.fitness[0]
    # calculate the proportion
    # BUGFIX: the table size was hard-coded to 29, which raises IndexError
    # for pop_size > 29 -- size it from the population instead.
    previous_roulette_point = 0
    self.roulette_proportions = np.zeros(self.pop_size)
    for i in range(self.pop_size-1):
        new_roulette_point = previous_roulette_point + self.pop[i].fitness[0]/accumulated_fitness
        self.roulette_proportions[i] = new_roulette_point
        previous_roulette_point = new_roulette_point
    return self.roulette_proportions
def crossover(self, chromosome_1, chromosome_2):
    """
    crossover

    Copies a random contiguous gene segment from the second parent onto
    a deep copy of the first parent.

    :param chromosome_1: first parent chromosome
    :type chromosome_1: Chromosome
    :param chromosome_2: second parent chromosome
    :type chromosome_2: Chromosome
    :return: candidate chromosome
    :rtype: Chromosome
    """
    offspring = copy.deepcopy(chromosome_1)
    segment_start = np.random.randint(0, self.chromosome_length)
    # Segment always contains at least one gene and never runs past the end.
    segment_length = np.random.randint(1, self.chromosome_length - segment_start + 1)
    for gene in range(segment_start, segment_start + segment_length):
        offspring.x[gene] = chromosome_2.x[gene]
    return offspring
def mutate(self, candidate_chromosome):
    """
    mutation

    For every interface gene: with probability mutation_rate[0] the gene
    is selected, then each bit in the IP's trailing field block is flipped
    with probability mutation_rate[1].

    :param candidate_chromosome: candidate chromosome after crossover
    :type candidate_chromosome: Chromosome
    :return: candidate chromosome
    :rtype: Chromosome
    """
    for i in range(self.chromosome_length):
        interface = candidate_chromosome.x[i]
        rand = np.random.uniform(0, 1)
        # check whether to mutate the interface
        if rand < self.mutation_rate[0]:
            bin_ip_list = list(interface.ip.bin_ip)
            bin_ip_length = len(bin_ip_list)
            field_length = interface.ip_structure.fields_length
            # mutate fields of a specific layer type instead of the entire IP
            # (only the last `field_length` bits are eligible)
            for j in range(bin_ip_length - field_length, bin_ip_length):
                # check whether to mutate the bit
                rand = np.random.uniform(0, 1)
                if rand < self.mutation_rate[1]:
                    bin_ip_list[j] = '0' if bin_ip_list[j] == '1' else '1'
            # write the (possibly) flipped bits back into the interface
            candidate_chromosome.x[i].update_ip_by_binary_string(''.join(bin_ip_list))
            if self.layers is not None:
                # keep the derived subnet/structure consistent with the new IP
                candidate_chromosome.x[i].update_subnet_and_structure(self.layers)
        else:
            continue
    # fix invalid interface after crossover
    candidate_chromosome.fix_invalid_interface()
    return candidate_chromosome
class CNNPopulation(Population):
    """
    CNNPopulation class

    Population of CNNChromosome individuals; adds a cap on the number of
    fully-connected layers on top of the base GA Population.
    """
    def __init__(self, pop_size, chromosome_length, max_fully_connected_length, elitism_rate, mutation_rate, layers, evaluator=None, max_generation=None):
        """
        constructor
        :param pop_size: population size
        :type pop_size: int
        :param chromosome_length: the length/dimension of the chromosome
        :type chromosome_length: int
        :param max_fully_connected_length: the max length of fully-connected layers
        :type max_fully_connected_length: int
        :param elitism_rate: fraction of the population carried over unchanged
        :type elitism_rate: float
        :param mutation_rate: (interface, bit) mutation probabilities
        :type mutation_rate: tuple
        :param layers: a dict of (layer_name, layer) pairs; keys: conv, pooling, full, disabled
        :type layers: dict
        :param evaluator: evaluator to calculate the fitness
        :type evaluator: CNNEvaluator
        :param max_generation: max generation
        :type max_generation: int
        """
        self.max_fully_connected_length = max_fully_connected_length
        super(CNNPopulation, self).__init__(pop_size, chromosome_length, elitism_rate, mutation_rate, layers, evaluator, max_generation)
    def initialise(self):
        """
        initialise the population

        Fills the population with freshly-initialised CNNChromosomes and
        installs a default evaluator when none was supplied.
        """
        # set default evaluator
        if self.evaluator is None:
            self.evaluator = initialise_cnn_evaluator()
        logging.info('===start initialising population')
        for i in range(self.pop_size):
            chromosome = CNNChromosome(i, self.chromosome_length, self.max_fully_connected_length, self.layers).initialise()
            self.pop[i] = chromosome
        logging.info('===finish initialising population')
        return self
| 13,816 | 4,189 |
"""Adjusts for Seed development
Revision ID: 8a480de4de4c
Revises: 7addb7587b1a
Create Date: 2021-07-13 17:16:20.807567
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from limonero.migration_utils import (is_mysql, is_psql, upgrade_actions,
downgrade_actions, get_psql_enum_alter_commands, is_sqlite)
# revision identifiers, used by Alembic.
revision = '8a480de4de4c'
down_revision = '7addb7587b1a'
branch_labels = None
depends_on = None
def upgrade():
    """Widen the storage.type enum (adds HIVE_WAREHOUSE/KAFKA values).

    Dialect-specific: raw ALTER for MySQL, generated enum-alter commands
    for PostgreSQL. NOTE(review): no action is taken for SQLite even
    though is_sqlite is imported -- confirm that is intended.
    """
    if is_mysql():
        op.execute("""
            ALTER TABLE `storage` CHANGE `type` `type` ENUM(
            'CASSANDRA','ELASTIC_SEARCH','HDFS','HIVE', 'HIVE_WAREHOUSE',
            'JDBC', 'KAFKA', 'LOCAL','MONGODB'
            ) CHARSET utf8 COLLATE
            utf8_unicode_ci NOT NULL;""")
    elif is_psql():
        storage_values = ['CASSANDRA','ELASTIC_SEARCH','HDFS',
            'HIVE', 'HIVE_WAREHOUSE', 'JDBC', 'KAFKA', 'LOCAL','MONGODB']
        all_commands = [
            [
                get_psql_enum_alter_commands(['storage'], ['type'],
                    'StorageTypeEnumType', storage_values, 'HDFS'),
                None
            ]
        ]
        upgrade_actions(all_commands)
    # ### end Alembic commands ###
def downgrade():
    """Revert the storage.type enum changes.

    NOTE(review): the MySQL branch keeps 'KAFKA' in the enum while the
    PostgreSQL value list drops it -- confirm the asymmetry is intended.
    """
    if is_mysql():
        op.execute("""
            ALTER TABLE `storage` CHANGE `type` `type` ENUM(
            'CASSANDRA','ELASTIC_SEARCH','HDFS','HIVE', 'HIVE_WAREHOUSE',
            'KAFKA', 'JDBC','LOCAL','MONGODB'
            ) CHARSET utf8 COLLATE
            utf8_unicode_ci NOT NULL;""")
    elif is_psql():
        storage_values = ['CASSANDRA','ELASTIC_SEARCH','HDFS',
            'HIVE', 'HIVE_WAREHOUSE', 'JDBC','LOCAL','MONGODB']
        all_commands = [
            [
                None,
                get_psql_enum_alter_commands(['storage'], ['type'],
                    'StorageTypeEnumType', storage_values, 'HDFS'),
            ]
        ]
        downgrade_actions(all_commands)
| 2,030 | 715 |
import flask
from flask_restful import Resource
from utils import decoraters
from logic.test.checkalive_logic import CheckaliveLogic
class CheckaliveApi(Resource):
    """CheckaliveApi

    /api/checkalive
    """

    def __init__(self):
        # TODO: temporarily hard-coded to 1 (translated from original note)
        self.log_id = 1

    @decoraters.response_format
    def get(self):
        """Run the liveness check; returns (code, payload, log_id)."""
        logic = CheckaliveLogic()
        result, error = logic.checkalive()
        if error:
            return 1, error, self.log_id
        return 0, result, self.log_id
| 537 | 182 |
# Task: write a function that checks whether a name (e.g. "Євген") is in the list.
def is_in_list(l, e):
    """Return True when *e* occurs in *l*, else False.

    The membership test already yields a boolean, so the redundant
    if/else over the boolean is removed.
    """
    return e in l
# NOTE(review): the probe string "Євгенпше" differs from the target name
# "Євген" -- this call prints False; confirm the negative case is intentional.
print(is_in_list(['Ярослав', 'Богдан', 'Катя', 'Євген'], "Євгенпше"))
| 226 | 99 |
# Kamesh Vedula
# Problem: Binary Tree Right Side View
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
def rightSideView(self, root: TreeNode) -> List[int]:
    """Return the values visible from the right side of a binary tree.

    BFS level by level, enqueueing the right child before the left one,
    so the first node dequeued on each level is that level's rightmost
    node. The earlier commented-out O(n)-per-level list-based variant is
    removed as dead code.

    :param root: root node of the tree (None for an empty tree)
    :return: rightmost value of each level, top to bottom
    """
    if root is None:
        return []
    queue = collections.deque()
    queue.append(root)
    right_view = []
    while queue:
        level_size = len(queue)
        for position in range(level_size):
            node = queue.popleft()
            if position == 0:
                # First node out on this level == rightmost node.
                right_view.append(node.val)
            if node.right:
                queue.append(node.right)
            if node.left:
                queue.append(node.left)
    return right_view
| 1,313 | 395 |
#!/usr/bin/env python
# coding: utf-8
from skimage import exposure
from matplotlib import pyplot as plt
from matplotlib import colors as colors
import numpy as np
import matplotlib.image as mpimg
import colorsys
import math
import os
PATH = "C:\\Users\\dekelmeirom\\OneDrive - Technion\\Documents\\university\\pdl_project_res\\"
SAVE_PATH = "C:\\Users\\dekelmeirom\\OneDrive - Technion\\Documents\\university\\pdl_project_res\\"
def rgb2gray(rgb):
    """Collapse an RGB(A) image to luminance using ITU-R BT.601 weights."""
    weights = [0.2989, 0.5870, 0.1140]
    # Only the first three channels participate; any alpha channel is ignored.
    return np.dot(rgb[..., :3], weights)
def gamma_correction(img):
    """Auto gamma-correct img so its mean gray level maps toward mid-gray.

    NOTE(review): the comment below says ``log(mid*255)/log(mean)`` but the
    code computes ``log(mid)/log(mean)``; for float images in [0, 1] (as
    plt.imread returns for PNGs) the code form is the consistent one --
    confirm which was intended for 8-bit inputs.
    """
    gray = rgb2gray(img)
    # compute gamma = log(mid*255)/log(mean)
    mid = 0.5
    mean = np.mean(gray)
    gamma = math.log(mid)/math.log(mean)
    img_gamma = exposure.adjust_gamma(img, gamma)
    return img_gamma
# Batch-apply gamma correction to every file in PATH.
# NOTE(review): SAVE_PATH equals PATH, so generated "*gamma.png" outputs land
# in the input directory and would be re-processed on a second run -- confirm.
for filename in os.listdir(PATH):
    with open(PATH + filename, "rb") as img_file:
        img = plt.imread(img_file)
        img_gamma = gamma_correction(img)
        # filename[:-4] strips the 4-character extension (e.g. ".png").
        plt.imsave(SAVE_PATH + filename[:-4] + "gamma.png", img_gamma)
| 984 | 371 |
import statistics
# Demo: arithmetic mean and median via the statistics module.
datos = [2, 4, 6, 8]
datos2 = [2, 2, 3, 5, 8, 9]

mean_r = statistics.mean(datos)
median_r = statistics.median(datos2)

for value in (mean_r, median_r):
    print(value)
| 182 | 97 |
# Generated by Django 2.0.8 on 2018-12-01 23:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `type` choice field to ItemTemplateField (defaults to 'STR')."""

    dependencies = [
        ('tasks', '0004_remove_annotation_is_done'),
    ]
    operations = [
        migrations.AddField(
            model_name='itemtemplatefield',
            name='type',
            field=models.CharField(choices=[('INT', 'INT'), ('STR', 'STR'), ('BOOL', 'BOOL'), ('LIST', 'LIST')], default='STR', max_length=10),
        ),
    ]
| 491 | 166 |
from evaluate.filter import Filter
from .vcf import VCF
class CoverageFilter(Filter):
    """Filter that flags VCF records whose coverage is below a threshold."""

    def __init__(self, coverage_threshold: float):
        self._coverage_threshold = coverage_threshold

    @property
    def coverage_threshold(self) -> float:
        # Read-only access to the configured minimum coverage.
        return self._coverage_threshold

    def record_should_be_filtered_out(self, record: VCF) -> bool:
        # Strict comparison: records exactly at the threshold are kept.
        return record.coverage < self.coverage_threshold
| 415 | 123 |
import os
import flask
import gigaspoon as gs
app = flask.Flask(__name__)
app.secret_key = os.urandom(24)
class CustomSelect(gs.v.Validator):
    """Validator accepting only values drawn from a fixed option set."""

    def __init__(self, name, options):
        self.name = name
        self._options = set(options)

    def __repr__(self):
        return "%r %r" % (type(self), self._options)

    def populate(self):
        # Exposed to templates as g.<name>_validator.
        return {"options": self._options, "name": self.name}

    def validate(self, form, key, value):
        if value in self._options:
            return
        self.raise_error(key, value)
html = """
<!DOCTYPE HTML>
{% for message in get_flashed_messages() -%}
<pre>{{ message }}</pre>
{%- endfor %}
<form method="POST">
{% autoescape false %}
{{ g.csrf_token_validator.csrf_tag }}
{% endautoescape %}
<select required name="{{ g.user_validator.name }}">
{% for user in g.user_validator.options -%}
<option value="{{ user }}">{{ user }}</option>
{%- endfor %}
<option value="break!">Bad input!</option>
</select>
<input type="submit" value="submit">
</form>
"""
@app.route("/", methods=["GET", "POST"])
@gs.set_methods("POST")
@gs.validator(CustomSelect("user", ["Fred", "George"]))
@gs.validator(gs.v.CSRF())
@gs.base
def index(form):
if form.is_form_mode():
# Method is POST and form fields are valid
flask.flash(repr(form))
return flask.redirect(flask.url_for('index'))
return flask.render_template_string(html)
@app.errorhandler(gs.e.FormError)
def handle_form_error(exc):
    """Turn validation failures into an escaped 400 response."""
    return flask.escape(str(exc)), 400
if __name__ == "__main__":
app.run()
| 1,632 | 559 |
import time
import sys
import logging
# Import PS-Drone
import cv2
import numpy as np
import gdk.config as config
logger = logging.getLogger(__name__)
class CheckerBoardTracker():
    """Track a chessboard marker in video frames and derive control errors."""

    def __init__(self):
        # No marker seen yet.
        self.tracking = False

    def update(self, frame):
        """Detect the chessboard in `frame`; update centroid/corners state.

        Returns True when the pattern was found this frame.
        """
        self.tracking, self.corners = self.__get_corners_from_marker(frame)
        if self.tracking:
            self.centroid = self.__get_centroid_from_corners()
            self.outer_corners = self.__get_main_corners_from_corners()
            self.height, self.width = frame.shape[:2]
        return self.tracking

    def get_centroid_error(self):
        """Pixel offset of the marker centroid from the configured track
        point; implicitly returns None when not tracking."""
        if self.tracking:
            errx = (self.centroid[0][0] - config.XY_TRACK_POINT[0])#/(config.XY_TRACK_POINT[0])
            erry = (self.centroid[0][1] - config.XY_TRACK_POINT[1])#/(config.XY_TRACK_POINT[1])
            return errx, erry

    def get_distance_error(self):
        """Relative deviation of the marker's apparent size from
        config.BEST_DISTANCE; implicitly returns None when not tracking."""
        if self.tracking:
            # Average the two short and two long outer edges of the pattern.
            short_1 = np.linalg.norm(self.outer_corners[0]-self.outer_corners[1])
            short_2 = np.linalg.norm(self.outer_corners[3]-self.outer_corners[2])
            long_1 = np.linalg.norm(self.outer_corners[1]-self.outer_corners[3])
            long_2 = np.linalg.norm(self.outer_corners[2]-self.outer_corners[0])
            avg_short = (short_1+short_2)/2.0
            avg_long = (long_1+long_2)/2.0
            dif_short = (
                avg_short - config.BEST_DISTANCE[0])/config.BEST_DISTANCE[0]
            dif_long = (avg_long - config.BEST_DISTANCE[1])/config.BEST_DISTANCE[1]
            return (dif_short+dif_long)/2.0

    def __get_main_corners_from_corners(self):
        # Indices 0/3/16/19 pick the four extreme corners -- assumes a
        # pattern yielding 20 inner corners (config.PATTERN_SIZE); TODO confirm.
        return np.array([self.corners[0][0], self.corners[3][0], self.corners[16][0], self.corners[19][0]])

    def __get_centroid_from_corners(self):
        # Mean of all detected corner coordinates.
        return np.sum(self.corners, 0) / float(len(self.corners))

    def __get_corners_from_marker(self, frame):
        """Run OpenCV chessboard detection on a grayscale copy of frame."""
        corners = None
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(
            gray, config.PATTERN_SIZE, corners, cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_NORMALIZE_IMAGE+cv2.CALIB_CB_FAST_CHECK)
        npcorners = np.array(corners)
        return found, npcorners
| 2,244 | 871 |
"""
* Copyright 2018 University of Liverpool
* Author: John Heap, Computational Biology Facility, UoL
* Based on original scripts of Sara Silva Pereira, Institute of Infection and Global Health, UoL
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
"""
import subprocess
import pandas as pd
import re
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# Phylotype labels used as row labels in the frequency tables below.
pList = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14', 'P15']
# Appended to every shell command; the commented value redirects output to a log file.
quietString = "" #"">> Vap_log.txt 2>&1"
def transcriptMapping(inputname, strain, forwardFN,reverseFN):
    """Map paired-end reads against the strain reference with bowtie2,
    producing <inputname>.sam.

    SECURITY NOTE(review): the command is built by string concatenation and
    run with shell=True -- unsanitised file names would allow shell
    injection; confirm inputs are trusted.
    """
    #where is our Reference data -
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path+"/data/Reference/Tc148" #default
    if strain == "Tc148":
        refName = dir_path+"/data/Reference/Tc148"
    if strain == "IL3000":
        refName = dir_path+"/data/Reference/IL3000"
    #argString = "bowtie2 -x Refe4rence/IL3000 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam" #>log.txt
    #argString = "bowtie2 -x Reference/Tc148 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam" #>log.txt
    argString = "bowtie2 -x "+refName+" -1 "+forwardFN+" -2 "+reverseFN+" -S "+inputname+".sam"+quietString #>log.txt
    #print(argString)
    returncode = subprocess.call(argString, shell=True)
def processSamFiles(inputname):
    """Convert <inputname>.sam to a sorted, indexed BAM using samtools
    (view -> sort -> index), writing alongside the current directory."""
    #debug use a mapping sam file we have already found
    #dir_path = os.path.dirname(os.path.realpath(__file__))
    #bugName = dir_path+"/data/T_Test" #defasult
    cur_path = os.getcwd()
    samName = cur_path+"/"+inputname
    #argString = "samtools view -bS "+bugName+" > "+inputname+".bam"
    argString = "samtools view -bS "+inputname+".sam > "+samName+".bam"+quietString
    #print(argString)
    returncode = subprocess.call(argString, shell=True)
    #argString = "samtools sort "+bugName+" -o "+inputname+".sorted"
    argString = "samtools sort "+samName+".bam -o "+samName+".sorted"+quietString
    #print("argstring = "+argString)
    returncode = subprocess.call(argString, shell=True)
    #argString = "samtools index "+bugName+".sorted "+inputname+".sorted.bai"
    argString = "samtools index "+samName+".sorted "+samName+".sorted.bai"+quietString
    #print("argstring = " + argString)
    returncode = subprocess.call(argString, shell=True)
def transcriptAbundance(inputname, strain):
    """Quantify transcript abundance with cufflinks against the strain's
    annotation, writing results into <inputname>.cuff/."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path + "/data/Reference/ORFAnnotation.gtf" # default
    if strain == "Tc148":
        refName = dir_path + "/data/Reference/ORFAnnotation.gtf"
    if strain == "IL3000":
        refName = dir_path + "/data/Reference/IL3000.gtf"
    #argString = "cufflinks -G Reference/IL3000.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
    #argString = "cufflinks -G Reference/ORFAnnotation.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
    argString = "cufflinks -q -G "+refName+" -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"+quietString
    returncode = subprocess.call(argString, shell = True)
def convertToFasta(inputName, strain): #equivalent to Sara's awk script
    """Subset the strain's protein FASTA to genes expressed (FPKM > 0) in
    the cufflinks output, writing <inputName>_6frame.fas.

    Returns the filtered cufflinks DataFrame (gene_id/FPKM rows).
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path + "/data/Reference/ORFAnnotation.gtf" # default
    if strain == "Tc148":
        refName = dir_path + "/data/Reference/148_prot.fasta"
    if strain == "IL3000":
        refName = dir_path + "/data/Reference/IL3000_prot.fasta"
    cuff_df = pd.read_csv(inputName+".cuff/genes.fpkm_tracking", sep='\t')
    cuff_df = cuff_df[(cuff_df['FPKM'] > 0)]
    cuff_df.to_csv("cuffTest.csv")
    gene_id_List = cuff_df['gene_id'].tolist()
    #print(gene_id_List)
    #print ("Found from 8880="+str(found))
    # need to load in IL3000_prot.fasta
    # for each line with >TcIL3000_1_1940
    # search within cuff_df[gene_id] for match
    # add it to the outfile. (need to save it as used by hmmer later
    number = 0
    all = 0
    with open(inputName+"_6frame.fas", 'w') as outfile:
        ref = open(refName,'r')
        #ref = open(r"Reference/IL3000_prot.fasta",'r')
        n = 0
        line = ref.readline()
        while line:
            if line[0] == '>':
                all = all+1
                ln = line[1:] #remove >
                ln = ln.rstrip() #remove /n /r etc
                #print (ln)
                if ln in gene_id_List:
                    # header matches an expressed gene: copy header plus the
                    # following sequence lines up to the next header
                    number = number+1
                    outfile.write(line)
                    line = ref.readline()
                    if line:
                        while line[0] != '>':
                            outfile.write(line)
                            line=ref.readline()
                            if not line:
                                break;
                else:
                    line = ref.readline()
            else:
                line =ref.readline()
        ref.close()
    print(str(len(gene_id_List))+":"+str(number)+" from "+str(all))
    return cuff_df
def HMMerMotifSearch(name, strain, cuff_df):
    """Run hmmsearch for each phylotype motif over <name>_6frame.fas, then
    aggregate hits into per-phylotype unique-gene counts and FPKM-weighted
    counts.

    Returns (countList, weightList); each has 15 phylotype entries plus a
    final total element.
    """
    motifs = ['1', '2a', '2b', '3', '4a', '4b', '4c', '5', '6', '7', '8a', '8b', '9a', '9b',
              '9c', '10a', '10b', '11a', '11b', '12', '13a', '13b', '13c', '13d', '14', '15a', '15b', '15c']
    dir_path = os.path.dirname(os.path.realpath(__file__))
    phylopath = dir_path + "/data/Motifs/Phylotype"
    lineCounts = []
    compoundList = []
    for m in motifs:
        argString = "hmmsearch "+phylopath + m + ".hmm " + name + "_6frame.fas > Phy" + m + ".out"
        print(argString)
        subprocess.call(argString, shell=True)
        hmmResult = open("Phy" + m + ".out", 'r')
        regex = r"Tc148[0-9]{1,8}"
        if strain == "Tc148":
            regex = r"Tc148[0-9]{1,8}"
        if strain == "IL3000":
            regex = r"TcIL3000_[0-9]{1,4}_[0-9]{1,5}"
        n = 0
        outList = []
        # NOTE: the loop variable m below shadows the motif name above;
        # harmless here because m is not reused after this inner loop.
        for line in hmmResult:
            m = re.search(regex, line)
            if m:
                outList.append(""+m.group())
                n += 1
            if re.search(r"inclusion", line):
                # hits past the inclusion threshold are not counted
                print("inclusion threshold reached")
                break
        compoundList.append(outList)
        lineCounts.append(n)
        hmmResult.close()
    #print(lineCounts)
    #print(cuff_df)
    # Motifs are grouped into the 15 phylotypes: e.g. the first group is 1
    # motif, the second merges 2 motifs, etc.
    concatGroups = [1, 2, 1, 3, 1, 1, 1, 2, 3, 2, 2, 1, 4, 1, 3]
    countList = []
    weightList = []
    countIndex = 0
    totalCount = 0
    totalWeigth = 0
    for c in concatGroups:
        a = []
        weight = []
        for n in range(0, c):
            a = a + compoundList.pop(0)
        t = set(a)
        countList.append(len(t))
        wa = 0
        for w in t:
            # weight each unique gene by its cufflinks FPKM value
            wt = cuff_df.loc[cuff_df['gene_id'] == w, 'FPKM'].iloc[0]
            #print(w)
            #print(wt)
            wa = wa+wt
        weightList.append(wa)
        totalWeigth+=wa
        totalCount += len(t)
    countList.append(totalCount)
    weightList.append(totalWeigth)
    #print(countList)
    #print("--------")
    #print(weightList)
    #print("--------")
    return countList,weightList
def relativeFrequencyTable(countList, name, htmlresource):
    """Write <name>_t_relative_frequency.csv under htmlresource and return
    the 15 phylotype proportions (countList[15] holds the total count)."""
    total = float(countList[15])
    relFreqList = [countList[i] / total for i in range(15)]
    table = pd.DataFrame({'Phylotype': pList, 'Relative Frequency': relFreqList})
    table.to_csv(htmlresource + "/" + name + "_t_relative_frequency.csv")
    return relFreqList  # 0-14 = p1-p15 proportions
def weightedFrequencyTable(countList, name, htmlresource):
    """Write <name>_t_weighted_frequency.csv under htmlresource and return
    the 15 FPKM-weighted proportions (countList[15] holds the total)."""
    total = float(countList[15])
    relFreqList = [countList[i] / total for i in range(15)]
    table = pd.DataFrame({'Phylotype': pList, 'Weighted Frequency': relFreqList})
    table.to_csv(htmlresource + "/" + name + "_t_weighted_frequency.csv")
    return relFreqList  # 0-14 = p1-p15 proportions
def createStackedBar(name,freqList,strain,pdf,html_resource):
    """Plot the sample's phylotype proportions next to the reference genome
    VAP as two stacked bars; saves stackedbar.png (and .pdf on request)."""
    palette = ["#0000ff", "#6495ed", "#00ffff", "#caff70",
               "#228b22", "#528b8b", "#00ff00", "#a52a2a",
               "#ff0000", "#ffff00", "#ffa500", "#ff1493",
               "#9400d3", "#bebebe", "#000000", "#ff00ff"]
    # Reference genome VAPs for the two supported strains.
    VAP_148 = [0.072, 0.032, 0.032, 0.004, 0.007,
               0.005, 0.202, 0.004, 0.006, 0.014,
               0.130, 0.133, 0.054, 0.039, 0.265]
    VAP_IL3000 = [0.073, 0.040, 0.049, 0.018, 0.060,
                  0.055, 0.054, 0.025, 0.012, 0.060,
                  0.142, 0.100, 0.061, 0.078, 0.172]
    # NOTE: the hex palette above is immediately replaced by tab20 colours.
    cmap = plt.cm.get_cmap('tab20')
    palette = [cmap(i) for i in range(cmap.N)]
    if strain == "Tc148":
        VAPtable = VAP_148
        VAPname='Tc148\nGenome VAP'
    if strain == "IL3000":
        VAPtable = VAP_IL3000
        VAPname= 'IL3000\nGenome VAP'
    width = 0.35 # the width of the bars: can also be len(x) sequence
    plots = []
    fpos = 0
    vpos = 0
    # Stack each phylotype segment on top of the running totals.
    for p in range(0, 15):
        tp = plt.bar(0, freqList[p], width, color= palette[p], bottom = fpos)
        fpos +=freqList[p]
        tp = plt.bar(1, VAPtable[p], width, color= palette[p], bottom = vpos)
        vpos +=VAPtable[p]
        plots.append(tp)
    plt.xticks([0,1],[name,VAPname])
    plt.legend(plots[::-1],['p15','p14','p13','p12','p11','p10','p9','p8','p7','p6','p5','p4','p3','p2','p1'])
    title = "Figure Legend: The transcriptomic Variant Antigen Profile of $\itTrypanosoma$ $\itcongolense$ estimated as phylotype " \
            "proportion adjusted for transcript abundance and the reference genomic Variant Antigen Profile. " \
            "\nData was produced with the 'Variant Antigen Profiler' (Silva Pereira et al., 2019)."
    #plt.title(title, wrap="True")
    #plt.text(-0.2, -0.05, title, va="top", transform=ax.transAxes, wrap="True")
    plt.text(-0.3, -0.15, title, va="top", wrap="True")
    plt.tight_layout(pad=1.5)
    plt.subplots_adjust(bottom = 0.3,top=0.99,left=0.125,right=0.9,hspace=0.2,wspace=0.2)
    plt.savefig(html_resource + "/stackedbar.png")
    if pdf == 'PDF_Yes':
        plt.savefig(html_resource + "/stackedbar.pdf")
    #plt.show()
def createHTML(name,htmlfn,htmlresource,freqList,weightList):
    """Write the report page (frequency table + stacked-bar image) to htmlfn.

    NOTE(review): the docstring-style comment below says images must already
    be in htmlresource -- this function only writes the HTML file itself.
    """
    #assumes imgs are heatmap.png, dheatmap.png, vapPCA.png and already in htmlresource
    htmlString = r"<html><title>T.congolense VAP</title><body><div style='text-align:center'><h2><i>Trypanosoma congolense</i> Variant Antigen Profile</h2><h3>"
    htmlString += name
    htmlString += r"<br>Transcriptomic Analysis</h3></p>"
    htmlString += "<p style = 'margin-left:20%; margin-right:20%'>Table Legend: Variant Antigen Profiles of a transcriptome of <i>Trypanosoma congolense</i> estimated as phylotype proportion. " \
                  "Weighted frequency refers to the phylotype proportion based transcript abundance. " \
                  "Data was produced with the 'Variant Antigen Profiler' (Silva Pereira et al., 2019).</p> "
    htmlString += r"<style> table, th, tr, td {border: 1px solid black; border-collapse: collapse;}</style>"
    htmlString += r"<table style='width:50%;margin-left:25%;text-align:center'><tr><th>Phylotype</th><th>Relative Frequency</th><th>Weighted Frequency</th></tr>"
    tabString = ""
    # flush out table with correct values
    for i in range(0, 15):
        f = format(freqList[i], '.4f')
        w = format(weightList[i], '.4f')
        tabString += "<tr><td>phy" + str(i + 1) + "</td><td>" + f + "</td><td>" + w + "</td></tr>"
    htmlString += tabString + "</table><br><br><br><br><br>"
    htmlString += r"<p> <h3>Stacked Bar chart of Phylotype Frequency</h3> The 'weighted' relative frequency of each phylotype alongside the VAP of selected strain.</p>"
    imgString = r"<img src = 'stackedbar.png' alt='Stacked bar chart of phylotype variation' style='max-width:100%'><br><br>"
    htmlString += imgString
    # htmlString += r"<p><h3>The Deviation Heat Map and Dendogram</h3>The phylotype variation expressed as the deviation from your sample mean compared to the model dataset</p>"
    # imgString = r"<img src = 'dheatmap.png' alt='Deviation Heatmap' style='max-width:100%'><br><br>"
    # htmlString += imgString
    # htmlString += r"<p><h3>The Variation PCA plot</h3>PCA analysis corresponding to absolute variation. Colour coded according to location</p>"
    # imgString = r"<img src = 'vapPCA.png' alt='PCA Analysis' style='max-width:100%'><br><br>"
    # htmlString += imgString + r"</div></body></html>"
    with open(htmlfn, "w") as htmlfile:
        htmlfile.write(htmlString)
#argdict = {'name':2, 'pdfexport': 3, 'strain': 4, 'forward': 5, 'reverse': 6, 'html_file': 7, 'html_resource': 8}
def transcriptomicProcess(args,dict):
    """Run the full transcriptomic VAP pipeline end to end.

    `dict` maps logical argument names to indices in `args` (see the
    argdict comment above); NOTE(review): the parameter name shadows the
    builtin ``dict``.
    """
    transcriptMapping(args[dict['name']], args[dict['strain']], args[dict['forward']], args[dict['reverse']]) #uses bowtie
    processSamFiles(args[dict['name']]) #uses samtools
    transcriptAbundance(args[dict['name']],args[dict['strain']]) #uses cufflinks -> ?.cuff/*.*
    cuff_df = convertToFasta(args[dict['name']],args[dict['strain']])
    countList, weightList = HMMerMotifSearch(args[dict['name']],args[dict['strain']], cuff_df)
    relFreqList = relativeFrequencyTable(countList,args[dict['name']],args[dict['html_resource']])
    relWeightList = weightedFrequencyTable(weightList,args[dict['name']],args[dict['html_resource']])
    createStackedBar(args[dict['name']],relWeightList, args[dict['strain']],args[dict['pdfexport']],args[dict['html_resource']])
    createHTML(args[dict['name']],args[dict['html_file']],args[dict['html_resource']], relFreqList, relWeightList)
if __name__ == "__main__":
#print("Commencing Transcript Mapping")
#transcriptMapping("T_Test", "Transcripts.1","Transcripts.2")
#print("Processimg Sam Files")
#processSamFiles("T_Test")
#print("Assessing Transcript Abundance")
#transcriptAbundance("T_Test")
#print ("Converting to Fasta Subset")
#cuff_df = convertToFasta("T_Test")
#print("Commencing HMMer search")
#countList, weightList = HMMerMotifSearch("T_Test",cuff_df)
#relativeFrequencyTable(countList,'T_Test')
#weightedFrequencyTable(weightList,'T_Test')
relFreqList = [0.111842105,0.059210526,0.026315789,0.013157895,
0.006578947,0.013157895,0.032894737,0.019736842,
0.039473684,0.046052632,0.217105263,0.065789474,
0.151315789,0.059210526,0.138157895]
relWeightList = [0.07532571,0.05900545,0.009601452,0.042357532,0.01236219,0.001675663,0.04109726,
0.097464248,0.057491666,0.05826875,0.279457473,0.070004772,0.065329007,0.085361298,0.045197529]
createStackedBar('T_Test',relWeightList, 'Tc148','PDF_Yes','results')
createHTML("t_test","results/t_test.html","results",relFreqList,relWeightList)
| 15,402 | 6,002 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the `is_paused_errors` boolean flag to Node (defaults to False)."""

    dependencies = [
        ('node', '0002_node_pause_reason'),
    ]
    operations = [
        migrations.AddField(
            model_name='node',
            name='is_paused_errors',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
| 442 | 136 |
################################################################
# Author: Ronell Bresler
# Module: VideoLineDetect.py
#
#
# References:
# https://www.analyticsvidhya.com/blog/2020/05/tutorial-real-time-lane-detection-opencv/
# https://towardsdatascience.com/tutorial-build-a-lane-detector-679fd8953132
# https://medium.com/computer-car/udacity-self-driving-car-nanodegree-project-1-finding-lane-lines-9cd6a846c58c
# https://campushippo.com/lessons/detect-highway-lane-lines-with-opencv-and-python-21438a3e2
# https://www.youtube.com/watch?v=G0cHyaP9HaQ
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
################################################################
import cv2
import matplotlib.pyplot as plt
import numpy as np
class Inputfile:
    """Mutable bundle of video-capture state shared across the pipeline."""

    def __init__(self, cap, height, width, frame):
        self.cap = cap        # video capture handle
        self.height = height  # current frame height in pixels
        self.width = width    # current frame width in pixels
        self.frame = frame    # most recently read frame
def main():
    """Read frames from the sample video, overlay detected lane lines,
    and display them until the stream ends or 'q' is pressed."""
    inputfile = Inputfile(cv2.VideoCapture('SampleIMG/gmod2.mp4'), 0, 0, 0)
    while inputfile.cap.isOpened():
        ret, frame = inputfile.cap.read()
        if not ret:
            # End of stream / read failure: stop instead of crashing on
            # frame.shape below (frame is None when ret is False).
            break
        inputfile.frame = frame
        inputfile.height = inputfile.frame.shape[0]
        inputfile.width = inputfile.frame.shape[1]
        frame1 = One_frame(inputfile)
        cv2.imshow('frame', frame1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # BUGFIX: the original called release() on an undefined name `cap`
    # (NameError on exit); release the capture held by inputfile instead.
    inputfile.cap.release()
    cv2.destroyAllWindows()
################################################################
def One_frame(inputfile):
    """Run the lane-detection pipeline on the current frame and return it
    with the detected line segments drawn on."""
    region_of_interest_vertices = Set_region_of_interest_vertices(inputfile.height, inputfile.width)
    # Canny filter
    canny_edges = Canny_edge_detector(inputfile.frame)
    # Crop img with roi
    cropped_image = Region_of_interest(canny_edges, np.array([region_of_interest_vertices], np.int32), inputfile.height, inputfile.width)
    # Probabilistic Hough transform over the masked edge image.
    lines = cv2.HoughLinesP(cropped_image,
                            rho=6,
                            theta=np.pi/180,
                            threshold=160,
                            lines=np.array([]),
                            minLineLength=40,
                            maxLineGap=25)
    return Draw_lines(inputfile.frame, lines)
################################################################
def Canny_edge_detector(frame):
    """Grayscale -> 5x5 Gaussian blur -> Canny edge map of a frame.

    BUGFIX: the original computed the Gaussian blur and then ran Canny on
    the unblurred grayscale image, silently discarding `blur`; Canny now
    runs on the blurred image, matching the referenced lane-detection
    tutorials.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny_image = cv2.Canny(blur, 100, 200)
    return canny_image
################################################################
def Region_of_interest(img, vertices, height, width):
    """Mask img to the polygon described by vertices.

    NOTE(review): height/width are accepted but unused; kept only for
    caller compatibility.
    """
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
################################################################
def Draw_lines(img, lines):
    """Draw Hough line segments onto img in green and return it.

    BUGFIX: cv2.HoughLinesP returns None when no segments are found; the
    original crashed iterating None. The frame is now returned untouched
    in that case.
    """
    if lines is None:
        return img
    color = [0, 255, 0]  # green
    thickness = 10
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    return img
################################################################
def Set_region_of_interest_vertices(height, width):
    """Triangular ROI: bottom-left corner, near-centre apex, bottom-right."""
    apex = (round(width / 1.9), round(height / 1.9))
    return [(0, height), apex, (width, height)]
if __name__ == "__main__":
main() | 3,482 | 1,227 |
from .raffle import Raffle
def setup(bot):
    """Extension entry point: register the Raffle cog on the given bot."""
    bot.add_cog(Raffle(bot))
import os
__version__ = "0.9.2"
root_dir = os.path.dirname(os.path.abspath(__file__))
| 88 | 40 |
import types
from tf.advanced.helpers import dh
from tf.advanced.find import loadModule
from tf.advanced.app import App
def transform_prime(app, n, p):
    """Render a prime count p as that many apostrophes ('' when p is falsy)."""
    if not p:
        return ""
    return "'" * int(p)
def transform_ctype(app, n, t):
    """Map a cluster type name to its display sigil ('' for unknown types)."""
    sigils = {
        "uncertain": "?",
        "properName": "=",
        "supplied": ">",
    }
    return sigils.get(t, "")
def transform_atf(app, n, a):
    """Render node n as flagged ATF via the app's atfFromSign helper."""
    return app.atfFromSign(n, flags=True)
class TfApp(App):
    """Text-Fabric advanced app wiring in ATF transforms and imagery support.

    NOTE: methods follow this package's convention of naming the instance
    parameter ``app`` instead of ``self``.
    """
    def __init__(app, *args, silent=False, **kwargs):
        # Bind the module-level transform_* helpers as instance methods.
        app.transform_ctype = types.MethodType(transform_ctype, app)
        app.transform_prime = types.MethodType(transform_prime, app)
        app.transform_atf = types.MethodType(transform_atf, app)
        # Load the ATF helper module and expose its API on this app.
        atf = loadModule("atf", *args)
        atf.atfApi(app)
        app.atf = atf
        super().__init__(*args, silent=silent, **kwargs)
        # Imagery support (photos / lineart), fetched on demand.
        app.image = loadModule("image", *args)
        app.image.getImagery(app, silent, checkout=kwargs.get("checkout", ""))
        app.reinit()
    def reinit(app):
        """(Re)install the custom display hooks for signs, quads and clusters."""
        customMethods = app.customMethods
        customMethods.afterChild.clear()
        customMethods.afterChild.update(quad=app.getOp)
        customMethods.plainCustom.clear()
        customMethods.plainCustom.update(
            sign=app.plainAtfType, quad=app.plainAtfType, cluster=app.plainAtfType,
        )
        customMethods.prettyCustom.clear()
        customMethods.prettyCustom.update(
            case=app.caseDir, cluster=app.clusterBoundaries, comments=app.commentsCls
        )
    def cdli(app, n, linkText=None, asString=False):
        """Render (asString=False) or return (asString=True) a CDLI link for node n."""
        (nType, objectType, identifier) = app.image.imageCls(app, n)
        if linkText is None:
            linkText = identifier
        result = app.image.wrapLink(linkText, objectType, "main", identifier)
        if asString:
            return result
        else:
            dh(result)
    # PRETTY HELPERS
    def getGraphics(app, isPretty, n, nType, outer):
        """Return lineart HTML for n when it is not embedded in a parent quad."""
        api = app.api
        F = api.F
        E = api.E
        result = ""
        # Only outermost signs/quads get their own lineart rendering.
        isOuter = outer or (all(F.otype.v(parent) != "quad" for parent in E.sub.t(n)))
        if isOuter:
            width = "2em" if nType == "sign" else "4em"
            height = "4em" if nType == "quad" else "6em"
            theGraphics = app.image.getImages(
                app,
                n,
                kind="lineart",
                width=width,
                height=height,
                _asString=True,
                withCaption=False,
                warning=False,
            )
            if theGraphics:
                result = f"<div>{theGraphics}</div>" if isPretty else f" {theGraphics}"
        return result
    def lineart(app, ns, key=None, asLink=False, withCaption=None, **options):
        """Fetch lineart images for the node(s) ns."""
        return app.image.getImages(
            app,
            ns,
            kind="lineart",
            key=key,
            asLink=asLink,
            withCaption=withCaption,
            **options,
        )
    def photo(app, ns, key=None, asLink=False, withCaption=None, **options):
        """Fetch photographs for the node(s) ns."""
        return app.image.getImages(
            app,
            ns,
            kind="photo",
            key=key,
            asLink=asLink,
            withCaption=withCaption,
            **options,
        )
    def imagery(app, objectType, kind):
        """Set of identifiers having imagery of the given kind for objectType."""
        return set(app._imagery.get(objectType, {}).get(kind, {}))
| 3,407 | 1,054 |
import logging
logger = logging.getLogger( __name__ )
import copy
import tempfile
import os
import jinja2
import yaml
##
# Interface functions for Hierarchical Maps (hmaps)
# which are just dictionaries-of-dictionaries :)
# Delimiters used by jinja2 expression substitution ("handlebars").
TEMPLATE_HANDLEBAR_START = "{{"
TEMPLATE_HANDLEBAR_END = "}}"
# Reserved hmap key under which generated jinja `set` statements are stored.
JINJA_VARIABLE_KEY = "_"
##============================================================================
##
# Returns true iff the given object is a structured key with
# given delimiter
def is_structured_key(x, delim='/'):
    """Return True iff *x* is a string containing the delimiter *delim*."""
    if not isinstance(x, str):
        return False
    return delim in x
##============================================================================
##
# Convert from a structured key to a path.
# A structured key is just a delimited single-string key
# much like a file system path or url :)
def structured_key_to_path(sk, delim='/'):
    """Split the structured key *sk* on *delim* into a path list.

    Each path element that parses as an integer is converted to int
    (so "a/0/b" becomes ["a", 0, "b"]).
    """
    path = []
    for part in sk.split(delim):
        try:
            path.append(int(part))
        except ValueError:
            path.append(part)
    return path
##============================================================================
##
# Take a path of a structured key and return a path
def ensure_path(sk_or_path, delim='/'):
    """Normalize *sk_or_path* to a path list.

    Strings are split as structured keys; anything else is returned as-is.
    """
    if not isinstance(sk_or_path, str):
        return sk_or_path
    return structured_key_to_path(sk_or_path, delim=delim)
##============================================================================
##
# Traverse a hierarchical map (dict of dict) structure with a path
# (a list of keys).
# This will return the parent dictionary and key for the last
# item in the path or None,None if the path is not valid
#
# This will *change* the given hmap (potentially) since it will
# *create* the hmap structure down the path if it was not
# previously created in the hmap
def hmap_probe(hmap, path):
    """Walk *hmap* along *path*, returning (parent_dict, last_key).

    Returns (None, None) when *hmap* or *path* is None/empty. Intermediate
    dictionaries are created on demand, so this may mutate *hmap*.
    """
    path = ensure_path(path)
    if hmap is None or path is None or len(path) < 1:
        return None, None
    head = path[0]
    rest = path[1:]
    if not rest:
        return hmap, head
    # Create the next level lazily before descending.
    if head not in hmap:
        hmap[head] = {}
    return hmap_probe(hmap[head], rest)
##============================================================================
##
# Get the value for a path from an hmap
# Or returns the given default value.
# This may change the given hmap by probing it.
def hmap_get(hmap, path, default):
    """Fetch the value at *path* from *hmap*, or *default* when unset.

    Probing may create intermediate levels, so this can mutate *hmap*.
    """
    parent, leaf = hmap_probe(hmap, path)
    if parent is not None and leaf in parent:
        return parent[leaf]
    return default
##============================================================================
##
# Sets the value of the given path in an hmap to the
# given value.
# This will create the path layers if need be
def hmap_set(hmap, path, value):
    """Store *value* at *path* in *hmap*, creating levels as needed.

    Returns the previous value at that path (None when there was none).
    Raises ValueError when the probe fails (typically a None hmap).
    """
    parent, leaf = hmap_probe(hmap, path)
    if parent is None:
        raise ValueError( "Could not probe hmap, returned None. This usually means that the hmap itself was None!" )
    previous = parent.get(leaf, None)
    parent[leaf] = value
    return previous
##============================================================================
##
# returns true if the given path has a set value in the given hmap
def hmap_has_path(hmap, path):
    """Return True iff *path* resolves to a set value in *hmap*.

    Probing may create intermediate levels, so this can mutate *hmap*.

    Bug fix: the original passed the ``hmap_probe`` function object itself
    as the map to probe (``hmap_probe(hmap_probe, path)``), so every call
    raised or answered against the wrong object; it must probe *hmap*.
    """
    node, key = hmap_probe(hmap, path)
    return node is not None and key in node
##============================================================================
##============================================================================
##
# Given an hmap that *may* have structured keys as keys,
# returns a new hmap in which the structured keys are resolved into
# an actual nested structure (so no more keys are structured keys)
#
# The resulting hmap *may* share structure with the input hmap
def resolve_structured_keys(hmap, delim='/'):
    """Expand structured keys ("a/b/c") in *hmap* into nested dictionaries.

    Returns a new hmap; the result may share substructure with the input.

    Bug fixes:
      * Sibling structured keys with a common prefix (e.g. "a/b" and "a/c")
        no longer clobber each other — the original unconditionally assigned
        ``temp_map[p] = {}`` at every level, discarding previously resolved
        entries; ``setdefault`` preserves them.
      * *delim* is now forwarded to is_structured_key / ensure_path instead
        of silently falling back to '/'.
    """
    base = {}
    for key, value in hmap.items():
        # Recurse into dict values regardless of the key's shape.
        if isinstance(value, dict):
            value = resolve_structured_keys(value, delim=delim)
        if not is_structured_key(key, delim=delim):
            # Plain key: carry the value over unchanged.
            base[key] = value
        else:
            # Structured key: build (or reuse) the nested levels, then
            # hang the value off the final path component.
            path = ensure_path(key, delim=delim)
            node = base
            for p in path[:-1]:
                node = node.setdefault(p, {})
            node[path[-1]] = value
    return base
##============================================================================
##============================================================================
##============================================================================
##
# Returns true iff the given object does not have any free variables
# (which are template {{ }} handlebar slots) in it
def has_free_variables(x):
    """Return True iff *x* contains no unresolved template handlebars.

    (Despite the name, True means "free of variables", matching the
    behavior of the string/atom branch and the module comment above.)

    Bug fix: the original called ``any(has_free_variables, x)`` — ``any``
    takes a single iterable, so every list/tuple/dict input raised
    TypeError. Containers are now checked by recursing over their
    elements, and the container result is consistent with the atom
    branch: a container is variable-free iff all of its items are.
    """
    if isinstance(x, (list, tuple)):
        return all(has_free_variables(item) for item in x)
    if isinstance(x, dict):
        # Each item is a (key, value) tuple; the tuple branch checks both.
        return all(has_free_variables(item) for item in x.items())
    s = str(x)
    return TEMPLATE_HANDLEBAR_START not in s and TEMPLATE_HANDLEBAR_END not in s
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##
# Resolves the free variables within the hmap.
# This does a global resolve on all the free variables since
# the templates are treated globally
#
# Returns a new parse state with given parse state as parent
def resolve_free_variables( parse_state, template_context ):
    """Globally resolve template handlebar variables in parse_state.hmap.

    Pipeline:
      1. Rewrite 'vars' nodes into jinja ``set`` statements
         (add_jinja_variable_nodes).
      2. Dump the resulting hmap as YAML to a temporary file.
      3. Load and render that file as a jinja template via *template_context*.
      4. Re-parse the rendered YAML into a new parse state whose parent is
         *parse_state*, and return it.

    Raises RuntimeError (chained to the original error) when the rendered
    output is not loadable as YAML.

    NOTE(review): depends on a module-level ``parse_yaml`` that is not
    visible in this chunk — confirm it is defined elsewhere in the module.
    """
    # first, translate any variable blocks into jinja set statements
    # for use within the hmap
    hmap_with_jinja_vars = add_jinja_variable_nodes( parse_state.hmap, template_context )
    # write out the resulting hmap's YAML
    with tempfile.NamedTemporaryFile( mode='w', prefix='shunt-pre-resolve_') as f:
        f.write( yaml.dump( hmap_with_jinja_vars ) )
        f.flush()
        logger.info( "dumping pre-resolve into '{0}'".format( f.name ) )
        # ok, load in the jinja template
        template, render_context = template_context.load_intermediate_template( f.name )
        # now render the template
        template_string = template.render(render_context)
        opened_file = None
        # persist the rendered text next to the temp file so it can be re-parsed
        with open( f.name + ".rendered", 'w' ) as wf:
            opened_file = f.name + ".rendered"
            wf.write( template_string )
        # ok, re-parse the resulting yaml
        try:
            new_parse_state = parse_yaml( opened_file, parent=parse_state )
        except Exception as e:
            msg = "Unable to re-load rendered template as YAML. Rendering at '{0}'".format( opened_file )
            raise RuntimeError( msg ) from e
        # ok, remove rendered temporary file
        os.remove( opened_file )
        # return the resulting parse
        return new_parse_state
##============================================================================
##============================================================================
##============================================================================
##
# Given a ParseState, returns a new hmap with any 'var' nodes
# having and additional '_' key with jinja template code to
# actually set the variables for jinja templates
def add_jinja_variable_nodes(hmap, template_context):
    """Return a copy of *hmap* where every 'vars' node is compiled into
    jinja ``set`` statements stored under the JINJA_VARIABLE_KEY key.

    Non-dict atoms are returned unchanged; lists/tuples are rebuilt with
    each element processed recursively.
    """
    if not isinstance(hmap, dict):
        # Containers recurse element-wise; atoms cannot hold vars.
        if isinstance(hmap, (list, tuple)):
            return type(hmap)(
                add_jinja_variable_nodes(item, template_context) for item in hmap
            )
        return hmap
    result = copy.copy(hmap)
    # Translate any immediate 'vars' block into jinja set statements.
    if 'vars' in hmap:
        jinja_sets = [
            "{{%- set {name} = \"{value}\" -%}}".format(
                name=discard_handlebars(var_name),
                value=discard_handlebars(var_value))
            for var_name, var_value in hmap['vars'].items()
        ]
        result[JINJA_VARIABLE_KEY] = "\n".join(jinja_sets)
    # Recurse into every child except the consumed 'vars' block.
    for child_key, child_value in hmap.items():
        if child_key == 'vars':
            continue
        result[child_key] = add_jinja_variable_nodes(child_value, template_context)
    return result
##============================================================================
##
# Given a string, discards any enclosing handlebars (first order)
def discard_handlebars(x):
    """Strip one leading handlebar-open and one trailing handlebar-close
    from the string *x* (first-order only); non-strings pass through."""
    if not isinstance(x, str):
        return x
    result = x
    start_idx = result.find(TEMPLATE_HANDLEBAR_START)
    if start_idx >= 0:
        result = (result[:start_idx]
                  + result[start_idx + len(TEMPLATE_HANDLEBAR_START):])
    end_idx = result.rfind(TEMPLATE_HANDLEBAR_END)
    if end_idx >= 0:
        result = (result[:end_idx]
                  + result[end_idx + len(TEMPLATE_HANDLEBAR_END):])
    return result
##============================================================================
##============================================================================
##============================================================================
##
# A template context allows us to load "intermediate" templates.
# This also includes the jinja Environment and loaders being used
class TemplateContext(object):
    """Holds the jinja2 Environment and base render context used to load
    "intermediate" templates during hmap resolution."""

    def __init__(self, environment=None, context=None):
        """Create a context; defaults to a FileSystemLoader over
        ``templates/`` and the current directory, and an empty context."""
        if environment is not None:
            self.environment = environment
        else:
            self.environment = jinja2.Environment(
                loader=jinja2.FileSystemLoader([
                    "templates",
                    ".",
                ])
            )
        self.context = {} if context is None else context

    def load_intermediate_template(self, template_filename):
        """Read *template_filename* and compile it in this environment.

        Returns (template, render_context).
        """
        with open(template_filename) as f:
            source = f.read()
        return self.environment.from_string(source), self.context
DEFAULT_TEMPLATE_CONTEXT = TemplateContext()
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
| 12,632 | 3,115 |
import linecache
import numpy
import json
from ..sources import ChunkSource
###############################################################################
class JSONLSource(ChunkSource):
    """Data source for tensors stored in JSONL format.

    Each line of the source file is a JSON object; the tensor is read
    from ``self.key`` of each object. Lines are fetched via linecache.
    """

    ###########################################################################
    def __init__(self, source, key, num_entries, *args, **kwargs):
        """Creates a new JSONL source for file named `source`."""
        super().__init__(*args, **kwargs)
        self.source = source
        self.num_entries = num_entries
        self.key = key
        # Permutation over entries; identity until shuffle() is applied.
        self.indices = numpy.arange(len(self))

    ###########################################################################
    def __iter__(self):
        """Yield batches of tensors, one value per JSONL line, following
        the current index permutation."""
        cursor = 0
        total = self.num_entries
        while cursor < total:
            upper = min(total, cursor + self.chunk_size)
            chunk = []
            for line_index in self.indices[cursor:upper]:
                # linecache numbers lines starting at 1
                raw = linecache.getline(self.source, line_index + 1).strip()
                chunk.append(json.loads(raw)[self.key])
            yield chunk
            cursor = upper

    ###########################################################################
    def __len__(self):
        """Total number of entries this source can return."""
        return self.num_entries

    ###########################################################################
    def shape(self):
        """Shape (excluding batch size) of the tensors produced, derived
        from the first line of the file."""
        first_line = linecache.getline(self.source, 1)
        return numpy.array(json.loads(first_line)[self.key]).shape

    ###########################################################################
    def can_shuffle(self):
        """This source can be shuffled."""
        return True

    ###########################################################################
    def shuffle(self, indices):
        """Permute the first len(indices) entries by *indices*."""
        if len(indices) > len(self):
            raise ValueError('Shuffleable was asked to apply permutation, but '
                'the permutation is longer than the length of the data set.')
        prefix = self.indices[:len(indices)]
        self.indices[:len(indices)] = prefix[indices]
| 2,496 | 596 |
from tests.utils import W3CTestCase
class TestFlexbox_StfTableCell(W3CTestCase):
    """W3C flexbox tests for shrink-to-fit table-cell layout.

    Test methods are injected at class-creation time: find_tests() scans
    for reference tests matching the 'flexbox_stf-table-cell' prefix and
    vars().update() binds them as class attributes.
    """
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_stf-table-cell'))
| 161 | 62 |
'''
Created on Jun 4, 2014
@author: benjamin
'''
from pages.models import Page
class PagesMiddleware(object):
    """Middleware injecting the ordered list of activated pages into
    every template response context."""

    def process_template_response(self, request, response):
        """Add pages to the response context."""
        active_pages = Page.objects.filter(activated=True).order_by('orderid')
        response.context_data['page_list'] = active_pages
        return response
#!/usr/local/bin/python
import requests, argparse, p3c, os, json, subprocess, keyring
def main():
    """Upload a notebook as a GitHub gist and post it to the 3point/SimPEG blog.

    Workflow:
      1. Parse the notebook filename and optional description.
      2. Load (or create) the local JSON registry mapping notebooks to gist URLs.
      3. Create or update the gist on github.com (token from keyring).
      4. Convert the notebook to basic HTML via nbconvert and post it as a blog entry.

    Bug fix: the original used the Python 2 builtin ``file(...)`` five times,
    which raises NameError on Python 3; replaced with ``open()`` inside
    context managers so handles are always closed.
    """
    parser = argparse.ArgumentParser(description='Upload a notebook to a gist and 3point/SimPEG blog.')
    parser.add_argument('notebook', type=str, help='The file name of the notebook.')
    parser.add_argument('-m', type=str, help='Description of the notebook.')
    args = parser.parse_args()

    # The registry lives next to the installed p3c package.
    jsonFile = '/'.join(p3c.__file__.split('/')[:-1]+['nb2blog.json'])
    if os.path.exists(jsonFile):
        with open(jsonFile, 'r') as f:
            R = json.loads(f.read())
    else:
        with open(jsonFile, 'w') as f:
            f.write('{}\n')
        R = {}

    # Get the data ready for uploading to gist.github.com
    with open(args.notebook, 'r') as ipynb:
        data = {
            "description": args.m,
            "public": True,
            "files": {}
        }
        data['files'][args.notebook] = {"content": str(ipynb.read())}

    token = keyring.get_password('3pt','github.gist')
    if token is None:
        raise Exception("""keyring could not find your gist token:
    ipython
    > import keyring
    > keyring.set_password('3pt', 'github.gist', 'YOUR GITHUB TOKEN')
Go to github to create one if you haven't made it yet (make sure you enable gist,repo,user):
https://github.com/settings/applications#personal-access-tokens
""")

    # Check if the ipynb is in the dict, and post to gist.github.com
    # NOTE(review): passing the token as an access_token query parameter is
    # deprecated by the GitHub API; an Authorization header is preferred.
    if args.notebook in R:
        url = R[args.notebook]['gistURL']
        resp = requests.patch("%s?access_token=%s"%(url,token), data=json.dumps(data))
    else:
        resp = requests.post("https://api.github.com/gists?access_token=%s"%token, data=json.dumps(data))
        url = resp.json()['url']
        R[args.notebook] = {"gistURL": url}

    # Persist the (possibly updated) registry.
    with open(jsonFile, 'w') as f:
        f.write(json.dumps(R))

    # Convert the notebook to html.
    # NOTE(review): shell=True with an interpolated filename is injection-prone;
    # filenames are local/trusted here, but a list argv would be safer.
    subprocess.check_output("ipython nbconvert %s --to html --template basic" % (args.notebook.replace(' ','\\ ')), shell=True)
    with open(args.notebook.replace('ipynb','html'), 'r') as f:
        nbhtml = f.read()
    subprocess.check_output("rm %s" % (args.notebook.replace(' ','\\ ')).replace('ipynb','html'), shell=True)

    # Post the rendered HTML as a blog entry.
    uid = args.notebook[:-6].lower().replace(' ','-')
    title = args.notebook[:-6].title()
    b = p3c.Blog.new({'uid':uid,"content":nbhtml, "title":title, "description": args.m, 'setTags':'simpeg'})


if __name__ == "__main__":
    main()
| 2,571 | 883 |
from bollards_api.main.forms import ContactForm
def test_home_page(client):
    """Test that home page displays correctly"""
    response = client.get('/')
    expected_snippets = [
        b'<h1 class="text-center">Welcome to Bollards API</h1>',
        b'<p class="card-text">Discover all bollards between Vaud, Switzerland and France.</p>',
        b'Welcome to the bollards.ch API.',
    ]
    for snippet in expected_snippets:
        assert snippet in response.data
    # /home should serve the identical page as /
    assert client.get('/home').data == response.data
def test_about_page(client):
    """The about page mentions the magic number."""
    body = client.get('/about').data
    assert b'42' in body
def test_about_page2(client):
    """Duplicate smoke test of the about page."""
    body = client.get('/about').data
    assert b'42' in body
def test_contact_form_works(app):
    """Currently not in use"""
    with app.app_context():
        # Constructing the form must not raise inside an app context.
        form = ContactForm()
        assert True
def test_404_on_bad_request(client):
    """Unknown routes render the custom 404 page."""
    response = client.get('/randomlink')
    assert b'<h1>Looks like you ran into 404.</h1>' in response.data
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List, Dict, Any, Union, Optional, Callable, Sequence
from bs4 import BeautifulSoup, Comment, element
import pandas as pd
import re
from urllib.request import urlopen
import os
import datetime
from tqdm import tqdm as tqdm_notebook
import time
from src.constants import DATA_DIR
def get_scores(date: str, metrics: List[str]) -> pd.DataFrame:
    """Scrape per-player box scores for all NBA games played on *date*.

    Parameters
    ----------
    date : "YYYYMMDD" date string.
    metrics : basketball-reference stat keys to extract per player.

    Returns
    -------
    pd.DataFrame with one row per player who logged >= 10 minutes,
    annotated with result/score/team/opponent columns. Results are
    cached under DATA_DIR/dates/<date>.csv and served from cache on
    subsequent calls.

    Bug fix: the column rename at the end used
    ``rename({' trb': 'trb'}, inplace=True)``, which renames *index*
    labels by default and therefore never fixed the column name; it now
    passes ``columns=``.
    """
    # Serve from cache when this date was scraped before.
    path_check = os.path.join(DATA_DIR, "dates", f"{date}.csv")
    if os.path.exists(path_check):
        df_games = pd.read_csv(path_check)
        return df_games
    url_parent: str = "https://www.basketball-reference.com"
    url: str = (f"https://www.basketball-reference.com/boxscores/?month="
                f"{date[4:6]}&day={date[6:8]}&year={date[0:4]}")
    soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
    games: Sequence[Optional[element.Tag]] = soup.find_all(
        "div", class_="game_summary expanded nohover")
    if len(games) == 0:
        # No games on this date: return an empty, correctly-shaped frame.
        return pd.DataFrame(columns=metrics)
    df_games: List[Any] = []
    for game in tqdm_notebook(games, desc=f"Date: {date}", total=len(games)):
        summary: Dict[str, List[Any]] = {}
        # Team abbreviation (from the href) and final score for each side.
        winner: Sequence[Optional[element.Tag]] = game.find(
            "tr", class_="winner").find_all("td")
        loser: Sequence[Optional[element.Tag]] = game.find(
            "tr", class_="loser").find_all("td")
        summary["winner"] = [
            winner[0].find("a")["href"][7:10],
            int(winner[1].get_text()),
        ]
        summary["loser"] = [
            loser[0].find("a")["href"][7:10],
            int(loser[1].get_text())
        ]
        url_game: str = url_parent + game.find("a", text="Box Score")["href"]
        soup_game: BeautifulSoup = BeautifulSoup(urlopen(url_game), "lxml")
        box_score = game.find("a", text="Box Score")["href"]
        # NOTE(review): this rebinds the `date` parameter from the box-score
        # URL; both should agree for a same-day game — confirm intent.
        date = re.findall(r"\d\d\d\d\d\d\d\d", box_score)[0]
        for result, (side, score) in summary.items():
            game_result: Optional[element.Tag] = soup_game.find(
                "table",
                class_="sortable stats_table",
                id=f"box-{side}-game-basic")
            # Skip header and footer rows of the basic box-score table.
            player_list: List[Any] = game_result.find_all("tr",
                                                          class_=None)[1:-1]
            team: List[Dict[str, Optional[Union[float, int, str]]]] = []
            for player in player_list:
                player_name: Optional[str] = player.find("th")["csk"]
                player_dict: Dict[str, Optional[Union[str, int, float]]] = {
                    "name": player_name,
                    "date": date
                }
                for metric in metrics:
                    # Missing stat cells (DNP etc.) default to 0.
                    try:
                        res: Union[str, int, float] = player.find(
                            "td", {
                                "data-stat": metric
                            }).contents[0]
                    except Exception:
                        res = 0
                    player_dict.update({metric: res})
                if result == "winner":
                    player_dict.update({
                        "result": 1,
                        "score": score,
                        "team": summary["winner"][0],
                        "opp": summary["loser"][0],
                        "opp_score": summary["loser"][1],
                    })
                if result == "loser":
                    player_dict.update({
                        "result": 0,
                        "score": score,
                        "team": summary["winner"][0],
                        "opp": summary["winner"][0],
                        "opp_score": summary["winner"][1],
                    })
                # Keep only players with at least 10 minutes played.
                if int(str(player_dict["mp"]).split(":")[0]) >= 10:
                    team.append(player_dict)
            team_df: pd.DataFrame = pd.DataFrame(team)
            team_df["score"] = score
            df_games.append(pd.DataFrame(team_df))
    df_games_df: pd.DataFrame = pd.concat(df_games)
    if ' trb' in df_games_df.columns:
        # Strip the stray leading space from the 'trb' stat column.
        df_games_df.rename(columns={' trb': 'trb'}, inplace=True)
    Data_scrapper.write_csv(df=df_games_df, name=date, extra_path="dates")
    return df_games_df
class Data_scrapper(object):
    """Scraper for basketball-reference.com over a date range.

    Holds the list of per-player stat keys to collect and the inclusive
    [start, end] date window, and provides static helpers for schedules,
    rosters and injury reports.

    NOTE(review): the " trb" metric carries a deliberate leading space and
    is paired with the rename in get_scores — confirm before "fixing" it.
    """

    def __init__(self, start: str, end: str) -> None:
        """Create a scraper for the window start..end ("YYYYMMDD" strings)."""
        self.metrics: List[str] = [
            "mp",
            "fg",
            "fga",
            "fg_pct",
            "fg3",
            "fg3a",
            "fg3_pct",
            "ft",
            "fta",
            "ft_pct",
            "orb",
            "drb",
            " trb",
            "ast",
            "stl",
            "blk",
            "tov",
            "pf",
            "pts",
            "plus_minus",
        ]
        self.start: datetime.datetime = datetime.datetime.strptime(
            start, "%Y%m%d")
        self.end: datetime.datetime = datetime.datetime.strptime(end, "%Y%m%d")
        self.timeframe: pd.DataFrame = self.generate_time_frame()

    @staticmethod
    def write_csv(df: pd.DataFrame, name: str, extra_path: str = None) -> None:
        """Write *df* to DATA_DIR[/extra_path]/<name>.csv, creating the
        directory when needed. The index is not written."""
        if extra_path is not None:
            path_data: str = os.path.join(DATA_DIR, extra_path)
        else:
            path_data = os.path.join(DATA_DIR)
        if not os.path.exists(path_data):
            os.mkdir(path_data)
        full_path: str = os.path.join(path_data, f"{name}.csv")
        df.to_csv(full_path, index=False)

    def get_timeframe_data(self,
                           sleep: int = 0,
                           name: str = "default",
                           write: bool = True,
                           get_scores: Callable = get_scores) -> pd.DataFrame:
        """Scrape every date in self.timeframe and concatenate the results.

        *sleep* seconds are waited between dates (rate limiting); when
        *write* is True the combined frame is also saved as <name>.csv.
        """
        full_time_list: List[pd.DataFrame] = []
        for date in tqdm_notebook(self.timeframe,
                                  total=len(self.timeframe),
                                  desc="Main Frame"):
            # get_scores_cached: Callable = memory1.cache(get_scores)
            # date_df: pd.DataFrame = get_scores_cached(date, self.metrics)
            date_df: pd.DataFrame = get_scores(date, self.metrics)
            full_time_list.append(date_df)
            time.sleep(sleep)
        full_time_df: pd.DataFrame = pd.concat(full_time_list, sort=True)
        if write:
            Data_scrapper.write_csv(full_time_df, name=name)
        return full_time_df

    def generate_time_frame(self) -> List[str]:
        """Return every date from start to end inclusive as "YYYYMMDD"."""
        date_range: List[str] = [
            (self.start + datetime.timedelta(days=x)).strftime("%Y%m%d")
            for x in range(0, (self.end - self.start).days + 1)
        ]
        return date_range

    @staticmethod
    def get_next_games(
            date: str,
            season_year: Union[str, int]) -> List[Dict[str, Optional[str]]]:
        """Return the {home, visitor} match-ups scheduled on *date* by
        scraping the monthly schedule page for *season_year*."""
        month: str = datetime.datetime.strptime(
            date, "%Y%m%d").strftime("%B").lower()
        url_games: str = (f"https://www.basketball-reference.com/leagues/"
                          f"NBA_{season_year}_games-{month}.html")
        print(url_games)
        soup: BeautifulSoup = BeautifulSoup(urlopen(url_games), "lxml")
        month_games: Sequence[Any] = soup.find_all("tr")
        match_ups: List[Dict[str, Optional[str]]] = []
        for month_game in month_games:
            # Rows without a csk date header are not game rows; skip them.
            try:
                check_date: bool = month_game.find("th")["csk"].startswith(
                    date)
            except Exception:
                continue
            if check_date:
                visitor: Optional[str] = month_game.find(
                    "td", {
                        "data-stat": "visitor_team_name"
                    }).find("a")["href"][7:10]
                home: Optional[str] = month_game.find(
                    "td", {
                        "data-stat": "home_team_name"
                    }).find("a")["href"][7:10]
                match_ups.append({"home": home, "visitor": visitor})
        return match_ups

    @staticmethod
    def get_all_players(
            team: Optional[str], date: str,
            season_year: Union[str, int]) -> List[Dict[str, Optional[str]]]:
        """Return the full roster of *team* for *season_year* as
        {name, team, date} dicts."""
        url: str = (f"https://www.basketball-reference.com/"
                    f"teams/{team}/{season_year}.html")
        print(url)
        soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
        table_players: Optional[element.Tag] = soup.find("tbody")
        players: List[Dict[str, Optional[element.Tag]]] = []
        for player in table_players.find_all("tr"):
            name: Optional[str] = player.find("td",
                                              {"data-stat": "player"})["csk"]
            players.append({"name": name, "team": team, "date": date})
        return players

    @staticmethod
    def get_injured_players(team: Optional[str], date: str,
                            season_year: Union[str, int]) -> List:
        """Return currently injured players of *team*, or [] when the
        injury table is absent or unparsable.

        The injury table is embedded in an HTML comment, hence the
        Comment extraction and second BeautifulSoup pass.
        """
        url: str = (f"https://www.basketball-reference.com/"
                    f"teams/{team}/{season_year}.html")
        soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
        div_inj: Optional[element.Tag] = soup.find("div", id="all_injury")
        try:
            comments: Sequence[Optional[element.Tag]] = div_inj.find_all(
                string=lambda text: isinstance(text, Comment))
            comms: Optional[str] = re.sub("\n", "", comments[0]).strip()
            soup = BeautifulSoup(comms, "lxml")
            body: Optional[element.Tag] = soup.find("tbody")
            players: List[Dict[str, Optional[str]]] = []
            for player in body.find_all("tr"):
                name: Optional[str] = player.find(
                    "th", {"data-stat": "player"})["csk"]
                players.append({"name": name, "team": team, "date": date})
            return players
        except Exception:
            return list()

    @staticmethod
    def get_next_games_player(date: str,
                              season_year: Union[str, int]) -> pd.DataFrame:
        """Return a frame of all non-injured players taking part in games
        on *date*, each annotated with their opponent ("opp")."""
        match_ups: List[Dict[str,
                             Optional[str]]] = Data_scrapper.get_next_games(
                                 date, season_year)
        all_players_list: List = []
        for match_up in match_ups:
            for i, team in enumerate(match_up.values()):
                all_players: List[Dict[
                    str, Optional[str]]] = Data_scrapper.get_all_players(
                        team, date, season_year)
                injured_players: List = Data_scrapper.get_injured_players(
                    team, date, season_year)
                injured_players_names: List = ([
                    player["name"] for player in injured_players
                ] if len(injured_players) > 0 else [])
                available_players: List = [
                    player for player in all_players
                    if player["name"] not in injured_players_names
                ]
                # The opponent is the *other* side of this match-up.
                for player in available_players:
                    ind: int = 1 if i == 0 else 0
                    player["opp"] = list(match_up.values())[ind]
                all_players_list.extend(available_players)
        return pd.DataFrame(all_players_list)
| 11,382 | 3,465 |
from src.CartClass import *
from src.utilis import *
from src.utilis import *
from tqdm import tqdm
# Experiment-generation configuration.
csv = 'data_rnn'  # base name for the generated CSV data
number_of_experiments = 10
length_of_experiment = 1e3  # experiment length in simulation steps
# NOTE(review): dt_main_simulation_globals comes from a star import — confirm
# which module defines it.
dt_main_simulation = dt_main_simulation_globals
track_relative_complexity = 0.5 # randomly placed points/s
track_complexity = int(dt_main_simulation*length_of_experiment*track_relative_complexity) # Total number of randomly placed points
mode = 2
MyCart = Cart()
# Run the configured number of experiments on a single Cart instance.
for i in range(number_of_experiments):
    print(i)
    # NOTE(review): short pause between runs — presumably to let output flush;
    # confirm it is still needed.
    sleep(0.1)
    Generate_Experiment(MyCart,
                        mode=mode,
                        exp_len=length_of_experiment,
                        dt=dt_main_simulation,
                        track_complexity=track_complexity,
                        csv=csv)
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated schema for the events app.

    Creates Event, EventType, Invitation, Link, Place and Registration.
    Verbose names are escaped Slovak strings (e.g. "n\xe1zov" == "názov").
    Auto-generated: edit with care and keep field definitions verbatim.
    """

    # First migration for this app: nothing to depend on.
    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Event",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                ("start_time", models.DateTimeField(verbose_name="\u010das za\u010diatku")),
                ("end_time", models.DateTimeField(verbose_name="\u010das konca")),
                (
                    "registration_deadline",
                    models.DateTimeField(
                        null=True, verbose_name="deadline pre registr\xe1ciu", blank=True
                    ),
                ),
                (
                    "text",
                    models.TextField(
                        default="",
                        help_text='Obsah bude prehnan\xfd <a href="http://en.wikipedia.org/wiki/Markdown">Markdownom</a>.',
                        blank=True,
                    ),
                ),
            ],
            options={
                "ordering": ["-end_time", "-start_time"],
                "verbose_name": "akcia",
                "verbose_name_plural": "akcie",
            },
        ),
        migrations.CreateModel(
            name="EventType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                ("is_camp", models.BooleanField(verbose_name="s\xfastredko")),
            ],
            options={"verbose_name": "typ akcie", "verbose_name_plural": "typy akci\xed"},
        ),
        migrations.CreateModel(
            name="Invitation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                (
                    "type",
                    models.SmallIntegerField(
                        default=0,
                        verbose_name="typ pozv\xe1nky",
                        choices=[
                            (0, "\xfa\u010dastn\xedk"),
                            (1, "n\xe1hradn\xedk"),
                            (2, "ved\xfaci"),
                        ],
                    ),
                ),
                ("going", models.NullBooleanField(verbose_name="z\xfa\u010dastn\xed sa")),
            ],
            options={"verbose_name": "pozv\xe1nka", "verbose_name_plural": "pozv\xe1nky"},
        ),
        migrations.CreateModel(
            name="Link",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("title", models.CharField(max_length=100, verbose_name="titulok")),
                ("name", models.CharField(max_length=300, verbose_name="meno")),
                ("url", models.URLField(max_length=300)),
            ],
            options={"verbose_name": "odkaz", "verbose_name_plural": "odkazy"},
        ),
        migrations.CreateModel(
            name="Place",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
            ],
            options={"verbose_name": "miesto akcie", "verbose_name_plural": "miesta akci\xed"},
        ),
        migrations.CreateModel(
            name="Registration",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                (
                    "text",
                    models.TextField(
                        help_text='Obsah bude prehnan\xfd <a href="http://en.wikipedia.org/wiki/Markdown">Markdownom</a>.'
                    ),
                ),
            ],
            options={
                "verbose_name": "Prihl\xe1\u0161ka",
                "verbose_name_plural": "Prihl\xe1\u0161ky",
            },
        ),
    ]
| 4,970 | 1,339 |
#!/usr/bin/env python3
# Split the string l into consecutive pieces of length n
# (the final piece may be shorter).
n = 2
l = "foo"
chunks = [l[start : start + n] for start in range(0, len(l), n)]
print(chunks)
| 111 | 57 |
# -*- coding: utf-8 -*-
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A setuptools based setup module.
--------------------------------------------------------
See:
http://packaging.python.org/en/latest/distributing.html
http://github.com/pypa/sampleproject
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
from setuptools import setup
setup(
    name='SHTTPServer_plus',
    url='https://github.com/pandapan0021/SHTTPServer_plus.git',
    # Trove classifiers must exactly match the official PyPI list;
    # bug fix: the original misspelled "Programming", producing an
    # invalid classifier that PyPI rejects / ignores.
    classifiers=[
        'Programming Language :: Python :: 3.5',
    ],
)
| 545 | 172 |
# import opencv
import numpy as np
import cv2
# Read image as grayscale (flag 0).
src = cv2.imread("exercise_images/lion.jpg",0)
# Set threshold and maxValue
thresh = 25
thresh3 = 255
thresh4 = 205
thresh5 = 105
thresh2 = 155
maxValue = 255
# Basic threshold example: pixels > thresh become maxValue, else 0.
th, dst = cv2.threshold(src, thresh, maxValue, cv2.THRESH_BINARY);
th, dsts = cv2.threshold(src, thresh2, maxValue, cv2.THRESH_BINARY);
th, dsts1 = cv2.threshold(src, thresh3, maxValue, cv2.THRESH_BINARY);
th, dsts2 = cv2.threshold(src, thresh4, maxValue, cv2.THRESH_BINARY);
th, dsts3 = cv2.threshold(src, thresh5, maxValue, cv2.THRESH_BINARY);
# NOTE(review): both "improved" stacks use the same pair (src, dsts);
# presumably one was meant to use a different threshold result — confirm.
improved = np.hstack((src,dsts)) #stacking images side-by-side
improvedmore = np.hstack((src,dsts)) #stacking images side-by-side
imp = np.hstack((dst,dsts)) #stacking images side-by-side
# NOTE(review): window titles do not match the thresholds actually used
# (e.g. 'Have You of 165' shows the thresh=25 result) — confirm labels.
cv2.imshow('Have You of 165',dst)
cv2.imshow('Got You of 155',dsts2)
cv2.imshow('Have You of 255',dsts3)
cv2.imshow('Got You of 205',dsts1)
cv2.imshow('Have You of 100',dsts)
cv2.imwrite('doc.jpeg',improved)
cv2.imwrite('doc2.jpeg',improvedmore)
cv2.imwrite('alike.jpeg',imp)
#cv2.imshow('Image',src)
| 1,128 | 519 |
from django.test import TestCase
from .models import Image, Category, Location
# Create your tests here.
class CategoryTest(TestCase):
    """Unit tests for the Category model."""

    def setUp(self):
        self.new_category = Category(name='newCategory')

    def tearDown(self):
        Category.objects.all().delete()

    def test_instance(self):
        """The fixture object is truthy / a Category."""
        self.assertTrue(self.new_category, Category)

    def test_save_category(self):
        """save_category() persists the record."""
        self.new_category.save_category()
        saved = Category.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_category(self):
        """delete_category() removes the record."""
        self.new_category.save_category()
        saved = Category.objects.all()
        self.new_category.delete_category()
        self.assertTrue(len(saved) < 1)
class LocationTest(TestCase):
    """Unit tests for the Location model."""

    def setUp(self):
        self.new_location = Location(name='canada')

    def tearDown(self):
        Location.objects.all().delete()

    def test_instance(self):
        """The fixture object is truthy / a Location."""
        self.assertTrue(self.new_location, Location)

    def test_save_location(self):
        """save_location() persists the record."""
        self.new_location.save_location()
        saved = Location.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_location(self):
        """delete_location() removes the record."""
        self.new_location.save_location()
        saved = Location.objects.all()
        self.new_location.delete_location()
        self.assertTrue(len(saved) < 1)
class ImageTest(TestCase):
    """Tests for the Image model helpers."""

    def setUp(self):
        """Create and persist a category, a location and one image."""
        self.new_category = Category(name='newCategory')
        self.new_category.save()
        self.new_location = Location(name='Canada')
        self.new_location.save()
        self.new_image = Image(image_url='building.png', name='building', description='Image of building taken at sunset', location=self.new_location, category=self.new_category)
        self.new_image.save()

    def tearDown(self):
        """Wipe every row the tests created."""
        Category.objects.all().delete()
        Location.objects.all().delete()
        Image.objects.all().delete()

    def test_save_image(self):
        """save_image() persists the instance."""
        self.new_image.save_image()
        self.assertTrue(len(Image.objects.all()) > 0)

    def test_save_multiple_images(self):
        """Several images can be saved independently."""
        self.new_image.save_image()
        second_image = Image(image_url='building2.png', name='building2', description='Image of building taken at sunrise', location=self.new_location, category=self.new_category)
        second_image.save_image()
        self.assertTrue(len(Image.objects.all()) > 1)

    def test_delete_image(self):
        """delete_image() removes the instance."""
        self.new_image.save_image()
        images = Image.objects.all()
        self.new_image.delete_image()
        # Lazy queryset: evaluated only here, after the delete.
        self.assertTrue(len(images) < 1)
| 3,185 | 919 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import numpy as np
import chainer
import chainer.functions as F
import chainercv.links as C
import mxnet
import onnx_chainer
def save_as_onnx_then_import_from_mxnet(model, fn):
    """Export *model* to ONNX at path *fn*, re-import it with MXNet and
    verify both frameworks produce the same prediction on a random input."""
    # Random image batch in [0, 255), NCHW layout.
    dummy_input = np.random.rand(1, 3, 224, 224).astype(np.float32) * 255

    # Reference forward pass with Chainer in inference mode.
    with chainer.using_config('train', False):
        reference_out = model(dummy_input).array

    # Export to ONNX, then load the file back as an MXNet symbol.
    onnx_chainer.export(model, dummy_input, fn)
    sym, arg, aux = mxnet.contrib.onnx.import_model(fn)

    # The graph's data input is whichever symbol input is neither a weight
    # parameter nor an auxiliary state.
    data_names = [
        name for name in sym.list_inputs()
        if name not in arg and name not in aux
    ]

    # Build and bind an inference-only MXNet module on CPU.
    mod = mxnet.mod.Module(symbol=sym, data_names=data_names,
                           context=mxnet.cpu(), label_names=None)
    mod.bind(for_training=False,
             data_shapes=[(data_names[0], dummy_input.shape)],
             label_shapes=None)
    mod.set_params(arg_params=arg, aux_params=aux,
                   allow_missing=True, allow_extra=True)

    # Forward the same input through MXNet.
    Batch = collections.namedtuple('Batch', ['data'])
    mod.forward(Batch([mxnet.nd.array(dummy_input)]))
    mxnet_out = mod.get_outputs()[0].asnumpy()

    # Same top-1 class, and raw outputs equal to 5 decimals.
    assert np.argmax(reference_out) == np.argmax(mxnet_out)
    np.testing.assert_almost_equal(reference_out, mxnet_out, decimal=5)
def main():
    """Run the ONNX round-trip check for VGG16 and ResNet50."""
    vgg = C.VGG16(pretrained_model='imagenet')
    save_as_onnx_then_import_from_mxnet(vgg, 'vgg16.onnx')

    resnet = C.ResNet50(pretrained_model='imagenet', arch='he')
    # MXNet's default pooling does not use cover_all; override pool1 so the
    # two frameworks compute the same thing.
    resnet.pool1 = lambda x: F.max_pooling_2d(
        x, ksize=3, stride=2, cover_all=False)
    save_as_onnx_then_import_from_mxnet(resnet, 'resnet50.onnx')


if __name__ == '__main__':
    main()
| 2,167 | 785 |
# -*- coding: utf-8 -*-
"""Notebook-check script."""
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from art import tprint
# Notebooks (without extension) to execute, in order.
NOTEBOOKS_LIST = [
    "Document",
    "Example1",
    "Example2",
    "Example3",
    "Example4",
    "Example5",
    "Example6",
    "Example7",
    "Example8"]
EXTENSION = ".ipynb"

if __name__ == "__main__":
    tprint("PYCM", "bulbhead")
    tprint("Document", "bulbhead")
    print("Processing ...")
    for index, notebook in enumerate(NOTEBOOKS_LIST, start=1):
        # Fresh executor (and kernel) for every notebook.
        executor = ExecutePreprocessor(timeout=6000, kernel_name='python3')
        path = os.path.join("Document", notebook) + EXTENSION
        with open(path, "r", encoding="utf-8") as f:
            nb = nbformat.read(f, as_version=4)
        # Execute in place, then write the executed notebook back.
        executor.preprocess(nb, {'metadata': {'path': 'Document/'}})
        with open(path, 'w', encoding='utf-8') as f:
            nbformat.write(nb, f)
        print("{0}.{1} [OK]".format(str(index), notebook))
| 982 | 352 |
from typing import List
import discord
import yaml
from src.models.models import Mute, session
from src.utils.embeds_manager import EmbedsManager
from src.utils.permissions_manager import PermissionsManager
async def revoke_mute(client: discord.Client, message: discord.Message, args: List[str]):
    """Revoke an active mute identified by its index (``m<id>``).

    Marks the mute inactive in the database and clears the per-channel
    permission overwrites that silenced the target member.
    """
    with open('run/config/config.yml', 'r') as file:
        config = yaml.safe_load(file)

    # Caller must hold the 'mute' permission.
    if not PermissionsManager.has_perm(message.author, 'mute'):
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                "Vous n'avez pas les permissions pour cette commande."
            )
        )

    # Help message
    if args and args[0] == '-h':
        return await message.channel.send(
            embed=EmbedsManager.information_embed(
                "Rappel de la commande : \n"
                f"`{config['prefix']}rmute <mute_id>`"
            )
        )

    if len(args) != 1:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur dans la commande, merci de spécifier l'index du mute."
            )
        )

    # Mute ids are of the form m<number>.
    if not args[0].startswith("m"):
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, index invalide."
            )
        )

    # int() raised an uncaught ValueError on non-numeric ids (e.g. "mabc");
    # treat that the same as an unknown index.
    try:
        index = int(args[0][1:])
    except ValueError:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, index invalide."
            )
        )

    current_mute: Mute = session.query(Mute).filter_by(id=index).first()
    if current_mute is None:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, index invalide."
            )
        )
    if not current_mute.is_active:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, ce mute est déjà révoqué."
            )
        )

    current_mute.is_active = False
    session.commit()

    # get_member returns None when the member has left the guild; the
    # original then crashed on target.permissions_in. Still confirm the
    # revocation in that case — the DB state is already updated.
    target: discord.Member = message.guild.get_member(current_mute.target_id)
    if target is not None:
        # Remove the overwrite in every channel where the target was denied
        # send_messages.
        for channel in message.guild.channels:
            if not target.permissions_in(channel).send_messages:
                await channel.set_permissions(target,
                                              overwrite=None)

    await message.channel.send(
        embed=EmbedsManager.complete_embed(
            f"⚠ Le mute **{args[0]}** a été révoqué."
        )
    )
| 2,315 | 699 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 18:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an integer 'domain_alias_counter'
    # field (default 0) to the 'modoboainstance' model.
    dependencies = [
        ('modoboa_public_api', '0004_auto_20160614_1717'),
    ]
    operations = [
        migrations.AddField(
            model_name='modoboainstance',
            name='domain_alias_counter',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| 489 | 179 |
import QUANTAXIS as QA
from numpy import *
from scipy.signal import savgol_filter
import numpy as np
import matplotlib.pyplot as plt
from QUANTAXIS.QAIndicator.talib_numpy import *
import mpl_finance as mpf
import matplotlib.dates as mdates
def smooth_demo():
    """Plot BTC/USDT daily candles with several smoothed MA variants."""
    # Daily candles for huobi btcusdt.
    data2 = QA.QA_fetch_crypto_asset_day_adv(['huobi'],
                                             symbol=['btcusdt'],
                                             start='2017-10-01',
                                             end='2020-06-30 23:59:59')
    closes = data2.close.values

    # Four 10-period smoothers of the close series, plus a Savitzky-Golay
    # filter (window 5, polynomial order 1).
    sma = talib.MA(closes, 10)
    hull = TA_HMA(closes, 10)
    kama = TA_KAMA(closes, 10)
    smoothed = savgol_filter(closes, 5, 1)

    plt.figure(figsize=(22, 9))
    ax1 = plt.subplot(111)
    mpf.candlestick2_ochl(ax1, data2.data.open.values, data2.data.close.values,
                          data2.data.high.values, data2.data.low.values,
                          width=0.6, colorup='r', colordown='green', alpha=0.5)
    ax1.plot(hull, lw=2, linestyle="--", color='darkcyan', alpha=0.6)
    ax1.plot(smoothed, lw=1, color='darkcyan', alpha=0.8)
    ax1.plot(sma, lw=1, color='orange', alpha=0.8)
    ax1.plot(kama, lw=1, color='lightskyblue', alpha=0.8)
    # Legend entries follow the plot order above.
    ax1.legend(['Hull Moving Average', 'savgol_filter', 'talib.MA10', 'KAMA10'])
    plt.title("Smoothing a MA10 line")
    plt.show()
# Run the demo when executed as a script.
if __name__=='__main__':
    smooth_demo()
| 1,398 | 623 |
import time
import os
from datetime import datetime
import breakpoint
def log(msg):
    """Append *msg* with a timestamp to the Tknet server log file."""
    curr_date = datetime.now()
    # Context manager guarantees the handle is closed even if the write
    # fails (the original leaked the handle on exceptions).
    with open('/usr/share/Tknet/Server/tknet.log', 'a') as log_file:
        log_file.write(f'\n[{curr_date}]: {msg}')
def file_transfer_handle(c, x, d_name, address):
    """Send a single file (x[1]) to the client socket *c*.

    Wire protocol: FILEMODE -> DIRADD <name> -> (breakpoint sync) ->
    FILEADD <file> -> FILECONT <file> <content> -> END.
    """
    c.send('FILEMODE'.encode())
    c.send(f'DIRADD {d_name.split(".")[0]}'.encode())
    time.sleep(0.5)
    log(f'Reached breakpoint of directory transfer for {address}')
    # Wait for the client to acknowledge before streaming content.
    breakpoint.wait(c)
    log(f'Breakpoint resolved for {address}')
    log(f'Sending {x[1]} to {address}')
    c.send(f'FILEADD {x[1]}'.encode())
    time.sleep(1)
    # 'with' closes the file handle (the original never closed it).
    with open(f'{x[1]}') as f:
        c.send(f'FILECONT {x[1]} {f.read()}'.encode())
    time.sleep(1)
    c.send('END'.encode())
def dir_transfer_handle(c, x, d_name, address):
    """Send every regular file in directory x[1] to the client socket *c*.

    Wire protocol: DIRMODE -> warning -> DIRADD <dir> -> (breakpoint sync)
    then, per file: FILEADD <name>, FILECONT <name> <content>; finally END.
    Nested directories are skipped (only printed to stdout).
    """
    log(f"{[x[1]]}-Found directory, sending all files...")
    c.send('DIRMODE'.encode())
    time.sleep(0.5)
    c.send("The selected option contains multiple files, be warned...".encode())
    files = os.listdir(f'{x[1]}')
    time.sleep(0.5)
    c.send(f"DIRADD {d_name}".encode())
    log(f'Reached breakpoint of directory transfer for {address}')
    # Wait for the client to acknowledge before streaming content.
    breakpoint.wait(c)
    log(f'Breakpoint resolved for {address}')
    for item in files:
        if os.path.isdir(f'{x[1]}/{item}'):
            print(f'Dir found {item}')
        else:
            log(f'Sending {item} to {address}')
            c.send(f"FILEADD {item}".encode())
            time.sleep(1)
            # 'with' closes each file; the original leaked one handle per
            # file sent.
            with open(f'{x[1]}/{item}') as f:
                c.send(f'FILECONT {item} {f.read()}'.encode())
            time.sleep(1)
    # End directory transfer
    c.send('END'.encode())
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class SequenceModel(nn.Module):
    """Two-branch sequence model combining 1x1-conv feature compression,
    dilated temporal convolutions and bidirectional GRUs.

    Branch 1 ("fea"): compresses per-step features with 1x1 convolutions,
    then scores them both with a conv head (fea_first_final) and a bi-GRU
    head (fea_lstm_final); the two 6-channel scores are summed into out0.
    Branch 2: concatenates sigmoid(out0) onto x, runs dilated convolutions,
    and again sums a conv head and a bi-GRU head into out.
    forward() returns (out, out0).
    """
    def __init__(self, model_num, feature_dim, feature_num,
                 lstm_layers, hidden, drop_out, Add_position):
        # model_num: channel count of the branch-2 input x BEFORE the
        #   out0-sigmoid channel (and optional position channel) is appended
        #   (+2 when Add_position else +1, below).
        # feature_dim: input channel count of the feature branch.
        # feature_num: feature-group count; multiplies the 128 channels
        #   produced by fea_conv when flattening.
        # lstm_layers / hidden: GRU depth and hidden size for both branches.
        # drop_out: Dropout2d probability used throughout fea_conv.
        super(SequenceModel, self).__init__()
        self.feature_num=feature_num
        # seq model 1: 1x1 conv stack feature_dim -> 512 -> 128, with
        # dropout before every conv.
        self.fea_conv = nn.Sequential(
            nn.Dropout2d(drop_out),
            nn.Conv2d(feature_dim, 512, kernel_size=(1, 1), stride=(1,1), padding=(0,0), bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Dropout2d(drop_out),
            nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout2d(drop_out),
        )
        # Conv head: 128*feature_num channels -> 6 scores per step.
        self.fea_first_final = nn.Sequential(nn.Conv2d(128 * feature_num, 6, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True))
        # # bidirectional GRU
        self.hidden_fea = hidden
        self.fea_lstm = nn.GRU(128 * feature_num, self.hidden_fea, num_layers=lstm_layers, batch_first=True, bidirectional=True)
        # Collapses the 2*hidden GRU output to 6 scores per step.
        self.fea_lstm_final = nn.Sequential(nn.Conv2d(1, 6, kernel_size=(1, self.hidden_fea*2), stride=(1, 1), padding=(0, 0), dilation=1, bias=True))
        ratio = 4
        # Branch-2 input gains one sigmoid(out0) channel, plus a position
        # channel when Add_position is set.
        if Add_position:
            model_num += 2
        else:
            model_num += 1
        # seq model 2: temporal convs with increasing dilation (1, 2) ...
        self.conv_first = nn.Sequential(nn.Conv2d(model_num, 128*ratio, kernel_size=(5, 1), stride=(1,1), padding=(2,0), dilation=1, bias=False),
                                        nn.BatchNorm2d(128*ratio),
                                        nn.ReLU(),
                                        nn.Conv2d(128*ratio, 64*ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
                                        nn.BatchNorm2d(64*ratio),
                                        nn.ReLU())
        # ... then dilation (4, 2), keeping 64*ratio channels.
        self.conv_res = nn.Sequential(nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(4, 0), dilation=4, bias=False),
                                      nn.BatchNorm2d(64 * ratio),
                                      nn.ReLU(),
                                      nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
                                      nn.BatchNorm2d(64 * ratio),
                                      nn.ReLU(),)
        # Conv head of branch 2: collapse to a single channel.
        self.conv_final = nn.Sequential(nn.Conv2d(64*ratio, 1, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0), dilation=1,bias=False))
        # bidirectional GRU
        self.hidden = hidden
        self.lstm = nn.GRU(64*ratio*6, self.hidden, num_layers=lstm_layers, batch_first=True, bidirectional=True)
        self.final = nn.Sequential(nn.Conv2d(1, 6, kernel_size=(1, self.hidden*2), stride=(1, 1), padding=(0, 0), dilation=1, bias=True))
    def forward(self, fea, x):
        # fea: feature-branch input (batch, feature_dim, H, W);
        # x: branch-2 input (batch, model_num-1 or -2, ..., ...).
        # NOTE(review): exact H/W semantics (steps vs. feature groups) are
        # not visible here — confirm against the caller.
        batch_size, _, _, _ = x.shape
        fea = self.fea_conv(fea)
        # Flatten the 128-channel maps of all feature groups into one
        # (batch, 128*feature_num, steps, 1) tensor.
        fea = fea.permute(0, 1, 3, 2).contiguous()
        fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
        fea = fea.view(batch_size, 128 * self.feature_num, -1, 1).contiguous()
        fea_first_final = self.fea_first_final(fea)
        #################################################
        # out0: (batch, 1, steps, 6) conv-head scores.
        out0 = fea_first_final.permute(0, 3, 2, 1)
        #################################################
        # bidirectional GRU over steps (batch_first layout).
        fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
        fea = fea.permute(0, 2, 1).contiguous()
        fea, _ = self.fea_lstm(fea)
        fea = fea.view(batch_size, 1, -1, self.hidden_fea * 2)
        fea_lstm_final = self.fea_lstm_final(fea)
        fea_lstm_final = fea_lstm_final.permute(0, 3, 2, 1)
        #################################################
        # Sum the conv head and the GRU head of branch 1.
        out0 += fea_lstm_final
        #################################################
        # Feed branch-1 scores (as probabilities) into branch 2.
        out0_sigmoid = torch.sigmoid(out0)
        x = torch.cat([x, out0_sigmoid], dim = 1)
        x = self.conv_first(x)
        x = self.conv_res(x)
        x_cnn = self.conv_final(x)
        #################################################
        out = x_cnn
        #################################################
        # bidirectional GRU over branch-2 features.
        # 256 == 64*ratio (ratio is fixed at 4 in __init__).
        x = x.view(batch_size, 256, -1, 6)
        x = x.permute(0,2,1,3).contiguous()
        x = x.view(batch_size, x.size()[1], -1).contiguous()
        x, _= self.lstm(x)
        x = x.view(batch_size, 1, -1, self.hidden*2)
        x = self.final(x)
        x = x.permute(0,3,2,1)
        #################################################
        # Sum the conv head and the GRU head of branch 2.
        out += x
        #################################################
        #res
        return out, out0
if __name__ == '__main__':
    # Smoke test: build a model with example hyper-parameters and print
    # its architecture.
    demo_model = SequenceModel(
        model_num=15,
        feature_dim=128,
        feature_num=16,
        lstm_layers=2,
        hidden=128,
        drop_out=0.5,
        Add_position=True,
    )
    print(demo_model)
from django.contrib.gis import admin
from .models import WorldBorder
# admin.site.register(WorldBorder, admin.GeoModelAdmin)
# Register WorldBorder with the OpenStreetMap-tiled geographic admin.
admin.site.register(WorldBorder, admin.OSMGeoAdmin)
| 179 | 56 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from qmt.tasks import Task, SweepManager
# Dask-backed sweep manager shared by every example task below.
sweep = SweepManager.create_empty_sweep()  # our dask sweep manager
class HelloTask(Task):
    """Minimal task: prints a greeting when solved."""

    def __init__(self):
        # Base-class init is required for every Task subclass.
        super().__init__()

    @staticmethod
    def _solve_instance(inputs, options):
        # Required solver hook; this task ignores inputs and options.
        print('Hello World')
# Run the task once through the dask sweep manager (blocking on the
# future) and once directly without dask.
hi = HelloTask()  # create a new task
sweep.run(hi).result()  # run through dask and resolve future.result()
hi.run_daskless()  # can also run locally
class HelloOptionTask(Task):
    """Task whose greeting language is chosen through an options dict."""

    def __init__(self, language_options):
        super().__init__(options=language_options)

    @staticmethod
    def _solve_instance(inputs, options):
        # Look up the greeting for options['language']
        # (KeyError on an unknown language).
        translations = {'English': 'Hello', 'Spanish': 'Hola'}
        print(translations[options['language']] + ' World')
# Solve a task configured through an options dict.
hola = HelloOptionTask({'language': 'Spanish'})
sweep.run(hola).result()
class NameTask(Task):
    """Task that simply returns the name given in its options."""

    def __init__(self, name_options):
        super().__init__(options=name_options)

    @staticmethod
    def _solve_instance(inputs, options):
        # The returned value becomes an input of any dependent task.
        return options['name']
class HelloDependentTask(Task):
    """Greets the name produced by an upstream task."""

    def __init__(self, name_task, language_options):
        # The upstream task's result arrives as inputs[0] in _solve_instance.
        super().__init__(task_list=[name_task], options=language_options)

    @staticmethod
    def _solve_instance(inputs, options):
        translations = {'English': 'Hello', 'Spanish': 'Hola'}
        upstream_name = inputs[0]
        print(translations[options['language']] + ' ' + upstream_name)
# Wire NameTask's output into HelloDependentTask's inputs and solve.
name = NameTask({'name': 'John'})
hola = HelloDependentTask(name, {'language': 'Spanish'})
sweep.run(hola).result()
| 1,597 | 520 |
import dj_database_url
from .base import * # NOQA
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# SECURITY WARNING: keep the secret key used in production secret!
# Only override SECRET_KEY / ALLOWED_HOSTS when the matching CFG_* env var
# is set; otherwise the value from .base applies.
# NOTE(review): `os` is assumed to come from the star import of .base — confirm.
if 'CFG_SECRET_KEY' in os.environ:
    SECRET_KEY = os.environ['CFG_SECRET_KEY']
if 'CFG_ALLOWED_HOSTS' in os.environ:
    ALLOWED_HOSTS = os.environ['CFG_ALLOWED_HOSTS'].split(',')
# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
# dj_database_url reads DATABASE_URL from the environment, falling back to
# this PostGIS connection string.
DATABASES = {
    'default': dj_database_url.config(
        default='postgis://postgis:postgis@postgis/postgis',
    ),
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/stable/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.getenv('CFG_STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
MEDIA_URL = '/media/'
MEDIA_ROOT = os.getenv('CFG_MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
# ManifestStaticFilesStorage
# https://docs.djangoproject.com/en/stable/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
| 1,107 | 414 |
import random as _random
import numpy as _np
import collections as _collections
from abc import ABC, abstractmethod
from sklearn.cluster import DBSCAN
def _k_best(tuple_list, k):
"""For a list of tuples [(distance, value), ...] - Get the k-best tuples by
distance.
Args:
tuple_list: List of tuples. (distance, value)
k: Number of tuples to return.
"""
tuple_lst = sorted(tuple_list, key=lambda x: x[0],
reverse=False)[:k]
return tuple_lst
class ClusterSelector(ABC):
    """Interface for strategies that group feature rows into clusters."""

    @abstractmethod
    def select_clusters(self, features):
        """Return (cluster_roots, item_to_clusters) for *features*."""
class DefaultClusterSelector(ClusterSelector):
    """Random-root cluster selector.

    Picks min(max(sqrt(n), 1000), n) random rows as cluster roots and
    assigns every row to its nearest root. Because roots are random,
    similar points can end up split across multiple paths of the tree.
    """

    def __init__(self, distance_type):
        self._distance_type = distance_type

    def select_clusters(self, features):
        n_rows = features.shape[0]
        # Chunk size and root count: max(sqrt(n), 1000), capped at n.
        step = max(int(_np.sqrt(n_rows)), 1000)
        n_roots = min(step, n_rows)

        # Random sample of rows to act as cluster roots. This randomizes
        # both which rows are picked and their order in the matrix.
        chosen = _random.sample(list(_np.arange(features.shape[0])), n_roots)
        roots_matrix = features[chosen]

        item_to_clusters = _collections.defaultdict(list)

        # Wrap the roots in a distance object labelled 0..n_roots-1, drop
        # near-duplicate roots, then rebuild with compact labels.
        root = self._distance_type(roots_matrix,
                                   list(_np.arange(roots_matrix.shape[0])))
        root.remove_near_duplicates()
        root = self._distance_type(root.matrix,
                                   list(_np.arange(root.matrix.shape[0])))

        # Walk the features in chunks of `step`, assigning each row to its
        # nearest root.
        for start in range(0, features.shape[0], step):
            stop = min(start + step, features.shape[0])
            chunk = features[start:stop]
            for offset, candidates in enumerate(root.nearest_search(chunk)):
                _random.shuffle(candidates)
                for _, cluster in _k_best(candidates, k=1):
                    # Keys are root labels; values are row indices into the
                    # original features matrix.
                    item_to_clusters[cluster].append(offset + start)

        # Row index in roots_matrix maps to a key of item_to_clusters.
        return roots_matrix, item_to_clusters
class DbscanClusterSelector(ClusterSelector):
    """DBSCAN-refined cluster selector.

    Like the default selector it starts from min(max(sqrt(n), 1000), n)
    random rows, but first groups those candidates with DBSCAN so random
    sampling does not split natural neighbours; remaining rows are then
    assigned to the surviving representatives.
    """

    def __init__(self, distance_type):
        self._distance_type = distance_type
        # DBSCAN neighbourhood radius.
        self._eps = 0.4

    def select_clusters(self, features):
        n_rows = features.shape[0]
        # Chunk size and candidate count: max(sqrt(n), 1000), capped at n.
        step = max(int(_np.sqrt(n_rows)), 1000)
        n_roots = min(step, n_rows)

        # Random candidate roots (random selection and order).
        chosen = _random.sample(list(_np.arange(features.shape[0])), n_roots)
        candidates = features[chosen]

        # Cluster the candidates themselves; this usually yields fewer
        # roots per level.
        # TODO might want to propagate the distance type to the clustering
        labelling = DBSCAN(eps=self._eps, min_samples=2).fit(candidates)

        # Keep every noise point (label -1) individually ...
        noise = _np.where(labelling.labels_ == -1)[0]
        # ... plus the first member of each DBSCAN cluster.
        _, firsts = _np.unique(labelling.labels_, return_index=True)
        # Merge and deduplicate; the result is sorted.
        keep = _np.unique(_np.concatenate((noise, firsts)))

        # Surviving representatives become the cluster roots.
        roots_matrix = candidates[keep]
        item_to_clusters = _collections.defaultdict(list)
        root = self._distance_type(roots_matrix,
                                   list(_np.arange(roots_matrix.shape[0])))

        # Walk the features in chunks of `step`, assigning each row to its
        # nearest root.
        for start in range(0, features.shape[0], step):
            stop = min(start + step, features.shape[0])
            chunk = features[start:stop]
            for offset, candidates_for_row in enumerate(root.nearest_search(chunk)):
                # this is slow, disable until proven useful
                # _random.shuffle(candidates_for_row)
                for _, cluster in _k_best(candidates_for_row, k=1):
                    item_to_clusters[cluster].append(offset + start)

        # Row index in roots_matrix maps to a key of item_to_clusters.
        return roots_matrix, item_to_clusters
| 6,708 | 1,906 |