144657
from aw_nas.objective.base import BaseObjective
class ContainerObjective(BaseObjective):
NAME = "container"
def __init__(self, search_space, sub_objectives,
losses_coef=None, rewards_coef=None,
schedule_cfg=None):
super().__init__(search_space, schedule_cfg=schedule_cfg)
self.objectives = [
BaseObjective.get_class_(obj["objective_type"])(
search_space, **obj["objective_cfg"]) for obj in sub_objectives
]
self.losses_coef = losses_coef
self.rewards_coef = rewards_coef
if self.losses_coef is None:
self.losses_coef = [1.] * len(self.objectives)
if self.rewards_coef is None:
self.rewards_coef = [1.] * len(self.objectives)
        assert len(self.rewards_coef) == len(self.losses_coef) == len(self.objectives), \
            ("expect rewards_coef and losses_coef to have exactly "
             "the same length as objectives, got {}, {} and {} instead.").format(
                len(self.rewards_coef), len(self.losses_coef), len(self.objectives))
@classmethod
def supported_data_types(cls):
return ["image"]
def aggregate_fn(self, perf_name, is_training=True):
for obj in self.objectives:
if perf_name in obj.perf_names():
return obj.aggregate_fn(perf_name, is_training)
else:
return super().aggregate_fn(perf_name, is_training)
def perf_names(self):
return sum([obj.perf_names() for obj in self.objectives], [])
def get_perfs(self, inputs, outputs, targets, cand_net):
perfs = []
for obj in self.objectives:
perfs.extend(obj.get_perfs(inputs, outputs, targets, cand_net))
        assert len(perfs) == len(self.perf_names()), \
            ("expect performances to have exactly "
             "the same length as perf_names, got {} and {} instead.").format(
                len(perfs), len(self.perf_names()))
return perfs
def get_loss(self, inputs, outputs, targets, cand_net,
add_controller_regularization=True, add_evaluator_regularization=True):
losses = [
obj.get_loss(
inputs, outputs, targets, cand_net,
add_controller_regularization, add_evaluator_regularization
) for obj in self.objectives
]
weighted_loss = [l * c for l, c in zip(losses, self.losses_coef)]
return sum(weighted_loss)
def get_reward(self, inputs, outputs, targets, cand_net):
rewards = [
obj.get_reward(
inputs, outputs, targets, cand_net
) for obj in self.objectives
]
weighted_rewards = [l * c for l, c in zip(rewards, self.rewards_coef)]
return sum(weighted_rewards)
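
# Usage sketch (hypothetical, kept as a comment): the sub-objective names
# below are placeholders that would have to match classes registered with
# BaseObjective; only the structure of `sub_objectives` and the coefficient
# lists is the point here.
#
#     objective = ContainerObjective(
#         search_space,
#         sub_objectives=[
#             {"objective_type": "classification", "objective_cfg": {}},
#             {"objective_type": "latency", "objective_cfg": {}},
#         ],
#         losses_coef=[1.0, 0.1],   # down-weight the second loss
#         rewards_coef=[1.0, 0.5],
#     )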
144658
import rospy
import PyKDL
from geometry_msgs.msg import Twist, Point, PoseStamped, TwistStamped
from std_msgs.msg import String
import numpy
import math
import sys
import copy
from gazebo_msgs.srv import GetModelState
class ReturnHome:
def __init__(self,uav_id):
self.uav_type = 'typhoon_h480'
self.id = int(uav_id)
self.uav_num = 6
        self.f = 30  # control-loop frequency (Hz)
self.count = 0
self.local_pose = PoseStamped()
self.following_local_pose = [PoseStamped() for i in range(self.uav_num)]
self.following_local_pose_sub = [None]*(self.uav_num)
self.uav_current_pose = Point()
self.uav_current_yaw = 0.0
self.uav_vel = Twist()
self.arrive_point = False
self.Kp = 0.5
self.Kpy = 1
self.Kpvel = 1
self.z = 12.0 # height
self.velxy_max = 4
self.velz_max = 3
self.angz_max = 3
self.bias = [[-10, 32.7],[100,33.5],[75,30],[60,-3],[-30,-40.5],[45,-15]]
self.target_position = Twist()
self.target_yaw = 0.0
self.last_yaw = 0.0
self.arrive_count = 0
self.safe_dis = 0.3
self.safe_height = 0.5
self.cmd = ''
self.last_ugv0_pose = Point()
self.current_ugv0_pose = Point()
self.last_ugv1_pose = Point()
self.current_ugv1_pose = Point()
self.situation_flag = 0
self.change_task_flag = False
#variables of rostopic
rospy.init_node('uav'+str(self.id))
self.local_pose_sub = rospy.Subscriber(self.uav_type + '_' + str(self.id) + "/mavros/local_position/pose",
PoseStamped, self.local_pose_callback)
self.vel_enu_pub = rospy.Publisher('/xtdrone/'+self.uav_type+'_'+str(self.id)+'/cmd_vel_flu', Twist, queue_size=10)
self.cmd_pub = rospy.Publisher('/xtdrone/'+self.uav_type+'_'+str(self.id)+'/cmd',String,queue_size=10)
self.gazeboModelstate = rospy.ServiceProxy('gazebo/get_model_state', GetModelState)
def local_pose_callback(self, msg):
self.local_pose = msg
self.uav_current_pose = self.local_pose.pose.position
self.uav_current_pose.x = self.uav_current_pose.x+self.bias[self.id][0]
self.uav_current_pose.y = self.uav_current_pose.y+self.bias[self.id][1]
self.uav_current_pose.z = self.uav_current_pose.z
# change Quaternion to TF:
x = self.local_pose.pose.orientation.x
y = self.local_pose.pose.orientation.y
z = self.local_pose.pose.orientation.z
w = self.local_pose.pose.orientation.w
rot = PyKDL.Rotation.Quaternion(x, y, z, w)
res = rot.GetRPY()[2]
while res > math.pi:
res -= 2.0*math.pi
while res < -math.pi:
res += 2.0*math.pi
if res < 0:
res = res + 2.0 * math.pi
self.uav_current_yaw = res # 0 to 2pi
def following_local_pose_callback(self, msg, id):
self.following_local_pose[id] = msg
self.following_local_pose[id].pose.position.x = self.following_local_pose[id].pose.position.x+self.bias[id][0]
self.following_local_pose[id].pose.position.y = self.following_local_pose[id].pose.position.y+self.bias[id][1]
self.following_local_pose[id].pose.position.z = self.following_local_pose[id].pose.position.z
def loop(self):
rate = rospy.Rate(self.f)
count_situ_one = 0
for i in range(self.uav_num):
if not i == self.id:
self.following_local_pose_sub[i] = rospy.Subscriber(self.uav_type + '_' + str(i) + "/mavros/local_position/pose", PoseStamped, self.following_local_pose_callback, i)
while not rospy.is_shutdown():
self.count += 1
self.velxy_max = 4.0
self.cmd = ''
# get position of ugvs:
try:
get_ugv0_state = self.gazeboModelstate('ugv_0', 'ground_plane')
self.last_ugv0_pose = self.current_ugv0_pose
self.current_ugv0_pose = get_ugv0_state.pose.position
get_ugv1_state = self.gazeboModelstate('ugv_1', 'ground_plane')
self.last_ugv1_pose = self.current_ugv1_pose
self.current_ugv1_pose = get_ugv1_state.pose.position
except rospy.ServiceException as e:
print("Gazebo model state service"+" call failed: %s") % e
            # fly to the same altitude: switch to OFFBOARD first, then ARM
if self.count == 40 or self.count == 42 or self.count == 44:
self.cmd = 'OFFBOARD'
if self.count == 94 or self.count == 96 or self.count == 98:
self.cmd = 'ARM'
            if self.situation_flag == 0: # altitude hold
self.target_position.linear.z = self.z
self.target_position.linear.x = self.uav_current_pose.x
self.target_position.linear.y = self.uav_current_pose.y
                # keep the current angular velocities unchanged
            if self.situation_flag == 1 and self.change_task_flag: # initialize formation target
self.change_task_flag = False
#if not self.avoid_start_flag:
self.init_point()
            if self.situation_flag == 2 and self.change_task_flag: # initialize return-home target
self.change_task_flag = False
#if not self.avoid_start_flag:
self.return_home()
                print('flag222222')
distance_tar_cur = self.VectNorm3(self.target_position.linear, self.uav_current_pose)
if distance_tar_cur < 0.5:
self.arrive_count += 1
if self.arrive_count > 5:
self.arrive_point = True
self.arrive_count = 0
else:
self.arrive_point = False
else:
self.arrive_count = 0
self.arrive_point = False
# task changes:
if (self.situation_flag == 0) and self.arrive_point:
self.change_task_flag = True
self.situation_flag = 1
self.arrive_point = False
elif (self.situation_flag == 1) and self.arrive_point:
self.situation_flag = 2
# self.start_yolo_pub.publish('gogogo')
self.arrive_point = False
self.change_task_flag = True
if self.situation_flag == 2:
self.velz_max = 1
if self.uav_current_pose.z < 2.2:
self.target_position.linear.x = self.uav_current_pose.x
self.target_position.linear.y = self.uav_current_pose.y
if self.uav_current_pose.z < 1.9:
self.cmd = 'DISARM'
self.get_control_vel()
# self.obstacle_avoid()
self.vel_enu_pub.publish(self.uav_vel)
self.cmd_pub.publish(self.cmd)
rate.sleep()
    def get_control_vel(self): # P control in the FLU body frame
uav_dis_curtar = self.VectNorm2(self.target_position.linear, self.uav_current_pose) #distance
temp = self.VectDiff(self.target_position.linear, self.uav_current_pose) # vector
uav_vel_total = self.Kp * uav_dis_curtar # velocity
if uav_vel_total > self.velxy_max:
uav_vel_total = self.velxy_max
'''
if not uav_dis_curtar == 0.0:
self.uav_vel.linear.x = (temp.x/uav_dis_curtar) * uav_vel_total
self.uav_vel.linear.y = (temp.y/uav_dis_curtar) * uav_vel_total
else:
self.uav_vel.linear.x = 0.0
self.uav_vel.linear.y = 0.0
'''
self.target_yaw = self.pos2ang(self.target_position.linear.x, self.target_position.linear.y, self.uav_current_pose.x, self.uav_current_pose.y)
mid_yaw = self.target_yaw - self.uav_current_yaw
if mid_yaw > math.pi:
mid_yaw = mid_yaw - 2*math.pi
elif mid_yaw < -math.pi:
mid_yaw = 2*math.pi + mid_yaw
self.uav_vel.angular.z = self.Kpy * mid_yaw
if self.uav_vel.angular.z > self.angz_max:
self.uav_vel.angular.z = self.angz_max
elif self.uav_vel.angular.z < -self.angz_max:
self.uav_vel.angular.z = -self.angz_max
self.uav_vel.linear.x = uav_vel_total * math.cos(mid_yaw)
self.uav_vel.linear.y = uav_vel_total * math.sin(mid_yaw)
self.uav_vel.linear.z = self.Kp * (self.target_position.linear.z - self.uav_current_pose.z)
if self.uav_vel.linear.z > self.velz_max:
self.uav_vel.linear.z = self.velz_max
elif self.uav_vel.linear.z < - self.velz_max:
self.uav_vel.linear.z = - self.velz_max
def init_point(self):
if self.id == 0: # middle circle 3
self.target_position.linear.x = self.current_ugv0_pose.x # ugv0 -0.3
self.target_position.linear.y = self.current_ugv0_pose.y-0.2
elif self.id == 1: # middle circle 2
self.target_position.linear.x = self.current_ugv0_pose.x # ugv0 0.5
self.target_position.linear.y = self.current_ugv0_pose.y+0.6
elif self.id == 2: # outer loop 0
self.target_position.linear.x = self.current_ugv0_pose.x #ugv0 1.3
self.target_position.linear.y = self.current_ugv0_pose.y+1.3
elif self.id == 3: # outer loop 4
self.target_position.linear.x = self.current_ugv1_pose.x #ugv1 -0.3
self.target_position.linear.y = self.current_ugv1_pose.y-0.2
elif self.id == 4: # outer loop 1
self.target_position.linear.x = self.current_ugv1_pose.x
self.target_position.linear.y = self.current_ugv1_pose.y+0.6
elif self.id == 5: # outer loop 5
self.target_position.linear.x = self.current_ugv1_pose.x
self.target_position.linear.y = self.current_ugv1_pose.y+1.3
def return_home(self):
self.target_position.linear.z = 0.0
    # simple obstacle avoidance
def obstacle_avoid(self):
self.avo_id = []
for i in range(self.uav_num):
if not i == self.id:
dis_partner = math.sqrt(
(self.uav_current_pose.x - self.following_local_pose[i].pose.position.x) ** 2 + (self.uav_current_pose.y - self.following_local_pose[i].pose.position.y) ** 2)
if dis_partner < self.safe_dis:
if (self.uav_current_pose.z - self.following_local_pose[i].pose.position.z) < self.safe_height:
self.avo_id.append(i)
        avoid_num = len(self.avo_id)
        higher_num = 0
        if avoid_num > 0:
            for j in range(avoid_num):
                if self.following_local_pose[self.avo_id[j]].pose.position.z > self.uav_current_pose.z:
                    higher_num = higher_num + 1
            if higher_num == 0:
                self.target_position.linear.z = self.target_position.linear.z + self.safe_height
            else:
                self.target_position.linear.z = self.target_position.linear.z - self.safe_height * higher_num
        # if no partner is too close, leave the target height unchanged
def pos2ang(self, xa, ya, xb, yb): #([xb,yb] to [xa, ya])
if not xa-xb == 0:
angle = math.atan2((ya - yb),(xa - xb))
if (ya-yb > 0) and (angle < 0):
angle = angle + math.pi
elif (ya-yb < 0) and (angle > 0):
angle = angle - math.pi
elif ya-yb == 0:
if xa-xb > 0:
angle = 0.0
else:
angle = math.pi
else:
if ya-yb > 0:
angle = math.pi / 2
elif ya-yb <0:
angle = -math.pi / 2
else:
angle = 0.0
if angle < 0:
angle = angle + 2 * math.pi # 0 to 2pi
return angle
def VectNorm3(self, Pt1, Pt2):
norm = math.sqrt(pow(Pt1.x - Pt2.x, 2) + pow(Pt1.y - Pt2.y, 2)+ pow(Pt1.z - Pt2.z, 2))
return norm
def VectNorm2(self, Pt1, Pt2):
norm = math.sqrt(pow(Pt1.x - Pt2.x, 2) + pow(Pt1.y - Pt2.y, 2))
return norm
def VectDiff(self, endPt, startPt):
temp = Point()
temp.x = endPt.x - startPt.x
temp.y = endPt.y - startPt.y
return temp
if __name__ == '__main__':
returnhome = ReturnHome(sys.argv[1])
returnhome.loop()
144683
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, copysign
from scipy.optimize import brenth
from scipy.optimize import fsolve,fmin_l_bfgs_b,fmin_cg,fminbound
"""
sign of the number
"""
def sign(x):
if x==0:
return 0
else:
return copysign(1,x)
"""
if function f can't be computed, return None
"""
def f_None(f,x):
try:
return f(x)
except:
return None
"""
if the bound was touched returns None
L is the level of the function f
"""
def correct(x,y,f,L):
eps=10e-5
if abs(f(x,y)-L)>eps:
return None
else:
return y
"""
if output can't be produced, return 0, if there's division by zero, then it looks for the limit and returns it
"""
def _(f,*x):
try:
out=f(*x)
if out is None:
return float("inf")
else:
return out
except ZeroDivisionError:
l=len(x)
eps=abs(f(*[1e-02]*l)-f(*[1e-04]*l))
if abs(f(*[1e-04]*l)-f(*[1e-06]*l))<eps and abs(f(*[1e-06]*l)-f(*[1e-08]*l))<eps:
return f(*[1e-10]*l)
else:
return sign(f(*[1e-10]*l))*float("inf")
"""
produces the array of the first items of the element of the array
"""
def fst(X):
return list(map(lambda x: x[0],X))
"""
produces the array of the second items of the element of the array
"""
def snd(X):
return list(map(lambda x: x[1],X))
"""
unpacks [(X_1,Y_1),...,(X_k,Y_k),...,(X_n,Y_n)] into [(X_1,...,X_k,...,X_n),(Y_1,...,Y_k,...,Y_n)]
"""
def unpack(X):
return [fst(X),snd(X)]
"""
find the root of the function. If the ends of the interval have the same signs, try to make it smaller
"""
def rootalt(f,a,b):
eps=(b-a)/64.0
turn=0
N_iter=10
while abs(a-b)>eps and N_iter > 0:
N_iter-=1
try:
#return fmin_cg(f,(a+b)/2.0)[0]
return brenth(f,a,b)
except ValueError:
if turn==0:
a=a+eps
turn=1
else:
b=b+eps
turn=0
#return root2(f,a,b)
return None
def root(f,a,b):
a_init=a
b_init=b
eps=(b-a)/16.0
turn=0
N_iter=12
while abs(a-b)>eps and N_iter > 0 and f(a)*f(b)>0:
N_iter-=1
if turn==0:
a=a+eps
turn=1
else:
b=b-eps
turn=0
try:
return brenth(f,a,b)
except ValueError:
return fminbound(f,a_init,b_init)
def root2(f,a,b):
return fmin_cg(f,(a+b)/2.0,disp=False)[0]
def root3(f,a,b):
    return fmin_l_bfgs_b(func=f, x0=(a + b) / 2.0, bounds=[(a, b)], approx_grad=True)
"""
2-point numerical derivative
"""
def prime(f,dt=10e-3):
return lambda x: (f(x+dt)-f(x-dt))/(2*dt)
"""
Marginal rate of substitution of a utility function u(.)
"""
def MRS(u):
u_x=lambda x,y: prime(lambda z: u(z,y))(x)
u_y=lambda x,y: prime(lambda z: u(x,z))(y)
return lambda x,y: u_x(x,y)/u_y(x,y)
"""
Edgeworth Box parameter determine that to show on the plot
"""
class EdgeBoxParameter:
#def __init__(self,pareto,core,U1,U2,endow,walras,budget,N):
#boll_array=[pareto,core,U1,U2,endow,walras,budget]
def __init__(self,N,pareto=True,core=True,eq=True,budget=True):
self.N=N
self.pareto=pareto
self.core=core
self.eq=eq
self.budget=budget
defaultEBP=EdgeBoxParameter(100)
class EdgeBox():
def __init__(self,u1,u2,IE1,IE2,EBP=defaultEBP):
self.core=0
self.pareto=0
self.eq=0
self.p=[None,1]
self.p_weighted=[None,None]
self.u1=u1
self.u2=u2
self.u2_compl=lambda x,y: u2(self.IE[0]-x,self.IE[1]-y)
self.IE1=IE1
self.IE2=IE2
self.IE=[IE1[0]+IE2[0],IE1[1]+IE2[1]]
self.EBP=EBP
self.dt=min(self.IE)/float(EBP.N)
self.X=np.linspace(self.dt,self.IE[0]-self.dt,EBP.N)
self.Y=np.linspace(self.dt,self.IE[1]-self.dt,EBP.N)
self.calc_init()
self.calc()
def calc(self):
"""
calculate all solutions of the box
"""
self.calc_pareto()
self.calc_core()
self.calc_eq()
self.calc_budget()
def calc_init(self):
        self.UIE1=self.u1(*self.IE1) # utility of the 1st participant at her initial endowment
        self.UIE2=self.u2(*self.IE2) # utility of the 2nd participant at her initial endowment
        self.u_ie_1=lambda x: root(lambda y: self.u1(x,y)-self.UIE1,self.Y[0],self.Y[-1]) # indifference curve of the 1st participant through her initial endowment
        self.u_ie_2=lambda x: root(lambda y: self.u2(x,y)-self.UIE2,self.Y[0],self.Y[-1]) # indifference curve of the 2nd participant through her initial endowment
        self.u_ie_2_compl=lambda x: -self.u_ie_2(self.IE[0]-x)+self.IE[1] # the 2nd participant's indifference curve in the 1st participant's coordinates
U1 = list(map(lambda x: correct(x,f_None(self.u_ie_1,x),self.u1,self.UIE1),self.X))
U2 = list(map(lambda x: correct(x,f_None(self.u_ie_2_compl,x),self.u2_compl,self.UIE2),self.X))
self.U1 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1)))
self.U2 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2)))
U1_sort = sorted(self.U1,key=lambda x: x[1])
U2_sort = sorted(self.U2,key=lambda x: x[1])
if len(U1_sort)>0:
self.U1_min=U1_sort[0]
self.U1_max=U1_sort[-1]
else:
self.U1_min=None
self.U1_max=None
if len(U2_sort)>0:
self.U2_min=U2_sort[0]
self.U2_max=U2_sort[-1]
else:
self.U2_min=None
self.U2_max=None
self._B=lambda x,y,p: y-(p*(self.IE1[0]-x)+self.IE1[1]) # budget constraint
def calc_pareto(self):
self.MRS1=MRS(self.u1) # marginal rate of substitution of the 1st participant
self.MRS2=MRS(self.u2) # marginal rate of substitution of the 2nd participant
self._pareto=lambda x: root(lambda y: _(self.MRS1,x,y)-_(self.MRS2,self.IE[0]-x,self.IE[1]-y),self.Y[0],self.Y[-1]) # Pareto solutions in functional form
P = list(map(lambda x: f_None(self._pareto,x),self.X[1:-1]))
self.PARETO=list(zip(self.X[1:-1],P)) # set of some Pareto solution points (enough to draw it)
self._Bx=lambda x: root(lambda y: self._B(x,y,self.MRS1(x,y)),self.Y[0],self.Y[-1])
#plot_pareto,=plt.plot(X,P,linewidth=2)
PU1_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_1,x),self.U1_min[0],self.U1_max[0])
PU2_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_2_compl,x),self.U2_min[0],self.U2_max[0])
PU1_Y=self.u_ie_1(PU1_X)
PU2_Y=self.u_ie_2_compl(PU2_X)
self.PU1=[PU1_X,PU1_Y]
self.PU2=[PU2_X,PU2_Y]
self._Bx=lambda x: root(lambda y: _(self._B,x,y,_(self.MRS1,x,y)),self.Y[0],self.Y[-1])
def calc_core(self):
CORE_X = list(filter(lambda x: x>=self.PU1[0] and x<=self.PU2[0], self.X))
CORE_Y = list(map(lambda x: self._pareto(x), CORE_X))
self.CORE = list(zip(CORE_X,CORE_Y)) # set of some solutions in the core (could be one, could be many or none)
def calc_eq(self):
EQ_X1=root(lambda x: _(self._pareto,x)-_(self._Bx,x),self.PU1[0],self.PU2[0])
EQ_Y1=self._pareto(EQ_X1)
EQ_X2=self.IE[0]-EQ_X1
EQ_Y2=self.IE[1]-EQ_Y1
self.EQ1=[EQ_X1,EQ_Y1] # equilibrium solution for the 1st participant
self.EQ2=[EQ_X2,EQ_Y2] # equilibrium solution for the 2nd participant
self.p=self.MRS1(*self.EQ1) # price vector
self.p_weighted=[self.p/(self.p+1),1/(self.p+1)]
self.UEQ1=self.u1(*self.EQ1) # value of utility function of the 1st participant at her equilibrium point (functional form)
self.UEQ2=self.u2(*self.EQ2) # value of utility function of the 2nd participant at her equilibrium point (functional form)
self.u_eq_1=lambda x: root(lambda y: self.u1(x,y)-self.UEQ1,self.Y[0],self.Y[-1])
self.u_eq_2=lambda x: root(lambda y: self.u2(x,y)-self.UEQ2,self.Y[0],self.Y[-1])
self.u_eq_2_compl=lambda x: -self.u_eq_2(self.IE[0]-x)+self.IE[1]
U1_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_1,x),self.u1,self.UEQ1),self.X))
U2_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_2_compl,x),self.u2_compl,self.UEQ2),self.X))
self.U1_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1_EQ)))
self.U2_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2_EQ)))
def calc_budget(self,price=None):
if price is None:
price=self.p
self.Bp=lambda x: price*self.IE1[0]+self.IE1[1]-price*x # budget line (functional form)
Budget = list(map(self.Bp,self.X)) # set of some points from the budget line
self.BUDGET = list(zip(self.X,Budget))
def plot(self,fname=None):
plot_endow,=plt.plot(self.IE1[0],self.IE1[1],color="white",marker="o")
m=max(self.IE[0],self.IE[1])
plt.axis([0,m,0,m],autoscale=False)
plot_U1,=plt.plot(*unpack(self.U1),color="blue")
plot_U2,=plt.plot(*unpack(self.U2),color="brown")
plot_pareto,=plt.plot(*unpack(self.PARETO),linewidth=2,color="red")
plot_core,=plt.plot(*unpack(self.CORE),color="black",linewidth=4)
plot_U1_EQ,=plt.plot(*unpack(self.U1_EQ),ls='--',color="blue")
plot_U2_EQ,=plt.plot(*unpack(self.U2_EQ),ls='--',color="brown")
plot_budget,=plt.plot(*unpack(self.BUDGET),color="green")
plt.plot(self.PU1[0],self.PU1[1],color="blue",marker="o")
plt.plot(self.PU2[0],self.PU2[1],color="brown",marker="o")
plot_walras,=plt.plot(self.EQ1[0],self.EQ1[1],color="green",marker="o")
# annotation
plt.annotate("(%s;%s)"%(round(self.EQ1[0],2),round(self.EQ1[1],2)), xy=self.EQ1, xytext=(self.EQ1[0]+self.dt,self.EQ1[1]-self.dt))
plt.title("Edgeworth Box")
plt.legend([plot_pareto,plot_U1,plot_U2,plot_endow,plot_core,plot_walras,plot_budget,plot_U1_EQ,plot_U2_EQ]
,["Pareto","U1 before trade","U2 before trade","Init. endow.","Core","Equilibrium","Budget constraint","U1 at eq.","U2 at eq."])
#Axes Dscription
plt.xlabel("Units of 1-st good")
plt.ylabel("Units of 2-nd good")
if fname is not None:
plt.savefig(fname)
plt.close()
else:
plt.show(block=False)
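
# Hedged usage sketch: the Cobb-Douglas utilities and endowments below are
# illustrative values chosen here, not taken from the source; they simply
# exercise the solver end to end.
if __name__ == "__main__":
    box = EdgeBox(
        u1=lambda x, y: x ** 0.5 * y ** 0.5,
        u2=lambda x, y: x ** 0.3 * y ** 0.7,
        IE1=[4.0, 6.0],
        IE2=[6.0, 4.0],
    )
    print("equilibrium allocation of participant 1:", box.EQ1)
    print("relative price p:", box.p)
    box.plot()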
144691
from openiec.calculate.calcsigma import SigmaPure, SigmaSolLiq, SigmaCoherent
from openiec.property.molarvolume import MolarVolume, InterficialMolarVolume
from openiec.property.meltingenthalpy import MeltingEnthalpy
# binary
# ternary
144749
from Foundation import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
class TestNSOperation (TestCase):
def testConstants(self):
self.assertEqual(NSOperationQueuePriorityVeryLow, -8)
self.assertEqual(NSOperationQueuePriorityLow, -4)
self.assertEqual(NSOperationQueuePriorityNormal, 0)
self.assertEqual(NSOperationQueuePriorityHigh, 4)
self.assertEqual(NSOperationQueuePriorityVeryHigh, 8)
self.assertIsInstance(NSInvocationOperationVoidResultException, unicode)
self.assertIsInstance(NSInvocationOperationCancelledException, unicode)
self.assertEqual(NSOperationQueueDefaultMaxConcurrentOperationCount, -1)
def testMethods(self):
self.assertResultIsBOOL(NSOperation.isCancelled)
self.assertResultIsBOOL(NSOperation.isExecuting)
self.assertResultIsBOOL(NSOperation.isFinished)
self.assertResultIsBOOL(NSOperation.isConcurrent)
self.assertResultIsBOOL(NSOperation.isReady)
self.assertResultIsBOOL(NSOperationQueue.isSuspended)
self.assertArgIsBOOL(NSOperationQueue.setSuspended_, 0)
@min_os_level('10.6')
def testMethods10_6(self):
self.assertResultIsBlock(NSOperation.completionBlock, b'v')
self.assertArgIsBlock(NSOperation.setCompletionBlock_, 0, b'v')
self.assertArgIsBlock(NSBlockOperation.blockOperationWithBlock_, 0, b'v')
self.assertArgIsBlock(NSBlockOperation.addExecutionBlock_, 0, b'v')
self.assertArgIsBOOL(NSOperationQueue.addOperations_waitUntilFinished_, 1)
self.assertArgIsBlock(NSOperationQueue.addOperationWithBlock_, 0, b'v')
if __name__ == "__main__":
main()
144754
TYPE_MAP = {
0: 'bit',
1: 'u32',
2: 's32',
3: 'float',
'bit': 0,
'u32': 1,
's32': 2,
'float': 3,
}
class HalType(object):
bit = 0
u32 = 1
s32 = 2
float = 3
    @classmethod
    def toString(cls, typ):
        return TYPE_MAP[typ]
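
# Minimal round-trip check: TYPE_MAP maps numeric codes to names and names
# back to codes, and HalType.toString resolves a code to its name.
if __name__ == "__main__":
    assert HalType.toString(HalType.s32) == 's32'
    assert TYPE_MAP[TYPE_MAP['float']] == 'float'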
144772
from typing import Dict
from gopay.http import Request, Response, Browser
from gopay.enums import Language
import json
JSON = 'application/json'
FORM = 'application/x-www-form-urlencoded'
class GoPay:
def __init__(self, config: dict, browser: Browser) -> None:
self.browser = browser
self.config = config
def url(self, path: str):
if 'gatewayUrl' in self.config:
host = self.config['gatewayUrl']
if not host.endswith('/api/'):
host += "api/" if host.endswith('/') else "/api/"
return host + path
host = 'https://gate.gopay.cz/api/' if self.config['isProductionMode'] else 'https://gw.sandbox.gopay.com/api/'
return host + path
def call(self, url: str, content_type: str, authorization: str, data: Dict) -> Response:
request = Request()
request.url = self.url(url)
request.headers = {
'Accept': 'application/json',
'Accept-Language': 'cs-CZ' if self.config['language'] in [Language.CZECH, Language.SLOVAK] else 'en-US',
'Authorization': authorization
}
if content_type:
request.headers["Content-Type"] = content_type
if data is None:
request.method = 'get'
else:
request.method = 'post'
request.body = json.dumps(data) if content_type == JSON else data
return self.browser.browse(request)
def add_defaults(data: dict, defaults: dict) -> dict:
full = defaults.copy()
if data is not None:
full.update(data)
return full
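
# Small sketch of add_defaults: caller-supplied keys override the defaults,
# missing keys fall back to them, and the defaults dict itself is not mutated.
if __name__ == "__main__":
    defaults = {"currency": "CZK", "lang": "cs"}
    print(add_defaults({"lang": "en"}, defaults))  # {'currency': 'CZK', 'lang': 'en'}
    print(defaults)                                # unchanged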
144789
import asyncio
import os
import tempfile
from pathlib import Path
from typing import Optional, Union
from aiofiles import os as aiofiles_os
from aiohttp.abc import AbstractStreamWriter
from aiohttp.typedefs import LooseHeaders
from aiohttp.web import BaseRequest, FileResponse
makedirs = aiofiles_os.wrap(os.makedirs) # as in aiofiles.os.py module
rename = aiofiles_os.wrap(os.rename) # as in aiofiles.os.py module
path_getsize = aiofiles_os.wrap(os.path.getsize) # as in aiofiles.os.py module
def _candidate_tmp_dir() -> Path:
# pylint: disable=protected-access
# let us all thank codeclimate for this beautiful piece of code
return Path("/") / f"tmp/{next(tempfile._get_candidate_names())}"
async def get_empty_tmp_dir() -> str:
candidate = _candidate_tmp_dir()
while candidate.is_dir() or candidate.is_file() or candidate.is_symlink():
candidate = _candidate_tmp_dir()
await makedirs(candidate, exist_ok=True)
return str(candidate)
async def remove_dir(directory: str) -> None:
await asyncio.create_subprocess_exec("rm", "-rf", directory)
class CleanupFileResponse(FileResponse): # pylint: disable=too-many-ancestors
"""
After the FileResponse finishes a callback to remove the
tmp directory where the export data was stored is scheduled and ran.
"""
def __init__(
self,
temp_dir: str,
path: Union[str, Path],
chunk_size: int = 256 * 1024,
status: int = 200,
reason: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
) -> None:
super().__init__(
path=path,
chunk_size=chunk_size,
status=status,
reason=reason,
headers=headers,
)
self._temp_dir = temp_dir
async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
try:
return await super().prepare(request=request)
finally:
await asyncio.get_event_loop().create_task(remove_dir(self._temp_dir))
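
# Hedged usage sketch (hypothetical handler, kept as a comment): serve a
# generated file and let CleanupFileResponse remove its scratch directory
# once the response finishes. `build_export` is a placeholder.
#
#     async def export_handler(request):
#         temp_dir = await get_empty_tmp_dir()
#         export_path = Path(temp_dir) / "export.zip"
#         await build_export(export_path)  # hypothetical
#         return CleanupFileResponse(temp_dir=temp_dir, path=export_path)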
144805
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import logging
import unittest
from jsonpath_ng.lexer import JsonPathLexer
from jsonpath_ng.parser import JsonPathParser
from jsonpath_ng.jsonpath import *
class TestParser(unittest.TestCase):
# TODO: This will be much more effective with a few regression tests and `arbitrary` parse . pretty testing
@classmethod
def setup_class(cls):
logging.basicConfig()
def check_parse_cases(self, test_cases):
parser = JsonPathParser(debug=True, lexer_class=lambda:JsonPathLexer(debug=False)) # Note that just manually passing token streams avoids this dep, but that sucks
for string, parsed in test_cases:
print(string, '=?=', parsed) # pytest captures this and we see it only on a failure, for debugging
assert parser.parse(string) == parsed
def test_atomic(self):
self.check_parse_cases([('foo', Fields('foo')),
('*', Fields('*')),
('baz,bizzle', Fields('baz','bizzle')),
('[1]', Index(1)),
('[1:]', Slice(start=1)),
('[:]', Slice()),
('[*]', Slice()),
('[:2]', Slice(end=2)),
('[1:2]', Slice(start=1, end=2)),
('[5:-2]', Slice(start=5, end=-2))
])
def test_nested(self):
self.check_parse_cases([('foo.baz', Child(Fields('foo'), Fields('baz'))),
('foo.baz,bizzle', Child(Fields('foo'), Fields('baz', 'bizzle'))),
('foo where baz', Where(Fields('foo'), Fields('baz'))),
('foo..baz', Descendants(Fields('foo'), Fields('baz'))),
('foo..baz.bing', Descendants(Fields('foo'), Child(Fields('baz'), Fields('bing'))))])
144832
import pandas as pd
from .constants import *
def compare_frameworks(results_raw, frameworks=None, banned_datasets=None, folds_to_keep=None, filter_errors=True, verbose=True, columns_to_agg_extra=None, datasets=None):
columns_to_agg = [DATASET, FRAMEWORK, PROBLEM_TYPE, TIME_TRAIN_S, METRIC_ERROR]
if columns_to_agg_extra:
columns_to_agg += columns_to_agg_extra
if frameworks is None:
frameworks = sorted(list(results_raw[FRAMEWORK].unique()))
if filter_errors: # FIXME: This should not be toggled, instead filter_errors should be passed to filter_results
results = filter_results(results_raw=results_raw, valid_frameworks=frameworks, banned_datasets=banned_datasets, folds_to_keep=folds_to_keep)
else:
results = results_raw.copy()
results_agg = results[columns_to_agg].groupby([DATASET, FRAMEWORK, PROBLEM_TYPE]).mean().reset_index()
worst_scores = results_agg.sort_values(METRIC_ERROR, ascending=False).drop_duplicates(DATASET)
worst_scores = worst_scores[[DATASET, METRIC_ERROR]]
worst_scores.columns = [DATASET, 'WORST_ERROR']
best_scores = results_agg.sort_values(METRIC_ERROR, ascending=True).drop_duplicates(DATASET)
best_scores = best_scores[[DATASET, METRIC_ERROR]]
best_scores.columns = [DATASET, 'BEST_ERROR']
results_agg = results_agg.merge(best_scores, on=DATASET)
results_agg = results_agg.merge(worst_scores, on=DATASET)
results_agg[BESTDIFF] = 1 - (results_agg['BEST_ERROR'] / results_agg[METRIC_ERROR])
results_agg[LOSS_RESCALED] = (results_agg[METRIC_ERROR] - results_agg['BEST_ERROR']) / (results_agg['WORST_ERROR'] - results_agg['BEST_ERROR'])
results_agg[BESTDIFF] = results_agg[BESTDIFF].fillna(0)
results_agg[LOSS_RESCALED] = results_agg[LOSS_RESCALED].fillna(0)
results_agg = results_agg.drop(['BEST_ERROR'], axis=1)
results_agg = results_agg.drop(['WORST_ERROR'], axis=1)
valid_tasks = list(results_agg[DATASET].unique())
results_ranked, results_ranked_by_dataset = rank_result(results_agg)
rank_1 = results_ranked_by_dataset[results_ranked_by_dataset[RANK] == 1]
rank_1_count = rank_1[FRAMEWORK].value_counts()
results_ranked['rank=1_count'] = rank_1_count
results_ranked['rank=1_count'] = results_ranked['rank=1_count'].fillna(0).astype(int)
rank_2 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 1) & (results_ranked_by_dataset[RANK] <= 2)]
rank_2_count = rank_2[FRAMEWORK].value_counts()
results_ranked['rank=2_count'] = rank_2_count
results_ranked['rank=2_count'] = results_ranked['rank=2_count'].fillna(0).astype(int)
rank_3 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 2) & (results_ranked_by_dataset[RANK] <= 3)]
rank_3_count = rank_3[FRAMEWORK].value_counts()
results_ranked['rank=3_count'] = rank_3_count
results_ranked['rank=3_count'] = results_ranked['rank=3_count'].fillna(0).astype(int)
rank_l3 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 3)]
rank_l3_count = rank_l3[FRAMEWORK].value_counts()
results_ranked['rank>3_count'] = rank_l3_count
results_ranked['rank>3_count'] = results_ranked['rank>3_count'].fillna(0).astype(int)
if datasets is None:
datasets = sorted(list(results_ranked_by_dataset[DATASET].unique()))
errors_list = []
for framework in frameworks:
results_framework = filter_results(results_raw=results_raw, valid_frameworks=[framework], banned_datasets=banned_datasets, folds_to_keep=folds_to_keep)
results_framework_agg = results_framework[columns_to_agg].groupby([DATASET, FRAMEWORK, PROBLEM_TYPE]).mean().reset_index()
num_valid = len(results_framework_agg[results_framework_agg[FRAMEWORK] == framework])
num_errors = len(datasets) - num_valid
errors_list.append(num_errors)
errors_series = pd.Series(data=errors_list, index=frameworks)
results_ranked['error_count'] = errors_series
results_ranked['error_count'] = results_ranked['error_count'].fillna(0).astype(int)
results_ranked = results_ranked.reset_index()
if verbose:
print('valid_tasks:', len(valid_tasks))
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):
print(results_ranked)
print()
return results_ranked, results_ranked_by_dataset
def filter_results(results_raw, valid_frameworks, banned_datasets=None, folds_to_keep=None):
results = results_raw.copy()
if folds_to_keep is not None:
results = results[results[FOLD].isin(folds_to_keep)]
results = keep_only_valid_datasets(results, valid_models=valid_frameworks)
if banned_datasets is not None:
results = results[~results[DATASET].isin(banned_datasets)]
return results
def keep_only_valid_datasets(result_df, valid_models):
tasks = list(result_df[DATASET].unique())
frameworks = list(result_df[FRAMEWORK].unique())
dfs = []
for task in tasks:
df = result_df[result_df[DATASET] == task]
df = df[df[FRAMEWORK].isin(valid_models)]
if set(list(df[FRAMEWORK].unique())) == set(valid_models):
dfs.append(df)
else:
valid_models_found = list(df[FRAMEWORK].unique())
models_not_found = [model for model in valid_models if model not in valid_models_found]
# print('NOT ALL FRAMEWORKS HAVE RESULT FOR', task)
# print('Required:', valid_models)
# print('Found: ', valid_models_found)
# print('Missing: ', models_not_found)
if len(dfs) == 0:
valid_result_df = pd.DataFrame(columns=result_df.columns)
else:
valid_result_df = pd.concat(dfs, ignore_index=True)
return valid_result_df
def rank_result(result_df):
datasets = list(result_df[DATASET].unique())
dfs = []
for dataset in datasets:
dataset_df = result_df[result_df[DATASET] == dataset].copy()
        dataset_df[METRIC_ERROR] = dataset_df[METRIC_ERROR].round(5)
sorted_df = dataset_df.sort_values(by=[METRIC_ERROR])
sorted_df[RANK] = sorted_df[METRIC_ERROR].rank()
dfs.append(sorted_df)
sorted_df_full = pd.concat(dfs, ignore_index=True)
model_ranks_df = sorted_df_full.groupby([FRAMEWORK]).mean().sort_values(by=RANK)
return model_ranks_df, sorted_df_full
144835
r"""undocumented
用于辅助生成 fastNLP 文档的代码
"""
__all__ = []
import inspect
import sys
def doc_process(m):
for name, obj in inspect.getmembers(m):
if inspect.isclass(obj) or inspect.isfunction(obj):
if obj.__module__ != m.__name__:
if obj.__doc__ is None:
# print(name, obj.__doc__)
pass
else:
module_name = obj.__module__
                    # identify and annotate where the class/function sits in the module hierarchy
while 1:
defined_m = sys.modules[module_name]
try:
if "undocumented" not in defined_m.__doc__ and name in defined_m.__all__:
obj.__doc__ = r"别名 :class:`" + m.__name__ + "." + name + "`" \
+ " :class:`" + module_name + "." + name + "`\n" + obj.__doc__
break
module_name = ".".join(module_name.split('.')[:-1])
if module_name == m.__name__:
# print(name, ": not found defined doc.")
break
except:
print("Warning: Module {} lacks `__doc__`".format(module_name))
break
            # identify and annotate base classes; only shown when the base
            # class is also defined inside fastNLP
            if inspect.isclass(obj):
                for base in obj.__bases__:
                    if base.__module__.startswith("fastNLP"):
                        parts = base.__module__.split(".")
                        module_name, i = "fastNLP", 1
for i in range(len(parts) - 1):
defined_m = sys.modules[module_name]
try:
if "undocumented" not in defined_m.__doc__ and name in defined_m.__all__:
obj.__doc__ = r"基类 :class:`" + defined_m.__name__ + "." + base.__name__ + "` \n\n" + obj.__doc__
break
module_name += "." + parts[i + 1]
except:
print("Warning: Module {} lacks `__doc__`".format(module_name))
break
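
# Usage note (inferred from the checks above, not stated in the source):
# doc_process is meant to be called on a package module after all re-exports
# are in place, e.g. doc_process(sys.modules["fastNLP"]), so that every
# re-exported class or function gets its alias / base-class annotation.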
144864
import moai.utils.engine as mieng
import omegaconf.omegaconf
import logging
log = logging.getLogger(__name__)
__all__ = ["Latent_Visualizers"]
class LatentVisualizers(mieng.Collection, mieng.Interval):
def __init__(self,
batch_interval:int,
visualizers: omegaconf.DictConfig,
latent_visualizers: omegaconf.DictConfig,
):
mieng.Interval.__init__(self, batch_interval)
mieng.Collection.__init__(
self,
items=visualizers,
name="visualizers"
)
mieng.Collection.__init__(
self,
items=latent_visualizers,
name="latent_visualizers"
)
144890
import numpy as np
import pytest
from ome_zarr.scale import Scaler
class TestScaler:
@pytest.fixture(
params=(
(1, 2, 1, 256, 256),
(3, 512, 512),
(256, 256),
),
ids=["5D", "3D", "2D"],
)
def shape(self, request):
return request.param
def create_data(self, shape, dtype=np.uint8, mean_val=10):
rng = np.random.default_rng(0)
return rng.poisson(mean_val, size=shape).astype(dtype)
def check_downscaled(self, downscaled, shape, scale_factor=2):
expected_shape = shape
for data in downscaled:
assert data.shape == expected_shape
expected_shape = expected_shape[:-2] + tuple(
sh // scale_factor for sh in expected_shape[-2:]
)
def test_nearest(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.nearest(data)
self.check_downscaled(downscaled, shape)
# this fails because of wrong channel dimension; need to fix in follow-up PR
@pytest.mark.xfail
def test_gaussian(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.gaussian(data)
self.check_downscaled(downscaled, shape)
# this fails because of wrong channel dimension; need to fix in follow-up PR
@pytest.mark.xfail
def test_laplacian(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.laplacian(data)
self.check_downscaled(downscaled, shape)
def test_local_mean(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.local_mean(data)
self.check_downscaled(downscaled, shape)
@pytest.mark.skip(reason="This test does not terminate")
def test_zoom(self, shape):
data = self.create_data(shape)
scaler = Scaler()
downscaled = scaler.zoom(data)
self.check_downscaled(downscaled, shape)
144946
import random
import types
import typing
import torch
class Preprocessing(object):
def __init__(self, augmentation: str='hvr') -> None:
self.augmentation = augmentation
return
def _apply(self, f: typing.Callable, **kwargs) -> dict:
applied = {k: f(v) for k, v in kwargs.items()}
return applied
@torch.no_grad()
def augment(self, augmentation: typing.Optional[str], **kwargs) -> dict:
if augmentation is None:
augmentation = self.augmentation
hflip = 'h' in augmentation and random.random() < 0.5
vflip = 'v' in augmentation and random.random() < 0.5
rot90 = 'r' in augmentation and random.random() < 0.5
def _augment(x: torch.Tensor) -> torch.Tensor:
if hflip:
x = x.flip(-1)
if vflip:
x = x.flip(-2)
if rot90:
x = x.transpose(-2, -1)
applied = self._apply(_augment, **kwargs)
return applied
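
# Minimal sketch: the same random flips/rotation are applied to every tensor
# passed as a keyword, so paired inputs (e.g. an image and its mask) stay
# aligned with each other.
if __name__ == "__main__":
    pre = Preprocessing(augmentation='hvr')
    image = torch.rand(3, 8, 8)
    mask = torch.rand(1, 8, 8)
    out = pre.augment(None, image=image, mask=mask)
    print(out['image'].shape, out['mask'].shape)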
144955
from .source_gateway import SourceGateway
from .meta_source_gateway import MetaSourceGateway
from .source_service import SourceService
144969
import sys, os, pytest
sys.path.append('.')
import submit
output_worked = 'Your submission has been accepted and will be graded shortly.'
from io import StringIO
class TestCorrectMetadata:
def test_001(self):
meta_data = submit.load_metadata('test/_coursera')
assert len(meta_data.part_data) == 6
class TestBrokenMetadata:
# file not found
def test_001(self):
with pytest.raises(SystemExit):
submit.load_metadata('test/_missing')
# bad meta data format
def test_002(self):
with pytest.raises(SystemExit):
submit.load_metadata('test/_empty')
class TestLogin:
def setup_class(self):
self.parser = submit.build_parser()
def test_001(self):
sys.stdin = StringIO(u'username\ntoken\n')
login, token = submit.login_prompt('')
assert(login == 'username')
assert(token == 'token')
def test_002(self):
login, token = submit.login_prompt('test/_credentials')
assert(login == '<EMAIL>')
assert(token == '<KEY>')
# testing manual override when credentials file is incorrect
# def test_003(self, capfd):
# login, token = submit.login_prompt('test/_credentials')
# sys.stdin = StringIO(u'1\n%s\n%s\n' % (login, token))
# submit.main(self.parser.parse_args(['-o', './test/model/model.mzn', '-m', './test/_coursera', '-c', './test/_credentials3']))
# resout, reserr = capfd.readouterr()
# assert(output_worked in resout)
class TestPartsPrompt:
def setup_class(self):
self.metadata = submit.load_metadata('test/_coursera')
def test_001(self, capfd):
sys.stdin = StringIO(u'0.1\n1\n')
problems = submit.part_prompt(self.metadata.part_data)
assert(len(problems) == 1)
resout, reserr = capfd.readouterr()
assert('It is not an integer.' in resout)
def test_002(self, capfd):
sys.stdin = StringIO(u'100\n1\n')
problems = submit.part_prompt(self.metadata.part_data)
assert(len(problems) == 1)
resout, reserr = capfd.readouterr()
assert('It is out of the valid range' in resout)
def test_003(self, capfd):
sys.stdin = StringIO(u'-1\n1\n')
problems = submit.part_prompt(self.metadata.part_data)
assert(len(problems) == 1)
resout, reserr = capfd.readouterr()
assert('It is out of the valid range' in resout)
def test_004(self, capfd):
sys.stdin = StringIO(u'1,2\n')
problems = submit.part_prompt(self.metadata.part_data)
assert(len(problems) == 2)
def test_005(self, capfd):
sys.stdin = StringIO(u'0\n')
problems = submit.part_prompt(self.metadata.part_data)
assert(len(problems) == len(self.metadata.part_data))
class TestProblemSubmission:
def setup_class(self):
self.parser = submit.build_parser()
# # tests problem selection
# def test_001(self, capfd):
# sys.stdin = StringIO(u'1\n')
# submit.main(self.parser.parse_args(['-m', './test/_coursera', '-c', './test/_credentials']))
# output = 'Unable to locate assignment file'
# resout, reserr = capfd.readouterr()
# assert(output in resout)
# tests running a problem
def test_002(self, capfd):
sys.stdin = StringIO(u'1\n')
submit.main(self.parser.parse_args(['-m', './test/_coursera', '-c', './test/_credentials']))
resout, reserr = capfd.readouterr()
assert(output_worked in resout)
# tests running a problem in record mode
def test_003(self, capfd):
sys.stdin = StringIO(u'1\n')
submit.main(self.parser.parse_args(['-m', './test/_coursera', '-c', './test/_credentials', '-rs']))
output = 'writting submission file: _awPVV'
resout, reserr = capfd.readouterr()
assert(output in resout)
assert(not output_worked in resout)
os.remove('_awPVV/submission.sub')
os.rmdir('_awPVV')
# tests running a solver with a int return value
def test_004(self, capfd):
# sys.stdin = StringIO(u'2\n')
# submit.main(self.parser.parse_args(['-m', './test/_coursera', '-c', './test/_credentials']))
# resout, reserr = capfd.readouterr()
# assert(output_worked in resout)
sys.path.insert(0, 'test/solver')
submission = submit.output('./test/_empty', 'int_val_solver.py')
assert('0' in submission)
resout, reserr = capfd.readouterr()
assert('Warning' in resout)
# tests running a solver with a unicode return value
def test_005(self, capfd):
# sys.stdin = StringIO(u'3\n')
# submit.main(self.parser.parse_args(['-m', './test/_coursera', '-c', './test/_credentials']))
# resout, reserr = capfd.readouterr()
# assert(output_worked in resout)
sys.path.insert(0, 'test/solver')
submission = submit.output('./test/_empty', 'unicode_solver.py')
assert(u'\u03BB' in submission)
# class TestBrokenSubmission:
# def setup_method(self, _):
# self.parser = submit.build_parser()
# # should throw incorrect problem parts
# def test_001(self, capfd):
# sys.stdin = StringIO(u'1\n')
# submit.main(self.parser.parse_args(['-m', './test/_coursera3', '-c', './test/_credentials2', '-o', './test/model/model.mzn']))
# output_1 = 'Unexpected response code, please contact the course staff.'
# output_2 = 'Expected parts: '
# output_3 = 'but found: '
# resout, reserr = capfd.readouterr()
# print(resout)
# assert(output_1 in resout)
# assert(output_2 in resout)
# assert(output_3 in resout)
# # should throw incorrect login details
# def test_002(self, capfd):
# sys.stdin = StringIO(u'1\n')
# submit.main(self.parser.parse_args(['-m', './test/_coursera3', '-c', './test/_credentials', '-o', './test/model/model.mzn']))
# output = 'Please use a token for the assignment you are submitting.'
# resout, reserr = capfd.readouterr()
# print(resout)
# assert(output in resout)
144986
from ..serializer import Serializable
from .performance import PerformanceInfo
from .message import Message
class Response(Serializable):
"""
Response from chatbot
Attributes
----------
messages : list of minette.Message
Response messages
headers : dict
Response header
performance : minette.PerformanceInfo
        Performance information of each step in chat()
"""
def __init__(self, messages=None, headers=None, performance=None):
"""
Parameters
----------
messages : list of minette.Message, default None
Response messages. If None, `[]` is set to `messages`.
headers : dict, default None
Response headers. If None, `{}` is set to `headers`
performance : minette.PerformanceInfo, default None
            Performance information of each step in chat().
If None, create new PerformanceInfo object.
"""
self.messages = messages or []
self.headers = headers or {}
self.performance = performance or PerformanceInfo()
@classmethod
def _types(cls):
return {
"messages": Message,
"performance": PerformanceInfo
}
144994
import cv2
import sys, os, glob, re
import json
from os.path import join, dirname, abspath, realpath, isdir
from os import makedirs
import numpy as np
from shutil import rmtree
from ipdb import set_trace
from .bench_utils.bbox_helper import rect_2_cxy_wh, cxy_wh_2_rect
def center_error(rects1, rects2):
"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
(min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
def overlap_ratio(rect1, rect2):
'''
Compute overlap ratio between two rects
- rect: 1d array of [x,y,w,h] or
2d array of N x [x,y,w,h]
'''
if rect1.ndim==1:
rect1 = rect1[None,:]
if rect2.ndim==1:
rect2 = rect2[None,:]
left = np.maximum(rect1[:,0], rect2[:,0])
right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2])
top = np.maximum(rect1[:,1], rect2[:,1])
bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3])
intersect = np.maximum(0,right - left) * np.maximum(0,bottom - top)
union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect
iou = np.clip(intersect / union, 0, 1)
return iou
def calc_curves(ious, center_errors, nbins_iou, nbins_ce):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
prec_curve = np.mean(bin_ce, axis=0)
return succ_curve, prec_curve
def compute_success_overlap(gt_bb, result_bb):
thresholds_overlap = np.arange(0, 1.05, 0.05)
n_frame = len(gt_bb)
success = np.zeros(len(thresholds_overlap))
iou = overlap_ratio(gt_bb, result_bb)
for i in range(len(thresholds_overlap)):
success[i] = sum(iou > thresholds_overlap[i]) / float(n_frame)
return success
def compute_success_error(gt_center, result_center):
thresholds_error = np.arange(0, 51, 1)
n_frame = len(gt_center)
success = np.zeros(len(thresholds_error))
dist = np.sqrt(np.sum(np.power(gt_center - result_center, 2), axis=1))
for i in range(len(thresholds_error)):
success[i] = sum(dist <= thresholds_error[i]) / float(n_frame)
return success
def get_result_bb(arch, seq):
result_path = join(arch, seq + '.txt')
    temp = np.loadtxt(result_path, delimiter=',').astype(float)
return np.array(temp)
def convert_bb_to_center(bboxes):
return np.array([(bboxes[:, 0] + (bboxes[:, 2] - 1) / 2),
(bboxes[:, 1] + (bboxes[:, 3] - 1) / 2)]).T
def test_otb(v_id, tracker, video, args):
toc, regions = 0, []
image_files, gt = video['image_files'], video['gt']
for f, image_file in enumerate(image_files):
im = cv2.imread(image_file)
tic = cv2.getTickCount()
if f == 0:
init_pos, init_sz = rect_2_cxy_wh(gt[f])
state = tracker.setup(im, init_pos, init_sz)
location = gt[f]
regions.append(gt[f])
elif f > 0:
state = tracker.track(im, state)
location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
regions.append(location)
toc += cv2.getTickCount() - tic
if args.viz and f > 0: # visualization
if f == 0: cv2.destroyAllWindows()
if len(gt[f]) == 8:
                cv2.polylines(im, [np.array(gt[f], int).reshape((-1, 1, 2))],
True, (0, 255, 0), 3)
else:
cv2.rectangle(im, (gt[f, 0], gt[f, 1]), (gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3]),
(0, 255, 0), 3)
if len(location) == 8:
cv2.polylines(im, [location.reshape((-1, 1, 2))], True, (0, 255, 255), 3)
else:
                location = [int(l) for l in location]
cv2.rectangle(im, (location[0], location[1]),
(location[0] + location[2], location[1] + location[3]),
(0, 255, 255), 3)
cv2.putText(im, "score: {:.4f}".format(state['score']), (40, 40),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(video['name'], im)
cv2.moveWindow(video['name'], 200, 50)
cv2.waitKey(1)
cv2.destroyAllWindows()
toc /= cv2.getTickFrequency()
# save result
video_path = join('benchmark/results/', args.dataset, args.save_path)
if not isdir(video_path): makedirs(video_path)
result_path = join(video_path, '{:s}.txt'.format(video['name']))
    with open(result_path, "w") as fout:
        for x in regions:
            fout.write(','.join([str(i) for i in x])+'\n')
print('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(
v_id, video['name'], toc, f / toc))
return f / toc
def eval_otb(save_path, delete_after):
base_path = join(realpath(dirname(__file__)), '../data', 'OTB2015')
json_path = base_path + '.json'
annos = json.load(open(json_path, 'r'))
seqs = list(annos.keys())
video_path = join('benchmark/results/OTB2015/', save_path)
trackers = glob.glob(join(video_path))
_, _, files = next(os.walk(trackers[0]))
num_files = len(files)
thresholds_overlap = np.arange(0, 1.05, 0.05)
success_overlap = np.zeros((num_files, len(trackers), len(thresholds_overlap)))
thresholds_error = np.arange(0, 51, 1)
success_error = np.zeros((num_files, len(trackers), len(thresholds_error)))
for i, f in enumerate(files):
seq = f.replace('.txt', '')
        gt_rect = np.array(annos[seq]['gt_rect']).astype(float)
gt_center = convert_bb_to_center(gt_rect)
for j in range(len(trackers)):
tracker = trackers[j]
bb = get_result_bb(tracker, seq)
center = convert_bb_to_center(bb)
success_overlap[i][j] = compute_success_overlap(gt_rect, bb)
success_error[i][j] = compute_success_error(gt_center, center)
max_auc = 0.0
max_prec = 0.0
for i in range(len(trackers)):
auc = success_overlap[:, i, :].mean()
if auc > max_auc:
max_auc = auc
prec = success_error[:, i, :].mean()
if prec > max_prec:
max_prec = prec
if delete_after:
rmtree(trackers[0])
    return {'auc': max_auc, 'precision': max_prec}
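
# Hedged usage sketch (kept as a comment): test_otb() must first have been run
# for every video so that benchmark/results/OTB2015/<save_path>/<video>.txt
# exists; then
#
#     metrics = eval_otb(save_path='my_tracker', delete_after=False)
#     # -> {'auc': ..., 'precision': ...}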
145046
import os
import pytest
from helpers.runner import generate_project, run_main
from helpers.cli import cmdout
TEST_MODULE = """import lemoncheesecake.api as lcc
@lcc.suite("My Suite")
@lcc.prop("suite_prop", "suite_prop_value")
@lcc.tags("suite_tag")
@lcc.link("http://bug.tra.cker/1234", "#1234")
class mysuite:
@lcc.test("My Test")
@lcc.prop("test_prop", "test_prop_value")
@lcc.tags("test_tag")
@lcc.link("http://bug.tra.cker/1235", "#1235")
def mytest(self):
pass
"""
@pytest.fixture()
def project(tmpdir):
generate_project(tmpdir.strpath, "mysuite", TEST_MODULE)
old_cwd = os.getcwd()
os.chdir(tmpdir.strpath)
yield
os.chdir(old_cwd)
def test_show_default_options(project, cmdout):
assert run_main(["show"]) == 0
cmdout.assert_substrs_in_line(0, ["mysuite", "suite_prop", "suite_prop_value", "suite_tag", "#1234"])
cmdout.assert_substrs_in_line(1, ["mysuite.mytest", "test_prop", "test_prop_value", "test_tag", "#1235"])
def test_show_opt_show_description(project, cmdout):
assert run_main(["show", "--show-description"]) == 0
cmdout.assert_substrs_in_line(0, ["My Suite", "suite_prop", "suite_prop_value", "suite_tag", "#1234"])
cmdout.assert_substrs_in_line(1, ["My Test", "test_prop", "test_prop_value", "test_tag", "#1235"])
def test_show_with_filter(project, cmdout):
assert "does not match" in run_main(["show", "--tag", "doesnotexist"])
145125
from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import SharedFollower
__all__ = ['SharedFollowersResponse']
class SharedFollowersResponseInterface(ApiResponseInterface):
users: [SharedFollower]
class SharedFollowersResponse(ApiResponse, SharedFollowersResponseInterface):
pass
145171
import sys
import numpy as np
import skvideo.io
import concurrent.futures
import time
def _detect_black_bars_from_video(frames, blackbar_threshold=16, max_perc_to_trim=.2):
"""
:param frames: [num_frames, height, width, 3]
:param blackbar_threshold: Pixels must be this intense for us to not trim
    :param max_perc_to_trim: Will trim at most 20% of the image (by default) in each dimension
:return:
"""
    # Detect black bars
has_content = frames.max(axis=(0, -1)) >= blackbar_threshold
h, w = has_content.shape
y_frames = np.where(has_content.any(1))[0]
if y_frames.size == 0:
print("Oh no, there are no valid yframes")
y_frames = [h // 2]
y1 = min(y_frames[0], int(h * max_perc_to_trim))
y2 = max(y_frames[-1] + 1, int(h * (1 - max_perc_to_trim)))
x_frames = np.where(has_content.any(0))[0]
if x_frames.size == 0:
print("Oh no, there are no valid xframes")
x_frames = [w // 2]
x1 = min(x_frames[0], int(w * max_perc_to_trim))
x2 = max(x_frames[-1] + 1, int(w * (1 - max_perc_to_trim)))
return y1, y2, x1, x2
def extract_all_frames_from_video(video_file, blackbar_threshold=32, max_perc_to_trim=0.2,
every_nth_frame=1, verbosity=0):
"""
Same as exact_frames_from_video but no times meaning we grab every single frame
:param video_file:
:param r:
:param blackbar_threshold:
:param max_perc_to_trim:
:return:
"""
reader = skvideo.io.FFmpegReader(video_file, outputdict={'-r': '1', '-q:v': '2', '-pix_fmt': 'rgb24'},
verbosity=verbosity)
# frames = [x for x in iter(reader.nextFrame())]
frames = []
for i, frame in enumerate(reader.nextFrame()):
if (i % every_nth_frame) == 0:
frames.append(frame)
frames = np.stack(frames)
y1, y2, x1, x2 = _detect_black_bars_from_video(frames, blackbar_threshold=blackbar_threshold,
max_perc_to_trim=max_perc_to_trim)
frames = frames[:, y1:y2, x1:x2]
return frames
def extract_single_frame_from_video(video_file, t, verbosity=0):
"""
Reads the video, seeks to the given second option
:param video_file: input video file
:param t: where 2 seek to
:param use_rgb: True if use RGB, else BGR
:return: the frame at that timestep.
"""
timecode = '{:.3f}'.format(t)
input_dict ={ '-ss': timecode, '-threads': '1',}
reader = skvideo.io.FFmpegReader(video_file,
inputdict=input_dict,
outputdict={'-r': '1', '-q:v': '2', '-pix_fmt': 'rgb24', '-frames:v': '1'},
verbosity=verbosity,
)
try:
frame = next(iter(reader.nextFrame()))
except StopIteration:
frame = None
return frame
def extract_frames_from_video(video_file, times, info, use_multithreading=False, use_rgb=True,
blackbar_threshold=32, max_perc_to_trim=.20, verbose=False):
"""
Extracts multiple things from the video and even handles black bars
:param video_file: what we are loading
:param times: timestamps to use
:param use_multithreading: Whether to use multithreading
:param use_rgb whether to use RGB (default) or BGR
:param blackbar_threshold: Pixels must be this intense for us to not trim
:param max_perc_to_prim: Will trim 20% by default of the image at most in each dimension
:return:
"""
def _extract(i):
return i, extract_single_frame_from_video(video_file, times[i], verbosity=10 if verbose else 0)
time1 = time.time()
if not use_multithreading:
frames = [_extract(i)[1] for i in range(len(times))]
else:
frames = [None for t in times]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
submitted_threads = (executor.submit(_extract, i) for i in range(len(times)))
for future in concurrent.futures.as_completed(submitted_threads):
try:
i, img = future.result()
frames[i] = img
except Exception as exc:
print("Oh no {}".format(str(exc)), flush=True)
if verbose:
print("Extracting frames from video, multithreading={} took {:.3f}".format(use_multithreading,
time.time() - time1), flush=True)
if any([x is None for x in frames]):
print(f"Fail on {video_file}", flush=True)
return None
frames = np.stack(frames)
y1, y2, x1, x2 = _detect_black_bars_from_video(frames, blackbar_threshold=blackbar_threshold,
max_perc_to_trim=max_perc_to_trim)
frames = frames[:, y1:y2, x1:x2]
#############
return frames
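# Hedged usage sketch (added for illustration): 'example.mp4' is a placeholder
# path; `info` is accepted but unused by extract_frames_from_video.
if __name__ == '__main__':
    frames = extract_frames_from_video('example.mp4', times=[0.0, 1.0, 2.0],
                                       info=None, use_multithreading=True,
                                       verbose=True)
    if frames is not None:
        print(frames.shape)  # (num_times, trimmed_height, trimmed_width, 3)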
|
145180
|
from __future__ import absolute_import
import ctypes
from .._base import _LIB
from .. import ndarray as _nd
def cross_entropy(y, y_, out, stream=None):
assert isinstance(y, _nd.NDArray)
assert isinstance(y_, _nd.NDArray)
assert isinstance(out, _nd.NDArray)
_LIB.DLGpuCrossEntropy(
y.handle, y_.handle, out.handle, stream.handle if stream else None)
def cross_entropy_gradient(grad_arr, y_arr, label, out_arr, stream=None):
assert isinstance(grad_arr, _nd.NDArray)
assert isinstance(y_arr, _nd.NDArray)
assert isinstance(label, _nd.NDArray)
assert isinstance(out_arr, _nd.NDArray)
_LIB.DLGpuCrossEntropyGradient(
grad_arr.handle, y_arr.handle, label.handle, out_arr.handle, stream.handle if stream else None)
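# Hedged usage sketch (added for illustration): the construction helpers below
# (`_nd.array`, `_nd.empty`, `_nd.gpu`) are assumed from this package's ndarray
# module and may differ; shapes follow the usual (batch, num_classes) layout
# for predictions and one-hot labels, with one loss value per sample.
#
# import numpy as np
# ctx = _nd.gpu(0)
# y = _nd.array(np.random.rand(4, 10).astype(np.float32), ctx=ctx)     # softmax outputs
# y_ = _nd.array(np.eye(10, dtype=np.float32)[[1, 3, 5, 7]], ctx=ctx)  # one-hot labels
# out = _nd.empty((4,), ctx=ctx)
# cross_entropy(y, y_, out)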
|
145205
|
from module.princess import unitproc
from module.image import proc
import cv2
report = cv2.imread("assets/b.png") # Read the image
report = proc.preprocessing(report) # Crop to the central window
report = proc.report_processing(report) # Process the damage-report-style image
char_list = unitproc.process(report) # Locate character portraits
for char in char_list:
objUnit = unitproc.unit(char)
objUnit.detect()
result = objUnit.getResult()
    if result is False:
        print("Character not found")
else:
print(result)
|
145212
|
from flask import jsonify
from utils.endpoint import Endpoint, setup
from random import choice
@setup
class YoMomma(Endpoint):
"""
This endpoint only returns a yo momma joke. No parameters are required.
"""
params = []
def generate(self, avatars, text, usernames, kwargs):
choices = ['Yo momma so fat when she walked past the TV I missed three episodes',
'Yo momma so stupid she stuck a battery up her ass and said "I GOT THE POWER"',
'Yo momma so dumb, when y\'all were driving to Disneyland, she saw a sign that said "Disneyland left", so she went home',
'Yo momma so fat she needs cheat codes for Wii Fit',
                   'Yo momma so fat when she went to KFC and they asked her what size of bucket, she said "The one on the roof"',
'Yo momma so fat, I took a picture of her last Christmas and it\'s still printing',
'Yo momma so fat and old when God said "Let there be light" he asked your momma to step out of the way',
                   'Yo momma so fat when she stepped out in a yellow jacket people yelled TAXI',
'Yo momma so fat I tried driving around her and I ran out of gas',
'Yo momma so fat it took Thanos two snaps to kill her',
'Yo momma so fat she sued Nintendo for guessing her weight',
'Yo momma so dumb, she tripped over WiFi',
'Yo momma so fat she has two watches, one for each timezone',
'Yo momma so fat she left the house in high heels and came back in flip flops',
'Yo momma so fat her blood type is Nutella',
'Yo momma so fat she uses Google Earth to take a selfie',
'Yo momma so fat even Dora could not explore her',
'Yo momma so fat she jumped in the air and got stuck',
'Yo momma so fat that when we were born, she gave the hospital stretch marks',
'Yo momma so fat she wears a sock on each toe',
'Yo momma so fat the army uses her underwear as parachutes',
'Yo momma so fat her patronus is a cake',
'Yo momma so fat when she tripped over on 4th Ave, she landed on 12th',
'Yo momma so fat the only way she burns calories is when her food is on fire',
'Yo momma so fat she won all 75 Hunger Games',
'Yo momma so fat when she steps on a scale it says "One at a time please"',
'Yo momma so fat she got her own area code',
                   "Yo momma so fat even Kirby can't eat her",
'Yo momma so fat when she went to the beach Greenpeace threw her into the ocean',
'Yo momma so fat a vampire bit her and got Type 2 diabetes',
'Yo momma so fat she uses butter for her chapstick',
'Yo momma so fat when she walks backwards she beeps',
'Yo momma so fat she puts mayo on her diet pills']
return jsonify({"text": choice(choices)})
|
145215
|
import unicodedata
encodings = 'ascii latin1 cp1252 cp437 gb2312 utf-8 utf-16le'.split()
widths = {encoding:1 for encoding in encodings[:-3]}
widths.update(zip(encodings[-3:], (2, 4, 4)))
chars = sorted([
'A', # \u0041 : LATIN CAPITAL LETTER A
'¿', # \u00bf : INVERTED QUESTION MARK
'Ã', # \u00c3 : LATIN CAPITAL LETTER A WITH TILDE
'á', # \u00e1 : LATIN SMALL LETTER A WITH ACUTE
'Ω', # \u03a9 : GREEK CAPITAL LETTER OMEGA
    'µ', # \u00b5 : MICRO SIGN
    'Ц', # \u0426 : CYRILLIC CAPITAL LETTER TSE
    '€', # \u20ac : EURO SIGN
    '“', # \u201c : LEFT DOUBLE QUOTATION MARK
    '┌', # \u250c : BOX DRAWINGS LIGHT DOWN AND RIGHT
    '气', # \u6c14 : CJK UNIFIED IDEOGRAPH-6C14
'氣', # \u6c23 : CJK UNIFIED IDEOGRAPH-6C23
'𝄞', # \u1d11e : MUSICAL SYMBOL G CLEF
])
callout1_code = 0x278a # ➊ DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE
missing_mark = '*'
def list_chars():
for char in chars:
print('%r, # \\u%04x : %s' % (char, ord(char), unicodedata.name(char)))
def show_encodings():
print(end='\t\t')
for encoding in encodings:
print(encoding.ljust(widths[encoding] * 2), end='\t')
print()
for lineno, char in enumerate(chars):
codepoint = 'U+{:04X}'.format(ord(char))
print(char, codepoint, sep='\t', end='\t')
for encoding in encodings:
try:
bytes = char.encode(encoding)
dump = ' '.join('%02X' % byte for byte in bytes)
except UnicodeEncodeError:
dump = missing_mark
dump = dump.ljust(widths[encoding] * 2)
print(dump, end='\t')
# print(chr(callout1_code + lineno))
print(unicodedata.name(char))
# print()
#list_chars()
show_encodings()
|
145218
|
from twentyc.rpc import RestClient
from twentyc.rpc.client import NotFoundException, PermissionDeniedException
from peeringdb import get_backend
from peeringdb.resource import Network
from . import _data
# try: from peeringdb import _debug_http
# except: pass
# Single-underscore module globals: double-underscore names would be
# name-mangled when referenced inside the class methods below and raise
# NameError at runtime.
_DATA = {Network: {20: _data.twentyc}}
_DELETED = {}  # placeholder store for deleted objects (assumed empty here)
class Fetcher(RestClient):
    def __init__(self, **kwargs):
        super(Fetcher, self).__init__(**kwargs)
    def fetch(self, R, pk, depth):
        return _DATA[R][pk]
    def fetch_latest(self, R, pk, depth=0):
        return self.fetch(R, pk, depth), None
    def fetch_all_latest(self, R, params={}, depth=0):
        return list(_DATA[R].values())
    def fetch_deleted(self, R, pk, depth=0):
        return _DELETED[R][pk]
|
145239
|
from botocore.exceptions import ClientError
import boto3
import json
import sys
POLICY_DOCUMENT = {
"Statement":[
{
"Action": ["dynamodb:Scan", "dynamodb:Query"],
"Effect": "Allow",
"Resource": "*"
},
{
"Action":[
"ec2:Describe*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["rds:Describe*"],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["s3:Get*",
"s3:List*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["sdb:GetAttributes",
"sdb:List*",
"sdb:Select*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["sns:Get*",
"sns:List*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["sqs:ListQueues",
"sqs:GetQueueAttributes",
"sqs:ReceiveMessage"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["autoscaling:Describe*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["elasticloadbalancing:Describe*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":["cloudwatch:Describe*",
"cloudwatch:List*",
"cloudwatch:Get*"
],
"Effect":"Allow",
"Resource":"*"
},
{
"Action":[
"iam:Get*",
"iam:List*"
],
"Effect":"Allow",
"Resource":"*"
}
]
}
LAMBDA_ASSUME_POLICY = {
"Version": "2012-10-17",
"Statement":[{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
},{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "apigateway.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
client = boto3.client('iam')
ROLE_NAME = 'awslimits'
try:
client.create_role(
RoleName=ROLE_NAME,
AssumeRolePolicyDocument=json.dumps(LAMBDA_ASSUME_POLICY),
)
except ClientError as exc:
if exc.response['Error']['Code'] != 'EntityAlreadyExists':
raise
client.put_role_policy(
RoleName=ROLE_NAME,
PolicyName=ROLE_NAME,
PolicyDocument=json.dumps(POLICY_DOCUMENT),
)
env, settings = sys.argv[1:]
settings = json.loads(open(settings).read())[env]
function_name = "-".join([settings['project_name'], env])
lambda_client = boto3.client("lambda", region_name='us-east-1')
try:
lambda_client.delete_function(
FunctionName=function_name,
)
except ClientError as exc:
if exc.response['Error']['Code'] != 'ResourceNotFoundException':
raise
apigateway_client = boto3.client("apigateway", region_name='us-east-1')
apis = apigateway_client.get_rest_apis()['items']
matching_api_ids = [api['id'] for api in apis if api['name'] == function_name]
for matching_api_id in matching_api_ids:
response = apigateway_client.delete_rest_api(
restApiId=matching_api_id
)
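# Hedged usage note (added for illustration; the script filename is
# hypothetical): the script expects an environment name and a settings JSON
# file keyed by environment:
#   python reset_stack.py production settings.json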
|
145265
|
from decimal import Decimal
import pickle
import pytest
import simplejson
from toloka.client import TolokaClient
import toloka.client as client
from .testutils.util_functions import check_headers
@pytest.fixture
def random_url():
return 'https://testing.toloka.yandex.ru'
def test_client_create_exceptions(random_url):
with pytest.raises(ValueError):
TolokaClient('fake-token', '<PASSWORD>', url=random_url)
with pytest.raises(ValueError):
TolokaClient('fake-token')
def test_client_pickleable(random_url):
toloka_client = TolokaClient('fake-token', '<PASSWORD>')
dumped = pickle.dumps(toloka_client) # Check that it's possible.
loaded = pickle.loads(dumped)
assert loaded
def test_different_urls(requests_mock, random_url):
result = {
'id': '566ec2b0ff0deeaae5f9d500',
'balance': Decimal('120.3'),
'public_name': {
'EN': '<NAME>',
'RU': '<NAME>',
},
'company': {
'id': '1',
'superintendent_id': 'superintendent-1id',
},
}
def get_requester(request, context):
expected_headers = {
'X-Caller-Context': 'client',
'X-Top-Level-Method': 'get_requester',
'X-Low-Level-Method': 'get_requester',
}
check_headers(request, expected_headers)
return simplejson.dumps(result)
requests_mock.get(f'{random_url}/api/v1/requester', text=get_requester)
toloka_client = TolokaClient('fake-token', url=random_url)
requester = toloka_client.get_requester()
assert result == client.unstructure(requester)
toloka_client = TolokaClient('fake-token', url=f'{random_url}/')
requester = toloka_client.get_requester()
assert result == client.unstructure(requester)
|
145311
|
import numpy as np
import scipy
from ... import spectrum
from ... import utilits as ut
def ica_kurtosis(x, order, mode = 'full'):
'''
FUNCTION IN TEST
Max-kurtosis Independent Component Analysis (ICA)
References
------------------------
[1] http://www.cs.nyu.edu/~roweis/kica.html
'''
    X = signals.matrix.kernel_matrix(x, mode=mode, ktype='linear', kpar=0.001, lags=x.size//2)
invCov = np.linalg.inv(X.T.dot(np.conj(X)))
W = scipy.linalg.sqrtm(invCov)
Xcw = np.dot(W , X)
gg = repmat(np.sum(np.square(Xcw),axis=1), Xcw.shape[0], 1)
TEST= np.dot(gg*Xcw, Xcw.T)
es,ev = np.linalg.eig(TEST)
Zica = np.dot(ev[:order,:], Xcw)
return Zica
def repmat(a, m, n):
a = np.asanyarray(a)
ndim = a.ndim
if ndim == 0:
origrows, origcols = (1, 1)
elif ndim == 1:
origrows, origcols = (1, a.shape[0])
else:
origrows, origcols = a.shape
rows = origrows * m
cols = origcols * n
c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0)
return c.reshape(rows, cols)
|
145357
|
from i3pystatus.updates import Backend
import sys
# Remove first dir from sys.path to avoid shadowing dnf module from
# site-packages dir when this module executed directly on the CLI.
__module_dir = sys.path.pop(0)
try:
import dnf
HAS_DNF_BINDINGS = True
except ImportError:
HAS_DNF_BINDINGS = False
finally:
# Replace the directory we popped earlier
sys.path.insert(0, __module_dir)
class Dnf(Backend):
"""
Gets updates for RPM-based distributions using the `DNF API`_
The notification body consists of the package name and version for each
available update.
.. _`DNF API`: http://dnf.readthedocs.io/en/latest/api.html
.. note::
Users running i3pystatus from a virtualenv may see the updates display
as ``?`` due to an inability to import the ``dnf`` module. To ensure
that i3pystatus can access the DNF Python bindings, the virtualenv
should be created with ``--system-site-packages``.
If using `pyenv-virtualenv`_, the virtualenv must additionally be
created to use the system Python binary:
.. code-block:: bash
$ pyenv virtualenv --system-site-packages --python=/usr/bin/python3 pyenv_name
To invoke i3pystatus with this virtualenv, your ``bar`` section in
``~/.config/i3/config`` would look like this:
.. code-block:: bash
bar {
position top
status_command PYENV_VERSION=pyenv_name python /path/to/i3pystatus/script.py
}
.. _`pyenv-virtualenv`: https://github.com/yyuu/pyenv-virtualenv
"""
@property
def updates(self):
if HAS_DNF_BINDINGS:
try:
with dnf.Base() as base:
base.read_all_repos()
base.fill_sack()
upgrades = base.sack.query().upgrades().run()
notif_body = ''.join([
'%s: %s-%s\n' % (pkg.name, pkg.version, pkg.release)
for pkg in upgrades
])
return len(upgrades), notif_body
except Exception as exc:
self.logger.error('DNF update check failed', exc_info=True)
                return '?', str(exc)
else:
return '?', 'Failed to import DNF Python bindings'
Backend = Dnf
if __name__ == "__main__":
"""
Call this module directly; Print the update count and notification body.
"""
print("Updates: {}\n\n{}".format(*Backend().updates))
|
145380
|
from typing import List, Dict, Set
import pandas as pd
from ray.data import Dataset
from ray.ml.preprocessor import Preprocessor
class OrdinalEncoder(Preprocessor):
"""Encode values within columns as ordered integer values.
Currently, order within a column is based on the values from the fitted
dataset in sorted order.
Transforming values not included in the fitted dataset will be encoded as ``None``.
Args:
columns: The columns that will individually be encoded.
"""
def __init__(self, columns: List[str]):
# TODO: allow user to specify order of values within each column.
super().__init__()
self.columns = columns
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(dataset, *self.columns)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, *self.columns)
def column_ordinal_encoder(s: pd.Series):
s_values = self.stats_[f"unique_values({s.name})"]
return s.map(s_values)
df.loc[:, self.columns] = df.loc[:, self.columns].transform(
column_ordinal_encoder
)
return df
def __repr__(self):
return f"<Encoder columns={self.columns} stats={self.stats_}>"
class OneHotEncoder(Preprocessor):
"""Encode columns as new columns using one-hot encoding.
The transformed dataset will have a new column in the form ``{column}_{value}``
for each of the values from the fitted dataset. The value of a column will
be set to 1 if the value matches, otherwise 0.
Transforming values not included in the fitted dataset will result in all
of the encoded column values being 0.
Args:
columns: The columns that will individually be encoded.
"""
def __init__(self, columns: List[str]):
# TODO: add `drop` parameter.
super().__init__()
self.columns = columns
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(dataset, *self.columns)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, *self.columns)
# Compute new one-hot encoded columns
for column in self.columns:
column_values = self.stats_[f"unique_values({column})"]
for column_value in column_values:
df[f"{column}_{column_value}"] = (df[column] == column_value).astype(
int
)
# Drop original unencoded columns.
df = df.drop(columns=self.columns)
return df
def __repr__(self):
return f"<Encoder columns={self.columns} stats={self.stats_}>"
class LabelEncoder(Preprocessor):
"""Encode values within a label column as ordered integer values.
Currently, order within a column is based on the values from the fitted
dataset in sorted order.
Transforming values not included in the fitted dataset will be encoded as ``None``.
Args:
label_column: The label column that will be encoded.
"""
def __init__(self, label_column: str):
super().__init__()
self.label_column = label_column
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(dataset, self.label_column)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, self.label_column)
def column_label_encoder(s: pd.Series):
s_values = self.stats_[f"unique_values({s.name})"]
return s.map(s_values)
df[self.label_column] = df[self.label_column].transform(column_label_encoder)
return df
def __repr__(self):
return f"<Encoder label column={self.label_column} stats={self.stats_}>"
def _get_unique_value_indices(
dataset: Dataset, *columns: str
) -> Dict[str, Dict[str, int]]:
results = {}
for column in columns:
values = _get_unique_values(dataset, column)
if any(pd.isnull(v) for v in values):
raise ValueError(
f"Unable to fit column '{column}' because it contains null values. "
f"Consider imputing missing values first."
)
value_to_index = _sorted_value_indices(values)
results[f"unique_values({column})"] = value_to_index
return results
def _get_unique_values(dataset: Dataset, column: str) -> Set[str]:
agg_ds = dataset.groupby(column).count()
# TODO: Support an upper limit by using `agg_ds.take(N)` instead.
return {row[column] for row in agg_ds.iter_rows()}
def _sorted_value_indices(values: Set) -> Dict[str, int]:
"""Converts values to a Dict mapping to unique indexes.
Values will be sorted.
Example:
>>> _sorted_value_indices({"b", "a", "c", "a"})
{"a": 0, "b": 1, "c": 2}
"""
return {value: i for i, value in enumerate(sorted(values))}
def _validate_df(df: pd.DataFrame, *columns: str) -> None:
null_columns = [column for column in columns if df[column].isnull().values.any()]
if null_columns:
raise ValueError(
f"Unable to transform columns {null_columns} because they contain "
f"null values. Consider imputing missing values first."
)
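# Hedged usage sketch (added for illustration): assumes a local Ray runtime and
# that Preprocessor exposes fit_transform(); the column values here are made up.
if __name__ == "__main__":
    import ray
    df = pd.DataFrame({"color": ["red", "green", "blue", "red"]})
    ds = ray.data.from_pandas(df)
    encoder = OrdinalEncoder(columns=["color"])
    encoded = encoder.fit_transform(ds)
    print(encoded.to_pandas())  # color mapped via sorted order: blue=0, green=1, red=2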
|
145397
|
import requests
from celery import Celery
from celery.schedules import crontab
from bhagavad_gita_api.config import settings
app = Celery(
"cronjobs",
broker=settings.CELERY_BROKER,
backend=settings.CELERY_BACKEND,
)
app.conf.timezone = "Asia/Calcutta"
@app.task
def set_verse():
url = "{}/v2/set-daily-verse/".format(settings.CRONJOB_BASE_URL)
    headers = {
        "accept": "application/json",
        "X-API-KEY": settings.TESTER_API_KEY,
    }
    r = requests.post(url=url, headers=headers)
print(r)
app.conf.beat_schedule = {
"setup-verse-everyday": {
"task": "bhagavad_gita_api.cronjobs.celery.set_verse",
"schedule": crontab(hour=0, minute=0),
},
}
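# Hedged usage note (added for illustration): the schedule above registers the
# task as bhagavad_gita_api.cronjobs.celery.set_verse, so a worker with an
# embedded beat scheduler can be started with:
#   celery -A bhagavad_gita_api.cronjobs.celery worker --beat --loglevel=info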
if __name__ == "__main__":
app.start()
|
145410
|
import os
import sys
sys.path.append(os.path.join(os.getcwd(), '../common'))
from runner_helper2 import *
def get_fgnn_logtable():
    table = LogTable(num_row=18, num_col=4)
    # Column definitions: the three epoch-time components, plus the pipelined
    # end-to-end epoch time.
    col_definitions = [
        'epoch_time:sample_total',
        'epoch_time:copy_time',
        'epoch_time:train_total',
        'pipeline_train_epoch_time',
    ]
    for col_id, definition in enumerate(col_definitions):
        table = table.update_col_definition(col_id=col_id, definition=definition)
    # Row ids 0-17 map to (num_sample_worker, num_train_worker) pairs.
    worker_configs = ([(1, tw) for tw in range(1, 8)] +
                      [(2, tw) for tw in range(1, 7)] +
                      [(3, tw) for tw in range(1, 6)])
    # Columns 0-2 hold the non-pipelined time breakdown.
    for row_id, (num_sample_worker, num_train_worker) in enumerate(worker_configs):
        table = table.update_row_definition(
            row_id=row_id,
            col_range=[0, 2],
            num_sample_worker=num_sample_worker,
            num_train_worker=num_train_worker,
            BOOL_pipeline='no_pipeline'
        )
    # Column 3 holds the pipelined epoch time.
    for row_id, (num_sample_worker, num_train_worker) in enumerate(worker_configs):
        table = table.update_row_definition(
            row_id=row_id,
            col_range=[3, 3],
            num_sample_worker=num_sample_worker,
            num_train_worker=num_train_worker,
            BOOL_pipeline='pipeline'
        )
    return table.create()
|
145437
|
import os
import numpy as np
import json
import logging
from pipeline_monitor import prometheus_monitor as monitor
from pipeline_logger import log
import tensorflow as tf
from tensorflow.contrib import predictor
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
__all__ = ['invoke']
_labels = {
'name': 'transfer',
'tag': 'v2',
'runtime': 'tflite',
'chip': 'cpu',
'resource_name': '83f05e58transfer',
# 'resource_tag': '',
'resource_type': 'model',
'resource_subtype': 'keras',
}
def _initialize_upon_import():
""" Initialize / Restore Model Object.
"""
saved_model_path = './pipeline_tfserving/0'
optimized_model_base_path = './tflite'
os.makedirs(optimized_model_base_path, exist_ok=True)
converter = tf.contrib.lite.TocoConverter.from_saved_model(saved_model_path)
tflite_model = converter.convert()
    with open('%s/optimized_model.tflite' % optimized_model_base_path, "wb") as f:
        file_size_bytes = f.write(tflite_model)
# Load TFLite model and allocate tensors.
interpreter = tf.contrib.lite.Interpreter(model_path='%s/optimized_model.tflite' % optimized_model_base_path)
interpreter.allocate_tensors()
return interpreter
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
@log(labels=_labels, logger=_logger)
def invoke(request):
"""Where the magic happens..."""
with monitor(labels=_labels, name="transform_request"):
transformed_request = _transform_request(request)
with monitor(labels=_labels, name="invoke"):
input_details = _model.get_input_details()
_model.set_tensor(input_details[0]['index'], transformed_request)
_model.invoke()
response = _model.get_output_details()
with monitor(labels=_labels, name="transform_response"):
transformed_response = _transform_response(response)
return transformed_response
# input: bytes
# return: dict{}
def _transform_request(request):
"""
Convert from bytes/json/etc to dict of tf.tensor/np.array/etc
:param request:
:return:
"""
# TODO: Uncomment out one of the examples below - or provide your own implementation
#
# Note: The dict keys used below (ie. 'image') depend on the TF SignatureDef of your exported SavedModel
#
# Example 1: Convert json version of an image starting from raw bytes => dict{} to feed TF Serving
#
# request_str = request.decode('utf-8')
# request_json = json.loads(request_str)
# request_np = (np.array(request_json['image'], dtype=np.float32) / 255.0).reshape(1, 28, 28)
# return {"image": request_np}
# Example 2: Convert raw bytes version of an image => dict{} to feed TF Serving
#
# image_tensor = tf.make_tensor_proto([request], shape=[1])
# transformed_request_dict['image'] = image_tensor
# return transformed_request_dict # Becomes `PredictRequest.inputs['image'] = image_tensor`
# input: dict{}
# return: anything you want! (json, bytes, etc)
def _transform_response(response):
"""
Convert from dict{tf.tensor/np.array/etc} to bytes/json/etc
:param response:
:return:
"""
# TODO: Uncomment out the example below - or provide your own implementation
#
# Note: The dict keys used below (ie. 'classes', 'probabilities') depend on the TF SignatureDef of your exported SavedModel
#
# Example 1:
#
# classes_np = _model.get_tensor(response[0]['index'])[0].tolist()
# probabilities = _model.get_tensor(response[1]['index']).tolist()
# return json.dumps({"classes": classes_np,
# "probabilities": probabilities
# })
|
145444
|
import pytest
from mock import Mock
from botocore.exceptions import NoCredentialsError
from formica import cli
from tests.unit.constants import STACK, STACK_ID, PROFILE, REGION, CHANGESETNAME, EVENT_ID
@pytest.fixture
def logger(mocker):
return mocker.patch('formica.cli.logger')
def test_catches_common_aws_exceptions(session, stack_waiter):
session.side_effect = NoCredentialsError()
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.main(['deploy', '--stack', STACK])
assert pytest_wrapped_e.value.code == 1
def test_fails_if_no_stack_given(logger):
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.main(['deploy'])
assert pytest_wrapped_e.value.code == 1
logger.error.assert_called()
out = logger.error.call_args[0][0]
assert '--stack' in out
assert '--config-file' in out
def test_executes_change_set_and_waits(session, stack_waiter, client, boto_client):
client.describe_change_set.return_value = {'Status': 'CREATE_COMPLETE'}
client.describe_stack_events.return_value = {'StackEvents': [{'EventId': EVENT_ID}]}
client.describe_stacks.return_value = {'Stacks': [{'StackId': STACK_ID}]}
cli.main(['deploy', '--stack', STACK, '--profile', PROFILE, '--region', REGION])
boto_client.assert_called_with('cloudformation')
client.describe_stack_events.assert_called_with(StackName=STACK)
client.execute_change_set.assert_called_with(ChangeSetName=CHANGESETNAME, StackName=STACK)
client.describe_change_set.assert_called_with(ChangeSetName=CHANGESETNAME, StackName=STACK)
stack_waiter.assert_called_with(STACK_ID)
stack_waiter.return_value.wait.assert_called_with(EVENT_ID)
def test_executes_change_set_with_timeout(stack_waiter, client):
client.describe_change_set.return_value = {'Status': 'CREATE_COMPLETE'}
client.describe_stack_events.return_value = {'StackEvents': [{'EventId': EVENT_ID}]}
client.describe_stacks.return_value = {'Stacks': [{'StackId': STACK_ID}]}
cli.main(['deploy', '--stack', STACK, '--profile', PROFILE, '--region', REGION, '--timeout', '15'])
stack_waiter.assert_called_with(STACK_ID, timeout=15)
stack_waiter.return_value.wait.assert_called_with(EVENT_ID)
def test_does_not_execute_changeset_if_no_changes(stack_waiter, client):
client.describe_change_set.return_value = {'Status': 'FAILED',
"StatusReason": "The submitted information didn't contain changes. Submit different information to create a change set."}
client.describe_stack_events.return_value = {'StackEvents': [{'EventId': EVENT_ID}]}
client.describe_stacks.return_value = {'Stacks': [{'StackId': STACK_ID}]}
cli.main(['deploy', '--stack', STACK])
client.execute_change_set.assert_not_called()
stack_waiter.assert_called_with(STACK_ID)
stack_waiter.return_value.wait.assert_called_with(EVENT_ID)
def test_does_not_execute_changeset_if_in_failed_state(stack_waiter, client):
client.describe_change_set.return_value = {'Status': 'FAILED'}
client.describe_stack_events.return_value = {'StackEvents': [{'EventId': EVENT_ID}]}
client.describe_stacks.return_value = {'Stacks': [{'StackId': STACK_ID}]}
with pytest.raises(SystemExit):
cli.main(['deploy', '--stack', STACK])
client.execute_change_set.assert_not_called()
|
145465
|
from django.conf import settings
from django.core.mail import EmailMessage
try:
    from django.urls import reverse
except ImportError:
    from django.core.urlresolvers import reverse
def send(to,subject,body):
from_email_address = settings.EMAIL_HOST_USER
if '@' not in from_email_address:
from_email_address = settings.DEFAULT_FROM_EMAIL
From = "%s <%s>" % (settings.EMAIL_FROM, from_email_address)
email = EmailMessage(subject,body,From,to)
email.content_subtype = "html"
    return email.send(fail_silently=False)
def get_redirect_url():
return {"redirect_html": reverse(getattr(settings, 'MFA_REDIRECT_AFTER_REGISTRATION', 'mfa_home')),
"reg_success_msg":getattr(settings,"MFA_SUCCESS_REGISTRATION_MSG")}
|
145485
|
from database.mariadb import Database
from broker.fxcm.session import FXCMBroker
from threading import Thread
from queue import Queue, Empty
from subprocess_reader import SubprocessReader
import sys
import time
class SubprocessWorker(object):
def __init__(self, offer):
self._s = input
self._q = Queue()
self._o = offer
self._broker = FXCMBroker(
offers_table=False,
market_data=True,
trading=False
).market_data
self._database = Database(self._broker.whoami())
self._sir = SubprocessReader(
identifer=self._o,
stream=self._s,
events_queue=self._q,
expected=5,
log=False,
option='input'
)
self._queue_stream()
def _queue_stream(self):
while True:
try:
try:
job_event = self._q.get(False)
except Empty:
time.sleep(0.01)
else:
if isinstance(job_event, list):
self._on_data_request(job_event)
else:
                        if job_event == 'KILL':
self._send_message(
'K',self._o,'K'
)
break
else: # EXCEPTION
self._send_message(
'E',self._o,'E'
)
break
except KeyboardInterrupt:
break
if self._broker.is_connected():
self._broker._logout_session()
def _price_data_collection(
self, offer, timeframe, dtfm, dtto
):
return self._broker.data_collection(
offer, timeframe, dtfm, dtto
)
def _write_to_database(
self, offer, timeframe, data_gen
):
for data in data_gen:
self._database.write(offer, timeframe, data)
def _send_message(
self, jobno, offer, timeframe
):
msg = "{0}, {1}, {2}\n".format(jobno, offer, timeframe)
sys.stdout.write(msg)
sys.stdout.flush()
def _on_data_request(self, job_event):
jobno, offer, timeframe, dtfm, dtto = job_event
# Contact broker for data
data_gen = self._price_data_collection(
offer, timeframe, dtfm, dtto)
# Write data to database
self._write_to_database(
offer, timeframe, data_gen)
# Respond back to the main process once finished.
self._send_message(
jobno, offer, timeframe)
offer = sys.argv[1]
s = SubprocessWorker(offer)
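# Hedged usage note (added for illustration; the module filename is
# hypothetical): the worker is launched as a subprocess with the offer symbol
# as its only argument, e.g.
#   python subprocess_worker.py EUR/USD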
|
145493
|
import os
import numpy as np
import json
import random
from PIL import Image
from PIL import ImageDraw
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
class DatasetBase(Dataset):
"""Base dataset for VITON-GAN.
"""
def __init__(self, opt, mode, data_list, train=True):
super(DatasetBase, self).__init__()
self.data_path = os.path.join(opt.data_root, mode)
self.train = train
self.fine_height = opt.fine_height
self.fine_width = opt.fine_width
self.radius = opt.radius
self.transform = transforms.Compose([
transforms.ToTensor(), # [0,255] to [0,1]
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # [0,1] to [-1,1]
])
person_names = []
cloth_names = []
with open(os.path.join(opt.data_root, data_list), 'r') as f:
for line in f.readlines():
person_name, cloth_name = line.strip().split()
person_names.append(person_name)
cloth_names.append(cloth_name)
self.person_names = person_names
self.cloth_names = cloth_names
def __len__(self):
return len(self.person_names)
def _get_mask_arrays(self, person_parse):
"""Split person_parse array into mask channels
"""
shape = (person_parse > 0).astype(np.float32)
head = (person_parse == 1).astype(np.float32) + \
(person_parse == 2).astype(np.float32) + \
(person_parse == 4).astype(np.float32) + \
(person_parse == 13).astype(np.float32) # Hat, Hair, Sunglasses, Face
head = (head > 0).astype(np.float32)
cloth = (person_parse == 5).astype(np.float32) + \
(person_parse == 6).astype(np.float32) + \
(person_parse == 7).astype(np.float32) # Upper-clothes, Dress, Coat
cloth = (cloth > 0).astype(np.float32)
body = (person_parse == 1).astype(np.float32) + \
(person_parse == 2).astype(np.float32) + \
(person_parse == 3).astype(np.float32) + \
(person_parse == 4).astype(np.float32) + \
(person_parse > 7).astype(np.float32) # Neither cloth nor background
body = (body > 0).astype(np.float32)
return shape, head, cloth, body # [0,1]
def _downsample(self, im):
im = im.resize((self.fine_width//16, self.fine_height//16), Image.BILINEAR)
return im.resize((self.fine_width, self.fine_height), Image.BILINEAR)
def _load_pose(self, pose_name):
"""Load pose json file
"""
with open(os.path.join(self.data_path, 'pose', pose_name), 'r') as f:
pose_label = json.load(f)
pose_data = pose_label['people'][0]['pose_keypoints']
pose_data = np.array(pose_data)
pose_data = pose_data.reshape((-1,3))
point_num = pose_data.shape[0]
feature_pose_tensor = torch.zeros(point_num, self.fine_height, self.fine_width) # 18 channels
r = self.radius
pose_im = Image.new('L', (self.fine_width, self.fine_height)) # For visualization
pose_draw = ImageDraw.Draw(pose_im)
for i in range(point_num):
one_map = Image.new('L', (self.fine_width, self.fine_height))
draw = ImageDraw.Draw(one_map)
pointx = pose_data[i,0]
pointy = pose_data[i,1]
if pointx > 1 and pointy > 1:
draw.rectangle((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
pose_draw.rectangle((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
one_map = self.transform(one_map)
feature_pose_tensor[i] = one_map[0]
pose_tensor = self.transform(pose_im) # [-1,1]
return feature_pose_tensor, pose_tensor
def _get_item_base(self, index):
# Person
person_name = self.person_names[index]
person_im = Image.open(os.path.join(self.data_path, 'person', person_name))
person_tensor = self.transform(person_im) # [-1,1]
# Person-parse
parse_name = person_name.replace('.jpg', '.png')
person_parse = Image.open(os.path.join(self.data_path, 'person-parse', parse_name))
person_parse = np.array(person_parse) # shape: (256,192,3)
shape_mask, head_mask, cloth_mask, body_mask = self._get_mask_arrays(person_parse)
shape_im = Image.fromarray((shape_mask*255).astype(np.uint8))
feature_shape_tensor = self.transform(self._downsample(shape_im)) # [-1,1]
head_mask_tensor = torch.from_numpy(head_mask) # [0,1]
feature_head_tensor = person_tensor * head_mask_tensor - (1 - head_mask_tensor) # [-1,1], fill -1 for other parts
cloth_mask_tensor = torch.from_numpy(cloth_mask) # [0,1]
cloth_parse_tensor = person_tensor * cloth_mask_tensor + (1 - cloth_mask_tensor) # [-1,1], fill 1 for other parts
body_mask_tensor = torch.from_numpy(body_mask).unsqueeze(0) # Tensor [0,1]
# Pose keypoints
pose_name = person_name.replace('.jpg', '_keypoints.json')
feature_pose_tensor, pose_tensor = self._load_pose(pose_name)
# Cloth-agnostic representation
feature_tensor = torch.cat([feature_shape_tensor, feature_head_tensor, feature_pose_tensor], 0)
data = {
'person_name': person_name, # For visualization or ground truth
'person': person_tensor, # For visualization or ground truth
'feature': feature_tensor, # For input
'pose': pose_tensor, # For visualization
'head': feature_head_tensor, # For visualization
'shape': feature_shape_tensor, # For visualization
'cloth_parse': cloth_parse_tensor, # For ground truth
'body_mask': body_mask_tensor # For ground truth
}
return data
def binarized_tensor(arr):
mask = (arr >= 128).astype(np.float32)
return torch.from_numpy(mask).unsqueeze(0) # [0,1]
def random_horizontal_flip(data):
rand = random.random()
if rand < 0.5:
return data
else:
for key, value in data.items():
if 'name' in key:
continue
else:
data[key] = torch.flip(value, [2]) # 2 for width
return data
class GMMDataset(DatasetBase):
def __getitem__(self, index):
cloth_name = self.cloth_names[index]
cloth_im = Image.open(os.path.join(self.data_path, 'cloth', cloth_name))
cloth_tensor = self.transform(cloth_im) # [-1,1]
cloth_mask_im = Image.open(os.path.join(self.data_path, 'cloth-mask', cloth_name))
cloth_mask_tensor = binarized_tensor(np.array(cloth_mask_im))
grid_im = Image.open('grid.png')
grid_tensor = self.transform(grid_im)
data = self._get_item_base(index)
data['cloth_name'] = cloth_name # For visualization or input
data['cloth'] = cloth_tensor # For visualization or input
data['cloth_mask'] = cloth_mask_tensor # For input
data['grid'] = grid_tensor # For visualization
if self.train:
data = random_horizontal_flip(data) # Data augmentation
return data
class TOMDataset(DatasetBase):
def __getitem__(self, index):
cloth_name = self.cloth_names[index]
cloth_im = Image.open(os.path.join(self.data_path, 'warp-cloth', cloth_name))
cloth_tensor = self.transform(cloth_im) # [-1,1]
cloth_mask_im = Image.open(os.path.join(self.data_path, 'warp-cloth-mask', cloth_name))
cloth_mask_tensor = binarized_tensor(np.array(cloth_mask_im))
data = self._get_item_base(index)
data['cloth_name'] = cloth_name # For visualization or input
data['cloth'] = cloth_tensor # For visualization or input
data['cloth_mask'] = cloth_mask_tensor # For input
if self.train:
data = random_horizontal_flip(data) # Data augmentation
return data
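# Hedged usage sketch (added for illustration): the `opt` attributes mirror the
# ones read in DatasetBase.__init__; the paths, sizes, and pair-list filename
# are assumptions.
if __name__ == '__main__':
    from argparse import Namespace
    opt = Namespace(data_root='data', fine_height=256, fine_width=192, radius=5)
    dataset = GMMDataset(opt, mode='train', data_list='train_pairs.txt')
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    batch = next(iter(loader))  # dict with the keys set in _get_item_base plus cloth fields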
|
145494
|
T = int(input())
for x in range(1, T + 1):
N = int(input())
names = [input() for index in range(N)]
y = 0
previous = names[0]
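    # Greedy rule (comment added for illustration): keep track of the last name
    # that is in non-decreasing order; any later name sorting before it must be
    # removed and counted in y.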
for name in names[1:]:
if name < previous:
y += 1
else:
previous = name
print(f"Case #{x}: {y}", flush = True)
|
145502
|
import mmcv
import numpy as np
import pytest
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import (LoadAnnotations3D, LoadPointsFromFile,
LoadPointsFromMultiSweeps)
def test_load_points_from_indoor_file():
sunrgbd_info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')
sunrgbd_load_points_from_file = LoadPointsFromFile(6, shift_height=True)
sunrgbd_results = dict()
data_path = './tests/data/sunrgbd'
sunrgbd_info = sunrgbd_info[0]
sunrgbd_results['pts_filename'] = osp.join(data_path,
sunrgbd_info['pts_path'])
sunrgbd_results = sunrgbd_load_points_from_file(sunrgbd_results)
sunrgbd_point_cloud = sunrgbd_results['points']
assert sunrgbd_point_cloud.shape == (100, 4)
scannet_info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')
scannet_load_data = LoadPointsFromFile(shift_height=True)
scannet_results = dict()
data_path = './tests/data/scannet'
scannet_info = scannet_info[0]
scannet_results['pts_filename'] = osp.join(data_path,
scannet_info['pts_path'])
scannet_results = scannet_load_data(scannet_results)
scannet_point_cloud = scannet_results['points']
repr_str = repr(scannet_load_data)
expected_repr_str = 'LoadPointsFromFile(shift_height=True, ' \
'file_client_args={\'backend\': \'disk\'}), ' \
'load_dim=6, use_dim=[0, 1, 2])'
assert repr_str == expected_repr_str
assert scannet_point_cloud.shape == (100, 4)
def test_load_points_from_outdoor_file():
data_path = 'tests/data/kitti/a.bin'
load_points_from_file = LoadPointsFromFile(4, 4)
results = dict()
results['pts_filename'] = data_path
results = load_points_from_file(results)
points = results['points']
assert points.shape == (50, 4)
assert np.allclose(points.sum(), 2637.479)
load_points_from_file = LoadPointsFromFile(4, [0, 1, 2, 3])
results = dict()
results['pts_filename'] = data_path
results = load_points_from_file(results)
new_points = results['points']
assert new_points.shape == (50, 4)
assert np.allclose(points.sum(), 2637.479)
    assert np.equal(points, new_points).all()
with pytest.raises(AssertionError):
LoadPointsFromFile(4, 5)
def test_load_annotations3D():
# Test scannet LoadAnnotations3D
scannet_info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
scannet_load_annotations3D = LoadAnnotations3D(
with_bbox_3d=True,
with_label_3d=True,
with_mask_3d=True,
with_seg_3d=True)
scannet_results = dict()
data_path = './tests/data/scannet'
if scannet_info['annos']['gt_num'] != 0:
scannet_gt_bboxes_3d = scannet_info['annos']['gt_boxes_upright_depth']
scannet_gt_labels_3d = scannet_info['annos']['class']
else:
scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
scannet_gt_labels_3d = np.zeros((1, ))
# prepare input of loading pipeline
scannet_results['ann_info'] = dict()
scannet_results['ann_info']['pts_instance_mask_path'] = osp.join(
data_path, scannet_info['pts_instance_mask_path'])
scannet_results['ann_info']['pts_semantic_mask_path'] = osp.join(
data_path, scannet_info['pts_semantic_mask_path'])
scannet_results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
scannet_results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d
scannet_results['bbox3d_fields'] = []
scannet_results['pts_mask_fields'] = []
scannet_results['pts_seg_fields'] = []
scannet_results = scannet_load_annotations3D(scannet_results)
scannet_gt_boxes = scannet_results['gt_bboxes_3d']
    scannet_gt_labels = scannet_results['gt_labels_3d']
scannet_pts_instance_mask = scannet_results['pts_instance_mask']
scannet_pts_semantic_mask = scannet_results['pts_semantic_mask']
repr_str = repr(scannet_load_annotations3D)
expected_repr_str = 'LoadAnnotations3D(\n with_bbox_3d=True, ' \
'with_label_3d=True, with_mask_3d=True, ' \
'with_seg_3d=True, with_bbox=False, ' \
'with_label=False, with_mask=False, ' \
'with_seg=False, poly2mask=True)'
assert repr_str == expected_repr_str
assert scannet_gt_boxes.tensor.shape == (27, 7)
    assert scannet_gt_labels.shape == (27, )
assert scannet_pts_instance_mask.shape == (100, )
assert scannet_pts_semantic_mask.shape == (100, )
def test_load_points_from_multi_sweeps():
load_points_from_multi_sweeps = LoadPointsFromMultiSweeps()
sweep = dict(
data_path='./tests/data/nuscenes/sweeps/LIDAR_TOP/'
'n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin',
timestamp=1537290014899034,
sensor2lidar_translation=[-0.02344713, -3.88266051, -0.17151584],
sensor2lidar_rotation=np.array(
[[9.99979347e-01, 3.99870769e-04, 6.41441690e-03],
[-4.42034222e-04, 9.99978299e-01, 6.57316197e-03],
[-6.41164929e-03, -6.57586161e-03, 9.99957824e-01]]))
results = dict(
points=np.array([[1., 2., 3., 4., 5.], [1., 2., 3., 4., 5.],
[1., 2., 3., 4., 5.]]),
timestamp=1537290014899034,
sweeps=[sweep])
results = load_points_from_multi_sweeps(results)
points = results['points']
repr_str = repr(load_points_from_multi_sweeps)
expected_repr_str = 'LoadPointsFromMultiSweeps(sweeps_num=10)'
assert repr_str == expected_repr_str
assert points.shape == (403, 4)
|
145510
|
import iotbx.phil
import iotbx.file_reader
import iotbx.reflection_file_utils
from cctbx.array_family import flex
from mmtbx.scaling.absolute_scaling import kernel_normalisation
from yamtbx import util
from yamtbx.util.xtal import CellConstraints
from yamtbx.dataproc.xds import xds_ascii
import collections
master_params_str = """\
lstin = None
.type = path
.help = "List of XDS_ASCII.HKL"
dat_out = "cc_with_targets.dat"
.type = path
.help = "Output file"
normalization = *no E rel_B wilson_B
.type = choice(multi=False)
.help = "Normalization of each intensities"
d_min = None
.type = float
d_max = None
.type = float
"""
def calc_cc(ari, arj):
ari, arj = ari.common_sets(arj, assert_is_similar_symmetry=False)
corr = flex.linear_correlation(ari.data(), arj.data())
if corr.is_well_defined():
return corr.coefficient(), ari.size()
else:
return float("nan"), ari.size()
# calc_cc()
def read_target_files(target_files, d_min, d_max, normalization, log_out):
ret = collections.OrderedDict()
for i, f in enumerate(target_files):
f = iotbx.file_reader.any_file(f, force_type="hkl", raise_sorry_if_errors=True)
arrays = f.file_server.get_miller_arrays(None)
scores = iotbx.reflection_file_utils.get_xray_data_scores(arrays, ignore_all_zeros=True,
prefer_anomalous=False, prefer_amplitudes=False)
array = arrays[scores.index(max(scores))]
log_out.write("# target%.3d = %s %s\n" % (i, array.info(), array.d_max_min()))
if array.anomalous_flag(): array = array.average_bijvoet_mates()
array = array.as_intensity_array().resolution_filter(d_max=d_max, d_min=d_min)
if normalization == "E":
normaliser = kernel_normalisation(array, auto_kernel=True)
ret[f] = array.customized_copy(data=array.data()/normaliser.normalizer_for_miller_array,
sigmas=array.sigmas()/normaliser.normalizer_for_miller_array if array.sigmas() else None)
else:
ret[f] = array
return ret
# read_target_files()
def run(params, target_files):
assert params.normalization in ("no", "E")
ofs = open(params.dat_out, "w")
xac_files = util.read_path_list(params.lstin)
targets = read_target_files(target_files, params.d_min, params.d_max, params.normalization, ofs)
cellcon = CellConstraints(targets.values()[0].space_group())
#for i, t in enumerate(targets): ofs.write("# target%.3d = %s\n" % (i,t))
ofs.write("# normalization = %s\n" % params.normalization)
ofs.write("# d_min, d_max = %s, %s\n" % (params.d_min, params.d_max))
ofs.write("file %s " % cellcon.get_label_for_free_params())
ofs.write(" ".join(map(lambda x: "cc.%.3d nref.%.3d"%(x,x), xrange(len(targets)))))
ofs.write("\n")
for xac_file in xac_files:
print "reading", xac_file
xac = xds_ascii.XDS_ASCII(xac_file)
xac.remove_rejected()
iobs = xac.i_obs(anomalous_flag=False).merge_equivalents(use_internal_variance=False).array()
ofs.write("%s %s" % (xac_file, cellcon.format_free_params(iobs.unit_cell())))
fail_flag = False
if params.normalization == "E":
try:
normaliser = kernel_normalisation(iobs, auto_kernel=True)
iobs = iobs.customized_copy(data=iobs.data()/normaliser.normalizer_for_miller_array,
sigmas=iobs.sigmas()/normaliser.normalizer_for_miller_array)
except:
fail_flag = True
for i, ta in enumerate(targets.values()):
if fail_flag:
ofs.write(" % .4f %4d" % cc_num)
else:
cc_num = calc_cc(iobs, ta)
ofs.write(" % .4f %4d" % cc_num)
ofs.write("\n")
# run()
if __name__ == "__main__":
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
targets = cmdline.remaining_args
run(params, targets)
|
145511
|
import numpy as np
from collections import deque
import random
class Buffer:
def __init__(self, max_size=1000, seed=None):
self.buffer = deque(maxlen=max_size)
self.max_size = max_size
random.seed(seed)
@property
def size(self):
return len(self.buffer)
def sample(self, ct):
ct = min(ct, self.size)
batch = random.sample(self.buffer, ct)
s = np.float32([x[0] for x in batch])
a = np.float32([x[1] for x in batch])
r = np.float32([x[2] for x in batch])
s1 = np.float32([x[3] for x in batch])
a1 = np.float32([x[4] for x in batch])
return s,a,r,s1, a1
def sample_(self, ct):
ct = min(ct, self.size)
batch = random.sample(self.buffer, ct)
s = [x[0] for x in batch]
a = [x[1] for x in batch]
r = [x[2] for x in batch]
s1 = [x[3] for x in batch]
a1 = [x[4] for x in batch]
ano = [x[5] for x in batch]
return s,a,r,s1, a1, ano
def add(self, s,a,r,s1, a1=None, ano=None):
arr= [s,a,r,s1, a1, ano]
self.buffer.append(arr)
class PriortizedReplay(Buffer):
def __init__(self,max_size=1000, seed=None, beta=1., eps = 0.1):
super(PriortizedReplay, self).__init__(max_size, seed)
self.beta = beta
self.probs = deque(maxlen=self.max_size)
self.rg = np.random.RandomState(seed)
self.eps = eps
def add(self,s,a,r,s1, a1=None, ano=None, td=0):
arr= [s,a,r,s1, a1, ano]
self.probs.append(td+self.eps)
self.buffer.append(arr)
def sample(self, ct):
ct = min(ct, self.size)
probs = np.array(self.probs)
probs = probs ** self.beta
probs = probs/probs.sum()
idx = [self.rg.choice(self.size, p=probs) for _ in range(ct)]
s = np.float32([self.buffer[i][0] for i in idx])
a = np.float32([self.buffer[i][1] for i in idx])
r = np.float32([self.buffer[i][2] for i in idx])
s1 = np.float32([self.buffer[i][3] for i in idx])
a1 = np.float32([self.buffer[i][4] for i in idx])
return s,a,r,s1,a1
def sample_(self,ct):
ct = min(ct, self.size)
probs = np.array(self.probs)
probs = probs.argsort() +1
probs = (1/probs)
probs = probs ** self.beta
probs = probs/probs.sum()
idx = [self.rg.choice(self.size, p=probs) for _ in range(ct)]
s = [self.buffer[i][0] for i in idx]
a = [self.buffer[i][1] for i in idx]
r = [self.buffer[i][2] for i in idx]
s1 =[self.buffer[i][3] for i in idx]
a1 =[self.buffer[i][4] for i in idx]
ano =[self.buffer[i][5] for i in idx]
return s,a,r,s1,a1,ano
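# Hedged usage sketch (added for illustration): fill a small prioritized buffer
# with dummy transitions and draw a batch; shapes follow the sample() stacking.
if __name__ == "__main__":
    buf = PriortizedReplay(max_size=100, seed=0, beta=0.6)
    for t in range(10):
        buf.add(s=[t, t], a=[0.1], r=1.0, s1=[t + 1, t + 1], a1=[0.2], td=abs(t - 5))
    s, a, r, s1, a1 = buf.sample(4)
    print(s.shape, r.shape)  # (4, 2) (4,)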
|
145524
|
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
def sql_table_creation_suffix(self):
test_settings = self.connection.settings_dict['TEST']
assert test_settings['COLLATION'] is None, (
"PostgreSQL does not support collation setting at database creation time."
)
if test_settings['CHARSET']:
return "WITH ENCODING '%s'" % test_settings['CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
output = []
db_type = f.db_type(connection=self.connection)
if db_type is not None and (f.db_index or f.unique):
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
if not f.unique:
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
|
145566
|
import tensorflow as tf
import scipy.misc
import model
import cv2
from subprocess import call
import driving_data
import time
import TensorFI as ti
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, "save/model.ckpt")
img = cv2.imread('steering_wheel_image.jpg',0)
rows,cols = img.shape
smoothed_angle = 0
resFile = open("eachFIres.csv", "a")
eachFI = open("eachInjtionRes.csv", "a")
fi = ti.TensorFI(sess, logLevel = 50, name = "convolutional", disableInjections=True)
imgIndex = 140
#while(cv2.waitKey(10) != ord('q')):
for i in range(1): # num of imgs to be injected
full_image = scipy.misc.imread("driving_dataset/" + str(imgIndex) + ".jpg", mode="RGB")
image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0
'''
degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180.0 / scipy.pi
# call("clear")
print(i , ".png", " Predicted steering angle: " + str(degrees) + " degrees", driving_data.ys[i])
resFile.write(`i` + "," + `degrees` + "," + `driving_data.ys[i]` + "\n")
cv2.imshow("frame", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))
#make smooth angle transitions by turning the steering wheel based on the difference of the current angle
#and the predicted angle
smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)
M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)
dst = cv2.warpAffine(img,M,(cols,rows))
cv2.imshow("steering wheel", dst)
i += 1
'''
fi.turnOffInjections()
degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180.0 / scipy.pi
golden = degrees
print(i , ".png", " Predicted steering angle: " + str(degrees) + " degrees", driving_data.ys[i])
fi.turnOnInjections()
totalFI = 0.
sdcCount = 0
fiCount = 100
for k in range(fiCount):
# steering angle under fault
degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180.0 / scipy.pi
totalFI += 1
if(abs(degrees - golden) < 5):
            print(k, "no SDC")
            resFile.write(str(1) + ",")
            sdc = 1
        else:
            print(k, "SDC")
            sdcCount += 1
            resFile.write(str(0) + ",")
            sdc = 0
        eachFI.write(str(degrees) + "," + str(sdc) + "," + str(golden) + "," + str(ti.faultTypes.indexOfInjectedData) + "," + str(ti.faultTypes.indexOfInjectedBit) + "\n")
print(totalFI , " Predicted steering angle: " + str(degrees) + " golden", golden)
resFile.write("\n")
print("sdc", sdcCount/totalFI , totalFI)
# cv2.imshow("frame", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))
# #make smooth angle transitions by turning the steering wheel based on the difference of the current angle
# #and the predicted angle
# smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)
# M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)
# dst = cv2.warpAffine(img,M,(cols,rows))
# cv2.imshow("steering wheel", dst)
cv2.destroyAllWindows()
|
145603
|
import click
import frida
from frida.core import Session, Device, Script
allow_script = "js/allow.js"
signature_script = "js/signature.js"
patche10_script = "js/patch_e10.js"
class FridaWrapper:
def __init__(self, device: Device, session: Session):
self.device = device
self.session = session
@staticmethod
def attach(device: Device, app_name: str):
session: Session = device.attach(app_name)
return FridaWrapper(device, session)
def readjs(self, path: str) -> Script:
with open(path) as f:
code = f.read()
return self.session.create_script(code)
def inject(self, js_file: str) -> Script:
script: Script = self.readjs(js_file)
script.load()
return script
@click.group()
def main():
pass
@main.command("list-allowed")
def list_allowed():
"""List all the app names allowed by Exposure Notifications"""
device = frida.get_usb_device()
gms = FridaWrapper.attach(device, "com.google.android.gms.persistent")
gms.inject(allow_script)
input()
@main.command("get-signature")
@click.option("-p", "--package", "package", help="Package name", required=True)
def get_signature(package):
"""Get signature of the specified app"""
device = frida.get_usb_device()
app = FridaWrapper.attach(device, package)
app.inject(signature_script)
input()
def sign(package, signature, patche10, forcedk, unlimiteddk):
device = frida.get_usb_device()
gms = FridaWrapper.attach(device, "com.google.android.gms.persistent")
allow = gms.inject(allow_script)
if patche10:
gms.inject(patche10_script)
payload = {
"packageName": package,
"signatureSha": signature,
"forcedk": forcedk,
"unlimiteddk": unlimiteddk
}
allow.post({"type": "signature", "payload": payload})
input()
@main.command("sign")
@click.option("-p", "--package", "package", help="Package name (has to be one of the allowed apps)", required=True)
@click.option("-s", "--signature", "signature", help="SHA-256 of the app signature", required=True)
@click.option("-f", "--force-dk", "forcedk", is_flag=True, help="Force Diagnosis Keys signature validation")
@click.option("-u", "--unlimited-dk", "unlimiteddk", is_flag=True,
help="Limit on number of calls to provideDiagnosisKeys resets every 1ms instead of 24h "
"(careful - going back to the previous behavior after using this option requires "
"cleaning all the app data)")
@click.option("-e", "--patch-e10", "patche10", is_flag=True,
help="Patch bug in Play Services causing error 10 (Pipe is closed, affects Android 6)")
def sign_command(**kwargs):
"""Allow the custom app to use Exposure Notifications"""
sign(**kwargs)
@main.command("patch")
def patch():
"""Patch a bug in Play Services affecting Android 6"""
sign("dummy", "dummy", True, False, False)
if __name__ == "__main__":
main()
|
145614
|
from dassl.engine import TRAINER_REGISTRY,TrainerXU
from dassl.data import DataManager
from torch.utils.data import Dataset as TorchDataset
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
import torch
import torch.nn as nn
from torch.nn import functional as F
from dassl.engine.trainer_tmp import SimpleNet
from dassl.utils import MetricMeter
import numpy as np
@TRAINER_REGISTRY.register()
class CustomMCD(TrainerXU):
def __init__(self, cfg):
super().__init__(cfg)
self.n_step_F = cfg.TRAINER.CustomMCD.N_STEP_F
self._best_epoch_val_loss = 10000
self.ce = nn.CrossEntropyLoss()
if cfg.DATASET.TOTAL_CLASS_WEIGHT:
total_data_class_weight = self.dm.dataset.whole_class_weight
if total_data_class_weight is not None:
torch_weight = torch.from_numpy(np.array(total_data_class_weight)).float().to(self.device)
self.ce = nn.CrossEntropyLoss(weight=torch_weight)
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, 0,**cfg.MODEL.BACKBONE.PARAMS)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
fdim = self.F.fdim
print('Building C1')
print("fdim : ",fdim)
print("num_classes : ",self.num_classes)
self.C1 = nn.Linear(fdim, self.num_classes)
self.C1.to(self.device)
print('# params: {:,}'.format(count_num_param(self.C1)))
self.optim_C1 = build_optimizer(self.C1, cfg.OPTIM)
self.sched_C1 = build_lr_scheduler(self.optim_C1, cfg.OPTIM)
self.register_model('C1', self.C1, self.optim_C1, self.sched_C1)
print('Building C2')
self.C2 = nn.Linear(fdim, self.num_classes)
self.C2.to(self.device)
print('# params: {:,}'.format(count_num_param(self.C2)))
self.optim_C2 = build_optimizer(self.C2, cfg.OPTIM)
self.sched_C2 = build_lr_scheduler(self.optim_C2, cfg.OPTIM)
self.register_model('C2', self.C2, self.optim_C2, self.sched_C2)
    def forward_backward(self, batch_x, batch_u, backprop=True):
parsed = self.parse_batch_train(batch_x, batch_u)
input_x, label_x,_ ,input_u = parsed
# Step A
feat_x = self.F(input_x)
logit_x1 = self.C1(feat_x)
logit_x2 = self.C2(feat_x)
# loss_x1 = F.cross_entropy(logit_x1, label_x)
# loss_x2 = F.cross_entropy(logit_x2, label_x)
loss_x1 = self.ce(logit_x1, label_x)
loss_x2 = self.ce(logit_x2, label_x)
loss_step_A = loss_x1 + loss_x2
        if backprop:
self.model_backward_and_update(loss_step_A)
# Step B
        with torch.no_grad():
            feat_x = self.F(input_x)
        logit_x1 = self.C1(feat_x)
        logit_x2 = self.C2(feat_x)
        # loss_x1 = F.cross_entropy(logit_x1, label_x)
        # loss_x2 = F.cross_entropy(logit_x2, label_x)
        loss_x1 = self.ce(logit_x1, label_x)
        loss_x2 = self.ce(logit_x2, label_x)
        loss_x = loss_x1 + loss_x2
        with torch.no_grad():
            feat_u = self.F(input_u)
        pred_u1 = F.softmax(self.C1(feat_u), 1)
        pred_u2 = F.softmax(self.C2(feat_u), 1)
        loss_dis = self.discrepancy(pred_u1, pred_u2)
loss_step_B = loss_x - loss_dis
        if backprop:
self.model_backward_and_update(loss_step_B, ['C1', 'C2'])
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
# Step C
for _ in range(self.n_step_F):
feat_u = self.F(input_u)
pred_u1 = F.softmax(self.C1(feat_u), 1)
pred_u2 = F.softmax(self.C2(feat_u), 1)
loss_step_C = self.discrepancy(pred_u1, pred_u2)
            if backprop:
self.model_backward_and_update(loss_step_C, 'F')
loss_summary = {
'loss_step_A': loss_step_A.item(),
'loss_step_B': loss_step_B.item(),
'loss_step_C': loss_step_C.item()
}
return loss_summary
@torch.no_grad()
# def validate(self,full_results = False):
def validate(self):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
losses = MetricMeter()
print('Do evaluation on {} set'.format('valid set'))
data_loader = self.val_loader
assert data_loader is not None
self.num_batches = len(data_loader)
valid_loader_x_iter = iter(data_loader)
loader_u_iter = iter(self.train_loader_u)
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(valid_loader_x_iter)
except StopIteration:
valid_loader_x_iter = iter(data_loader)
batch_x = next(valid_loader_x_iter)
try:
batch_u = next(loader_u_iter)
except StopIteration:
                loader_u_iter = iter(self.train_loader_u)
                batch_u = next(loader_u_iter)
input, label, domain, target = self.parse_batch_train(batch_x, batch_u)
            loss = self.forward_backward(batch_x, batch_u, backprop=False)
losses.update(loss)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
total_loss = losses.meters['loss_step_A'].avg
for k, v in results.items():
tag = '{}/{}'.format('validation', k)
self.write_scalar(tag, v, self.epoch)
# if full_results:
return [total_loss,losses.dict_results(),results]
# return total_loss
# def after_epoch(self):
# """
# save the best model for given validation loss
# """
# epoch_total_loss = self.validate()
# if self._best_epoch_val_loss > epoch_total_loss:
# print("save best model at epoch %f , Improve loss from %4f -> %4f" % (
# self.epoch, self._best_epoch_val_loss, epoch_total_loss))
# self._best_epoch_val_loss = epoch_total_loss
# self.save_model(epoch=self.epoch, directory=self.output_dir, is_best=True)
# super().after_epoch()
def discrepancy(self, y1, y2):
return (y1 - y2).abs().mean()
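    # A tiny worked example of the L1 discrepancy above (illustration only):
    # for classifier outputs p1 = [0.7, 0.3] and p2 = [0.4, 0.6] the value is
    # mean(|0.3|, |-0.3|) = 0.3. Step B maximizes this on unlabeled data w.r.t.
    # C1/C2 while step C minimizes it w.r.t. the feature extractor F.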
def model_inference(self, input):
feat = self.F(input)
return F.softmax(self.C1(feat),dim=1)
|
145651
|
import os
import errno
try:
    from PySide2.QtCore import *
    from PySide2.QtGui import *
    from PySide2.QtWidgets import *
except ImportError:
    from PySide.QtCore import *
    from PySide.QtGui import *
def printText(text, name="Print"):
QMessageBox.warning(QWidget(), str(name), str(text))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
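# On Python >= 3.2 the helper above can be expressed with the stdlib flag
# directly; a sketch (the name is ours, kept separate because the import
# fallback above also targets older environments):
def mkdir_p_py3(path):
    os.makedirs(path, exist_ok=True)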
|
145679
|
from pathlib import Path
from textwrap import dedent
from jupyter_client.kernelspec import find_kernel_specs
SUPPORTED_FILE_SUFFIXES = [".ipynb", ".md", ".markdown", ".myst", ".Rmd", ".py"]
def _filename_to_title(filename, split_char="_"):
"""Convert a file path into a more readable title."""
filename = Path(filename).with_suffix("").name
filename_parts = filename.split(split_char)
try:
# If first part of the filename is a number for ordering, remove it
int(filename_parts[0])
if len(filename_parts) > 1:
filename_parts = filename_parts[1:]
except Exception:
pass
title = " ".join(ii.capitalize() for ii in filename_parts)
return title
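# e.g. (illustrative): _filename_to_title("01_my_chapter.md") returns
# "My Chapter" -- the numeric ordering prefix is dropped and the remaining
# underscore-separated parts are capitalized.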
##############################################################################
# CLI utilities
border = "=" * 79
endc = "\033[0m"
bcolors = dict(
blue="\033[94m",
green="\033[92m",
orange="\033[93m",
red="\033[91m",
bold="\033[1m",
underline="\033[4m",
)
def _color_message(msg, style):
return bcolors[style] + msg + endc
def _message_box(msg, color="green", doprint=True, print_func=print):
# Prepare the message so the indentation is the same as the box
msg = dedent(msg)
# Color and create the box
border_colored = _color_message(border, color)
box = """
{border_colored}
{msg}
{border_colored}
"""
box = dedent(box).format(msg=msg, border_colored=border_colored)
if doprint is True:
print_func(box)
return box
def _error(msg, kind=None):
if kind is None:
kind = RuntimeError
box = _message_box(msg, color="red", doprint=False)
raise kind(box)
##############################################################################
# MyST + Jupytext
def init_myst_file(path, kernel, verbose=True):
"""Initialize a file with a Jupytext header that marks it as MyST markdown.
Parameters
----------
path : string
A path to a markdown file to be initialized for Jupytext
kernel : string
A kernel name to add to the markdown file. See a list of kernel names with
`jupyter kernelspec list`.
"""
try:
from jupytext.cli import jupytext
except ImportError:
raise ImportError(
"In order to use myst markdown features, " "please install jupytext first."
)
if not Path(path).exists():
raise FileNotFoundError(f"Markdown file not found: {path}")
kernels = list(find_kernel_specs().keys())
kernels_text = "\n".join(kernels)
if kernel is None:
if len(kernels) > 1:
            _error(
                "There are multiple kernel options, so you must give one manually"
                " with `--kernel`.\nPlease specify one of the following kernels.\n\n"
                f"{kernels_text}"
            )
else:
kernel = kernels[0]
if kernel not in kernels:
raise ValueError(
f"Did not find kernel: {kernel}\nPlease specify one of the "
f"installed kernels:\n\n{kernels_text}"
)
args = (str(path), "-q", "--set-kernel", kernel, "--set-formats", "myst")
jupytext(args)
if verbose:
print(f"Initialized file: {path}\nWith kernel: {kernel}")
|
145705
|
add_pointer_input = {
"type": "object",
"additionalProperties": False,
"properties": {"pointer": {"type": "string"}, "weight": {"type": "integer"}, "key": {"type": "string"}},
"required": ["pointer", "weight"],
}
remove_pointer_input = {
"type": "object",
"additionalProperties": False,
"properties": {"pointer": {"type": "string"}, "key": {"type": "string"}},
"required": ["pointer"],
}
replace_config_input = {
"type": "object",
"additionalProperties": False,
"properties": {"pointers": {"type": "object"}, "key": {"type": "string"}},
"required": ["pointers"],
}
get_config_input = {
"type": "object",
"additionalProperties": False,
"properties": {"key": {"type": "string"}}
}
pick_pointer_input = get_config_input
pick_pointer_output = {
"type": "object",
"additionalProperties": False,
"properties": {"pointer": {"type": "string"}},
"required": ["pointer"],
}
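# A minimal sketch of how these schemas might be applied with the `jsonschema`
# package (an assumption -- the validator actually used by the surrounding
# project is not shown here):
#
#   import jsonschema
#   jsonschema.validate({"pointer": "backend-1", "weight": 10}, add_pointer_input)
#   # raises jsonschema.ValidationError on missing or unknown properties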
|
145717
|
import os
import xml.dom.minidom
import Lp_FrontEndFunctions
import Lp_UserInterface
import textwrap
def parseIface(mod,architecture,lpArch):
curDir=os.getcwd()
    modName=mod.split('/')[-1]
modStripped=modName.split('_')[0]
modStripped=modStripped.split('.')[0]
modPath = findModuleXml(modStripped)
if modPath == '':
return -1
dom=xml.dom.minidom.parse(os.path.join(modPath,(modStripped+'.xml')))
ifaceNum=dom.getElementsByTagName('iface')[0].firstChild.data
return int(ifaceNum)
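#For reference, the module xml parsed above presumably contains an iface
#element like this (illustrative sketch inferred from the tags used above;
#real files ship alongside each module):
#   <module>
#       <iface>3</iface>
#       ...
#   </module>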
def parseDefaultDir():
curDir=os.getcwd()
dom=xml.dom.minidom.parse((curDir+'/Xml/Lp.xml'))
defaultDir = dom.getElementsByTagName('defaultDir')
if len(defaultDir)>0:
return "%s%s"%(curDir,defaultDir[0].firstChild.data)
else:
return (curDir+'/Output/')
def __getParameter(command,attrName,save):
    parameters = command.getElementsByTagName('parameters')[0]
    parameterList = parameters.getElementsByTagName('parameter')
    for parameter in parameterList:
        if parameter.getAttribute('name') == attrName:
            if save:
                #write the value into BubbleKeys so it matches the path we return
                data = parameter.firstChild.data
                savePath = os.path.join(os.getcwd(),'BubbleKeys',attrName)
                f = open(savePath,'w+')
                f.write(data)
                f.close()
                return savePath
            else:
                return parameter.firstChild.data
    return -1
def parseBubblewrapXml(pathToXml,funcDict):
try:
try:
cwd = os.getcwd()
os.mkdir(os.path.join(cwd,'BubbleKeys'))
except OSError:
            #we don't care if the directory already exists.
pass
bubbleDom = xml.dom.minidom.parse(pathToXml)
topLevel = bubbleDom.getElementsByTagName('dci')[0]
header = topLevel.getElementsByTagName('header')[0]
fQ = header.getElementsByTagName('fullyQualifiedId')[0]
targetId = fQ.getElementsByTagName('targetId')[0].firstChild.data
instanceId = fQ.getElementsByTagName('instanceId')[0].firstChild.data
commands = topLevel.getElementsByTagName('commands')[0]
commandList = commands.getElementsByTagName('command')
for command in commandList:
if command.getAttribute('name') == 'targetInformation':
targetIp = __getParameter(command,'targetIp',False)
if command.getAttribute('name') == 'implantLp':
ip = __getParameter(command,'ip',False)
port = __getParameter(command,'port',False)
if command.getAttribute('name') == 'deploymentId':
deployId = __getParameter(command,'id',False)
if command.getAttribute('name') == 'implantCrypto':
cv = __getParameter(command,'cv',True)
if command.getAttribute('name') == 'infMod':
infMod = __getParameter(command,'infMod',True)
if command.getAttribute('name') == 'infMu':
infMu = __getParameter(command,'infMu',True)
if command.getAttribute('name') == 'infPriv':
infPriv = __getParameter(command,'infPriv',True)
if command.getAttribute('name') == 'infPub':
infPub = __getParameter(command,'infPub',True)
if command.getAttribute('name') == 'impMod':
impMod = __getParameter(command,'impMod',True)
if command.getAttribute('name') == 'impMu':
impMu = __getParameter(command,'impMu',True)
if command.getAttribute('name') == 'impPriv':
impPriv = __getParameter(command,'impPriv',True)
if command.getAttribute('name') == 'impPub':
impPub = __getParameter(command,'impPub',True)
funcDict['targetId'] = targetId
funcDict['instanceId'] = int(instanceId)
funcDict['deploymentId'] = int(deployId)
funcDict['targetIp'] = targetIp
funcDict['ip'] = ip
funcDict['port'] = port
funcDict['cv'] = cv
funcDict['infMod'] = infMod
funcDict['infMu'] = infMu
funcDict['infPriv'] = infPriv
funcDict['infPub'] = infPub
funcDict['impMod'] = impMod
funcDict['impMu'] = impMu
funcDict['impPriv'] = impPriv
funcDict['impPub'] = impPub
return 0
except IOError:
print "The specified xml file could not be found: %s"%pathToXml
return -1
except AttributeError:
print "Error parsing provided xml: %s"%pathToXml
return -1
def findModuleXml(modName):
curDir=os.getcwd()
baseDir = os.path.join(curDir,'../Mods')
modTypes = os.listdir(baseDir)
for modType in modTypes:
projects = os.listdir(os.path.join(baseDir,modType))
for project in projects:
arches = os.listdir(os.path.join(baseDir,modType,project))
for arch in arches:
files = os.listdir(os.path.join(baseDir,modType,project,arch))
for file in files:
if file.find(modName+'.xml')>=0:
return os.path.join(baseDir,modType,project,arch)
#try one more time in cur/Xml
files = os.listdir(os.path.join(curDir,'Xml'))
for file in files:
if file.find(modName+'.xml')>=0:
return os.path.join(curDir,'Xml')
return ''
#Parse the xml file for the module specified in usrIn and populate the Modules dictionary
#Arguments:
def parseMod(helpDict, Modules, mod, architecture, lpArch, fMaps, requireLpex,
lpFuncs, okToParse):
parsedFunctions = []
if okToParse>0:
return
curDir=os.getcwd()
modName=mod.split('/')[-1]
modStripped=modName.split('_')[0]
modStripped=modStripped.split('.')[0]
path = mod.split('/')[:-1]
#just a module name was given, we need to search ../Mods until we find
#an xml file for this module
if len(path) == 0:
modPath = findModuleXml(modStripped)
if modPath == '':
return
else:
modPath = '/'+os.path.join(*path)
modName = modStripped+'.xml'
dom=xml.dom.minidom.parse(os.path.join(modPath,modName))
if requireLpex == 1:
dirSplit=mod.split('/')[:-1]
if lpArch=='i386':
lpEx='%s.lx32'%modStripped
else:
lpEx='%s.lx64'%modStripped
fileLoc = '/'+os.path.join(*dirSplit)
fileLoc = os.path.join(fileLoc,lpEx)
if not os.path.exists(fileLoc):
print "\nCould not find an Lp extension for %s."%lpEx.split('.')[0],
print "Ensure that an Lp extension is located in\nRelease/Mods/App/<project name>/<arch>."
return
lpexArgs = Modules['Lp']['Lp.lpex']
Lp_UserInterface.setLpexArgs(lpexArgs, fileLoc)
#silently ignore errors since we dont care if the lpex was already loaded
lpFuncs.cmdGeneric('lpex',lpexArgs,{})
Modules[modStripped]={}
helpDict[modStripped]={}
modDict=Modules[modStripped]
ifaceNum=dom.getElementsByTagName('iface')[0].firstChild.data
helpDict[modStripped]['iface']=ifaceNum
Modules[modStripped]['iface']=ifaceNum
functions=dom.getElementsByTagName('function')
for f in functions:
#Read optional parameters
cursesOpt=f.getElementsByTagName('curses')
if len(cursesOpt)>0:
cursesOpt=cursesOpt[0].firstChild.data
noArgs=f.getElementsByTagName('noargs')
if len(noArgs)>0:
noArgs=noArgs[0].firstChild.data
confirm=f.getElementsByTagName('confirm')
if len(confirm)>0:
confirm=confirm[0].firstChild.data
dirList=f.getElementsByTagName('useDirList')
if len(dirList)>0:
dirList=dirList[0].firstChild.data
switch=f.getElementsByTagName('useSwitch')
if len(switch)>0:
switch=switch[0].firstChild.data
noDisplay=f.getElementsByTagName('nodisplay')
if len(noDisplay)>0:
noDisplay=noDisplay[0].firstChild.data
printFunc=f.getElementsByTagName('printFunctionOnComplete')
if len(printFunc)>0:
printFunc=printFunc[0].firstChild.data
argConfirm=f.getElementsByTagName('useArgConfirm')
if len(argConfirm)>0:
argConfirm=argConfirm[0].firstChild.data
ignoreDone=f.getElementsByTagName('ignoreDone')
if len(ignoreDone)>0:
ignoreDone=ignoreDone[0].firstChild.data
endOnString=f.getElementsByTagName('endOnString')
if len(endOnString)>0:
endOnString=endOnString[0].firstChild.data
useDefaultDir=f.getElementsByTagName('useDefaultDir')
if len(useDefaultDir)>0:
useDefaultDir=useDefaultDir[0].firstChild.data
useBubblewrapXml=f.getElementsByTagName('useBubblewrapXml')
if len(useBubblewrapXml)>0:
useBubblewrapXml=useBubblewrapXml[0].firstChild.data
checkForCfg=f.getElementsByTagName('checkForCfg')
if len(checkForCfg)>0:
checkForCfg=checkForCfg[0].firstChild.data
#Read required parameters
try:
fName="%s.%s"%(modStripped,f.getElementsByTagName('name')[0].firstChild.data)
except IndexError:
str1='Warning! Function configuration does not contain the function name.'
str2='This function will not be available.'
print textwrap.fill('%s %s'%(str1,str2))
continue
try:
fNum=f.getElementsByTagName('fnum')[0].firstChild.data
except IndexError:
str1='Warning! Function configuration does not contain the function number.'
str2='This function will not be available.'
print textwrap.fill('%s %s'%(str1,str2))
continue
try:
command=f.getElementsByTagName('command')[0].firstChild.data
except IndexError:
str1="Configuration for the function %s does not contain the necessary element 'command'!"%(fName)
str2="This function will not be available."
print textwrap.fill("%s %s"%(str1,str2))
continue
helpDict[modStripped][fName]={}
try:
fMaps[fNum]=fName
helpDict[modStripped][fName]['nodisplay']=noDisplay
if noDisplay != 'true':
parsedFunctions.append(fName)
helpDict[modStripped][fName]['fnum']=fNum
helpDict[modStripped][fName]['usage']=f.getElementsByTagName('helpUse')[0].firstChild.data
helpDict[modStripped][fName]['text']=f.getElementsByTagName('helpText')[0].firstChild.data
except IndexError:
print textwrap.fill("Warning! Function configuration for %s does not contain help information!"%fName)
helpDict[modStripped][fName]['nodisplay']=noDisplay
helpDict[modStripped][fName]['usage']=""
helpDict[modStripped][fName]['text']=""
modDict[fName]={}
modDict[fName]['curses']=cursesOpt
modDict[fName]['command']=command
modDict[fName]['fnum']=fNum
modDict[fName]['noargs']=noArgs
modDict[fName]['confirm']=confirm
modDict[fName]['useDirList']=dirList
modDict[fName]['useSwitch']=switch
modDict[fName]['nodisplay']=noDisplay
modDict[fName]['cursesPrompts']=[]
modDict[fName]['promptList']={}
modDict[fName]['printFunc']=printFunc
modDict[fName]['useArgConfirm']=argConfirm
modDict[fName]['ignoreDone']=ignoreDone
modDict[fName]['endOnString']=endOnString
modDict[fName]['useDefaultDir']=useDefaultDir
modDict[fName]['useBubblewrapXml']=useBubblewrapXml
modDict[fName]['checkForCfg']=checkForCfg
modDict[fName]['errors']={}
bubblePrompt = f.getElementsByTagName('bubblePrompt')
if len(bubblePrompt)>0:
modDict[fName]['bubblePrompt'] = bubblePrompt[0].firstChild.data
errors = f.getElementsByTagName('errors')
if len(errors)>0:
errorEnts = errors[0].getElementsByTagName('errorEnt')
for entry in errorEnts:
errorStr = entry.getElementsByTagName('errorStr')
errorStr=errorStr[0].firstChild.data
errorMsg = entry.getElementsByTagName('errorMsg')
if type(errorMsg[0].firstChild) == type(None):
errorMsg=''
else:
errorMsg=errorMsg[0].firstChild.data
modDict[fName]['errors'][str(errorStr)] = str(errorMsg)
prompts=f.getElementsByTagName('prompt')
for p in prompts:
attr=p.getAttribute('value')
attrC=p.getAttribute('cprompt')
modDict[fName][p.firstChild.data]=attr
#If this prompt is specified as a curses prompt, append it to the list
if attrC!="":
modDict[fName]['cursesPrompts'].append((p.firstChild.data,attrC))
switchArgs=f.getElementsByTagName('switch')
if switchArgs.length>0:
modDict[fName]['switchParams']={}
switchDict=modDict[fName]['switchParams']
for s in switchArgs:
switchDict['prompt']=str(s.getElementsByTagName('sprompt')[0].firstChild.data)
switchDict['switchOpts']={}
options=s.getElementsByTagName('switchOpt')
for opt in options:
optionString=opt.getElementsByTagName('input')[0].firstChild.data
switchDict['switchOpts'][optionString]=[]
vals=opt.getElementsByTagName('setValue')
for val in vals:
switchDict['switchOpts'][optionString].append((val.firstChild.data,
val.getAttribute('value')
))
argConfirmArgs=f.getElementsByTagName('argConfirms')
if argConfirmArgs.length>0:
modDict[fName]['argConfirmParams']={}
confirmDict=modDict[fName]['argConfirmParams']
for c in argConfirmArgs:
args=c.getElementsByTagName('arg')
for a in args:
promptName=a.getElementsByTagName('promptToConfirm')[0].firstChild.data
valToConfirm=a.getElementsByTagName('valToConfirm')[0].firstChild.data
confirmDict[promptName]=valToConfirm
dirListArgs=f.getElementsByTagName('dirList')
if dirListArgs.length>0:
modDict[fName]['dirListParams']={}
dirParams=modDict[fName]['dirListParams']
for d in dirListArgs:
prePrint=d.getElementsByTagName('prePrint')
if len(prePrint)>0:
dirParams['prePrint']=str(prePrint[0].firstChild.data)
else:
dirParams['prePrint']=[]
listPrompt=d.getElementsByTagName('listPrompt')
if len(listPrompt)>0:
dirParams['listPrompt']=str(listPrompt[0].firstChild.data)
else:
dirParams['listPrompt']=[]
fileEx=d.getElementsByTagName('fileEx')
if len(fileEx)>0:
dirParams['fileEx']=str(fileEx[0].firstChild.data)
else:
dirParams['fileEx']=[]
prependCWD=d.getElementsByTagName('prependCWD')
if len(prependCWD)>0:
dirParams['prependCWD']=str(prependCWD[0].firstChild.data)
else:
dirParams['prependCWD']=[]
appendImplantArch=d.getElementsByTagName('appendImplantArch')
if len(appendImplantArch)>0:
dirParams['appendImplantArch']=str(appendImplantArch[0].firstChild.data)
else:
dirParams['appendImplantArch']=[]
baseDir=d.getElementsByTagName('baseDir')
if len(baseDir)>0:
dirParams['baseDir']=str(baseDir[0].firstChild.data)
else:
dirParams['baseDir']=[]
promptToSet=d.getElementsByTagName('promptToSet')
if len(promptToSet)>0:
dirParams['promptToSet']=str(promptToSet[0].firstChild.data)
else:
dirParams['promptToSet']=[]
showIfaceNumbers=d.getElementsByTagName('showIfaceNumbers')
if len(showIfaceNumbers)>0:
dirParams['showIfaceNumbers']=str(showIfaceNumbers[0].firstChild.data)
else:
dirParams['showIfaceNumbers']=[]
modNameSplitChar=d.getElementsByTagName('modNameSplitChar')
if len(modNameSplitChar)>0:
dirParams['modNameSplitChar']=str(modNameSplitChar[0].firstChild.data)
else:
dirParams['modNameSplitChar']=[]
requireXml=d.getElementsByTagName('requireXml')
if len(requireXml)>0:
dirParams['requireXml']=str(requireXml[0].firstChild.data)
else:
dirParams['requireXml']=[]
recurse=d.getElementsByTagName('recurse')
if len(recurse)>0:
dirParams['recurse']=str(recurse[0].firstChild.data)
else:
dirParams['recurse']=[]
return parsedFunctions
|
145719
|
import copy
import numpy as np
import timeit
import torch
import torch.nn as nn
from torch.utils.data import BatchSampler, SubsetRandomSampler
import rl_sandbox.constants as c
from rl_sandbox.algorithms.cem.cem import CEMQ
from rl_sandbox.auxiliary_tasks.auxiliary_tasks import AuxiliaryTask
class GRAC:
def __init__(self, model, policy_opt, qs_opt, buffer, algo_params, aux_tasks=AuxiliaryTask()):
""" GRAC Algorithm: https://arxiv.org/abs/2009.08973
"""
self.model = model
self.policy_opt = policy_opt
self.qs_opt = qs_opt
self.buffer = buffer
self.algo_params = algo_params
self.step = 0
self.action_dim = algo_params[c.ACTION_DIM]
        # TODO: They have a scheduler for alpha
self._alpha = algo_params.get(
c.ALPHA, c.DEFAULT_GRAC_PARAMS[c.ALPHA])
self._cov_noise_init = algo_params.get(
c.COV_NOISE_INIT, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_INIT])
self._cov_noise_end = algo_params.get(
c.COV_NOISE_END, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_END])
self._cov_noise_tau = algo_params.get(
c.COV_NOISE_TAU, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_TAU])
self._num_iters = algo_params.get(
c.NUM_ITERS, c.DEFAULT_GRAC_PARAMS[c.NUM_ITERS])
self._pop_size = algo_params.get(
c.POP_SIZE, c.DEFAULT_GRAC_PARAMS[c.POP_SIZE])
self._elite_size = algo_params.get(
c.ELITE_SIZE, c.DEFAULT_GRAC_PARAMS[c.ELITE_SIZE])
self._min_action = algo_params.get(
c.MIN_ACTION, c.DEFAULT_GRAC_PARAMS[c.MIN_ACTION])
self._max_action = algo_params.get(
c.MAX_ACTION, c.DEFAULT_GRAC_PARAMS[c.MAX_ACTION])
self._update_num = algo_params.get(c.UPDATE_NUM, 0)
self.device = algo_params.get(c.DEVICE, torch.device(c.CPU))
self._num_q_updates = algo_params.get(
c.NUM_Q_UPDATES, c.DEFAULT_GRAC_PARAMS[c.NUM_Q_UPDATES])
self._steps_between_update = algo_params.get(
c.STEPS_BETWEEN_UPDATE, c.DEFAULT_GRAC_PARAMS[c.STEPS_BETWEEN_UPDATE])
self._buffer_warmup = algo_params.get(
c.BUFFER_WARMUP, c.DEFAULT_GRAC_PARAMS[c.BUFFER_WARMUP])
self._reward_scaling = algo_params.get(
c.REWARD_SCALING, c.DEFAULT_GRAC_PARAMS[c.REWARD_SCALING])
self._gamma = algo_params.get(c.GAMMA, c.DEFAULT_GRAC_PARAMS[c.GAMMA])
self._num_gradient_updates = algo_params.get(
c.NUM_GRADIENT_UPDATES, c.DEFAULT_GRAC_PARAMS[c.NUM_GRADIENT_UPDATES])
self._batch_size = algo_params.get(
c.BATCH_SIZE, c.DEFAULT_GRAC_PARAMS[c.BATCH_SIZE])
self._accum_num_grad = algo_params.get(
c.ACCUM_NUM_GRAD, c.DEFAULT_GRAC_PARAMS[c.ACCUM_NUM_GRAD])
self._num_prefetch = algo_params.get(
c.NUM_PREFETCH, 1)
self._aux_tasks = aux_tasks
assert self._batch_size % self._accum_num_grad == 0
assert self._num_gradient_updates % self._num_prefetch == 0
self._num_samples_per_accum = self._batch_size // self._accum_num_grad
self._max_grad_norm = algo_params.get(
c.MAX_GRAD_NORM, c.DEFAULT_GRAC_PARAMS[c.MAX_GRAD_NORM])
self.train_preprocessing = algo_params[c.TRAIN_PREPROCESSING]
self.cem = CEMQ(cov_noise_init=self._cov_noise_init,
cov_noise_end=self._cov_noise_end,
cov_noise_tau=self._cov_noise_tau,
action_dim=self.action_dim,
batch_size=self._num_samples_per_accum,
num_iters=self._num_iters,
pop_size=self._pop_size,
elite_size=self._elite_size,
device=self.device,
min_action=self._min_action,
max_action=self._max_action,)
def state_dict(self):
state_dict = {}
state_dict[c.STATE_DICT] = self.model.state_dict()
state_dict[c.POLICY_OPTIMIZER] = self.policy_opt.state_dict()
state_dict[c.QS_OPTIMIZER] = self.qs_opt.state_dict()
if hasattr(self.model, c.OBS_RMS):
state_dict[c.OBS_RMS] = self.model.obs_rms
if hasattr(self.model, c.VALUE_RMS):
state_dict[c.VALUE_RMS] = self.model.value_rms
return state_dict
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict[c.STATE_DICT])
self.policy_opt.load_state_dict(state_dict[c.POLICY_OPTIMIZER])
self.qs_opt.load_state_dict(state_dict[c.QS_OPTIMIZER])
if hasattr(self.model, c.OBS_RMS) and c.OBS_RMS in state_dict:
self.model.obs_rms = state_dict[c.OBS_RMS]
if hasattr(self.model, c.VALUE_RMS) and c.VALUE_RMS in state_dict:
self.model.value_rms = state_dict[c.VALUE_RMS]
def construct_q_function(self, q_i):
def q_function(obss, h_states, acts, lengths):
res = self.model.q_vals(obss, h_states, acts, lengths=lengths)
return res[q_i + 1]
return q_function
def _compute_qs_loss(self, obss, h_states, acts, dones, best_next_acts, target, targ_q1_best, targ_q2_best, next_obss, lengths):
best_next_acts, target, targ_q1_best, targ_q2_best, dones = best_next_acts.to(self.device), target.to(self.device), targ_q1_best.to(self.device), targ_q2_best.to(self.device), dones.to(self.device)
_, q1_val, q2_val, next_h_states = self.model.q_vals(obss, h_states, acts, lengths=lengths)
_, q1_best, q2_best, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)
q1_loss = ((q1_val - target) ** 2).sum()
q2_loss = ((q2_val - target) ** 2).sum()
# NOTE: Supposedly we shouldn't be concerned about state at timestep T + 1, assuming the episode ends at timestep T.
q1_reg = (((1 - dones) * (q1_best - targ_q1_best)) ** 2).sum()
q2_reg = (((1 - dones) * (q2_best - targ_q2_best)) ** 2).sum()
return q1_loss, q2_loss, q1_reg, q2_reg
def _compute_acts_targets(self, obss, h_states, acts, rews, dones, next_obss, discounting, lengths):
with torch.no_grad():
rews, dones, discounting = rews.to(self.device), dones.to(self.device), discounting.to(self.device)
_, q1_val, q2_val, next_h_states = self.model.q_vals(obss, h_states, acts, lengths=lengths)
# Compute next actions with policy and CEM
next_acts_pi, next_acts_pi_mean, next_acts_pi_var, _, _ = self.model.act_stats(next_obss, next_h_states)
# NOTE: It is important to clip this action. Otherwise the Q-function gets OOD data
next_acts_pi = torch.clamp(next_acts_pi, min=self._min_action[0], max=self._max_action[0])
best_next_acts = self.cem.compute_action(self.construct_q_function(q_i=1),
next_obss,
next_h_states,
next_acts_pi_mean,
next_acts_pi_var,
lengths=None)
# Get best actions and best Q values
min_q_targs_pi, _, _, _ = self.model.q_vals(next_obss, next_h_states, next_acts_pi)
min_q_targs_cem, _, _, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)
best_q_targs = torch.max(min_q_targs_pi, min_q_targs_cem)
target = rews + (self._gamma ** discounting) * (1 - dones) * best_q_targs
replace_idxes = (min_q_targs_pi > min_q_targs_cem).squeeze()
best_next_acts[replace_idxes] = next_acts_pi[replace_idxes]
_, q1_best, q2_best, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)
return best_next_acts.cpu().detach(), target.cpu().detach(), q1_best.cpu().detach(), q2_best.cpu().detach(), (q2_val - q1_val).sum().cpu().detach(), q1_best.max().cpu().detach(), q2_best.max().cpu().detach()
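    # Illustrative note (not part of the computation): the target above is
    #   y = r + gamma**k * (1 - done) * max(minQ(s', a'_pi), minQ(s', a'_cem)),
    # taking the better of the policy's sampled action and the CEM-refined one;
    # the frozen q1_best/q2_best values are reused by _compute_qs_loss as
    # GRAC's self-regularization targets.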
def _compute_pi_loss(self, obss, h_states, acts, lengths):
acts_pi, acts_pi_mean, acts_pi_var, entropies, v_pi = self.model.act_stats(obss, h_states, lengths=lengths)
_, q1_pi, _, _ = self.model.q_vals(obss, h_states, acts_pi, lengths=lengths)
acts_cem = self.cem.compute_action(self.construct_q_function(q_i=0),
obss,
h_states,
acts_pi_mean,
acts_pi_var,
lengths=lengths)
with torch.no_grad():
_, q1_cem, _, _ = self.model.q_vals(obss, h_states, acts_cem, lengths=lengths)
score = q1_cem - v_pi
score = torch.clamp(score.detach(), min=0.)
acts_cem_lprob = self.model.lprob(obss, h_states, acts_cem, lengths=lengths)
cem_loss = (score * acts_cem_lprob).sum() / self.action_dim
q_loss = q1_pi.sum()
pi_loss = -(q_loss + cem_loss)
return pi_loss, acts_cem_lprob.max().detach().cpu(), acts_cem_lprob.min().detach().cpu()
def update_qs(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, infos, lengths, update_info):
init_qs_loss = None
best_next_acts = []
targets = []
q1_bests = []
q2_bests = []
        total_qs_discrepancy = 0.
total_q2_val = 0.
max_q1 = -np.inf
max_q2 = -np.inf
for grad_i in range(self._accum_num_grad):
opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
            best_next_act, target, q1_best, q2_best, qs_discrepancy, q1_max, q2_max = self._compute_acts_targets(obss[opt_idxes],
h_states[opt_idxes],
acts[opt_idxes],
rews[opt_idxes],
dones[opt_idxes],
next_obss[opt_idxes],
discounting[opt_idxes],
lengths[opt_idxes])
best_next_acts.append(best_next_act)
targets.append(target)
q1_bests.append(q1_best)
q2_bests.append(q2_best)
            total_qs_discrepancy += qs_discrepancy
max_q1 = max(q1_max, max_q1)
max_q2 = max(q2_max, max_q2)
best_next_acts = torch.cat(best_next_acts, dim=0)
targets = torch.cat(targets, dim=0)
q1_bests = torch.cat(q1_bests, dim=0)
q2_bests = torch.cat(q2_bests, dim=0)
q1_losses = []
q2_losses = []
total_update_time = 0.
q1_regs = []
q2_regs = []
for update_i in range(self._num_q_updates):
tic = timeit.default_timer()
self.qs_opt.zero_grad()
total_q1_loss = 0.
total_q2_loss = 0.
total_q1_reg = 0.
total_q2_reg = 0.
            for grad_i in range(self._accum_num_grad):
                opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
                                  batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
                # the concatenated targets are indexed from 0 within this batch
                accum_idxes = range(grad_i * self._num_samples_per_accum,
                                    (grad_i + 1) * self._num_samples_per_accum)
                q1_loss, q2_loss, q1_reg, q2_reg = self._compute_qs_loss(obss[opt_idxes],
                                                                         h_states[opt_idxes],
                                                                         acts[opt_idxes],
                                                                         dones[opt_idxes],
                                                                         best_next_acts[accum_idxes],
                                                                         targets[accum_idxes],
                                                                         q1_bests[accum_idxes],
                                                                         q2_bests[accum_idxes],
                                                                         next_obss[opt_idxes],
                                                                         lengths[opt_idxes])
q1_loss /= self._batch_size
q2_loss /= self._batch_size
q1_reg /= self._batch_size
q2_reg /= self._batch_size
qs_loss = q1_loss + q2_loss + q1_reg + q2_reg
total_q1_loss += q1_loss.detach().cpu()
total_q2_loss += q2_loss.detach().cpu()
total_q1_reg += q1_reg.detach().cpu()
total_q2_reg += q2_reg.detach().cpu()
qs_loss.backward()
nn.utils.clip_grad_norm_(self.model.qs_parameters,
self._max_grad_norm)
self.qs_opt.step()
total_update_time += timeit.default_timer() - tic
q1_losses.append(total_q1_loss.numpy())
q2_losses.append(total_q2_loss.numpy())
q1_regs.append(total_q1_reg.numpy())
q2_regs.append(total_q2_reg.numpy())
if init_qs_loss is None:
init_qs_loss = qs_loss.detach()
# This seems to be a hack for not overfitting?
if qs_loss.detach() < init_qs_loss * self._alpha:
break
update_info[c.Q1_MAX].append(max_q1)
update_info[c.Q2_MAX].append(max_q2)
update_info[c.Q_UPDATE_TIME].append(total_update_time)
update_info[c.Q1_LOSS].append(np.mean(q1_losses))
update_info[c.Q2_LOSS].append(np.mean(q2_losses))
update_info[c.Q1_REG].append(np.mean(q1_regs))
update_info[c.Q2_REG].append(np.mean(q2_regs))
        update_info[c.AVG_Q_DISCREPANCY].append(total_qs_discrepancy / self._batch_size)
def update_policy(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, infos, lengths, update_info):
tic = timeit.default_timer()
self.policy_opt.zero_grad()
total_pi_loss = 0.
max_lprob = -np.inf
min_lprob = np.inf
for grad_i in range(self._accum_num_grad):
opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
pi_loss, lprob_max, lprob_min = self._compute_pi_loss(obss[opt_idxes],
h_states[opt_idxes],
acts[opt_idxes],
lengths[opt_idxes])
max_lprob = max(lprob_max, max_lprob)
min_lprob = min(lprob_min, min_lprob)
pi_loss /= self._batch_size
total_pi_loss += pi_loss.detach().cpu()
pi_loss.backward()
nn.utils.clip_grad_norm_(self.model.policy_parameters,
self._max_grad_norm)
self.policy_opt.step()
update_info[c.LPROB_MAX].append(max_lprob)
update_info[c.LPROB_MIN].append(min_lprob)
update_info[c.POLICY_UPDATE_TIME].append(timeit.default_timer() - tic)
update_info[c.PI_LOSS].append(total_pi_loss.numpy())
def _store_to_buffer(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
self.buffer.push(curr_obs, curr_h_state, act, rew, [done], info, next_obs=next_obs, next_h_state=next_h_state)
def update(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
self._store_to_buffer(curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state)
self.step += 1
update_info = {}
if hasattr(self.model, c.OBS_RMS):
self.model.obs_rms.update(self.eval_preprocessing(torch.tensor(curr_obs)))
        # Perform GRAC update
if self.step >= self._buffer_warmup and self.step % self._steps_between_update == 0:
update_info[c.PI_LOSS] = []
update_info[c.Q1_LOSS] = []
update_info[c.Q2_LOSS] = []
update_info[c.Q1_REG] = []
update_info[c.Q2_REG] = []
update_info[c.SAMPLE_TIME] = []
update_info[c.Q_UPDATE_TIME] = []
update_info[c.POLICY_UPDATE_TIME] = []
update_info[c.AVG_Q1_VAL] = []
update_info[c.AVG_Q2_VAL] = []
update_info[c.AVG_Q_DISCREPANCY] = []
update_info[c.LPROB_MAX] = []
update_info[c.LPROB_MIN] = []
update_info[c.Q1_MAX] = []
update_info[c.Q2_MAX] = []
for _ in range(self._num_gradient_updates // self._num_prefetch):
tic = timeit.default_timer()
obss, h_states, acts, rews, dones, next_obss, next_h_states, infos, lengths = self.buffer.sample_with_next_obs(
self._batch_size * self._num_prefetch, next_obs, next_h_state)
obss = self.train_preprocessing(obss)
next_obss = self.train_preprocessing(next_obss)
rews = rews * self._reward_scaling
discounting = infos[c.DISCOUNTING]
update_info[c.SAMPLE_TIME].append(timeit.default_timer() - tic)
for batch_i in range(self._num_prefetch):
self._update_num += 1
batch_start_idx = batch_i * self._batch_size
# Update Q functions
# Auxiliary tasks are usually for shared layers, which is updated along with Q
aux_loss, aux_update_info = self._aux_tasks.compute_loss(next_obs, next_h_state)
if hasattr(aux_loss, c.BACKWARD):
aux_loss.backward()
self.update_qs(batch_start_idx,
obss,
h_states,
acts,
rews,
dones,
next_obss,
next_h_states,
discounting,
infos,
lengths,
update_info)
self._aux_tasks.step()
update_info.update(aux_update_info)
# Update policy
self.update_policy(batch_start_idx,
obss,
h_states,
acts,
rews,
dones,
next_obss,
next_h_states,
discounting,
infos,
lengths,
update_info)
if hasattr(self.model, c.VALUE_RMS):
update_info[f"{c.VALUE_RMS}/{c.MEAN}"] = self.model.value_rms.mean.numpy()
update_info[f"{c.VALUE_RMS}/{c.VARIANCE}"] = self.model.value_rms.var.numpy()
return True, update_info
return False, update_info
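# A self-contained sketch of the cross-entropy-method loop that CEMQ is assumed
# to implement (illustration only; rl_sandbox's actual CEMQ lives elsewhere).
# It maximizes score_fn by repeatedly refitting a Gaussian to elite samples.
def cem_maximize(score_fn, mean, var, num_iters=4, pop_size=64, elite_size=8):
    for _ in range(num_iters):
        # sample a population around the current Gaussian
        samples = np.random.randn(pop_size, mean.shape[0]) * np.sqrt(var) + mean
        scores = np.array([score_fn(s) for s in samples])
        # keep the elites and refit mean/variance to them
        elites = samples[np.argsort(scores)[-elite_size:]]
        mean, var = elites.mean(axis=0), elites.var(axis=0) + 1e-6
    return mean
# e.g. cem_maximize(lambda a: -np.sum((a - 0.3) ** 2), np.zeros(2), np.ones(2))
# converges toward [0.3, 0.3].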
|
145736
|
import functools
import getpass
import json
import click
from click_didyoumean import DYMMixin
from click_help_colors import HelpColorsGroup
api_key_option = click.option(
"--apiKey",
"api_key",
help="API key to use this time only",
)
def del_if_value_is_none(dict_):
"""Remove all elements with value == None"""
for key, val in list(dict_.items()):
if val is None:
del dict_[key]
def jsonify_dicts(dict_):
json_fields = [
"envVars",
"nodeAttrs"
]
for field in json_fields:
if field in dict_:
dict_[field] = json.dumps(dict_[field])
class ClickGroup(DYMMixin, HelpColorsGroup):
pass
def prompt_for_secret(prompt):
def callback_fun(ctx, param, value):
if value is None:
value = getpass.getpass(prompt)
return value
return callback_fun
def deprecated(version="1.0.0"):
    deprecated_invoke_notice = """DeprecatedWarning: \nWARNING: This command will not be included in version %s.
For more information, please see:
https://docs.paperspace.com
If you depend on functionality not listed there, please file an issue.""" % version
def new_invoke(self, ctx):
click.echo(click.style(deprecated_invoke_notice, fg='red'), err=True)
super(type(self), self).invoke(ctx)
    def decorator(f):
        f.invoke = functools.partial(new_invoke, f)
        return f
    return decorator
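# Illustrative usage (hypothetical command, not defined in this module):
#
#   @deprecated(version="2.0.0")
#   @main.command("old-command")
#   def old_command():
#       ...
#
# The red DeprecatedWarning banner is echoed to stderr before the wrapped
# command's invoke() runs.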
|
145762
|
import json
import os
from requests.auth import HTTPBasicAuth
from moneywagon.core import (
Service, NoService, NoData, ServiceError, SkipThisService, currency_to_protocol,
decompile_scriptPubKey
)
from bitcoin import deserialize
import arrow
from bs4 import BeautifulSoup
import re
import hmac, hashlib, time, requests, base64
from requests.auth import AuthBase
try:
from urllib import urlencode, quote_plus
except ImportError:
from urllib.parse import urlencode, quote_plus
def make_standard_nonce(small=False):
    num = int(time.time() * 1000)  # milliseconds since the unix epoch
    if small:
        # some exchange APIs reject very large nonces, so subtract a fixed
        # recent epoch timestamp to keep the number small but still increasing
        return str(num - 1506215312123)
    return str(num)
def make_stateful_nonce(exchange):
path = os.path.expanduser('~/.moneywagon_state')
    if not os.path.exists(path):
        # create the state file on first use
        with open(path, "w+") as f:
            f.write('{}')
with open(path) as f:
j = json.loads(f.read())
if exchange not in j:
j[exchange] = {'last_used_nonce': 0}
nonce = j[exchange].get('last_used_nonce', 0) + 1
j[exchange]['last_used_nonce'] = nonce
with open(path, "w") as f:
f.write(json.dumps(j))
return nonce
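# e.g. (illustrative): after two calls with exchange="bitstamp" the state file
# ~/.moneywagon_state contains {"bitstamp": {"last_used_nonce": 2}} and the
# calls returned 1 and 2.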
def eight_decimal_places(amount, format="str"):
"""
>>> eight_decimal_places(3.12345678912345)
"3.12345679"
>>> eight_decimal_places("3.12345678912345")
"3.12345679"
>>> eight_decimal_places(3.12345678912345, format='float')
3.12345679
>>> eight_decimal_places("3.12345678912345", format='float')
3.12345679
"""
if type(amount) == str:
return amount
if format == 'str':
return "%.8f" % amount
if format == 'float':
return float("%.8f" % amount)
class Bitstamp(Service):
service_id = 1
supported_cryptos = ['btc']
api_homepage = "https://www.bitstamp.net/api/"
name = "Bitstamp"
exchange_fee_rate = 0.0025
def __init__(self, customer_id=None, **kwargs):
self.customer_id = customer_id
super(Bitstamp, self).__init__(**kwargs)
def make_market(self, crypto, fiat):
return ("%s%s" % (crypto, fiat)).lower()
def get_current_price(self, crypto, fiat):
url = "https://www.bitstamp.net/api/v2/ticker/%s" % (
self.make_market(crypto, fiat)
)
response = self.get_url(url).json()
return float(response['last'])
def get_pairs(self):
return ['btc-usd', 'btc-eur', 'bch-btc', 'bch-usd', 'xrp-usd', 'xrp-eur', 'xrp-btc']
def get_orderbook(self, crypto, fiat):
url = "https://www.bitstamp.net/api/v2/order_book/%s/" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']],
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']]
}
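    # Orderbooks are normalized across services to
    #   {'bids': [(price, amount), ...], 'asks': [(price, amount), ...]}
    # with plain floats, e.g. {'bids': [(41750.0, 0.5)], 'asks': [(41800.0, 1.2)]}
    # (values illustrative).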
def _make_signature(self, nonce):
message = nonce + self.customer_id + self.api_key
return hmac.new(
self.api_secret,
msg=message,
digestmod=hashlib.sha256
).hexdigest().upper()
def _auth_request(self, url, params):
nonce = make_standard_nonce()
params.update({
'nonce': nonce,
'signature': self._make_signature(nonce),
'key': self.api_key,
})
return self.post_url(url, params)
def get_exchange_balance(self, currency, type="available"):
url = "https://www.bitstamp.net/api/balance/"
resp = self._auth_request(url, {}).json()
try:
return float(resp["%s_%s" % (currency.lower(), type)])
except KeyError:
return 0
def get_total_exchange_balances(self):
url = "https://www.bitstamp.net/api/balance/"
resp = self._auth_request(url, {}).json()
return {
code[:-8]: float(bal) for code, bal in resp.items()
if code.endswith("balance") and float(bal) > 0
}
def get_deposit_address(self, currency):
if currency.lower() == 'btc':
url = "https://www.bitstamp.net/api/bitcoin_deposit_address/"
return self._auth_request(url, {}).json()
if currency.lower() == 'xrp':
url = "https://www.bitstamp.net/api/ripple_address/"
return self._auth_request(url, {}).json()['address']
if currency.lower() in ['eth', 'ltc']:
url = "https://www.bitstamp.net/api/v2/%s_address/" % currency.lower()
return self._auth_request(url, {}).json()['address']
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
if type == 'limit':
url = "https://www.bitstamp.net/api/v2/%s/%s/" % (
side, self.make_market(crypto, fiat)
)
resp = self._auth_request(url, {
'amount': eight_decimal_places(amount),
'price': price,
})
if type == 'market':
url = "https://www.bitstamp.net/api/v2/%s/market/%s/" % (
side, self.make_market(crypto, fiat)
)
resp = self._auth_request(url, {
'amount': eight_decimal_places(amount),
})
return resp.json()
make_order.supported_types = ['limit', 'market']
make_order.minimums = {}
class CoinbaseExchangeAuth(AuthBase):
def __init__(self, api_key, secret_key, passphrase):
self.api_key = api_key
self.secret_key = secret_key
self.passphrase = passphrase
def __call__(self, request):
timestamp = str(time.time())
message = timestamp + request.method + request.path_url + (request.body or '')
hmac_key = base64.b64decode(self.secret_key)
signature = hmac.new(hmac_key, message, hashlib.sha256)
signature_b64 = signature.digest().encode('base64').rstrip('\n')
request.headers.update({
'CB-ACCESS-SIGN': signature_b64,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
'CB-ACCESS-PASSPHRASE': self.passphrase,
'Content-Type': 'application/json'
})
return request
class GDAX(Service):
service_id = 59
name = "GDAX"
base_url = "https://api.gdax.com"
api_homepage = "https://docs.gdax.com/"
supported_cryptos = ['btc', 'ltc', 'eth', 'bch']
exchange_fee_rate = 0.0025
def __init__(self, api_pass=None, **kwargs):
self.auth = None
self.api_pass = api_pass
super(GDAX, self).__init__(**kwargs)
if self.api_key and self.api_secret and self.api_pass:
self.auth = CoinbaseExchangeAuth(self.api_key, self.api_secret, self.api_pass)
def check_error(self, response):
if response.status_code != 200:
j = response.json()
raise ServiceError("GDAX returned %s error: %s" % (
response.status_code, j['message'])
)
super(GDAX, self).check_error(response)
def make_market(self, crypto, fiat):
return ("%s-%s" % (crypto, fiat)).upper()
def get_current_price(self, crypto, fiat):
url = "%s/products/%s/ticker" % (self.base_url, self.make_market(crypto, fiat))
response = self.get_url(url).json()
return float(response['price'])
def get_pairs(self):
url = "%s/products" % self.base_url
r = self.get_url(url).json()
return [x['id'].lower() for x in r]
def get_orderbook(self, crypto, fiat):
url = "%s/products/%s/book?level=3" % (self.base_url, self.make_market(crypto, fiat))
r = self.get_url(url).json()
return {
'bids': [(float(x[0]), float(x[1])) for x in r['bids']],
'asks': [(float(x[0]), float(x[1])) for x in r['asks']]
}
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
time_in_force = 'GTC'
if type == 'fill-or-kill':
type = 'limit'
time_in_force = 'FOK'
url = "%s/orders" % self.base_url
data = {
"size": eight_decimal_places(amount),
"type": type,
"price": price,
"side": side,
"time_in_force": time_in_force,
"product_id": self.make_market(crypto, fiat)
}
response = self.post_url(url, json=data, auth=self.auth).json()
return response['id']
make_order.supported_types = ['fill-or-kill', 'limit', 'market', 'stop']
make_order.minimums = {'btc': 0.0001, 'eth': 0.001, 'ltc': 0.01, 'usd': 1, 'eur': 1, 'gbp': 1}
def list_orders(self, status="open"):
url = "%s/orders" % self.base_url
response = self.get_url(url, auth=self.auth)
return response.json()
def cancel_order(self, order_id):
url = "%s/orders/%s" % (self.base_url, order_id)
response = self.delete_url(url, auth=self.auth)
return response.json()
def get_exchange_balance(self, currency, type="available"):
url = "%s/accounts" % self.base_url
resp = self.get_url(url, auth=self.auth).json()
try:
match = [x for x in resp if currency.upper() == x['currency']][0]
except IndexError:
return 0
return float(match[type])
def get_total_exchange_balances(self):
url = "%s/accounts" % self.base_url
resp = self.get_url(url, auth=self.auth).json()
return {
x['currency'].lower(): float(x['balance']) for x in resp
}
def initiate_withdraw(self, currency, amount, address):
url = "%s/withdrawals/crypto" % self.base_url
resp = self.post_url(url, auth=self.auth, json={
'crypto_address': address,
'currency': currency.upper(),
'amount': eight_decimal_places(amount)
})
return resp.json()
class BitFinex(Service):
service_id = 120
api_homepage = "https://bitfinex.readme.io/v2/reference"
exchange_fee_rate = 0.002
def check_error(self, response):
j = response.json()
if j and type(j) is list and j[0] == 'error':
raise SkipThisService(
"BitFinex returned Error: %s (%s)" % (j[2], j[1])
)
super(BitFinex, self).check_error(response)
def parse_market(self, market):
crypto = market[:3]
fiat = market[3:]
if crypto == 'dsh':
crypto = 'dash'
if crypto == 'iot':
crypto = 'miota'
return crypto, fiat
def fix_symbol(self, symbol):
if symbol == 'dash':
return 'dsh'
if symbol == 'miota':
return 'iot'
return symbol
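    # e.g. fix_symbol('dash') -> 'dsh' and parse_market('dshusd') -> ('dash', 'usd');
    # BitFinex's three-letter tickers differ from the canonical symbols for a
    # few coins, so both directions are mapped here.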
def make_market(self, crypto, fiat):
return "%s%s" % (
self.fix_symbol(crypto).lower(), self.fix_symbol(fiat).lower()
)
def get_pairs(self):
url = "https://api.bitfinex.com/v1/symbols"
r = self.get_url(url).json()
return ["%s-%s" % self.parse_market(x) for x in r]
def get_current_price(self, crypto, fiat):
url = "https://api.bitfinex.com/v2/ticker/t%s" % self.make_market(crypto, fiat).upper()
r = self.get_url(url).json()
return r[6]
def get_orderbook(self, crypto, fiat):
url = "https://api.bitfinex.com/v1/book/%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x['price']), float(x['amount'])) for x in resp['bids']],
'asks': [(float(x['price']), float(x['amount'])) for x in resp['asks']]
}
def _make_signature(self, path, args, nonce, version=2):
if version == 2:
msg = '/api/' + path + nonce + json.dumps(args)
elif version == 1:
msg = nonce # actually payload, but passed in as nonce
return hmac.new(self.api_secret, msg, hashlib.sha384).hexdigest()
def _auth_request(self, path, params):
url = "https://api.bitfinex.com"
nonce = make_standard_nonce()
if path.startswith("/v2/"):
headers = {
'bfx-nonce': nonce,
'bfx-apikey': self.api_key,
'bfx-signature': self._make_signature(path, params, nonce, version=2)
}
elif path.startswith("/v1/"):
params['request'] = path
params['nonce'] = nonce
payload = base64.b64encode(json.dumps(params))
headers = {
'X-BFX-PAYLOAD': payload,
'X-BFX-APIKEY': self.api_key,
'X-BFX-SIGNATURE': self._make_signature(path, params, payload, version=1)
}
return self.post_url(url + path, json=params, headers=headers)
def get_deposit_address(self, crypto):
resp = self._auth_request("/v2/auth/r/wallets", {})
filt = [x[2] for x in resp.json() if x[1] == crypto.upper()]
return filt[0] if filt else 0
def get_exchange_balance(self, currency, type="available"):
curr = self.fix_symbol(currency)
resp = self._auth_request("/v1/balances", {}).json()
for item in resp:
if item['currency'] == curr.lower():
return float(item[type])
return 0
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
url = "/v1/order/new"
resp = self._auth_request(url, {
'symbol': self.make_market(crypto, fiat),
'amount': eight_decimal_places(amount),
'price': str(price),
'side': side,
'type': 'exchange %s' % type,
})
return resp.json()
make_order.supported_types = ['fill-or-kill', 'market', 'limit', 'stop', 'trailing-stop']
make_order.minimums = {}
def initiate_withdraw(self, crypto, amount, address):
from moneywagon.crypto_data import crypto_data
if crypto == 'etc':
type = "ethereumc"
else:
type = crypto_data[crypto.lower()]['name'].lower()
resp = self._auth_request("/v1/withdraw", {
"withdraw_type": type,
"walletselected": "exchange",
"amount": eight_decimal_places(amount),
"address": address,
}).json()
return resp
class NovaExchange(Service):
service_id = 89
name = "NovaExchange"
api_homepage = "https://novaexchange.com/remote/faq/"
def make_market(self, crypto, fiat):
return "%s_%s" % (fiat, crypto)
def check_error(self, response):
if response.json()['status'] == 'error':
raise ServiceError("NovaExchange returned error: %s" % response.json()['message'])
super(NovaExchange, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "https://novaexchange.com/remote/v2/market/info/%s" % self.make_market(crypto, fiat)
r = self.get_url(url).json()
return float(r['markets'][0]['last_price'])
def get_pairs(self):
url = "https://novaexchange.com/remote/v2/markets/"
r = self.get_url(url).json()
ret = []
for pair in r['markets']:
fiat = pair['basecurrency'].lower()
crypto = pair['currency'].lower()
ret.append("%s-%s" % (crypto, fiat))
return ret
def get_orderbook(self, crypto, fiat):
url = "https://novaexchange.com/remote/v2/market/openorders/%s/both/" % (
self.make_market(crypto, fiat)
)
r = self.get_url(url).json()
return {
'bids': [(float(x['price']), float(x['amount'])) for x in r['buyorders']],
'asks': [(float(x['price']), float(x['amount'])) for x in r['sellorders']],
}
def _make_signature(self, url):
return base64.b64encode(
hmac.new(self.api_secret, url, hashlib.sha512).digest()
)
def _auth_request(self, url, params):
url += '?nonce=' + make_standard_nonce()
params['apikey'] = self.api_key
params['signature'] = self._make_signature(url)
headers = {'content-type': 'application/x-www-form-urlencoded'}
return self.post_url(url, data=params, headers=headers, timeout=60)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
url = "https://novaexchange.com/remote/v2/private/trade/%s/" % (
self.make_market(crypto, fiat)
)
params = {
'tradetype': side.upper(),
'tradeamount': eight_decimal_places(amount),
'tradeprice': price,
'tradebase': 0, # indicates "amount" is in crypto units, not fiat units
}
resp = self._auth_request(url, params)
return resp.json()['tradeitems'][0]['orderid']
make_order.minimums = {}
def cancel_order(self, order_id):
url = "https://novaexchange.com/remote/v2/private/cancelorder/%s/" % order_id
resp = self._auth_request(url, {})
return resp.json()['status'] == 'ok'
def list_orders(self, status="open"):
if status == 'open':
url = "https://novaexchange.com/remote/v2/private/myopenorders/"
else:
            raise NotImplementedError("getting orders by status=%s not implemented yet" % status)
resp = self._auth_request(url, {})
return resp.json()['items']
def get_deposit_address(self, crypto):
url = "https://novaexchange.com/remote/v2/private/getdepositaddress/%s/" % crypto
resp = self._auth_request(url, {})
return resp.json()['address']
def initiate_withdraw(self, crypto, amount, address):
url = "https://novaexchange.com/remote/v2/private/withdraw/%s/" % crypto
params = {'currency': crypto, 'amount': eight_decimal_places(amount), 'address': address}
resp = self._auth_request(url, params)
return resp.json()
class xBTCe(Service):
service_id = 90
name = "xBTCe"
api_homepage = "https://www.xbtce.com/tradeapi"
def get_current_price(self, crypto, fiat):
if crypto.lower() == 'dash':
crypto = "dsh"
if fiat.lower() == 'rur':
fiat = 'rub'
if fiat.lower() == 'cny':
fiat = 'cnh'
pair = "%s%s" % (crypto.upper(), fiat.upper())
url = "https://cryptottlivewebapi.xbtce.net:8443/api/v1/public/ticker/%s" % pair
r = self.get_url(url).json()
try:
return r[0]['LastSellPrice']
except IndexError:
raise ServiceError("Pair not found")
def get_pairs(self):
url = "https://cryptottlivewebapi.xbtce.net:8443/api/v1/public/symbol"
r = self.get_url(url).json()
ret = []
for pair in r:
crypto = pair['MarginCurrency'].lower()
fiat = pair['ProfitCurrency'].lower()
if crypto.lower() == 'dsh':
crypto = "dash"
if fiat.lower() == 'rub':
fiat = 'rur'
if fiat == 'cnh':
fiat = 'cny'
ret.append(("%s-%s" % (crypto, fiat)))
return list(set(ret))
class OKcoin(Service):
service_id = 60
name = "OKcoin"
exchange_base_url = "https://www.okcoin.cn"
block_base_url = "http://block.okcoin.cn"
supported_cryptos = ['btc', 'ltc']
api_homepage = "https://www.okcoin.cn/rest_getStarted.html"
def get_current_price(self, crypto, fiat):
        if fiat != 'cny':
raise SkipThisService("Only fiat=CNY supported")
url = "%s/api/v1/ticker.do?symbol=%s_%s" % (
self.exchange_base_url, crypto.lower(), fiat.lower()
)
response = self.get_url(url).json()
return float(response['ticker']['last'])
def check_error(self, response):
j = response.json()
if 'error_code' in j:
raise ServiceError("OKcoin returned error code %s" % j['error_code'])
super(OKcoin, self).check_error(response)
def get_pairs(self):
return ['btc-cny', 'ltc-cny']
def get_block(self, crypto, block_hash=None, block_number=None, latest=False):
if latest:
args = 'latest_block.do?'
if block_number or block_number == 0:
args = "block_height.do?block_height=%s&" % block_number
if block_hash:
raise SkipThisService("Block by hash not supported")
url = "%s/api/v1/%ssymbol=%s" % (
self.block_base_url, args, crypto.upper()
)
r = self.get_url(url).json()
ret = dict(
block_number=r['height'],
size=r['size'],
time=arrow.get(r['time'] / 1000).datetime,
hash=r['hash'],
txids=r['txid'],
tx_count=r['txcount'],
version=r['version'],
mining_difficulty=r['difficulty'],
total_fees=r['fee'],
sent_value=r['totalOut']
)
if r.get('relayed_by'):
ret['miner'] = r['relayed_by']
if r.get('previousblockhash'):
ret['previous_hash'] = r['previousblockhash']
if r.get('nextblockhash'):
ret['next_hash'] = r['nextblockhash']
return ret
class FreeCurrencyConverter(Service):
service_id = 61
base_url = "http://free.currencyconverterapi.com"
api_homepage = "http://www.currencyconverterapi.com/docs"
def get_fiat_exchange_rate(self, from_fiat, to_fiat):
pair = "%s_%s" % (to_fiat.upper(), from_fiat.upper())
url = "%s/api/v3/convert?q=%s&compact=y" % (
self.base_url, pair
)
response = self.get_url(url).json()
return response[pair]['val']
class BTCChina(Service):
service_id = 62
api_homepage = "https://www.btcc.com/apidocs/spot-exchange-market-data-rest-api#ticker"
name = "BTCChina"
def get_current_price(self, crypto, fiat):
if fiat == 'usd':
url = "https://spotusd-data.btcc.com/data/pro/ticker?symbol=%sUSD" % crypto.upper()
key = "Last"
else:
url = "https://data.btcchina.com/data/ticker?market=%s%s" % (
crypto.lower(), fiat.lower()
)
key = "last"
response = self.get_url(url).json()
if not response:
raise ServiceError("Pair not supported (blank response)")
return float(response['ticker'][key])
class Gemini(Service):
service_id = 63
api_homepage = "https://docs.gemini.com/rest-api/"
name = "Gemini"
exchange_fee_rate = 0.0025
def check_error(self, response):
j = response.json()
if 'result' in j and j['result'] == 'error':
raise ServiceError("Gemini returned error: %s" % j['reason'])
super(Gemini, self).check_error(response)
def make_market(self, crypto, fiat):
return "%s%s" % (crypto.lower(), fiat.lower())
def get_current_price(self, crypto, fiat):
url = "https://api.gemini.com/v1/pubticker/%s" % self.make_market(crypto, fiat)
response = self.get_url(url).json()
return float(response['last'])
def get_orderbook(self, crypto, fiat):
url = "https://api.gemini.com/v1/book/%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x['price']), float(x['amount'])) for x in resp['bids']],
'asks': [(float(x['price']), float(x['amount'])) for x in resp['asks']]
}
def _make_signature(self, payload):
return hmac.new(self.api_secret, payload, hashlib.sha384).hexdigest()
def _auth_request(self, path, params):
params['nonce'] = make_standard_nonce()
params['request'] = path
payload = base64.b64encode(json.dumps(params))
headers = {
'X-GEMINI-APIKEY': self.api_key,
'X-GEMINI-PAYLOAD': payload,
'X-GEMINI-SIGNATURE': self._make_signature(payload)
}
return self.post_url("https://api.gemini.com" + path, headers=headers)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
path = "/v1/order/new"
#order_token = "" % datetime.datetime.now()
opts = []
if type == 'fill-or-kill':
opts = ['immediate-or-cancel']
type = "limit"
if type == 'post-only':
opts = ["maker-or-cancel"]
type = 'limit'
if type != "limit":
raise NotImplementedError("Only limit orders currently supported")
params = {
#"client_order_id": order_token, # A client-specified order token
"symbol": self.make_market(crypto, fiat), # Or any symbol from the /symbols api
"amount": eight_decimal_places(amount), # Once again, a quoted number
"price": str(price),
"side": side, # must be "buy" or "sell"
"type": "exchange %s" % type, # the order type; only "exchange limit" supported
"options": opts # execution options; may be omitted for a standard limit order
}
resp = self._auth_request(path, params).json()
return resp
make_order.supported_types = ['post-only', 'limit', 'fill-or-kill']
make_order.minimums = {}
def get_deposit_address(self, currency):
path = "/v1/deposit/%s/newAddress" % currency.lower()
resp = self._auth_request(path, {})
return resp.json()['address']
def get_exchange_balance(self, currency, type="available"):
path = "/v1/balances"
resp = self._auth_request(path, {})
try:
match = [x for x in resp.json() if x['currency'] == currency.upper()][0]
except IndexError:
return 0
return float(match[type])
def get_total_exchange_balances(self):
path = "/v1/balances"
resp = self._auth_request(path, {})
return {
x['currency'].lower(): float(x['amount']) for x in resp.json()
if float(x['amount']) > 0
}
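# A minimal, self-contained sketch (not part of the original module) of the
# request-signing scheme implemented by Gemini._make_signature/_auth_request
# above: the JSON payload is base64-encoded and signed with HMAC-SHA384.
# The credentials below are hypothetical placeholders.
def _gemini_signing_example():
    import base64
    import hashlib
    import hmac
    import json
    api_key = "example-key"
    api_secret = b"example-secret"
    params = {"request": "/v1/balances", "nonce": 1}
    payload = base64.b64encode(json.dumps(params).encode())
    signature = hmac.new(api_secret, payload, hashlib.sha384).hexdigest()
    # These are the three headers _auth_request attaches to the POST:
    return {
        "X-GEMINI-APIKEY": api_key,
        "X-GEMINI-PAYLOAD": payload,
        "X-GEMINI-SIGNATURE": signature,
    }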
class CexIO(Service):
service_id = 64
api_homepage = "https://cex.io/rest-api"
name = "Cex.io"
exchange_fee_rate = 0.002
def __init__(self, user_id=None, **kwargs):
self.user_id = user_id
super(CexIO, self).__init__(**kwargs)
def check_error(self, response):
super(CexIO, self).check_error(response)
j = response.json()
if 'error' in j:
raise ServiceError("CexIO returned error: %s" % j['error'])
def get_current_price(self, crypto, fiat):
url = "https://cex.io/api/ticker/%s/%s" % (crypto.upper(), fiat.upper())
response = self.get_url(url).json()
return float(response['last'])
def get_pairs(self):
url = "https://cex.io/api/currency_limits"
r = self.get_url(url).json()['data']['pairs']
return [("%s-%s" % (x['symbol1'], x['symbol2'])).lower() for x in r]
def get_orderbook(self, crypto, fiat):
url = "https://cex.io/api/order_book/%s/%s/" % (crypto.upper(), fiat.upper())
resp = self.get_url(url).json()
return {
'bids': [(x[0], x[1]) for x in resp['bids']],
'asks': [(x[0], x[1]) for x in resp['asks']]
}
def _make_signature(self, nonce):
message = nonce + self.user_id + self.api_key
return hmac.new(self.api_secret, message, hashlib.sha256).hexdigest().upper()
def _auth_request(self, url, params):
nonce = make_standard_nonce()
params['nonce'] = nonce
params['signature'] = self._make_signature(nonce)
params['key'] = self.api_key
return self.post_url(url, params)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
url = "https://cex.io/api/place_order/%s/%s" % (crypto.upper(), fiat.upper())
if type in ('limit', 'market'):
print("about to send amount to cex:", eight_decimal_places(amount))
params = {
'type': side,
'amount': eight_decimal_places(amount),
}
if type == 'market':
params['order_type'] = 'market'
if type == 'limit':
params['price'] = price
else:
raise Exception("Order with type=%s not yet supported" % type)
resp = self._auth_request(url, params).json()
return resp['id']
make_order.supported_types = ['limit', 'market']
make_order.minimums = {'btc': 0.01}
def list_orders(self):
url = "https://cex.io/api/open_orders/"
resp = self._auth_request(url, {})
return resp.json()
def cancel_order(self, order_id):
url = "https://cex.io/api/cancel_order/"
resp = self._auth_request(url, {'id': order_id})
return resp.content == 'true'
def get_deposit_address(self, crypto):
url = "https://cex.io/api/get_address"
resp = self._auth_request(url, {'currency': crypto.upper()})
return resp.json()['data']
def get_exchange_balance(self, currency, type="available"):
url = "https://cex.io/api/balance/"
resp = self._auth_request(url, {})
try:
return float(resp.json()[currency.upper()]['available'])
except KeyError:
return 0
def get_total_exchange_balances(self):
url = "https://cex.io/api/balance/"
resp = self._auth_request(url, {})
return {
code.lower(): float(data['available']) for code, data in resp.json().items()
if code not in ['timestamp', 'username'] and float(data['available']) > 0
}
class Poloniex(Service):
service_id = 65
api_homepage = "https://poloniex.com/support/api/"
name = "Poloniex"
exchange_fee_rate = 0.0025
def check_error(self, response):
j = response.json()
if 'error' in j:
raise ServiceError("Poloniex returned error: %s" % j['error'])
super(Poloniex, self).check_error(response)
def fix_symbol(self, symbol):
symbol = symbol.lower()
if symbol == 'usd':
return 'usdt'
if symbol == 'xlm':
return 'str'
return symbol
def reverse_fix_symbol(self, symbol):
symbol = symbol.lower()
if symbol == 'usdt':
return 'usd'
if symbol == 'str':
return 'xlm'
return symbol
def get_current_price(self, crypto, fiat):
url = "https://poloniex.com/public?command=returnTicker"
response = self.get_url(url).json()
is_usd = False
if fiat.lower() == 'usd':
fiat = 'usdt'
is_usd = True
find_pair = "%s_%s" % (fiat.upper(), crypto.upper())
for pair, data in response.items():
if pair == find_pair:
return float(data['last'])
reverse_pair = "%s_%s" % (crypto.upper(), fiat.upper())
for pair, data in response.items():
if pair == reverse_pair:
return 1 / float(data['last'])
btc_pair = "BTC_%s" % crypto.upper()
if is_usd and btc_pair in response:
btc_rate = float(response['USDT_BTC']['last'])
fiat_exchange = float(response[btc_pair]['last'])
return fiat_exchange * btc_rate
raise SkipThisService("Pair %s not supported" % find_pair)
def get_pairs(self):
url = "https://poloniex.com/public?command=returnTicker"
r = self.get_url(url).json()
ret = []
for pair in r.keys():
fiat, crypto = pair.lower().split('_')
ret.append("%s-%s" % (self.reverse_fix_symbol(crypto), self.reverse_fix_symbol(fiat)))
return ret
def get_orderbook(self, crypto, fiat):
url = "https://poloniex.com/public?command=returnOrderBook¤cyPair=%s" % (
self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), x[1]) for x in resp['asks']],
'bids': [(float(x[0]), x[1]) for x in resp['bids']]
}
def make_market(self, crypto, fiat):
return ("%s_%s" % (self.fix_symbol(fiat), self.fix_symbol(crypto))).upper()
def _make_signature(self, args):
str_args = urlencode(args)
return hmac.new(self.api_secret, str_args, hashlib.sha512).hexdigest()
def _auth_request(self, args):
url = "https://poloniex.com/tradingApi"
args["nonce"] = make_standard_nonce()
headers = {
'Sign': self._make_signature(args),
'Key': self.api_key
}
return self.post_url(url, args, headers=headers)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
params = {}
if type == "fill-or-kill":
params = {'fillOrKill': 1}
if type == 'post-only':
params = {'postOnly': 1}
params.update({
"command": side,
"currencyPair": self.make_market(crypto, fiat),
"rate": price,
"amount": eight_decimal_places(amount)
})
r = self._auth_request(params)
return r.json()['orderNumber']
make_order.supported_types = ['limit', 'fill-or-kill', 'post-only']
make_order.minimums = {}
def cancel_order(self, order_id):
r = self._auth_request({
"command": "cancelOrder",
"orderNumber": order_id
})
        return r.json()['success'] == 1
def list_orders(self, crypto=None, fiat=None):
if not crypto and not fiat:
pair = "all"
else:
            pair = self.make_market(crypto, fiat)
resp = self._auth_request({
"command": "returnOpenOrders",
"currencyPair": pair,
})
return resp.json()
def initiate_withdraw(self, crypto, amount, address):
resp = self._auth_request({
"command": "withdrawl",
"currency": crypto,
"amount": eight_decimal_places(amount),
"address": address
})
return resp.json()
def get_deposit_address(self, currency):
c = self.fix_symbol(currency)
resp = self._auth_request({"command": "returnDepositAddresses"})
address = resp.json().get(c.upper())
if not address:
return self.generate_new_deposit_address(c)
return address
def generate_new_deposit_address(self, crypto):
resp = self._auth_request({
"command": "generateNewAddress",
"currency": crypto.upper()
})
return resp.json()['response']
def get_exchange_balance(self, currency, type="available"):
resp = self._auth_request({"command": "returnBalances"})
return float(resp.json().get(self.reverse_fix_symbol(currency).upper()))
def get_total_exchange_balances(self):
resp = self._auth_request({"command": "returnBalances"})
return {
self.reverse_fix_symbol(code): float(bal) for code, bal in resp.json().items()
if float(bal) > 0
}
class Bittrex(Service):
service_id = 66
api_homepage = "https://bittrex.com/home/api"
exchange_fee_rate = 0.0025
def check_error(self, response):
j = response.json()
if not j['success']:
raise ServiceError("Bittrex returned error: %s" % j['message'])
super(Bittrex, self).check_error(response)
def fix_symbol(self, symbol):
if symbol.lower() == 'usd':
return 'usdt'
if symbol == 'xmy':
return 'myr'
if symbol == 'bcc':
raise SkipThisService("BCC not supported (maybe you want BCH?)")
if symbol == 'bch':
return 'bcc'
return symbol.lower()
def reverse_fix_symbol(self, symbol):
symbol = symbol.lower()
if symbol == 'usdt':
return 'usd'
if symbol == 'bcc':
return 'bch'
return symbol
def make_market(self, crypto, fiat):
return "%s-%s" % (
self.fix_symbol(fiat).upper(),
self.fix_symbol(crypto).upper()
)
def get_current_price(self, crypto, fiat):
url = "https://bittrex.com/api/v1.1/public/getticker?market=%s" % (
self.make_market(crypto, fiat)
)
r = self.get_url(url).json()
return r['result']['Last']
def get_orderbook(self, crypto, fiat):
url = "https://bittrex.com/api/v1.1/public/getorderbook?market=%s&type=both" % (
self.make_market(crypto, fiat)
)
r = self.get_url(url).json()['result']
return {
'bids': [(x['Rate'], x['Quantity']) for x in r['buy']],
'asks': [(x['Rate'], x['Quantity']) for x in r['sell']],
}
def get_pairs(self):
url = "https://bittrex.com/api/v1.1/public/getmarkets"
r = self.get_url(url).json()['result']
ret = []
for x in r:
crypto = x['MarketCurrency'].lower()
fiat = x['BaseCurrency'].lower()
if fiat == 'usdt':
fiat = 'usd'
ret.append("%s-%s" % (crypto, fiat))
return ret
def _make_signature(self, url):
return hmac.new(
self.api_secret.encode(), url.encode(), hashlib.sha512
).hexdigest()
def _auth_request(self, path, params):
if not self.api_key or not self.api_secret:
raise Exception("Trade API requires an API key and secret.")
params["apikey"] = self.api_key
params["nonce"] = make_standard_nonce()
url = "https://bittrex.com/api" + path + "?" + urlencode(params)
return self.get_url(url, headers={"apisign": self._make_signature(url)})
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
path = "/v1.1/market/%slimit" % side
r = self._auth_request(path, {
'market': self.make_market(crypto, fiat),
'quantity': eight_decimal_places(amount),
'rate': price
})
return r.json()['result']['uuid']
make_order.supported_types = ['limit']
make_order.minimums = {}
def cancel_order(self, order_id):
path = "/v1.1/market/cancel"
r = self._auth_request(path, {'uuid': order_id})
        return r.json()['success']
def get_exchange_balance(self, currency, type="available"):
        currency = self.fix_symbol(currency)
        path = "/v1.1/account/getbalance"
        resp = self._auth_request(path, {'currency': currency}).json()['result']
return resp[type.capitalize()] or 0
def get_total_exchange_balances(self):
path = "/v1.1/account/getbalances"
resp = self._auth_request(path, {}).json()['result']
return {
self.reverse_fix_symbol(x['Currency']): x['Balance'] for x in resp
if x['Balance'] > 0
}
def get_deposit_address(self, crypto):
path = "/v1.1/account/getdepositaddress"
resp = self._auth_request(path, {'currency': self.fix_symbol(crypto)})
return resp.json()['result']['Address']
def initiate_withdraw(self, crypto, amount, address):
path = "/v1.1/account/withdraw"
resp = self._auth_request(path, {
'currency': self.fix_symbol(crypto),
'quantity': eight_decimal_places(amount),
'address': address
})
return resp.json()
class Huobi(Service):
service_id = 67
api_homepage = "https://github.com/huobiapi/API_Docs_en/wiki"
name = "Huobi"
def check_error(self, response):
if response.status_code != 200:
j = response.json()
raise ServiceError("Huobi returned error: %s" % j['error'])
super(Huobi, self).check_error(response)
def get_current_price(self, crypto, fiat):
if fiat.lower() == "cny":
fiat = 'static'
elif fiat.lower() == 'usd':
pass
else:
raise SkipThisService("CNY and USD only fiat supported")
url = "http://api.huobi.com/%smarket/detail_%s_json.js" % (
fiat.lower(), crypto.lower()
)
r = self.get_url(url).json()
return r['p_last']
class BTER(Service):
service_id = 25
api_homepage = "https://bter.com/api"
name = "BTER"
def fix_symbol(self, symbol):
if symbol == 'bch':
return 'bcc'
return symbol
def make_market(self, crypto, fiat):
return ("%s_%s" % (self.fix_symbol(crypto), fiat)).lower()
def get_current_price(self, crypto, fiat):
url = "http://data.bter.com/api/1/ticker/%s" % self.make_market(crypto, fiat)
response = self.get_url(url).json()
if response.get('result', '') == 'false':
raise ServiceError("BTER returned error: " + r['message'])
return float(response['last'] or 0)
def get_pairs(self):
url = "http://data.bter.com/api/1/pairs"
r = self.get_url(url).json()
return [x.replace("_", "-") for x in r]
def get_orderbook(self, crypto, fiat):
url = "http://data.bter.com/api2/1/orderBook/%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']],
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']],
}
def _make_signature(self, params):
return hmac.new(
self.api_secret, urlencode(params), hashlib.sha512
).hexdigest()
def _auth_request(self, url, params):
raise Exception("Not tested")
return self.post_url(url, headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.api_key,
'Sign': self._make_signature(params)
})
def get_exchange_balance(self, currency, type="available"):
url = "https://api.bter.com/api2/1/private/balances"
resp = self._auth_request(url, {})
for curr, bal in resp.json()[type].items():
if curr == currency.upper():
return float(bal)
def get_deposit_address(self, currency):
url = "https://bter.com/api2/1/private/depositAddress"
resp = self._auth_request(url, {'currency': currency.upper()})
return resp.json()['addr']
class Wex(Service):
service_id = 7
api_homepage = "https://wex.nz/api/documentation"
name = "Wex"
exchange_fee_rate = 0.002
def check_error(self, response):
try:
j = response.json()
except:
raise ServiceError("Wex returned error: %s" % response.content)
if 'error' in j:
raise ServiceError("Wex returned error: %s" % j['error'])
super(Wex, self).check_error(response)
def make_market(self, crypto, fiat):
return "%s_%s" % (
self.fix_symbol(crypto).lower(),
self.fix_symbol(fiat).lower()
)
def fix_symbol(self, symbol):
if symbol == 'dash':
return 'dsh'
return symbol
def reverse_fix_symbol(self, symbol):
if symbol == 'dsh':
return 'dash'
return symbol
def _fix_fiat_symbol(self, fiat):
return fiat
def get_current_price(self, crypto, fiat):
pair = self.make_market(crypto, fiat)
url = "https://wex.nz/api/3/ticker/" + pair
response = self.get_url(url).json()
return response[pair]['last']
def get_pairs(self):
url = "https://wex.nz/api/3/info"
r = self.get_url(url).json()
return [x.replace('_', '-') for x in r['pairs'].keys()]
def get_orderbook(self, crypto, fiat):
m = self.make_market(crypto, fiat)
url = "https://wex.nz/api/3/depth/%s" % m
resp = self.get_url(url).json()
return {
'bids': [(x[0], x[1]) for x in resp[m]['bids']],
'asks': [(x[0], x[1]) for x in resp[m]['asks']]
}
def _make_signature(self, params):
return hmac.new(
self.api_secret, urlencode(params), hashlib.sha512
).hexdigest()
def _auth_request(self, params):
# max nonce wex will accept is 4294967294
params['nonce'] = make_stateful_nonce(self.name)
headers = {"Key": self.api_key, "Sign": self._make_signature(params)}
return self.post_url("https://wex.nz/tapi", params, headers=headers)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
params = {
'method': 'Trade',
'pair': self.make_market(crypto, fiat),
'type': side,
'rate': price,
'amount': eight_decimal_places(amount),
}
return self._auth_request(params)
make_order.supported_types = ['limit']
make_order.minimums = {'btc': 0.001, 'ltc': 0.1}
def get_deposit_address(self, crypto):
params = {'coinName': crypto.lower(), 'method': 'CoinDepositAddress'}
resp = self._auth_request(params).json()
return resp['return']['address']
def get_exchange_balance(self, currency, type="available"):
resp = self._auth_request({'method': 'getInfo'}).json()
try:
return resp['return']['funds'][self.fix_symbol(currency).lower()]
        except KeyError:
return 0
def get_total_exchange_balances(self):
resp = self._auth_request({'method': 'getInfo'}).json()['return']['funds']
return {
self.reverse_fix_symbol(code): bal for code, bal in resp.items()
if not code.endswith("et") and bal > 0
}
def initiate_withdraw(self, currency, amount, address):
resp = self._auth_request({
'method': 'WithdrawCoin',
'coinName': self.fix_symbol(currency),
'amount': amount,
'address': address,
})
return resp.json()
class ViaBTC(Service):
service_id = 116
def get_current_price(self, crypto, fiat):
url = "https://www.viabtc.com/api/v1/market/ticker?market=%s%s" % (
crypto.upper(), fiat.upper()
)
return float(self.get_url(url).json()['data']['ticker']['last'])
class CryptoDao(Service):
service_id = 115
api_homepage = "https://cryptodao.com/doc/api"
def get_current_price(self, crypto, fiat):
url = "https://cryptodao.com/api/ticker?source=%s&target=%s" % (
fiat.upper(), crypto.upper()
)
r = self.get_url(url).json()
return r['last']
def get_orderbook(self, crypto, fiat):
url = "https://cryptodao.com/api/depth?source=%s&target=%s" % (
fiat.upper(), crypto.upper()
)
resp = self.get_url(url).json()
return resp
class HitBTC(Service):
service_id = 109
api_homepage = "https://hitbtc.com/api"
exchange_fee_rate = 0.001
def check_error(self, response):
j = response.json()
if response.status_code in (400, 401) and 'error' in j:
e = j['error']
raise SkipThisService("HitBTC returned %s %s: %s" % (
e['code'], e['message'], e['description']
))
if 'code' in j:
raise SkipThisService("HitBTC returned %s: %s" % (j['code'], j['message']))
super(HitBTC, self).check_error(response)
def fix_symbol(self, symbol):
return symbol.lower()
def make_market(self, crypto, fiat):
return ("%s%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def get_pairs(self):
url = 'https://api.hitbtc.com/api/1/public/symbols'
r = self.get_url(url).json()['symbols']
return [("%s-%s" % (x['commodity'], x['currency'])).lower() for x in r]
def get_current_price(self, crypto, fiat):
url = "https://api.hitbtc.com/api/1/public/%s/ticker" % self.make_market(crypto, fiat)
r = self.get_url(url).json()
return float(r['last'])
def get_orderbook(self, crypto, fiat):
url = "https://api.hitbtc.com/api/1/public/%s/orderbook" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']],
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']]
}
def _auth_request(self, path, params, method="post"):
path = path + "?" + urlencode({
'nonce': make_standard_nonce(),
'apikey': self.api_key
})
headers = {"X-Signature": self._make_signature(path, params)}
return self._external_request(
method, "https://api.hitbtc.com" + path,
params, headers=headers
)
def _make_signature(self, path, params):
msg = path + urlencode(params)
return hmac.new(self.api_secret, msg, hashlib.sha512).hexdigest()
def get_exchange_balance(self, currency, type="available"):
resp = self._auth_request("/api/1/trading/balance", {}, method="get").json()
c = self.fix_symbol(currency).upper()
try:
matched = [x for x in resp['balance'] if x['currency_code'] == c][0]
except IndexError:
return 0
if type == 'available':
return float(matched['cash'])
        raise NotImplementedError("balance type '%s' is not supported" % type)
def get_total_exchange_balances(self):
resp = self._auth_request("/api/1/trading/balance", {}, method="get")
return {
self.fix_symbol(x['currency_code']): float(x['cash'])
for x in resp.json()['balance'] if float(x['cash']) > 0
}
def get_deposit_address(self, currency):
path = "/api/1/payment/address/%s" % self.fix_symbol(currency).upper()
resp = self._auth_request(path, {}, method="get").json()
return resp['address']
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
path = "/api/1/trading/new_order"
import random, string
clientOrderId = "".join(random.choice(string.digits + string.ascii_lowercase) for _ in range(30))
params = {
'symbol': self.make_market(crypto, fiat),
'side': side,
'price': price,
'quantity': eight_decimal_places(amount),
'clientOrderId': clientOrderId
}
if type == 'fill-or-kill':
params['timeInForce'] = 'FOK'
resp = self._auth_request(path, params).json()
return resp
make_order.minimums = {}
make_order.supported_types = ['fill-or-kill', 'limit']
class Liqui(Service):
service_id = 106
def parse_market(self, market):
crypto, fiat = market.split("_")
if fiat == 'usdt':
fiat = 'usd'
return crypto, fiat
def get_pairs(self):
url = "https://api.liqui.io/api/3/info"
r = self.get_url(url).json()['pairs']
return ["%s-%s" % self.parse_market(x) for x in r.keys()]
def make_market(self, crypto, fiat):
return "%s_%s" % (
self.fix_symbol(crypto).lower(), self.fix_symbol(fiat).lower()
)
def fix_symbol(self, symbol):
if symbol == 'usd':
return 'usdt'
return symbol
def get_current_price(self, crypto, fiat):
pair = self.make_market(crypto, fiat)
url = "https://api.liqui.io/api/3/ticker/%s" % pair
return self.get_url(url).json()[pair]['last']
def get_orderbook(self, crypto, fiat):
pair = self.make_market(crypto, fiat)
url = "https://api.liqui.io/api/3/depth/%s" % pair
return self.get_url(url).json()[pair]
def _make_signature(self, params):
return hmac.new(
self.api_secret, "?" + urlencode(params), hashlib.sha512
).hexdigest()
def _auth_request(self, params):
params['nonce'] = make_standard_nonce(small=True)
headers = {
'Key':self.api_key,
'Sign': self._make_signature(params)
}
return self.post_url('https://api.liqui.io', params, headers=headers)
def get_exchange_balance(self, currency):
resp = self._auth_request({'method': 'getInfo'}).json()
return resp
def list_orders(self, crypto=None, fiat=None):
resp = self._auth_request({'method': 'ActiveOrders'}).json()
return resp
class CoinOne(Service):
service_id = 105
def get_pairs(self):
return ['btc-krw', 'eth-krw', 'xrp-krw', 'etc-krw']
def get_current_price(self, crypto, fiat):
url = "https://api.coinone.co.kr/ticker?currency=%s" % crypto.lower()
return float(self.get_url(url).json()['last'])
class CryptoBG(Service):
service_id = 102
def get_current_price(self, crypto, fiat):
url = "https://crypto.bg/api/v1/public_rates"
if crypto != 'btc' or fiat != 'bgn':
raise SkipThisService("Only btc-bgn supported")
return float(self.get_url(url).json()['rates']['bitcoin']['bid'])
class Bitso(Service):
service_id = 101
def get_current_price(self, crypto, fiat):
url = "https://api.bitso.com/v3/ticker/?book=%s_%s" % (crypto, fiat)
r = self.get_url(url.lower()).json()
        return float(r['payload']['last'])
def get_pairs(self):
url = "https://api.bitso.com/v3/available_books/"
r = self.get_url(url).json()['payload']
return [x['book'].replace("_", '-') for x in r]
class TradeSatoshi(Service):
service_id = 96
def get_pairs(self):
url = "https://tradesatoshi.com/api/public/getmarketsummaries"
r = self.get_url(url).json()
return [x['market'].replace("_", '-').lower() for x in r['result']]
def get_current_price(self, crypto, fiat):
url = "https://tradesatoshi.com/api/public/getticker?market=%s_%s" % (
crypto.upper(), fiat.upper()
)
return self.get_url(url).json()['result']['last']
class UseCryptos(Service):
service_id = 95
def get_pairs(self):
url = "https://usecryptos.com/jsonapi/pairs"
r = self.get_url(url).json()
return r
def get_current_price(self, crypto, fiat):
pair = "%s-%s" % (crypto.lower(), fiat.lower())
url = "https://usecryptos.com/jsonapi/ticker/%s" % pair
return self.get_url(url).json()['lastPrice']
class BitcoinIndonesia(Service):
service_id = 94
api_homepage = "https://blog.bitcoin.co.id/wp-content/uploads/2014/03/API-Documentation-Bitcoin.co_.id_.pdf"
def get_current_price(self, crypto, fiat):
url = "https://vip.bitcoin.co.id/api/%s_%s/ticker" % (crypto.lower(), fiat.lower())
return float(self.get_url(url).json()['ticker']['last'])
class Kraken(Service):
service_id = 93
def check_error(self, response):
if response.json()['error']:
raise ServiceError("Kraken returned error: %s" % response.json()['error'][0])
super(Kraken, self).check_error(response)
def get_pairs(self):
url = "https://api.kraken.com/0/public/AssetPairs"
r = self.get_url(url).json()['result']
ret = []
for name, data in r.items():
crypto = data['base'].lower()
if len(crypto) == 4 and crypto.startswith('x'):
crypto = crypto[1:]
fiat = data['quote'].lower()
if fiat.startswith("z"):
fiat = fiat[1:]
if crypto == 'xbt':
crypto = 'btc'
if fiat == 'xxbt':
fiat = 'btc'
if fiat == 'xeth':
fiat = 'eth'
ret.append("%s-%s" % (crypto, fiat))
return list(set(ret))
def get_current_price(self, crypto, fiat):
if crypto != 'bch':
crypto = "x" + crypto.lower()
if crypto == 'xbtc':
crypto = 'xxbt'
fiat = "z" + fiat.lower()
if fiat == 'zbtc':
fiat = 'xxbt'
else:
# bch pairs have completely different format for some reason
# my god kraken's api is terrible
if fiat.lower() == 'btc':
fiat = 'xbt'
pair = "%s%s" % (crypto.upper(), fiat.upper())
url = "https://api.kraken.com/0/public/Ticker?pair=%s" % pair
r = self.get_url(url).json()['result']
return float(r[pair]['c'][0])
class BTC38(Service):
service_id = 92
api_homepage = "http://www.btc38.com/help/document/2581.html"
def get_current_price(self, crypto, fiat):
url = 'http://api.btc38.com/v1/ticker.php?c=%s&mk_type=%s' % (crypto, fiat)
return self.get_url(url).json()['ticker']['last']
def get_pairs(self):
url = "http://api.btc38.com/v1/ticker.php?c=all&mk_type=cny"
cny_bases = self.get_url(url).json().keys()
url = "http://api.btc38.com/v1/ticker.php?c=all&mk_type=btc"
btc_bases = self.get_url(url).json().keys()
return ["%s-cny" % x for x in cny_bases] + ["%s-btc" % x for x in btc_bases]
class BleuTrade(Service):
service_id = 91
api_homepage = 'https://bleutrade.com/help/API'
def get_pairs(self):
url = 'https://bleutrade.com/api/v2/public/getmarkets'
r = self.get_url(url).json()['result']
return [x['MarketName'].lower().replace("_", '-') for x in r]
def get_current_price(self, crypto, fiat):
url = "https://bleutrade.com/api/v2/public/getticker?market=%s_%s" % (
crypto.upper(), fiat.upper()
)
r = self.get_url(url).json()
return float(r['result'][0]['Last'])
class xBTCe(Service):
service_id = 90
name = "xBTCe"
api_homepage = "https://www.xbtce.com/tradeapi"
def get_current_price(self, crypto, fiat):
if crypto.lower() == 'dash':
crypto = "dsh"
if fiat.lower() == 'rur':
fiat = 'rub'
if fiat.lower() == 'cny':
fiat = 'cnh'
pair = "%s%s" % (crypto.upper(), fiat.upper())
url = "https://cryptottlivewebapi.xbtce.net:8443/api/v1/public/ticker/%s" % pair
r = self.get_url(url).json()
try:
return r[0]['LastSellPrice']
except IndexError:
raise ServiceError("Pair not found")
def get_pairs(self):
url = "https://cryptottlivewebapi.xbtce.net:8443/api/v1/public/symbol"
r = self.get_url(url).json()
ret = []
for pair in r:
crypto = pair['MarginCurrency'].lower()
fiat = pair['ProfitCurrency'].lower()
if crypto.lower() == 'dsh':
crypto = "dash"
if fiat.lower() == 'rub':
fiat = 'rur'
if fiat == 'cnh':
fiat = 'cny'
ret.append(("%s-%s" % (crypto, fiat)))
return list(set(ret))
class Cryptopia(Service):
service_id = 82
api_homepage = "https://www.cryptopia.co.nz/Forum/Thread/255"
exchange_fee_rate = 0.002
def check_error(self, response):
r = response.json()
        error = r.get('Error')
        if error is not None:
            raise ServiceError("Cryptopia returned error: %s" % error)
super(Cryptopia, self).check_error(response)
def fix_symbol(self, symbol):
if symbol.lower() in ['nzd', 'usd']:
symbol += "t"
return symbol
def reverse_fix_symbol(self, symbol):
symbol = symbol.lower()
if symbol in ['nzdt', 'usdt']:
symbol = symbol[:-1]
return symbol
def make_market(self, crypto, fiat):
return "%s_%s" % (
self.fix_symbol(crypto).upper(), self.fix_symbol(fiat).upper()
)
def get_current_price(self, crypto, fiat):
url = "https://www.cryptopia.co.nz/api/GetMarket/%s" % self.make_market(crypto, fiat)
r = self.get_url(url).json()
return r['Data']['LastPrice']
def get_pairs(self):
url = "https://www.cryptopia.co.nz/api/GetTradePairs"
r = self.get_url(url).json()['Data']
ret = []
for pair in r:
crypto = pair['Symbol']
fiat = pair['BaseSymbol']
if fiat.lower() == 'usdt':
fiat = 'usd'
ret.append(("%s-%s" % (crypto, fiat)).lower())
return ret
def get_orderbook(self, crypto, fiat):
url = "https://www.cryptopia.co.nz/api/GetMarketOrders/%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(x['Price'], x['Total']) for x in resp['Data']['Buy']],
'asks': [(x['Price'], x['Total']) for x in resp['Data']['Sell']]
}
def _make_signature_header(self, url, params):
nonce = str(int(time.time()))
        post_data = json.dumps(params)
m = hashlib.md5()
m.update(post_data)
requestContentBase64String = base64.b64encode(m.digest())
signature = self.api_key + "POST" + quote_plus(url).lower() + nonce + requestContentBase64String
hmacsignature = base64.b64encode(
hmac.new(base64.b64decode(self.api_secret), signature, hashlib.sha256).digest()
)
header_value = "amx " + self.api_key + ":" + hmacsignature + ":" + nonce
return {'Authorization': header_value, 'Content-Type':'application/json; charset=utf-8' }
def _auth_request(self, method, args):
url = "https://www.cryptopia.co.nz/Api/" + method
return self.post_url(url, json=args, headers=self._make_signature_header(url, args))
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
args = {
'Market': ("%s/%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper(),
'Type': side,
'Rate': price,
'Amount': eight_decimal_places(amount)
}
resp = self._auth_request("SubmitTrade", args).json()
return resp['Data']['OrderId']
make_order.minimums = {}
make_order.supported_types = ['limit']
def get_exchange_balance(self, currency):
curr = self.fix_symbol(currency).upper()
try:
resp = self._auth_request('GetBalance', {'Currency': curr})
except ServiceError:
return 0
for item in resp.json()['Data']:
if item['Symbol'] == curr:
return item['Total']
def get_total_exchange_balances(self):
resp = self._auth_request('GetBalance', {}).json()
return {
self.reverse_fix_symbol(x['Symbol']): x['Total'] for x in resp['Data']
if x['Total'] > 0
}
def get_deposit_address(self, currency):
curr = self.fix_symbol(currency).upper()
resp = self._auth_request('GetDepositAddress', {'Currency': curr})
return resp.json()['Data']['Address']
def initiate_withdraw(self, currency, amount, address):
curr = self.fix_symbol(currency).upper()
resp = self._auth_request('SubmitWithdraw', {
'Currency': curr,
'Address': address,
'Amount': amount
})
return resp.json()
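# A minimal, self-contained sketch (not part of the original module) of the
# "amx" authorization header built by Cryptopia._make_signature_header above:
# MD5-hash the JSON body, base64 it, concatenate key + method + lowercased
# quoted URL + nonce + body hash, then sign with HMAC-SHA256 using the
# base64-decoded secret. Credentials below are hypothetical placeholders.
def _cryptopia_auth_header_example():
    import base64
    import hashlib
    import hmac
    import json
    import time
    try:
        from urllib.parse import quote_plus  # python 3
    except ImportError:
        from urllib import quote_plus  # python 2
    api_key = "example-key"
    api_secret = base64.b64encode(b"example-secret").decode()  # stored base64-encoded
    url = "https://www.cryptopia.co.nz/Api/GetBalance"
    params = {"Currency": "BTC"}
    nonce = str(int(time.time()))
    body_hash = base64.b64encode(hashlib.md5(json.dumps(params).encode()).digest()).decode()
    message = api_key + "POST" + quote_plus(url).lower() + nonce + body_hash
    signature = base64.b64encode(
        hmac.new(base64.b64decode(api_secret), message.encode(), hashlib.sha256).digest()
    ).decode()
    return {
        "Authorization": "amx " + api_key + ":" + signature + ":" + nonce,
        "Content-Type": "application/json; charset=utf-8",
    }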
class YoBit(Service):
service_id = 77
api_homepage = "https://www.yobit.net/en/api/"
def get_current_price(self, crypto, fiat):
pair = "%s_%s" % (crypto.lower(), fiat.lower())
url = "https://yobit.net/api/3/ticker/%s" % pair
r = self.get_url(url).json()
if 'error' in r:
raise SkipThisService(r['error'])
return r[pair]['last']
def get_pairs(self):
url = 'https://yobit.net/api/3/info'
r = self.get_url(url).json()
return [x.replace("_", '-') for x in r['pairs'].keys()]
class Yunbi(Service):
service_id = 78
api_homepage = "https://yunbi.com/swagger"
def get_current_price(self, crypto, fiat):
if fiat.lower() != "cny":
raise SkipThisService("Only CNY markets supported")
url = "https://yunbi.com/api/v2/tickers/%s%s.json" % (crypto.lower(), fiat.lower())
r = self.get_url(url, headers={"Accept": "application/json"}).json()
return float(r['ticker']['last'])
def get_pairs(self):
url = "https://yunbi.com/api/v2/markets.json"
r = self.get_url(url).json()
ret = []
for pair in r:
ret.append(pair['name'].replace("/", '-').lower())
return ret
class Vircurex(Service):
service_id = 70
base_url = "https://api.vircurex.com/api"
api_homepage = "https://vircurex.com/welcome/api"
def check_error(self, response):
j = response.json()
if j['status'] != 0:
raise ServiceError("Vircurex returned error: %s" % j['status_text'])
super(Vircurex, self).check_error(response)
def get_current_price(self, crypto, fiat):
if crypto == 'blk':
crypto = 'bc'
url = "%s/get_last_trade.json?base=%s&alt=%s" % (
self.base_url, crypto.upper(), fiat.upper()
)
r = self.get_url(url).json()
return float(r['value'])
def get_pairs(self):
url = "%s/get_info_for_currency.json" % self.base_url
r = self.get_url(url).json()
ret = []
for fiat, data in r.items():
if fiat == 'status':
continue
for crypto, exchange_data in data.items():
pair = "%s-%s" % (crypto.lower(), fiat.lower())
ret.append(pair)
return ret
class LiveCoin(Service):
service_id = 110
base_url = "https://api.livecoin.net"
api_homepage = "https://www.livecoin.net/api/public"
def get_pairs(self):
url = "%s/exchange/ticker" % (self.base_url)
r = self.get_url(url).json()
return [x['symbol'].replace('/', '-').lower() for x in r]
def get_current_price(self, crypto, fiat):
url = "%s/exchange/ticker/?currencyPair=%s/%s" % (
self.base_url, crypto.upper(), fiat.upper()
)
return self.get_url(url).json()['last']
class Bithumb(Service):
service_id = 129
def get_current_price(self, crypto, fiat):
if fiat != 'krw':
raise SkipThisService("Only KRW supported")
url = "https://api.bithumb.com/public/ticker/%s" % crypto.upper()
resp = self.get_url(url).json()
return float(resp['data']['average_price'])
def get_orderbook(self, crypto, fiat):
if fiat != 'krw':
raise SkipThisService("Only KRW supported")
url = "https://api.bithumb.com/public/orderbook/%s" % crypto
resp = self.get_url(url).json()['data']
return {
'bids': [(float(x['price']), float(x['quantity'])) for x in resp['bids']],
'asks': [(float(x['price']), float(x['quantity'])) for x in resp['asks']]
}
class Binance(Service):
service_id = 130
def check_error(self, response):
j = response.json()
if 'code' in j:
raise ServiceError("Binance returned error: %s %s" % (j['code'], j['msg']))
super(Binance, self).check_error(response)
def make_market(self, crypto, fiat):
return ("%s%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def parse_market(self, market):
market = market.lower()
if market.endswith("usdt"):
crypto, fiat = market[:-4], "usd"
elif market.endswith("eth"):
crypto, fiat = market[:-3], "eth"
elif market.endswith("btc"):
crypto, fiat = market[:-3], "btc"
else:
crypto, fiat = market[:-3], market[-3:]
if crypto == 'iota':
crypto = 'miota'
if crypto == 'bcc':
crypto = 'bch'
return "%s-%s" % (crypto, fiat)
def fix_symbol(self, symbol):
if symbol.lower() == 'usd':
return 'usdt'
if symbol == 'miota':
return 'iota'
if symbol == 'bch':
return "bcc"
return symbol
def get_current_price(self, crypto, fiat):
url = "https://www.binance.com/api/v1/ticker/allPrices"
resp = self.get_url(url).json()
for data in resp:
if data['symbol'] == self.make_market(crypto, fiat):
return float(data['price'])
raise SkipThisService("Market not found")
def get_pairs(self):
url = "https://www.binance.com/api/v1/ticker/allPrices"
resp = self.get_url(url).json()
symbols = []
for data in resp:
symbols.append(self.parse_market(data['symbol']))
return symbols
def get_orderbook(self, crypto, fiat):
url = "https://www.binance.com/api/v1/depth"
resp = self.get_url(url, {'symbol': self.make_market(crypto, fiat)}).json()
return {
'bids': [(float(x[1]), float(x[0])) for x in resp['bids']],
'asks': [(float(x[1]), float(x[0])) for x in resp['asks']]
}
def _auth_request(self, path, params, method="post"):
params['timestamp'] = make_standard_nonce()
params['signature'] = self._make_signature(params)
headers = {"X-MBX-APIKEY": self.api_key}
return self._external_request(
method, "https://www.binance.com" + path, params, headers=headers
)
def _make_signature(self, params):
return hmac.new(
self.api_secret, urlencode(params), hashlib.sha256
).hexdigest()
def get_exchange_balance(self, currency, type="available"):
if type == 'available':
type = 'free'
else:
            type = 'locked'
path = "/api/v3/account"
resp = self._auth_request(path, {}, method="get").json()
for data in resp['balances']:
if data['asset'].lower() == currency.lower():
return float(data[type])
return 0
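# A minimal, self-contained sketch (not part of the original module) of the
# signed-request scheme used by Binance._auth_request/_make_signature above:
# HMAC-SHA256 over the urlencoded parameters, appended as 'signature', with
# the API key sent in the X-MBX-APIKEY header. Values are hypothetical.
def _binance_signing_example():
    import hashlib
    import hmac
    try:
        from urllib.parse import urlencode  # python 3
    except ImportError:
        from urllib import urlencode  # python 2
    api_secret = b"example-secret"
    params = {"timestamp": 1500000000000}
    params["signature"] = hmac.new(
        api_secret, urlencode(params).encode(), hashlib.sha256
    ).hexdigest()
    headers = {"X-MBX-APIKEY": "example-key"}
    return params, headers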
class BitFlyer(Service):
service_id = 111
api_homepage = "https://bitflyer.jp/API?top_link&footer"
def get_current_price(self, crypto, fiat):
url = "https://api.bitflyer.jp/v1/getticker?product_code=%s" % (
self.make_market(crypto, fiat)
)
r = self.get_url(url).json()
return r['ltp']
def get_pairs(self):
return ['btc-jpy', 'eth-btc', 'btc-usd']
def make_market(self, crypto, fiat):
return ("%s_%s" % (crypto, fiat)).upper()
def get_orderbook(self, crypto, fiat):
if fiat.lower() == 'jpy':
domain = "api.bitflyer.jp"
elif fiat.lower() == 'usd':
domain = "api.bitflyer.com"
else:
raise SkipThisService("Only jpy and usd suppported")
url = "https://%s/v1/getboard?product_code=%s" % (
domain, self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()
return {
'bids': [(x['price'], x['size']) for x in resp['bids']],
'asks': [(x['price'], x['size']) for x in resp['asks']],
}
def get_block(self, crypto, block_number=None, block_hash=None, latest=False):
url = "https://chainflyer.bitflyer.jp/v1/block/%s" % (
block_hash or
('height/%s' % block_number if block_number else None) or
('latest' if latest else 'None')
)
r = self.get_url(url).json()
return dict(
block_number=r['height'],
time=arrow.get(r['timestamp']).datetime,
#mining_difficulty=r['difficulty'],
hash=r['block_hash'],
next_hash=r.get('nextblockhash', None),
previous_hash=r.get('prev_block'),
txids=r['tx_hashes'],
version=r['version']
)
class BitX(Service):
service_id = 131
api_homepage = "https://www.luno.com/en/api"
def parse_market(self, market):
if market.startswith("XBT"):
crypto = "BTC"
fiat = market[3:]
return crypto, fiat
def fix_symbol(self, symbol):
if symbol.lower() == 'btc':
return 'XBT'
return symbol
def make_market(self, crypto, fiat):
return ("%s%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def get_current_price(self, crypto, fiat):
url = "https://api.mybitx.com/api/1/ticker?pair=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['last_trade'])
def get_pairs(self):
url = "https://api.mybitx.com/api/1/tickers"
resp = self.get_url(url).json()['tickers']
return [
("%s-%s" % self.parse_market(x['pair'])).lower() for x in resp
]
def get_orderbook(self, crypto, fiat):
url = "https://api.mybitx.com/api/1/orderbook?pair=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x['price']), float(x['volume'])) for x in resp['bids']],
'asks': [(float(x['price']), float(x['volume'])) for x in resp['asks']]
}
class ItBit(Service):
service_id = 132
def fix_symbol(self, symbol):
if symbol.lower() == 'btc':
return 'XBT'
return symbol
def make_market(self, crypto, fiat):
return ("%s%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def get_current_price(self, crypto, fiat):
url = "https://api.itbit.com/v1/markets/%s/ticker" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['lastPrice'])
def get_pairs(self):
return ['btc-usd', 'btc-sgd', 'btc-eur']
def get_orderbook(self, crypto, fiat):
url = "https://api.itbit.com/v1/markets/%s/order_book" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']],
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']]
}
def _make_signature(self):
pass # https://github.com/itbit/itbit-restapi-python/blob/master/itbit_api.py
class KuCoin(Service):
service_id = 133
symbol_mapping = (
('usd', 'usdt'),
)
def make_market(self, crypto, fiat):
return ("%s-%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def parse_market(self, market):
return super(KuCoin, self).parse_market(market.lower(), '-')
def get_pairs(self):
url = "https://api.kucoin.com/v1/market/open/symbols"
resp = self.get_url(url).json()
pairs = []
for pair in resp['data']:
crypto, fiat = self.parse_market(pair['symbol'])
pairs.append("%s-%s" % (crypto, fiat))
return pairs
def get_orderbook(self, crypto, fiat):
url = "https://api.kucoin.com/v1/open/orders?symbol=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()['data']
return {
'bids': [(x[0], x[1]) for x in resp['BUY']],
'asks': [(x[0], x[1]) for x in resp['SELL']]
}
def get_current_price(self, crypto, fiat):
url = "https://api.kucoin.com/v1/open/tick?symbol=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()['data']
return resp['lastDealPrice']
class CCex(Service):
service_id = 134
api_homepage = "https://c-cex.com/?id=api"
base_url = "https://c-cex.com/t/api_pub.html"
def check_error(self, response):
if response.content == "Maintenance...":
raise ServiceError("C-Cex is down for maintenance")
super(CCex, self).check_error(response)
def make_market(self, crypto, fiat):
return "%s-%s" % (crypto.lower(), fiat.lower())
def get_current_price(self, crypto, fiat):
url = "https://c-cex.com/t/%s.json" % (
self.make_market(crypto, fiat)
)
response = self.get_url(url).json()
return float(response['ticker']['lastprice'])
def get_pairs(self):
url = "https://c-cex.com/t/pairs.json"
r = self.get_url(url).json()
return r['pairs']
def get_orderbook(self, crypto, fiat):
url = "%s?a=getorderbook&market=%s&type=both" % (
self.base_url, self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()['result']
return {
'bids': [(x["Rate"], x["Quantity"]) for x in resp['buy']],
'asks': [(x["Rate"], x["Quantity"]) for x in resp['sell']]
}
def _auth_request(self, params):
params['nonce'] = make_standard_nonce()
params['apikey'] = self.api_key
url = "%s?%s" % (self.base_url, urlencode(params))
headers = {'apisign': self._make_signature(url)}
return self.get_url(url, headers=headers)
def _make_signature(self, url):
return hmac.new(
self.api_secret,
msg=url,
digestmod=hashlib.sha512
).hexdigest()
def get_exchange_balance(self, currency, type="available"):
resp = self._auth_request({'a': 'getbalance', 'currency': currency})
if type == 'available':
return resp.json()['Available']
def get_deposit_address(self, currency):
resp = self._auth_request({'a': 'getbalance', 'currency': currency})
return resp.json()['CryptoAddress']
class CoinEx(Service):
service_id = 138
def __init__(self, access_id=None, **kwargs):
self.access_id = access_id
return super(CoinEx, self).__init__(**kwargs)
def get_pairs(self):
url = "https://api.coinex.com/v1/market/list"
resp = self.get_url(url).json()['data']
return [("%s-%s" % (x[:-3], x[-3:])).lower() for x in resp]
def make_market(self, crypto, fiat):
return ("%s%s" % (crypto, fiat)).upper()
def get_current_price(self, crypto, fiat):
url = "https://api.coinex.com/v1/market/ticker?market=%s" % (
self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()
return float(resp['data']['ticker']['last'])
def _auth_request(self, url, params=None):
params['tonce'] = make_standard_nonce()
params['access_id'] = self.access_id
str_params = urlencode(sorted(params.items(), key=lambda x: x[0]))
to_sign = str_params + "&secret_key=%s" % self.api_secret
digest = hashlib.md5(to_sign).hexdigest().upper()
return self.get_url(url + "?" + str_params, headers={
'Content-Type': 'application/json',
'authorization': digest
})
def get_exchange_balance(self, crypto):
url = "https://api.coinex.com/v1/balance/"
resp = self._auth_request(url, {}).json()
return resp
class OKEX(Service):
service_id = 139
api_homepage = 'https://www.okex.com/rest_api.html'
symbol_mapping = (
('usd', 'usdt'),
)
def check_error(self, response):
j = response.json()
if 'error_code' in j:
raise ServiceError("OKEX returned error: %s" % j['error_code'])
super(OKEX, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "https://www.okex.com/api/v1/ticker.do?symbol=%s" % (
self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()
return float(resp['ticker']['last'])
class BitZ(Service):
service_id = 140
def check_error(self, response):
j = response.json()
if not j['code'] == 0:
raise ServiceError("BitZ returned error: %s: %s" % (
j['code'], j['msg']
))
super(BitZ, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "https://www.bit-z.com/api_v1/ticker?coin=%s" % (self.make_market(crypto, fiat))
resp = self.get_url(url).json()
return float(resp['data']['last'])
class Zaif(Service):
service_id = 141
def check_error(self, response):
j = response.json()
if 'error' in j:
raise ServiceError("Zaif returned error: %s" % (j['error']))
super(Zaif, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "https://api.zaif.jp/api/1/ticker/%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return resp['last']
class Korbit(Service):
service_id = 142
api_homepage = "https://apidocs.korbit.co.kr/"
def get_current_price(self, crypto, fiat):
url = "https://api.korbit.co.kr/v1/ticker?currency_pair=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['last'])
def get_orderbook(self, crypto, fiat):
url = "https://api.korbit.co.kr/v1/orderbook?currency_pair=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']],
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']]
}
class CoinEgg(Service):
service_id = 143
api_homepage = "https://www.coinegg.com/explain.api.html#partone"
def get_current_price(self, crypto, fiat):
if fiat.lower() != 'btc':
raise SkipThisService("Only BTC markets supported")
url = "https://api.coinegg.com/api/v1/ticker/?coin=%s" % crypto
resp = self.get_url(url).json()
return float(resp['last'])
def get_orderbook(self, crypto, fiat):
if fiat.lower() != 'btc':
raise SkipThisService("Only BTC markets supported")
url = "https://api.coinegg.com/api/v1/depth/"
resp = self.get_url(url).json()
return {
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']],
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']]
}
class ZB(Service):
service_id = 144
api_homepage = "https://www.zb.com/i/developer"
symbol_mapping = (
('usd', 'usdt'),
('bch', 'bcc')
)
def get_pairs(self):
url = "http://api.zb.com/data/v1/markets"
resp = self.get_url(url).json()
pairs = []
for pair in resp.keys():
pairs.append("%s-%s" % self.parse_market(pair))
return pairs
def get_current_price(self, crypto, fiat):
url = "http://api.zb.com/data/v1/ticker?market=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['ticker']['last'])
def get_orderbook(self, crypto, fiat):
url = "http://api.zb.com/data/v1/depth?market=%s&size=3" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
del resp['timestamp']
return resp
class CoinNest(Service):
service_id = 145
api_homepage = "https://www.coinnest.co.kr/doc/intro.html"
def get_current_price(self, crypto, fiat):
if fiat.lower() != 'krw':
raise SkipThisService("Only KRW markets supported")
url = "https://api.coinnest.co.kr/api/pub/ticker?coin=%s" % crypto
resp = self.get_url(url).json()
return resp['last']
def get_orderbook(self, crypto, fiat):
if fiat.lower() != 'krw':
raise SkipThisService("Only KRW markets supported")
url = "https://api.coinnest.co.kr/api/pub/depth?coin=%s" % crypto
resp = self.get_url(url).json()
del resp['result']
return resp
class BitBank(Service):
service_id = 147
api_homepage = "https://docs.bitbank.cc/"
symbol_mapping = (
('bch', 'bcc'),
)
def get_current_price(self, crypto, fiat):
url = "https://public.bitbank.cc/%s/ticker" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['data']['last'])
def get_orderbook(self, crypto, fiat):
url = "https://public.bitbank.cc/%s/depth" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), float(x[1])) for x in resp['data']['asks']],
'bids': [(float(x[0]), float(x[1])) for x in resp['data']['bids']]
}
class EXX(Service):
service_id = 148
symbol_mapping = (
('usd', 'usdt'),
('bch', 'bcc')
)
def get_pairs(self):
url = "https://api.exx.com/data/v1/markets"
resp = self.get_url(url).json()
pairs = []
for pair in resp.keys():
pairs.append("%s-%s" % self.parse_market(pair))
return pairs
def get_current_price(self, crypto, fiat):
url = "https://api.exx.com/data/v1/ticker?currency=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return float(resp['ticker']['last'])
def get_orderbook(self, crypto, fiat):
url = "https://api.exx.com/data/v1/depth?currency=%s" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), float(x[1])) for x in resp['asks']],
'bids': [(float(x[0]), float(x[1])) for x in resp['bids']]
}
class BL3P(Service):
service_id = 150
def check_error(self, response):
j = response.json()
if j['result'] == 'error':
d = j['data']
raise ServiceError("BL3P returned error: %s, %s" % (d['code'], d['message']))
super(BL3P, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "https://api.bl3p.eu/1/%s/ticker" % self.make_market(crypto, fiat, '-')
        resp = self.get_url(url).json()
        # BL3P's public ticker reports the last trade price at the top level
        return float(resp['last'])
class BTCbox(Service):
service_id = 151
def get_current_price(self, crypto, fiat):
if fiat.lower() != 'jpy':
raise SkipThisService("Only JPY trading pairs supported")
url = "https://www.btcbox.co.jp/api/v1/ticker/?coin=%s" % crypto
r = self.get_url(url).json()
return r['last']
class Bibox(Service):
service_id = 152
api_homepage = "https://github.com/Biboxcom/api_reference/wiki/api_reference"
symbol_mapping = (
('usd', 'usdt'),
)
def get_current_price(self, crypto, fiat):
url = "https://api.bibox.com/v1/mdata?cmd=ticker&pair=%s" % (
self.make_market(crypto, fiat).upper()
)
r = self.get_url(url).json()
return float(r['result']['last'])
def get_pairs(self):
url ="https://api.bibox.com/v1/mdata?cmd=pairList"
r = self.get_url(url).json()
markets = []
for data in r['result']:
pair = data['pair']
crypto, fiat = self.parse_market(pair)
markets.append("%s-%s" % (crypto, fiat))
return markets
def get_orderbook(self, crypto, fiat):
url = "https://api.bibox.com/v1/mdata?cmd=depth&pair=%s&size=200" % (
self.make_market(crypto, fiat).upper()
)
resp = self.get_url(url).json()['result']
return {
'asks': [(float(x['price']), float(x['volume'])) for x in resp['asks']],
'bids': [(float(x['price']), float(x['volume'])) for x in resp['bids']]
}
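# A minimal usage sketch for the exchange services defined in this module.
# Public market-data endpoints need no credentials; the authenticated example
# assumes the Service base class accepts api_key/api_secret keyword arguments,
# as the CexIO.__init__ passthrough above suggests. All values are
# hypothetical, and calling this function performs live HTTP requests.
def _example_service_usage():
    price = Bittrex().get_current_price("btc", "usd")   # public ticker
    book = Poloniex().get_orderbook("eth", "btc")       # public order book
    # authenticated example (placeholder credentials):
    # gemini = Gemini(api_key="example-key", api_secret=b"example-secret")
    # balance = gemini.get_exchange_balance("btc")
    return price, book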
|
145801
|
from pydantic import BaseModel
from icolos.core.workflow_steps.schrodinger.base import StepSchrodingerBase
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
from icolos.utils.enums.step_enums import StepFepPlusEnum
from typing import List
import time
import os
from icolos.core.workflow_steps.step import _LE
_SFE = StepFepPlusEnum()
class StepFEPBase(StepSchrodingerBase, BaseModel):
"""
Base class containing common functionality for Schrodinger FEP+ workflows
"""
def __init__(self, **data):
super().__init__(**data)
def _parse_output(self, tmp_dir):
# pick up the final annotated map construction
self.data.generic.clear_file_dict()
self._logger.log(f"Reading output map.", _LE.INFO)
data = None
counts = 0
# hold whilst the job data gets written to local fs
while data is None and counts < 50000:
try:
path = [
file
for file in os.listdir(tmp_dir)
if file.endswith(_SFE.FMP_OUTPUT_FILE)
]
assert len(path) == 1
path = path[0]
with open(os.path.join(tmp_dir, path), "rb") as f:
data = f.read()
except AssertionError:
self._logger.log(
"Output file has not yet appeared in the file system, sleeping and retrying...",
_LE.INFO,
)
time.sleep(15)
counts += 1
self._add_data_to_generic(path, data)
def _extract_log_file_data(self, tmp_dir):
"""
Parses FEP log file to extract edge and node properties
"""
lines = None
counts = 0
# wait whilst job sits in the queue
while lines is None and counts < 50000:
try:
log_file = [
file for file in os.listdir(tmp_dir) if file.endswith(_SFE.LOGFILE)
]
assert len(log_file) == 1
log_file = log_file[0]
with open(os.path.join(tmp_dir, log_file), "r") as f:
lines = f.readlines()
edge_header_index = [
idx for idx, s in enumerate(lines) if _SFE.EDGE_HEADER_LINE in s
][-1]
node_header_index = [
idx for idx, s in enumerate(lines) if _SFE.NODE_HEADER_LINE in s
][-1]
end_of_data_index = [
idx for idx, s in enumerate(lines) if _SFE.DATA_TERMINUS in s
][0]
edge_data_lines = [
line
for line in lines[edge_header_index + 3 : node_header_index - 1]
]
node_data_lines = [
line
for line in lines[node_header_index + 3 : end_of_data_index - 1]
]
self._process_edge_lines(edge_data_lines)
self._process_node_lines(node_data_lines)
except AssertionError:
self._logger.log(
"Log file has not yet appeared in the file system, sleeping and retrying...",
_LE.INFO,
)
time.sleep(15)
counts += 1
def _process_node_lines(self, data: List[str]) -> None:
for entry in data:
fields = entry.split()
idx = fields[1]
dG = fields[2]
# attach dG tags to compound objects if present
if self.data.compounds:
# account for running this step compoundless
                self.data.compounds[int(idx.split(":")[0])].get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp("dG", str(dG))
self._logger.log(
f"dG directly from the output file for compound {idx} is {dG} ",
_LE.INFO,
)
def _process_edge_lines(self, edge_data: List[str]) -> None:
"""
Calibrate dG values using a reference compound and edge ddG from log file output, return dG for each compound
"""
        # calculate the max ligand index, accounting for ligands that may have been skipped in previous steps, so we can't rely on self.get_compounds()
len_nodes = 0
for line in edge_data:
parts = line.split()
lig_from = int(parts[1].split(":")[0])
lig_to = int(parts[3].split(":")[0])
for idx in [lig_from, lig_to]:
if idx > len_nodes:
len_nodes = idx
len_nodes += 1 # account for zero indexed ligands
error_matrix = np.zeros((len_nodes, len_nodes))
ddG_matrix = np.zeros((len_nodes, len_nodes))
for line in edge_data:
parts = line.split()
try:
# parse the compound info from the log file
lig_from = int(parts[1].split(":")[0])
lig_to = int(parts[3].split(":")[0])
ddG = float(parts[4].split("+-")[0])
err = float(parts[4].split("+-")[1])
except ValueError:
self._logger.log(
f"Line: {line} from the logfile contained an unexpected datatype - cannot process this edge - skipping",
_LE.WARNING,
)
continue
error_matrix[lig_from, lig_to] = err
error_matrix[lig_to, lig_from] = err
ddG_matrix[lig_from, lig_to] = ddG
ddG_matrix[lig_to, lig_from] = -ddG
error_matrix = csr_matrix(error_matrix)
# compute shortest path from one ligand to the anchor
_, predecessors = shortest_path(
error_matrix, directed=False, return_predecessors=True, indices=0
)
self._construct_dg_per_compound(ddG_matrix, predecessors, error_matrix)
def _construct_dg_per_compound(
self, ddG: np.ndarray, predecessors: List, error_matrix: np.ndarray
) -> None:
"""
Calculate the calibrated binding free energy per compound using a reference value
        Attach the calculated dG to the compounds
"""
try:
ref_dG = self.settings.additional[_SFE.REFERENCE_DG]
except KeyError:
self._logger.log(
"Expected to find a reference dG value for the lead compound, but none was found."
"Defaulting to 0.00, you will need to apply a manual correction afterwards",
_LE.WARNING,
)
ref_dG = 0.00
def _calculate_dg(comp_num: int, dG=ref_dG, err=0):
prev_index = predecessors[comp_num]
dG += ddG[prev_index, comp_num]
err += error_matrix[prev_index, comp_num]
if prev_index != 0:
_calculate_dg(prev_index, dG=dG, err=err)
else:
data = str(round(dG, 2)) + "+-" + str(round(err, 2))
self.data.compounds[idx].get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp("map_dG", data)
self._logger.log(
f"Calculated dG from spanning tree for compound {idx} is {data}",
_LE.INFO,
)
for comp in self.get_compounds():
idx = comp.get_compound_number()
# check whether the compound appeared in the final map
try:
if idx == 0:
comp.get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp(
"map_dG", str(self.settings.additional[_SFE.REFERENCE_DG])
)
if idx != 0: # skip the reference compound
_calculate_dg(idx)
except IndexError:
self._logger.log(
f"Compound {idx} was not found in the output map, it was likely dropped during the workflow",
_LE.WARNING,
)
continue
|
145805
|
from scipy.linalg import toeplitz
import numpy as np
from cooltools.lib.numutils import LazyToeplitz
n = 100
m = 150
c = np.arange(1, n + 1)
r = np.r_[1, np.arange(-2, -m, -1)]
L = LazyToeplitz(c, r)
T = toeplitz(c, r)
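# LazyToeplitz(c, r) is expected to behave like scipy's dense toeplitz(c, r)
# under 2D slicing (L[si, sj] == T[si, sj]) while materializing only the
# requested block; the tests below check it against the dense matrix T.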
def test_symmetric():
for si in [
slice(10, 20),
slice(0, 150),
slice(0, 0),
slice(150, 150),
slice(10, 10),
]:
assert np.allclose(L[si, si], T[si, si])
def test_triu_no_overlap():
for si, sj in [
(slice(10, 20), slice(30, 40)),
(slice(10, 15), slice(30, 40)),
(slice(10, 20), slice(30, 45)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_tril_no_overlap():
for si, sj in [
(slice(30, 40), slice(10, 20)),
(slice(30, 40), slice(10, 15)),
(slice(30, 45), slice(10, 20)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_triu_with_overlap():
for si, sj in [
(slice(10, 20), slice(15, 25)),
(slice(13, 22), slice(15, 25)),
(slice(10, 20), slice(18, 22)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_tril_with_overlap():
for si, sj in [
(slice(15, 25), slice(10, 20)),
(slice(15, 22), slice(10, 20)),
(slice(15, 25), slice(10, 18)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_nested():
for si, sj in [
(slice(10, 40), slice(20, 30)),
(slice(10, 35), slice(20, 30)),
(slice(10, 40), slice(20, 25)),
(slice(20, 30), slice(10, 40)),
]:
assert np.allclose(L[si, sj], T[si, sj])
|
145820
|
from ._lambdification import (
DUMMY_TIME_SYMBOL,
lambdify,
LambdifiedArrayExpressions,
LambdifiedMatrixExpressions,
LambdifiedVectorExpressions,
LambdifiedArrayExpression,
LambdifiedMatrixExpression,
LambdifiedVectorExpression,
LambdifiedScalarExpression
)
from .utilities import find_length_of_state_vectors
|
145857
|
import sys
from .__version__ import __author__, __author_email__, __title__, __url__, __version__
from .hpm import (
DoubleAssignmentException,
EmptyValue,
HyperParameterManager,
NotLiteralEvaluable,
NotLiteralNameException,
SourceHelper,
)
from .hpm_db import (
HyperParameterDB,
HyperParameterDBLambdas,
HyperParameterOccurrence,
HyperParameterPriority,
L,
P,
)
# monkey patch to enable ``from hpman.m import whatever``
from .hpm_zoo_monkey_patch import HPMZooModule
m = HPMZooModule(__name__ + ".m", HPMZooModule.__doc__)
sys.modules[m.__name__] = m
del sys
del HPMZooModule
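# Illustrative effect of the patch: ``import hpman.m`` (or
# ``from hpman.m import whatever``) now resolves to the module object
# registered in sys.modules above rather than a real submodule file on disk.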
__all__ = [
"__author__",
"__author_email__",
"__title__",
"__url__",
"__version__",
"DoubleAssignmentException",
"EmptyValue",
"HyperParameterManager",
"NotLiteralEvaluable",
"NotLiteralNameException",
"SourceHelper",
"HyperParameterDB",
"HyperParameterDBLambdas",
"HyperParameterOccurrence",
"HyperParameterPriority",
"L",
"P",
]
|
145875
|
self.description = "Install a package with an existing file matching a negated --overwrite pattern"
p = pmpkg("dummy")
p.files = ["foobar"]
self.addpkg(p)
self.filesystem = ["foobar*"]
self.args = "-U --overwrite=foobar --overwrite=!foo* %s" % p.filename()
self.addrule("!PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=dummy")
self.addrule("!FILE_MODIFIED=foobar")
|
145882
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dolores",
version="1.0.3",
author="<NAME>, <NAME>, DNE LLC",
author_email="<EMAIL>",
description="Dolores is a Python library for developers using GPT-3.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dne-digital/dolores",
download_url="https://pypi.org/project/dolores/",
packages=setuptools.find_packages(),
package_dir = {'dolores': 'dolores'},
package_data = {'dolores': ['dolores/*.json']},
#data_files = [('dolores', 'packages/prompts.json')],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
145888
|
import os
import math
import shutil
import torch
from utils import ensure_dir, Early_stopping
class BaseTrainer:
""" Base class for all trainer.
Note:
Modify if you need to change logging style, checkpoint naming, or something else.
"""
def __init__(self, model, loss, vocab, optimizer, epochs,
save_dir, save_freq, eval_freq, resume, verbosity, id, dataset, identifier='', logger=None):
self.model = model
self.loss = loss
self.optimizer = optimizer
self.epochs = epochs
self.min_loss = math.inf
self.vocab = vocab
self.save_dir = save_dir
self.save_freq = save_freq
self.verbosity = verbosity
self.identifier = identifier
self.logger = logger
self.start_epoch = 1
self.dataset = dataset
self.id = id
self.eval_freq = eval_freq
ensure_dir(save_dir)
if resume:
self._resume_checkpoint(resume)
self.early_stop = Early_stopping(patience=3)
def train(self):
for epoch in range(self.start_epoch, self.epochs+1):
result = self._train_epoch(epoch)
# if self.logger:
# log = {'epoch': epoch}
# for key, value in result.items():
# if key == 'metrics':
# for i, metric in enumerate(self.metrics):
# log[metric.__name__] = result['metrics'][i]
# elif key == 'val_metrics':
# for i, metric in enumerate(self.metrics):
# log['val_'+metric.__name__] = result['val_metrics'][i]
# else:
# log[key] = value
# self.logger.add_entry(log)
# if self.verbosity >= 1:
# print(log)
if epoch % self.save_freq == 0:
self._save_checkpoint(epoch, result['loss'])
self.early_stop.update(result["coco_stat"]["Bleu_4"])
if self.early_stop.stop():
break
def _train_epoch(self, epoch):
raise NotImplementedError
def _valid_epoch(self, epoch):
raise NotImplementedError
def _save_checkpoint(self, epoch, loss):
if loss < self.min_loss:
self.min_loss = loss
arch = type(self.model).__name__
state = {
'epoch': epoch,
'logger': self.logger,
'arch': arch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'min_loss': self.min_loss,
}
id_filename = str(self.id) + '_/'
id_file_path = self.save_dir + '/' + id_filename + 'checkpoints/'
ensure_dir(id_file_path)
filename = os.path.join(id_file_path,
self.identifier + 'checkpoint_epoch{:02d}_loss_{:.5f}.pth.tar'.format(epoch, loss))
print("Saving checkpoint: {} ...".format(filename))
torch.save(state, filename)
if loss == self.min_loss:
shutil.copyfile(filename, os.path.join(self.save_dir, 'model_best.pth.tar'))
def _resume_checkpoint(self, resume_path):
print("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint['epoch'] + 1
self.min_loss = checkpoint['min_loss']
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger = checkpoint['logger']
print("Checkpoint '{}' (epoch {}) loaded".format(resume_path, self.start_epoch))
|
145906
|
import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class CenterTargetLayer(nn.Module):
def __init__(self, roi_sampler_cfg):
super().__init__()
self.roi_sampler_cfg = roi_sampler_cfg
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
batch_dict:
rois: (B, M, 7 + C)
gt_of_rois: (B, M, 7 + C)
gt_iou_of_rois: (B, M)
roi_scores: (B, M)
roi_labels: (B, M)
reg_valid_mask: (B, M)
rcnn_cls_labels: (B, M)
"""
batch_rois, batch_gt_of_rois, batch_roi_dist, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
batch_dict=batch_dict
)
# regression valid mask
reg_valid_mask = (batch_roi_dist <= self.roi_sampler_cfg.REG_FG_DIST).long()
# classification label
assert self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_dist'
dist_bg_thresh = self.roi_sampler_cfg.CLS_BG_DIST
dist_fg_thresh = self.roi_sampler_cfg.CLS_FG_DIST
fg_mask = batch_roi_dist <= dist_fg_thresh
bg_mask = batch_roi_dist > dist_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
batch_cls_labels = (fg_mask > 0).float()
        # soft labels in the interval region: map distance linearly to (0, 1), smaller distance -> higher score
batch_cls_labels[interval_mask] = (dist_bg_thresh - batch_roi_dist[interval_mask]) / (dist_bg_thresh - dist_fg_thresh)
targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_dist_of_rois': batch_roi_dist,
'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
'reg_valid_mask': reg_valid_mask,
'rcnn_cls_labels': batch_cls_labels}
return targets_dict
def sample_rois_for_rcnn(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
roi_scores = batch_dict['roi_scores']
roi_labels = batch_dict['roi_labels']
gt_boxes = batch_dict['gt_boxes']
code_size = rois.shape[-1]
batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
batch_roi_dist = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
for index in range(batch_size):
cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
            roi_flag = (cur_roi[:, 3:6].sum(dim=1) != 0)  # a roi is valid only if its size (w, l, h) is non-zero
cur_roi = cur_roi[roi_flag]
cur_roi_scores = cur_roi_scores[roi_flag]
cur_roi_labels = cur_roi_labels[roi_flag]
if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
min_dist, gt_assignment = self.get_min_dist_with_same_class(
rois=cur_roi[:, 0:7], roi_labels=cur_roi_labels,
gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
)
else:
cdist = iou3d_nms_utils.boxes_dist_torch(cur_roi[:, 0:7], cur_gt[:, 0:7]) # (M, N)
min_dist, gt_assignment = torch.min(cdist, dim=1)
sampled_inds = self.subsample_rois(min_dist = min_dist)
batch_rois[index] = cur_roi[sampled_inds]
batch_roi_labels[index] = cur_roi_labels[sampled_inds]
batch_roi_dist[index] = min_dist[sampled_inds]
batch_roi_scores[index] = cur_roi_scores[sampled_inds]
batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
return batch_rois, batch_gt_of_rois, batch_roi_dist, batch_roi_scores, batch_roi_labels
def subsample_rois(self, min_dist):
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
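        # e.g. with ROI_PER_IMAGE=128 and FG_RATIO=0.5 (illustrative values),
        # at most 64 fg rois are kept and the rest of the budget is filled
        # with hard/easy bg samples below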
fg_dist = max(self.roi_sampler_cfg.REG_FG_DIST, self.roi_sampler_cfg.CLS_FG_DIST)
bg_dist = self.roi_sampler_cfg.CLS_BG_DIST_LO
fg_inds = (min_dist <= fg_dist).nonzero().view(-1)
easy_bg_inds = (min_dist > bg_dist).nonzero().view(-1)
hard_bg_inds = ((min_dist > fg_dist) & (min_dist <= bg_dist)).nonzero().view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(min_dist).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(min_dist).long()
fg_inds = fg_inds[rand_num]
bg_inds = []
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
else:
print('min distance:(min=%f, max=%f)' % (min_dist.min().item(), min_dist.max().item()))
print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
raise NotImplementedError
sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
return sampled_inds
@staticmethod
def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
hard_bg_inds = hard_bg_inds[rand_idx]
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
easy_bg_inds = easy_bg_inds[rand_idx]
bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
bg_inds = hard_bg_inds[rand_idx]
elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
bg_inds = easy_bg_inds[rand_idx]
else:
raise NotImplementedError
return bg_inds
@staticmethod
def get_min_dist_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
min_dist = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
cdist = iou3d_nms_utils.boxes_dist_torch(cur_roi, cur_gt) # (M, N)
cur_min_dist, cur_gt_assignment = torch.min(cdist, dim=1)
min_dist[roi_mask] = cur_min_dist
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return min_dist, gt_assignment
|
145911
|
import datetime
from unittest.mock import MagicMock
import pytest
from bloop.models import BaseModel, Column
from bloop.stream.coordinator import Coordinator
from bloop.stream.stream import Stream
from bloop.types import Integer, String
from bloop.util import ordered
from . import build_shards
@pytest.fixture
def coordinator():
# MagicMock because we're testing __next__
return MagicMock(spec=Coordinator)
@pytest.fixture
def stream(coordinator, engine):
stream = Stream(model=Email, engine=engine)
stream.coordinator = coordinator
return stream
class Email(BaseModel):
class Meta:
stream = {
"include": {"new", "old"},
"arn": "stream-arn"
}
id = Column(Integer, hash_key=True)
data = Column(String)
def test_repr(stream):
assert repr(stream) == "<Stream[Email]>"
def test_iter(stream):
"""stream is both an Iterable and an Iterator"""
assert iter(stream) is stream
def test_token(engine):
engine.bind(Email)
shards = build_shards(3, {0: [1, 2]}, stream_arn=Email.Meta.stream["arn"])
shards[1].iterator_type = "latest"
shards[2].iterator_type = "at_sequence"
shards[2].sequence_number = "sequence-number"
stream = Stream(model=Email, engine=engine)
stream.coordinator.roots.append(shards[0])
stream.coordinator.active.extend(shards[1:])
assert ordered(stream.token) == ordered({
"stream_arn": "stream-arn",
"active": ["shard-id-1", "shard-id-2"],
"shards": [
{"shard_id": "shard-id-0"},
{"shard_id": "shard-id-1", "parent": "shard-id-0", "iterator_type": "latest"},
{"shard_id": "shard-id-2", "parent": "shard-id-0",
"iterator_type": "at_sequence", "sequence_number": "sequence-number"},
]
})
def test_heartbeat(stream, coordinator):
stream.heartbeat()
coordinator.heartbeat.assert_called_once_with()
def test_move_to(stream, coordinator):
stream.move_to("latest")
coordinator.move_to.assert_called_once_with("latest")
def test_next_no_record(stream, coordinator):
coordinator.__next__.return_value = None
# Explicit marker so we don't get next's default value
missing = object()
record = next(stream, missing)
assert record is None
def test_next_unpacks(stream, coordinator):
now = datetime.datetime.now(datetime.timezone.utc)
meta = {
"created_at": now,
"sequence_number": "sequence-number",
"event": {
"id": "event-id",
"type": "event-type",
"version": "event-version"
}
}
coordinator.__next__.return_value = {
# Impossible to have old and key, but for the sake of testing
# an object that's partially/fully loaded
"old": {
"id": {"N": "0"},
"data": {"S": "some-data"}
},
"key": {
# Omitted because the model only includes "new"
"id": {"N": "343"}
},
"new": None,
"meta": meta
}
record = next(stream)
assert record["old"].id == 0
assert record["old"].data == "some-data"
assert record["new"] is None
assert record["key"] is None
assert not hasattr(record["key"], "data")
|
145915
|
import math
from typing import List
class Solution:
def constructRectangle(self, area: int) -> List[int]:
W = int(math.sqrt(area))
while area % W:
W -= 1
return [area//W, W]
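# e.g. constructRectangle(4) -> [2, 2]; constructRectangle(37) -> [37, 1]:
# W starts at floor(sqrt(area)) and decreases to the largest divisor <= sqrt(area).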
|
145923
|
import csv
import os
import re
import sys
import pandas as pd
try:
import geocoder
except ImportError:
print("[ERROR] Unable to import Geocoder module: cant'run! Exit...")
sys.exit()
try:
import common_utils as cu
except ImportError:
print("[ERROR] Unable to import 'common_utils' module! Exit...")
sys.exit()
class CountryProcessor:
def __init__(self):
self._status = ""
self._myFile = ""
self._myList = ""
self._reader = ""
@staticmethod
def clean_name(tweet):
return ' '.join(
            re.sub(
                r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+://\S+)",
                " ",
                tweet).split())
def parse_countries(self, fname):
if os.name == "nt":
with open(fname, 'r', encoding='utf-8') as self._myFile:
self._reader = csv.reader(self._myFile, delimiter=',')
next(self._reader, None)
self._myList = list(
self.clean_name(
self._i[1]) for self._i in self._reader)
else:
with open(fname, 'r') as self._myFile:
self._reader = csv.reader(self._myFile, delimiter=',')
next(self._reader, None)
self._myList = list(
self.clean_name(
self._i[1]) for self._i in self._reader)
return self._myList
db = cu.FastWriter()
if os.path.exists("./geo_db.db"):
status = True
df = pd.read_csv("geo_db.db", names=["loc", "country"], comment='#')
else:
status = False
def geocoder_worker(location):
try:
# if cache exists
if status is True:
res = df[df["loc"].str.match(location)]['country']
if len(res) != 0:
print("[Cached]", res.iloc[0])
return str(res.iloc[0])
else:
country_elem = geocoder.komoot(location)
country_name = country_elem.country
if country_name is None:
return
else:
print(country_name)
temp = location + "," + country_name
db.backup_db(temp, "geo_db.db")
return country_name
            # if the cache doesn't exist
else:
country_elem = geocoder.komoot(location)
country_name = country_elem.country
if country_name is None:
return
else:
print(country_name)
temp = location + "," + country_name
db.backup_db(temp, "geo_db.db")
return country_name
except KeyboardInterrupt:
return
|
145925
|
from pymatgen.ext.matproj import MPRester
# Change "<APIKEY>" to the API key obtained from MP.
mpr = MPRester("<APIKEY>")
data = mpr.query(criteria={"pretty_formula": "Al2O3"},
properties=["final_energy", "band_gap"])
print(data)
import pandas as pd
df = pd.DataFrame(data) # Convert to DataFrame
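# The DataFrame can then be inspected or sorted as usual, e.g.
# df.sort_values("band_gap").head()  (standard pandas API; shown as a usage hint)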
|
145940
|
import ajenti
from ajenti.api import *
from ajenti.plugins import *
info = PluginInfo(
title='Ajenti VH - NGINX Support',
icon='globe',
dependencies=[
PluginDependency('vh'),
PluginDependency('services'),
#BinaryDependency('nginx'),
],
)
def init():
from ajenti.plugins.vh import destroyed_configs
destroyed_configs.append('nginx')
import nginx
import nginx_templates
from ajenti.plugins import manager
from ajenti.plugins.nginx.main import Nginx
#from ajenti.plugins.apache.main import Apache
manager.blacklist.append(Nginx)
#manager.blacklist.append(Apache)
|
145960
|
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
# This is an interface module to attach the PIM to the memory interconnect
class ethz_PIMIF(MemObject):
type = 'ethz_PIMIF'
cxx_header = "mem/ethz_pim_if.hh"
slave = SlavePort('Slave port')
master = MasterPort('Master port')
    system = Param.System(Parent.any, "System we belong to") # A pointer to the system is necessary for each master component
    # Maybe I will delete these later
req_size = Param.Unsigned(16, "The number of requests to buffer")
resp_size = Param.Unsigned(16, "The number of responses to buffer")
delay = Param.Latency('0ns', "The latency")
ranges = VectorParam.AddrRange([AllMemory],
"Address ranges to pass through")
|
145973
|
import os
import time
from rsqueakvm import constants, error, wrapper
from rsqueakvm.model.character import W_Character
from rsqueakvm.model.compiled_methods import W_CompiledMethod, W_PreSpurCompiledMethod, W_SpurCompiledMethod
from rsqueakvm.model.display import W_DisplayBitmap
from rsqueakvm.model.numeric import W_Float, W_SmallInteger, W_LargeIntegerWord, W_LargeIntegerBig, W_LargeInteger
from rsqueakvm.model.pointers import W_PointersObject
from rsqueakvm.model.block_closure import W_BlockClosure
from rsqueakvm.model.variable import W_BytesObject, W_WordsObject
from rsqueakvm.util import stream, system
from rsqueakvm.util.bitmanipulation import splitter
from rsqueakvm.util.progress import Progress
from rpython.rlib import objectmodel
from rpython.rlib.rarithmetic import r_ulonglong, r_longlong, r_int, intmask, r_uint, r_uint32, r_int64
from rpython.rlib import jit, rbigint, unroll, rgc
if r_longlong is not r_int:
r_uint64 = r_ulonglong
else:
r_uint64 = r_uint
# Access for module users
Stream = stream.Stream
# ____________________________________________________________
#
# Constants and image versions.
# from the squeak source code:
# in squeak, the compact classes array can be found at this position
# in the special objects array
COMPACT_CLASSES_ARRAY = 28
# The image data can optionally start after this fixed offset.
POSSIBLE_IMAGE_OFFSET = 512
UNROLLING_CONSTANTS = unroll.unrolling_iterable(sorted(
constants.constant_objects_in_special_object_table_wo_types.items(),
key=lambda t: t[1]
))
class ImageVersion(object):
_immutable_fields_ = [
"magic", "is_big_endian", "is_64bit", "has_closures",
"has_floats_reversed", "is_modern", "is_spur"]
def __init__(self, magic, is_big_endian, is_64bit, has_closures,
has_floats_reversed, is_spur=False):
self.magic = magic
self.is_big_endian = is_big_endian
self.is_64bit = is_64bit
self.has_closures = has_closures
self.has_floats_reversed = has_floats_reversed
self.is_modern = magic > 6502
self.is_spur = is_spur
def configure_stream(self, stream):
stream.big_endian = self.is_big_endian
if self.is_64bit:
if not system.IS_64BIT:
raise error.FatalError("Cannot handle 64-bit image.")
stream.be_64bit()
else:
stream.be_32bit()
image_versions = {
0x00001966: ImageVersion(6502, True, False, False, False),
0x66190000: ImageVersion(6502, False, False, False, False),
0x00001968: ImageVersion(6504, True, False, True, False),
0x68190000: ImageVersion(6504, False, False, True, False),
0x00001969: ImageVersion(6505, True, False, True, True ),
0x69190000: ImageVersion(6505, False, False, True, True ),
0x00001979: ImageVersion(6521, True, False, True, True , is_spur=True),
0x79190000: ImageVersion(6521, False, False, True, True , is_spur=True),
# CUSTOM VERSION MAGIC: These are for a Spur-format image that we have
# written from an old image with block-contexts
0x34120000: ImageVersion(6521, False, False, False, True , is_spur=True)
}
image_versions_64bit = {
# Versions for 64 bit images (expressed as two 32-bit words)
(0x00000000, 0x000109A0): ImageVersion(68000, True, True, False, False),
(-0x5ff6ff00, 0x00000000): ImageVersion(68000, False, True, False, False), # 0xA009010000000000
(0x00000000, 0x000109A2): ImageVersion(68002, True, True, True, False),
(-0x5df6ff00, 0x00000000): ImageVersion(68002, False, True, True, False), # 0xA209010000000000
(0x00000000, 0x000109A3): ImageVersion(68003, True, True, True, True ),
(-0x5cf6ff00, 0x00000000): ImageVersion(68003, False, True, True, True ), # 0xA309010000000000
# TODO: add 64bit Spur once supported
}
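# Each little-endian magic above is the byte-swapped form of its big-endian
# counterpart (e.g. 0x00001968 <-> 0x68190000); this is how the image's
# endianness is detected from its first word.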
# ____________________________________________________________
#
# Parser classes for Squeak image format.
class ImageReader(object):
_immutable_fields_ = ["space", "stream", "version", "readerStrategy", "logging_enabled"]
def __init__(self, space, stream, logging_enabled=False):
self.space = space
self.stream = stream
self.version = None
self.readerStrategy = None
self.logging_enabled = logging_enabled
def create_image(self):
self.read_all()
return SqueakImage(self)
def read_all(self):
self.read_header()
self.readerStrategy.read_and_initialize()
def try_read_version(self):
magic1 = self.stream.next()
version = image_versions.get(magic1, None)
if version:
return version
# Check 64 bit version
magic2 = self.stream.next()
version = image_versions_64bit.get((magic1, magic2), None)
if not version:
self.stream.reset()
return version
def read_version(self):
version = self.try_read_version()
if not version:
if self.stream.length() > POSSIBLE_IMAGE_OFFSET + 4:
self.stream.skipbytes(POSSIBLE_IMAGE_OFFSET)
version = self.try_read_version()
if not version:
raise error.CorruptImageError("Illegal version magic.")
version.configure_stream(self.stream)
self.version = version
self.readerStrategy = self.choose_reader_strategy()
self.stream = None
if not version.has_closures:
self.space.uses_block_contexts.activate()
def read_header(self):
self.read_version()
self.readerStrategy.continue_read_header()
self.lastWindowSize = self.readerStrategy.lastWindowSize
self.readerStrategy.skip_to_body()
def choose_reader_strategy(self):
if self.version.is_spur:
return SpurReader(self, self.version, self.stream, self.space)
if self.version.is_modern:
return NonSpurReader(self, self.version, self.stream, self.space)
return AncientReader(self, self.version, self.stream, self.space)
def g_class_of(self, chunk):
return self.readerStrategy.g_class_of(chunk)
@property
def compactclasses(self):
return self.readerStrategy.compactclasses
@property
def intcache(self):
return self.readerStrategy.intcache
@property
def chunklist(self):
return self.readerStrategy.chunklist
@property
def chunks(self):
return self.readerStrategy.chunks
def decode_pointers(self, g_object, space, end=-1):
return self.readerStrategy.decode_pointers(g_object, space, end)
class BaseReaderStrategy(object):
_immutable_fields_ = ["imageReader", "version", "stream", "space", "chunks", "chunklist"]
def __init__(self, imageReader, version, stream, space):
self.imageReader = imageReader
self.version = version
self.stream = stream
self.space = space
self.chunks = {} # Dictionary mapping old address to chunk object
self.chunklist = [] # Flat list of all read chunks
self.intcache = {} # Cached instances of SmallInteger
self.lastWindowSize = 0
self._progress = Progress(stages=5, silent=space.silent.is_set()) # Track 5 stages in read_and_initialize
def log(self, msg):
if self.imageReader.logging_enabled:
print msg
def continue_read_header(self):
# 1 word headersize
self.headersize = self.stream.next()
# 1 word size of the full image
self.endofmemory = self.stream.next() # endofmemory = bodysize
# 1 word old base address
self.oldbaseaddress = self.stream.next()
# 1 word pointer to special objects array
self.specialobjectspointer = self.stream.next()
# 1 word last used hash
lasthash = self.stream.next()
self.lastWindowSize = self.stream.next()
fullscreenflag = self.stream.next()
extravmmemory = self.stream.next()
def skip_to_body(self):
self.stream.skipbytes(self.headersize - self.stream.pos)
def read_and_initialize(self):
self.read_body()
# All chunks are read, now convert them to real objects.
self.init_g_objects()
self.assign_prebuilt_constants()
for chunk in self.chunklist:
if self.ispointers(chunk.g_object):
chunk.data = None
if chunk.g_object.filled_in:
chunk.data = None
self.chunks = {}
self.intcache = {}
rgc.collect()
self.init_w_objects()
self.fillin_w_objects()
rgc.collect()
self.fillin_weak_w_objects()
self.fillin_finalize()
def read_body(self):
raise NotImplementedError("subclass must override this")
def init_compactclassesarray(self):
raise NotImplementedError("subclass must override this")
def g_class_of(self, chunk):
raise NotImplementedError("subclass must override this")
def init_g_objects(self):
self._progress.next_stage(len(self.chunks))
for chunk in self.chunks.itervalues():
self.init_g_object(chunk)
self._progress.update()
self.special_g_objects = self.chunks[self.specialobjectspointer].g_object.pointers
def init_g_object(self, chunk):
chunk.as_g_object(self, self.space) # initialize g_object
def assign_prebuilt_constants(self):
g_special_objects_array = self.chunks[self.specialobjectspointer].g_object
g_special_objects_array.w_object = self.space.w_special_objects
# Assign classes and objects that in special objects array that are already created.
self._assign_prebuilt_constants()
def smalltalk_g_at(self, lookup_name):
# first, try to find an association in the special objects array
g_object = self.lookup_in_assocs_g(self.special_g_objects, lookup_name)
if g_object is not None:
return g_object
try:
g_smalltalk = self.special_g_object(constants.SO_SMALLTALK)
except IndexError:
            # can happen in some tests
return None
array_g = []
if len(g_smalltalk.pointers) == 1:
# modern image
globals_g = g_smalltalk.pointers[0].pointers
if len(globals_g) == 6:
bindings_g = globals_g[2].pointers
if len(bindings_g) == 2:
array_g = bindings_g[1].pointers
elif len(globals_g) == 4:
array_g = globals_g[1].pointers
elif len(g_smalltalk.pointers) == 2:
# old image
array_g = g_smalltalk.pointers[1].pointers
return self.lookup_in_assocs_g(array_g, lookup_name)
def lookup_in_assocs_g(self, array_g, lookup_name):
for g_assoc in array_g:
if g_assoc.pointers and len(g_assoc.pointers) == 2:
g_name = g_assoc.pointers[0]
if self.isbytes(g_name):
name = "".join(g_name.get_bytes())
if name == lookup_name:
return g_assoc.pointers[1]
return None
def special_g_object(self, index):
        # while Python would raise an IndexError, after translation a nonexistent index results in a segfault...
if index >= len(self.special_g_objects):
raise IndexError
return self.special_g_objects[index]
def special_g_object_safe(self, index):
        # while Python would raise an IndexError, after translation a nonexistent index results in a segfault...
if index >= len(self.special_g_objects):
return self.special_g_objects[constants.SO_NIL]
return self.special_g_objects[index]
def init_w_objects(self):
self._progress.next_stage(len(self.chunklist))
for g in self.special_g_objects:
self.init_w_object(g.chunk)
self._progress.update()
for chunk in self.chunklist:
self.init_w_object(chunk)
self._progress.update()
def init_w_object(self, chunk):
chunk.g_object.init_w_object(self.space)
def fillin_w_objects(self):
self._progress.next_stage(len(self.chunklist))
for chunk in self.chunklist:
self.fillin_w_object(chunk)
self._progress.update()
def fillin_w_object(self, chunk):
chunk.g_object.fillin(self.space)
def fillin_weak_w_objects(self):
self._progress.next_stage(len(self.chunklist))
for chunk in self.chunklist:
self.fillin_weak_w_object(chunk)
self._progress.update()
def fillin_weak_w_object(self, chunk):
chunk.g_object.fillin_weak(self.space)
def fillin_finalize(self):
for chunk in self.chunklist:
chunk.g_object.fillin_finalize(self.space)
def len_bytes_of(self, chunk):
return len(chunk.data) * 4
def get_bytes_of(self, chunk):
bytes = []
if self.version.is_big_endian:
for each in chunk.data:
bytes.append(chr((each >> 24) & 0xff))
bytes.append(chr((each >> 16) & 0xff))
bytes.append(chr((each >> 8) & 0xff))
bytes.append(chr((each >> 0) & 0xff))
else:
for each in chunk.data:
bytes.append(chr((each >> 0) & 0xff))
bytes.append(chr((each >> 8) & 0xff))
bytes.append(chr((each >> 16) & 0xff))
bytes.append(chr((each >> 24) & 0xff))
return bytes
def isfloat(self, g_object):
return self.iswords(g_object) and self.space.w_Float.is_same_object(g_object.g_class.w_object)
def islargeinteger(self, g_object):
g_lpi = self.special_g_object_safe(constants.SO_LARGEPOSITIVEINTEGER_CLASS)
g_lni = self.special_g_object_safe(constants.SO_LARGENEGATIVEINTEGER_CLASS)
is_large = (g_lpi == g_object.g_class or g_lni == g_object.g_class)
if is_large:
assert self.isbytes(g_object)
return is_large
def issignedinteger(self, g_object):
if not self.islargeinteger(g_object):
return False
bytes = g_object.get_bytes()
value = rbigint.rbigint.frombytes(''.join(bytes), 'little', False)
if g_object.g_class != self.special_g_object_safe(constants.SO_LARGEPOSITIVEINTEGER_CLASS):
value = value.neg()
try:
value.toint()
except OverflowError:
return False
return True
def isunsignedinteger(self, g_object):
return self.islargeinteger(g_object) and g_object.len_bytes() == constants.BYTES_PER_MACHINE_INT
def isbiginteger(self, g_object):
return self.islargeinteger(g_object) and g_object.len_bytes() > constants.BYTES_PER_MACHINE_INT
def _assign_prebuilt_constants(self):
for name, so_index in UNROLLING_CONSTANTS:
w_object = getattr(self.space, "w_%s" % name)
g_object = None
try:
g_object = self.special_g_object(so_index)
except IndexError:
g_object = self.smalltalk_g_at(name)
if g_object is not None:
if g_object.w_object is None:
g_object.w_object = w_object
elif not g_object.w_object.is_nil(self.space):
raise Warning('Object %s found in multiple places in the special objects array' % name)
class NonSpurReader(BaseReaderStrategy):
def read_body(self):
self.stream.reset_count()
self._progress.next_stage(self.stream.length())
while self.stream.count < self.endofmemory:
chunk, pos = self.read_object()
self._progress.update(self.stream.count)
self.chunklist.append(chunk)
self.chunks[pos + self.oldbaseaddress] = chunk
self.stream.close()
self.stream = None
rgc.collect()
return self.chunklist # return for testing
def init_g_objects(self):
self.init_compactclassesarray()
BaseReaderStrategy.init_g_objects(self)
def read_object(self):
kind = self.stream.peek() & 3 # 2 bits
if kind == 0: # 00 bits
chunk, pos = self.read_3wordobjectheader()
elif kind == 1: # 01 bits
chunk, pos = self.read_2wordobjectheader()
elif kind == 3: # 11 bits
chunk, pos = self.read_1wordobjectheader()
else: # 10 bits
raise error.CorruptImageError("Unused block not allowed in image")
size = intmask(chunk.size)
chunk.data = [self.stream.next()
for _ in range(size - 1)] #size-1, excluding header
return chunk, pos
def read_1wordobjectheader(self):
kind, size, format, classid, idhash = (
splitter[2,6,4,5,12](self.stream.next()))
assert kind == 3
return ImageChunk(size, format, classid, idhash), self.stream.count - 4
def read_2wordobjectheader(self):
assert self.stream.peek() & 3 == 1 #kind
classid = self.stream.next() - 01 # remove headertype to get pointer
kind, size, format, _, idhash = splitter[2,6,4,5,12](self.stream.next())
assert kind == 1
return ImageChunk(size, format, classid, idhash), self.stream.count - 4
def read_3wordobjectheader(self):
kind, size = splitter[2,30](self.stream.next())
assert kind == 0
assert splitter[2](self.stream.peek())[0] == 0 #kind
classid = self.stream.next() - 00 # remove headertype to get pointer
kind, _, format, _, idhash = splitter[2,6,4,5,12](self.stream.next())
assert kind == 0
return ImageChunk(size, format, classid, idhash), self.stream.count - 4
def init_compactclassesarray(self):
""" from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """
special = self.chunks[self.specialobjectspointer]
assert special.size > 24 #at least
assert special.format == 2
# squeak-specific: compact classes array
chunk = self.chunks[special.data[COMPACT_CLASSES_ARRAY]]
assert len(chunk.data) == 31
assert chunk.format == 2
self.compactclasses = [self.chunks[pointer] for pointer in chunk.data]
def g_class_of(self, chunk):
if chunk.iscompact():
return self.compactclasses[chunk.classid
- 1].g_object # Smalltalk is 1-based indexed
else:
return self.chunks[chunk.classid].g_object
def decode_pointers(self, g_object, space, end=-1):
if end == -1:
end = len(g_object.chunk.data)
pointers = []
for i in range(end):
pointer = g_object.chunk.data[i]
if (pointer & 1) == 1:
# pointer = ...1
# tagged integer
small_int = GenericObject()
small_int.initialize_int(pointer >> 1, self, space)
pointers.append(small_int)
else:
# pointer = ...0
pointers.append(self.chunks[pointer].g_object)
return pointers
def instantiate(self, g_object):
""" 0 no fields
1 fixed fields only (all containing pointers)
2 indexable fields only (all containing pointers)
3 both fixed and indexable fields (all containing pointers)
4 both fixed and indexable weak fields (all containing pointers).
5 unused
6 indexable word fields only (no pointers)
7 indexable long (64-bit) fields (only in 64-bit images)
8-11 indexable byte fields only (no pointers) (low 2 bits are low 2 bits of size)
12-15 compiled methods:
# of literal oops specified in method header,
followed by indexable bytes (same interpretation of low 2 bits as above)
"""
# the instantiate call circumvents the constructors
# and makes empty objects
if self.ischar(g_object):
return objectmodel.instantiate(W_Character)
elif self.isblockclosure(g_object):
return objectmodel.instantiate(W_BlockClosure)
elif self.ispointers(g_object):
return objectmodel.instantiate(W_PointersObject)
elif g_object.format == 5:
raise error.CorruptImageError("Unknown format 5")
elif self.isfloat(g_object):
return objectmodel.instantiate(W_Float)
elif self.issignedinteger(g_object):
return objectmodel.instantiate(W_SmallInteger)
elif self.isunsignedinteger(g_object):
return objectmodel.instantiate(W_LargeIntegerWord)
elif self.isbiginteger(g_object):
return objectmodel.instantiate(W_LargeIntegerBig)
elif self.iswords(g_object):
return objectmodel.instantiate(W_WordsObject)
elif g_object.format == 7:
raise error.CorruptImageError("Unknown format 7, no 64-bit support yet :-)")
elif self.isbytes(g_object):
return objectmodel.instantiate(W_BytesObject)
elif self.iscompiledmethod(g_object):
return objectmodel.instantiate(W_PreSpurCompiledMethod)
else:
assert 0, "not reachable"
def isbytes(self, g_object):
return 8 <= g_object.format <= 11
def ischar(self, g_object):
g_char = self.special_g_object_safe(constants.SO_CHARACTER_CLASS)
return (self.ispointers(g_object) and g_object.g_class == g_char)
def iswords(self, g_object):
return g_object.format == 6
def isblockclosure(self, g_object):
g_closure = self.special_g_object_safe(constants.SO_BLOCKCLOSURE_CLASS)
return self.ispointers(g_object) and g_object.g_class == g_closure
def ispointers(self, g_object):
return g_object.format < 5
def isweak(self, g_object):
return g_object.format == 4
def iscompiledmethod(self, g_object):
return 12 <= g_object.format <= 15
def literal_count_of_method_header(self, untagged_header):
_, literalsize, _, _, _ = constants.decode_compiled_method_header(untagged_header)
return literalsize
class AncientReader(NonSpurReader):
"""Reader strategy for pre-4.0 images"""
pass
class SpurReader(BaseReaderStrategy):
FREE_OBJECT_CLASS_INDEX_PUN = 0
def __init__(self, imageReader, version, stream, space):
BaseReaderStrategy.__init__(self, imageReader, version, stream, space)
space.is_spur.activate()
def continue_read_header(self):
BaseReaderStrategy.continue_read_header(self)
self.hdrNumStackPages = self.stream.next_short()
self.hdrCogCodeSize = self.stream.next_short()
self.hdrEdenBytes = self.stream.next() # nextWord32
self.hdrMaxExtSemTabSize = self.stream.next_short()
self.stream.skipbytes(2) # unused, realign to word boundary
self.firstSegSize = self.stream.next()
self.freeOldSpaceInImage = self.stream.next()
_SLOTS_MASK = 0xFFL << 56
SLOTS_MASK = intmask(_SLOTS_MASK) if system.IS_64BIT else r_ulonglong(_SLOTS_MASK)
def read_body(self):
self.stream.reset_count()
segmentEnd = self.firstSegSize
currentAddressSwizzle = self.oldbaseaddress
self._progress.next_stage(self.stream.length())
while self.stream.count < segmentEnd:
while self.stream.count < segmentEnd - 16:
chunk, pos = self.read_object()
self._progress.update(self.stream.count)
if chunk.classid == self.FREE_OBJECT_CLASS_INDEX_PUN:
continue # ignore free chunks
self.chunklist.append(chunk)
self.chunks[pos + currentAddressSwizzle] = chunk
self.log("bridge: %s (%s)" % (self.stream.count, self.stream.count + currentAddressSwizzle))
# read bridge
bridge = r_uint64(self.stream.next_qword())
if bridge & self.SLOTS_MASK == 0:
bridgeSpan = 0
else:
bridgeSpan = intmask(r_uint64(bridge & ~self.SLOTS_MASK))
nextSegmentSize = intmask(r_uint64(self.stream.next_qword()))
self.log("bridgeSpan: %s; nextSegmentSize: %s" % (bridgeSpan, nextSegmentSize))
assert bridgeSpan >= 0
assert nextSegmentSize >= 0
assert self.stream.count == segmentEnd
# if nextSegmentSize is zero, the end of the image has been reached
if nextSegmentSize == 0:
self.log("last segment end: %s " % (segmentEnd + currentAddressSwizzle))
break
segmentEnd = segmentEnd + nextSegmentSize
# address swizzle is in bytes, but bridgeSpan is in image words
currentAddressSwizzle += (bridgeSpan * (8 if self.version.is_64bit else 4))
self.stream.close()
self.stream = None
return self.chunklist # return for testing
def read_object(self):
# respect new header format
pos = self.stream.count
assert pos % 8 == 0, "every object must be 64-bit aligned"
headerWord = self.stream.next_qword()
classid_l, _, format_l, _, hash_l, _, size_l = splitter[22,2,5,3,22,2,8](headerWord)
classid, format, hash = intmask(classid_l), intmask(format_l), intmask(hash_l)
OVERFLOW_SLOTS = 255
if size_l == OVERFLOW_SLOTS:
size_l = headerWord & ~self.SLOTS_MASK
pos = self.stream.count
classid_l, _, format_l, _, hash_l, _, overflow_size = splitter[22,2,5,3,22,2,8](self.stream.next_qword())
classid, format, hash = intmask(classid_l), intmask(format_l), intmask(hash_l)
assert overflow_size == OVERFLOW_SLOTS, "objects with long header must have 255 in slot count"
size = r_uint(r_uint32(size_l)) # reading 64 bit images not supported in 32 bit build
assert 0 <= format <= 31
chunk = ImageChunk(size, format, classid, hash)
# the minimum object length is 16 bytes, i.e. 8 header + 8 payload
# (to accommodate a forwarding ptr)
chunk.data = [self.stream.next() for _ in range(self.words_for(size))]
if len(chunk.data) != size:
# remove trailing alignment slots
assert size < len(chunk.data) and len(chunk.data) - size < 4
chunk.data = chunk.data[:size]
if format < 10 and classid != self.FREE_OBJECT_CLASS_INDEX_PUN:
for slot in chunk.data:
assert slot % 16 != 0 or slot >= self.oldbaseaddress
assert format != 0 or classid == 0 or size == 0, "empty objects must not have slots"
return chunk, pos
def words_for(self, size):
# see Spur32BitMemoryManager>>smallObjectBytesForSlots:
if size <= 1:
return 2
else:
return size + (size & 1)
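    # e.g. words_for(3) == 4 and words_for(0) == words_for(1) == 2: payloads
    # are padded to 64-bit alignment and never smaller than 8 bytes.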
def g_class_of(self, chunk):
major_class_index = self.major_class_index_of(chunk.classid)
minor_class_index = self.minor_class_index_of(chunk.classid)
HIDDEN_ROOTS_CHUNK = 4 # after nil, true, false, freeList
hiddenRoots = self.chunklist[HIDDEN_ROOTS_CHUNK]
classTablePage = self.chunks[hiddenRoots.data[major_class_index]]
return self.chunks[classTablePage.data[minor_class_index]].g_object
def major_class_index_of(self, classid):
return classid >> 10
def minor_class_index_of(self, classid):
return classid & ((1 << 10) - 1)
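    # e.g. classid 5123 -> class table page 5 (5123 >> 10), entry 3 (5123 & 1023)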
def decode_pointers(self, g_object, space, end=-1):
if end == -1:
end = len(g_object.chunk.data)
pointers = []
for i in range(end):
pointer = g_object.chunk.data[i]
if (pointer & 3) == 0:
# pointer = ...00
try:
pointers.append(self.chunks[pointer].g_object)
except KeyError:
print "WARN: Bogus pointer: %d. Treating as small int." % pointer
small_int = GenericObject()
small_int.initialize_int(pointer >> 1, self, space)
pointers.append(small_int)
elif (pointer & 1) == 1:
# pointer = ....1
# tagged integer
small_int = GenericObject()
small_int.initialize_int(pointer >> 1, self, space)
pointers.append(small_int)
else:
#assert (pointer & 3) == 2
# pointer = ...10
# immediate character
character = GenericObject()
character.initialize_char(pointer >> 2, self, space)
pointers.append(character)
return pointers
def instantiate(self, g_object):
""" 0 no fields
1 fixed fields only (all containing pointers)
2 indexable fields only (all containing pointers)
3 both fixed and indexable fields (all containing pointers)
4 indexable weak fields (all containing pointers)
5 fixed weak fields (all containing pointers)
6-8 unused
9 indexable 64 bit fields (no pointers)
10-11 indexable 32 bit fields (no pointers)
12-15 indexable 16 bit fields (no pointers)
16-23 indexable byte fields (no pointers)
for the above, the lower bits are the lower bits of the size
24-31 compiled methods:
# of literal oops specified in method header,
followed by indexable bytes (same interpretation of low bits as above)
"""
# the instantiate call circumvents the constructors
# and makes empty objects
if self.ischar(g_object):
return objectmodel.instantiate(W_Character)
elif self.isblockclosure(g_object):
return objectmodel.instantiate(W_BlockClosure)
elif self.ispointers(g_object):
return objectmodel.instantiate(W_PointersObject)
elif self.isfloat(g_object):
return objectmodel.instantiate(W_Float)
elif self.issignedinteger(g_object):
return objectmodel.instantiate(W_SmallInteger)
elif self.isunsignedinteger(g_object):
return objectmodel.instantiate(W_LargeIntegerWord)
elif self.isbiginteger(g_object):
return objectmodel.instantiate(W_LargeIntegerBig)
elif self.iswords(g_object):
return objectmodel.instantiate(W_WordsObject)
elif self.isbytes(g_object):
return objectmodel.instantiate(W_BytesObject)
elif self.iscompiledmethod(g_object):
return objectmodel.instantiate(W_SpurCompiledMethod)
elif g_object.format in (6, 7, 8):
raise error.CorruptImageError("Unknown format " + str(g_object.format))
else:
assert 0, "not reachable"
def ischar(self, g_object):
g_char = self.special_g_object_safe(constants.SO_CHARACTER_CLASS)
return (self.ispointers(g_object) and g_object.g_class == g_char)
def isblockclosure(self, g_object):
g_closure = self.special_g_object_safe(constants.SO_BLOCKCLOSURE_CLASS)
return self.ispointers(g_object) and g_closure == g_object.g_class
def ispointers(self, g_object):
return g_object.format < 6
def isweak(self, g_object):
return 4 <= g_object.format <= 5
def iswords(self, g_object):
if not system.IS_64BIT and g_object.format == 9:
# 64-bit words objects are not supported in our 32-bit VM, because
# we mush them all together
self.log("Warning: a 64bit-words object is being truncated to 32-bits.")
return 9 <= g_object.format <= 15
def isbytes(self, g_object):
return 16 <= g_object.format <= 23
def iscompiledmethod(self, g_object):
return 24 <= g_object.format <= 31
def literal_count_of_method_header(self, untagged_header):
return untagged_header & 0x7fff # AlternateHeaderNumLiteralsMask
# ____________________________________________________________
class SqueakImage(object):
_immutable_fields_ = [
"space",
"w_asSymbol",
"version",
"startup_time",
"w_simulatePrimitive",
]
def __init__(self, reader):
space = self.space = reader.space
self.w_asSymbol = self.find_symbol(space, reader, "asSymbol")
self.lastWindowSize = reader.lastWindowSize
self.version = reader.version
self.startup_time = time.time()
from rsqueakvm.plugins.simulation import SIMULATE_PRIMITIVE_SELECTOR
self.w_simulatePrimitive = self.find_symbol(space, reader, SIMULATE_PRIMITIVE_SELECTOR)
def find_symbol(self, space, reader, symbol):
w_dnu = space.w_doesNotUnderstand
assert isinstance(w_dnu, W_BytesObject)
assert space.unwrap_string(w_dnu) == "doesNotUnderstand:"
w_Symbol = w_dnu.getclass(space)
w_obj = None
# bit annoying that we have to hunt through the image :-(
for chunk in reader.chunklist:
w_obj = chunk.g_object.w_object
if not isinstance(w_obj, W_BytesObject):
continue
if not w_obj.getclass(space).is_same_object(w_Symbol):
continue
if space.unwrap_string(w_obj) == symbol:
return w_obj
w_obj = space.w_nil
return w_obj
def special(self, index):
if index >= self.space.w_special_objects.size():
return None
else:
return self.space.w_special_objects.at0(self.space, index)
# ____________________________________________________________
class GenericObject(object):
""" Intermediate representation of squeak objects. To establish all
pointers as object references, ImageReader creates instances of
GenericObject from the image chunks, and uses them as starting
point for the actual create of rsqueakvm.model classes.
"""
def __init__(self):
self.reader = None
self.filled_in = False
self.filled_in_weak = False
self.pointers = None
self.g_class = None
self.chunk = None
def isinitialized(self):
return self.reader is not None
def initialize_int(self, value, reader, space):
self.reader = reader
if value in reader.intcache:
w_int = reader.intcache[value]
else:
w_int = space.wrap_int(value)
reader.intcache[value] = w_int
self.w_object = w_int
self.filled_in = True
def initialize_char(self, untagged_value, reader, space):
self.reader = reader
self.w_object = W_Character(untagged_value)
self.filled_in = True
def initialize(self, chunk, reader, space):
self.reader = reader
self.chunk = chunk # for bytes, words and compiledmethod
self.init_pointers()
self.init_g_class()
self.w_object = None
@property
def size(self):
if self.chunk is None: return 0
return self.chunk.size
@property
def hash(self):
if self.chunk is None: return 0
return self.chunk.hash
@property
def format(self):
if self.chunk is None: return 0
return self.chunk.format
def __repr__(self):
return "<GenericObject %s>" % ("uninitialized" if not self.isinitialized()
else self.w_object if hasattr(self, "w_object") and self.w_object
else "size=%d hash=%d format=%d" % (self.size, self.hash, self.format))
def init_g_class(self):
self.g_class = self.reader.g_class_of(self.chunk)
def init_pointers(self):
space = self.reader.space
if self.reader.ispointers(self):
ptrs = self.reader.decode_pointers(self, space)
assert None not in ptrs
elif self.reader.iscompiledmethod(self):
header = self.chunk.data[0] >> 1 # untag tagged int
literalsize = self.reader.literal_count_of_method_header(header)
ptrs = self.reader.decode_pointers(self, space, literalsize + 1) # adjust +1 for the header
assert None not in ptrs
else:
ptrs = None
self.pointers = ptrs
def init_w_object(self, space):
if self.w_object is None:
self.w_object = self.reader.instantiate(self)
return self.w_object
def isweak(self):
return self.reader.isweak(self)
def len_bytes(self):
sz = self.reader.len_bytes_of(self.chunk)
return sz - (self.format & 3)
def get_bytes(self):
bytes = self.reader.get_bytes_of(self.chunk)
stop = len(bytes) - (self.format & 3)
assert stop >= 0
return bytes[:stop] # omit odd bytes
def get_ruints(self, required_len=-1):
from rpython.rlib.rarithmetic import r_uint32, r_uint
words = [r_uint(r_uint32(x)) for x in self.chunk.data]
if required_len != -1 and len(words) != required_len:
raise error.CorruptImageError("Expected %d words, got %d" % (required_len, len(words)))
return words
def fillin(self, space):
if not self.filled_in:
self.filled_in = True
self.w_object.fillin(space, self)
self.chunk.data = None
def fillin_weak(self, space):
if not self.filled_in_weak and self.isweak():
self.filled_in_weak = True
self.w_object.fillin_weak(space, self)
self.chunk = None
def fillin_finalize(self, space):
self.w_object.fillin_finalize(space, self)
def get_pointers(self):
assert self.pointers is not None
ptrs_g = [g_object.w_object for g_object in self.pointers]
self.pointers = None
return ptrs_g
def get_class(self):
w_class = self.g_class.w_object
assert isinstance(w_class, W_PointersObject)
return w_class
def get_hash(self):
return self.chunk.hash
@objectmodel.not_rpython
def as_string(self):
return "".join([chr(c) for bytes in
[splitter[8,8,8,8](w) for w in self.chunk.data]
for c in bytes if c != 0])
@objectmodel.not_rpython
def classname(self):
return self.g_class.pointers[6].as_string()
class ImageChunk(object):
""" A chunk knows the information from the header, but the body of the
object is not decoded yet."""
def __init__(self, size, format, classid, hash, data=None):
self.size = size
self.format = format
self.classid = classid
self.hash = hash
# list of integers forming the body of the object
self.data = data
self.g_object = GenericObject()
def __repr__(self):
return "ImageChunk(size=%(size)d, format=%(format)d, " \
"classid=%(classid)d, hash=%(hash)d, data=%(data)r)" \
% self.__dict__
def __eq__(self, other):
"(for testing)"
return (self.__class__ is other.__class__ and
self.format == other.format and
self.classid == other.classid and
self.hash == other.hash and
self.data == other.data)
def __ne__(self, other):
"(for testing)"
return not self == other
def as_g_object(self, reader, space):
if not self.g_object.isinitialized():
self.g_object.initialize(self, reader, space)
return self.g_object
def iscompact(self):
# pre-Spur
return 0 < self.classid < 32
class SpurImageWriter(object):
_immutable_fields_ = ["space", "image", "trace_queue", "oop_map"]
    # XXX: always writes little-endian 32-bit Spur-format images
image_header_size = 64
word_size = 4
def __init__(self, interp, filename):
from rpython.rlib import streamio, objectmodel
self.space = interp.space
self.image = interp.image
self.f = streamio.open_file_as_stream(filename, mode="wb")
self.next_chunk = self.image_header_size
self.oop_map = {}
self.trace_queue = []
self.hidden_roots = None
@objectmodel.specialize.argtype(1)
def len_and_header(self, obj):
import math
n = self.fixed_and_indexable_size_for(obj)
if isinstance(obj, W_BytesObject) or isinstance(obj, W_LargeInteger) or isinstance(obj, W_CompiledMethod):
size = int(math.ceil(n / float(self.word_size)))
else:
size = n
if size < 255:
return n, size + 2, 2
else:
return n, size + 2, 4
def frame_size_for(self, obj):
w_method = None
if obj.getclass(self.space).is_same_object(self.space.w_MethodContext):
w_method = obj.fetch(self.space, constants.MTHDCTX_METHOD)
if not w_method.is_nil(self.space):
w_method.compute_frame_size()
elif obj.getclass(self.space).is_same_object(self.space.w_BlockContext):
w_home = obj.fetch(self.space, constants.BLKCTX_HOME_INDEX)
return self.frame_size_for(w_home)
return constants.COMPILED_METHOD_FULL_FRAME_SIZE
@objectmodel.specialize.argtype(1)
def fixed_and_indexable_size_for(self, obj):
if (isinstance(obj, W_PointersObject) and
(obj.getclass(self.space).is_same_object(self.space.w_MethodContext) or
obj.getclass(self.space).is_same_object(self.space.w_BlockContext))):
return obj.instsize() + self.frame_size_for(obj)
elif isinstance(obj, W_SpurCompiledMethod):
return obj.varsize()
elif isinstance(obj, W_PreSpurCompiledMethod):
if obj.primitive() != 0:
                return obj.varsize() + 3  # account for the three extra bytes holding the primitive index
else:
return obj.varsize()
else:
return obj.instsize() + obj.varsize()
def padding_for(self, length):
if length - 2 == 0:
return 8
elif (length % 2 != 0 and self.word_size == 4):
return 4
else:
return 0
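    # e.g. an object with no payload slots still gets 8 bytes of body (room
    # for a forwarding pointer), and an odd count of 32-bit slots is padded
    # to the next 64-bit boundary.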
def trace_image(self, s_frame):
w_active_process = wrapper.scheduler(self.space).active_process()
active_process = wrapper.ProcessWrapper(self.space, w_active_process)
active_process.store_suspended_context(s_frame.w_self())
try:
# The first objects need to be in this order:
# 1. nil
# 2. false
# 3. true
# 4. free list
# 5. hidden roots
# 6. special objects array
self.reserve(self.space.w_nil)
self.reserve(self.space.w_false)
self.reserve(self.space.w_true)
# free list object. we need a word array kind of thing. Bitmaps are like that
self.reserve(W_WordsObject(self.space, self.space.w_Bitmap, self.word_size * 8))
self.hidden_roots = W_PointersObject(self.space, self.space.w_Array, 2**12 + 8)
self.reserve(self.hidden_roots)
w_special_objects = self.space.w_special_objects
for i in range(w_special_objects.size()):
w_obj = w_special_objects.fetch(self.space, i)
if isinstance(w_obj, W_SmallInteger):
# This cannot be...
val = self.space.unwrap_int(w_obj)
if val >= 0:
w_cls = self.space.w_LargePositiveInteger
else:
w_cls = self.space.w_LargeNegativeInteger
w_special_objects.store(
self.space, i,
W_LargeIntegerWord(self.space, w_cls, r_uint(val), 4))
self.reserve(w_special_objects)
self.trace_until_finish()
# tracing through the image will have populated the hidden roots and
# its classtables. write the hidden roots object again, which
# triggers writing its classtables
assert len(self.trace_queue) == 0
self.trace_queue.append(self.hidden_roots)
self.trace_until_finish()
self.write_last_bridge()
self.write_file_header(w_special_objects)
finally:
self.f.close()
active_process.store_suspended_context(self.space.w_nil)
@jit.dont_look_inside
def trace_until_finish(self):
while True:
if len(self.trace_queue) == 0:
break
obj = self.trace_queue.pop()
self.write_and_trace(obj)
def write_file_header(self, w_special_objects):
sp_obj_oop = self.oop_map[w_special_objects][0]
image_header_size = 64 if self.word_size == 4 else 128
displaysize = self.image.lastWindowSize
hdrflags = (0 + # 0/1 fullscreen or not
0b10 + # 0/2 imageFloatsLittleEndian or not
0x10 + # preemption does not yield
0) # old finalization
self.f.seek(0, 0)
version = 6521
if self.space.uses_block_contexts.is_set():
version = 0x1234 # our custom version magic
self.write_word(version)
self.write_word(image_header_size) # hdr size
self.write_word(self.next_chunk - image_header_size) # memory size
self.write_word(image_header_size) # start of memory
self.write_word(sp_obj_oop)
self.write_word(0xffee) # last hash
self.write_word(displaysize)
self.write_word(hdrflags)
self.write_word(0) # extra VM memory
self.write_word(0) # (num stack pages << 16) | cog code size
self.write_word(0) # eden bytes
self.write_word(0) # max ext semaphore size << 16
self.write_word(self.next_chunk - image_header_size) # first segment size
self.write_word(0) # free old space in image
self.write_word(0) # padding
self.write_word(0) # padding
def write_last_bridge(self):
self.f.seek(self.next_chunk, 0)
self.next_chunk = self.next_chunk + 16
# put the magic FINAL BRIDGE header
# FIXME: 64bit??
self.write_word((1 << 30) + (10 << 24) + 3)
self.write_word(0)
self.write_word(0)
self.write_word(0)
def insert_class_into_classtable(self, obj):
classhash = obj.gethash()
majoridx = classhash >> 10
minoridx = classhash & ((1 << 10) - 1)
page = self.hidden_roots.fetch(self.space, majoridx)
if page.is_nil(self.space):
page = W_PointersObject(self.space, self.space.w_Array, 2**10)
self.hidden_roots.store(self.space, majoridx, page)
# XXX: TODO: Why does this happen??
# assert page.fetch(self.space, minoridx).is_nil(self.space)
page.store(self.space, minoridx, obj)
@objectmodel.specialize.argtype(1)
def write_and_trace(self, obj):
if obj.is_class(self.space):
self.insert_class_into_classtable(obj)
        # always make sure we're tracing our own class, too. This is really
        # important for metaclasses and old images, where a compact class might
        # not otherwise be traced, because it would be in the header.
self.reserve(obj.getclass(self.space))
oop, length, hdrsize, sz, padding = self.oop_map[obj]
self.write_header(hdrsize, sz, obj, oop)
assert self.f.tell() == (oop + (2 * self.word_size))
if isinstance(obj, W_BytesObject) or isinstance(obj, W_LargeInteger):
self.write_bytes_object(obj)
elif isinstance(obj, W_WordsObject) or isinstance(obj, W_DisplayBitmap) or isinstance(obj, W_Float):
self.write_words_object(obj)
elif isinstance(obj, W_CompiledMethod):
self.write_compiled_method(obj)
else:
self.write_pointers_object(obj)
self.f.write("\0" * padding)
assert self.f.tell() == oop + length * self.word_size + padding
@objectmodel.specialize.argtype(1)
def reserve(self, obj):
if isinstance(obj, W_SmallInteger):
newoop = 0
if obj.value >= 0:
if obj.value <= constants.TAGGED_MAXINT32:
newoop = (obj.value << 1) + 1
else:
return self.reserve(W_LargeIntegerWord(
self.space, self.space.w_LargePositiveInteger,
r_uint(obj.value), constants.BYTES_PER_MACHINE_INT))
else:
if obj.value >= constants.TAGGED_MININT32:
newoop = intmask((((r_int64(1) << 31) + obj.value) << 1) + 1)
else:
return self.reserve(W_LargeIntegerWord(
self.space, self.space.w_LargeNegativeInteger,
r_uint(obj.value), constants.BYTES_PER_MACHINE_INT))
return (newoop, 0, 0, 0, 0)
elif isinstance(obj, W_Character):
assert obj.value < constants.TAGGED_MAXINT32
return ((obj.value << 2) + 0b10, 0, 0, 0, 0)
else:
oop = self.oop_map.get(obj, (0, 0, 0, 0, 0))
if oop[0] > 0:
return oop
else:
sz, length, hdrsize = self.len_and_header(obj)
oop = self.next_chunk + (hdrsize - 2) * self.word_size
padding = self.padding_for(length)
self.next_chunk = oop + length * self.word_size + padding
retval = (oop, length, hdrsize, sz, padding)
self.oop_map[obj] = retval
self.trace_queue.append(obj)
if (not self.space.is_spur.is_set()) and obj.is_class(self.space):
# rehash all classes in non-spur images, so we don't get
# collisions
obj.rehash()
return retval
def write_bytes_object(self, obj):
self.f.write(self.space.unwrap_string(obj))
paddingbytes = self.word_size - (obj.size() % self.word_size)
if paddingbytes != self.word_size:
self.f.write("\0" * paddingbytes)
def write_words_object(self, obj):
self.f.write(self.space.unwrap_string(obj))
if self.word_size == 8 and (obj.size() % 2 == 1):
self.f.write("\0" * 4)
def write_compiled_method(self, obj):
cmbytes = obj.getbytes()
if self.space.is_spur.is_set():
self.write_word((obj.getheader() << 1) + 1) # header is saved as tagged int
else:
newheader = (obj.literalsize # 15 bits
| (0 << 15) # is optimized, 1 bit
| ((1 if (obj.primitive() != 0) else 0) << 16) # 1 bit
| ((1 if obj.islarge else 0) << 17) # 1 bit
| (obj.tempsize() << 18) # 6 bits
| (obj.argsize << 24) # 4 bits
| (0 << 28) # access mod, 2 bits
| (0 << 30)) # instruction set bit, 1 bit
self.write_word((newheader << 1) + 1) # header is saved as tagged int
for i in range(obj.getliteralsize() / constants.BYTES_PER_WORD):
self.write_word(self.reserve(obj.getliteral(i))[0])
paddingbytes = 0
if (not self.space.is_spur.is_set()) and obj.primitive() != 0:
# we must insert the primitive bytecode and index into the first
# three bytes
self.f.write(chr(139)) # call prim bytecode
self.f.write(chr(obj.primitive() & 255)) # lower bits
self.f.write(chr((obj.primitive() >> 8) & 255)) # higher bits
paddingbytes = self.word_size - ((len(cmbytes) + 3) % self.word_size)
else:
paddingbytes = self.word_size - (len(cmbytes) % self.word_size)
self.f.write("".join(cmbytes))
if paddingbytes != self.word_size:
self.f.write("\0" * paddingbytes)
def write_pointers_object(self, obj):
if (not self.space.is_spur.is_set()) and obj.is_class(self.space) and (obj.size() > constants.CLASS_FORMAT_INDEX):
# we must retrofit the new class format
# The classformat in Spur, as an integer value, is:
# <5 bits inst spec><16 bits inst size>
w_oldfmt = obj.fetch(self.space, constants.CLASS_FORMAT_INDEX)
oldfmt = self.space.unwrap_int(w_oldfmt)
instsize_lo = (oldfmt >> 1) & 0x3F
instsize_hi = (oldfmt >> (9 + 1)) & 0xC0
oldinstsize = (instsize_lo | instsize_hi) - 1 # subtract hdr
instspec = self.convert_instspec_to_spur((oldfmt >> 7) & 15)
newfmt = ((instspec & 0x1f) << 16) | (oldinstsize & 0xffff)
w_newfmt = self.space.wrap_int(newfmt)
for i in range(obj.size()):
if i != constants.CLASS_FORMAT_INDEX:
self.write_word(self.reserve(obj.fetch(self.space, i))[0])
else:
self.write_word(self.reserve(w_newfmt)[0])
else:
for i in range(obj.size()):
self.write_word(self.reserve(obj.fetch(self.space, i))[0])
if (obj.getclass(self.space).is_same_object(self.space.w_MethodContext) or
obj.getclass(self.space).is_same_object(self.space.w_BlockContext)):
# fill out nils beyond the knowable end of stack
for i in range(self.frame_size_for(obj) - obj.varsize()):
self.write_word(self.reserve(self.space.w_nil)[0])
def write_word(self, word):
# FIXME: 64bit??
self.f.write("".join(
[chr(word & r_uint(0x000000ff)),
chr((word & r_uint(0x0000ff00)) >> 8),
chr((word & r_uint(0x00ff0000)) >> 16),
chr((word & r_uint(0xff000000)) >> 24)]))
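    # Note: in plain (non-RPython) Python the byte-by-byte packing above is
    # equivalent to struct.pack('<I', word & 0xffffffff); it is spelled out
    # manually here, presumably for RPython translation compatibility.
    # Minimal sanity sketch:
    #   import struct
    #   assert struct.pack('<I', 0x12345678) == '\x78\x56\x34\x12'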
def write_header(self, hdrsize, sz, obj, oop):
self.f.seek(oop - ((hdrsize - 2) * self.word_size), 0)
self.f.write(self.headers_for_hash_numfields(
obj.getclass(self.space),
obj.gethash(),
sz))
# conversion map from old formats to new formats
old_to_spur_specs = [0,1,2,3,4,-1,10,9,16,16,16,16,24,24,24,24]
def convert_instspec_to_spur(self, spec):
fmt = self.old_to_spur_specs[spec]
assert fmt >= 0
# if fmt == 4 and not Class.isvariable():
# fmt = 5 # weak objects now split in fixed and indexable types
return fmt
def headers_for_hash_numfields(self, Class, Hash, size):
import math
from rsqueakvm.storage_classes import BYTES, COMPILED_METHOD, LARGE_INTEGER
classshadow = Class.as_class_get_shadow(self.space)
length = r_uint64(size)
wordlen = size
fmt = 0
w_fmt = Class.fetch(self.space, constants.CLASS_FORMAT_INDEX)
assert isinstance(w_fmt, W_SmallInteger)
if self.space.is_spur.is_set():
fmt = (w_fmt.value >> 16) & 0x1f
else:
fmt = self.convert_instspec_to_spur((w_fmt.value >> 7) & 15)
if (classshadow.instance_kind == BYTES or
classshadow.instance_kind == COMPILED_METHOD or
classshadow.instance_kind == LARGE_INTEGER):
wordlen = int(math.ceil(size / 4.0))
length = r_uint64(wordlen)
fmt = fmt | ((wordlen * 4) - size)
header = r_uint64(0)
length_header = r_uint64(0)
if wordlen >= 255:
length_header = r_uint64(length | (r_uint64(0xff) << 56))
length = r_uint64(0xff)
header = header | ((length << 56) |
(r_uint64(Hash) << 32) |
(r_uint64(fmt) << 24) |
(r_uint64(Class.gethash())))
if wordlen >= 255:
extra_bytes = self.ruint64_tobytes(length_header)
header_bytes = self.ruint64_tobytes(header)
return extra_bytes + header_bytes
else:
return self.ruint64_tobytes(header)
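    # Layout note, derived from the packing above: within the 64-bit header,
    # bits 56..63 hold the slot count (saturated to 0xff, with the real count
    # moved into an extra leading length word), bits 32..55 the identity hash,
    # bits 24..31 the format field, and bits 0..23 the class hash used as the
    # class index. A minimal sanity sketch with hypothetical values:
    #   header = (3 << 56) | (0x1234 << 32) | (1 << 24) | 0x10
    #   assert (header >> 56) & 0xff == 3 and (header >> 24) & 0xff == 1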
def ruint64_tobytes(self, i):
res = ['\0'] * 8
value = i
mask = r_uint64(0xff)
for i in range(8):
res[i] = chr(intmask(value & mask))
value >>= 8
return "".join(res)
|
145976
|
import time
from stacksampler import Sampler
def slp():
time.sleep(0.00001)
def fn():
for i in range(50000):
slp()
s = Sampler()
def test_foo():
s.start()
fn()
    print(s.output_stats())
if __name__ == '__main__':
test_foo()
|
145988
|
import logging
from gehomesdk.erd.converters.abstract import ErdReadOnlyConverter
from gehomesdk.erd.converters.primitives import *
from gehomesdk.erd.values.laundry import ErdTumbleStatus
_LOGGER = logging.getLogger(__name__)
class TumbleStatusConverter(ErdReadOnlyConverter[ErdTumbleStatus]):
def erd_decode(self, value: str) -> ErdTumbleStatus:
try:
return ErdTumbleStatus(erd_decode_int(value))
except (KeyError, ValueError):
return ErdTumbleStatus.NOT_AVAILABLE
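# Hedged note: erd_decode_int (from the primitives module) is assumed to parse
# the raw ERD hex string into an int; any value that does not map onto the
# ErdTumbleStatus enum then falls back to NOT_AVAILABLE via the except clause.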
|
146005
|
from pathlib import Path
import pytest
import pyfqmr
# Get the /example folder at the root of this repo
EXAMPLES_DIR = Path(__file__, "..", "..", "example").resolve()
def test_example():
import trimesh as tr
bunny = tr.load_mesh(EXAMPLES_DIR / 'Stanford_Bunny_sample.stl')
simp = pyfqmr.Simplify()
simp.setMesh(bunny.vertices, bunny.faces)
simp.simplify_mesh(len(bunny.faces) // 2)
vertices, faces, normals = simp.getMesh()
assert len(faces) / len(bunny.faces) == pytest.approx(.5, rel=.05)
simplified = tr.Trimesh(vertices, faces, normals)
    assert simplified.area == pytest.approx(bunny.area, rel=.05)
|
146030
|
from fastapi import FastAPI, HTTPException, Depends, Request
from fastapi.responses import JSONResponse
from fastapi_jwt_auth import AuthJWT
from fastapi_jwt_auth.exceptions import AuthJWTException
from pydantic import BaseModel
app = FastAPI()
class User(BaseModel):
username: str
password: str
# in production you can use Settings management
# from pydantic to get secret key from .env
class Settings(BaseModel):
authjwt_secret_key: str = "secret"
# callback to get your configuration
@AuthJWT.load_config
def get_config():
return Settings()
# exception handler for authjwt
# in production, you can tweak performance using orjson response
@app.exception_handler(AuthJWTException)
def authjwt_exception_handler(request: Request, exc: AuthJWTException):
return JSONResponse(
status_code=exc.status_code,
content={"detail": exc.message}
)
# provide a method to create access tokens. The create_access_token()
# function is used to actually generate the token used for authorization
# in the protected endpoints later on
@app.post('/login')
def login(user: User, Authorize: AuthJWT = Depends()):
if user.username != "test" or user.password != "<PASSWORD>":
raise HTTPException(status_code=401,detail="Bad username or password")
# subject identifier for who this token is for example id or username from database
access_token = Authorize.create_access_token(subject=user.username)
return {"access_token": access_token}
# protect the endpoint with jwt_required(), which requires
# a valid access token in the request headers for access.
@app.get('/user')
def user(Authorize: AuthJWT = Depends()):
Authorize.jwt_required()
current_user = Authorize.get_jwt_subject()
return {"user": current_user}
|
146041
|
version https://git-lfs.github.com/spec/v1
oid sha256:00d41c118e6c444c85be97ecd91f67088a2fd53b9c4398d2a9b0b9fd8b244979
size 2932
|
146087
|
from gtfspy.import_loaders.table_loader import TableLoader, decode_six
class RouteLoader(TableLoader):
fname = 'routes.txt'
table = 'routes'
tabledef = '(route_I INTEGER PRIMARY KEY, ' \
'route_id TEXT UNIQUE NOT NULL, ' \
'agency_I INT, ' \
'name TEXT, ' \
'long_name TEXT, ' \
'desc TEXT, ' \
'type INT, ' \
'url TEXT, ' \
'color TEXT, ' \
'text_color TEXT' \
')'
extra_keys = ['agency_I', ]
extra_values = ['(SELECT agency_I FROM agencies WHERE agency_id=:_agency_id )',
]
# route_id,agency_id,route_short_name,route_long_name,route_desc,route_type,route_url
# 1001,HSL,1,Kauppatori - Kapyla,0,http://aikataulut.hsl.fi/linjat/fi/h1_1a.html
def gen_rows(self, readers, prefixes):
from gtfspy import extended_route_types
for reader, prefix in zip(readers, prefixes):
for row in reader:
#print (row)
yield dict(
route_id = prefix + decode_six(row['route_id']),
_agency_id = prefix + decode_six(row['agency_id']) if 'agency_id' in row else None,
name = decode_six(row['route_short_name']),
long_name = decode_six(row['route_long_name']),
desc = decode_six(row['route_desc']) if 'route_desc' in row else None,
type = extended_route_types.ROUTE_TYPE_CONVERSION[int(row['route_type'])],
url = decode_six(row['route_url']) if 'route_url' in row else None,
color = decode_six(row['route_color']) if 'route_color' in row else None,
text_color = decode_six(row['route_text_color']) if 'route_text_color' in row else None,
)
@classmethod
def index(cls, cur):
# cur.execute('CREATE INDEX IF NOT EXISTS idx_rid ON route (route_id)')
cur.execute('CREATE INDEX IF NOT EXISTS idx_route_name ON routes (name)')
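# Hedged note: extra_keys/extra_values presumably let the base TableLoader
# resolve the internal agency_I foreign key at INSERT time via the embedded
# SELECT, binding the :_agency_id value yielded by gen_rows above.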
|
146117
|
from pythran.tests import TestEnv
@TestEnv.module
class TestMath(TestEnv):
def test_cos_(self):
self.run_test("def cos_(a):\n from math import cos\n return cos(a)", 1, cos_=[int])
def test_exp_(self):
self.run_test("def exp_(a):\n from math import exp\n return exp(a)", 1, exp_=[int])
def test_sqrt_(self):
self.run_test("def sqrt_(a):\n from math import sqrt\n return sqrt(a)", 1, sqrt_=[int])
def test_log10_(self):
self.run_test("def log10_(a):\n from math import log10\n return log10(a)", 1, log10_=[int])
def test_isnan_(self):
self.run_test("def isnan_(a):\n from math import isnan\n return isnan(a)", 1, isnan_=[int])
def test_pi_(self):
self.run_test("def pi_():\n from math import pi\n return pi", pi_=[])
def test_e_(self):
self.run_test("def e_():\n from math import e\n return e", e_=[])
def test_asinh_(self):
self.run_test("def asinh_(a):\n from math import asinh\n return asinh(a)",1., asinh_=[float])
def test_atanh_(self):
self.run_test("def atanh_(a):\n from math import atanh\n return atanh(a)",.1, atanh_=[float])
def test_acosh_(self):
self.run_test("def acosh_(a):\n from math import acosh\n return acosh(a)",1, acosh_=[int])
def test_radians_(self):
self.run_test("def radians_(a):\n from math import radians\n return radians(a)",1, radians_=[int])
def test_degrees_(self):
self.run_test("def degrees_(a):\n from math import degrees\n return degrees(a)",1, degrees_=[int])
def test_hypot_(self):
self.run_test("def hypot_(a,b):\n from math import hypot\n return hypot(a,b)",3,4, hypot_=[int,int])
def test_tanh_(self):
self.run_test("def tanh_(a):\n from math import tanh\n return tanh(a)",1, tanh_=[int])
def test_cosh_(self):
self.run_test("def cosh_(a):\n from math import cosh\n return cosh(a)",1., cosh_=[float])
def test_sinh_(self):
self.run_test("def sinh_(a):\n from math import sinh\n return sinh(a)",1, sinh_=[int])
def test_atan_(self):
self.run_test("def atan_(a):\n from math import atan\n return atan(a)",1, atan_=[int])
def test_atan2_(self):
self.run_test("def atan2_(a,b):\n from math import atan2\n return atan2(a,b)",2,4, atan2_=[int,int])
def test_asin_(self):
self.run_test("def asin_(a):\n from math import asin\n return asin(a)",1, asin_=[int])
def test_tan_(self):
self.run_test("def tan_(a):\n from math import tan\n return tan(a)",1, tan_=[int])
def test_log_(self):
self.run_test("def log_(a):\n from math import log\n return log(a)",1, log_=[int])
def test_log1p_(self):
self.run_test("def log1p_(a):\n from math import log1p\n return log1p(a)",1, log1p_=[int])
def test_expm1_(self):
self.run_test("def expm1_(a):\n from math import expm1\n return expm1(a)",1, expm1_=[int])
def test_ldexp_(self):
self.run_test("def ldexp_(a,b):\n from math import ldexp\n return ldexp(a,b)",3,4, ldexp_=[int,int])
def test_fmod_(self):
self.run_test("def fmod_(a,b):\n from math import fmod\n return fmod(a,b)",5.3,2, fmod_=[float,int])
def test_fabs_(self):
self.run_test("def fabs_(a):\n from math import fabs\n return fabs(a)",1, fabs_=[int])
def test_copysign_(self):
self.run_test("def copysign_(a,b):\n from math import copysign\n return copysign(a,b)",2,-2, copysign_=[int,int])
def test_acos_(self):
self.run_test("def acos_(a):\n from math import acos\n return acos(a)",1, acos_=[int])
def test_erf_(self):
self.run_test("def erf_(a):\n from math import erf\n return erf(a)",1, erf_=[int])
def test_erfc_(self):
self.run_test("def erfc_(a):\n from math import erfc\n return erfc(a)",1, erfc_=[int])
def test_gamma_(self):
self.run_test("def gamma_(a):\n from math import gamma\n return gamma(a)",1, gamma_=[int])
def test_lgamma_(self):
self.run_test("def lgamma_(a):\n from math import lgamma\n return lgamma(a)",1, lgamma_=[int])
def test_trunc_(self):
self.run_test("def trunc_(a):\n from math import trunc\n return trunc(a)",1, trunc_=[int])
def test_factorial_(self):
self.run_test("def factorial_(a):\n from math import factorial\n return factorial(a)",2, factorial_=[int])
def test_modf_(self):
self.run_test("def modf_(a):\n from math import modf\n return modf(a)",2, modf_=[int])
def test_frexp_(self):
self.run_test("def frexp_(a):\n from math import frexp\n return frexp(a)",2.2, frexp_=[float])
def test_isinf_(self):
self.run_test("def isinf_(a):\n from math import isinf\n n=1\n while not isinf(a):\n a=a*a\n n+=1\n return isinf(a)", 2., isinf_=[float])
def test_pow_accuracy(self):
code = '''
from math import factorial
def pow_accuracy(N, i):
N = N ** i
p = 0.0000001 * 1.0
binomial_coef = 1. * factorial(N) / factorial(i) / factorial(N-i)
pp = binomial_coef * p**i * (1-p)**(N-i)
return pp'''
self.run_test(code,
3, 2,
pow_accuracy=[int, int])
def test_pow_array_accuracy(self):
code = '''
import numpy as np
def pow_array_accuracy(N, i):
p = np.arange(N) * 0.0000001
pp = p**i * (1-p)**(N-i)
return pp'''
self.run_test(code,
3, 2,
pow_array_accuracy=[int, int])
|
146188
|
from rest_framework.test import APITestCase, APIClient
from src.utils.tests_utils import create_test_log
class TestViews(APITestCase):
def test_get_log_list(self):
create_test_log()
client = APIClient()
response = client.get('/logs/')
self.assertEqual(1, len(response.data))
def test_get_log_detail(self):
log = create_test_log()
client = APIClient()
response = client.get('/logs/' + str(log.id))
self.assertEqual(log.id, response.data['id'])
self.assertEqual(log.name, response.data['name'])
|
146246
|
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
import numpy as np
class SimControlCZ_v2(Instrument):
"""
Noise and other parameters for cz_superoperator_simulation_v2
Created for VCZ simulation
"""
def __init__(self, name, **kw):
super().__init__(name, **kw)
# Noise parameters
self.add_parameter(
"T1_q0",
unit="s",
label="T1 fluxing qubit",
docstring="T1 fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T1_q1",
unit="s",
label="T1 static qubit",
docstring="T1 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q1",
unit="s",
label="T2 static qubit",
docstring="T2 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q0_amplitude_dependent",
docstring="fitcoefficients giving T2_q0 or Tphi_q0 as a function of inverse sensitivity (in units of w_q0/Phi_0): a, b. Function is ax+b",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([-1, -1]),
)
# for flux noise simulations
self.add_parameter(
"sigma_q0",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q0",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"sigma_q1",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q1",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"w_q1_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"w_q0_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
# Control parameters for the simulations
self.add_parameter(
"dressed_compsub",
docstring="true if we use the definition of the comp subspace that uses the dressed 00,01,10,11 states",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"distortions",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"voltage_scaling_factor",
unit="a.u.",
docstring="scaling factor for the voltage for a CZ pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"n_sampling_gaussian_vec",
docstring="array. each element is a number of samples from the gaussian distribution. Std to guarantee convergence is [11]. More are used only to verify convergence",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([11]),
)
self.add_parameter(
"cluster",
docstring="true if we want to use the cluster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"T2_scaling",
unit="a.u.",
docstring="scaling factor for T2_q0_amplitude_dependent",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"which_gate",
docstring="Direction of the CZ gate. E.g. 'NE'. Used to extract parameters from the fluxlutman ",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="NE",
)
self.add_parameter(
"simstep_div",
docstring="Division of the simulation time step. 4 is a good one, corresponding to a time step of 0.1 ns. For smaller values landscapes can deviate significantly from experiment.",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=1),
initial_value=4,
)
self.add_parameter(
"gates_num",
docstring="Chain the same gate gates_num times.",
parameter_class=ManualParameter,
            # It should be an integer, but the measurement control casts to float when setting sweep points
vals=vals.Numbers(min_value=1),
initial_value=1,
)
self.add_parameter(
"gates_interval",
docstring="Time interval that separates the gates if gates_num > 1.",
parameter_class=ManualParameter,
unit="s",
vals=vals.Numbers(min_value=0),
initial_value=0,
)
self.add_parameter(
"cost_func",
docstring="Used to calculate the cost function based on the quantities of interest (qoi). Signature: cost_func(qoi). NB: qoi's that represent percentages will be in [0, 1] range. Inspect 'pycqed.simulations.cz_superoperator_simulation_new_functions.simulate_quantities_of_interest_superoperator_v2??' in notebook for available qoi's.",
parameter_class=ManualParameter,
unit="a.u.",
vals=vals.Callable(),
initial_value=None,
)
self.add_parameter(
"cost_func_str",
docstring="Not loaded automatically. Convenience parameter to store the cost function string and use `exec('sim_control_CZ.cost_func(' + sim_control_CZ.cost_func_str() + ')')` to load it.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="lambda qoi: np.log10((1 - qoi['avgatefid_compsubspace_pc']) * (1 - 0.5) + qoi['L1'] * 0.5)",
)
# Was used to simulate the "refocusing pulses"
self.add_parameter(
"double_cz_pi_pulses",
docstring="If set to 'no_pi_pulses' or 'with_pi_pulses' will simulate two sequential CZs with or without Pi pulses simulated as an ideal superoperator multiplication.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="", # Use empty string to evaluate to false
)
self.add_parameter(
"optimize_const_amp",
docstring="If true constant amplitude points in the pulse will be 'absorbed' to make simulation much faster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"look_for_minimum",
docstring="FB: If cost_func=None, if this is False my old cost func is used, if it's True that cost func is used to power 4",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"purcell_device",
docstring="FB: should be set to True only when we want to use the old way of defining T2_q0_amplitude_dependent, so it could be that we simulate the purcell device but we set this parameter to False",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"artificial_waiting_at_sweetspot",
docstring="FB: integer number of simstep_new in the middle of VCZ. Used for matching sim-exp",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"timestamp_for_contour",
docstring="FB: timestamp of previously generated heatmap. Used for contour scans along the 180 deg line",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="",
)
self.add_parameter(
"measurement_time",
docstring="FB: measurement time. Used to get the right missing fraction from the conditional-oscillations experiment",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"fluxbias_mean",
docstring="FB: used for scans wrt the fluxbias at one specific point in the landscape, for fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"fluxbias_mean_q1",
docstring="FB: used for scans wrt the fluxbias at one specific point in the landscape, for static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
# for ramsey/Rabi simulations
self.add_parameter(
"detuning",
unit="Hz",
docstring="detuning of w_q0 from its sweet spot value",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"initial_state",
docstring="determines initial state for ramsey_simulations_new",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="changeme",
)
self.add_parameter(
"scanning_time",
unit="s",
docstring="time between the two pi/2 pulses",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"czd_double_sided",
docstring="Ramsey or echo pulse. Used since it has been removed from fluxlutman",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
# for spectral tomo
self.add_parameter(
"repetitions",
docstring="Repetitions of CZ gate, used for spectral tomo",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"time_series",
docstring="",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"overrotation_sims",
docstring="instead of constant shift in flux, we use constant rotations around some axis",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"axis_overrotation",
docstring="",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([1, 0, 0]),
)
def set_cost_func(self, cost_func_str=None):
"""
Sets the self.cost_func from the self.cost_func_str string
or from the provided string
"""
if cost_func_str is None:
cost_func_str = self.cost_func_str()
else:
self.cost_func_str(cost_func_str)
exec("self.cost_func(" + self.cost_func_str() + ")")
|
146271
|
from cipher_description import CipherDescription
def generate_speck_version(n,a,b):
speck = CipherDescription(2*n)
s = ['s{}'.format(i) for i in range(2*n)]
'''
if n == 16:
a = 7
b = 2
else:
a = 8
b = 3
'''
x = s[n:]
y = s[:n]
if n%a==0:
for j in range(a):
            shift = ['s{}'.format(n+j+(i*(n-a))%n) for i in range(n // a)]
speck.apply_permutation(shift)
else:
shift = ['s{}'.format(n+(i*(n-a))%n) for i in range(n)]
speck.apply_permutation(shift)
speck.add_mod(x,y,x,n,0)
if n%b==0:
for j in range(b):
            shift = ['s{}'.format(j+i*b) for i in range(n // b)]
speck.apply_permutation(shift)
else:
shift = ['s{}'.format((i*b)%n) for i in range(n)]
speck.apply_permutation(shift)
for i in range(n):
speck.apply_xor(x[i],y[i],y[i])
return speck
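# Hedged usage sketch: per the commented-out defaults above, the standard
# Speck rotation amounts are a=7, b=2 for the 16-bit word size (Speck32) and
# a=8, b=3 otherwise, e.g.:
#   speck32_round = generate_speck_version(16, 7, 2)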
|
146283
|
import os
import numpy as np
from stompy.spatial import field
datadir=os.path.join( os.path.dirname(__file__), 'data')
#depth_bin_file = '/home/rusty/classes/research/spatialdata/us/ca/suntans/bathymetry/compiled2/final.bin'
def test_xyz():
depth_bin_file = os.path.join(datadir,'depth.xyz')
f = field.XYZText(fname=depth_bin_file)
f.build_index()
center = np.array([ 563379.6 , 4196117. ])
elev = f.inv_dist_interp(center,
min_n_closest=8,
min_radius=3900.0)
##
def test_lin_interp():
X=np.array([[0.,0.],[10.,0.],[10.,10.],[0.,10.]])
F=np.array([1.,2.,3.,4.])
f = field.XYZField(X=X,F=F)
elev = f.interpolate( [2,3] )
out=f.interpolate(X)
assert np.allclose(out,F)
|
146314
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import kornia
from codes.models.resnet import resnet18
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
from codes.models.region_proposal_network import RegionProposalNetwork
import cv2
from codes.EX_CONST import Const
import matplotlib.pyplot as plt
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(1, self.shape[0])
class PerspTransDetector(nn.Module):
def __init__(self, dataset = None):
super().__init__()
if dataset is not None:
self.num_cam = dataset.num_cam
self.img_shape, self.reducedgrid_shape = dataset.img_shape, dataset.reducedgrid_shape
imgcoord2worldgrid_matrices = self.get_imgcoord2worldgrid_matrices(dataset.base.intrinsic_matrices,
dataset.base.extrinsic_matrices,
dataset.base.worldgrid2worldcoord_mat)
self.coord_map = self.create_coord_map(self.reducedgrid_shape + [1])
# img
self.upsample_shape = list(map(lambda x: int(x / Const.reduce), self.img_shape))
img_reduce = np.array(self.img_shape) / np.array(self.upsample_shape)
img_zoom_mat = np.diag(np.append(img_reduce, [1]))
# map
map_zoom_mat = np.diag(np.append(np.ones([2]) / Const.reduce, [1]))
self.proj_mats = [torch.from_numpy(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)
for cam in range(self.num_cam)]
self.backbone = nn.Sequential(*list(resnet18(pretrained=True, replace_stride_with_dilation=[False, False, True]).children())[:-2]).cuda()
self.rpn = RegionProposalNetwork(in_channels=1026, mid_channels=1026, ratios=[0.9, 1.1], anchor_scales=[4]).cuda()
def forward(self, imgs,frame, gt_boxes = None, epoch = None, visualize=False, train = True, mark = None):
B, N, C, H, W = imgs.shape
world_features = []
img_featuremap = []
for cam in range(self.num_cam):
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
img_feature =self.backbone(imgs[:, cam].cuda())
img_feature = F.interpolate(img_feature, self.upsample_shape, mode='bilinear')
if cam == 0:
plt.imsave("img_norm_0.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
else:
plt.imsave("img_norm_1.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
img_featuremap.append(img_feature)
proj_mat = self.proj_mats[cam].repeat([B, 1, 1]).float().cuda()
world_feature = kornia.warp_perspective(img_feature.cuda(), proj_mat, self.reducedgrid_shape) # 0.0142 * 2 = 0.028
world_feature = kornia.vflip(world_feature)
if cam == 0:
plt.imsave("world_feature_0.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
else:
plt.imsave("world_feature_1.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
world_features.append(world_feature.cuda())
world_features = torch.cat(world_features + [self.coord_map.repeat([B, 1, 1, 1]).cuda()], dim=1)
plt.imsave("world_features.jpg", torch.norm(world_features[0], dim=0).cpu().numpy())
rpn_locs, rpn_scores, anchor, rois, roi_indices = self.rpn(world_features, Const.grid_size) # 0.08
return rpn_locs, rpn_scores, anchor, rois, roi_indices, img_featuremap, world_features
def get_imgcoord2worldgrid_matrices(self, intrinsic_matrices, extrinsic_matrices, worldgrid2worldcoord_mat):
projection_matrices = {}
for cam in range(self.num_cam):
worldcoord2imgcoord_mat = intrinsic_matrices[cam] @ np.delete(extrinsic_matrices[cam], 2, 1)
worldgrid2imgcoord_mat = worldcoord2imgcoord_mat @ worldgrid2worldcoord_mat
imgcoord2worldgrid_mat = np.linalg.inv(worldgrid2imgcoord_mat)
permutation_mat = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
projection_matrices[cam] = permutation_mat @ imgcoord2worldgrid_mat
return projection_matrices
def create_coord_map(self, img_size, with_r=False):
H, W, C = img_size
grid_x, grid_y = np.meshgrid(np.arange(W), np.arange(H))
grid_x = torch.from_numpy(grid_x / (W - 1) * 2 - 1).float()
grid_y = torch.from_numpy(grid_y / (H - 1) * 2 - 1).float()
ret = torch.stack([grid_x, grid_y], dim=0).unsqueeze(0)
if with_r:
rr = torch.sqrt(torch.pow(grid_x, 2) + torch.pow(grid_y, 2)).view([1, 1, H, W])
ret = torch.cat([ret, rr], dim=1)
return ret
def vis_feature(x, max_num=5, out_path='/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/'):
for i in range(0, x.shape[1]):
if i >= max_num:
break
feature = x[0, i, :, :].view(x.shape[-2], x.shape[-1])
feature = feature.detach().cpu().numpy()
feature = 1.0 / (1 + np.exp(-1 * feature))
feature = np.round(feature * 255).astype(np.uint8)
feature_img = cv2.applyColorMap(feature, cv2.COLORMAP_JET)
dst_path = os.path.join(out_path, str(i) + '.jpg')
cv2.imwrite(dst_path, feature_img)
|
146381
|
from whispers.plugins.uri import Uri
from whispers.rules import WhisperRules
class StructuredDocument:
def __init__(self, rules: WhisperRules):
self.breadcrumbs = []
self.rules = rules
def traverse(self, code, key=None):
"""Recursively traverse YAML/JSON document"""
if isinstance(code, dict):
yield from self.cloudformation(code)
for k, v in code.items():
self.breadcrumbs.append(k)
yield k, v, self.breadcrumbs
yield from self.traverse(v, key=k)
self.breadcrumbs.pop()
# Special key/value format
elements = list(code.keys())
if "key" in elements and "value" in elements:
yield code["key"], code["value"], self.breadcrumbs
elif isinstance(code, list):
for item in code:
yield key, item, self.breadcrumbs
yield from self.traverse(item, key=key)
elif isinstance(code, str):
if "=" in code:
item = code.split("=", 1)
if len(item) == 2:
yield item[0], item[1], self.breadcrumbs
if self.rules.match("uri", code):
for k, v in Uri().pairs(code):
yield k, v, self.breadcrumbs
def cloudformation(self, code):
"""
AWS CloudFormation format
"""
if self.breadcrumbs:
return # Not tree root
if "AWSTemplateFormatVersion" not in code:
return # Not CF format
if "Parameters" not in code:
return # No parameters
for key, values in code["Parameters"].items():
if "Default" not in values:
continue # No default value
yield key, values["Default"]
|
146472
|
import logging
from collections import namedtuple
from typing import Optional, Dict, Callable, Any
from auth.authorization import Authorizer, is_same_user
from auth.user import User
from execution.executor import ScriptExecutor
from model import script_config
from model.model_helper import is_empty, AccessProhibitedException
from utils.exceptions.missing_arg_exception import MissingArgumentException
from utils.exceptions.not_found_exception import NotFoundException
LOGGER = logging.getLogger('script_server.execution_service')
_ExecutionInfo = namedtuple('_ExecutionInfo',
['execution_id', 'owner_user', 'audit_name', 'config', 'audit_command'])
class ExecutionService:
def __init__(self, authorizer, id_generator):
self._id_generator = id_generator
self._authorizer = authorizer # type: Authorizer
self._executors = {} # type: Dict[str, ScriptExecutor]
self._execution_infos = {} # type: Dict[str, _ExecutionInfo]
        # active from the user's perspective:
        # - either they are still running
        # - OR the user hasn't yet seen the execution results
self._active_executor_ids = set()
self._finish_listeners = []
self._start_listeners = []
def get_active_executor(self, execution_id, user):
self.validate_execution_id(execution_id, user, only_active=False)
if execution_id not in self._active_executor_ids:
return None
return self._executors.get(execution_id)
def start_script(self, config, values, user: User):
audit_name = user.get_audit_name()
executor = ScriptExecutor(config, values)
execution_id = self._id_generator.next_id()
audit_command = executor.get_secure_command()
LOGGER.info('Calling script #%s: %s', execution_id, audit_command)
executor.start()
self._executors[execution_id] = executor
self._execution_infos[execution_id] = _ExecutionInfo(
execution_id=execution_id,
owner_user=user,
audit_name=audit_name,
audit_command=audit_command,
config=config)
self._active_executor_ids.add(execution_id)
self._add_post_finish_handling(execution_id, executor, user)
self._fire_execution_started(execution_id, user)
return execution_id
def stop_script(self, execution_id, user):
self.validate_execution_id(execution_id, user)
if execution_id in self._executors:
self._executors[execution_id].stop()
def kill_script(self, execution_id, user):
self.validate_execution_id(execution_id, user)
if execution_id in self._executors:
self._executors[execution_id].kill()
def kill_script_by_system(self, execution_id):
if execution_id in self._executors:
self._executors[execution_id].kill()
def get_exit_code(self, execution_id):
return self._get_for_executor(execution_id, lambda e: e.get_return_code())
def is_running(self, execution_id, user):
executor = self._executors.get(execution_id) # type: ScriptExecutor
if executor is None:
return False
self.validate_execution_id(execution_id, user, only_active=False, allow_when_history_access=True)
return not executor.is_finished()
def get_active_executions(self, user_id):
result = []
for id in self._active_executor_ids:
execution_info = self._execution_infos[id]
if self._can_access_execution(execution_info, user_id):
result.append(id)
return result
def get_running_executions(self):
result = []
for id, executor in self._executors.items():
if executor.is_finished():
continue
result.append(id)
return result
def get_config(self, execution_id, user) -> Optional[script_config.ConfigModel]:
self.validate_execution_id(execution_id, user)
return self._get_for_execution_info(execution_id,
lambda i: i.config)
def is_active(self, execution_id):
return execution_id in self._active_executor_ids
def can_access(self, execution_id, user_id):
execution_info = self._execution_infos.get(execution_id)
return self._can_access_execution(execution_info, user_id)
def validate_execution_id(self, execution_id, user, only_active=True, allow_when_history_access=False):
if is_empty(execution_id):
raise MissingArgumentException('Execution id is missing', 'execution_id')
if only_active and (not self.is_active(execution_id)):
raise NotFoundException('No (active) executor found for id ' + execution_id)
if not self.can_access(execution_id, user.user_id) \
and not (allow_when_history_access and self._has_full_history_rights(user.user_id)):
LOGGER.warning('Prohibited access to not owned execution #%s (user=%s)',
execution_id, str(user))
raise AccessProhibitedException('Prohibited access to not owned execution')
@staticmethod
def _can_access_execution(execution_info: _ExecutionInfo, user_id):
return (execution_info is not None) and (is_same_user(execution_info.owner_user.user_id, user_id))
def get_user_parameter_values(self, execution_id):
return self._get_for_executor(execution_id,
lambda e: e.get_user_parameter_values())
def get_script_parameter_values(self, execution_id):
return self._get_for_executor(execution_id,
lambda e: e.get_script_parameter_values())
def get_owner(self, execution_id):
return self._get_for_execution_info(execution_id,
lambda i: i.owner_user.user_id)
def get_audit_name(self, execution_id):
return self._get_for_execution_info(execution_id,
lambda i: i.owner_user.get_audit_name())
def get_audit_command(self, execution_id):
return self._get_for_execution_info(execution_id,
lambda i: i.audit_command)
def get_all_audit_names(self, execution_id):
return self._get_for_execution_info(execution_id,
lambda i: i.owner_user.audit_names)
def get_anonymized_output_stream(self, execution_id):
return self._get_for_executor(execution_id,
lambda e: e.get_anonymized_output_stream())
def get_raw_output_stream(self, execution_id, user_id):
owner = self.get_owner(execution_id)
def getter(executor):
if user_id != owner:
LOGGER.warning(user_id + ' tried to access execution #' + execution_id + ' with owner ' + owner)
return executor.get_raw_output_stream()
return self._get_for_executor(execution_id, getter)
def get_process_id(self, execution_id):
return self._get_for_executor(execution_id,
lambda e: e.get_process_id())
def _get_for_executor(self, execution_id, getter: Callable[[ScriptExecutor], Any]):
executor = self._executors.get(execution_id)
if executor is None:
return None
return getter(executor)
def _get_for_execution_info(self, execution_id, getter: Callable[[_ExecutionInfo], Any]):
info = self._execution_infos.get(execution_id)
if info is None:
return None
return getter(info)
def cleanup_execution(self, execution_id, user):
try:
self.validate_execution_id(execution_id, user)
except NotFoundException:
return
executor = self._executors.get(execution_id)
if not executor.is_finished():
raise Exception('Executor ' + execution_id + ' is not yet finished')
executor.cleanup()
self._active_executor_ids.remove(execution_id)
def add_finish_listener(self, callback, execution_id=None):
if execution_id is None:
self._finish_listeners.append(callback)
else:
executor = self._executors.get(execution_id)
if not executor:
LOGGER.error('Failed to find executor for id ' + execution_id)
return
class FinishListener:
def finished(self):
callback()
executor.add_finish_listener(FinishListener())
def _add_post_finish_handling(self, execution_id, executor, user):
self_service = self
class FinishListener:
def finished(self):
self_service._fire_execution_finished(execution_id, user)
executor.add_finish_listener(FinishListener())
def _fire_execution_finished(self, execution_id, user):
for callback in self._finish_listeners:
try:
callback(execution_id, user)
            except Exception:
LOGGER.exception('Could not notify finish listener (%s), execution: %s', str(callback), execution_id)
def add_start_listener(self, callback):
self._start_listeners.append(callback)
def _fire_execution_started(self, execution_id, user):
for callback in self._start_listeners:
try:
callback(execution_id, user)
            except Exception:
LOGGER.exception('Could not notify start listener (%s), execution: %s', str(callback), execution_id)
def _has_full_history_rights(self, user_id):
return self._authorizer.has_full_history_access(user_id)
|
146506
|
from smartnlp.classfication.svm_classifier import SVMClassifier
if __name__ == '__main__':
svm_model = SVMClassifier('model/svm/model.pkl',
'./data/imdb/aclImdb.txt',
train=True)
# svm_model = SVMClassifier('model/svm/model.pkl')
svm_model.predict(['i like it ! its very interesting', 'I don\'t like it, it\'s boring'])
|
146533
|
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
get_element_by_id,
)
class SlideshareIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)"
_TEST = {
"url": "http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity",
"info_dict": {
"id": "25665706",
"ext": "mp4",
"title": "Managing Scale and Complexity",
"description": "This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by <NAME> from Netflix.",
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group("title")
webpage = self._download_webpage(url, page_title)
slideshare_obj = self._search_regex(
r"\$\.extend\(.*?slideshare_object,\s*(\{.*?\})\);",
webpage,
"slideshare object",
)
info = json.loads(slideshare_obj)
if info["slideshow"]["type"] != "video":
raise ExtractorError(
'Webpage type is "%s": only video extraction is supported for Slideshare'
% info["slideshow"]["type"],
expected=True,
)
doc = info["doc"]
bucket = info["jsplayer"]["video_bucket"]
ext = info["jsplayer"]["video_extension"]
video_url = compat_urlparse.urljoin(bucket, doc + "-SD." + ext)
description = get_element_by_id(
"slideshow-description-paragraph", webpage
) or self._html_search_regex(
r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>',
webpage,
"description",
fatal=False,
)
return {
"_type": "video",
"id": info["slideshow"]["id"],
"title": info["slideshow"]["title"],
"ext": ext,
"url": video_url,
"thumbnail": info["slideshow"]["pin_image_url"],
"description": description.strip() if description else None,
}
|
146541
|
import numpy as np
import soundfile as sf
import argparse
import os
import keras
import sklearn
import librosa
from keras import backend as K
eps = np.finfo(float).eps
def class_mae(y_true, y_pred):
return K.mean(
K.abs(
K.argmax(y_pred, axis=-1) - K.argmax(y_true, axis=-1)
),
axis=-1
)
def count(audio, model, scaler):
# compute STFT
X = np.abs(librosa.stft(audio, n_fft=400, hop_length=160)).T
    # apply global (featurewise) standardization to zero mean, unit variance
X = scaler.transform(X)
# cut to input shape length (500 frames x 201 STFT bins)
X = X[:500, :]
# apply l2 normalization
Theta = np.linalg.norm(X, axis=1) + eps
X /= np.mean(Theta)
# add sample dimension
X = X[np.newaxis, ...]
if len(model.input_shape) == 4:
X = X[:, np.newaxis, ...]
ys = model.predict(X, verbose=0)
return np.argmax(ys, axis=1)[0]
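# Shape note (derived from the constants above): at a 16 kHz sample rate,
# n_fft=400 / hop_length=160 gives 201 STFT bins and ~100 frames per second,
# so the 500-frame crop corresponds to the expected 5 s input; the model's
# per-class output is argmax'ed into a speaker-count estimate.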
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Load keras model and predict speaker count'
)
parser.add_argument(
'audio',
help='audio file (samplerate 16 kHz) of 5 seconds duration'
)
parser.add_argument(
'--model', default='CRNN',
help='model name'
)
args = parser.parse_args()
# load model
model = keras.models.load_model(
os.path.join('models', args.model + '.h5'),
custom_objects={
'class_mae': class_mae,
'exp': K.exp
}
)
# print model configuration
model.summary()
# load standardisation parameters
scaler = sklearn.preprocessing.StandardScaler()
with np.load(os.path.join("models", 'scaler.npz')) as data:
scaler.mean_ = data['arr_0']
scaler.scale_ = data['arr_1']
# compute audio
audio, rate = sf.read(args.audio, always_2d=True)
# downmix to mono
audio = np.mean(audio, axis=1)
estimate = count(audio, model, scaler)
print("Speaker Count Estimate: ", estimate)
|
146562
|
import os.path
import yaml
from appr.auth import ApprAuth
def test_fake_home(fake_home):
assert os.path.expanduser("~") == fake_home
def test_init_create_dir(fake_home):
ApprAuth(".appr")
assert os.path.exists(os.path.join(str(fake_home), ".appr"))
def test_init_token_empty(fake_home):
k = ApprAuth()
assert os.path.exists(k.tokenfile) is False
def test_get_empty_token(fake_home):
k = ApprAuth()
assert k.token('*') is None
assert k.tokens is None
def test_delete_empty_token(fake_home):
""" Should not fail if there is no token """
k = ApprAuth()
assert k.delete_token('*') is None
def test_delete_token(fake_home):
""" Should not fail if there is no token """
k = ApprAuth()
k.add_token('*', "titid")
assert k.token('*') == "titid"
assert k.delete_token('*') == {'scope': {'namespace': '*', 'repo': '*'}, 'token': 'titid'}
assert k.token('*') is None
def test_create_token_value(fake_home):
""" Should not fail if there is no token """
k = ApprAuth()
k.add_token('a', "titic")
k.add_token('b', "titib")
assert k.token('a') == "titic"
assert k.token('a') == "titic"
assert k.token('c') is None
def test_create_token_file(fake_home):
k = ApprAuth()
k.add_token('a', "titib")
assert os.path.exists(k.tokenfile) is True
f = open(k.tokenfile, 'r')
r = f.read()
    assert {'auths': {'a': {'scope': {'namespace': '*', 'repo': '*'}, 'token': 'titib'}}} == yaml.safe_load(r)
def test_create_delete_get_token(fake_home):
k = ApprAuth()
k.add_token('a', "<PASSWORD>")
assert k.token('a') == "<PASSWORD>"
k.delete_token('a')
assert k.token('a') is None
def test_get_token_from_file(fake_home):
k = ApprAuth()
f = open(k.tokenfile, 'w')
r = yaml.dump({'auths': {'a': {'scope': {'namespace': '*', 'repo': '*'}, 'token': '<PASSWORD>'}}})
f.write(r)
f.close()
k = ApprAuth()
assert k.token('a') == "titib"
def test_retro_compat(fake_home):
k = ApprAuth()
f = open(k.tokenfile, 'w')
r = yaml.dump({'auths': {'a': 'foo', 'b': 'bar'}})
f.write(r)
f.close()
k = ApprAuth()
f = open(k.tokenfile, 'r')
new_format = f.read()
f.close()
expected = {'auths': {'a': {'scope': {'namespace': '*', 'repo': '*'}, 'token': 'foo'},
'b': {'scope': {'namespace': '*', 'repo': '*'}, 'token': 'bar'}}}
    assert yaml.safe_load(new_format) == expected
assert k.tokens == expected
|
146579
|
from calibre.customize import InterfaceActionBase
class ReadwisePlugin(InterfaceActionBase):
name = 'Readwise'
description = 'Export highlights to Readwise'
supported_platforms = ['windows', 'osx', 'linux']
author = '<NAME>'
version = (0, 1, 1)
minimum_calibre_version = (5, 0, 1)
actual_plugin = 'calibre_plugins.readwise.ui:InterfacePlugin'
def is_customizable(self):
return True
def config_widget(self):
from calibre_plugins.readwise.config import ConfigWidget
return ConfigWidget()
def save_settings(self, config_widget):
config_widget.save_settings()
ac = self.actual_plugin_
if ac is not None:
ac.apply_settings()
|
146618
|
from argparse import ArgumentParser
def make_args():
parser = ArgumentParser()
# general
parser.add_argument('--comment', dest='comment', default='0', type=str,
help='comment')
parser.add_argument('--task', dest='task', default='link', type=str,
help='link; link_pair')
parser.add_argument('--model', dest='model', default='GCN', type=str,
help='model class name. E.g., GCN, PGNN, ...')
parser.add_argument('--dataset', dest='dataset', default='All', type=str,
help='All; Cora; grid; communities; ppi')
parser.add_argument('--gpu', dest='gpu', action='store_true',
help='whether use gpu')
parser.add_argument('--cache_no', dest='cache', action='store_false',
help='whether use cache')
parser.add_argument('--cpu', dest='gpu', action='store_false',
help='whether use cpu')
parser.add_argument('--cuda', dest='cuda', default='0', type=str)
# dataset
parser.add_argument('--remove_link_ratio', dest='remove_link_ratio', default=0.2, type=float)
parser.add_argument('--rm_feature', dest='rm_feature', action='store_true',
help='whether rm_feature')
parser.add_argument('--rm_feature_no', dest='rm_feature', action='store_false',
help='whether rm_feature')
parser.add_argument('--permute', dest='permute', action='store_true',
help='whether permute subsets')
parser.add_argument('--permute_no', dest='permute', action='store_false',
help='whether permute subsets')
parser.add_argument('--feature_pre', dest='feature_pre', action='store_true',
help='whether pre transform feature')
parser.add_argument('--feature_pre_no', dest='feature_pre', action='store_false',
help='whether pre transform feature')
parser.add_argument('--dropout', dest='dropout', action='store_true',
help='whether dropout, default 0.5')
parser.add_argument('--dropout_no', dest='dropout', action='store_false',
help='whether dropout, default 0.5')
parser.add_argument('--approximate', dest='approximate', default=-1, type=int,
help='k-hop shortest path distance. -1 means exact shortest path') # -1, 2
parser.add_argument('--batch_size', dest='batch_size', default=8, type=int) # implemented via accumulating gradient
parser.add_argument('--layer_num', dest='layer_num', default=2, type=int)
parser.add_argument('--feature_dim', dest='feature_dim', default=32, type=int)
parser.add_argument('--hidden_dim', dest='hidden_dim', default=32, type=int)
parser.add_argument('--output_dim', dest='output_dim', default=32, type=int)
parser.add_argument('--anchor_num', dest='anchor_num', default=64, type=int)
parser.add_argument('--normalize_adj', dest='normalize_adj', action='store_true',
help='whether normalize_adj')
parser.add_argument('--lr', dest='lr', default=1e-2, type=float)
parser.add_argument('--epoch_num', dest='epoch_num', default=2001, type=int)
parser.add_argument('--repeat_num', dest='repeat_num', default=2, type=int) # 10
parser.add_argument('--epoch_log', dest='epoch_log', default=10, type=int)
parser.set_defaults(gpu=True, task='link', model='GCN', dataset='All',
cache=False, rm_feature=False,
permute=True, feature_pre=True, dropout=True,
approximate=-1, normalize_adj=False)
args = parser.parse_args()
return args
|
146626
|
from messagebird.base import Base
from messagebird.base_list import BaseList
class CallFlowList(BaseList):
def __init__(self):
self._data = None
self._pagination = None
super(CallFlowList, self).__init__(CallFlow)
@property
def data(self):
return self._data
@property
def pagination(self):
return self._pagination
@pagination.setter
def pagination(self, value):
self._pagination = value
@data.setter
def data(self, value):
"""Create typed objects from the dicts."""
items = []
for item in value:
items.append(self.itemType().load(item))
self._data = items
class CallFlowNumberList(BaseList):
def __init__(self):
self._data = None
self._pagination = None
super(CallFlowNumberList, self).__init__(CallFlowNumber)
@property
def data(self):
return self._data
@property
def pagination(self):
return self._pagination
@pagination.setter
def pagination(self, value):
self._pagination = value
@data.setter
def data(self, value):
"""Create typed objects from the dicts."""
items = []
for item in value:
items.append(self.itemType().load(item))
self._data = items
class CallFlow(Base):
def __init__(self):
self.id = None
self.title = None
self.record = None
self.steps = None
self.default = None
self._createdAt = None
self._updatedAt = None
@property
def createdAt(self):
return self._createdAt
@createdAt.setter
def createdAt(self, value):
self._createdAt = self.value_to_time(value)
@property
def updatedAt(self):
return self._updatedAt
@updatedAt.setter
def updatedAt(self, value):
self._updatedAt = self.value_to_time(value)
def load(self, data):
if data.get('data') is not None:
items = data.get('data')[0].items()
else:
items = list(data.items())
for name, value in items:
if hasattr(self, name) and not callable(getattr(self, name)):
setattr(self, name, value)
return self
class CallFlowNumber(Base):
def __init__(self):
self.id = None
self.number = None
self.callFlowId = None
self._createdAt = None
self._updatedAt = None
@property
def createdAt(self):
return self._createdAt
@createdAt.setter
def createdAt(self, value):
self._createdAt = self.value_to_time(value)
@property
def updatedAt(self):
return self._updatedAt
@updatedAt.setter
def updatedAt(self, value):
self._updatedAt = self.value_to_time(value)
def load(self, data):
if data.get('data') is not None:
items = data.get('data')[0].items()
else:
items = list(data.items())
for name, value in items:
if hasattr(self, name) and not callable(getattr(self, name)):
setattr(self, name, value)
return self
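# Hedged note: load() accepts either a REST envelope ({'data': [obj]}) or a
# bare object dict, so both of these populate the same fields:
#   CallFlow().load({'data': [{'id': '1', 'title': 'ivr'}]})
#   CallFlow().load({'id': '1', 'title': 'ivr'})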
|
146632
|
import os
import json
with open(os.path.abspath(os.path.dirname(__file__) + '/config.json'), 'r') as f:
raw_config = json.load(f)
class TestConfig:
def setEnvironment(self,test_env):
config = {}
config["botName"] = raw_config["botName"]
if test_env == 'development':
test_env = 'aiaasst'
        if test_env in raw_config["hosts"]:  # avoid KeyError for unknown environments
config["hostname"]= raw_config["hosts"][test_env]["hostname"]
config["userKey"] = raw_config["hosts"][test_env]["userKey"]
config["appId"] = raw_config["hosts"][test_env]["appId"]
config["botKey"] = raw_config["hosts"][test_env]["botKey"]
config["expectations"]= raw_config["hosts"][test_env]["expectations"]
else:
config["hostname"] = raw_config["hosts"]["production"]["hostname"]
config["userKey"] = raw_config["hosts"]["production"]["userKey"]
config["appId"] = raw_config["hosts"]["production"]["appId"]
config["botKey"] = raw_config["hosts"]["production"]["botKey"]
config["expectations"] = raw_config["hosts"]["production"]["expectations"]
return config
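# Illustrative check (assumes config.json defines a "production" host, which
# the module-level load above already requires): an unknown environment name
# falls back to the production credentials.
if __name__ == '__main__':
    cfg = TestConfig().setEnvironment('production')
    print(sorted(cfg.keys()))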
|
146672
|
import tf
import numpy as np
""" convert multicam_calib transforms to tagslam format:
example session:
ipython
import numpy as np
from convert_transforms import multicam_to_tagslam
%load_ext autoreload
%autoreload 2
# copy T_cn_cnm1 from multicam_calibration file:
T=np.array([[0.99995273841, 0.00284628684, 0.00929621430, -0.20032164920], [-0.00285007802, 0.99999586067, 0.00039459796, -0.00109630102], [-0.00929505268, -0.00042107425, 0.99995671141, 0.00092501568], [ 0.00000000000, 0.00000000000, 0.00000000000, 1.00000000000]])
rvec,tvec = multicam_to_tagslam(T)
"""
def mat_to_rvec_tvec(T):
angle, direc, point = tf.transformations.rotation_from_matrix(T)
return angle*direc, T[0:3,3]
def multicam_to_tagslam(tf_matrix_4x4_cn_cnm1):
T_w_c = np.linalg.inv(tf_matrix_4x4_cn_cnm1)
return mat_to_rvec_tvec(T_w_c)
def rvec_tvec_to_mat(rvec, tvec):
l = np.linalg.norm(rvec)
n = rvec/l if l > 1e-8 else np.array([1.0, 0.0, 0.0])
T = tf.transformations.rotation_matrix(l, n)
T[0:3, 3] = tvec
return T
def as_yaml(rvec, tvec):
    print("  position:")
    print("    x: %12.8f" % tvec[0])
    print("    y: %12.8f" % tvec[1])
    print("    z: %12.8f" % tvec[2])
    print("  rotation:")
    print("    x: %11.8f" % rvec[0])
    print("    y: %11.8f" % rvec[1])
    print("    z: %11.8f" % rvec[2])
|
146679
|
with open("vocab_origin.txt", "r") as f:
lines = f.readlines()
for i in range(185):
lines.append("<s_{}>".format(i))
with open("vocab.txt", "w") as f:
for line in lines:
f.write(line.strip() + "\n")
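# Illustrative check: the rewritten vocab should now end with the 185
# appended sentinel tokens "<s_0>" ... "<s_184>".
with open("vocab.txt", "r") as f:
    tokens = [l.strip() for l in f]
print(tokens[-1])  # expected: <s_184>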
|
146706
|
from .box_utils import *
from .seq_matcher import SeqBoxMatcher
from .detection import Detect
from .prior_box import PriorBox
|
146711
|
import pytest
import os
from polyglotdb.io import inspect_orthography
from polyglotdb.exceptions import DelimiterError
from polyglotdb import CorpusContext
def test_load_spelling_no_ignore(graph_db, text_spelling_test_dir):
spelling_path = os.path.join(text_spelling_test_dir, 'text_spelling.txt')
parser = inspect_orthography(spelling_path)
with CorpusContext('spelling_no_ignore', **graph_db) as c:
c.reset()
c.load(parser, spelling_path)
# assert(c.lexicon['ab'].frequency == 2)
def test_load_spelling_directory(graph_db, text_spelling_test_dir):
parser = inspect_orthography(text_spelling_test_dir)
with CorpusContext('spelling_directory', **graph_db) as c:
c.reset()
c.load(parser, text_spelling_test_dir)
@pytest.mark.xfail
def test_export_spelling(graph_db, export_test_dir):
    # export_discourse_spelling is never imported in this module, so the call
    # below raises NameError; the test is accordingly marked xfail.
export_path = os.path.join(export_test_dir, 'export_spelling.txt')
with CorpusContext('spelling_no_ignore', **graph_db) as c:
export_discourse_spelling(c, 'text_spelling', export_path, words_per_line=10)
with open(export_path, 'r') as f:
assert (f.read() == 'ab cab\'d ad ab ab.')
def test_load_spelling_ignore(graph_db, text_spelling_test_dir):
spelling_path = os.path.join(text_spelling_test_dir, 'text_spelling.txt')
parser = inspect_orthography(spelling_path)
parser.annotation_tiers[0].ignored_characters = set(["'", '.'])
with CorpusContext('spelling_ignore', **graph_db) as c:
c.reset()
c.load(parser, spelling_path)
# assert(c.lexicon['ab'].frequency == 3)
# assert(c.lexicon['cabd'].frequency == 1)
|
146717
|
import numpy as np
import torch
import torch.nn as nn
from .utils import register_model, get_model
from . import cos_norm_classifier
@register_model('MannNet')
class MannNet(nn.Module):
"""Defines a Dynamic Meta-Embedding Network."""
def __init__(self, num_cls=10, model='LeNet', src_weights_init=None,
weights_init=None, use_domain_factor_selector=False, centroids_path=None, feat_dim=512):
super(MannNet, self).__init__()
self.name = 'MannNet'
self.base_model = model
self.num_cls = num_cls
self.feat_dim = feat_dim
        self.use_domain_factor_selector = use_domain_factor_selector
        # forward() branches on self.discrim_feat, but the original never set
        # it; default to discriminating on logits (assumed intent, matching
        # the Linear(num_cls, ...) input size of the discriminator below).
        self.discrim_feat = False
self.cls_criterion = nn.CrossEntropyLoss()
self.gan_criterion = nn.CrossEntropyLoss()
self.centroids = torch.from_numpy(np.load(centroids_path)).float().cuda()
assert self.centroids is not None
self.centroids.requires_grad = False
self.setup_net()
if weights_init is not None:
self.load(weights_init)
elif src_weights_init is not None:
self.load_src_net(src_weights_init)
else:
raise Exception('MannNet must be initialized with weights.')
def forward(self, x_s, x_t):
"""Pass source and target images through their respective networks."""
score_s, x_s = self.src_net(x_s, with_ft=True)
score_t, x_t = self.tgt_net(x_t, with_ft=True)
if self.discrim_feat:
d_s = self.discriminator(x_s.clone())
d_t = self.discriminator(x_t.clone())
else:
d_s = self.discriminator(score_s.clone())
d_t = self.discriminator(score_t.clone())
return score_s, score_t, d_s, d_t
def setup_net(self):
"""Setup source, target and discriminator networks."""
self.src_net = get_model(self.base_model, num_cls=self.num_cls, feat_dim=self.feat_dim)
self.tgt_net = get_model(self.base_model, num_cls=self.num_cls, feat_dim=self.feat_dim)
input_dim = self.num_cls
self.discriminator = nn.Sequential(
nn.Linear(input_dim, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 2),
)
self.fc_selector = nn.Linear(self.feat_dim, self.feat_dim)
if self.use_domain_factor_selector:
self.domain_factor_selector = nn.Linear(self.feat_dim, self.feat_dim)
self.classifier = cos_norm_classifier.create_model(self.feat_dim, self.num_cls)
self.image_size = self.src_net.image_size
self.num_channels = self.src_net.num_channels
def load(self, init_path):
"""Loads full src and tgt models."""
net_init_dict = torch.load(init_path)
self.load_state_dict(net_init_dict)
def load_src_net(self, init_path):
"""Initialize source and target with source weights."""
self.src_net.load(init_path)
self.tgt_net.load(init_path)
net_init_dict = torch.load(init_path)
classifier_weights = net_init_dict['classifier.weight']
self.classifier.weight.data = classifier_weights.data.clone()
def save(self, out_path):
torch.save(self.state_dict(), out_path)
def save_tgt_net(self, out_path):
torch.save(self.tgt_net.state_dict(), out_path)
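# Minimal shape probe of the discriminator topology built in setup_net
# (illustrative; needs no pretrained weights, centroids, or GPU):
if __name__ == '__main__':
    probe = nn.Sequential(
        nn.Linear(10, 500), nn.ReLU(),
        nn.Linear(500, 500), nn.ReLU(),
        nn.Linear(500, 2),
    )
    print(probe(torch.randn(4, 10)).shape)  # torch.Size([4, 2])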
|
146723
|
import time
from collections import deque
import torch
import numpy as np
from ... import mohex, hex
from . import json, analysis
from .. import common
from rebar import arrdict
from pavlov import stats, runs, logs
from logging import getLogger
import activelo
import pandas as pd
from functools import wraps
from contextlib import contextmanager
from multiprocessing import set_start_method, Process
log = getLogger(__name__)
BOARDSIZES = [3, 5, 7, 9]
RUN_NAMES = [f'mohex-{s}' for s in BOARDSIZES]
def elos(run_name, names=None, queue=[]):
n = (json.symmetric_games(run_name)
.reindex(index=names, columns=names)
.fillna(0))
w = (json.symmetric_wins(run_name)
.reindex(index=names, columns=names)
.fillna(0))
for (i, j) in queue:
ni, nj = names[i], names[j]
w.loc[ni, nj] += (w.loc[ni, nj] + 1)/(n.loc[ni, nj] + 2)
n.loc[ni, nj] += 1
return activelo.solve(n.values, w.values)
def offdiag_refill(run, names, queue, count=1):
n = (json.symmetric_games(run)
.reindex(index=names, columns=names)
.fillna(0))
for (i, j) in queue:
ni, nj = names[i], names[j]
n.loc[ni, nj] += 1
rs, cs = np.indices(n.shape)
mask = ((rs == cs + 1) | (rs == cs - 1))
excess = (n.values - n.values[mask].min())
excess[~mask] = np.inf
probs = np.exp(-excess)/np.exp(-excess).sum()
while len(queue) < count:
idx = np.random.choice(np.arange(n.size), p=probs.flatten())
pair = (idx // n.shape[0], idx % n.shape[0])
queue.append(pair)
queue.append(pair[::-1])
def uniform_refill(run, names, queue, count=1, target=128):
n = (json.symmetric_games(run)
.reindex(index=names, columns=names)
.fillna(0))
for (i, j) in queue:
ni, nj = names[i], names[j]
n.loc[ni, nj] += 1
deficit = (target - n.values).clip(0, None)
deficit[np.diag_indices_from(deficit)] = 0
if (deficit == 0).all():
return
probs = deficit/deficit.sum()
while len(queue) < count:
idx = np.random.choice(np.arange(n.size), p=probs.flatten())
pair = (idx // n.shape[0], idx % n.shape[0])
queue.append(pair)
queue.append(pair[::-1])
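def _demo_deficit_sampling():
    """Toy illustration (hypothetical counts) of the deficit-weighted pair
    sampling used by uniform_refill above; not part of the arena pipeline."""
    n = np.array([[0., 120., 128.],
                  [120., 0., 100.],
                  [128., 100., 0.]])
    deficit = (128 - n).clip(0, None)        # games still needed per pair
    deficit[np.diag_indices_from(deficit)] = 0
    probs = deficit/deficit.sum()            # deficits become probabilities
    idx = np.random.choice(np.arange(n.size), p=probs.flatten())
    return (idx // n.shape[0], idx % n.shape[0])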
def reference_ladder(boardsize):
"""Run this to generate the `mohex-{boardsize}.json` files"""
#TODO: This is all a mess. Why'd I design it this way?
from IPython import display
run_name = f'mohex-{boardsize}'
agent = mohex.MoHexAgent()
worlds = hex.Hex.initial(n_envs=8, boardsize=boardsize)
universe = torch.linspace(0, 1, 11)
names = sorted([f'mohex-{r:.2f}' for r in universe])
queue = []
uniform_refill(run_name, names, queue, worlds.n_envs)
active = torch.tensor(queue[:worlds.n_envs])
queue = queue[worlds.n_envs:]
moves = torch.zeros((worlds.n_envs,))
while (queue or active.size(0)):
display.clear_output(wait=True)
print(f'{boardsize}: {len(queue)} in queue, {len(active)} in active')
idxs = active.gather(1, worlds.seats[:, None].long().cpu())[:, 0]
agent.random = universe[idxs]
decisions = agent(worlds)
worlds, transitions = worlds.step(decisions.actions)
moves += 1
rewards = transitions.rewards.cpu()
wins = (rewards == 1).int()
terminal = transitions.terminal.cpu()
for idx in terminal.nonzero(as_tuple=False).squeeze(-1):
result = arrdict.arrdict(
names=(f'mohex-{universe[active[idx][0]]:.2f}', f'mohex-{universe[active[idx][1]]:.2f}'),
wins=tuple(map(int, wins[idx])),
moves=int(moves[idx]),
boardsize=worlds.boardsize)
json.save(run_name, result)
moves[idx] = 0
uniform_refill(run_name, names, queue)
if not queue:
return
active[idx] = torch.tensor(queue[0])
queue = queue[1:]
def append(df, name):
names = list(df.index) + [name]
return df.reindex(index=names, columns=names).fillna(0)
class RollingArena:
def __init__(self, worlds, max_history):
self.worlds = worlds
self.mohex = mohex.MoHexAgent()
self.history = deque(maxlen=worlds.n_seats*max_history//self.worlds.n_envs)
self.soln = None
def play(self, agent):
size = self.worlds.boardsize
games = json.symmetric_games(f'mohex-{size}').pipe(append, 'agent')
wins = json.symmetric_wins(f'mohex-{size}').pipe(append, 'agent')
for result in self.history:
games.loc[result.names[0], result.names[1]] += result.games
games.loc[result.names[1], result.names[0]] += result.games
wins.loc[result.names[0], result.names[1]] += result.wins[0]
wins.loc[result.names[1], result.names[0]] += result.wins[1]
self.soln = activelo.solve(games, wins, soln=self.soln)
μ, σ = analysis.difference(self.soln, 'mohex-0.00', 'agent')
log.info(f'Agent elo is {μ:.2f}±{σ:.2f} based on {int(games.loc["agent"].sum())} games')
stats.mean_std('elo-mohex', μ, σ)
imp = activelo.improvement(self.soln)
imp = pd.DataFrame(imp, games.index, games.index)
challenger = imp['agent'].idxmax()
randomness = float(challenger.split('-')[1])
self.mohex.random = randomness
results = common.evaluate(self.worlds, {'agent': agent, challenger: self.mohex})
log.info(f'Agent played {challenger}, {int(results[0].wins[0] + results[1].wins[1])}-{int(results[0].wins[1] + results[1].wins[0])}')
self.history.extend(results)
return arrdict.arrdict(games=games.loc['agent'].sum(), mean=μ, std=σ)
def run_sync(run):
log.info('Arena launched')
run = runs.resolve(run)
log.info(f'Running arena for "{run}"')
with logs.to_run(run), stats.to_run(run):
worlds = common.worlds(run, 4)
arena = RollingArena(worlds, 128)
i = 0
agent = None
last_load, last_step = 0, 0
while True:
if time.time() - last_load > 15:
last_load = time.time()
agent = common.agent(run)
if agent and (time.time() - last_step > 1):
last_step = time.time()
log.info('Running trial')
arena.play(agent)
i += 1
@wraps(run_sync)
@contextmanager
def run(*args, **kwargs):
set_start_method('spawn', True)
p = Process(target=run_sync, args=args, kwargs=kwargs, name='mohex-arena')
try:
p.start()
yield p
finally:
for _ in range(50):
if not p.is_alive():
log.info('Arena monitor dead')
break
time.sleep(.1)
else:
log.info('Abruptly terminating arena monitor; it should have shut down naturally!')
p.terminate()
|
146811
|
from hparams import *
from sklearn.externals import joblib  # note: joblib moved out of sklearn.externals in scikit-learn >= 0.23
from keras.optimizers import Adam
from model.tacotron_model import get_tacotron_model
# import prepared data
decoder_input_training = joblib.load('data/decoder_input_training.pkl')
mel_spectro_training = joblib.load('data/mel_spectro_training.pkl')
spectro_training = joblib.load('data/spectro_training.pkl')
text_input_training = joblib.load('data/text_input_ml_training.pkl')
vocabulary = joblib.load('data/vocabulary.pkl')
model = get_tacotron_model(N_MEL, r, K1, K2, NB_CHARS_MAX,
EMBEDDING_SIZE, MAX_MEL_TIME_LENGTH,
MAX_MAG_TIME_LENGTH, N_FFT,
vocabulary)
opt = Adam()
model.compile(optimizer=opt,
loss=['mean_absolute_error', 'mean_absolute_error'])
train_history = model.fit([text_input_training, decoder_input_training],
[mel_spectro_training, spectro_training],
epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
verbose=1, validation_split=0.15)
joblib.dump(train_history.history, 'results/training_history.pkl')
model.save('results/model.h5')
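# Illustrative reload of the saved artifacts (assumes the run above completed;
# custom layers in the tacotron model may require a custom_objects mapping):
if __name__ == '__main__':
    from keras.models import load_model
    history = joblib.load('results/training_history.pkl')
    reloaded = load_model('results/model.h5')
    print(sorted(history.keys()), reloaded.count_params())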
|
146815
|
import os
import sys
from px import px
def test_run_on_pid(capfd):
"""
Just run px on a PID.
The only verification done here is that it doesn't crash,
there is room for improvement...
"""
argv = [
sys.argv[0],
"--no-pager", # Paging causes problems on Travis CI
# Note that px hides our own PID by design, so we look for our
# parent PID in this test.
str(os.getppid()),
]
# Enable manual inspection of the output:
# https://docs.pytest.org/en/latest/capture.html
with capfd.disabled():
px._main(argv)
|
146825
|
import falcon
import jinja2
from .base import BaseResource
from .html import read
class LoginResource(BaseResource):
    '''Falcon resource for user authentication'''
    auth_required = False
    def __init__(self, *args, **kwargs):
        super(LoginResource, self).__init__(*args, **kwargs)
    def _render_login(self):
        '''Render the login template with the configured URLs and feature flags.'''
        file = read('login.html')
        return jinja2.Template(file).render(baseurl=self.config.baseurl,
                                            apiurl=self.config.apiurl,
                                            loginurl=self.config.loginurl,
                                            include_register=self.config.include_register,
                                            registerurl=self.config.registerurl,
                                            include_password=self.config.include_password)
    def on_get(self, req, resp):
        '''Get login page'''
        resp.content_type = 'text/html'
        resp.body = self._render_login()
    def on_post(self, req, resp):
        '''Log user in using authentication backend'''
        token = self.db.users.login(None, req.params, self.session)
        user = self.db.users.detail(token, req.params, self.session)
        if token and user:
            # set up the token and the auth cookie
            req.context['auth_token'] = token
            req.context['user'] = user
            resp.set_cookie('auth_token', token, max_age=self.config.token_timeout, path='/', secure=not self.config.http)
            resp.status = falcon.HTTP_302
            resp.set_header('Location', self.config.baseurl)
        else:
            # re-render the login page on failed authentication
            resp.content_type = 'text/html'
            resp.body = self._render_login()
|
146854
|
import os.path as op
from pylama.check_async import check_async
from pylama.config import parse_options
from pylama.core import filter_errors, parse_modeline, run
from pylama.errors import Error, remove_duplicates
from pylama.hook import git_hook, hg_hook
from pylama.main import shell, check_path
def test_filter_errors():
assert list(filter_errors([Error(text='E1')], select=['E'], ignore=['E101']))
assert not list(filter_errors([Error(text='W1')], select=['W100'], ignore=['W']))
def test_remove_duplicates():
errors = [Error(linter='pycodestyle', text='E701'), Error(linter='pylint', text='C0321')]
errors = list(remove_duplicates(errors))
assert len(errors) == 1
def test_parser_modeline():
code = """
bla bla bla
# pylama: ignore=W12,E14:select=R:skip=0
"""
params = parse_modeline(code)
assert params == dict(ignore='W12,E14', select='R', skip='0')
def test_checkpath():
path = op.abspath('dummy.py')
options = parse_options([path])
result = check_path(options)
assert result
assert result[0].filename == 'dummy.py'
def test_linters_params():
options = parse_options(linters='mccabe', config=False)
options.linters_params['mccabe'] = dict(complexity=1)
errors = run('dummy.py', options=options)
assert len(errors) == 1
options.linters_params['mccabe'] = dict(complexity=20)
errors = run('dummy.py', options=options)
assert not errors
def test_sort():
options = parse_options()
options.sort = ['C', 'D']
errors = run('dummy.py', options=options)
assert errors[0].type == 'C'
def test_shell():
errors = shell('-o dummy dummy.py'.split(), error=False)
assert errors
errors = shell(['unknown.py'], error=False)
assert not errors
def test_git_hook():
assert not git_hook(False)
def test_hg_hook():
assert not hg_hook(None, dict())
def test_async():
options = parse_options(config=False)
errors = check_async(['dummy.py'], options=options, rootdir='.')
assert errors
|