max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
leetcode/algorithm/integer_inversion.py
|
ftconan/python3
| 1
|
6627251
|
"""
@author: magician
@date: 2019/12/18
@file: sum_of_two.py
"""
import sys
def reverse(x):
"""
reverse
:param x:
:return:
"""
new_x = 0
if isinstance(x, int):
try:
new_x = int(str(abs(x))[::-1])
if x < 0:
new_x = -new_x
if new_x < pow(-2, 31) or new_x > pow(2, 31):
new_x = 0
except:
new_x = 0
return new_x
if __name__ == '__main__':
result1 = reverse(123)
print(result1)
result2 = reverse(-1200)
print(result2)
result3 = reverse(-9010000)
print(result3)
# [−231, 231 − 1] python int max(9223372036854775807)
print(sys.maxsize)
result4 = reverse(1534236469)
print(result4)
|
"""
@author: magician
@date: 2019/12/18
@file: sum_of_two.py
"""
import sys
def reverse(x):
"""
reverse
:param x:
:return:
"""
new_x = 0
if isinstance(x, int):
try:
new_x = int(str(abs(x))[::-1])
if x < 0:
new_x = -new_x
if new_x < pow(-2, 31) or new_x > pow(2, 31):
new_x = 0
except:
new_x = 0
return new_x
if __name__ == '__main__':
result1 = reverse(123)
print(result1)
result2 = reverse(-1200)
print(result2)
result3 = reverse(-9010000)
print(result3)
# [−231, 231 − 1] python int max(9223372036854775807)
print(sys.maxsize)
result4 = reverse(1534236469)
print(result4)
|
en
| 0.51031
|
@author: magician @date: 2019/12/18 @file: sum_of_two.py reverse :param x: :return: # [−231, 231 − 1] python int max(9223372036854775807)
| 3.827171
| 4
|
cytomine-datamining/algorithms/counting/setup.py
|
Cytomine-ULiege/Cytomine-python-datamining
| 0
|
6627252
|
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='CellCounting',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
packages=['cell_counting', 'cell_counting.validation'],
install_requires=['numpy', 'scikit-learn', 'scipy', 'keras', 'shapely', 'joblib']
)
|
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='CellCounting',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
packages=['cell_counting', 'cell_counting.validation'],
install_requires=['numpy', 'scikit-learn', 'scipy', 'keras', 'shapely', 'joblib']
)
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 0.908239
| 1
|
pybullet-gym/pybulletgym/envs/mujoco/robots/robot_bases.py
|
SmaleZ/vcl_diayn
| 0
|
6627253
|
import pybullet
import gym, gym.spaces, gym.utils
import numpy as np
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0,parentdir)
class XmlBasedRobot:
"""
Base class for mujoco .xml based agents.
"""
self_collision = True
def __init__(self, robot_name, action_dim, obs_dim, self_collision, add_ignored_joints=False):
self.parts = None
self.objects = []
self.jdict = None
self.ordered_joints = None
self.robot_body = None
self.add_ignored_joints = add_ignored_joints
high = np.ones([action_dim])
self.action_space = gym.spaces.Box(-high, high)
high = np.inf * np.ones([obs_dim])
self.observation_space = gym.spaces.Box(-high, high)
self.robot_name = robot_name
self.self_collision = self_collision
def addToScene(self, bullet_client, bodies):
self._p = bullet_client
if self.parts is not None:
parts = self.parts
else:
parts = {}
if self.jdict is not None:
joints = self.jdict
else:
joints = {}
if self.ordered_joints is not None:
ordered_joints = self.ordered_joints
else:
ordered_joints = []
if np.isscalar(bodies): # streamline the case where bodies is actually just one body
bodies = [bodies]
dump = 0
for i in range(len(bodies)):
if self._p.getNumJoints(bodies[i]) == 0:
part_name, robot_name = self._p.getBodyInfo(bodies[i])
self.robot_name = robot_name.decode("utf8")
part_name = part_name.decode("utf8")
parts[part_name] = BodyPart(self._p, part_name, bodies, i, -1)
for j in range(self._p.getNumJoints(bodies[i])):
self._p.setJointMotorControl2(bodies[i], j, pybullet.POSITION_CONTROL, positionGain=0.1, velocityGain=0.1, force=0)
jointInfo = self._p.getJointInfo(bodies[i], j)
joint_name=jointInfo[1]
part_name=jointInfo[12]
joint_name = joint_name.decode("utf8")
part_name = part_name.decode("utf8")
if dump: print("ROBOT PART '%s'" % part_name)
if dump: print("ROBOT JOINT '%s'" % joint_name) # limits = %+0.2f..%+0.2f effort=%0.3f speed=%0.3f" % ((joint_name,) + j.limits()) )
parts[part_name] = BodyPart(self._p, part_name, bodies, i, j)
if part_name == self.robot_name:
self.robot_body = parts[part_name]
if i == 0 and j == 0 and self.robot_body is None: # if nothing else works, we take this as robot_body
parts[self.robot_name] = BodyPart(self._p, self.robot_name, bodies, 0, -1)
self.robot_body = parts[self.robot_name]
if joint_name[:6] == "ignore":
ignored_joint = Joint(self._p, joint_name, bodies, i, j)
ignored_joint.disable_motor()
if self.add_ignored_joints: # some of the robots (Hopper, Walker2D and HalfCheetah in mujoco) require read-access to these joints
joints[joint_name] = ignored_joint
ordered_joints.append(ignored_joint)
joints[joint_name].power_coef = 0.0
continue
if joint_name[:8] != "jointfix":
joints[joint_name] = Joint(self._p, joint_name, bodies, i, j)
ordered_joints.append(joints[joint_name])
joints[joint_name].power_coef = 100.0
return parts, joints, ordered_joints, self.robot_body
def reset_pose(self, position, orientation):
self.parts[self.robot_name].reset_pose(position, orientation)
class MJCFBasedRobot(XmlBasedRobot):
"""
Base class for mujoco .xml based agents.
"""
def __init__(self, model_xml, robot_name, action_dim, obs_dim, self_collision=True, add_ignored_joints=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision, add_ignored_joints)
self.model_xml = model_xml
self.doneLoading=0
def reset(self, bullet_client):
full_path = os.path.join(os.path.dirname(__file__), "..", "..", "assets", "mjcf", self.model_xml)
self._p = bullet_client
#print("Created bullet_client with id=", self._p._client)
if self.doneLoading == 0:
self.ordered_joints = []
self.doneLoading=1
if self.self_collision:
self.objects = self._p.loadMJCF(full_path, flags=pybullet.URDF_USE_SELF_COLLISION|pybullet.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, self.objects)
else:
self.objects = self._p.loadMJCF(full_path)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, self.objects)
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
return s
def calc_potential(self):
return 0
class URDFBasedRobot(XmlBasedRobot):
"""
Base class for URDF .xml based robots.
"""
def __init__(self, model_urdf, robot_name, action_dim, obs_dim, basePosition=None, baseOrientation=None, fixed_base=False, self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
self.model_urdf = model_urdf
self.basePosition = basePosition if basePosition is not None else [0, 0, 0]
self.baseOrientation = baseOrientation if baseOrientation is not None else [0, 0, 0, 1]
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
full_path = os.path.join(os.path.dirname(__file__), "assets", "robots", self.model_urdf)
print(full_path)
if self.self_collision:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p,
self._p.loadURDF(full_path,
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base,
flags=pybullet.URDF_USE_SELF_COLLISION))
else:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p,
self._p.loadURDF(full_path,
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base))
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class SDFBasedRobot(XmlBasedRobot):
"""
Base class for SDF robots in a Scene.
"""
def __init__(self, model_sdf, robot_name, action_dim, obs_dim, basePosition=None, baseOrientation=None, fixed_base=False, self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
if basePosition is None:
basePosition = [0, 0, 0]
if baseOrientation is None:
baseOrientation = [0, 0, 0, 1]
self.model_sdf = model_sdf
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, # TODO: Not sure if this works, try it with kuka
self._p.loadSDF(os.path.join("models_robot", self.model_sdf)))
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class PoseHelper: # dummy class to comply to original interface
def __init__(self, body_part):
self.body_part = body_part
def xyz(self):
return self.body_part.current_position()
def rpy(self):
return pybullet.getEulerFromQuaternion(self.body_part.current_orientation())
def orientation(self):
return self.body_part.current_orientation()
def speed(self):
return self.body_part.speed()
class BodyPart:
def __init__(self, bullet_client, body_name, bodies, bodyIndex, bodyPartIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.bodyPartIndex = bodyPartIndex
self.initialPosition = self.current_position()
self.initialOrientation = self.current_orientation()
self.bp_pose = PoseHelper(self)
def state_fields_of_pose_of(self, body_id, link_id=-1): # a method you will most probably need a lot to get pose and orientation
if link_id == -1:
(x, y, z), (a, b, c, d) = self._p.getBasePositionAndOrientation(body_id)
else:
(x, y, z), (a, b, c, d), _, _, _, _ = self._p.getLinkState(body_id, link_id)
return np.array([x, y, z, a, b, c, d])
def get_pose(self):
return self.state_fields_of_pose_of(self.bodies[self.bodyIndex], self.bodyPartIndex)
def speed(self):
if self.bodyPartIndex == -1:
(vx, vy, vz), _ = self._p.getBaseVelocity(self.bodies[self.bodyIndex])
else:
(x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vy) = self._p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
return np.array([vx, vy, vz])
def current_position(self):
return self.get_pose()[:3]
def current_orientation(self):
return self.get_pose()[3:]
def get_position(self):
return self.current_position()
def get_orientation(self):
return self.current_orientation()
def get_velocity(self):
return self._p.getBaseVelocity(self.bodies[self.bodyIndex])
def reset_position(self, position):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, self.get_orientation())
def reset_orientation(self, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], self.get_position(), orientation)
def reset_velocity(self, linearVelocity=None, angularVelocity=None):
if linearVelocity is None:
linearVelocity = [0, 0, 0]
if angularVelocity is None:
angularVelocity = [0, 0, 0]
self._p.resetBaseVelocity(self.bodies[self.bodyIndex], linearVelocity, angularVelocity)
def reset_pose(self, position, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, orientation)
def pose(self):
return self.bp_pose
def contact_list(self):
return self._p.getContactPoints(self.bodies[self.bodyIndex], -1, self.bodyPartIndex, -1)
class Joint:
def __init__(self, bullet_client, joint_name, bodies, bodyIndex, jointIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.jointIndex = jointIndex
self.joint_name = joint_name
joint_info = self._p.getJointInfo(self.bodies[self.bodyIndex], self.jointIndex)
self.jointType = joint_info[2]
self.lowerLimit = joint_info[8]
self.upperLimit = joint_info[9]
self.jointHasLimits = self.lowerLimit < self.upperLimit
self.jointMaxVelocity = joint_info[11]
self.power_coeff = 0
def set_state(self, x, vx):
self._p.resetJointState(self.bodies[self.bodyIndex], self.jointIndex, x, vx)
def current_position(self): # just some synonym method
return self.get_state()
def current_relative_position(self):
pos, vel = self.get_state()
if self.jointHasLimits:
pos_mid = 0.5 * (self.lowerLimit + self.upperLimit)
pos = 2 * (pos - pos_mid) / (self.upperLimit - self.lowerLimit)
if self.jointMaxVelocity > 0:
vel /= self.jointMaxVelocity
elif self.jointType == 0: # JOINT_REVOLUTE_TYPE
vel *= 0.1
else:
vel *= 0.5
return (
pos,
vel
)
def get_state(self):
x, vx,_,_ = self._p.getJointState(self.bodies[self.bodyIndex],self.jointIndex)
return x, vx
def get_position(self):
x, _ = self.get_state()
return x
def get_orientation(self):
_,r = self.get_state()
return r
def get_velocity(self):
_, vx = self.get_state()
return vx
def set_position(self, position):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,pybullet.POSITION_CONTROL, targetPosition=position)
def set_velocity(self, velocity):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,pybullet.VELOCITY_CONTROL, targetVelocity=velocity)
def set_motor_torque(self, torque): # just some synonym method
self.set_torque(torque)
def set_torque(self, torque):
self._p.setJointMotorControl2(bodyIndex=self.bodies[self.bodyIndex], jointIndex=self.jointIndex, controlMode=pybullet.TORQUE_CONTROL, force=torque) #, positionGain=0.1, velocityGain=0.1)
def reset_current_position(self, position, velocity): # just some synonym method
self.reset_position(position, velocity)
def reset_position(self, position, velocity):
self._p.resetJointState(self.bodies[self.bodyIndex],self.jointIndex,targetValue=position, targetVelocity=velocity)
self.disable_motor()
def disable_motor(self):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,controlMode=pybullet.POSITION_CONTROL, targetPosition=0, targetVelocity=0, positionGain=0.1, velocityGain=0.1, force=0)
|
import pybullet
import gym, gym.spaces, gym.utils
import numpy as np
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0,parentdir)
class XmlBasedRobot:
"""
Base class for mujoco .xml based agents.
"""
self_collision = True
def __init__(self, robot_name, action_dim, obs_dim, self_collision, add_ignored_joints=False):
self.parts = None
self.objects = []
self.jdict = None
self.ordered_joints = None
self.robot_body = None
self.add_ignored_joints = add_ignored_joints
high = np.ones([action_dim])
self.action_space = gym.spaces.Box(-high, high)
high = np.inf * np.ones([obs_dim])
self.observation_space = gym.spaces.Box(-high, high)
self.robot_name = robot_name
self.self_collision = self_collision
def addToScene(self, bullet_client, bodies):
self._p = bullet_client
if self.parts is not None:
parts = self.parts
else:
parts = {}
if self.jdict is not None:
joints = self.jdict
else:
joints = {}
if self.ordered_joints is not None:
ordered_joints = self.ordered_joints
else:
ordered_joints = []
if np.isscalar(bodies): # streamline the case where bodies is actually just one body
bodies = [bodies]
dump = 0
for i in range(len(bodies)):
if self._p.getNumJoints(bodies[i]) == 0:
part_name, robot_name = self._p.getBodyInfo(bodies[i])
self.robot_name = robot_name.decode("utf8")
part_name = part_name.decode("utf8")
parts[part_name] = BodyPart(self._p, part_name, bodies, i, -1)
for j in range(self._p.getNumJoints(bodies[i])):
self._p.setJointMotorControl2(bodies[i], j, pybullet.POSITION_CONTROL, positionGain=0.1, velocityGain=0.1, force=0)
jointInfo = self._p.getJointInfo(bodies[i], j)
joint_name=jointInfo[1]
part_name=jointInfo[12]
joint_name = joint_name.decode("utf8")
part_name = part_name.decode("utf8")
if dump: print("ROBOT PART '%s'" % part_name)
if dump: print("ROBOT JOINT '%s'" % joint_name) # limits = %+0.2f..%+0.2f effort=%0.3f speed=%0.3f" % ((joint_name,) + j.limits()) )
parts[part_name] = BodyPart(self._p, part_name, bodies, i, j)
if part_name == self.robot_name:
self.robot_body = parts[part_name]
if i == 0 and j == 0 and self.robot_body is None: # if nothing else works, we take this as robot_body
parts[self.robot_name] = BodyPart(self._p, self.robot_name, bodies, 0, -1)
self.robot_body = parts[self.robot_name]
if joint_name[:6] == "ignore":
ignored_joint = Joint(self._p, joint_name, bodies, i, j)
ignored_joint.disable_motor()
if self.add_ignored_joints: # some of the robots (Hopper, Walker2D and HalfCheetah in mujoco) require read-access to these joints
joints[joint_name] = ignored_joint
ordered_joints.append(ignored_joint)
joints[joint_name].power_coef = 0.0
continue
if joint_name[:8] != "jointfix":
joints[joint_name] = Joint(self._p, joint_name, bodies, i, j)
ordered_joints.append(joints[joint_name])
joints[joint_name].power_coef = 100.0
return parts, joints, ordered_joints, self.robot_body
def reset_pose(self, position, orientation):
self.parts[self.robot_name].reset_pose(position, orientation)
class MJCFBasedRobot(XmlBasedRobot):
"""
Base class for mujoco .xml based agents.
"""
def __init__(self, model_xml, robot_name, action_dim, obs_dim, self_collision=True, add_ignored_joints=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision, add_ignored_joints)
self.model_xml = model_xml
self.doneLoading=0
def reset(self, bullet_client):
full_path = os.path.join(os.path.dirname(__file__), "..", "..", "assets", "mjcf", self.model_xml)
self._p = bullet_client
#print("Created bullet_client with id=", self._p._client)
if self.doneLoading == 0:
self.ordered_joints = []
self.doneLoading=1
if self.self_collision:
self.objects = self._p.loadMJCF(full_path, flags=pybullet.URDF_USE_SELF_COLLISION|pybullet.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, self.objects)
else:
self.objects = self._p.loadMJCF(full_path)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, self.objects)
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
return s
def calc_potential(self):
return 0
class URDFBasedRobot(XmlBasedRobot):
"""
Base class for URDF .xml based robots.
"""
def __init__(self, model_urdf, robot_name, action_dim, obs_dim, basePosition=None, baseOrientation=None, fixed_base=False, self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
self.model_urdf = model_urdf
self.basePosition = basePosition if basePosition is not None else [0, 0, 0]
self.baseOrientation = baseOrientation if baseOrientation is not None else [0, 0, 0, 1]
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
full_path = os.path.join(os.path.dirname(__file__), "assets", "robots", self.model_urdf)
print(full_path)
if self.self_collision:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p,
self._p.loadURDF(full_path,
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base,
flags=pybullet.URDF_USE_SELF_COLLISION))
else:
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p,
self._p.loadURDF(full_path,
basePosition=self.basePosition,
baseOrientation=self.baseOrientation,
useFixedBase=self.fixed_base))
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class SDFBasedRobot(XmlBasedRobot):
"""
Base class for SDF robots in a Scene.
"""
def __init__(self, model_sdf, robot_name, action_dim, obs_dim, basePosition=None, baseOrientation=None, fixed_base=False, self_collision=False):
XmlBasedRobot.__init__(self, robot_name, action_dim, obs_dim, self_collision)
if basePosition is None:
basePosition = [0, 0, 0]
if baseOrientation is None:
baseOrientation = [0, 0, 0, 1]
self.model_sdf = model_sdf
self.fixed_base = fixed_base
def reset(self, bullet_client):
self._p = bullet_client
self.ordered_joints = []
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self._p, # TODO: Not sure if this works, try it with kuka
self._p.loadSDF(os.path.join("models_robot", self.model_sdf)))
self.robot_specific_reset(self._p)
s = self.calc_state() # optimization: calc_state() can calculate something in self.* for calc_potential() to use
self.potential = self.calc_potential()
return s
def calc_potential(self):
return 0
class PoseHelper: # dummy class to comply to original interface
def __init__(self, body_part):
self.body_part = body_part
def xyz(self):
return self.body_part.current_position()
def rpy(self):
return pybullet.getEulerFromQuaternion(self.body_part.current_orientation())
def orientation(self):
return self.body_part.current_orientation()
def speed(self):
return self.body_part.speed()
class BodyPart:
def __init__(self, bullet_client, body_name, bodies, bodyIndex, bodyPartIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.bodyPartIndex = bodyPartIndex
self.initialPosition = self.current_position()
self.initialOrientation = self.current_orientation()
self.bp_pose = PoseHelper(self)
def state_fields_of_pose_of(self, body_id, link_id=-1): # a method you will most probably need a lot to get pose and orientation
if link_id == -1:
(x, y, z), (a, b, c, d) = self._p.getBasePositionAndOrientation(body_id)
else:
(x, y, z), (a, b, c, d), _, _, _, _ = self._p.getLinkState(body_id, link_id)
return np.array([x, y, z, a, b, c, d])
def get_pose(self):
return self.state_fields_of_pose_of(self.bodies[self.bodyIndex], self.bodyPartIndex)
def speed(self):
if self.bodyPartIndex == -1:
(vx, vy, vz), _ = self._p.getBaseVelocity(self.bodies[self.bodyIndex])
else:
(x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vy) = self._p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
return np.array([vx, vy, vz])
def current_position(self):
return self.get_pose()[:3]
def current_orientation(self):
return self.get_pose()[3:]
def get_position(self):
return self.current_position()
def get_orientation(self):
return self.current_orientation()
def get_velocity(self):
return self._p.getBaseVelocity(self.bodies[self.bodyIndex])
def reset_position(self, position):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, self.get_orientation())
def reset_orientation(self, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], self.get_position(), orientation)
def reset_velocity(self, linearVelocity=None, angularVelocity=None):
if linearVelocity is None:
linearVelocity = [0, 0, 0]
if angularVelocity is None:
angularVelocity = [0, 0, 0]
self._p.resetBaseVelocity(self.bodies[self.bodyIndex], linearVelocity, angularVelocity)
def reset_pose(self, position, orientation):
self._p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, orientation)
def pose(self):
return self.bp_pose
def contact_list(self):
return self._p.getContactPoints(self.bodies[self.bodyIndex], -1, self.bodyPartIndex, -1)
class Joint:
def __init__(self, bullet_client, joint_name, bodies, bodyIndex, jointIndex):
self.bodies = bodies
self._p = bullet_client
self.bodyIndex = bodyIndex
self.jointIndex = jointIndex
self.joint_name = joint_name
joint_info = self._p.getJointInfo(self.bodies[self.bodyIndex], self.jointIndex)
self.jointType = joint_info[2]
self.lowerLimit = joint_info[8]
self.upperLimit = joint_info[9]
self.jointHasLimits = self.lowerLimit < self.upperLimit
self.jointMaxVelocity = joint_info[11]
self.power_coeff = 0
def set_state(self, x, vx):
self._p.resetJointState(self.bodies[self.bodyIndex], self.jointIndex, x, vx)
def current_position(self): # just some synonym method
return self.get_state()
def current_relative_position(self):
pos, vel = self.get_state()
if self.jointHasLimits:
pos_mid = 0.5 * (self.lowerLimit + self.upperLimit)
pos = 2 * (pos - pos_mid) / (self.upperLimit - self.lowerLimit)
if self.jointMaxVelocity > 0:
vel /= self.jointMaxVelocity
elif self.jointType == 0: # JOINT_REVOLUTE_TYPE
vel *= 0.1
else:
vel *= 0.5
return (
pos,
vel
)
def get_state(self):
x, vx,_,_ = self._p.getJointState(self.bodies[self.bodyIndex],self.jointIndex)
return x, vx
def get_position(self):
x, _ = self.get_state()
return x
def get_orientation(self):
_,r = self.get_state()
return r
def get_velocity(self):
_, vx = self.get_state()
return vx
def set_position(self, position):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,pybullet.POSITION_CONTROL, targetPosition=position)
def set_velocity(self, velocity):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,pybullet.VELOCITY_CONTROL, targetVelocity=velocity)
def set_motor_torque(self, torque): # just some synonym method
self.set_torque(torque)
def set_torque(self, torque):
self._p.setJointMotorControl2(bodyIndex=self.bodies[self.bodyIndex], jointIndex=self.jointIndex, controlMode=pybullet.TORQUE_CONTROL, force=torque) #, positionGain=0.1, velocityGain=0.1)
def reset_current_position(self, position, velocity): # just some synonym method
self.reset_position(position, velocity)
def reset_position(self, position, velocity):
self._p.resetJointState(self.bodies[self.bodyIndex],self.jointIndex,targetValue=position, targetVelocity=velocity)
self.disable_motor()
def disable_motor(self):
self._p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,controlMode=pybullet.POSITION_CONTROL, targetPosition=0, targetVelocity=0, positionGain=0.1, velocityGain=0.1, force=0)
|
en
| 0.867601
|
Base class for mujoco .xml based agents. # streamline the case where bodies is actually just one body # limits = %+0.2f..%+0.2f effort=%0.3f speed=%0.3f" % ((joint_name,) + j.limits()) ) # if nothing else works, we take this as robot_body # some of the robots (Hopper, Walker2D and HalfCheetah in mujoco) require read-access to these joints Base class for mujoco .xml based agents. #print("Created bullet_client with id=", self._p._client) # optimization: calc_state() can calculate something in self.* for calc_potential() to use Base class for URDF .xml based robots. # optimization: calc_state() can calculate something in self.* for calc_potential() to use Base class for SDF robots in a Scene. # TODO: Not sure if this works, try it with kuka # optimization: calc_state() can calculate something in self.* for calc_potential() to use # dummy class to comply to original interface # a method you will most probably need a lot to get pose and orientation # just some synonym method # JOINT_REVOLUTE_TYPE # just some synonym method #, positionGain=0.1, velocityGain=0.1) # just some synonym method
| 2.225717
| 2
|
dodgy_main/login.py
|
codingPaulStuart/python-carGUI
| 0
|
6627254
|
# 4PINT Assessment 2 - <NAME> 000389223
# Login Class
# 16.06.21
import tkinter as tk
from abc import abstractmethod
from tkinter import messagebox, END
class Login:
__correct_cred = False
__correct_user = ""
__correct_pw = ""
@classmethod
def set_correct_cred(cls, bool_val):
cls.__correct_cred = bool_val
@classmethod
def is_correct_cred(cls):
return cls.__correct_cred
@classmethod
def read_in(cls):
credentials = []
file = open('login_data.txt', 'r')
for line in file:
credentials.append(line.rstrip('\n'))
cls.__correct_user = credentials[0]
cls.__correct_pw = credentials[1]
cls.__correct_user.strip()
cls.__correct_pw.strip()
@classmethod
def login_gui(cls):
cls.read_in()
def check():
user_name = user_n.get()
pass_word = pw.get()
popup = messagebox
if user_name == "" and pass_word == "":
popup.showinfo("", "Blank Not Allowed")
elif user_name == cls.__correct_user and pass_word == cls.__correct_pw:
popup.showinfo("", "Login Success")
cls.set_correct_cred(True)
root.destroy()
else:
popup.showinfo("", "Incorrect Username and Password")
def getNumber(event):
text = event.widget['text']
if root.focus_get() == user_n:
user_n.insert(END, text)
elif root.focus_get() == pw:
pw.insert(END, text)
else:
messagebox.showwarning('Error!', 'Please click into the box you wish to enter numbers for.')
def clearAll():
user_n.delete(0, END)
pw.delete(0, END)
root = tk.Tk()
root.title("Admin Login to Dodgy Bros Interface")
root.geometry("500x700")
root.configure(bg='yellow')
root.iconbitmap('carYellow.ico')
# row configure
root.rowconfigure(0, weight=3)
root.rowconfigure(1, weight=1)
root.rowconfigure(2, weight=3)
root.rowconfigure(3, weight=3)
root.rowconfigure(4, weight=3)
root.rowconfigure(5, weight=3)
# column configure
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=1)
root.columnconfigure(2, weight=1)
# defining widgets
heading = tk.Label(root, text='DODGY BROTHERS LOG IN', font=('Verdana', 16, 'bold'), bg='yellow', fg='grey')
pw = tk.Label(root, text='Username/PIN >', bg='yellow', fg='black', font=('Verdana', 8, 'bold'))
user_n = tk.Entry(root)
pw = tk.Entry(root, show='*')
btn_width = 4
btn_height = 4
font_size = 16
one = tk.Button(root, text='1', bg='white', width=btn_width, height=btn_height, relief='ridge',
font=('Verdana', font_size, 'bold'))
two = tk.Button(root, text='2', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
three = tk.Button(root, text='3', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
four = tk.Button(root, text='4', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
five = tk.Button(root, text='5', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
six = tk.Button(root, text='6', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
seven = tk.Button(root, text='7', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
eight = tk.Button(root, text='8', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
nine = tk.Button(root, text='9', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
cancel = tk.Button(root, text='CANCEL\nCLEAR', bg='red', fg='white', width=btn_width, height=btn_height,
command=clearAll, font=('Verdana', font_size, 'bold'), relief='ridge')
zero = tk.Button(root, text='0', bg='white', width=btn_width, height=btn_height,
font=('Verdana', font_size, 'bold'), relief='ridge')
log_in = tk.Button(root, text='Log in', bg='green', fg='white', width=btn_width, height=btn_height,
command=check, font=('Verdana', font_size, 'bold'), relief='ridge')
# defining grid
heading.grid(row=0, column=0, rowspan=1, columnspan=3, sticky='nsew')
pw.grid(row=1, column=0, sticky='nsew')
user_n.grid(row=1, column=1, sticky='nsew', padx=(10, 10), pady=(10, 10))
pw.grid(row=1, column=2, sticky='nsew', padx=(10, 20), pady=(10, 10))
one.grid(row=2, column=0, sticky='nsew', padx=(20, 10), pady=(10, 10))
two.grid(row=2, column=1, sticky='nsew', padx=(10, 10), pady=(10, 10))
three.grid(row=2, column=2, sticky='nsew', padx=(10, 20), pady=(10, 10))
four.grid(row=3, column=0, sticky='nsew', padx=(20, 10), pady=(10, 10))
five.grid(row=3, column=1, sticky='nsew', padx=(10, 10), pady=(10, 10))
six.grid(row=3, column=2, sticky='nsew', padx=(10, 20), pady=(10, 10))
seven.grid(row=4, column=0, sticky='nsew', padx=(20, 10), pady=(10, 10))
eight.grid(row=4, column=1, sticky='nsew', padx=(10, 10), pady=(10, 10))
nine.grid(row=4, column=2, sticky='nsew', padx=(10, 20), pady=(10, 10))
cancel.grid(row=5, column=0, sticky='nsew', padx=(20, 10), pady=(10, 20))
zero.grid(row=5, column=1, sticky='nsew', padx=(10, 10), pady=(10, 20))
log_in.grid(row=5, column=2, sticky='nsew', padx=(10, 20), pady=(10, 20))
# Binding Functions to buttons
one.bind('<Button-1>', getNumber)
two.bind('<Button-1>', getNumber)
three.bind('<Button-1>', getNumber)
four.bind('<Button-1>', getNumber)
five.bind('<Button-1>', getNumber)
six.bind('<Button-1>', getNumber)
seven.bind('<Button-1>', getNumber)
eight.bind('<Button-1>', getNumber)
nine.bind('<Button-1>', getNumber)
zero.bind('<Button-1>', getNumber)
root.mainloop()
|
# 4PINT Assessment 2 - <NAME> 000389223
# Login Class
# 16.06.21
import tkinter as tk
from abc import abstractmethod
from tkinter import messagebox, END
class Login:
    """Numeric-keypad login window for the Dodgy Brothers admin interface.

    Expected credentials are loaded from ``login_data.txt`` (username on
    line 1, PIN on line 2) and compared against the operator's input.
    """

    __correct_cred = False  # becomes True once a successful login happened
    __correct_user = ""     # expected username, loaded from file
    __correct_pw = ""       # expected PIN, loaded from file

    @classmethod
    def set_correct_cred(cls, bool_val):
        """Record whether valid credentials have been entered."""
        cls.__correct_cred = bool_val

    @classmethod
    def is_correct_cred(cls):
        """Return True if a successful login has been recorded."""
        return cls.__correct_cred

    @classmethod
    def read_in(cls):
        """Load the expected username and PIN from ``login_data.txt``."""
        # 'with' guarantees the handle is closed (the previous version leaked
        # it), and str.strip() must be assigned back -- strings are immutable,
        # so the old bare ``x.strip()`` calls were no-ops.
        with open('login_data.txt', 'r') as file:
            credentials = [line.rstrip('\n') for line in file]
        cls.__correct_user = credentials[0].strip()
        cls.__correct_pw = credentials[1].strip()

    @classmethod
    def login_gui(cls):
        """Show the login window; blocks until the window is closed."""
        cls.read_in()

        def check():
            """Validate the entered credentials against the stored ones."""
            user_name = user_n.get()
            pass_word = pw.get()
            popup = messagebox
            if user_name == "" and pass_word == "":
                popup.showinfo("", "Blank Not Allowed")
            elif user_name == cls.__correct_user and pass_word == cls.__correct_pw:
                popup.showinfo("", "Login Success")
                cls.set_correct_cred(True)
                root.destroy()
            else:
                popup.showinfo("", "Incorrect Username and Password")

        def getNumber(event):
            """Append the pressed keypad digit to whichever entry has focus."""
            text = event.widget['text']
            if root.focus_get() == user_n:
                user_n.insert(END, text)
            elif root.focus_get() == pw:
                pw.insert(END, text)
            else:
                messagebox.showwarning('Error!', 'Please click into the box you wish to enter numbers for.')

        def clearAll():
            """Empty both entry boxes."""
            user_n.delete(0, END)
            pw.delete(0, END)

        root = tk.Tk()
        root.title("Admin Login to Dodgy Bros Interface")
        root.geometry("500x700")
        root.configure(bg='yellow')
        try:
            root.iconbitmap('carYellow.ico')
        except tk.TclError:
            pass  # a missing icon file must not prevent logging in

        # grid weights: tall header row, slim entry row, equal keypad rows
        root.rowconfigure(0, weight=3)
        root.rowconfigure(1, weight=1)
        for r in range(2, 6):
            root.rowconfigure(r, weight=3)
        for c in range(3):
            root.columnconfigure(c, weight=1)

        # defining widgets
        heading = tk.Label(root, text='DODGY BROTHERS LOG IN', font=('Verdana', 16, 'bold'), bg='yellow', fg='grey')
        # BUG FIX: the prompt label and the password entry were both named
        # 'pw', so the label was overwritten and never displayed.
        prompt = tk.Label(root, text='Username/PIN >', bg='yellow', fg='black', font=('Verdana', 8, 'bold'))
        user_n = tk.Entry(root)
        pw = tk.Entry(root, show='*')

        btn_width = 4
        btn_height = 4
        font_size = 16
        digit_cfg = dict(bg='white', width=btn_width, height=btn_height,
                         font=('Verdana', font_size, 'bold'), relief='ridge')
        digits = {str(d): tk.Button(root, text=str(d), **digit_cfg) for d in range(10)}
        cancel = tk.Button(root, text='CANCEL\nCLEAR', bg='red', fg='white', width=btn_width, height=btn_height,
                           command=clearAll, font=('Verdana', font_size, 'bold'), relief='ridge')
        log_in = tk.Button(root, text='Log in', bg='green', fg='white', width=btn_width, height=btn_height,
                           command=check, font=('Verdana', font_size, 'bold'), relief='ridge')

        # defining grid -- outer columns get extra padding toward the border
        padx_for = {0: (20, 10), 1: (10, 10), 2: (10, 20)}
        heading.grid(row=0, column=0, rowspan=1, columnspan=3, sticky='nsew')
        prompt.grid(row=1, column=0, sticky='nsew')
        user_n.grid(row=1, column=1, sticky='nsew', padx=(10, 10), pady=(10, 10))
        pw.grid(row=1, column=2, sticky='nsew', padx=(10, 20), pady=(10, 10))
        for idx, d in enumerate('123456789'):
            row, col = divmod(idx, 3)
            digits[d].grid(row=row + 2, column=col, sticky='nsew',
                           padx=padx_for[col], pady=(10, 10))
        cancel.grid(row=5, column=0, sticky='nsew', padx=padx_for[0], pady=(10, 20))
        digits['0'].grid(row=5, column=1, sticky='nsew', padx=padx_for[1], pady=(10, 20))
        log_in.grid(row=5, column=2, sticky='nsew', padx=padx_for[2], pady=(10, 20))

        # keypad buttons feed the currently focused entry box
        for btn in digits.values():
            btn.bind('<Button-1>', getNumber)

        root.mainloop()
|
en
| 0.400286
|
# 4PINT Assessment 2 - <NAME> 000389223 # Login Class # 16.06.21 # row configure # column configure # defining widgets # defining grid # Binding Functions to buttons
| 3.452575
| 3
|
dbPGClass.py
|
iammortimer/TN-ARRR-Gateway
| 0
|
6627255
|
<reponame>iammortimer/TN-ARRR-Gateway
import psycopg2 as pgdb
from psycopg2 import sql
from psycopg2 import pool
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from datetime import timedelta
import datetime
import os
class dbPGCalls(object):
    """PostgreSQL persistence layer for the gateway.

    All access goes through a psycopg2 ThreadedConnectionPool; every method
    borrows a connection, runs its statement in autocommit mode and returns
    the connection to the pool (even on error, via try/finally).
    """

    def __init__(self, config):
        """Open the connection pool, creating the database first if needed."""
        self.config = config
        try:
            self.psPool = pgdb.pool.ThreadedConnectionPool(
                1, 10, database=config['main']['name'],
                user=self.config["postgres"]["pguser"],
                password=self.config["postgres"]["<PASSWORD>"],
                host=self.config["postgres"]["pghost"],
                port=self.config["postgres"]["pgport"])
            # borrow/return one connection to verify the pool really works
            dbCon = self.psPool.getconn()
            self.psPool.putconn(dbCon)
        except pgdb.Error:
            # The gateway database probably does not exist yet: connect to the
            # server's default database, create it, then build the pool.
            self.dbCon = pgdb.connect(
                user=self.config["postgres"]["pguser"],
                password=self.config["postgres"]["<PASSWORD>"],
                host=self.config["postgres"]["pghost"],
                port=self.config["postgres"]["pgport"])
            self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
            sqlstr = sql.SQL('CREATE DATABASE {};').format(sql.Identifier(self.config['main']['name']))
            cursor = self.dbCon.cursor()
            cursor.execute(sqlstr)
            cursor.close()
            self.dbCon.close()
            self.psPool = pgdb.pool.ThreadedConnectionPool(
                1, 10, database=config['main']['name'],
                user=self.config["postgres"]["pguser"],
                password=self.config["postgres"]["<PASSWORD>"],
                host=self.config["postgres"]["pghost"],
                port=self.config["postgres"]["pgport"])

    def openConn(self):
        """Borrow an autocommit connection from the pool."""
        dbCon = self.psPool.getconn()
        dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        return dbCon

    def closeConn(self, dbCon):
        """Return a borrowed connection to the pool."""
        self.psPool.putconn(dbCon)

    # ---- internal helpers -------------------------------------------------

    def _fetchall(self, query, values=None):
        """Run a SELECT and return every row.

        The pooled connection is always returned, even when the query raises.
        """
        dbCon = self.openConn()
        try:
            cursor = dbCon.cursor()
            cursor.execute(query, values)
            rows = cursor.fetchall()
            cursor.close()
            return rows
        finally:
            self.closeConn(dbCon)

    def _execute(self, query, values=None):
        """Run an INSERT/UPDATE/DELETE (autocommit, no result rows)."""
        dbCon = self.openConn()
        try:
            cursor = dbCon.cursor()
            cursor.execute(query, values)
            cursor.close()
        finally:
            self.closeConn(dbCon)

    #DB Setup part
    def createdb(self):
        """Create all gateway tables if they do not exist yet."""
        createHeightTable = '''
            CREATE TABLE IF NOT EXISTS heights (
                id SERIAL PRIMARY KEY,
                chain text NOT NULL,
                height integer
            );
        '''
        createTunnelTable = '''
            CREATE TABLE IF NOT EXISTS tunnel (
                id SERIAL PRIMARY KEY,
                sourceaddress text NOT NULL,
                targetaddress text NOT NULL,
                timestamp timestamp
                default current_timestamp,
                status text
            );
        '''
        createTableExecuted = '''
            CREATE TABLE IF NOT EXISTS executed (
                id SERIAL PRIMARY KEY,
                sourceaddress text NOT NULL,
                targetaddress text NOT NULL,
                tntxid text NOT NULL,
                othertxid text NOT NULL,
                timestamp timestamp
                default current_timestamp,
                amount real,
                amountFee real
            );
        '''
        createTableErrors = '''
            CREATE TABLE IF NOT EXISTS errors (
                id SERIAL PRIMARY KEY,
                sourceaddress text ,
                targetaddress text ,
                tntxid text ,
                othertxid text ,
                timestamp timestamp
                default current_timestamp,
                amount real,
                error text,
                exception text
            );
        '''
        createVerifyTable = '''
            CREATE TABLE IF NOT EXISTS verified (
                id SERIAL PRIMARY KEY,
                chain text NOT NULL,
                tx text NOT NULL,
                block integer
            );
        '''
        dbCon = self.openConn()
        try:
            cursor = dbCon.cursor()
            for ddl in (createHeightTable, createTunnelTable, createTableExecuted,
                        createTableErrors, createVerifyTable):
                cursor.execute(sql.SQL(ddl))
            cursor.close()
        finally:
            self.closeConn(dbCon)

    #import existing sqlite db
    def importSQLite(self):
        """One-off migration of an existing sqlite ``gateway.db`` into PostgreSQL."""
        import sqlite3
        if self.config["main"]["db-location"] != "":
            path = os.getcwd()
            dbfile = os.path.normpath(path + '/' + self.config["main"]["db-location"] + '/' + 'gateway.db')
        else:
            dbfile = 'gateway.db'
        consq = sqlite3.connect(dbfile)
        cursq = consq.cursor()
        cursq.execute("SELECT name FROM sqlite_master WHERE type='table'")
        tabnames = [item[0] for item in cursq.fetchall()]
        dbCon = self.openConn()
        try:
            for table in tabnames:
                cursq.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name = ?;", (table,))
                create = cursq.fetchone()[0]
                # table names come from sqlite's own metadata, not user input
                cursq.execute("SELECT * FROM %s;" % table)
                rows = cursq.fetchall()
                if len(rows) == 0:
                    continue
                placeholders = ','.join(['%s'] * len(rows[0]))
                try:
                    curpg = dbCon.cursor()
                    curpg.execute("DROP TABLE IF EXISTS %s;" % table)
                    curpg.execute(create)
                    curpg.executemany("INSERT INTO %s VALUES (%s);" % (table, placeholders), rows)
                    if table != 'heights':
                        # keep the serial id counter ahead of the imported rows
                        curpg.execute("ALTER TABLE %s ALTER id ADD GENERATED ALWAYS AS IDENTITY (START WITH %s);" % (table, len(rows) + 1))
                    curpg.close()
                except Exception as e:
                    # BUG FIX: the old code returned the connection to the pool
                    # here AND at the end (double putconn); now it is released
                    # exactly once, in the finally below.
                    print('Error %s' % e)
        finally:
            self.closeConn(dbCon)
        consq.close()

    #heights table related
    def lastScannedBlock(self, chain):
        """Return the last scanned height for *chain*, or {} when unknown."""
        rows = self._fetchall('SELECT height FROM heights WHERE chain = %s', (chain,))
        return rows[0][0] if rows else {}

    def getHeights(self):
        """Return all (chain, height) rows, or {} when the table is empty."""
        rows = self._fetchall('SELECT chain, height FROM heights')
        return rows if rows else {}

    def updHeights(self, block, chain):
        """Set the stored height for *chain* to *block*."""
        self._execute('UPDATE heights SET "height" = %s WHERE chain = %s', (block, chain))

    def insHeights(self, block, chain):
        """Insert an initial height row for *chain*."""
        self._execute('INSERT INTO heights ("chain", "height") VALUES (%s, %s)', (chain, block))

    #tunnel table related
    def doWeHaveTunnels(self):
        """Return True if any tunnel is still in the 'created' state."""
        rows = self._fetchall('SELECT * FROM tunnel WHERE "status" = %s', ("created",))
        return len(rows) > 0

    def getTargetAddress(self, sourceAddress):
        """Return the target address tunneled from *sourceAddress*, or {}."""
        rows = self._fetchall(
            'SELECT targetaddress FROM tunnel WHERE "status" <> %s AND sourceaddress = %s',
            ("error", sourceAddress))
        return rows[0][0] if rows else {}

    def getSourceAddress(self, targetAddress):
        """Return the source address for *targetAddress* (or the first
        'created' tunnel's source when called with an empty string)."""
        if targetAddress == '':
            rows = self._fetchall(
                'SELECT sourceaddress FROM tunnel WHERE "status" = %s', ("created",))
        else:
            rows = self._fetchall(
                'SELECT sourceaddress FROM tunnel WHERE "status" <> %s AND targetaddress = %s',
                ("error", targetAddress))
        return rows[0][0] if rows else {}

    def getTunnelStatus(self, targetAddress = '', sourceAddress = ''):
        """Return the latest status rows for one side of a tunnel, or {}."""
        if targetAddress != '':
            query = 'SELECT status FROM tunnel WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
            values = (targetAddress,)
        elif sourceAddress != '':
            query = 'SELECT status FROM tunnel WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
            values = (sourceAddress,)
        else:
            return {}
        rows = self._fetchall(query, values)
        return rows if rows else {}

    def getTunnels(self, status = ''):
        """Return (sourceaddress, targetaddress) pairs with *status*, or {}."""
        if status == '':
            return {}
        rows = self._fetchall(
            'SELECT sourceaddress, targetaddress FROM tunnel WHERE "status" = %s', (status,))
        return rows if rows else {}

    def insTunnel(self, status, sourceAddress, targetAddress):
        """Create a new tunnel row stamped with the current time."""
        self._execute(
            'INSERT INTO tunnel ("sourceaddress", "targetaddress", "status", "timestamp") VALUES (%s, %s, %s, CURRENT_TIMESTAMP)',
            (sourceAddress, targetAddress, status))

    def updTunnel(self, status, sourceAddress, targetAddress, statusOld = ''):
        """Move a tunnel from *statusOld* (default 'created') to *status*."""
        if statusOld == '':
            statusOld = 'created'
        self._execute(
            'UPDATE tunnel SET "status" = %s, "timestamp" = CURRENT_TIMESTAMP WHERE status = %s AND sourceaddress = %s and targetaddress = %s',
            (status, statusOld, sourceAddress, targetAddress))

    def delTunnel(self, sourceAddress, targetAddress):
        """Delete the tunnel between the two addresses."""
        self._execute(
            'DELETE FROM tunnel WHERE sourceaddress = %s and targetaddress = %s',
            (sourceAddress, targetAddress))

    #executed table related
    def insExecuted(self, sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee):
        """Record a completed transfer."""
        # BUG FIX: CREATE TABLE declared the column unquoted (amountFee), so
        # PostgreSQL folded it to lower case; the quoted "amountFee" used here
        # before referenced a non-existent column.
        self._execute(
            'INSERT INTO executed ("sourceaddress", "targetaddress", "othertxid", "tntxid", "amount", "amountfee") VALUES (%s, %s, %s, %s, %s, %s)',
            (sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee))

    def updExecuted(self, id, sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee):
        """Rewrite an executed row identified by *id*."""
        # BUG FIX: removed a stray ')' before WHERE (SQL syntax error) and
        # lower-cased "amountfee" to match the real column name.
        self._execute(
            'UPDATE executed SET "sourceaddress" = %s, "targetaddress" = %s, "othertxid" = %s, "tntxid" = %s, "amount" = %s, "amountfee" = %s WHERE id = %s',
            (sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee, id))

    def didWeSendTx(self, txid):
        """Return True if *txid* appears as either side of an executed transfer."""
        rows = self._fetchall(
            'SELECT * FROM executed WHERE (othertxid = %s OR tntxid = %s)', (txid, txid))
        return len(rows) > 0

    def getExecutedAll(self):
        """Return every executed row, or {} when there are none."""
        rows = self._fetchall('SELECT * FROM executed')
        return rows if rows else {}

    def getExecuted(self, sourceAddress = '', targetAddress = '', otherTxId = '', tntxid = ''):
        """Look up executed transfers by exactly one of the four keys; {} otherwise."""
        if sourceAddress != '':
            query = 'SELECT othertxid FROM executed WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
            values = (sourceAddress,)
        elif targetAddress != '':
            query = 'SELECT tntxid FROM executed WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
            values = (targetAddress,)
        elif otherTxId != '':
            query = 'SELECT * FROM executed WHERE othertxid = %s ORDER BY id DESC LIMIT 1'
            values = (otherTxId,)
        elif tntxid != '':
            query = 'SELECT * FROM executed WHERE tntxid = %s ORDER BY id DESC LIMIT 1'
            values = (tntxid,)
        else:
            return {}
        rows = self._fetchall(query, values)
        return rows if rows else {}

    #error table related
    def insError(self, sourceAddress, targetAddress, tntxid, otherTxId, amount, error, exception = ''):
        """Record a failed transfer attempt."""
        self._execute(
            'INSERT INTO errors ("sourceaddress", "targetaddress", "tntxid", "othertxid", "amount", "error", "exception") VALUES (%s, %s, %s, %s, %s, %s, %s)',
            (sourceAddress, targetAddress, tntxid, otherTxId, amount, error, exception))

    def getErrors(self):
        """Return every error row, or {} when there are none."""
        rows = self._fetchall('SELECT * FROM errors')
        return rows if rows else {}

    def getError(self, sourceAddress='', targetAddress=''):
        """Return the latest error for one side of a tunnel, or {}."""
        if sourceAddress != '':
            query = 'SELECT error, tntxid, othertxid FROM errors WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
            values = (sourceAddress,)
        elif targetAddress != '':
            query = 'SELECT error, tntxid, othertxid FROM errors WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
            values = (targetAddress,)
        else:
            return {}
        rows = self._fetchall(query, values)
        return rows if rows else {}

    def didTxError(self, txid):
        """Return True if *txid* appears in the errors table."""
        rows = self._fetchall(
            'SELECT * FROM errors WHERE (othertxid = %s OR tntxid = %s)', (txid, txid))
        return len(rows) > 0

    #verified table related
    def getVerifiedAll(self):
        """Return every verified row, or {} when there are none."""
        rows = self._fetchall('SELECT * FROM verified')
        return rows if rows else {}

    def getUnVerified(self):
        """Return rows whose confirmation block is still 0, or {}."""
        rows = self._fetchall('SELECT * FROM verified WHERE block = 0')
        return rows if rows else {}

    def getVerified(self, tx):
        """Return the confirmation block for *tx*, or None when unknown.

        NOTE: returns None (not {}) on miss -- insVerified relies on that.
        """
        rows = self._fetchall('SELECT block FROM verified WHERE tx = %s', (tx,))
        return rows[0][0] if rows else None

    def insVerified(self, chain, tx, block):
        """Upsert the confirmation block for *tx*."""
        if self.getVerified(tx) is None:
            self._execute(
                'INSERT INTO verified ("chain", "tx", "block") VALUES (%s, %s, %s)',
                (chain, tx, block))
        else:
            self._execute(
                'UPDATE verified SET "block" = %s WHERE tx = %s', (block, tx))

    #other
    def checkTXs(self, address):
        """Return deposit/withdraw history rows as dicts, optionally filtered
        by *address*; {'error': ...} when nothing matches."""
        base_query = (
            "SELECT e.sourceaddress, e.targetaddress, e.tntxid, e.othertxid as OtherTxId, "
            "COALESCE(v.block, 0) as TNVerBlock, COALESCE(v2.block, 0) as OtherVerBlock, e.amount, "
            "CASE WHEN e.targetaddress LIKE '3J%%' THEN 'Deposit' ELSE 'Withdraw' END TypeTX, "
            "CASE WHEN e.targetaddress LIKE '3J%%' AND v.block IS NOT NULL THEN 'verified' "
            "WHEN e.targetaddress NOT LIKE '3J%%' AND v2.block IS NOT NULL AND v2.block > 0 THEN 'verified' "
            "ELSE 'unverified' END Status "
            "FROM executed e LEFT JOIN verified v ON e.tntxid = v.tx "
            "LEFT JOIN verified v2 ON e.othertxid = v2.tx ")
        dbCon = self.openConn()
        try:
            cursor = dbCon.cursor()
            if address == '':
                cursor.execute(base_query)
            else:
                cursor.execute(
                    base_query + "WHERE (e.sourceaddress = %s or e.targetaddress = %s)",
                    (address, address))
            # zip column names from the cursor description onto each row
            tx = [dict((cursor.description[i][0], value) for i, value in enumerate(row))
                  for row in cursor.fetchall()]
            cursor.close()
        finally:
            self.closeConn(dbCon)
        if len(tx) == 0:
            return {'error': 'no tx found'}
        return tx

    def getFees(self, fromdate, todate):
        """Return the total fees collected between two 'YYYY-MM-DD' dates.

        Invalid/empty dates fall back to 1990-01-01 and tomorrow respectively.
        """
        def _valid_date(datestr):
            # Accept only well-formed YYYY-MM-DD strings; malformed input
            # (wrong part count, non-numeric) previously crashed with an
            # uncaught ValueError.
            if len(datestr) == 0:
                return False
            try:
                year, month, day = datestr.split('-')
                datetime.datetime(int(year), int(month), int(day))
                return True
            except ValueError:
                return False
        if not _valid_date(fromdate):
            fromdate = '1990-01-01'
        if not _valid_date(todate):
            todate = (datetime.date.today() + timedelta(days=1)).strftime('%Y-%m-%d')
        rows = self._fetchall(
            'SELECT SUM(amountFee) as totalFee from executed WHERE timestamp > %s and timestamp < %s',
            (fromdate, todate))
        # SUM() yields NULL (None) when no rows match -- report 0 instead
        fees = rows[0][0] if rows and rows[0][0] is not None else 0
        return {'totalFees': fees}
|
import psycopg2 as pgdb
from psycopg2 import sql
from psycopg2 import pool
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from datetime import timedelta
import datetime
import os
class dbPGCalls(object):
def __init__(self, config):
self.config = config
try:
self.psPool = pgdb.pool.ThreadedConnectionPool(1, 10,database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["postgres"]["<PASSWORD>"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"])
dbCon = self.psPool.getconn()
#self.dbCon = pgdb.connect(database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["<PASSWORD>"]["<PASSWORD>"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"])
#self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.psPool.putconn(dbCon)
except:
self.dbCon = pgdb.connect(user=self.config["postgres"]["pguser"], password=self.config["postgres"]["<PASSWORD>"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"])
self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
sqlstr = sql.SQL('CREATE DATABASE {};').format(sql.Identifier(self.config['main']['name']))
cursor = self.dbCon.cursor()
cursor.execute(sqlstr)
cursor.close()
self.dbCon.close()
self.psPool = pgdb.pool.ThreadedConnectionPool(1, 10,database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["postgres"]["<PASSWORD>"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"])
#self.dbCon = pgdb.connect(database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["postgres"]["pgpswd"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"])
#self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
def openConn(self):
dbCon = self.psPool.getconn()
dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
return dbCon
def closeConn(self, dbCon):
self.psPool.putconn(dbCon)
#DB Setup part
def createdb(self):
createHeightTable = '''
CREATE TABLE IF NOT EXISTS heights (
id SERIAL PRIMARY KEY,
chain text NOT NULL,
height integer
);
'''
createTunnelTable = '''
CREATE TABLE IF NOT EXISTS tunnel (
id SERIAL PRIMARY KEY,
sourceaddress text NOT NULL,
targetaddress text NOT NULL,
timestamp timestamp
default current_timestamp,
status text
);
'''
createTableExecuted = '''
CREATE TABLE IF NOT EXISTS executed (
id SERIAL PRIMARY KEY,
sourceaddress text NOT NULL,
targetaddress text NOT NULL,
tntxid text NOT NULL,
othertxid text NOT NULL,
timestamp timestamp
default current_timestamp,
amount real,
amountFee real
);
'''
createTableErrors = '''
CREATE TABLE IF NOT EXISTS errors (
id SERIAL PRIMARY KEY,
sourceaddress text ,
targetaddress text ,
tntxid text ,
othertxid text ,
timestamp timestamp
default current_timestamp,
amount real,
error text,
exception text
);
'''
createVerifyTable = '''
CREATE TABLE IF NOT EXISTS verified (
id SERIAL PRIMARY KEY,
chain text NOT NULL,
tx text NOT NULL,
block integer
);
'''
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql.SQL(createHeightTable))
cursor.execute(sql.SQL(createTunnelTable))
cursor.execute(sql.SQL(createTableExecuted))
cursor.execute(sql.SQL(createTableErrors))
cursor.execute(sql.SQL(createVerifyTable))
self.closeConn(dbCon)
#import existing sqlite db
def importSQLite(self):
import sqlite3
if self.config["main"]["db-location"] != "":
path= os.getcwd()
dbfile = path + '/' + self.config["main"]["db-location"] + '/' + 'gateway.db'
dbfile = os.path.normpath(dbfile)
else:
dbfile = 'gateway.db'
consq=sqlite3.connect(dbfile)
cursq=consq.cursor()
tabnames=[]
cursq.execute("SELECT name FROM sqlite_master WHERE type='table'")
tabgrab = cursq.fetchall()
for item in tabgrab:
tabnames.append(item[0])
dbCon = self.openConn()
for table in tabnames:
cursq.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name = ?;", (table,))
create = cursq.fetchone()[0]
cursq.execute("SELECT * FROM %s;" %table)
rows=cursq.fetchall()
if len(rows) == 0:
continue
colcount=len(rows[0])
pholder='%s,'*colcount
newholder=pholder[:-1]
try:
curpg = dbCon.cursor()
curpg.execute("DROP TABLE IF EXISTS %s;" %table)
curpg.execute(create)
curpg.executemany("INSERT INTO %s VALUES (%s);" % (table, newholder),rows)
if table != 'heights':
curpg.execute("ALTER TABLE %s ALTER id ADD GENERATED ALWAYS AS IDENTITY (START WITH %s);" % (table, len(rows)+1))
except Exception as e:
self.closeConn(dbCon)
print ('Error %s' % e)
self.closeConn(dbCon)
consq.close()
#heights table related
def lastScannedBlock(self, chain):
sql = 'SELECT height FROM heights WHERE chain = %s'
values = (chain,)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult[0][0]
else:
return {}
def getHeights(self):
sql = 'SELECT chain, height FROM heights'
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult
else:
return {}
def updHeights(self, block, chain):
sql = 'UPDATE heights SET "height" = %s WHERE chain = %s'
values = (block, chain)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
def insHeights(self, block, chain):
sql = 'INSERT INTO heights ("chain", "height") VALUES (%s, %s)'
values = (chain, block)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
#tunnel table related
def doWeHaveTunnels(self):
sql = 'SELECT * FROM tunnel WHERE "status" = %s'
values = ("created", )
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return True
else:
return False
def getTargetAddress(self, sourceAddress):
sql = 'SELECT targetaddress FROM tunnel WHERE "status" <> %s AND sourceaddress = %s'
values = ("error", sourceAddress)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult[0][0]
else:
return {}
def getSourceAddress(self, targetAddress):
if targetAddress == '':
sql = 'SELECT sourceaddress FROM tunnel WHERE "status" = %s'
values = ("created",)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
else:
sql = 'SELECT sourceaddress FROM tunnel WHERE "status" <> %s AND targetaddress = %s'
values = ("error", targetAddress)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult[0][0]
else:
return {}
def getTunnelStatus(self, targetAddress = '', sourceAddress = ''):
if targetAddress != '':
sql = 'SELECT status FROM tunnel WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
values = (targetAddress,)
elif sourceAddress != '':
sql = 'SELECT status FROM tunnel WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
values = (sourceAddress,)
else:
return {}
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult
else:
return {}
def getTunnels(self, status = ''):
if status != '':
sql = 'SELECT sourceaddress, targetaddress FROM tunnel WHERE "status" = %s'
values = (status,)
else:
return {}
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
qryResult = cursor.fetchall()
cursor.close()
self.closeConn(dbCon)
if len(qryResult) > 0:
return qryResult
else:
return {}
def insTunnel(self, status, sourceAddress, targetAddress):
sql = 'INSERT INTO tunnel ("sourceaddress", "targetaddress", "status", "timestamp") VALUES (%s, %s, %s, CURRENT_TIMESTAMP)'
values = (sourceAddress, targetAddress, status)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
def updTunnel(self, status, sourceAddress, targetAddress, statusOld = ''):
if statusOld == '':
statusOld = 'created'
sql = 'UPDATE tunnel SET "status" = %s, "timestamp" = CURRENT_TIMESTAMP WHERE status = %s AND sourceaddress = %s and targetaddress = %s'
values = (status, statusOld, sourceAddress, targetAddress)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
def delTunnel(self, sourceAddress, targetAddress):
sql = 'DELETE FROM tunnel WHERE sourceaddress = %s and targetaddress = %s'
values = (sourceAddress, targetAddress)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
#executed table related
def insExecuted(self, sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee):
sql = 'INSERT INTO executed ("sourceaddress", "targetaddress", "othertxid", "tntxid", "amount", "amountFee") VALUES (%s, %s, %s, %s, %s, %s)'
values = (sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee)
dbCon = self.openConn()
cursor = dbCon.cursor()
cursor.execute(sql, values)
cursor.close()
self.closeConn(dbCon)
def updExecuted(self, id, sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee):
    """Overwrite every field of the executed row identified by id.

    Bug fix: the original SQL had a stray ')' after the last SET
    assignment ('"amountFee" = %s) WHERE id = %s'), which is a syntax
    error, so the UPDATE failed on every call.
    """
    sql = 'UPDATE executed SET "sourceaddress" = %s, "targetaddress" = %s, "othertxid" = %s, "tntxid" = %s, "amount" = %s, "amountFee" = %s WHERE id = %s'
    values = (sourceAddress, targetAddress, otherTxId, tntxid, amount, amountFee, id)
    dbCon = self.openConn()
    cursor = dbCon.cursor()
    cursor.execute(sql, values)
    cursor.close()
    self.closeConn(dbCon)
def didWeSendTx(self, txid):
    """True when txid appears in executed as either side of a swap."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(
        'SELECT * FROM executed WHERE (othertxid = %s OR tntxid = %s)',
        (txid, txid),
    )
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return len(rows) > 0
def getExecutedAll(self):
    """Return every executed row, or an empty dict when the table is empty."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute('SELECT * FROM executed')
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
def getExecuted(self, sourceAddress = '', targetAddress = '', otherTxId = '', tntxid = ''):
    """Look up the most recent executed row by whichever selector is set.

    Selectors are checked in order sourceAddress, targetAddress, otherTxId,
    tntxid; only the first non-empty one is used. With no selector an empty
    dict is returned. Note the address selectors return a single column
    while the tx-id selectors return the whole row.
    """
    if sourceAddress != '':
        sql = 'SELECT othertxid FROM executed WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
        values = (sourceAddress,)
    elif targetAddress != '':
        sql = 'SELECT tntxid FROM executed WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
        values = (targetAddress,)
    elif otherTxId != '':
        sql = 'SELECT * FROM executed WHERE othertxid = %s ORDER BY id DESC LIMIT 1'
        values = (otherTxId,)
    elif tntxid != '':
        sql = 'SELECT * FROM executed WHERE tntxid = %s ORDER BY id DESC LIMIT 1'
        values = (tntxid,)
    else:
        return {}
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(sql, values)
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
#error table related
def insError(self, sourceAddress, targetAddress, tntxid, otherTxId, amount, error, exception = ''):
    """Record a failed swap attempt in the errors table."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(
        'INSERT INTO errors ("sourceaddress", "targetaddress", "tntxid", "othertxid", "amount", "error", "exception") VALUES (%s, %s, %s, %s, %s, %s, %s)',
        (sourceAddress, targetAddress, tntxid, otherTxId, amount, error, exception),
    )
    cur.close()
    self.closeConn(connection)
def getErrors(self):
    """Return every error row, or an empty dict when there are none."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute('SELECT * FROM errors')
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
def getError(self, sourceAddress='', targetAddress=''):
    """Return the latest (error, tntxid, othertxid) row for an address.

    sourceAddress takes precedence over targetAddress; with neither set,
    or no matching row, an empty dict is returned.
    """
    if sourceAddress != '':
        sql = 'SELECT error, tntxid, othertxid FROM errors WHERE sourceaddress = %s ORDER BY id DESC LIMIT 1'
        values = (sourceAddress,)
    elif targetAddress != '':
        sql = 'SELECT error, tntxid, othertxid FROM errors WHERE targetaddress = %s ORDER BY id DESC LIMIT 1'
        values = (targetAddress,)
    else:
        return {}
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(sql, values)
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
def didTxError(self, txid):
    """True when txid appears in the errors table on either side."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(
        'SELECT * FROM errors WHERE (othertxid = %s OR tntxid = %s)',
        (txid, txid),
    )
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return len(rows) > 0
#verified table related
def getVerifiedAll(self):
    """Return every verified row, or an empty dict when there are none."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute('SELECT * FROM verified')
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
def getUnVerified(self):
    """Return verified rows still at block 0 (i.e. not yet confirmed), or {}."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute('SELECT * FROM verified WHERE block = 0')
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    return rows if rows else {}
def getVerified(self, tx):
    """Return the stored block number for tx, or None when tx is unknown."""
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute('SELECT block FROM verified WHERE tx = %s', (tx,))
    rows = cur.fetchall()
    cur.close()
    self.closeConn(connection)
    if not rows:
        # None (not {}) so insVerified can distinguish "absent" from block 0.
        return None
    return rows[0][0]
def insVerified(self, chain, tx, block):
    """Upsert a verified row: insert tx when unseen, otherwise update its block."""
    # Decide insert-vs-update first, then share one open/execute/close path.
    if self.getVerified(tx) is None:
        sql = 'INSERT INTO verified ("chain", "tx", "block") VALUES (%s, %s, %s)'
        values = (chain, tx, block)
    else:
        sql = 'UPDATE verified SET "block" = %s WHERE tx = %s'
        values = (block, tx)
    connection = self.openConn()
    cur = connection.cursor()
    cur.execute(sql, values)
    cur.close()
    self.closeConn(connection)
#other
def checkTXs(self, address):
    """List executed deposits/withdraws joined with their verification status.

    With an empty address every executed tx is returned; otherwise only rows
    where address is the source or the target. Rows come back as a list of
    dicts keyed by the query's column names, or {'error': 'no tx found'}
    when nothing matches.
    """
    dbCon = self.openConn()
    cursor = dbCon.cursor()
    # Shared SELECT; the filtered variant just appends a WHERE clause.
    base = "SELECT e.sourceaddress, e.targetaddress, e.tntxid, e.othertxid as OtherTxId, COALESCE(v.block, 0) as TNVerBlock, COALESCE(v2.block, 0) as OtherVerBlock, e.amount, CASE WHEN e.targetaddress LIKE '3J%%' THEN 'Deposit' ELSE 'Withdraw' END TypeTX, " \
           "CASE WHEN e.targetaddress LIKE '3J%%' AND v.block IS NOT NULL THEN 'verified' WHEN e.targetaddress NOT LIKE '3J%%' AND v2.block IS NOT NULL AND v2.block > 0 THEN 'verified' ELSE 'unverified' END Status " \
           "FROM executed e LEFT JOIN verified v ON e.tntxid = v.tx LEFT JOIN verified v2 ON e.othertxid = v2.tx "
    if address == '':
        cursor.execute(base)
    else:
        cursor.execute(base + "WHERE (e.sourceaddress = %s or e.targetaddress = %s)", (address, address))
    tx = [{cursor.description[i][0]: value for i, value in enumerate(row)} for row in cursor.fetchall()]
    cursor.close()
    self.closeConn(dbCon)
    if len(tx) == 0:
        return {'error': 'no tx found'}
    return tx
def getFees(self, fromdate, todate):
    """Return {'totalFees': total amountFee} over executed rows in (fromdate, todate).

    Dates are 'YYYY-MM-DD' strings. An empty or invalid fromdate falls back
    to 1990-01-01; an empty or invalid todate falls back to tomorrow, so the
    default window covers everything up to now.

    Bug fixes vs original:
    - SUM() over zero matching rows yields a single NULL row, so the result
      could be {'totalFees': None}; NULL is now coerced to 0.
    - the 'YYYY-MM-DD'.split('-') unpacking happened outside the try block,
      so inputs like '2020' or '2020-01-01-x' raised an uncaught ValueError;
      validation is now fully guarded.
    """
    def _is_valid_date(datestr):
        # Valid only when the string splits into exactly three parts that
        # form a real calendar date.
        if len(datestr) == 0:
            return False
        try:
            year, month, day = datestr.split('-')
            datetime.datetime(int(year), int(month), int(day))
        except ValueError:
            return False
        return True

    if not _is_valid_date(fromdate):
        fromdate = '1990-01-01'
    if not _is_valid_date(todate):
        tomorrow = datetime.date.today() + timedelta(days=1)
        todate = tomorrow.strftime('%Y-%m-%d')
    sql = 'SELECT SUM(amountFee) as totalFee from executed WHERE timestamp > %s and timestamp < %s'
    values = (fromdate, todate)
    dbCon = self.openConn()
    cursor = dbCon.cursor()
    cursor.execute(sql, values)
    qryResult = cursor.fetchall()
    cursor.close()
    self.closeConn(dbCon)
    if len(qryResult) == 0 or qryResult[0][0] is None:
        Fees = 0
    else:
        Fees = qryResult[0][0]
    return { 'totalFees': Fees }
|
en
| 0.363251
|
#self.dbCon = pgdb.connect(database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["<PASSWORD>"]["<PASSWORD>"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"]) #self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) #self.dbCon = pgdb.connect(database=config['main']['name'], user=self.config["postgres"]["pguser"], password=self.config["postgres"]["pgpswd"], host=self.config["postgres"]["pghost"], port=self.config["postgres"]["pgport"]) #self.dbCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) #DB Setup part CREATE TABLE IF NOT EXISTS heights ( id SERIAL PRIMARY KEY, chain text NOT NULL, height integer ); CREATE TABLE IF NOT EXISTS tunnel ( id SERIAL PRIMARY KEY, sourceaddress text NOT NULL, targetaddress text NOT NULL, timestamp timestamp default current_timestamp, status text ); CREATE TABLE IF NOT EXISTS executed ( id SERIAL PRIMARY KEY, sourceaddress text NOT NULL, targetaddress text NOT NULL, tntxid text NOT NULL, othertxid text NOT NULL, timestamp timestamp default current_timestamp, amount real, amountFee real ); CREATE TABLE IF NOT EXISTS errors ( id SERIAL PRIMARY KEY, sourceaddress text , targetaddress text , tntxid text , othertxid text , timestamp timestamp default current_timestamp, amount real, error text, exception text ); CREATE TABLE IF NOT EXISTS verified ( id SERIAL PRIMARY KEY, chain text NOT NULL, tx text NOT NULL, block integer ); #import existing sqlite db #heights table related #tunnel table related #executed table related #error table related #verified table related #other #check date notation
| 2.929452
| 3
|
qa/setup_packages.py
|
rbetz/DALI
| 0
|
6627256
|
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
import sys
try:
import pip._internal.pep425tags as p
except:
import pip.pep425tags as p
try:
# For Python 3.0 and later
from urllib.request import urlopen, HTTPError, Request
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, HTTPError, Request
# keeps names of all required packages as a dict key
# required versions are list or dict with keys of CUDA version, to use default just put None
# instead of version number, direct link can be used
# put {0} in pacage link as a placeholder for python pip package version (i.e. cp27-cp27mu-linux_x86_64)
# and cuda_v for cuXX version
# NOTE: First version will be picked in case of one_config_only
# Version table: plain list = same versions for every CUDA release;
# dict = versions keyed by minimum CUDA version ("90"/"100").
packages = {
"opencv-python" : ["4.1.0.25"],
"mxnet-cu{cuda_v}" : {
"90" : ["1.5.0"],
"100" : ["1.5.0"]},
"tensorflow-gpu" : {
"90": ["1.12.0", "1.11", "1.7"],
"100": ["1.13.1", "1.14.0", "1.15.0", "2.0.0"]},
"torch" : {"90": ["http://download.pytorch.org/whl/{cuda_v}/torch-1.1.0-{0}.whl"],
"100": ["http://download.pytorch.org/whl/{cuda_v}/torch-1.2.0-{0}.whl"]},
"torchvision" : {"90": ["https://download.pytorch.org/whl/{cuda_v}/torchvision-0.3.0-{0}.whl"],
"100": ["https://download.pytorch.org/whl/{cuda_v}/torchvision-0.4.0-{0}.whl"]},
}
# CLI: the mode flags (--list/--num/--remove/--all/--install) are mutually
# exclusive in practice; main() dispatches on the first one that is set.
parser = argparse.ArgumentParser(description='Env setup helper')
parser.add_argument('--list', '-l', help='list configs', action='store_true', default=False)
parser.add_argument('--num', '-n', help='return number of all configurations possible', action='store_true', default=False)
parser.add_argument('--install', '-i', dest='install', type=int, help="get Nth configuration", default=-1)
parser.add_argument('--all', '-a', dest='getall', action='store_true', help='return packages in all versions')
parser.add_argument('--remove', '-r', dest='remove', help="list packages to remove", action='store_true', default=False)
parser.add_argument('--cuda', dest='cuda', default="90", help="CUDA version to use")
parser.add_argument('--use', '-u', dest='use', default=[], help="provide only packages from this list", nargs='*')
args = parser.parse_args()
def get_key_with_cuda(key, val_dict, cuda):
    """Resolve a package-name template against the best CUDA version.

    When val_dict is keyed by CUDA version strings, substitute the largest
    key that is <= cuda into the '{cuda_v}' placeholder of `key`;
    otherwise return key unchanged.
    """
    if not isinstance(val_dict, dict):
        return key
    resolved = key
    # Ascending numeric order, so the last match wins.
    for version in sorted(val_dict.keys(), key=int):
        if int(version) <= int(cuda):
            resolved = key.format(cuda_v=version)
    return resolved
def get_package(package_data, key, cuda):
    """Return the version list for `key` from `package_data`.

    Dict-valued entries are keyed by CUDA version: the list under the
    largest key <= cuda is returned (None when no version qualifies).
    List-valued entries are returned as-is. Unknown keys yield None.

    Bug fix: the original returned the module-global `packages[key]` for
    list-valued entries instead of `package_data[key]`, silently ignoring
    the mapping it was asked to look in.
    """
    if key not in package_data:
        return None
    entry = package_data[key]
    if not isinstance(entry, dict):
        return entry
    data = None
    # Ascending numeric order, so the last qualifying version wins.
    for ver in sorted(entry.keys(), key=int):
        if int(ver) <= int(cuda):
            data = entry[ver]
    return data
def get_pyvers_name(name, cuda):
    """Find a downloadable wheel URL for the current interpreter.

    Substitutes each supported (interpreter-abi-platform) pip tag and the
    'cuXX' suffix into the URL template and returns the first URL that
    answers a HEAD request; returns '' when none does.
    """
    usable_tags = [(x, y, z) for (x, y, z) in p.get_supported() if y != 'none' and 'any' not in y]
    for tag in usable_tags:
        url = name.format("-".join(tag), cuda_v = "cu" + cuda)
        probe = Request(url)
        probe.get_method = lambda : 'HEAD'
        try:
            urlopen(probe)
        except HTTPError:
            continue
        return url
    return ""
def print_configs(cuda):
    """Print every package with all of its candidate versions for `cuda`.

    Idiom fix: compare against None with `is`, not `==`.
    """
    for key in packages.keys():
        key_w_cuda = get_key_with_cuda(key, packages[key], cuda)
        print (key_w_cuda + ":")
        for val in get_package(packages, key, cuda):
            if val is None:
                # None means "install whatever pip resolves by default".
                val = "Default"
            elif val.startswith('http'):
                # Direct links still need the python/CUDA tags resolved.
                val = get_pyvers_name(val, cuda)
            print ('\t' + val)
def get_install_string(variant, use, cuda):
    """Decode configuration number `variant` into a pip install string.

    `variant` is read as a mixed-radix number: each used package
    contributes one digit ranging over its version list.

    Fixes: `is None` instead of `== None`; the repeated
    get_package(packages, key, cuda) call is hoisted (it was evaluated
    three times per package).
    """
    ret = []
    for key in packages.keys():
        if key not in use:
            continue
        key_w_cuda = get_key_with_cuda(key, packages[key], cuda)
        versions = get_package(packages, key, cuda)
        val = versions[variant % len(versions)]
        if val is None:
            ret.append(key_w_cuda)
        elif val.startswith('http'):
            ret.append(get_pyvers_name(val, cuda))
        else:
            ret.append(key_w_cuda + "==" + val)
        variant = variant // len(versions)
    # add all remaining used packages with default versions
    additional = [v for v in use if v not in packages.keys()]
    return " ".join(ret + additional)
def get_remove_string(use, cuda):
    """List the used packages (CUDA suffix applied) with more than one candidate version."""
    # Remove only these which version we want to change
    removable = []
    for key in packages.keys():
        if key not in use:
            continue
        resolved = get_key_with_cuda(key, packages[key], cuda)
        if len(get_package(packages, key, cuda)) > 1:
            removable.append(resolved)
    return " ".join(removable)
def cal_num_of_configs(use, cuda):
    """Product of version-list lengths over the used packages."""
    total = 1
    for key in packages.keys():
        if key in use:
            total *= len(get_package(packages, key, cuda))
    return total
def get_all_strings(use, cuda):
    """List every candidate install spec (all versions) for the used packages."""
    specs = []
    for key in packages.keys():
        if key not in use:
            continue
        for version in get_package(packages, key, cuda):
            if version is None:
                specs.append(key)
            elif version.startswith('http'):
                specs.append(get_pyvers_name(version, cuda))
            else:
                specs.append(key + "==" + version)
    # add all remaining used packages with default versions
    specs.extend(v for v in use if v not in packages.keys())
    return " ".join(specs)
def main():
    # Dispatch on the mutually exclusive CLI modes, first match wins.
    global args
    if args.list:
        print_configs(args.cuda)
    elif args.num:
        # Configurations are numbered from 0, so report the maximum index.
        print (cal_num_of_configs(args.use, args.cuda) - 1)
    elif args.remove:
        print (get_remove_string(args.use, args.cuda))
    elif args.getall:
        print(get_all_strings(args.use, args.cuda))
    elif args.install >= 0:
        # Out-of-range selections fall back to configuration 1.
        if args.install > cal_num_of_configs(args.use, args.cuda):
            args.install = 1
        print (get_install_string(args.install, args.use, args.cuda))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
import sys
try:
import pip._internal.pep425tags as p
except:
import pip.pep425tags as p
try:
# For Python 3.0 and later
from urllib.request import urlopen, HTTPError, Request
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, HTTPError, Request
# keeps names of all required packages as a dict key
# required versions are list or dict with keys of CUDA version, to use default just put None
# instead of version number, direct link can be used
# put {0} in pacage link as a placeholder for python pip package version (i.e. cp27-cp27mu-linux_x86_64)
# and cuda_v for cuXX version
# NOTE: First version will be picked in case of one_config_only
packages = {
"opencv-python" : ["4.1.0.25"],
"mxnet-cu{cuda_v}" : {
"90" : ["1.5.0"],
"100" : ["1.5.0"]},
"tensorflow-gpu" : {
"90": ["1.12.0", "1.11", "1.7"],
"100": ["1.13.1", "1.14.0", "1.15.0", "2.0.0"]},
"torch" : {"90": ["http://download.pytorch.org/whl/{cuda_v}/torch-1.1.0-{0}.whl"],
"100": ["http://download.pytorch.org/whl/{cuda_v}/torch-1.2.0-{0}.whl"]},
"torchvision" : {"90": ["https://download.pytorch.org/whl/{cuda_v}/torchvision-0.3.0-{0}.whl"],
"100": ["https://download.pytorch.org/whl/{cuda_v}/torchvision-0.4.0-{0}.whl"]},
}
parser = argparse.ArgumentParser(description='Env setup helper')
parser.add_argument('--list', '-l', help='list configs', action='store_true', default=False)
parser.add_argument('--num', '-n', help='return number of all configurations possible', action='store_true', default=False)
parser.add_argument('--install', '-i', dest='install', type=int, help="get Nth configuration", default=-1)
parser.add_argument('--all', '-a', dest='getall', action='store_true', help='return packages in all versions')
parser.add_argument('--remove', '-r', dest='remove', help="list packages to remove", action='store_true', default=False)
parser.add_argument('--cuda', dest='cuda', default="90", help="CUDA version to use")
parser.add_argument('--use', '-u', dest='use', default=[], help="provide only packages from this list", nargs='*')
args = parser.parse_args()
def get_key_with_cuda(key, val_dict, cuda):
key_w_cuda = key
if isinstance(val_dict, dict):
for ver in sorted(val_dict.keys(), key=int):
if int(ver) <= int(cuda):
key_w_cuda = key.format(cuda_v=ver)
return key_w_cuda
def get_package(package_data, key, cuda):
    """Return the version list for `key` from `package_data`.

    Dict-valued entries are keyed by CUDA version: the list under the
    largest key <= cuda is returned (None when no version qualifies).
    List-valued entries are returned as-is. Unknown keys yield None.

    Bug fix: the original returned the module-global `packages[key]` for
    list-valued entries instead of `package_data[key]`, silently ignoring
    the mapping it was asked to look in.
    """
    if key not in package_data:
        return None
    entry = package_data[key]
    if not isinstance(entry, dict):
        return entry
    data = None
    # Ascending numeric order, so the last qualifying version wins.
    for ver in sorted(entry.keys(), key=int):
        if int(ver) <= int(cuda):
            data = entry[ver]
    return data
def get_pyvers_name(name, cuda):
for v in [(x, y, z) for (x, y, z) in p.get_supported() if y != 'none' and 'any' not in y]:
v = "-".join(v)
v = name.format(v, cuda_v = "cu" + cuda)
request = Request(v)
request.get_method = lambda : 'HEAD'
try:
response = urlopen(request)
return v
except HTTPError:
pass
return ""
def print_configs(cuda):
for key in packages.keys():
key_w_cuda = get_key_with_cuda(key, packages[key], cuda)
print (key_w_cuda + ":")
for val in get_package(packages, key, cuda):
if val == None:
val = "Default"
elif val.startswith('http'):
val = get_pyvers_name(val, cuda)
print ('\t' + val)
def get_install_string(variant, use, cuda):
ret = []
for key in packages.keys():
if key not in use:
continue
key_w_cuda = get_key_with_cuda(key, packages[key], cuda)
tmp = variant % len(get_package(packages, key, cuda))
val = get_package(packages, key, cuda)[tmp]
if val == None:
ret.append(key_w_cuda)
elif val.startswith('http'):
ret.append(get_pyvers_name(val, cuda))
else:
ret.append(key_w_cuda + "==" + val)
variant = variant // len(get_package(packages, key, cuda))
# add all remaining used packages with default versions
additional = [v for v in use if v not in packages.keys()]
return " ".join(ret + additional)
def get_remove_string(use, cuda):
# Remove only these which version we want to change
to_remove = []
for key in packages.keys():
if key not in use:
continue
key_w_cuda = get_key_with_cuda(key, packages[key], cuda)
pkg_list_len = len(get_package(packages, key, cuda))
if pkg_list_len > 1:
to_remove.append(key_w_cuda)
return " ".join(to_remove)
def cal_num_of_configs(use, cuda):
ret = 1
for key in packages.keys():
if key not in use:
continue
ret *= len(get_package(packages, key, cuda))
return ret
def get_all_strings(use, cuda):
ret = []
for key in packages.keys():
if key not in use:
continue
for val in get_package(packages, key, cuda):
if val is None:
ret.append(key)
elif val.startswith('http'):
ret.append(get_pyvers_name(val, cuda))
else:
ret.append(key + "==" + val)
# add all remaining used packages with default versions
additional = [v for v in use if v not in packages.keys()]
return " ".join(ret + additional)
def main():
global args
if args.list:
print_configs(args.cuda)
elif args.num:
print (cal_num_of_configs(args.use, args.cuda) - 1)
elif args.remove:
print (get_remove_string(args.use, args.cuda))
elif args.getall:
print(get_all_strings(args.use, args.cuda))
elif args.install >= 0:
if args.install > cal_num_of_configs(args.use, args.cuda):
args.install = 1
print (get_install_string(args.install, args.use, args.cuda))
if __name__ == "__main__":
main()
|
en
| 0.815305
|
#!/usr/bin/env python # For Python 3.0 and later # Fall back to Python 2's urllib2 # keeps names of all required packages as a dict key # required versions are list or dict with keys of CUDA version, to use default just put None # instead of version number, direct link can be used # put {0} in pacage link as a placeholder for python pip package version (i.e. cp27-cp27mu-linux_x86_64) # and cuda_v for cuXX version # NOTE: First version will be picked in case of one_config_only # add all remaining used packages with default versions # Remove only these which version we want to change # add all remaining used packages with default versions
| 2.080653
| 2
|
venv/lib/python2.7/site-packages/ndb/query_test.py
|
anuja1011/Pick-Up-Sports
| 0
|
6627257
|
<reponame>anuja1011/Pick-Up-Sports
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for query.py."""
import datetime
import os
from .google_imports import datastore_errors
from .google_imports import datastore_pbs
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from .google_imports import users
from .google_test_imports import datastore_stub_util
from .google_test_imports import real_unittest
from .google_test_imports import unittest
from . import model
from . import query
from . import tasklets
from . import test_utils
class BaseQueryTestMixin(object):
def setUp(self):
# Create class inside tests because kinds are cleared every test.
global Foo
class Foo(model.Model):
name = model.StringProperty()
rate = model.IntegerProperty()
tags = model.StringProperty(repeated=True)
self.create_entities()
the_module = query
def create_entities(self):
self.joe = Foo(name='joe', tags=['joe', 'jill', 'hello'], rate=1)
self.joe.put()
self.jill = Foo(name='jill', tags=['jack', 'jill'], rate=2)
self.jill.put()
self.moe = Foo(name='moe', rate=1)
self.moe.put()
def testBasicQuery(self):
q = query.Query(kind='Foo')
q = q.filter(Foo.name >= 'joe').filter(Foo.name <= 'moe').filter()
res = list(q)
self.assertEqual(res, [self.joe, self.moe])
def testOrderedQuery(self):
q = query.Query(kind='Foo')
q = q.order(Foo.rate).order().order(-Foo.name)
res = list(q)
self.assertEqual(res, [self.moe, self.joe, self.jill])
def testQueryError(self):
self.assertRaises(TypeError, query.Query,
ancestor=query.ParameterizedFunction('user',
query.Parameter(1)))
self.assertRaises(TypeError, query.Query, ancestor=42)
self.assertRaises(ValueError, query.Query, ancestor=model.Key('X', None))
self.assertRaises(TypeError, query.Query,
ancestor=model.Key('X', 1), app='another')
self.assertRaises(TypeError, query.Query,
ancestor=model.Key('X', 1), namespace='another')
self.assertRaises(TypeError, query.Query, filters=42)
self.assertRaises(TypeError, query.Query, orders=42)
self.assertRaises(TypeError, query.Query, default_options=42)
def testQueryAttributes(self):
q = query.Query(kind='Foo')
self.assertEqual(q.kind, 'Foo')
self.assertEqual(q.ancestor, None)
self.assertEqual(q.filters, None)
self.assertEqual(q.orders, None)
key = model.Key('Barba', 'papa')
q = query.Query(kind='Foo', ancestor=key)
self.assertEqual(q.kind, 'Foo')
self.assertEqual(q.ancestor, key)
self.assertEqual(q.filters, None)
self.assertEqual(q.orders, None)
q = q.filter(Foo.rate == 1)
self.assertEqual(q.kind, 'Foo')
self.assertEqual(q.ancestor, key)
self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
self.assertEqual(q.orders, None)
q = q.order(-Foo.name)
self.assertEqual(q.kind, 'Foo')
self.assertEqual(q.ancestor, key)
self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
expected_order = [('name', query._DESC)]
self.assertEqual(query._orders_to_orderings(q.orders), expected_order)
def testQueryRepr(self):
q = Foo.query()
self.assertEqual(repr(q), "Query(kind='Foo')")
q = Foo.query(ancestor=model.Key('Bar', 1))
self.assertEqual(repr(q), "Query(kind='Foo', ancestor=Key('Bar', 1))")
# Let's not specify what it should show for filters and orders,
# just test that it doesn't blow up.
q1 = q.filter(Foo.rate == 1, Foo.name == 'x')
repr(q1)
q2 = q1.order(-Foo.rate)
repr(q2)
# App and namespace.
q3 = Foo.query(app='a', namespace='ns')
self.assertEqual(repr(q3), "Query(app='a', namespace='ns', kind='Foo')")
# default_options.
q4 = Foo.query(default_options=query.QueryOptions(limit=3))
self.assertEqual(
repr(q4),
"Query(kind='Foo', default_options=QueryOptions(limit=3))")
q5 = Foo.query(projection=[Foo.name, 'tags'], distinct=True)
self.assertEqual(
repr(q5),
"Query(kind='Foo', projection=['name', 'tags'], "
"group_by=['name', 'tags'])")
def testRunToQueue(self):
qry = Foo.query()
queue = tasklets.MultiFuture()
qry.run_to_queue(queue, self.conn).check_success()
results = queue.get_result()
self.assertEqual(len(results), 3)
self.assertEqual(results[0][2], self.joe)
self.assertEqual(results[1][2], self.jill)
self.assertEqual(results[2][2], self.moe)
def testRunToQueueError(self):
self.ExpectWarnings()
qry = Foo.query(Foo.name > '', Foo.rate > 0)
queue = tasklets.MultiFuture()
fut = qry.run_to_queue(queue, self.conn)
self.assertRaises(datastore_errors.BadRequestError, fut.check_success)
self.assertRaises(datastore_errors.BadRequestError, queue.check_success)
def testModernQuerySyntax(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
@classmethod
def seniors(cls, min_age, min_rank):
q = cls.query().filter(cls.age >= min_age, cls.rank <= min_rank)
q = q.order(cls.name, -cls.age)
return q
q = Employee.seniors(42, 5)
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
self.assertEqual(query._orders_to_orderings(q.orders),
[('name', query._ASC), ('Age', query._DESC)])
def testAndQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.AND(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.AND(Employee.age >= 42, Employee.rank <= 5))
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
def testOrQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.OR(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.OR(Employee.age < 42, Employee.rank > 5))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('Age', '<', 42),
query.FilterNode('rank', '>', 5)))
def testEmptyInFilter(self):
self.ExpectWarnings()
class Employee(model.Model):
name = model.StringProperty()
for arg in [], (), set(), frozenset():
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(q.filters, query.FalseNode())
self.assertNotEqual(q.filters, 42)
f = iter(q).has_next_async()
self.assertRaises(datastore_errors.BadQueryError, f.check_success)
def testSingletonInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['xyzzy']))
self.assertEqual(q.filters, query.FilterNode('name', '=', 'xyzzy'))
self.assertNotEqual(q.filters, 42)
e = Employee(name='xyzzy')
e.put()
self.assertEqual(q.get(), e)
def testInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['a', 'b']))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('name', '=', 'a'),
query.FilterNode('name', '=', 'b')))
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
self.assertEqual(list(q), [a, b])
def testInFilterArgTypes(self):
class Employee(model.Model):
name = model.StringProperty()
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
for arg in ('a', 'b'), set(['a', 'b']), frozenset(['a', 'b']):
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(set(x.name for x in q), set(['a', 'b']))
def testInFilterWithNone(self):
class Employee(model.Model):
# Try a few different property types, to get a good mix of what
# used to fail.
name = model.StringProperty()
boss = model.KeyProperty()
age = model.IntegerProperty()
date = model.DateProperty()
a = Employee(name='a', age=42L)
a.put()
bosskey = model.Key(Employee, 'x')
b = Employee(boss=bosskey, date=datetime.date(1996, 1, 31))
b.put()
keys = set([a.key, b.key])
q1 = Employee.query(Employee.name.IN(['a', None]))
self.assertEqual(set(e.key for e in q1), keys)
q2 = Employee.query(Employee.boss.IN([bosskey, None]))
self.assertEqual(set(e.key for e in q2), keys)
q3 = Employee.query(Employee.age.IN([42, None]))
self.assertEqual(set(e.key for e in q3), keys)
q4 = Employee.query(Employee.date.IN([datetime.date(1996, 1, 31), None]))
self.assertEqual(set(e.key for e in q4), keys)
def testQueryExceptions(self):
self.ExpectWarnings()
q = Foo.query(Foo.name > '', Foo.rate > 0)
f = q.fetch_async()
self.assertRaises(datastore_errors.BadRequestError, f.check_success)
def testQueryUnindexedFails(self):
# Shouldn't be able to query for unindexed properties
class SubModel(model.Model):
booh = model.IntegerProperty(indexed=False)
class Emp(model.Model):
name = model.StringProperty()
text = model.TextProperty()
blob = model.BlobProperty()
sub = model.StructuredProperty(SubModel)
struct = model.StructuredProperty(Foo, indexed=False)
local = model.LocalStructuredProperty(Foo)
Emp.query(Emp.name == 'a').fetch() # Should pass
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text.IN(['a', 'b']))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.blob == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub == SubModel(booh=42))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub.booh == 42)
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.struct == Foo(name='a'))
# TODO: Make this fail? See issue 89. http://goo.gl/K4gbY
# Currently StructuredProperty(..., indexed=False) has no effect.
# self.assertRaises(datastore_errors.BadFilterError,
# lambda: Emp.struct.name == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.local == Foo(name='a'))
def testConstructor(self):
self.ExpectWarnings()
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming.
q = model.IntegerProperty(required=True)
key = Foo(p=1, q=2, namespace='ns').put()
# Check distinct validation
self.assertRaises(TypeError, Foo.query, distinct=True)
self.assertRaises(TypeError, Foo.query, distinct=False)
self.assertRaises(TypeError, Foo.query,
distinct=True, projection=Foo.p, group_by=[])
self.assertRaises(TypeError, Foo.query,
distinct=False, projection=Foo.p, group_by=[])
# Check both projection and default_options.projection/keys_only is not
# allowed.
self.assertRaises(TypeError, Foo.query,
projection='pp',
default_options=query.QueryOptions(projection=['pp']))
self.assertRaises(TypeError, Foo.query,
projection='pp',
default_options=query.QueryOptions(keys_only=False))
# Check empty projection/group_by not allowed.
for empty in ([], tuple()):
self.assertRaises(TypeError, Foo.query, projection=empty)
self.assertRaises(TypeError, Foo.query, group_by=empty)
# Check that ancestor and namespace must match.
self.assertRaises(TypeError, Foo.query, namespace='other', ancestor=key)
def testIsDistinct(self):
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming.
q = model.IntegerProperty(required=True)
for qry in (Foo.query(projection=[Foo.p, 'q'], distinct=True),
Foo.query(projection=[Foo.p, 'q'],
group_by=(Foo.q, 'pp', Foo.p))):
self.assertEquals(True, qry.is_distinct)
for qry in (Foo.query(),
Foo.query(projection=[Foo.p, 'q'])):
self.assertEquals(False, qry.is_distinct)
def testIndexOnlyPropertyListNormalization(self):
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming.
def assertNormalization(expected, value):
q1 = Foo.query(group_by=value, projection=value)
q2 = Foo.query(distinct=True, projection=value)
# make sure it survives mutation.
q1 = q1.order(Foo.p).filter(Foo.p > 0)
q2 = q2.order(Foo.p).filter(Foo.p > 0)
self.assertEquals(expected, q1.group_by)
self.assertEquals(expected, q1.projection)
self.assertEquals(expected, q2.group_by)
self.assertEquals(expected, q2.projection)
for value in (('pp',), ['pp']):
assertNormalization(('pp',), value)
  def testIndexOnlyPropertyValidation(self):
    """Verify projection/group_by arguments are validated against indexed properties."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp', indexed=False)  # Also check renaming.
      q = model.IntegerProperty(required=True)
    self.assertRaises(TypeError,
                      Foo.query, group_by=[Foo.q, 42], projection=[Foo.q])
    self.assertRaises(datastore_errors.BadArgumentError,
                      Foo.query().get, projection=[42])
    self.assertRaises(TypeError,
                      Foo.query, group_by=Foo.q, projection=[Foo.q])
    self.assertRaises(TypeError,
                      Foo.query, projection=Foo.q)
    # Legacy support for single value projection
    Foo.query().get(projection=Foo.q)
    # Unindexed properties and unknown names are rejected under both the
    # current and the deprecated-alias exception classes.
    for bad in ((Foo.p,), ['wot']):
      self.assertRaises(model.InvalidPropertyError, Foo.query,
                        group_by=bad, projection=[Foo.q])
      self.assertRaises(model.BadProjectionError, Foo.query,
                        group_by=bad, projection=[Foo.q])
      self.assertRaises(model.InvalidPropertyError, Foo.query, projection=bad)
      self.assertRaises(model.BadProjectionError, Foo.query, projection=bad)
      self.assertRaises(model.InvalidPropertyError,
                        Foo.query().get, projection=bad)
      self.assertRaises(model.BadProjectionError,
                        Foo.query().get, projection=bad)
  def testGroupByQuery(self):
    """Verify group_by queries, with projection from args and default options."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp')  # Also check renaming
      q = model.IntegerProperty(required=True)
      r = model.IntegerProperty(repeated=True)
      d = model.IntegerProperty(default=42)
    key1 = Foo(p=1, q=5, r=[3, 4, 5]).put()
    key2 = Foo(p=1, q=4, r=[3, 4]).put()
    key3 = Foo(p=2, q=3, r=[3, 4]).put()
    key4 = Foo(p=2, q=2, r=[3]).put()
    qry = Foo.query(projection=[Foo.p], group_by=[Foo.r, Foo.p])
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    # One result per distinct (r, p) combination, ordered by p, r, q.
    expected = [(1, key2), (1, key2), (1, key1), (2, key4), (2, key3)]
    # Test fetch and iter in base case.
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry])
    # Test projection using default options.
    qry = Foo.query(group_by=[Foo.r, Foo.p],
                    default_options=query.QueryOptions(projection=['pp']))
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry])
    # Test projection with other default options.
    qry = Foo.query(projection=[Foo.p], group_by=[Foo.r, Foo.p],
                    default_options=query.QueryOptions(limit=4))
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    self.assertEqual(expected[:4], [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected[:4], [(ent.p, ent.key) for ent in qry])
  def testProjectionQuery(self):
    """Verify projection queries populate only projected properties."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp')  # Also check renaming
      q = model.IntegerProperty(required=True)
      r = model.IntegerProperty(repeated=True)
      d = model.IntegerProperty(default=42)
    key = Foo(p=1, q=2, r=[3, 4]).put()
    q = Foo.query(Foo.p >= 0)
    ent = q.get(projection=[Foo.p, 'q'])
    self.assertItemsEqual(ent._projection, ('pp', 'q'))
    self.assertEqual(ent.p, 1)
    self.assertEqual(ent.q, 2)
    # Non-projected properties must raise instead of returning defaults.
    self.assertRaises(model.UnprojectedPropertyError, lambda: ent.r)
    self.assertRaises(model.UnprojectedPropertyError, lambda: ent.d)
    # A projected repeated property yields one entity per value.
    ents = q.fetch(projection=['pp', 'r'])
    ents.sort(key=lambda ent: ent.r)
    self.assertEqual(ents, [Foo(p=1, r=[3], key=key, projection=('pp', 'r')),
                            Foo(p=1, r=[4], key=key, projection=['pp', 'r'])])
  def testProjectionQuery_AllTypes(self):
    """Verify projection round-trips correctly for every indexed property type."""
    class Foo(model.Model):
      abool = model.BooleanProperty()
      aint = model.IntegerProperty()
      afloat = model.FloatProperty()
      astring = model.StringProperty()
      ablob = model.BlobProperty(indexed=True)
      akey = model.KeyProperty()
      auser = model.UserProperty()
      apoint = model.GeoPtProperty()
      adatetime = model.DateTimeProperty()
      adate = model.DateProperty()
      atime = model.TimeProperty()
    boo = Foo(abool=True,
              aint=42,
              afloat=3.14,
              astring='foo',
              ablob='bar',
              akey=model.Key(Foo, 'ref'),
              auser=users.User('<EMAIL>'),
              apoint=model.GeoPt(52.35, 4.9166667),
              adatetime=datetime.datetime(2012, 5, 1, 8, 19, 42),
              adate=datetime.date(2012, 5, 1),
              atime=datetime.time(8, 19, 42),
              )
    boo.put()
    qry = Foo.query()
    # Project each property in turn; the projected value must survive a
    # protobuf round-trip and match the stored value, while accessing any
    # other property must raise UnprojectedPropertyError.
    for prop in Foo._properties.itervalues():
      ent = qry.get(projection=[prop._name])
      pb = ent._to_pb()
      decoded_ent = Foo._from_pb(pb, set_key=False)
      self.assertEqual(ent, decoded_ent)
      self.assertEqual(getattr(ent, prop._code_name),
                       getattr(boo, prop._code_name))
      for otherprop in Foo._properties.itervalues():
        if otherprop is not prop:
          try:
            getattr(ent, otherprop._code_name)
            self.fail('Expected an UnprojectedPropertyError for property %s'
                      ' when projecting %s.' % (otherprop, prop))
          except model.UnprojectedPropertyError:
            pass
  def testProjectionQuery_ComputedProperties(self):
    """Verify computed properties are available when projected or derivable."""
    class Foo(model.Model):
      a = model.StringProperty()
      b = model.StringProperty()
      c = model.ComputedProperty(lambda ent: '<%s.%s>' % (ent.a, ent.b))
      d = model.ComputedProperty(lambda ent: '<%s>' % (ent.a,))
    foo = Foo(a='a', b='b')
    foo.put()
    self.assertEqual((foo.a, foo.b, foo.c, foo.d), ('a', 'b', '<a.b>', '<a>'))
    qry = Foo.query()
    # Projecting all inputs makes every computed property available.
    x = qry.get(projection=['a', 'b'])
    self.assertEqual((x.a, x.b, x.c, x.d), ('a', 'b', '<a.b>', '<a>'))
    # Projecting only 'a' leaves 'b' (and hence 'c') unavailable.
    y = qry.get(projection=['a'])
    self.assertEqual((y.a, y.d), ('a', '<a>'))
    self.assertRaises(model.UnprojectedPropertyError, lambda: y.b)
    self.assertRaises(model.UnprojectedPropertyError, lambda: y.c)
    z = qry.get(projection=['b'])
    self.assertEqual((z.b,), ('b',))
    # Computed properties can also be projected directly.
    p = qry.get(projection=['c', 'd'])
    self.assertEqual((p.c, p.d), ('<a.b>', '<a>'))
  def testProjectionQuery_StructuredProperties(self):
    """Verify projection of nested structured sub-properties and read-only results."""
    class Inner(model.Model):
      foo = model.StringProperty()
      bar = model.StringProperty()
      beh = model.StringProperty()
    class Middle(model.Model):
      baz = model.StringProperty()
      inner = model.StructuredProperty(Inner)
      inners = model.StructuredProperty(Inner, repeated=True)
    class Outer(model.Model):
      name = model.StringProperty()
      middle = model.StructuredProperty(Middle, 'mid')
    one = Outer(name='one',
                middle=Middle(baz='one',
                              inner=Inner(foo='foo', bar='bar'),
                              inners=[Inner(foo='a', bar='b'),
                                      Inner(foo='c', bar='d')]))
    one.put()
    two = Outer(name='two',
                middle=Middle(baz='two',
                              inner=Inner(foo='x', bar='y'),
                              inners=[Inner(foo='p', bar='q')]))
    two.put()
    q = Outer.query()
    # Project one level down; the projection is propagated into the
    # structured sub-entities and survives a protobuf round-trip.
    x, y = q.fetch(projection=[Outer.name, Outer.middle.baz])
    pb = x._to_pb()
    z = Outer._from_pb(pb, set_key=False)
    self.assertEqual(x, z)
    self.assertEqual(x.middle.baz, 'one')
    self.assertEqual(x.middle._projection, ('baz',))
    self.assertEqual(x,
                     Outer(key=one.key, name='one',
                           middle=Middle(baz='one', projection=['baz']),
                           projection=['mid.baz', 'name']))
    self.assertEqual(y,
                     Outer(key=two.key, name='two',
                           middle=Middle(baz='two', projection=['baz']),
                           projection=['mid.baz', 'name']))
    # Projected entities are read-only, including their sub-entities.
    self.assertRaises(model.UnprojectedPropertyError, lambda: x.middle.inner)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x, 'middle', None)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x, 'middle', x.middle)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle, 'inner', None)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle, 'inner',
                      Inner(foo='', projection=['foo']))
    # Project two levels down, mixing Property objects and name strings.
    x = q.get(projection=[Outer.middle.inner.foo, 'mid.inner.bar'])
    self.assertEqual(x.middle.inner.foo, 'foo')
    self.assertItemsEqual(x.middle.inner._projection, ('bar', 'foo'))
    self.assertItemsEqual(x.middle._projection, ('inner.bar', 'inner.foo'))
    self.assertItemsEqual(x._projection, ('mid.inner.bar', 'mid.inner.foo'))
    self.assertEqual(x,
                     Outer(key=one.key,
                           projection=['mid.inner.bar', 'mid.inner.foo'],
                           middle=Middle(projection=['inner.bar', 'inner.foo'],
                                         inner=Inner(projection=['bar', 'foo'],
                                                     foo='foo', bar='bar'))))
    self.assertRaises(model.UnprojectedPropertyError,
                      lambda: x.middle.inner.beh)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle.inner, 'foo', '')
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle.inner, 'beh', '')
    # Projecting a repeated structured property yields one result per value.
    xs = q.fetch(projection=[Outer.middle.inners.foo])
    self.assertEqual(xs[0],
                     Outer(key=one.key,
                           middle=Middle(inners=[Inner(foo='a',
                                                       _projection=('foo',))],
                                         _projection=('inners.foo',)),
                           _projection=('mid.inners.foo',)))
    self.assertEqual(len(xs), 3)
    for x, foo in zip(xs, ['a', 'c', 'p']):
      self.assertEqual(len(x.middle.inners), 1)
      self.assertEqual(x.middle.inners[0].foo, foo)
  def testFilterRepr(self):
    """Verify the repr of a FilterNode built from a property comparison."""
    class Employee(model.Model):
      name = model.StringProperty()
    f = (Employee.name == 'xyzzy')
    self.assertEqual(repr(f), "FilterNode('name', '=', 'xyzzy')")
  def testNodeComparisons(self):
    """Verify equality semantics of filter nodes; ordering comparisons raise."""
    a = query.FilterNode('foo', '=', 1)
    b = query.FilterNode('foo', '=', 1)
    c = query.FilterNode('foo', '=', 2)
    d = query.FilterNode('foo', '<', 1)
    # Don't use assertEqual/assertNotEqual; we want to be sure that
    # __eq__ or __ne__ is really called here!
    self.assertTrue(a == b)
    self.assertTrue(a != c)
    self.assertTrue(b != d)
    self.assertRaises(TypeError, lambda: a < b)
    self.assertRaises(TypeError, lambda: a <= b)
    self.assertRaises(TypeError, lambda: a > b)
    self.assertRaises(TypeError, lambda: a >= b)
    # Conjunctions compare by their component nodes.
    x = query.AND(a, b, c)
    y = query.AND(a, b, c)
    z = query.AND(a, d)
    self.assertTrue(x == y)
    self.assertTrue(x != z)
  def testQueryForStructuredProperty(self):
    """Verify filtering and ordering on structured sub-properties."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    b1 = Bar(name='b1', foo=Foo(name='nest', rate=1, tags=['tag1', 'tag2']))
    b1.put()
    b2 = Bar(name='b2', foo=Foo(name='best', rate=2, tags=['tag2', 'tag3']))
    b2.put()
    b3 = Bar(name='b3', foo=Foo(name='rest', rate=2, tags=['tag2']))
    b3.put()
    q1 = Bar.query().order(Bar.name)
    self.assertEqual(q1.fetch(10), [b1, b2, b3])
    q2 = Bar.query().filter(Bar.foo.rate >= 2)
    self.assertEqual(q2.fetch(10), [b2, b3])
    # Ordering on sub-properties, including a repeated order key.
    q3 = q2.order(Bar.foo.rate, -Bar.foo.name, +Bar.foo.rate)
    self.assertEqual(q3.fetch(10), [b3, b2])
  def testQueryForStructuredPropertyErrors(self):
    """Verify invalid comparisons against a whole structured property raise."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    # Can't use inequalities.
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo < Foo())
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo != Foo())
    # Can't use an empty value.
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo == Foo())
  def testQueryForStructuredPropertyIn(self):
    """Verify IN filters on a structured property, including error cases."""
    self.ExpectWarnings()
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    a = Bar(name='a', foo=Foo(name='a'))
    a.put()
    b = Bar(name='b', foo=Foo(name='b'))
    b.put()
    self.assertEqual(
        Bar.query(Bar.foo.IN((Foo(name='a'), Foo(name='b')))).fetch(),
        [a, b])
    self.assertEqual(Bar.query(Bar.foo.IN([Foo(name='a')])).fetch(), [a])
    # An IN query with empty argument can be constructed but not executed.
    q = Bar.query(Bar.foo.IN(set()))
    self.assertRaises(datastore_errors.BadQueryError, q.fetch)
    # Passing a non-sequence argument should fail.
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, 42)
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, None)
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, 'not a sequence')
  def testQueryForNestedStructuredProperty(self):
    """Verify filtering on doubly-nested structured sub-properties."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    class Bak(model.Model):
      bar = model.StructuredProperty(Bar)
    class Baz(model.Model):
      bar = model.StructuredProperty(Bar)
      bak = model.StructuredProperty(Bak)
      rank = model.IntegerProperty()
    b1 = Baz(bar=Bar(foo=Foo(name='a')))
    b1.put()
    b2 = Baz(bar=Bar(foo=Foo(name='b')), bak=Bak(bar=Bar(foo=Foo(name='c'))))
    b2.put()
    q1 = Baz.query().filter(Baz.bar.foo.name >= 'a')
    self.assertEqual(q1.fetch(10), [b1, b2])
    # Only b2 has the bak sub-structure populated.
    q2 = Baz.query().filter(Baz.bak.bar.foo.name >= 'a')
    self.assertEqual(q2.fetch(10), [b2])
  def testQueryForWholeStructure(self):
    """Verify equality filters against whole structured values (partial match)."""
    class Employee(model.Model):
      name = model.StringProperty()
      rank = model.IntegerProperty()
    class Manager(Employee):
      report = model.StructuredProperty(Employee, repeated=True)
    reports_a = []
    for i in range(3):
      e = Employee(name=str(i), rank=i)
      e.put()
      e.key = None
      reports_a.append(e)
    reports_b = []
    for i in range(3, 6):
      e = Employee(name=str(i), rank=0)
      e.put()
      e.key = None
      reports_b.append(e)
    mgr_a = Manager(name='a', report=reports_a)
    mgr_a.put()
    mgr_b = Manager(name='b', report=reports_b)
    mgr_b.put()
    mgr_c = Manager(name='c', report=reports_a + reports_b)
    mgr_c.put()
    # Only properties set on the filter value participate in the match;
    # all set properties must match within the same repeated sub-entity.
    res = list(Manager.query(Manager.report == Employee(name='1', rank=1)))
    self.assertEqual(res, [mgr_a, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0)))
    self.assertEqual(res, [mgr_a, mgr_b, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='3')))
    self.assertEqual(res, [mgr_b, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='1')))
    self.assertEqual(res, [])
    # Multiple whole-structure filters AND together across sub-entities.
    res = list(Manager.query(Manager.report == Employee(rank=0, name='0'),
                             Manager.report == Employee(rank=1, name='1')))
    self.assertEqual(res, [mgr_a, mgr_c])
    q = Manager.query(Manager.report == Employee(rank=2, name='2'))
    res = list(q)
    self.assertEqual(res, [mgr_a, mgr_c])
    # offset/limit work on the post-filtered result stream.
    res = list(q.iter(offset=1))
    self.assertEqual(res, [mgr_c])
    res = list(q.iter(limit=1))
    self.assertEqual(res, [mgr_a])
  def testQueryForWholeStructureCallsDatastoreType(self):
    """Regression test: whole-structure filters must use _datastore_type()."""
    # See issue 87.  http://goo.gl/Tl5Ed
    class Event(model.Model):
      what = model.StringProperty()
      when = model.DateProperty()  # Has non-trivial _datastore_type().
    class Outer(model.Model):
      who = model.StringProperty()
      events = model.StructuredProperty(Event, repeated=True)
    q = Outer.query(Outer.events == Event(what='stuff',
                                          when=datetime.date.today()))
    q.fetch()  # Failed before the fix.
  def testQueryForWholeNestedStructure(self):
    """Verify equality filter against a fully-populated nested structure."""
    class A(model.Model):
      a1 = model.StringProperty()
      a2 = model.StringProperty()
    class B(model.Model):
      b1 = model.StructuredProperty(A)
      b2 = model.StructuredProperty(A)
    class C(model.Model):
      c = model.StructuredProperty(B)
    x = C(c=B(b1=A(a1='a1', a2='a2'), b2=A(a1='a3', a2='a4')))
    x.put()
    q = C.query(C.c == x.c)
    self.assertEqual(q.get(), x)
  def testQueryForWholeStructureNone(self):
    """Verify a structured property can be filtered for None."""
    class X(model.Model):
      name = model.StringProperty()
    class Y(model.Model):
      x = model.StructuredProperty(X)
    y = Y(x=None)
    y.put()
    q = Y.query(Y.x == None)
    self.assertEqual(q.fetch(), [y])
  def testQueryAncestorConsistentWithAppId(self):
    """Verify the query's app argument must match the ancestor key's app."""
    class Employee(model.Model):
      pass
    a = model.Key(Employee, 1)
    self.assertEqual(a.app(), self.APP_ID)  # Just checkin'.
    Employee.query(ancestor=a, app=a.app()).fetch()  # Shouldn't fail.
    self.assertRaises(Exception, Employee.query, ancestor=a, app='notthisapp')
  def testQueryAncestorConsistentWithNamespace(self):
    """Verify the namespace argument must be consistent with the ancestor key."""
    class Employee(model.Model):
      pass
    a = model.Key(Employee, 1, namespace='ns')
    self.assertEqual(a.namespace(), 'ns')  # Just checkin'.
    # Matching or unspecified namespace is accepted.
    Employee.query(ancestor=a, namespace='ns').fetch()
    Employee.query(ancestor=a, namespace=None).fetch()
    self.assertRaises(Exception,
                      Employee.query, ancestor=a, namespace='another')
    self.assertRaises(Exception,
                      Employee.query, ancestor=a, namespace='')
    # And again with the default namespace.
    b = model.Key(Employee, 1)
    self.assertEqual(b.namespace(), '')  # Just checkin'.
    Employee.query(ancestor=b, namespace='')
    Employee.query(ancestor=b, namespace=None)
    self.assertRaises(Exception,
                      Employee.query, ancestor=b, namespace='ns')
    # Finally some queries with a namespace but no ancestor.
    Employee.query(namespace='').fetch()
    Employee.query(namespace='ns').fetch()
  def testQueryWithNamespace(self):
    """Verify queries only see entities stored in the requested namespace."""
    class Employee(model.Model):
      pass
    k = model.Key(Employee, None, namespace='ns')
    e = Employee(key=k)
    e.put()
    self.assertEqual(Employee.query().fetch(), [])
    self.assertEqual(Employee.query(namespace='ns').fetch(), [e])
  def testQueryFilterAndOrderPreserveNamespace(self):
    """Verify filter() and order() carry the namespace to the derived query."""
    class Employee(model.Model):
      name = model.StringProperty()
    q1 = Employee.query(namespace='ns')
    q2 = q1.filter(Employee.name == 'Joe')
    self.assertEqual(q2.namespace, 'ns')
    # Ditto for order()
    q3 = q2.order(Employee.name)
    self.assertEqual(q3.namespace, 'ns')
  def testMultiQuery(self):
    """Verify _MultiQuery merges two ordered sub-queries into one stream."""
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query._MultiQuery([q1, q2])
    res = list(qq)
    self.assertEqual(res, [self.jill, self.joe])
  def testIterAsync(self):
    """Verify asynchronous iteration via has_next_async() inside a tasklet."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()
  def testMap(self):
    """Verify Query.map() with both a plain callback and a tasklet callback."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    self.assertEqual(q.map(callback), ['jill', 'joe'])
    self.assertEqual(q.map(callback_async), ['jill', 'joe'])
  # TODO: Test map() with esoteric argument combinations
  # e.g. keys_only, produce_cursors, and merge_future.
  def testMapAsync(self):
    """Verify Query.map_async() with plain and tasklet callbacks."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    @tasklets.synctasklet
    def foo():
      fut = q.map_async(callback)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
      fut = q.map_async(callback_async)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
    foo()
  def testFetch(self):
    """Verify fetch() honors the limit argument."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10), [self.jill, self.joe])
    self.assertEqual(q.fetch(2), [self.jill, self.joe])
    self.assertEqual(q.fetch(1), [self.jill])
  def testFetchAsync(self):
    """Verify fetch_async() returns a future yielding the same results."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.fetch_async(10)
      self.assertEqual(res, [self.jill, self.joe])
      res = yield q.fetch_async(2)
      self.assertEqual(res, [self.jill, self.joe])
      res = yield q.fetch_async(1)
      self.assertEqual(res, [self.jill])
    foo()
  def testFetchEmpty(self):
    """Verify fetch() returns an empty list when nothing matches."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.fetch(1), [])
  def testFetchKeysOnly(self):
    """Verify fetch(keys_only=True) returns keys instead of entities."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10, keys_only=True),
                     [self.jill.key, self.joe.key])
  def testGet(self):
    """Verify get() returns the first matching entity."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(), self.jill)
  def testGetEmpty(self):
    """Verify get() returns None when nothing matches."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.get(), None)
  def testGetKeysOnly(self):
    """Verify get(keys_only=True) returns the first matching key."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(keys_only=True), self.jill.key)
  def testCursors(self):
    """Verify cursor_before()/cursor_after() positions while iterating."""
    q = query.Query(kind='Foo')
    it = q.iter(produce_cursors=True)
    expected = [self.joe, self.jill, self.moe]
    # Cursors are unavailable before the first result is consumed.
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
    before = []
    after = []
    for i, ent in enumerate(it):
      self.assertEqual(ent, expected[i])
      before.append(it.cursor_before())
      after.append(it.cursor_after())
    before.append(it.cursor_before())
    after.append(it.cursor_after())
    # Each result's after-cursor equals the next result's before-cursor.
    self.assertEqual(before[1], after[0])
    self.assertEqual(before[2], after[1])
    self.assertEqual(before[3], after[2])
    self.assertEqual(before[3], after[3])  # !!!
  def testCursorsKeysOnly(self):
    """Verify cursor positions behave the same for keys-only iteration."""
    q = query.Query(kind='Foo')
    it = q.iter(produce_cursors=True, keys_only=True)
    expected = [self.joe.key, self.jill.key, self.moe.key]
    # Cursors are unavailable before the first result is consumed.
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
    before = []
    after = []
    for i, ent in enumerate(it):
      self.assertEqual(ent, expected[i])
      before.append(it.cursor_before())
      after.append(it.cursor_after())
    before.append(it.cursor_before())
    after.append(it.cursor_after())
    # Each result's after-cursor equals the next result's before-cursor.
    self.assertEqual(before[1], after[0])
    self.assertEqual(before[2], after[1])
    self.assertEqual(before[3], after[2])
    self.assertEqual(before[3], after[3])  # !!!
  def testCursorsForAugmentedQuery(self):
    """Verify cursors are unsupported on post-filtered (augmented) queries."""
    class Employee(model.Model):
      name = model.StringProperty()
      rank = model.IntegerProperty()
    class Manager(Employee):
      report = model.StructuredProperty(Employee, repeated=True)
    reports_a = []
    for i in range(3):
      e = Employee(name=str(i), rank=i)
      e.put()
      e.key = None
      reports_a.append(e)
    reports_b = []
    for i in range(3, 6):
      e = Employee(name=str(i), rank=0)
      e.put()
      e.key = None
      reports_b.append(e)
    mgr_a = Manager(name='a', report=reports_a)
    mgr_a.put()
    mgr_b = Manager(name='b', report=reports_b)
    mgr_b.put()
    mgr_c = Manager(name='c', report=reports_a + reports_b)
    mgr_c.put()
    # Whole-structure equality requires in-memory post-filtering, which
    # makes exact cursor positions unavailable.
    it = Manager.query(Manager.report == Employee(name='1', rank=1)).iter()
    it.next()
    self.assertRaises(NotImplementedError, it.cursor_before)
    self.assertRaises(NotImplementedError, it.cursor_after)
    it.next()
    self.assertRaises(NotImplementedError, it.cursor_before)
    self.assertRaises(NotImplementedError, it.cursor_after)
    self.assertFalse(it.has_next())
  def testCursorsEfficientPaging(self):
    """Verify limit=pagesize+1 with batch_size=pagesize pages in one RPC."""
    # We want to read a 'page' of data, get the cursor just past the
    # page, and know whether there is another page, all with a single
    # RPC.  To do this, set limit=pagesize+1, batch_size=pagesize.
    q = query.Query(kind='Foo')
    cursors = {}
    mores = {}
    for pagesize in [1, 2, 3, 4]:
      it = q.iter(produce_cursors=True, limit=pagesize + 1, batch_size=pagesize)
      todo = pagesize
      for _ in it:
        todo -= 1
        if todo <= 0:
          break
      cursors[pagesize] = it.cursor_after()
      mores[pagesize] = it.probably_has_next()
    # There are 3 Foo entities, so pages of 1 and 2 have more results.
    self.assertEqual(mores, {1: True, 2: True, 3: False, 4: False})
    self.assertEqual(cursors[3], cursors[4])
  # TODO: Assert that only one RPC call was made.
  def testProbablyHasNext(self):
    """Verify probably_has_next() across a full iteration."""
    q = query.Query(kind='Foo')
    probablies = []
    it = q.iter(produce_cursors=True)
    for _ in it:
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
  def testProbablyHasNextMultipleBatches(self):
    """Verify probably_has_next() when results span multiple batches."""
    q = query.Query(kind='Foo')
    probablies = []
    it = q.iter(produce_cursors=True, batch_size=1)
    for _ in it:
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
  def testProbablyHasNextAndHasNextInteraction(self):
    """Verify probably_has_next() agrees with has_next() when mixed."""
    q = query.Query(kind='Foo')
    mores = []
    probablies = []
    it = q.iter(produce_cursors=True)
    for _ in it:
      mores.append(it.has_next())
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
    self.assertEqual(mores, [True, True, False])
  def testCursorsDelete(self):
    """Tests that deleting an entity doesn't affect cursor positioning."""
    class DeletedEntity(model.Model):
      name = model.StringProperty()
    entities = [DeletedEntity(name='A'),
                DeletedEntity(name='B'),
                DeletedEntity(name='C')]
    model.put_multi(entities)
    q = DeletedEntity.query().order(DeletedEntity.name)
    it = q.iter(limit=2, produce_cursors=True)
    self.assertEqual('A', it.next().name)
    entities[0].key.delete()
    # Grab cursor after deleting first entity. This should point before second.
    cursor = it.cursor_after()
    it = q.iter(start_cursor=cursor, produce_cursors=True)
    self.assertEqual('B', it.next().name)
  def testSkippedResultCursor(self):
    """Verify a cursor taken after an offset skips the same results on reuse."""
    class SkippedEntity(model.Model):
      name = model.StringProperty()
    entities = [SkippedEntity(name='A'),
                SkippedEntity(name='B'),
                SkippedEntity(name='C')]
    model.put_multi(entities)
    q = SkippedEntity.query().order(SkippedEntity.name)
    it = q.iter(offset=2, produce_cursors=True)
    self.assertEqual('C', it.next().name)
    cursor = it.cursor_before()
    # Run the query at the iterator returned before the first result
    it = q.iter(start_cursor=cursor, produce_cursors=True)
    self.assertEqual('C', it.next().name)
  def testCount(self):
    """Verify count() honors its limit argument."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.count(10), 2)
    self.assertEqual(q.count(1), 1)
  def testCountAsync(self):
    """Verify count_async() returns a future yielding the count."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.count_async(10)
      self.assertEqual(res, 2)
      res = yield q.count_async(1)
      self.assertEqual(res, 1)
    foo()
  def testCountEmpty(self):
    """Verify count() returns 0 when nothing matches."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.count(1), 0)
  def testCountPostFilter(self):
    """Verify count() works on queries requiring in-memory post-filtering."""
    class Froo(model.Model):
      name = model.StringProperty()
      rate = model.IntegerProperty()
      age = model.IntegerProperty()
    class Bar(model.Model):
      name = model.StringProperty()
      froo = model.StructuredProperty(Froo, repeated=True)
    b1 = Bar(name='b1', froo=[Froo(name='a', rate=1)])
    b1.put()
    b2 = Bar(name='b2', froo=[Froo(name='a', rate=1)])
    b2.put()
    # Whole-structure equality forces post-filtering of results.
    q = Bar.query(Bar.froo == Froo(name='a', rate=1))
    self.assertEqual(q.count(3), 2)
    self.assertEqual(q.count(2), 2)
    self.assertEqual(q.count(1), 1)
  def testCountDisjunction(self):
    """Verify count() works on an IN (disjunction) query."""
    q = Foo.query(Foo.name.IN(['joe', 'jill']))
    self.assertEqual(q.count(3), 2)
    self.assertEqual(q.count(2), 2)
    self.assertEqual(q.count(1), 1)
  def testLargeCount(self):
    """Verify count() past the stub's maximum query offset."""
    class Bar(model.Model):
      pass
    # Write enough entities to exceed the stub's offset ceiling.
    for i in xrange(0, datastore_stub_util._MAX_QUERY_OFFSET + 10):
      Bar(id=str(i)).put()
    count = Bar.query().count(datastore_stub_util._MAX_QUERY_OFFSET + 20)
    self.assertEqual(datastore_stub_util._MAX_QUERY_OFFSET + 10, count)
    # Test count less than requested limit.
    count = Bar.query().count(datastore_stub_util._MAX_QUERY_OFFSET + 5)
    self.assertEqual(datastore_stub_util._MAX_QUERY_OFFSET + 5, count)
  def testFetchPage(self):
    """Verify fetch_page() results, cursors and the more-results flag."""
    # This test implicitly also tests fetch_page_async().
    q = query.Query(kind='Foo')
    # Page through the 3 fixture entities at several page sizes, checking
    # the 'more' flag flips to False exactly when results are exhausted.
    page_size = 1
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.jill])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 2
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 3
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill, self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 4
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill, self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
  def testMultiQueryIterator(self):
    """Verify async iteration over an ordered IN (multi) query."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
    q = q.order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()
  def testMultiQueryIteratorUnordered(self):
    """Verify async iteration over an unordered IN query (order-insensitive)."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      # Without an order, only the set of results is guaranteed.
      self.assertEqual(set(r._key for r in res),
                       set([self.jill._key, self.joe._key]))
    foo()
  def testMultiQueryFetch(self):
    """Verify fetch() variants (limit, offset, keys_only) on an ordered IN query."""
    q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(-Foo.name)
    expected = [self.joe, self.jill]
    self.assertEqual(q.fetch(10), expected)
    self.assertEqual(q.fetch(None), expected)
    self.assertEqual(q.fetch(), expected)
    self.assertEqual(q.fetch(2), expected)
    self.assertEqual(q.fetch(1), expected[:1])
    self.assertEqual(q.fetch(10, offset=1), expected[1:])
    self.assertEqual(q.fetch(1, offset=1), expected[1:])
    self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
  def testMultiQueryFetchUnordered(self):
    """Verify fetch() variants on an unordered IN query."""
    q = Foo.query(Foo.tags.IN(['joe', 'jill']))
    expected = [self.joe, self.jill]
    self.assertEqual(q.fetch(10), expected)
    self.assertEqual(q.fetch(None), expected)
    self.assertEqual(q.fetch(), expected)
    self.assertEqual(q.fetch(2), expected)
    self.assertEqual(q.fetch(1), expected[:1])
    self.assertEqual(q.fetch(10, offset=1), expected[1:])
    self.assertEqual(q.fetch(1, offset=1), expected[1:])
    self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
  def testMultiQueryCount(self):
    """Verify count() variants on an ordered IN query."""
    q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(Foo.name)
    self.assertEqual(q.count(10), 2)
    self.assertEqual(q.count(None), 2)
    self.assertEqual(q.count(), 2)
    self.assertEqual(q.count(2), 2)
    self.assertEqual(q.count(1), 1)
    self.assertEqual(q.count(10, keys_only=True), 2)
    self.assertEqual(q.count(keys_only=True), 2)
  def testMultiQueryCountUnordered(self):
    """Verify count() variants on an unordered IN query."""
    q = Foo.query(Foo.tags.IN(['joe', 'jill']))
    self.assertEqual(q.count(10), 2)
    self.assertEqual(q.count(None), 2)
    self.assertEqual(q.count(), 2)
    self.assertEqual(q.count(10, keys_only=True), 2)
    self.assertEqual(q.count(keys_only=True), 2)
  def testMultiQueryCursors(self):
    """Verify fetch_page() on an IN query requires ordering by key."""
    self.ExpectWarnings()
    q = Foo.query(Foo.tags.IN(['joe', 'jill']))
    # Cursors on a multi-query are only supported when the order ends
    # with the key; otherwise fetch_page() must raise.
    self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
    q = q.order(Foo.tags)
    self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
    q = q.order(Foo.key)
    expected = q.fetch()
    self.assertEqual(len(expected), 2)
    res, curs, more = q.fetch_page(1, keys_only=True)
    self.assertEqual(res, [expected[0].key])
    self.assertTrue(curs is not None)
    self.assertTrue(more)
    res, curs, more = q.fetch_page(1, keys_only=False, start_cursor=curs)
    self.assertEqual(res, [expected[1]])
    self.assertTrue(curs is not None)
    self.assertFalse(more)
    res, curs, more = q.fetch_page(1, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertTrue(curs is None)
    self.assertFalse(more)
  def testMultiQueryWithAndWithoutAncestor(self):
    """Verify a _MultiQuery can mix an ancestor and a non-ancestor sub-query."""
    class Benjamin(model.Model):
      name = model.StringProperty()
    ben = Benjamin(name='ben', parent=self.moe.key)
    ben.put()
    benji = Benjamin(name='benji')
    benji.put()
    bq = Benjamin.query()
    baq = Benjamin.query(ancestor=self.moe.key)
    mq = query._MultiQuery([bq, baq])
    res = list(mq)
    self.assertEqual(res, [benji, ben])
  def testNestedMultiQuery(self):
    """Verify OR of whole-structure filters combined with an order."""
    class Bar(model.Model):
      a = model.StringProperty()
      b = model.StringProperty()
    class Rank(model.Model):
      val = model.IntegerProperty()
    class Foo(model.Model):
      bar = model.StructuredProperty(Bar, repeated=True)
      rank = model.StructuredProperty(Rank)
    f1 = Foo(bar=[Bar(a='a1', b='b')], rank=Rank(val=1))
    f2 = Foo(bar=[Bar(a='a2', b='e')], rank=Rank(val=2))
    f1.put()
    f2.put()
    q = Foo.query(query.OR(Foo.bar == Bar(a='a1', b='b'),
                           Foo.bar == Bar(a='a2', b='e')))
    q = q.order(Foo.rank.val)
    self.assertEqual([f1, f2], q.fetch())
  def testProbablyHasNextWithMultiQuery(self):
    """Verify probably_has_next() is conservative on a large multi-query."""
    class Foo(model.Model):
      a = model.IntegerProperty()
    keys = model.put_multi([Foo(a=i) for i in range(100)])
    q = Foo.query(Foo.key.IN(keys)).order(Foo.a)
    it = q.iter()
    for i in range(0, 99):
      it.next()
    # Probably has next is conservative so it should always return True
    # if there are in fact more results.
    self.assertTrue(it.probably_has_next())
  def testNotEqualOperator(self):
    """Verify the != filter excludes only the matching entity."""
    q = query.Query(kind='Foo').filter(Foo.rate != 2)
    res = list(q)
    self.assertEqual(res, [self.joe, self.moe])
  def testInOperator(self):
    """Verify the IN filter matches entities with any listed tag value."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(('jill', 'hello')))
    res = list(q)
    self.assertEqual(res, [self.joe, self.jill])
  def testFullDistributiveLaw(self):
    """Verify two IN filters expand into a full OR-of-ANDs cross product."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['jill', 'hello']))
    q = q.filter(Foo.rate.IN([1, 2]))
    DisjunctionNode = query.DisjunctionNode
    ConjunctionNode = query.ConjunctionNode
    FilterNode = query.FilterNode
    expected = DisjunctionNode(
        ConjunctionNode(FilterNode('tags', '=', 'jill'),
                        FilterNode('rate', '=', 1)),
        ConjunctionNode(FilterNode('tags', '=', 'jill'),
                        FilterNode('rate', '=', 2)),
        ConjunctionNode(FilterNode('tags', '=', 'hello'),
                        FilterNode('rate', '=', 1)),
        ConjunctionNode(FilterNode('tags', '=', 'hello'),
                        FilterNode('rate', '=', 2)))
    self.assertEqual(q.filters, expected)
  def testHalfDistributiveLaw(self):
    """An 'in' node inside a conjunction distributes over the other conjuncts."""
    DisjunctionNode = query.DisjunctionNode
    ConjunctionNode = query.ConjunctionNode
    FilterNode = query.FilterNode
    filters = ConjunctionNode(
        FilterNode('tags', 'in', ['jill', 'hello']),
        ConjunctionNode(FilterNode('rate', '=', 1),
                        FilterNode('name', '=', 'moe')))
    # Each 'in' value pairs with the full inner conjunction.
    expected = DisjunctionNode(
        ConjunctionNode(FilterNode('tags', '=', 'jill'),
                        FilterNode('rate', '=', 1),
                        FilterNode('name', '=', 'moe')),
        ConjunctionNode(FilterNode('tags', '=', 'hello'),
                        FilterNode('rate', '=', 1),
                        FilterNode('name', '=', 'moe')))
    self.assertEqual(filters, expected)
def testKeyFilter(self):
class MyModel(model.Model):
number = model.IntegerProperty()
k1 = model.Key('MyModel', 'foo-1')
m1 = MyModel(key=k1)
m1.put()
k2 = model.Key('MyModel', 'foo-2')
m2 = MyModel(key=k2)
m2.put()
q = MyModel.query(MyModel.key == k1)
res = q.get()
self.assertEqual(res, m1)
q = MyModel.query(MyModel.key > k1)
res = q.get()
self.assertEqual(res, m2)
q = MyModel.query(MyModel.key < k2)
res = q.get()
self.assertEqual(res, m1)
  def testUnicode(self):
    """Non-ASCII kind and property names round-trip through put/get/query."""
    class MyModel(model.Model):
      n = model.IntegerProperty(u'\u4321')
      @classmethod
      def _get_kind(cls):
        # Kind names are byte strings; encode the unicode name as UTF-8.
        return u'\u1234'.encode('utf-8')
    a = MyModel(n=42)
    k = a.put()
    b = k.get()
    self.assertEqual(a, b)
    self.assertFalse(a is b)
    # So far so good, now try queries
    res = MyModel.query(MyModel.n == 42).fetch()
    self.assertEqual(res, [a])
def testBlobQuery(self):
class MyModel(model.Model):
b = model.BlobProperty(indexed=True)
a = MyModel(b='\xff\x00')
a.put()
q = MyModel.query(MyModel.b == '\xff\x00')
it = iter(q)
b = it.next()
self.assertEqual(a, b)
def testKindlessQuery(self):
class ParentModel(model.Model):
a = model.StringProperty()
class ChildModel(model.Model):
b = model.StringProperty()
p = ParentModel(a="Test1")
p.put()
c = ChildModel(parent=p.key, b="Test2")
c.put()
q = query.Query(ancestor=p.key)
self.assertEqual(q.count(), 2)
l = q.fetch()
self.assertTrue(c in l)
self.assertTrue(p in l)
  def testExpandoQueries(self):
    """Each supported value type on an Expando can be queried by equality."""
    class Foo(model.Expando):
      pass
    # One representative value per queryable type.
    testdata = {'int': 42,
                'float': 3.14,
                'string': 'hello',
                'bool': True,
                # Don't call this 'key'; it interferes with the built-in
                # key attribute (the entity's key).
                'akey': model.Key('Foo', 1),
                'point': model.GeoPt(52.35, 4.9166667),
                'user': users.User('<EMAIL>', 'example.<EMAIL>', '123'),
                'blobkey': model.BlobKey('blah'),
                'none': None,
                }
    for name, value in testdata.iteritems():
      foo = Foo()
      setattr(foo, name, value)
      foo.put()
      # Expando has no static properties, so build the filter node directly.
      qry = Foo.query(query.FilterNode(name, '=', value))
      res = qry.get()
      self.assertTrue(res is not None, name)
      self.assertEqual(getattr(res, name), value)
      res.key.delete()
  def testQueryCacheInteraction(self):
    """Query results go through the context cache unless use_cache=False."""
    class Bar(model.Model):
      name = model.StringProperty()
    ctx = tasklets.get_context()
    ctx.set_cache_policy(True)
    a = Bar(name='a')
    a.put()
    b = a.key.get()
    self.assertTrue(b is a)  # Just verifying that the cache is on.
    b = Bar.query().get()
    self.assertTrue(b is a)
    a.name = 'x'  # Modify, but don't write.
    b = Bar.query().get()
    self.assertTrue(b is a)
    self.assertEqual(a.name, 'x')
    b = Bar.query().get(use_cache=False)  # Skip the cache.
    self.assertFalse(b is a)
    self.assertEqual(b.name, 'a')
    a.key = None  # Invalidate cache by resetting key.
    b = Bar.query().get()
    self.assertFalse(b is a)
    self.assertEqual(a.name, 'x')
    self.assertEqual(b.name, 'a')
def testGqlMinimal(self):
qry = query.gql('SELECT * FROM Foo')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestor(self):
key = model.Key('Foo', 42)
qry = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS KEY('%s')" %
key.urlsafe())
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, key)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestorWithParameter(self):
qry = query.gql('SELECT * FROM Foo WHERE ANCESTOR IS :1')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, query.Parameter(1))
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
  def testGqlFilter(self):
    """A WHERE with AND parses into a conjunction of filter nodes."""
    qry = query.gql("SELECT * FROM Foo WHERE name = 'joe' AND rate = 1")
    self.assertEqual(qry.kind, 'Foo')
    self.assertEqual(qry.ancestor, None)
    self.assertEqual(qry.filters,
                     query.ConjunctionNode(
                         query.FilterNode('name', '=', 'joe'),
                         query.FilterNode('rate', '=', 1)))
    self.assertEqual(qry.orders, None)
def testGqlOrder(self):
qry = query.gql('SELECT * FROM Foo ORDER BY name')
self.assertEqual(query._orders_to_orderings(qry.orders),
[('name', query._ASC)])
def testGqlOffset(self):
qry = query.gql('SELECT * FROM Foo OFFSET 2')
self.assertEqual(qry.default_options.offset, 2)
def testGqlLimit(self):
qry = query.gql('SELECT * FROM Foo LIMIT 2')
self.assertEqual(qry.default_options.limit, 2)
  def testGqlParameters(self):
    """Positional (:1) and named (:foo) parameters parse to ParameterNodes."""
    qry = query.gql('SELECT * FROM Foo WHERE name = :1 AND rate = :foo')
    self.assertEqual(qry.kind, 'Foo')
    self.assertEqual(qry.ancestor, None)
    self.assertEqual(qry.filters,
                     query.ConjunctionNode(
                         query.ParameterNode(Foo.name, '=',
                                             query.Parameter(1)),
                         query.ParameterNode(Foo.rate, '=',
                                             query.Parameter('foo'))))
    self.assertEqual(qry.orders, None)
def testGqlBindParameters(self):
pqry = query.gql('SELECT * FROM Foo WHERE name = :1')
qry = pqry.bind('joe')
self.assertEqual(list(qry), [self.joe])
qry = pqry.bind('jill')
self.assertEqual(list(qry), [self.jill])
  def testGqlUnresolvedParameters(self):
    """Executing a query with unbound parameters raises BadArgumentError."""
    self.ExpectErrors()
    qry = query.gql(
        'SELECT * FROM Foo WHERE name = :1')
    self.assertRaises(datastore_errors.BadArgumentError, qry.fetch)
    self.assertRaises(datastore_errors.BadArgumentError, qry.count)
    self.assertRaises(datastore_errors.BadArgumentError, list, qry)
    self.assertRaises(datastore_errors.BadArgumentError, qry.iter)
def checkGql(self, expected, gql, args=(), kwds={},
fetch=lambda q: list(q)):
actual = fetch(query.gql(gql).bind(*args, **kwds))
self.assertEqual(expected, actual)
  def testGqlBasicQueries(self):
    """SELECT * returns every Foo entity in key order."""
    self.checkGql([self.joe, self.jill, self.moe], "SELECT * FROM Foo")
  def testGqlKeyQueries(self):
    """SELECT __key__ yields keys instead of entities."""
    self.checkGql([self.joe.key, self.jill.key, self.moe.key],
                  "SELECT __key__ FROM Foo")
  def testGqlOperatorQueries(self):
    """Each comparison operator selects the expected fixture entities."""
    self.checkGql([self.joe], "SELECT * FROM Foo WHERE name = 'joe'")
    self.checkGql([self.moe], "SELECT * FROM Foo WHERE name > 'joe'")
    self.checkGql([self.jill], "SELECT * FROM Foo WHERE name < 'joe'")
    self.checkGql([self.joe, self.moe],
                  "SELECT * FROM Foo WHERE name >= 'joe'")
    self.checkGql([self.jill, self.joe],
                  "SELECT * FROM Foo WHERE name <= 'joe'")
    self.checkGql([self.jill, self.moe],
                  "SELECT * FROM Foo WHERE name != 'joe'")
    # NOTE: The ordering on these is questionable:
    self.checkGql([self.joe, self.jill],
                  "SELECT * FROM Foo WHERE name IN ('joe', 'jill')")
    self.checkGql([self.jill, self.joe],
                  "SELECT * FROM Foo WHERE name IN ('jill', 'joe')")
  def testGqlOrderQueries(self):
    """ORDER BY supports ASC/DESC, __key__, and multi-property sorts."""
    self.checkGql([self.jill, self.joe, self.moe],
                  "SELECT * FROM Foo ORDER BY name")
    self.checkGql([self.moe, self.joe, self.jill],
                  "SELECT * FROM Foo ORDER BY name DESC")
    self.checkGql([self.joe, self.jill, self.moe],
                  "SELECT * FROM Foo ORDER BY __key__ ASC")
    self.checkGql([self.moe, self.jill, self.joe],
                  "SELECT * FROM Foo ORDER BY __key__ DESC")
    self.checkGql([self.jill, self.joe, self.moe],
                  "SELECT * FROM Foo ORDER BY rate DESC, name")
  def testGqlOffsetQuery(self):
    """OFFSET skips the first result."""
    self.checkGql([self.jill, self.moe], "SELECT * FROM Foo OFFSET 1")
  def testGqlLimitQuery(self):
    """LIMIT truncates the result list."""
    self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2")
  def testGqlLimitOffsetQuery(self):
    """LIMIT and OFFSET combine: skip one, then take one."""
    self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1")
  def testGqlLimitOffsetQueryUsingFetch(self):
    """LIMIT/OFFSET are still honored when results come via fetch()."""
    self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1",
                  fetch=lambda q: q.fetch())
  # XXX TODO: Make this work:
  # def testGqlLimitQueryUsingFetch(self):
  #   self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2",
  #                 fetch=lambda q: q.fetch(3))
  def testGqlOffsetQueryUsingFetchPage(self):
    """fetch_page() pages through a LIMITed query one result at a time."""
    # NOTE(review): the method name says Offset but the query uses LIMIT;
    # it looks swapped with the following test -- confirm before renaming.
    q = query.gql("SELECT * FROM Foo LIMIT 2")
    res1, cur1, more1 = q.fetch_page(1)
    self.assertEqual([self.joe], res1)
    self.assertEqual(True, more1)
    res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1)
    self.assertEqual([self.jill], res2)
    # XXX TODO: Gotta make this work:
    # self.assertEqual(False, more2)
    # res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2)
    # self.assertEqual([], res3)
    # self.assertEqual(False, more3)
    # self.assertEqual(None, cur3)
  def testGqlLimitQueryUsingFetchPage(self):
    """fetch_page() pages through an OFFSET query to exhaustion."""
    # NOTE(review): the method name says Limit but the query uses OFFSET;
    # it looks swapped with the preceding test -- confirm before renaming.
    q = query.gql("SELECT * FROM Foo OFFSET 1")
    res1, cur1, more1 = q.fetch_page(1)
    self.assertEqual([self.jill], res1)
    self.assertEqual(True, more1)
    # NOTE: Without offset=0, the following break.
    res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1, offset=0)
    self.assertEqual([self.moe], res2)
    self.assertEqual(False, more2)
    res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2, offset=0)
    self.assertEqual([], res3)
    self.assertEqual(False, more3)
    self.assertEqual(None, cur3)
def testGqlParameterizedAncestor(self):
q = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS :1")
self.assertEqual([self.moe], q.bind(self.moe.key).fetch())
  def testGqlParameterizedInClause(self):
    """IN accepts a single sequence parameter or a LIST of parameters."""
    # NOTE: The ordering on these is questionable:
    q = query.gql("SELECT * FROM Foo WHERE name IN :1")
    self.assertEqual([self.jill, self.joe], q.bind(('jill', 'joe')).fetch())
    # Exercise the LIST function.
    q = query.gql("SELECT * FROM Foo WHERE name IN (:a, :b)")
    self.assertEqual([self.jill, self.joe], q.bind(a='jill', b='joe').fetch())
    # Generate OR/AND nodes containing parameter nodes.
    q = query.gql("SELECT * FROM Foo WHERE name = :1 AND rate in (1, 2)")
    self.assertEqual([self.jill], q.bind('jill').fetch())
  def testGqlKeyFunction(self):
    """KEY() accepts urlsafe strings, (kind, id) pairs, and bound params."""
    class Bar(model.Model):
      ref = model.KeyProperty(kind=Foo)
    noref = Bar()
    noref.put()
    joeref = Bar(ref=self.joe.key)
    joeref.put()
    moeref = Bar(ref=self.moe.key)
    moeref.put()
    # NULL literal and a bound None both match the unset property.
    self.assertEqual(
        [noref],
        Bar.gql("WHERE ref = NULL").fetch())
    self.assertEqual(
        [noref],
        Bar.gql("WHERE ref = :1").bind(None).fetch())
    self.assertEqual(
        [joeref],
        Bar.gql("WHERE ref = :1").bind(self.joe.key).fetch())
    self.assertEqual(
        [joeref],
        Bar.gql("WHERE ref = KEY('%s')" % self.joe.key.urlsafe()).fetch())
    self.assertEqual(
        [joeref],
        Bar.gql("WHERE ref = KEY('Foo', %s)" % self.joe.key.id()).fetch())
    self.assertEqual(
        [joeref],
        Bar.gql("WHERE ref = KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
    self.assertEqual(
        [joeref],
        Bar.gql("WHERE ref = KEY('Foo', :1)").bind(self.joe.key.id()).fetch())
  def testGqlKeyFunctionAncestor(self):
    """KEY() variants also work inside an ANCESTOR IS clause."""
    class Bar(model.Model):
      pass
    nobar = Bar()
    nobar.put()
    joebar = Bar(parent=self.joe.key)
    joebar.put()
    moebar = Bar(parent=self.moe.key)
    moebar.put()
    self.assertEqual(
        [joebar],
        Bar.gql("WHERE ANCESTOR IS KEY('%s')" % self.joe.key.urlsafe()).fetch())
    self.assertEqual(
        [joebar],
        Bar.gql("WHERE ANCESTOR IS :1").bind(self.joe.key).fetch())
    self.assertEqual(
        [joebar],
        Bar.gql("WHERE ANCESTOR IS KEY(:1)").bind(
            self.joe.key.urlsafe()).fetch())
    self.assertEqual(
        [joebar],
        Bar.gql("WHERE ANCESTOR IS KEY('Foo', :1)")
        .bind(self.joe.key.id()).fetch())
def testGqlAncestorFunctionError(self):
self.assertRaises(TypeError,
query.gql, 'SELECT * FROM Foo WHERE ANCESTOR IS USER(:1)')
  def testGqlOtherFunctions(self):
    """USER/GEOPT/DATETIME/DATE/TIME GQL functions accept bound parameters."""
    class Bar(model.Model):
      auser = model.UserProperty()
      apoint = model.GeoPtProperty()
      adatetime = model.DateTimeProperty()
      adate = model.DateProperty()
      atime = model.TimeProperty()
    abar = Bar(
        auser=users.User('<EMAIL>'),
        apoint=model.GeoPt(52.35, 4.9166667),
        adatetime=datetime.datetime(2012, 2, 1, 14, 54, 0),
        adate=datetime.date(2012, 2, 2),
        atime=datetime.time(14, 54, 0),
    )
    abar.put()
    bbar = Bar()
    bbar.put()
    self.assertEqual(
        [abar.key],
        query.gql("SELECT __key__ FROM Bar WHERE auser=USER(:1)")
        .bind('<EMAIL>').fetch())
    self.assertEqual(
        [abar.key],
        query.gql("SELECT __key__ FROM Bar WHERE apoint=GEOPT(:1, :2)")
        .bind(52.35, 4.9166667).fetch())
    self.assertEqual(
        [abar.key],
        query.gql("SELECT __key__ FROM Bar WHERE adatetime=DATETIME(:1)")
        .bind('2012-02-01 14:54:00').fetch())
    # Note :2 is deliberately reused for month and day -> 2012-02-02.
    self.assertEqual(
        [abar.key],
        query.gql("SELECT __key__ FROM Bar WHERE adate=DATE(:1, :2, :2)")
        .bind(2012, 2).fetch())
    self.assertEqual(
        [abar.key],
        query.gql("SELECT __key__ FROM Bar WHERE atime=TIME(:hour, :min, :sec)")
        .bind(hour=14, min=54, sec=0).fetch())
  def testGqlStructuredPropertyQuery(self):
    """GQL can filter on sub-properties and on whole structured values."""
    class Bar(model.Model):
      foo = model.StructuredProperty(Foo)
    barf = Bar(foo=Foo(name='one', rate=3, tags=['a', 'b']))
    barf.put()
    barg = Bar(foo=Foo(name='two', rate=4, tags=['b', 'c']))
    barg.put()
    barh = Bar()
    barh.put()
    # TODO: Once SDK 1.6.3 is released, drop quotes around foo.name.
    q = Bar.gql("WHERE \"foo.name\" = 'one'")
    self.assertEqual([barf], q.fetch())
    # Matching against a whole Foo value matches on its set sub-properties.
    q = Bar.gql("WHERE foo = :1").bind(Foo(name='two', rate=4))
    self.assertEqual([barg], q.fetch())
    q = Bar.gql("WHERE foo = NULL")
    self.assertEqual([barh], q.fetch())
    q = Bar.gql("WHERE foo = :1")
    self.assertEqual([barh], q.bind(None).fetch())
def testGqlExpandoProperty(self):
class Bar(model.Expando):
pass
babar = Bar(name='Babar')
babar.put()
bare = Bar(nude=42)
bare.put()
q = Bar.gql("WHERE name = 'Babar'")
self.assertEqual([babar], q.fetch())
q = Bar.gql("WHERE nude = :1")
self.assertEqual([bare], q.bind(42).fetch())
def testGqlExpandoInStructure(self):
class Bar(model.Expando):
pass
class Baz(model.Model):
bar = model.StructuredProperty(Bar)
bazar = Baz(bar=Bar(bow=1, wow=2))
bazar.put()
bazone = Baz()
bazone.put()
q = Baz.gql("WHERE \"bar.bow\" = 1")
self.assertEqual([bazar], q.fetch())
def testGqlKindlessQuery(self):
results = query.gql('SELECT *').fetch()
self.assertEqual([self.joe, self.jill, self.moe], results)
  def testGqlSubclass(self):
    """bind()/filter()/order() preserve a custom Query subclass and options."""
    # You can pass _gql() a subclass of Query and it'll use that.
    class MyQuery(query.Query):
      pass
    q = query._gql("SELECT * FROM Foo WHERE name = :1", query_class=MyQuery)
    self.assertTrue(isinstance(q, MyQuery))
    # And bind() preserves the class.
    qb = q.bind('joe')
    self.assertTrue(isinstance(qb, MyQuery))
    # .filter() also preserves the class, as well as default_options.
    qf = q.filter(Foo.rate == 1)
    self.assertTrue(isinstance(qf, MyQuery))
    self.assertEqual(qf.default_options, q.default_options)
    # Same for .options().
    qo = q.order(-Foo.name)
    self.assertTrue(isinstance(qo, MyQuery))
    self.assertEqual(qo.default_options, q.default_options)
def testGqlUnusedBindings(self):
# Only unused positional bindings raise an error.
q = Foo.gql("WHERE ANCESTOR IS :1 AND rate >= :2")
qb = q.bind(self.joe.key, 2, foo=42) # Must not fail
self.assertRaises(datastore_errors.BadArgumentError, q.bind)
self.assertRaises(datastore_errors.BadArgumentError, q.bind, self.joe.key)
self.assertRaises(datastore_errors.BadArgumentError, q.bind,
self.joe.key, 2, 42)
def testGqlWithBind(self):
q = Foo.gql("WHERE name = :1", 'joe')
self.assertEqual([self.joe], q.fetch())
def testGqlAnalyze(self):
q = Foo.gql("WHERE name = 'joe'")
self.assertEqual([], q.analyze())
q = Foo.gql("WHERE name = :1 AND rate = :2")
self.assertEqual([1, 2], q.analyze())
q = Foo.gql("WHERE name = :foo AND rate = :bar")
self.assertEqual(['bar', 'foo'], q.analyze())
q = Foo.gql("WHERE tags = :1 AND name = :foo AND rate = :bar")
self.assertEqual([1, 'bar', 'foo'], q.analyze())
def testGqlGroupBy(self):
q = query.gql("SELECT DISTINCT name, tags FROM Foo "
"WHERE name < 'joe' ORDER BY name")
self.assertEquals(('name', 'tags'), q.projection)
self.assertEquals(('name', 'tags'), q.group_by)
self.assertEquals(True, q.is_distinct)
ents = q.fetch()
ents.sort(key=lambda ent: ent.tags)
self.assertEqual(ents, [Foo(name='jill', tags=['jack'],
key=self.jill.key,
projection=['name', 'tags']),
Foo(name='jill', tags=['jill'],
key=self.jill.key,
projection=('name', 'tags'))])
def testGqlProjection(self):
q = query.gql("SELECT name, tags FROM Foo WHERE name < 'joe' ORDER BY name")
self.assertEquals(('name', 'tags'), q.projection)
self.assertEquals(None, q.group_by)
self.assertEquals(False, q.is_distinct)
ents = q.fetch()
ents.sort(key=lambda ent: ent.tags)
self.assertEqual(ents, [Foo(name='jill', tags=['jack'],
key=self.jill.key,
projection=['name', 'tags']),
Foo(name='jill', tags=['jill'],
key=self.jill.key,
projection=('name', 'tags'))])
  def testGqlBadProjection(self):
    """Projecting an unknown property is rejected at parse time."""
    # NOTE(review): BadProjectionError and InvalidPropertyError appear to
    # name the same condition here (both assertions check the same call) --
    # confirm whether one is an alias of the other.
    self.assertRaises(model.BadProjectionError,
                      query.gql, "SELECT qqq FROM Foo")
    self.assertRaises(model.InvalidPropertyError,
                      query.gql, "SELECT qqq FROM Foo")
def testGqlBadKind(self):
self.assertRaises(model.KindError,
query.gql, "SELECT * FROM Whatever")
  def testAsyncNamespace(self):
    """Async queries capture the namespace at *_async() call time."""
    # Test that async queries pick up the namespace when the
    # foo_async() call is made, not later.
    # See issue 168. http://goo.gl/aJp7i
    namespace_manager.set_namespace('mission')
    barney = Foo(name='Barney')
    barney.put()
    willy = Foo(name='Willy')
    willy.put()
    q1 = Foo.query()
    qm = Foo.query(Foo.name.IN(['Barney', 'Willy'])).order(Foo._key)
    # Test twice: once with a simple query, once with a MultiQuery.
    for q in q1, qm:
      # Test fetch_async().
      namespace_manager.set_namespace('mission')
      fut = q.fetch_async()
      # Switching namespaces after the call must not affect the result.
      namespace_manager.set_namespace('impossible')
      res = fut.get_result()
      self.assertEqual(res, [barney, willy])
      # Test map_async().
      namespace_manager.set_namespace('mission')
      fut = q.map_async(None)
      namespace_manager.set_namespace('impossible')
      res = fut.get_result()
      self.assertEqual(res, [barney, willy])
      # Test get_async().
      namespace_manager.set_namespace('mission')
      fut = q.get_async()
      namespace_manager.set_namespace('impossible')
      res = fut.get_result()
      self.assertEqual(res, barney)
      # Test count_async().
      namespace_manager.set_namespace('mission')
      fut = q.count_async()
      namespace_manager.set_namespace('impossible')
      res = fut.get_result()
      self.assertEqual(res, 2)
      # Test fetch_page_async().
      namespace_manager.set_namespace('mission')
      fut = q.fetch_page_async(2)
      namespace_manager.set_namespace('impossible')
      res, cur, more = fut.get_result()
      self.assertEqual(res, [barney, willy])
      self.assertEqual(more, False)
  def hugeOffsetTestHelper(self, fetch):
    """Helper function to test offsets larger than the stub's max offset.

    Args:
      fetch: A function that takes in (query, offset) and returns a list with
        one result.
    """
    # See issue 210. http://goo.gl/EDfHa
    # Vastly reduce _MAX_QUERY_OFFSET since otherwise the test spends
    # several seconds creating enough entities to reproduce the problem.
    save_max_query_offset = datastore_stub_util._MAX_QUERY_OFFSET
    try:
      datastore_stub_util._MAX_QUERY_OFFSET = 10
      ndb = model
      class M(ndb.Model):
        a = ndb.IntegerProperty()
      # Zero-padded string ids keep key order aligned with 'a'.
      ms = [M(a=i, id='%04d' % i) for i in range(33)]
      ks = ndb.put_multi(ms)
      q = M.query().order(M.a)
      # Probe offsets below, at, and well beyond the reduced max (10).
      xs = fetch(q, 9)
      self.assertEqual(xs, ms[9:10])
      xs = fetch(q, 10)
      self.assertEqual(xs, ms[10:11])
      xs = fetch(q, 11)
      self.assertEqual(xs, ms[11:12])
      xs = fetch(q, 21)
      self.assertEqual(xs, ms[21:22])
      xs = fetch(q, 31)
      self.assertEqual(xs, ms[31:32])
    finally:
      # Always restore the stub's original limit.
      datastore_stub_util._MAX_QUERY_OFFSET = save_max_query_offset
def testHugeOffset(self):
"""Test offset > MAX_OFFSET for fetch."""
def fetch_one(qry, offset):
return qry.fetch(1, offset=offset)
self.hugeOffsetTestHelper(fetch_one)
  def testHugeOffsetRunToQueue(self):
    """Test offset > MAX_OFFSET for run_to_queue."""
    def fetch_from_queue(qry, offset):
      # Drive the query through run_to_queue and unpack the entities
      # (each queue item is a (batch, index, entity) triple).
      queue = tasklets.MultiFuture()
      options = query.QueryOptions(offset=offset, limit=1)
      qry.run_to_queue(queue, self.conn, options).check_success()
      results = queue.get_result()
      return [result[2] for result in results]
    self.hugeOffsetTestHelper(fetch_from_queue)
class IndexListTestMixin(object):
  """Tests for Index lists. Must be used with BaseQueryTestMixin."""
  def create_index(self):
    """Register a composite index (Foo: name DESC, tags ASC) with the stub."""
    ci = datastore_stub_util.datastore_pb.CompositeIndex()
    ci.set_app_id(os.environ['APPLICATION_ID'])
    ci.set_id(0)
    ci.set_state(ci.WRITE_ONLY)
    index = ci.mutable_definition()
    index.set_ancestor(0)
    index.set_entity_type('Foo')
    property = index.add_property()
    property.set_name('name')
    property.set_direction(property.DESCENDING)
    property = index.add_property()
    property.set_name('tags')
    property.set_direction(property.ASCENDING)
    stub = self.testbed.get_stub('datastore_v3')
    stub.CreateIndex(ci)
  def testIndexListPremature(self):
    """index_list() is None until the iterator has produced a result."""
    # Before calling next() we don't have the information.
    self.create_index()
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    self.assertEqual(qi.index_list(), None)
  def testIndexListEmpty(self):
    """A single-property equality query needs no composite index."""
    # A simple query requires no composite indexes.
    q = Foo.query(Foo.name == 'joe', Foo.tags == 'joe')
    qi = q.iter()
    qi.next()
    self.assertEqual(qi.index_list(), [])
  def testIndexListNontrivial(self):
    """An inequality+equality query reports the composite index it used."""
    # Test a non-trivial query.
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    qi.next()
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='asc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])
  def testIndexListExhausted(self):
    """index_list() remains available after the iterator is exhausted."""
    # Test that the information is preserved after the iterator is
    # exhausted.
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    list(qi)
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='asc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])
  def testIndexListWithIndexAndOrder(self):
    """A sorted query reports the index matching its sort directions."""
    # Test a non-trivial query with sort order and an actual composite
    # index present.
    self.create_index()
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    q = q.order(-Foo.name, Foo.tags)
    qi = q.iter()
    qi.next()
    # TODO: This is a little odd, because that's not exactly the index
    # we created...?
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='desc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])
  def testIndexListMultiQuery(self):
    """index_list() is not supported (None) for OR-based MultiQueries."""
    self.create_index()
    q = Foo.query(query.OR(Foo.name == 'joe', Foo.name == 'jill'))
    qi = q.iter()
    qi.next()
    self.assertEqual(qi.index_list(), None)
class QueryV3Tests(test_utils.NDBTest, BaseQueryTestMixin, IndexListTestMixin):
  """Query tests that use a connection to a Datastore V3 stub."""
  def setUp(self):
    """Run harness setup first, then create the shared fixtures."""
    test_utils.NDBTest.setUp(self)
    BaseQueryTestMixin.setUp(self)
  def testConstructorOptionsInteractions(self):
    """V3 rules for combining projection, keys_only, and group_by options."""
    self.ExpectWarnings()
    qry = Foo.query(projection=[Foo.name, Foo.rate])
    # Keys only overrides projection.
    qry.get(keys_only=True)
    # Projection overrides original projection.
    qry.get(projection=Foo.tags)
    # Cannot override both.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags, keys_only=True)
    qry = Foo.query(projection=[Foo.name, Foo.rate], distinct=True)
    # Cannot project something out side the group by.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags)
    # Can project a subset of the group by.
    qry.get(projection=Foo.name)
    # Keys only overrides projection but a projection is required for group_by.
    self.assertRaises(datastore_errors.BadRequestError,
                      qry.get, keys_only=True)
  def testCursorsForMultiQuery(self):
    """MultiQuery exposes cursor_before() but not cursor_after() mid-stream."""
    # Only relevant for V3 since V1 has per result cursors.
    # TODO(pcostello): This should throw a better error.
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query._MultiQuery([q1, q2])
    it = qq.iter()
    it.next()
    it.cursor_before()  # Start cursor
    self.assertRaises(AttributeError, it.cursor_after)
    it.next()
    it.cursor_before()  # Start of second query
    it.cursor_after()  # End of batch cursor
    self.assertFalse(it.has_next())
@real_unittest.skipUnless(datastore_pbs._CLOUD_DATASTORE_ENABLED,
                          "V1 must be supported to run V1 tests.")
class QueryV1Tests(test_utils.NDBCloudDatastoreV1Test, BaseQueryTestMixin):
  """Query tests that use a connection to a Cloud Datastore V1 stub."""
  def setUp(self):
    """Run harness setup first, then create the shared fixtures."""
    test_utils.NDBCloudDatastoreV1Test.setUp(self)
    BaseQueryTestMixin.setUp(self)
  def testConstructorOptionsInteractions(self):
    """V1 rules for combining projection, keys_only, and group_by options."""
    self.ExpectWarnings()
    qry = Foo.query(projection=[Foo.name, Foo.rate])
    # Keys only overrides projection.
    qry.get(keys_only=True)
    # Projection overrides original projection.
    qry.get(projection=Foo.tags)
    # Can override both (unlike V3, where this combination is an error).
    qry.get(projection=Foo.tags, keys_only=True)
    qry = Foo.query(projection=[Foo.name, Foo.rate], distinct=True)
    # Cannot project something out side the group by.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags)
    # Can project a subset of the group by.
    qry.get(projection=Foo.name)
    # Keys only overrides projection but a projection is required for group_by.
    self.assertRaises(datastore_errors.BadRequestError,
                      qry.get, keys_only=True)
# Run this module's tests when executed directly.
if __name__ == '__main__':
  unittest.main()
|
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for query.py."""
import datetime
import os
from .google_imports import datastore_errors
from .google_imports import datastore_pbs
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from .google_imports import users
from .google_test_imports import datastore_stub_util
from .google_test_imports import real_unittest
from .google_test_imports import unittest
from . import model
from . import query
from . import tasklets
from . import test_utils
class BaseQueryTestMixin(object):
  def setUp(self):
    """Define the Foo test model and seed the datastore with fixtures."""
    # Create class inside tests because kinds are cleared every test.
    global Foo
    class Foo(model.Model):
      name = model.StringProperty()
      rate = model.IntegerProperty()
      tags = model.StringProperty(repeated=True)
    self.create_entities()
  # Module under test; consumed by the shared test-utils machinery.
  the_module = query
def create_entities(self):
self.joe = Foo(name='joe', tags=['joe', 'jill', 'hello'], rate=1)
self.joe.put()
self.jill = Foo(name='jill', tags=['jack', 'jill'], rate=2)
self.jill.put()
self.moe = Foo(name='moe', rate=1)
self.moe.put()
def testBasicQuery(self):
q = query.Query(kind='Foo')
q = q.filter(Foo.name >= 'joe').filter(Foo.name <= 'moe').filter()
res = list(q)
self.assertEqual(res, [self.joe, self.moe])
def testOrderedQuery(self):
q = query.Query(kind='Foo')
q = q.order(Foo.rate).order().order(-Foo.name)
res = list(q)
self.assertEqual(res, [self.moe, self.joe, self.jill])
  def testQueryError(self):
    """Invalid Query constructor arguments raise TypeError/ValueError."""
    self.assertRaises(TypeError, query.Query,
                      ancestor=query.ParameterizedFunction('user',
                                                           query.Parameter(1)))
    self.assertRaises(TypeError, query.Query, ancestor=42)
    # An incomplete key (None id) cannot be an ancestor.
    self.assertRaises(ValueError, query.Query, ancestor=model.Key('X', None))
    # app/namespace must not conflict with the ancestor key's own.
    self.assertRaises(TypeError, query.Query,
                      ancestor=model.Key('X', 1), app='another')
    self.assertRaises(TypeError, query.Query,
                      ancestor=model.Key('X', 1), namespace='another')
    self.assertRaises(TypeError, query.Query, filters=42)
    self.assertRaises(TypeError, query.Query, orders=42)
    self.assertRaises(TypeError, query.Query, default_options=42)
  def testQueryAttributes(self):
    """kind/ancestor/filters/orders reflect each construction step."""
    q = query.Query(kind='Foo')
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, None)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    key = model.Key('Barba', 'papa')
    q = query.Query(kind='Foo', ancestor=key)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    q = q.filter(Foo.rate == 1)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    self.assertEqual(q.orders, None)
    q = q.order(-Foo.name)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    expected_order = [('name', query._DESC)]
    self.assertEqual(query._orders_to_orderings(q.orders), expected_order)
  def testQueryRepr(self):
    """repr() shows kind, ancestor, app/namespace, options, and projection."""
    q = Foo.query()
    self.assertEqual(repr(q), "Query(kind='Foo')")
    q = Foo.query(ancestor=model.Key('Bar', 1))
    self.assertEqual(repr(q), "Query(kind='Foo', ancestor=Key('Bar', 1))")
    # Let's not specify what it should show for filters and orders,
    # just test that it doesn't blow up.
    q1 = q.filter(Foo.rate == 1, Foo.name == 'x')
    repr(q1)
    q2 = q1.order(-Foo.rate)
    repr(q2)
    # App and namespace.
    q3 = Foo.query(app='a', namespace='ns')
    self.assertEqual(repr(q3), "Query(app='a', namespace='ns', kind='Foo')")
    # default_options.
    q4 = Foo.query(default_options=query.QueryOptions(limit=3))
    self.assertEqual(
        repr(q4),
        "Query(kind='Foo', default_options=QueryOptions(limit=3))")
    q5 = Foo.query(projection=[Foo.name, 'tags'], distinct=True)
    self.assertEqual(
        repr(q5),
        "Query(kind='Foo', projection=['name', 'tags'], "
        "group_by=['name', 'tags'])")
def testRunToQueue(self):
qry = Foo.query()
queue = tasklets.MultiFuture()
qry.run_to_queue(queue, self.conn).check_success()
results = queue.get_result()
self.assertEqual(len(results), 3)
self.assertEqual(results[0][2], self.joe)
self.assertEqual(results[1][2], self.jill)
self.assertEqual(results[2][2], self.moe)
  def testRunToQueueError(self):
    """An invalid query surfaces its error on both the future and the queue."""
    self.ExpectWarnings()
    # Two inequality filters on different properties are illegal.
    qry = Foo.query(Foo.name > '', Foo.rate > 0)
    queue = tasklets.MultiFuture()
    fut = qry.run_to_queue(queue, self.conn)
    self.assertRaises(datastore_errors.BadRequestError, fut.check_success)
    self.assertRaises(datastore_errors.BadRequestError, queue.check_success)
  def testModernQuerySyntax(self):
    """Filter/order nodes use the datastore name ('Age'), not the code name."""
    class Employee(model.Model):
      name = model.StringProperty()
      age = model.IntegerProperty('Age')
      rank = model.IntegerProperty()
      @classmethod
      def seniors(cls, min_age, min_rank):
        # Typical query-building classmethod idiom.
        q = cls.query().filter(cls.age >= min_age, cls.rank <= min_rank)
        q = q.order(cls.name, -cls.age)
        return q
    q = Employee.seniors(42, 5)
    self.assertEqual(q.filters,
                     query.ConjunctionNode(
                         query.FilterNode('Age', '>=', 42),
                         query.FilterNode('rank', '<=', 5)))
    self.assertEqual(query._orders_to_orderings(q.orders),
                     [('name', query._ASC), ('Age', query._DESC)])
  def testAndQuery(self):
    """AND with one operand collapses; with two it builds a conjunction."""
    class Employee(model.Model):
      name = model.StringProperty()
      age = model.IntegerProperty('Age')
      rank = model.IntegerProperty()
    q = Employee.query().filter(query.AND(Employee.age >= 42))
    self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
    q = Employee.query(query.AND(Employee.age >= 42, Employee.rank <= 5))
    self.assertEqual(q.filters,
                     query.ConjunctionNode(
                         query.FilterNode('Age', '>=', 42),
                         query.FilterNode('rank', '<=', 5)))
  def testOrQuery(self):
    """OR with one operand collapses; with two it builds a disjunction."""
    class Employee(model.Model):
      name = model.StringProperty()
      age = model.IntegerProperty('Age')
      rank = model.IntegerProperty()
    q = Employee.query().filter(query.OR(Employee.age >= 42))
    self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
    q = Employee.query(query.OR(Employee.age < 42, Employee.rank > 5))
    self.assertEqual(q.filters,
                     query.DisjunctionNode(
                         query.FilterNode('Age', '<', 42),
                         query.FilterNode('rank', '>', 5)))
  def testEmptyInFilter(self):
    """IN with an empty sequence yields FalseNode and fails at execution."""
    self.ExpectWarnings()
    class Employee(model.Model):
      name = model.StringProperty()
    for arg in [], (), set(), frozenset():
      q = Employee.query(Employee.name.IN(arg))
      self.assertEqual(q.filters, query.FalseNode())
      self.assertNotEqual(q.filters, 42)
      # Running the query (not building it) raises BadQueryError.
      f = iter(q).has_next_async()
      self.assertRaises(datastore_errors.BadQueryError, f.check_success)
def testSingletonInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['xyzzy']))
self.assertEqual(q.filters, query.FilterNode('name', '=', 'xyzzy'))
self.assertNotEqual(q.filters, 42)
e = Employee(name='xyzzy')
e.put()
self.assertEqual(q.get(), e)
def testInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['a', 'b']))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('name', '=', 'a'),
query.FilterNode('name', '=', 'b')))
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
self.assertEqual(list(q), [a, b])
  def testInFilterArgTypes(self):
    """IN() accepts any sequence-like container: tuple, set, frozenset."""
    class Employee(model.Model):
      name = model.StringProperty()
    a = Employee(name='a')
    a.put()
    b = Employee(name='b')
    b.put()
    for arg in ('a', 'b'), set(['a', 'b']), frozenset(['a', 'b']):
      q = Employee.query(Employee.name.IN(arg))
      # Compare as a set: result order is unspecified for disjunctions.
      self.assertEqual(set(x.name for x in q), set(['a', 'b']))
  def testInFilterWithNone(self):
    """IN() lists containing None must match entities whose property is
    unset, across several property types (string, key, int, date)."""
    class Employee(model.Model):
      # Try a few different property types, to get a good mix of what
      # used to fail.
      name = model.StringProperty()
      boss = model.KeyProperty()
      age = model.IntegerProperty()
      date = model.DateProperty()
    # 42L: Python 2 long literal, exercising the int/long property path.
    a = Employee(name='a', age=42L)
    a.put()
    bosskey = model.Key(Employee, 'x')
    b = Employee(boss=bosskey, date=datetime.date(1996, 1, 31))
    b.put()
    keys = set([a.key, b.key])
    # Each query should match both: one entity by value, one by None.
    q1 = Employee.query(Employee.name.IN(['a', None]))
    self.assertEqual(set(e.key for e in q1), keys)
    q2 = Employee.query(Employee.boss.IN([bosskey, None]))
    self.assertEqual(set(e.key for e in q2), keys)
    q3 = Employee.query(Employee.age.IN([42, None]))
    self.assertEqual(set(e.key for e in q3), keys)
    q4 = Employee.query(Employee.date.IN([datetime.date(1996, 1, 31), None]))
    self.assertEqual(set(e.key for e in q4), keys)
  def testQueryExceptions(self):
    """A query with inequality filters on two different properties is
    invalid; the error surfaces when the async result is checked."""
    self.ExpectWarnings()
    q = Foo.query(Foo.name > '', Foo.rate > 0)
    f = q.fetch_async()
    self.assertRaises(datastore_errors.BadRequestError, f.check_success)
  def testQueryUnindexedFails(self):
    """Filtering on unindexed properties (Text, Blob, unindexed sub-fields,
    LocalStructuredProperty) raises BadFilterError at filter-build time."""
    # Shouldn't be able to query for unindexed properties
    class SubModel(model.Model):
      booh = model.IntegerProperty(indexed=False)
    class Emp(model.Model):
      name = model.StringProperty()
      text = model.TextProperty()
      blob = model.BlobProperty()
      sub = model.StructuredProperty(SubModel)
      struct = model.StructuredProperty(Foo, indexed=False)
      local = model.LocalStructuredProperty(Foo)
    Emp.query(Emp.name == 'a').fetch()  # Should pass
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.text == 'a')
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.text.IN(['a', 'b']))
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.blob == 'a')
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.sub == SubModel(booh=42))
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.sub.booh == 42)
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.struct == Foo(name='a'))
    # TODO: Make this fail?  See issue 89.  http://goo.gl/K4gbY
    # Currently StructuredProperty(..., indexed=False) has no effect.
    # self.assertRaises(datastore_errors.BadFilterError,
    #                   lambda: Emp.struct.name == 'a')
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Emp.local == Foo(name='a'))
  def testConstructor(self):
    """Model.query() argument validation: distinct/projection/group_by
    combinations, clashes with default_options, empty index-only property
    lists, and ancestor/namespace consistency."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp')  # Also check renaming.
      q = model.IntegerProperty(required=True)
    key = Foo(p=1, q=2, namespace='ns').put()
    # Check distinct validation: distinct requires a projection and must
    # not be combined with an explicit group_by.
    self.assertRaises(TypeError, Foo.query, distinct=True)
    self.assertRaises(TypeError, Foo.query, distinct=False)
    self.assertRaises(TypeError, Foo.query,
                      distinct=True, projection=Foo.p, group_by=[])
    self.assertRaises(TypeError, Foo.query,
                      distinct=False, projection=Foo.p, group_by=[])
    # Check both projection and default_options.projection/keys_only is not
    # allowed.
    self.assertRaises(TypeError, Foo.query,
                      projection='pp',
                      default_options=query.QueryOptions(projection=['pp']))
    self.assertRaises(TypeError, Foo.query,
                      projection='pp',
                      default_options=query.QueryOptions(keys_only=False))
    # Check empty projection/group_by not allowed.
    for empty in ([], tuple()):
      self.assertRaises(TypeError, Foo.query, projection=empty)
      self.assertRaises(TypeError, Foo.query, group_by=empty)
    # Check that ancestor and namespace must match.
    self.assertRaises(TypeError, Foo.query, namespace='other', ancestor=key)
def testIsDistinct(self):
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming.
q = model.IntegerProperty(required=True)
for qry in (Foo.query(projection=[Foo.p, 'q'], distinct=True),
Foo.query(projection=[Foo.p, 'q'],
group_by=(Foo.q, 'pp', Foo.p))):
self.assertEquals(True, qry.is_distinct)
for qry in (Foo.query(),
Foo.query(projection=[Foo.p, 'q'])):
self.assertEquals(False, qry.is_distinct)
def testIndexOnlyPropertyListNormalization(self):
class Foo(model.Model):
p = model.IntegerProperty('pp') # Also check renaming.
def assertNormalization(expected, value):
q1 = Foo.query(group_by=value, projection=value)
q2 = Foo.query(distinct=True, projection=value)
# make sure it survives mutation.
q1 = q1.order(Foo.p).filter(Foo.p > 0)
q2 = q2.order(Foo.p).filter(Foo.p > 0)
self.assertEquals(expected, q1.group_by)
self.assertEquals(expected, q1.projection)
self.assertEquals(expected, q2.group_by)
self.assertEquals(expected, q2.projection)
for value in (('pp',), ['pp']):
assertNormalization(('pp',), value)
  def testIndexOnlyPropertyValidation(self):
    """Invalid projection/group_by arguments raise: non-property entries,
    non-sequence values, unindexed or unknown property names. Both the new
    InvalidPropertyError and its legacy alias BadProjectionError apply."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp', indexed=False)  # Also check renaming.
      q = model.IntegerProperty(required=True)
    self.assertRaises(TypeError,
                      Foo.query, group_by=[Foo.q, 42], projection=[Foo.q])
    self.assertRaises(datastore_errors.BadArgumentError,
                      Foo.query().get, projection=[42])
    self.assertRaises(TypeError,
                      Foo.query, group_by=Foo.q, projection=[Foo.q])
    self.assertRaises(TypeError,
                      Foo.query, projection=Foo.q)
    # Legacy support for single value projection
    Foo.query().get(projection=Foo.q)
    # Foo.p is unindexed; 'wot' is not a property at all.
    for bad in ((Foo.p,), ['wot']):
      self.assertRaises(model.InvalidPropertyError, Foo.query,
                        group_by=bad, projection=[Foo.q])
      self.assertRaises(model.BadProjectionError, Foo.query,
                        group_by=bad, projection=[Foo.q])
      self.assertRaises(model.InvalidPropertyError, Foo.query, projection=bad)
      self.assertRaises(model.BadProjectionError, Foo.query, projection=bad)
      self.assertRaises(model.InvalidPropertyError,
                        Foo.query().get, projection=bad)
      self.assertRaises(model.BadProjectionError,
                        Foo.query().get, projection=bad)
  def testGroupByQuery(self):
    """group_by queries return one row per distinct (r, p) index entry;
    repeated property r fans each entity out per value. Also checks that
    a projection supplied via default_options behaves the same, and that
    other default options (limit) are honored."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp')  # Also check renaming
      q = model.IntegerProperty(required=True)
      r = model.IntegerProperty(repeated=True)
      d = model.IntegerProperty(default=42)
    key1 = Foo(p=1, q=5, r=[3, 4, 5]).put()
    key2 = Foo(p=1, q=4, r=[3, 4]).put()
    key3 = Foo(p=2, q=3, r=[3, 4]).put()
    key4 = Foo(p=2, q=2, r=[3]).put()
    qry = Foo.query(projection=[Foo.p], group_by=[Foo.r, Foo.p])
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    # One result per distinct (p, r) pair, lowest q first within a group.
    expected = [(1, key2), (1, key2), (1, key1), (2, key4), (2, key3)]
    # Test fetch and iter in base case.
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry])
    # Test projection using default options.
    qry = Foo.query(group_by=[Foo.r, Foo.p],
                    default_options=query.QueryOptions(projection=['pp']))
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected, [(ent.p, ent.key) for ent in qry])
    # Test projection with other default options.
    qry = Foo.query(projection=[Foo.p], group_by=[Foo.r, Foo.p],
                    default_options=query.QueryOptions(limit=4))
    qry = qry.order(Foo.p, Foo.r, Foo.q)
    self.assertEqual(expected[:4], [(ent.p, ent.key) for ent in qry.fetch()])
    self.assertEqual(expected[:4], [(ent.p, ent.key) for ent in qry])
  def testProjectionQuery(self):
    """Projection queries return partial entities: projected properties are
    readable (under their datastore names), unprojected ones raise
    UnprojectedPropertyError, and a repeated projected property yields one
    result per value."""
    self.ExpectWarnings()
    class Foo(model.Model):
      p = model.IntegerProperty('pp')  # Also check renaming
      q = model.IntegerProperty(required=True)
      r = model.IntegerProperty(repeated=True)
      d = model.IntegerProperty(default=42)
    key = Foo(p=1, q=2, r=[3, 4]).put()
    q = Foo.query(Foo.p >= 0)
    ent = q.get(projection=[Foo.p, 'q'])
    self.assertItemsEqual(ent._projection, ('pp', 'q'))
    self.assertEqual(ent.p, 1)
    self.assertEqual(ent.q, 2)
    self.assertRaises(model.UnprojectedPropertyError, lambda: ent.r)
    self.assertRaises(model.UnprojectedPropertyError, lambda: ent.d)
    # Repeated r=[3, 4] produces two partial entities, one per value.
    ents = q.fetch(projection=['pp', 'r'])
    ents.sort(key=lambda ent: ent.r)
    self.assertEqual(ents, [Foo(p=1, r=[3], key=key, projection=('pp', 'r')),
                            Foo(p=1, r=[4], key=key, projection=['pp', 'r'])])
  def testProjectionQuery_AllTypes(self):
    """For every indexable property type, a single-property projection
    round-trips through protobuf serialization, preserves the projected
    value, and raises UnprojectedPropertyError for every other property."""
    class Foo(model.Model):
      abool = model.BooleanProperty()
      aint = model.IntegerProperty()
      afloat = model.FloatProperty()
      astring = model.StringProperty()
      ablob = model.BlobProperty(indexed=True)
      akey = model.KeyProperty()
      auser = model.UserProperty()
      apoint = model.GeoPtProperty()
      adatetime = model.DateTimeProperty()
      adate = model.DateProperty()
      atime = model.TimeProperty()
    boo = Foo(abool=True,
              aint=42,
              afloat=3.14,
              astring='foo',
              ablob='bar',
              akey=model.Key(Foo, 'ref'),
              auser=users.User('<EMAIL>'),
              apoint=model.GeoPt(52.35, 4.9166667),
              adatetime=datetime.datetime(2012, 5, 1, 8, 19, 42),
              adate=datetime.date(2012, 5, 1),
              atime=datetime.time(8, 19, 42),
              )
    boo.put()
    qry = Foo.query()
    for prop in Foo._properties.itervalues():
      ent = qry.get(projection=[prop._name])
      # Round-trip the partial entity through its protobuf form.
      pb = ent._to_pb()
      decoded_ent = Foo._from_pb(pb, set_key=False)
      self.assertEqual(ent, decoded_ent)
      self.assertEqual(getattr(ent, prop._code_name),
                       getattr(boo, prop._code_name))
      for otherprop in Foo._properties.itervalues():
        if otherprop is not prop:
          try:
            getattr(ent, otherprop._code_name)
            self.fail('Expected an UnprojectedPropertyError for property %s'
                      ' when projecting %s.' % (otherprop, prop))
          except model.UnprojectedPropertyError:
            pass
  def testProjectionQuery_ComputedProperties(self):
    """ComputedProperty values are materialized in the index, so they can be
    projected directly; reading a computed property whose inputs were not
    projected raises UnprojectedPropertyError only when the stored value
    itself was not projected."""
    class Foo(model.Model):
      a = model.StringProperty()
      b = model.StringProperty()
      c = model.ComputedProperty(lambda ent: '<%s.%s>' % (ent.a, ent.b))
      d = model.ComputedProperty(lambda ent: '<%s>' % (ent.a,))
    foo = Foo(a='a', b='b')
    foo.put()
    self.assertEqual((foo.a, foo.b, foo.c, foo.d), ('a', 'b', '<a.b>', '<a>'))
    qry = Foo.query()
    # Projecting both inputs lets c and d be recomputed.
    x = qry.get(projection=['a', 'b'])
    self.assertEqual((x.a, x.b, x.c, x.d), ('a', 'b', '<a.b>', '<a>'))
    y = qry.get(projection=['a'])
    self.assertEqual((y.a, y.d), ('a', '<a>'))
    self.assertRaises(model.UnprojectedPropertyError, lambda: y.b)
    self.assertRaises(model.UnprojectedPropertyError, lambda: y.c)
    z = qry.get(projection=['b'])
    self.assertEqual((z.b,), ('b',))
    # Computed properties can also be projected directly from the index.
    p = qry.get(projection=['c', 'd'])
    self.assertEqual((p.c, p.d), ('<a.b>', '<a>'))
  def testProjectionQuery_StructuredProperties(self):
    """Projections into structured properties: dotted datastore names
    ('mid.baz', 'mid.inner.foo') propagate _projection down each nesting
    level, partial sub-entities are read-only, and projecting a repeated
    sub-property yields one result per value."""
    class Inner(model.Model):
      foo = model.StringProperty()
      bar = model.StringProperty()
      beh = model.StringProperty()
    class Middle(model.Model):
      baz = model.StringProperty()
      inner = model.StructuredProperty(Inner)
      inners = model.StructuredProperty(Inner, repeated=True)
    class Outer(model.Model):
      name = model.StringProperty()
      middle = model.StructuredProperty(Middle, 'mid')
    one = Outer(name='one',
                middle=Middle(baz='one',
                              inner=Inner(foo='foo', bar='bar'),
                              inners=[Inner(foo='a', bar='b'),
                                      Inner(foo='c', bar='d')]))
    one.put()
    two = Outer(name='two',
                middle=Middle(baz='two',
                              inner=Inner(foo='x', bar='y'),
                              inners=[Inner(foo='p', bar='q')]))
    two.put()
    q = Outer.query()
    x, y = q.fetch(projection=[Outer.name, Outer.middle.baz])
    # The partial entity must survive a protobuf round-trip.
    pb = x._to_pb()
    z = Outer._from_pb(pb, set_key=False)
    self.assertEqual(x, z)
    self.assertEqual(x.middle.baz, 'one')
    self.assertEqual(x.middle._projection, ('baz',))
    self.assertEqual(x,
                     Outer(key=one.key, name='one',
                           middle=Middle(baz='one', projection=['baz']),
                           projection=['mid.baz', 'name']))
    self.assertEqual(y,
                     Outer(key=two.key, name='two',
                           middle=Middle(baz='two', projection=['baz']),
                           projection=['mid.baz', 'name']))
    # Unprojected sub-properties raise; projected entities are read-only.
    self.assertRaises(model.UnprojectedPropertyError, lambda: x.middle.inner)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x, 'middle', None)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x, 'middle', x.middle)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle, 'inner', None)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle, 'inner',
                      Inner(foo='', projection=['foo']))
    # Deeper nesting: the projection list is narrowed at each level.
    x = q.get(projection=[Outer.middle.inner.foo, 'mid.inner.bar'])
    self.assertEqual(x.middle.inner.foo, 'foo')
    self.assertItemsEqual(x.middle.inner._projection, ('bar', 'foo'))
    self.assertItemsEqual(x.middle._projection, ('inner.bar', 'inner.foo'))
    self.assertItemsEqual(x._projection, ('mid.inner.bar', 'mid.inner.foo'))
    self.assertEqual(x,
                     Outer(key=one.key,
                           projection=['mid.inner.bar', 'mid.inner.foo'],
                           middle=Middle(projection=['inner.bar', 'inner.foo'],
                                         inner=Inner(projection=['bar', 'foo'],
                                                     foo='foo', bar='bar'))))
    self.assertRaises(model.UnprojectedPropertyError,
                      lambda: x.middle.inner.beh)
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle.inner, 'foo', '')
    self.assertRaises(model.ReadonlyPropertyError,
                      setattr, x.middle.inner, 'beh', '')
    # Repeated sub-property: one partial result per stored value.
    xs = q.fetch(projection=[Outer.middle.inners.foo])
    self.assertEqual(xs[0],
                     Outer(key=one.key,
                           middle=Middle(inners=[Inner(foo='a',
                                                       _projection=('foo',))],
                                         _projection=('inners.foo',)),
                           _projection=('mid.inners.foo',)))
    self.assertEqual(len(xs), 3)
    for x, foo in zip(xs, ['a', 'c', 'p']):
      self.assertEqual(len(x.middle.inners), 1)
      self.assertEqual(x.middle.inners[0].foo, foo)
  def testFilterRepr(self):
    """repr() of a comparison filter shows name, operator and value."""
    class Employee(model.Model):
      name = model.StringProperty()
    f = (Employee.name == 'xyzzy')
    self.assertEqual(repr(f), "FilterNode('name', '=', 'xyzzy')")
  def testNodeComparisons(self):
    """Filter nodes support == and != (by value, for FilterNode and
    conjunction nodes alike) but reject ordering comparisons."""
    a = query.FilterNode('foo', '=', 1)
    b = query.FilterNode('foo', '=', 1)
    c = query.FilterNode('foo', '=', 2)
    d = query.FilterNode('foo', '<', 1)
    # Don't use assertEqual/assertNotEqual; we want to be sure that
    # __eq__ or __ne__ is really called here!
    self.assertTrue(a == b)
    self.assertTrue(a != c)
    self.assertTrue(b != d)
    self.assertRaises(TypeError, lambda: a < b)
    self.assertRaises(TypeError, lambda: a <= b)
    self.assertRaises(TypeError, lambda: a > b)
    self.assertRaises(TypeError, lambda: a >= b)
    x = query.AND(a, b, c)
    y = query.AND(a, b, c)
    z = query.AND(a, d)
    self.assertTrue(x == y)
    self.assertTrue(x != z)
  def testQueryForStructuredProperty(self):
    """Filtering and ordering on sub-properties of a StructuredProperty
    (Bar.foo.rate, Bar.foo.name) works like ordinary properties."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    b1 = Bar(name='b1', foo=Foo(name='nest', rate=1, tags=['tag1', 'tag2']))
    b1.put()
    b2 = Bar(name='b2', foo=Foo(name='best', rate=2, tags=['tag2', 'tag3']))
    b2.put()
    b3 = Bar(name='b3', foo=Foo(name='rest', rate=2, tags=['tag2']))
    b3.put()
    q1 = Bar.query().order(Bar.name)
    self.assertEqual(q1.fetch(10), [b1, b2, b3])
    q2 = Bar.query().filter(Bar.foo.rate >= 2)
    self.assertEqual(q2.fetch(10), [b2, b3])
    # Mixed ascending/descending sub-property orders.
    q3 = q2.order(Bar.foo.rate, -Bar.foo.name, +Bar.foo.rate)
    self.assertEqual(q3.fetch(10), [b3, b2])
  def testQueryForStructuredPropertyErrors(self):
    """Whole-structure filters only support equality against a non-empty
    value; inequalities and empty values raise BadFilterError."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    # Can't use inequalities.
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo < Foo())
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo != Foo())
    # Can't use an empty value.
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: Bar.foo == Foo())
  def testQueryForStructuredPropertyIn(self):
    """IN() over whole structured values matches entities whose structure
    equals any listed value; empty IN() builds but fails at execution, and
    non-sequence arguments are rejected at construction."""
    self.ExpectWarnings()
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    a = Bar(name='a', foo=Foo(name='a'))
    a.put()
    b = Bar(name='b', foo=Foo(name='b'))
    b.put()
    self.assertEqual(
      Bar.query(Bar.foo.IN((Foo(name='a'), Foo(name='b')))).fetch(),
      [a, b])
    self.assertEqual(Bar.query(Bar.foo.IN([Foo(name='a')])).fetch(), [a])
    # An IN query with empty argument can be constructed but not executed.
    q = Bar.query(Bar.foo.IN(set()))
    self.assertRaises(datastore_errors.BadQueryError, q.fetch)
    # Passing a non-sequence argument should fail.
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, 42)
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, None)
    self.assertRaises(datastore_errors.BadArgumentError,
                      Bar.foo.IN, 'not a sequence')
  def testQueryForNestedStructuredProperty(self):
    """Filters reach through multiple levels of structured nesting
    (Baz.bar.foo.name, Baz.bak.bar.foo.name)."""
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    class Bak(model.Model):
      bar = model.StructuredProperty(Bar)
    class Baz(model.Model):
      bar = model.StructuredProperty(Bar)
      bak = model.StructuredProperty(Bak)
      rank = model.IntegerProperty()
    b1 = Baz(bar=Bar(foo=Foo(name='a')))
    b1.put()
    b2 = Baz(bar=Bar(foo=Foo(name='b')), bak=Bak(bar=Bar(foo=Foo(name='c'))))
    b2.put()
    q1 = Baz.query().filter(Baz.bar.foo.name >= 'a')
    self.assertEqual(q1.fetch(10), [b1, b2])
    # b1 has no bak at all, so only b2 matches the deeper filter.
    q2 = Baz.query().filter(Baz.bak.bar.foo.name >= 'a')
    self.assertEqual(q2.fetch(10), [b2])
  def testQueryForWholeStructure(self):
    """Equality against a whole structured value matches any repeated entry
    whose set properties all agree (unset properties are wildcards); two
    such filters must each be satisfiable, possibly by different entries.
    Also checks offset/limit on the resulting (post-filtered) query."""
    class Employee(model.Model):
      name = model.StringProperty()
      rank = model.IntegerProperty()
    class Manager(Employee):
      report = model.StructuredProperty(Employee, repeated=True)
    reports_a = []
    for i in range(3):
      e = Employee(name=str(i), rank=i)
      e.put()
      # Clear the key so the entity can be embedded as a sub-structure.
      e.key = None
      reports_a.append(e)
    reports_b = []
    for i in range(3, 6):
      e = Employee(name=str(i), rank=0)
      e.put()
      e.key = None
      reports_b.append(e)
    mgr_a = Manager(name='a', report=reports_a)
    mgr_a.put()
    mgr_b = Manager(name='b', report=reports_b)
    mgr_b.put()
    mgr_c = Manager(name='c', report=reports_a + reports_b)
    mgr_c.put()
    res = list(Manager.query(Manager.report == Employee(name='1', rank=1)))
    self.assertEqual(res, [mgr_a, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0)))
    self.assertEqual(res, [mgr_a, mgr_b, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='3')))
    self.assertEqual(res, [mgr_b, mgr_c])
    # No single report entry has rank=0 AND name='1'.
    res = list(Manager.query(Manager.report == Employee(rank=0, name='1')))
    self.assertEqual(res, [])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='0'),
                             Manager.report == Employee(rank=1, name='1')))
    self.assertEqual(res, [mgr_a, mgr_c])
    q = Manager.query(Manager.report == Employee(rank=2, name='2'))
    res = list(q)
    self.assertEqual(res, [mgr_a, mgr_c])
    res = list(q.iter(offset=1))
    self.assertEqual(res, [mgr_c])
    res = list(q.iter(limit=1))
    self.assertEqual(res, [mgr_a])
  def testQueryForWholeStructureCallsDatastoreType(self):
    """Regression test: whole-structure equality must convert sub-property
    values via _datastore_type() (e.g. DateProperty) before filtering."""
    # See issue 87.  http://goo.gl/Tl5Ed
    class Event(model.Model):
      what = model.StringProperty()
      when = model.DateProperty()  # Has non-trivial _datastore_type().
    class Outer(model.Model):
      who = model.StringProperty()
      events = model.StructuredProperty(Event, repeated=True)
    q = Outer.query(Outer.events == Event(what='stuff',
                                          when=datetime.date.today()))
    q.fetch()  # Failed before the fix.
  def testQueryForWholeNestedStructure(self):
    """Whole-structure equality also works when the compared value itself
    contains nested structured properties."""
    class A(model.Model):
      a1 = model.StringProperty()
      a2 = model.StringProperty()
    class B(model.Model):
      b1 = model.StructuredProperty(A)
      b2 = model.StructuredProperty(A)
    class C(model.Model):
      c = model.StructuredProperty(B)
    x = C(c=B(b1=A(a1='a1', a2='a2'), b2=A(a1='a3', a2='a4')))
    x.put()
    q = C.query(C.c == x.c)
    self.assertEqual(q.get(), x)
  def testQueryForWholeStructureNone(self):
    """Comparing a structured property to None matches entities whose
    structure is unset."""
    class X(model.Model):
      name = model.StringProperty()
    class Y(model.Model):
      x = model.StructuredProperty(X)
    y = Y(x=None)
    y.put()
    q = Y.query(Y.x == None)
    self.assertEqual(q.fetch(), [y])
  def testQueryAncestorConsistentWithAppId(self):
    """An explicit app= argument must agree with the ancestor key's app."""
    class Employee(model.Model):
      pass
    a = model.Key(Employee, 1)
    self.assertEqual(a.app(), self.APP_ID)  # Just checkin'.
    Employee.query(ancestor=a, app=a.app()).fetch()  # Shouldn't fail.
    self.assertRaises(Exception, Employee.query, ancestor=a, app='notthisapp')
  def testQueryAncestorConsistentWithNamespace(self):
    """An explicit namespace= must agree with the ancestor key's namespace
    (None means "inherit from the ancestor"); mismatches raise."""
    class Employee(model.Model):
      pass
    a = model.Key(Employee, 1, namespace='ns')
    self.assertEqual(a.namespace(), 'ns')  # Just checkin'.
    Employee.query(ancestor=a, namespace='ns').fetch()
    Employee.query(ancestor=a, namespace=None).fetch()
    self.assertRaises(Exception,
                      Employee.query, ancestor=a, namespace='another')
    self.assertRaises(Exception,
                      Employee.query, ancestor=a, namespace='')
    # And again with the default namespace.
    b = model.Key(Employee, 1)
    self.assertEqual(b.namespace(), '')  # Just checkin'.
    Employee.query(ancestor=b, namespace='')
    Employee.query(ancestor=b, namespace=None)
    self.assertRaises(Exception,
                      Employee.query, ancestor=b, namespace='ns')
    # Finally some queries with a namespace but no ancestor.
    Employee.query(namespace='').fetch()
    Employee.query(namespace='ns').fetch()
  def testQueryWithNamespace(self):
    """Queries are namespace-scoped: an entity stored under namespace 'ns'
    is invisible to the default-namespace query."""
    class Employee(model.Model):
      pass
    k = model.Key(Employee, None, namespace='ns')
    e = Employee(key=k)
    e.put()
    self.assertEqual(Employee.query().fetch(), [])
    self.assertEqual(Employee.query(namespace='ns').fetch(), [e])
  def testQueryFilterAndOrderPreserveNamespace(self):
    """filter() and order() return new Query objects that keep the original
    query's namespace."""
    class Employee(model.Model):
      name = model.StringProperty()
    q1 = Employee.query(namespace='ns')
    q2 = q1.filter(Employee.name == 'Joe')
    self.assertEqual(q2.namespace, 'ns')
    # Ditto for order()
    q3 = q2.order(Employee.name)
    self.assertEqual(q3.namespace, 'ns')
  def testMultiQuery(self):
    """_MultiQuery merges the results of several component queries in the
    shared sort order (here Foo.name). Uses the fixture entities
    self.jill/self.joe created in setUp."""
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query._MultiQuery([q1, q2])
    res = list(qq)
    self.assertEqual(res, [self.jill, self.joe])
  def testIterAsync(self):
    """The async iterator protocol (has_next_async + next) yields the same
    results as synchronous iteration."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()
  def testMap(self):
    """Query.map() applies a callback to each result; both plain callables
    and tasklet (async) callbacks are supported."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    self.assertEqual(q.map(callback), ['jill', 'joe'])
    self.assertEqual(q.map(callback_async), ['jill', 'joe'])
    # TODO: Test map() with esoteric argument combinations
    # e.g. keys_only, produce_cursors, and merge_future.
  def testMapAsync(self):
    """Query.map_async() returns a future resolving to the mapped list, for
    both plain and tasklet callbacks."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    @tasklets.synctasklet
    def foo():
      fut = q.map_async(callback)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
      fut = q.map_async(callback_async)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
    foo()
  def testFetch(self):
    """fetch(limit) caps the number of returned results."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10), [self.jill, self.joe])
    self.assertEqual(q.fetch(2), [self.jill, self.joe])
    self.assertEqual(q.fetch(1), [self.jill])
  def testFetchAsync(self):
    """fetch_async(limit) yields the same results as the sync fetch()."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.fetch_async(10)
      self.assertEqual(res, [self.jill, self.joe])
      res = yield q.fetch_async(2)
      self.assertEqual(res, [self.jill, self.joe])
      res = yield q.fetch_async(1)
      self.assertEqual(res, [self.jill])
    foo()
  def testFetchEmpty(self):
    """fetch() on a query with no matches returns an empty list."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.fetch(1), [])
  def testFetchKeysOnly(self):
    """fetch(keys_only=True) returns keys instead of entities."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10, keys_only=True),
                     [self.jill.key, self.joe.key])
  def testGet(self):
    """get() returns the first result of the query."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(), self.jill)
  def testGetEmpty(self):
    """get() on a query with no matches returns None."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.get(), None)
  def testGetKeysOnly(self):
    """get(keys_only=True) returns the first result's key."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(keys_only=True), self.jill.key)
  def testCursors(self):
    """With produce_cursors=True, cursor_before()/cursor_after() bracket
    each result: before[i+1] == after[i]. Before the first result both
    raise BadArgumentError; after exhaustion before == after."""
    q = query.Query(kind='Foo')
    it = q.iter(produce_cursors=True)
    expected = [self.joe, self.jill, self.moe]
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
    before = []
    after = []
    for i, ent in enumerate(it):
      self.assertEqual(ent, expected[i])
      before.append(it.cursor_before())
      after.append(it.cursor_after())
    before.append(it.cursor_before())
    after.append(it.cursor_after())
    self.assertEqual(before[1], after[0])
    self.assertEqual(before[2], after[1])
    self.assertEqual(before[3], after[2])
    self.assertEqual(before[3], after[3])  # !!!
  def testCursorsKeysOnly(self):
    """Same cursor bracketing behavior as testCursors, but for a
    keys_only iterator."""
    q = query.Query(kind='Foo')
    it = q.iter(produce_cursors=True, keys_only=True)
    expected = [self.joe.key, self.jill.key, self.moe.key]
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
    self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
    before = []
    after = []
    for i, ent in enumerate(it):
      self.assertEqual(ent, expected[i])
      before.append(it.cursor_before())
      after.append(it.cursor_after())
    before.append(it.cursor_before())
    after.append(it.cursor_after())
    self.assertEqual(before[1], after[0])
    self.assertEqual(before[2], after[1])
    self.assertEqual(before[3], after[2])
    self.assertEqual(before[3], after[3])  # !!!
  def testCursorsForAugmentedQuery(self):
    """Queries with in-memory post-filters (whole-structure equality) are
    'augmented' and cannot supply cursors: cursor_before/cursor_after raise
    NotImplementedError at every position."""
    class Employee(model.Model):
      name = model.StringProperty()
      rank = model.IntegerProperty()
    class Manager(Employee):
      report = model.StructuredProperty(Employee, repeated=True)
    reports_a = []
    for i in range(3):
      e = Employee(name=str(i), rank=i)
      e.put()
      # Clear the key so the entity can be embedded as a sub-structure.
      e.key = None
      reports_a.append(e)
    reports_b = []
    for i in range(3, 6):
      e = Employee(name=str(i), rank=0)
      e.put()
      e.key = None
      reports_b.append(e)
    mgr_a = Manager(name='a', report=reports_a)
    mgr_a.put()
    mgr_b = Manager(name='b', report=reports_b)
    mgr_b.put()
    mgr_c = Manager(name='c', report=reports_a + reports_b)
    mgr_c.put()
    it = Manager.query(Manager.report == Employee(name='1', rank=1)).iter()
    it.next()
    self.assertRaises(NotImplementedError, it.cursor_before)
    self.assertRaises(NotImplementedError, it.cursor_after)
    it.next()
    self.assertRaises(NotImplementedError, it.cursor_before)
    self.assertRaises(NotImplementedError, it.cursor_after)
    self.assertFalse(it.has_next())
  def testCursorsEfficientPaging(self):
    """Paging idiom: limit=pagesize+1 with batch_size=pagesize gives the
    page, its end cursor, and a probably_has_next() hint in one RPC.
    With 3 fixture entities, pages of size 1-2 have more; 3-4 don't."""
    # We want to read a 'page' of data, get the cursor just past the
    # page, and know whether there is another page, all with a single
    # RPC.  To do this, set limit=pagesize+1, batch_size=pagesize.
    q = query.Query(kind='Foo')
    cursors = {}
    mores = {}
    for pagesize in [1, 2, 3, 4]:
      it = q.iter(produce_cursors=True, limit=pagesize + 1, batch_size=pagesize)
      todo = pagesize
      for _ in it:
        todo -= 1
        if todo <= 0:
          break
      cursors[pagesize] = it.cursor_after()
      mores[pagesize] = it.probably_has_next()
    self.assertEqual(mores, {1: True, 2: True, 3: False, 4: False})
    self.assertEqual(cursors[3], cursors[4])
    # TODO: Assert that only one RPC call was made.
  def testProbablyHasNext(self):
    """probably_has_next() is True until the final result is consumed."""
    q = query.Query(kind='Foo')
    probablies = []
    it = q.iter(produce_cursors=True)
    for _ in it:
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
  def testProbablyHasNextMultipleBatches(self):
    """probably_has_next() behaves the same even when results arrive in
    multiple single-item batches."""
    q = query.Query(kind='Foo')
    probablies = []
    it = q.iter(produce_cursors=True, batch_size=1)
    for _ in it:
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
  def testProbablyHasNextAndHasNextInteraction(self):
    """Calling has_next() first must not perturb probably_has_next();
    both report [True, True, False] over three results."""
    q = query.Query(kind='Foo')
    mores = []
    probablies = []
    it = q.iter(produce_cursors=True)
    for _ in it:
      mores.append(it.has_next())
      probablies.append(it.probably_has_next())
    self.assertEqual(probablies, [True, True, False])
    self.assertEqual(mores, [True, True, False])
  def testCursorsDelete(self):
    """Tests that deleting an entity doesn't affect cursor positioning."""
    class DeletedEntity(model.Model):
      name = model.StringProperty()
    entities = [DeletedEntity(name='A'),
                DeletedEntity(name='B'),
                DeletedEntity(name='C')]
    model.put_multi(entities)
    q = DeletedEntity.query().order(DeletedEntity.name)
    it = q.iter(limit=2, produce_cursors=True)
    self.assertEqual('A', it.next().name)
    entities[0].key.delete()
    # Grab cursor after deleting first entity. This should point before second.
    cursor = it.cursor_after()
    it = q.iter(start_cursor=cursor, produce_cursors=True)
    self.assertEqual('B', it.next().name)
  def testSkippedResultCursor(self):
    """A cursor_before() taken right after an offset skip points just
    before the first returned result, so resuming there re-yields it."""
    class SkippedEntity(model.Model):
      name = model.StringProperty()
    entities = [SkippedEntity(name='A'),
                SkippedEntity(name='B'),
                SkippedEntity(name='C')]
    model.put_multi(entities)
    q = SkippedEntity.query().order(SkippedEntity.name)
    it = q.iter(offset=2, produce_cursors=True)
    self.assertEqual('C', it.next().name)
    cursor = it.cursor_before()
    # Run the query at the iterator returned before the first result
    it = q.iter(start_cursor=cursor, produce_cursors=True)
    self.assertEqual('C', it.next().name)
  def testCount(self):
    """count(limit) returns the number of matches, capped at limit."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.count(10), 2)
    self.assertEqual(q.count(1), 1)
  def testCountAsync(self):
    """count_async(limit) yields the same counts as the sync count()."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.count_async(10)
      self.assertEqual(res, 2)
      res = yield q.count_async(1)
      self.assertEqual(res, 1)
    foo()
  def testCountEmpty(self):
    """count() on a query with no matches returns 0."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.count(1), 0)
  def testCountPostFilter(self):
    """count() still respects its limit when the query has an in-memory
    post-filter (whole-structure equality on a repeated property)."""
    class Froo(model.Model):
      name = model.StringProperty()
      rate = model.IntegerProperty()
      age = model.IntegerProperty()
    class Bar(model.Model):
      name = model.StringProperty()
      froo = model.StructuredProperty(Froo, repeated=True)
    b1 = Bar(name='b1', froo=[Froo(name='a', rate=1)])
    b1.put()
    b2 = Bar(name='b2', froo=[Froo(name='a', rate=1)])
    b2.put()
    q = Bar.query(Bar.froo == Froo(name='a', rate=1))
    self.assertEqual(q.count(3), 2)
    self.assertEqual(q.count(2), 2)
    self.assertEqual(q.count(1), 1)
  def testCountDisjunction(self):
    """count() over an IN() disjunction counts distinct matches, capped
    at the requested limit."""
    q = Foo.query(Foo.name.IN(['joe', 'jill']))
    self.assertEqual(q.count(3), 2)
    self.assertEqual(q.count(2), 2)
    self.assertEqual(q.count(1), 1)
  def testLargeCount(self):
    """count() works past the stub's maximum query offset: more entities
    than _MAX_QUERY_OFFSET are stored and counted correctly, both above
    and below the actual total."""
    class Bar(model.Model):
      pass
    for i in xrange(0, datastore_stub_util._MAX_QUERY_OFFSET + 10):
      Bar(id=str(i)).put()
    count = Bar.query().count(datastore_stub_util._MAX_QUERY_OFFSET + 20)
    self.assertEqual(datastore_stub_util._MAX_QUERY_OFFSET + 10, count)
    # Test count less than requested limit.
    count = Bar.query().count(datastore_stub_util._MAX_QUERY_OFFSET + 5)
    self.assertEqual(datastore_stub_util._MAX_QUERY_OFFSET + 5, count)
  def testFetchPage(self):
    """fetch_page(size) returns (results, cursor, more) and pages through
    the 3 fixture entities correctly for sizes 1-4; 'more' is False on the
    final page, and resuming past the end yields an empty page."""
    # This test implicitly also tests fetch_page_async().
    q = query.Query(kind='Foo')
    page_size = 1
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.jill])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 2
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill])
    self.assertTrue(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 3
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill, self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
    page_size = 4
    res, curs, more = q.fetch_page(page_size)
    self.assertEqual(res, [self.joe, self.jill, self.moe])
    self.assertFalse(more)
    res, curs, more = q.fetch_page(page_size, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertFalse(more)
  def testMultiQueryIterator(self):
    """Async iteration over an ordered IN() (multi-)query merges component
    results in Foo.name order."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
    q = q.order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()
  def testMultiQueryIteratorUnordered(self):
    """Async iteration over an unordered IN() query returns both matches;
    compare as a set since result order is unspecified."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(set(r._key for r in res),
                       set([self.jill._key, self.joe._key]))
    foo()
  def testMultiQueryFetch(self):
    """fetch() on an ordered multi-query supports limit (explicit, None,
    omitted), offset, and keys_only like a plain query."""
    q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(-Foo.name)
    expected = [self.joe, self.jill]
    self.assertEqual(q.fetch(10), expected)
    self.assertEqual(q.fetch(None), expected)
    self.assertEqual(q.fetch(), expected)
    self.assertEqual(q.fetch(2), expected)
    self.assertEqual(q.fetch(1), expected[:1])
    self.assertEqual(q.fetch(10, offset=1), expected[1:])
    self.assertEqual(q.fetch(1, offset=1), expected[1:])
    self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
def testMultiQueryFetchUnordered(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill']))
expected = [self.joe, self.jill]
self.assertEqual(q.fetch(10), expected)
self.assertEqual(q.fetch(None), expected)
self.assertEqual(q.fetch(), expected)
self.assertEqual(q.fetch(2), expected)
self.assertEqual(q.fetch(1), expected[:1])
self.assertEqual(q.fetch(10, offset=1), expected[1:])
self.assertEqual(q.fetch(1, offset=1), expected[1:])
self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])
def testMultiQueryCount(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(Foo.name)
self.assertEqual(q.count(10), 2)
self.assertEqual(q.count(None), 2)
self.assertEqual(q.count(), 2)
self.assertEqual(q.count(2), 2)
self.assertEqual(q.count(1), 1)
self.assertEqual(q.count(10, keys_only=True), 2)
self.assertEqual(q.count(keys_only=True), 2)
def testMultiQueryCountUnordered(self):
q = Foo.query(Foo.tags.IN(['joe', 'jill']))
self.assertEqual(q.count(10), 2)
self.assertEqual(q.count(None), 2)
self.assertEqual(q.count(), 2)
self.assertEqual(q.count(10, keys_only=True), 2)
self.assertEqual(q.count(keys_only=True), 2)
  def testMultiQueryCursors(self):
    """fetch_page() on an IN (multi) query needs the key in the sort order."""
    self.ExpectWarnings()
    q = Foo.query(Foo.tags.IN(['joe', 'jill']))
    # Without any order, a MultiQuery cannot produce cursors at all.
    self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
    q = q.order(Foo.tags)
    # Ordering on a non-key property alone is still insufficient.
    self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
    q = q.order(Foo.key)
    expected = q.fetch()
    self.assertEqual(len(expected), 2)
    # Once the key is part of the order, paging works one result at a time.
    res, curs, more = q.fetch_page(1, keys_only=True)
    self.assertEqual(res, [expected[0].key])
    self.assertTrue(curs is not None)
    self.assertTrue(more)
    res, curs, more = q.fetch_page(1, keys_only=False, start_cursor=curs)
    self.assertEqual(res, [expected[1]])
    self.assertTrue(curs is not None)
    self.assertFalse(more)
    # Past the last result: empty page, no cursor, no more.
    res, curs, more = q.fetch_page(1, start_cursor=curs)
    self.assertEqual(res, [])
    self.assertTrue(curs is None)
    self.assertFalse(more)
def testMultiQueryWithAndWithoutAncestor(self):
class Benjamin(model.Model):
name = model.StringProperty()
ben = Benjamin(name='ben', parent=self.moe.key)
ben.put()
benji = Benjamin(name='benji')
benji.put()
bq = Benjamin.query()
baq = Benjamin.query(ancestor=self.moe.key)
mq = query._MultiQuery([bq, baq])
res = list(mq)
self.assertEqual(res, [benji, ben])
def testNestedMultiQuery(self):
class Bar(model.Model):
a = model.StringProperty()
b = model.StringProperty()
class Rank(model.Model):
val = model.IntegerProperty()
class Foo(model.Model):
bar = model.StructuredProperty(Bar, repeated=True)
rank = model.StructuredProperty(Rank)
f1 = Foo(bar=[Bar(a='a1', b='b')], rank=Rank(val=1))
f2 = Foo(bar=[Bar(a='a2', b='e')], rank=Rank(val=2))
f1.put()
f2.put()
q = Foo.query(query.OR(Foo.bar == Bar(a='a1', b='b'),
Foo.bar == Bar(a='a2', b='e')))
q = q.order(Foo.rank.val)
self.assertEqual([f1, f2], q.fetch())
def testProbablyHasNextWithMultiQuery(self):
class Foo(model.Model):
a = model.IntegerProperty()
keys = model.put_multi([Foo(a=i) for i in range(100)])
q = Foo.query(Foo.key.IN(keys)).order(Foo.a)
it = q.iter()
for i in range(0, 99):
it.next()
# Probably has next is conservative so it should always return True
# if there are in fact more results.
self.assertTrue(it.probably_has_next())
def testNotEqualOperator(self):
q = query.Query(kind='Foo').filter(Foo.rate != 2)
res = list(q)
self.assertEqual(res, [self.joe, self.moe])
def testInOperator(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(('jill', 'hello')))
res = list(q)
self.assertEqual(res, [self.joe, self.jill])
def testFullDistributiveLaw(self):
q = query.Query(kind='Foo').filter(Foo.tags.IN(['jill', 'hello']))
q = q.filter(Foo.rate.IN([1, 2]))
DisjunctionNode = query.DisjunctionNode
ConjunctionNode = query.ConjunctionNode
FilterNode = query.FilterNode
expected = DisjunctionNode(
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 1)),
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 2)),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 1)),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 2)))
self.assertEqual(q.filters, expected)
def testHalfDistributiveLaw(self):
DisjunctionNode = query.DisjunctionNode
ConjunctionNode = query.ConjunctionNode
FilterNode = query.FilterNode
filters = ConjunctionNode(
FilterNode('tags', 'in', ['jill', 'hello']),
ConjunctionNode(FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')))
expected = DisjunctionNode(
ConjunctionNode(FilterNode('tags', '=', 'jill'),
FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')),
ConjunctionNode(FilterNode('tags', '=', 'hello'),
FilterNode('rate', '=', 1),
FilterNode('name', '=', 'moe')))
self.assertEqual(filters, expected)
def testKeyFilter(self):
class MyModel(model.Model):
number = model.IntegerProperty()
k1 = model.Key('MyModel', 'foo-1')
m1 = MyModel(key=k1)
m1.put()
k2 = model.Key('MyModel', 'foo-2')
m2 = MyModel(key=k2)
m2.put()
q = MyModel.query(MyModel.key == k1)
res = q.get()
self.assertEqual(res, m1)
q = MyModel.query(MyModel.key > k1)
res = q.get()
self.assertEqual(res, m2)
q = MyModel.query(MyModel.key < k2)
res = q.get()
self.assertEqual(res, m1)
def testUnicode(self):
class MyModel(model.Model):
n = model.IntegerProperty(u'\u4321')
@classmethod
def _get_kind(cls):
return u'\u1234'.encode('utf-8')
a = MyModel(n=42)
k = a.put()
b = k.get()
self.assertEqual(a, b)
self.assertFalse(a is b)
# So far so good, now try queries
res = MyModel.query(MyModel.n == 42).fetch()
self.assertEqual(res, [a])
def testBlobQuery(self):
class MyModel(model.Model):
b = model.BlobProperty(indexed=True)
a = MyModel(b='\xff\x00')
a.put()
q = MyModel.query(MyModel.b == '\xff\x00')
it = iter(q)
b = it.next()
self.assertEqual(a, b)
def testKindlessQuery(self):
class ParentModel(model.Model):
a = model.StringProperty()
class ChildModel(model.Model):
b = model.StringProperty()
p = ParentModel(a="Test1")
p.put()
c = ChildModel(parent=p.key, b="Test2")
c.put()
q = query.Query(ancestor=p.key)
self.assertEqual(q.count(), 2)
l = q.fetch()
self.assertTrue(c in l)
self.assertTrue(p in l)
def testExpandoQueries(self):
class Foo(model.Expando):
pass
testdata = {'int': 42,
'float': 3.14,
'string': 'hello',
'bool': True,
# Don't call this 'key'; it interferes with the built-in
# key attribute (the entity's key).
'akey': model.Key('Foo', 1),
'point': model.GeoPt(52.35, 4.9166667),
'user': users.User('<EMAIL>', 'example.<EMAIL>', '123'),
'blobkey': model.BlobKey('blah'),
'none': None,
}
for name, value in testdata.iteritems():
foo = Foo()
setattr(foo, name, value)
foo.put()
qry = Foo.query(query.FilterNode(name, '=', value))
res = qry.get()
self.assertTrue(res is not None, name)
self.assertEqual(getattr(res, name), value)
res.key.delete()
def testQueryCacheInteraction(self):
class Bar(model.Model):
name = model.StringProperty()
ctx = tasklets.get_context()
ctx.set_cache_policy(True)
a = Bar(name='a')
a.put()
b = a.key.get()
self.assertTrue(b is a) # Just verifying that the cache is on.
b = Bar.query().get()
self.assertTrue(b is a)
a.name = 'x' # Modify, but don't write.
b = Bar.query().get()
self.assertTrue(b is a)
self.assertEqual(a.name, 'x')
b = Bar.query().get(use_cache=False) # Skip the cache.
self.assertFalse(b is a)
self.assertEqual(b.name, 'a')
a.key = None # Invalidate cache by resetting key.
b = Bar.query().get()
self.assertFalse(b is a)
self.assertEqual(a.name, 'x')
self.assertEqual(b.name, 'a')
def testGqlMinimal(self):
qry = query.gql('SELECT * FROM Foo')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestor(self):
key = model.Key('Foo', 42)
qry = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS KEY('%s')" %
key.urlsafe())
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, key)
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlAncestorWithParameter(self):
qry = query.gql('SELECT * FROM Foo WHERE ANCESTOR IS :1')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, query.Parameter(1))
self.assertEqual(qry.filters, None)
self.assertEqual(qry.orders, None)
def testGqlFilter(self):
qry = query.gql("SELECT * FROM Foo WHERE name = 'joe' AND rate = 1")
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters,
query.ConjunctionNode(
query.FilterNode('name', '=', 'joe'),
query.FilterNode('rate', '=', 1)))
self.assertEqual(qry.orders, None)
def testGqlOrder(self):
qry = query.gql('SELECT * FROM Foo ORDER BY name')
self.assertEqual(query._orders_to_orderings(qry.orders),
[('name', query._ASC)])
def testGqlOffset(self):
qry = query.gql('SELECT * FROM Foo OFFSET 2')
self.assertEqual(qry.default_options.offset, 2)
def testGqlLimit(self):
qry = query.gql('SELECT * FROM Foo LIMIT 2')
self.assertEqual(qry.default_options.limit, 2)
def testGqlParameters(self):
qry = query.gql('SELECT * FROM Foo WHERE name = :1 AND rate = :foo')
self.assertEqual(qry.kind, 'Foo')
self.assertEqual(qry.ancestor, None)
self.assertEqual(qry.filters,
query.ConjunctionNode(
query.ParameterNode(Foo.name, '=',
query.Parameter(1)),
query.ParameterNode(Foo.rate, '=',
query.Parameter('foo'))))
self.assertEqual(qry.orders, None)
def testGqlBindParameters(self):
pqry = query.gql('SELECT * FROM Foo WHERE name = :1')
qry = pqry.bind('joe')
self.assertEqual(list(qry), [self.joe])
qry = pqry.bind('jill')
self.assertEqual(list(qry), [self.jill])
def testGqlUnresolvedParameters(self):
self.ExpectErrors()
qry = query.gql(
'SELECT * FROM Foo WHERE name = :1')
self.assertRaises(datastore_errors.BadArgumentError, qry.fetch)
self.assertRaises(datastore_errors.BadArgumentError, qry.count)
self.assertRaises(datastore_errors.BadArgumentError, list, qry)
self.assertRaises(datastore_errors.BadArgumentError, qry.iter)
def checkGql(self, expected, gql, args=(), kwds={},
fetch=lambda q: list(q)):
actual = fetch(query.gql(gql).bind(*args, **kwds))
self.assertEqual(expected, actual)
def testGqlBasicQueries(self):
self.checkGql([self.joe, self.jill, self.moe], "SELECT * FROM Foo")
def testGqlKeyQueries(self):
self.checkGql([self.joe.key, self.jill.key, self.moe.key],
"SELECT __key__ FROM Foo")
def testGqlOperatorQueries(self):
self.checkGql([self.joe], "SELECT * FROM Foo WHERE name = 'joe'")
self.checkGql([self.moe], "SELECT * FROM Foo WHERE name > 'joe'")
self.checkGql([self.jill], "SELECT * FROM Foo WHERE name < 'joe'")
self.checkGql([self.joe, self.moe],
"SELECT * FROM Foo WHERE name >= 'joe'")
self.checkGql([self.jill, self.joe],
"SELECT * FROM Foo WHERE name <= 'joe'")
self.checkGql([self.jill, self.moe],
"SELECT * FROM Foo WHERE name != 'joe'")
# NOTE: The ordering on these is questionable:
self.checkGql([self.joe, self.jill],
"SELECT * FROM Foo WHERE name IN ('joe', 'jill')")
self.checkGql([self.jill, self.joe],
"SELECT * FROM Foo WHERE name IN ('jill', 'joe')")
def testGqlOrderQueries(self):
self.checkGql([self.jill, self.joe, self.moe],
"SELECT * FROM Foo ORDER BY name")
self.checkGql([self.moe, self.joe, self.jill],
"SELECT * FROM Foo ORDER BY name DESC")
self.checkGql([self.joe, self.jill, self.moe],
"SELECT * FROM Foo ORDER BY __key__ ASC")
self.checkGql([self.moe, self.jill, self.joe],
"SELECT * FROM Foo ORDER BY __key__ DESC")
self.checkGql([self.jill, self.joe, self.moe],
"SELECT * FROM Foo ORDER BY rate DESC, name")
def testGqlOffsetQuery(self):
self.checkGql([self.jill, self.moe], "SELECT * FROM Foo OFFSET 1")
def testGqlLimitQuery(self):
self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2")
def testGqlLimitOffsetQuery(self):
self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1")
def testGqlLimitOffsetQueryUsingFetch(self):
self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1",
fetch=lambda q: q.fetch())
# XXX TODO: Make this work:
# def testGqlLimitQueryUsingFetch(self):
# self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2",
# fetch=lambda q: q.fetch(3))
def testGqlOffsetQueryUsingFetchPage(self):
q = query.gql("SELECT * FROM Foo LIMIT 2")
res1, cur1, more1 = q.fetch_page(1)
self.assertEqual([self.joe], res1)
self.assertEqual(True, more1)
res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1)
self.assertEqual([self.jill], res2)
# XXX TODO: Gotta make this work:
# self.assertEqual(False, more2)
# res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2)
# self.assertEqual([], res3)
# self.assertEqual(False, more3)
# self.assertEqual(None, cur3)
def testGqlLimitQueryUsingFetchPage(self):
q = query.gql("SELECT * FROM Foo OFFSET 1")
res1, cur1, more1 = q.fetch_page(1)
self.assertEqual([self.jill], res1)
self.assertEqual(True, more1)
# NOTE: Without offset=0, the following break.
res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1, offset=0)
self.assertEqual([self.moe], res2)
self.assertEqual(False, more2)
res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2, offset=0)
self.assertEqual([], res3)
self.assertEqual(False, more3)
self.assertEqual(None, cur3)
def testGqlParameterizedAncestor(self):
q = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS :1")
self.assertEqual([self.moe], q.bind(self.moe.key).fetch())
def testGqlParameterizedInClause(self):
# NOTE: The ordering on these is questionable:
q = query.gql("SELECT * FROM Foo WHERE name IN :1")
self.assertEqual([self.jill, self.joe], q.bind(('jill', 'joe')).fetch())
# Exercise the LIST function.
q = query.gql("SELECT * FROM Foo WHERE name IN (:a, :b)")
self.assertEqual([self.jill, self.joe], q.bind(a='jill', b='joe').fetch())
# Generate OR/AND nodes containing parameter nodes.
q = query.gql("SELECT * FROM Foo WHERE name = :1 AND rate in (1, 2)")
self.assertEqual([self.jill], q.bind('jill').fetch())
def testGqlKeyFunction(self):
class Bar(model.Model):
ref = model.KeyProperty(kind=Foo)
noref = Bar()
noref.put()
joeref = Bar(ref=self.joe.key)
joeref.put()
moeref = Bar(ref=self.moe.key)
moeref.put()
self.assertEqual(
[noref],
Bar.gql("WHERE ref = NULL").fetch())
self.assertEqual(
[noref],
Bar.gql("WHERE ref = :1").bind(None).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = :1").bind(self.joe.key).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('%s')" % self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('Foo', %s)" % self.joe.key.id()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joeref],
Bar.gql("WHERE ref = KEY('Foo', :1)").bind(self.joe.key.id()).fetch())
def testGqlKeyFunctionAncestor(self):
class Bar(model.Model):
pass
nobar = Bar()
nobar.put()
joebar = Bar(parent=self.joe.key)
joebar.put()
moebar = Bar(parent=self.moe.key)
moebar.put()
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY('%s')" % self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS :1").bind(self.joe.key).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY(:1)").bind(
self.joe.key.urlsafe()).fetch())
self.assertEqual(
[joebar],
Bar.gql("WHERE ANCESTOR IS KEY('Foo', :1)")
.bind(self.joe.key.id()).fetch())
def testGqlAncestorFunctionError(self):
self.assertRaises(TypeError,
query.gql, 'SELECT * FROM Foo WHERE ANCESTOR IS USER(:1)')
  def testGqlOtherFunctions(self):
    """GQL value constructors: USER(), GEOPT(), DATETIME(), DATE(), TIME()."""
    class Bar(model.Model):
      auser = model.UserProperty()
      apoint = model.GeoPtProperty()
      adatetime = model.DateTimeProperty()
      adate = model.DateProperty()
      atime = model.TimeProperty()
    abar = Bar(
      auser=users.User('<EMAIL>'),
      apoint=model.GeoPt(52.35, 4.9166667),
      adatetime=datetime.datetime(2012, 2, 1, 14, 54, 0),
      adate=datetime.date(2012, 2, 2),
      atime=datetime.time(14, 54, 0),
      )
    abar.put()
    # A second entity with all properties unset; must match none of the
    # filters below.
    bbar = Bar()
    bbar.put()
    self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE auser=USER(:1)")
      .bind('<EMAIL>').fetch())
    self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE apoint=GEOPT(:1, :2)")
      .bind(52.35, 4.9166667).fetch())
    # DATETIME() also accepts a single ISO-formatted string argument.
    self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE adatetime=DATETIME(:1)")
      .bind('2012-02-01 14:54:00').fetch())
    # Note :2 is deliberately reused for both month and day (2012-02-02),
    # exercising repeated use of one positional parameter.
    self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE adate=DATE(:1, :2, :2)")
      .bind(2012, 2).fetch())
    self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE atime=TIME(:hour, :min, :sec)")
      .bind(hour=14, min=54, sec=0).fetch())
def testGqlStructuredPropertyQuery(self):
class Bar(model.Model):
foo = model.StructuredProperty(Foo)
barf = Bar(foo=Foo(name='one', rate=3, tags=['a', 'b']))
barf.put()
barg = Bar(foo=Foo(name='two', rate=4, tags=['b', 'c']))
barg.put()
barh = Bar()
barh.put()
# TODO: Once SDK 1.6.3 is released, drop quotes around foo.name.
q = Bar.gql("WHERE \"foo.name\" = 'one'")
self.assertEqual([barf], q.fetch())
q = Bar.gql("WHERE foo = :1").bind(Foo(name='two', rate=4))
self.assertEqual([barg], q.fetch())
q = Bar.gql("WHERE foo = NULL")
self.assertEqual([barh], q.fetch())
q = Bar.gql("WHERE foo = :1")
self.assertEqual([barh], q.bind(None).fetch())
def testGqlExpandoProperty(self):
class Bar(model.Expando):
pass
babar = Bar(name='Babar')
babar.put()
bare = Bar(nude=42)
bare.put()
q = Bar.gql("WHERE name = 'Babar'")
self.assertEqual([babar], q.fetch())
q = Bar.gql("WHERE nude = :1")
self.assertEqual([bare], q.bind(42).fetch())
def testGqlExpandoInStructure(self):
class Bar(model.Expando):
pass
class Baz(model.Model):
bar = model.StructuredProperty(Bar)
bazar = Baz(bar=Bar(bow=1, wow=2))
bazar.put()
bazone = Baz()
bazone.put()
q = Baz.gql("WHERE \"bar.bow\" = 1")
self.assertEqual([bazar], q.fetch())
def testGqlKindlessQuery(self):
results = query.gql('SELECT *').fetch()
self.assertEqual([self.joe, self.jill, self.moe], results)
def testGqlSubclass(self):
# You can pass _gql() a subclass of Query and it'll use that.
class MyQuery(query.Query):
pass
q = query._gql("SELECT * FROM Foo WHERE name = :1", query_class=MyQuery)
self.assertTrue(isinstance(q, MyQuery))
# And bind() preserves the class.
qb = q.bind('joe')
self.assertTrue(isinstance(qb, MyQuery))
# .filter() also preserves the class, as well as default_options.
qf = q.filter(Foo.rate == 1)
self.assertTrue(isinstance(qf, MyQuery))
self.assertEqual(qf.default_options, q.default_options)
# Same for .options().
qo = q.order(-Foo.name)
self.assertTrue(isinstance(qo, MyQuery))
self.assertEqual(qo.default_options, q.default_options)
def testGqlUnusedBindings(self):
# Only unused positional bindings raise an error.
q = Foo.gql("WHERE ANCESTOR IS :1 AND rate >= :2")
qb = q.bind(self.joe.key, 2, foo=42) # Must not fail
self.assertRaises(datastore_errors.BadArgumentError, q.bind)
self.assertRaises(datastore_errors.BadArgumentError, q.bind, self.joe.key)
self.assertRaises(datastore_errors.BadArgumentError, q.bind,
self.joe.key, 2, 42)
def testGqlWithBind(self):
q = Foo.gql("WHERE name = :1", 'joe')
self.assertEqual([self.joe], q.fetch())
def testGqlAnalyze(self):
q = Foo.gql("WHERE name = 'joe'")
self.assertEqual([], q.analyze())
q = Foo.gql("WHERE name = :1 AND rate = :2")
self.assertEqual([1, 2], q.analyze())
q = Foo.gql("WHERE name = :foo AND rate = :bar")
self.assertEqual(['bar', 'foo'], q.analyze())
q = Foo.gql("WHERE tags = :1 AND name = :foo AND rate = :bar")
self.assertEqual([1, 'bar', 'foo'], q.analyze())
def testGqlGroupBy(self):
q = query.gql("SELECT DISTINCT name, tags FROM Foo "
"WHERE name < 'joe' ORDER BY name")
self.assertEquals(('name', 'tags'), q.projection)
self.assertEquals(('name', 'tags'), q.group_by)
self.assertEquals(True, q.is_distinct)
ents = q.fetch()
ents.sort(key=lambda ent: ent.tags)
self.assertEqual(ents, [Foo(name='jill', tags=['jack'],
key=self.jill.key,
projection=['name', 'tags']),
Foo(name='jill', tags=['jill'],
key=self.jill.key,
projection=('name', 'tags'))])
def testGqlProjection(self):
q = query.gql("SELECT name, tags FROM Foo WHERE name < 'joe' ORDER BY name")
self.assertEquals(('name', 'tags'), q.projection)
self.assertEquals(None, q.group_by)
self.assertEquals(False, q.is_distinct)
ents = q.fetch()
ents.sort(key=lambda ent: ent.tags)
self.assertEqual(ents, [Foo(name='jill', tags=['jack'],
key=self.jill.key,
projection=['name', 'tags']),
Foo(name='jill', tags=['jill'],
key=self.jill.key,
projection=('name', 'tags'))])
  def testGqlBadProjection(self):
    """Projecting an unknown property fails at GQL parse time."""
    # NOTE(review): the identical call is asserted against two exception
    # names — presumably BadProjectionError aliases InvalidPropertyError so
    # the legacy name keeps working; confirm against model.py.
    self.assertRaises(model.BadProjectionError,
                      query.gql, "SELECT qqq FROM Foo")
    self.assertRaises(model.InvalidPropertyError,
                      query.gql, "SELECT qqq FROM Foo")
def testGqlBadKind(self):
self.assertRaises(model.KindError,
query.gql, "SELECT * FROM Whatever")
def testAsyncNamespace(self):
# Test that async queries pick up the namespace when the
# foo_async() call is made, not later.
# See issue 168. http://goo.gl/aJp7i
namespace_manager.set_namespace('mission')
barney = Foo(name='Barney')
barney.put()
willy = Foo(name='Willy')
willy.put()
q1 = Foo.query()
qm = Foo.query(Foo.name.IN(['Barney', 'Willy'])).order(Foo._key)
# Test twice: once with a simple query, once with a MultiQuery.
for q in q1, qm:
# Test fetch_async().
namespace_manager.set_namespace('mission')
fut = q.fetch_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, [barney, willy])
# Test map_async().
namespace_manager.set_namespace('mission')
fut = q.map_async(None)
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, [barney, willy])
# Test get_async().
namespace_manager.set_namespace('mission')
fut = q.get_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, barney)
# Test count_async().
namespace_manager.set_namespace('mission')
fut = q.count_async()
namespace_manager.set_namespace('impossible')
res = fut.get_result()
self.assertEqual(res, 2)
# Test fetch_page_async().
namespace_manager.set_namespace('mission')
fut = q.fetch_page_async(2)
namespace_manager.set_namespace('impossible')
res, cur, more = fut.get_result()
self.assertEqual(res, [barney, willy])
self.assertEqual(more, False)
def hugeOffsetTestHelper(self, fetch):
""" Helper function to test large offsets.
Args:
fetch: A function that takes in (query, offset) and returns a list with
one result.
"""
# See issue 210. http://goo.gl/EDfHa
# Vastly reduce _MAX_QUERY_OFFSET since otherwise the test spends
# several seconds creating enough entities to reproduce the problem.
save_max_query_offset = datastore_stub_util._MAX_QUERY_OFFSET
try:
datastore_stub_util._MAX_QUERY_OFFSET = 10
ndb = model
class M(ndb.Model):
a = ndb.IntegerProperty()
ms = [M(a=i, id='%04d' % i) for i in range(33)]
ks = ndb.put_multi(ms)
q = M.query().order(M.a)
xs = fetch(q, 9)
self.assertEqual(xs, ms[9:10])
xs = fetch(q, 10)
self.assertEqual(xs, ms[10:11])
xs = fetch(q, 11)
self.assertEqual(xs, ms[11:12])
xs = fetch(q, 21)
self.assertEqual(xs, ms[21:22])
xs = fetch(q, 31)
self.assertEqual(xs, ms[31:32])
finally:
datastore_stub_util._MAX_QUERY_OFFSET = save_max_query_offset
def testHugeOffset(self):
"""Test offset > MAX_OFFSET for fetch."""
def fetch_one(qry, offset):
return qry.fetch(1, offset=offset)
self.hugeOffsetTestHelper(fetch_one)
  def testHugeOffsetRunToQueue(self):
    """Test offset > MAX_OFFSET for run_to_queue."""
    def fetch_from_queue(qry, offset):
      # Drive the lower-level run_to_queue() API instead of fetch();
      # results arrive on a MultiFuture as (batch, index, entity) tuples.
      queue = tasklets.MultiFuture()
      options = query.QueryOptions(offset=offset, limit=1)
      qry.run_to_queue(queue, self.conn, options).check_success()
      results = queue.get_result()
      return [result[2] for result in results]  # Keep only the entities.
    self.hugeOffsetTestHelper(fetch_from_queue)
class IndexListTestMixin(object):
  """Tests for QueryIterator.index_list(). Must be used with BaseQueryTestMixin."""

  def create_index(self):
    """Register a composite index on Foo(-name, +tags) with the v3 stub."""
    ci = datastore_stub_util.datastore_pb.CompositeIndex()
    ci.set_app_id(os.environ['APPLICATION_ID'])
    ci.set_id(0)
    ci.set_state(ci.WRITE_ONLY)
    index = ci.mutable_definition()
    index.set_ancestor(0)
    index.set_entity_type('Foo')
    # Named 'prop' rather than 'property' to avoid shadowing the builtin.
    prop = index.add_property()
    prop.set_name('name')
    prop.set_direction(prop.DESCENDING)
    prop = index.add_property()
    prop.set_name('tags')
    prop.set_direction(prop.ASCENDING)
    stub = self.testbed.get_stub('datastore_v3')
    stub.CreateIndex(ci)

  def testIndexListPremature(self):
    # Before calling next() we don't have the information.
    self.create_index()
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    self.assertEqual(qi.index_list(), None)

  def testIndexListEmpty(self):
    # A simple query requires no composite indexes.
    q = Foo.query(Foo.name == 'joe', Foo.tags == 'joe')
    qi = q.iter()
    qi.next()
    self.assertEqual(qi.index_list(), [])

  def testIndexListNontrivial(self):
    # Test a non-trivial query: an inequality plus an equality reports the
    # (built-in) index it used.
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    qi.next()
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='asc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])

  def testIndexListExhausted(self):
    # Test that the information is preserved after the iterator is
    # exhausted.
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    qi = q.iter()
    list(qi)
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='asc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])

  def testIndexListWithIndexAndOrder(self):
    # Test a non-trivial query with sort order and an actual composite
    # index present.
    self.create_index()
    q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
    q = q.order(-Foo.name, Foo.tags)
    qi = q.iter()
    qi.next()
    # TODO: This is a little odd, because that's not exactly the index
    # we created...?
    properties = [model.IndexProperty(name='tags', direction='asc'),
                  model.IndexProperty(name='name', direction='desc')]
    self.assertEqual(qi.index_list(),
                     [model.IndexState(
                         definition=model.Index(kind='Foo',
                                                properties=properties,
                                                ancestor=False),
                         state='serving',
                         id=0)])

  def testIndexListMultiQuery(self):
    # index_list() is not available for OR (multi) queries; it stays None.
    self.create_index()
    q = Foo.query(query.OR(Foo.name == 'joe', Foo.name == 'jill'))
    qi = q.iter()
    qi.next()
    self.assertEqual(qi.index_list(), None)
class QueryV3Tests(test_utils.NDBTest, BaseQueryTestMixin, IndexListTestMixin):
  """Query tests that use a connection to a Datastore V3 stub."""

  def setUp(self):
    # Run both the base NDB setup and the shared query fixture setup.
    test_utils.NDBTest.setUp(self)
    BaseQueryTestMixin.setUp(self)

  def testConstructorOptionsInteractions(self):
    """V3 semantics for combining projection/keys_only/distinct options."""
    self.ExpectWarnings()
    qry = Foo.query(projection=[Foo.name, Foo.rate])
    # Keys only overrides projection.
    qry.get(keys_only=True)
    # Projection overrides original projection.
    qry.get(projection=Foo.tags)
    # Cannot override both.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags, keys_only=True)
    qry = Foo.query(projection=[Foo.name, Foo.rate], distinct=True)
    # Cannot project something out side the group by.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags)
    # Can project a subset of the group by.
    qry.get(projection=Foo.name)
    # Keys only overrides projection but a projection is required for group_by.
    self.assertRaises(datastore_errors.BadRequestError,
                      qry.get, keys_only=True)

  def testCursorsForMultiQuery(self):
    """Cursor availability on a hand-built _MultiQuery iterator."""
    # Only relevant for V3 since V1 has per result cursors.
    # TODO(pcostello): This should throw a better error.
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query._MultiQuery([q1, q2])
    it = qq.iter()
    it.next()
    it.cursor_before()  # Start cursor
    # cursor_after() is not available mid-batch on a MultiQuery iterator.
    self.assertRaises(AttributeError, it.cursor_after)
    it.next()
    it.cursor_before()  # Start of second query
    it.cursor_after()  # End of batch cursor
    self.assertFalse(it.has_next())
@real_unittest.skipUnless(datastore_pbs._CLOUD_DATASTORE_ENABLED,
                          "V1 must be supported to run V1 tests.")
class QueryV1Tests(test_utils.NDBCloudDatastoreV1Test, BaseQueryTestMixin):
  """Query tests that use a connection to a Cloud Datastore V1 stub."""

  def setUp(self):
    # Run both the V1-specific setup and the shared query fixture setup.
    test_utils.NDBCloudDatastoreV1Test.setUp(self)
    BaseQueryTestMixin.setUp(self)

  def testConstructorOptionsInteractions(self):
    """V1 semantics differ from V3: keys_only + projection may combine."""
    self.ExpectWarnings()
    qry = Foo.query(projection=[Foo.name, Foo.rate])
    # Keys only overrides projection.
    qry.get(keys_only=True)
    # Projection overrides original projection.
    qry.get(projection=Foo.tags)
    # Can override both.
    qry.get(projection=Foo.tags, keys_only=True)
    qry = Foo.query(projection=[Foo.name, Foo.rate], distinct=True)
    # Cannot project something out side the group by.
    self.assertRaises(datastore_errors.BadRequestError, qry.get,
                      projection=Foo.tags)
    # Can project a subset of the group by.
    qry.get(projection=Foo.name)
    # Keys only overrides projection but a projection is required for group_by.
    self.assertRaises(datastore_errors.BadRequestError,
                      qry.get, keys_only=True)
if __name__ == '__main__':
unittest.main()
|
en
| 0.790732
|
# # Copyright 2008 The ndb Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for query.py. # Create class inside tests because kinds are cleared every test. # Let's not specify what it should show for filters and orders, # just test that it doesn't blow up. # App and namespace. # default_options. # Try a few different property types, to get a good mix of what # used to fail. # Shouldn't be able to query for unindexed properties # Should pass # TODO: Make this fail? See issue 89. http://goo.gl/K4gbY # Currently StructuredProperty(..., indexed=False) has no effect. # self.assertRaises(datastore_errors.BadFilterError, # lambda: Emp.struct.name == 'a') # Also check renaming. # Check distinct validation # Check both projection and default_options.projection/keys_only is not # allowed. # Check empty projection/group_by not allowed. # Check that ancestor and namespace must match. # Also check renaming. # Also check renaming. # make sure it survives mutation. # Also check renaming. # Legacy support for single value projection # Also check renaming # Test fetch and iter in base case. # Test projection using default options. # Test projection with other default options. # Also check renaming # Don't use assertEqual/assertNotEqual; we want to be sure that # __eq__ or __ne__ is really called here! # Can't use inequalities. # Can't use an empty value. # An IN query with empty argument can be constructed but not executed. 
# Passing a non-sequence argument should fail. # See issue 87. http://goo.gl/Tl5Ed # Has non-trivial _datastore_type(). # Failed before the fix. # Just checkin'. # Shouldn't fail. # Just checkin'. # And again with the default namespace. # Just checkin'. # Finally some queries with a namespace but no ancestor. # Ditto for order() # TODO: Test map() with esoteric argument combinations # e.g. keys_only, produce_cursors, and merge_future. # !!! # !!! # We want to read a 'page' of data, get the cursor just past the # page, and know whether there is another page, all with a single # RPC. To do this, set limit=pagesize+1, batch_size=pagesize. # TODO: Assert that only one RPC call was made. Tests that deleting an entity doesn't affect cursor positioning. # Grab cursor after deleting first entity. This should point before second. # Run the query at the iterator returned before the first result # Test count less than requested limit. # This test implicitly also tests fetch_page_async(). # Probably has next is conservative so it should always return True # if there are in fact more results. # So far so good, now try queries # Don't call this 'key'; it interferes with the built-in # key attribute (the entity's key). # Just verifying that the cache is on. # Modify, but don't write. # Skip the cache. # Invalidate cache by resetting key. # NOTE: The ordering on these is questionable: # XXX TODO: Make this work: # def testGqlLimitQueryUsingFetch(self): # self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2", # fetch=lambda q: q.fetch(3)) # XXX TODO: Gotta make this work: # self.assertEqual(False, more2) # res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2) # self.assertEqual([], res3) # self.assertEqual(False, more3) # self.assertEqual(None, cur3) # NOTE: Without offset=0, the following break. # NOTE: The ordering on these is questionable: # Exercise the LIST function. # Generate OR/AND nodes containing parameter nodes. 
# TODO: Once SDK 1.6.3 is released, drop quotes around foo.name. # You can pass _gql() a subclass of Query and it'll use that. # And bind() preserves the class. # .filter() also preserves the class, as well as default_options. # Same for .options(). # Only unused positional bindings raise an error. # Must not fail # Test that async queries pick up the namespace when the # foo_async() call is made, not later. # See issue 168. http://goo.gl/aJp7i # Test twice: once with a simple query, once with a MultiQuery. # Test fetch_async(). # Test map_async(). # Test get_async(). # Test count_async(). # Test fetch_page_async(). Helper function to test large offsets. Args: fetch: A function that takes in (query, offset) and returns a list with one result. # See issue 210. http://goo.gl/EDfHa # Vastly reduce _MAX_QUERY_OFFSET since otherwise the test spends # several seconds creating enough entities to reproduce the problem. Test offset > MAX_OFFSET for fetch. Test offset > MAX_OFFSET for run_to_queue. Tests for Index lists. Must be used with BaseQueryTestMixin. # Before calling next() we don't have the information. # A simple query requires no composite indexes. # Test a non-trivial query. # Test that the information is preserved after the iterator is # exhausted. # Test a non-trivial query with sort order and an actual composite # index present. # TODO: This is a little odd, because that's not exactly the index # we created...? Query tests that use a connection to a Datastore V3 stub. # Keys only overrides projection. # Projection overrides original projection. # Cannot override both. # Cannot project something out side the group by. # Can project a subset of the group by. # Keys only overrides projection but a projection is required for group_by. # Only relevant for V3 since V1 has per result cursors. # TODO(pcostello): This should throw a better error. # Start cursor # Start of second query # End of batch cursor Query tests that use a connection to a Cloud Datastore V1 stub. 
# Keys only overrides projection. # Projection overrides original projection. # Can override both. # Cannot project something out side the group by. # Can project a subset of the group by. # Keys only overrides projection but a projection is required for group_by.
| 2.103204
| 2
|
oauth_provider/views.py
|
philipforget/django-oauth-plus
| 0
|
6627258
|
from urllib import urlencode
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from django.core.urlresolvers import get_callable
import oauth2 as oauth
from decorators import oauth_required
from forms import AuthorizeRequestTokenForm
from oauth_provider.compat import UnsafeRedirect
from responses import INVALID_PARAMS_RESPONSE, INVALID_CONSUMER_RESPONSE, COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
from store import store, InvalidConsumerError, InvalidTokenError
from utils import verify_oauth_request, get_oauth_request, require_params, send_oauth_error
from utils import is_xauth_request
from consts import OUT_OF_BAND
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
UNSAFE_REDIRECTS = getattr(settings, "OAUTH_UNSAFE_REDIRECTS", False)
@csrf_exempt
def request_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
missing_params = require_params(oauth_request, ('oauth_callback',))
if missing_params is not None:
return missing_params
if is_xauth_request(oauth_request):
return HttpResponseBadRequest('xAuth not allowed for this method.')
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return INVALID_CONSUMER_RESPONSE
if not verify_oauth_request(request, oauth_request, consumer):
return COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
try:
request_token = store.create_request_token(request, oauth_request, consumer, oauth_request['oauth_callback'])
except oauth.Error, err:
return send_oauth_error(err)
ret = urlencode({
'oauth_token': request_token.key,
'oauth_token_secret': request_token.secret,
'oauth_callback_confirmed': 'true'
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@login_required
def user_authorization(request, form_class=AuthorizeRequestTokenForm):
if 'oauth_token' not in request.REQUEST:
return HttpResponseBadRequest('No request token specified.')
oauth_request = get_oauth_request(request)
try:
request_token = store.get_request_token(request, oauth_request, request.REQUEST['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
consumer = store.get_consumer_for_request_token(request, oauth_request, request_token)
if request.method == 'POST':
form = form_class(request.POST)
if request.session.get('oauth', '') == request_token.key and form.is_valid():
request.session['oauth'] = ''
if form.cleaned_data['authorize_access']:
request_token = store.authorize_request_token(request, oauth_request, request_token)
args = { 'oauth_token': request_token.key }
else:
args = { 'error': _('Access not granted by user.') }
if request_token.callback is not None and request_token.callback != OUT_OF_BAND:
callback_url = request_token.get_callback_url(args)
if UNSAFE_REDIRECTS:
response = UnsafeRedirect(callback_url)
else:
response = HttpResponseRedirect(callback_url)
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
view_callable = get_callable(callback_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % callback_view_str
# try to treat it as Class Based View (CBV)
try:
callback_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
callback_view = view_callable
response = callback_view(request, **args)
else:
response = send_oauth_error(oauth.Error(_('Action not allowed.')))
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
view_callable = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
# try to treat it as Class Based View (CBV)
try:
authorize_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
authorize_view = view_callable
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = request_token.key
response = authorize_view(request, request_token, request_token.get_callback_url(), params)
return response
@csrf_exempt
def access_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
# Consumer
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return HttpResponseBadRequest('Invalid consumer.')
is_xauth = is_xauth_request(oauth_request)
if not is_xauth:
# Check Parameters
missing_params = require_params(oauth_request, ('oauth_token', 'oauth_verifier'))
if missing_params is not None:
return missing_params
# Check Request Token
try:
request_token = store.get_request_token(request, oauth_request, oauth_request['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
if not request_token.is_approved:
return HttpResponseBadRequest('Request Token not approved by the user.')
# Verify Signature
if not verify_oauth_request(request, oauth_request, consumer, request_token):
return HttpResponseBadRequest('Could not verify OAuth request.')
# Check Verifier
if oauth_request.get('oauth_verifier', None) != request_token.verifier:
return HttpResponseBadRequest('Invalid OAuth verifier.')
else: # xAuth
# Check Parameters
missing_params = require_params(oauth_request, ('x_auth_username', 'x_auth_password', 'x_auth_mode'))
if missing_params is not None:
return missing_params
# Check if Consumer allows xAuth
if not consumer.xauth_allowed:
return HttpResponseBadRequest('xAuth not allowed for this method')
# Check Signature
if not verify_oauth_request(request, oauth_request, consumer):
return HttpResponseBadRequest('Could not verify xAuth request.')
user = authenticate(
x_auth_username=oauth_request.get_parameter('x_auth_username'),
x_auth_password=<PASSWORD>_request.get_parameter('x_auth_password'),
x_auth_mode=oauth_request.get_parameter('x_auth_mode')
)
if not user:
return HttpResponseBadRequest('xAuth username or password is not valid')
else:
request.user = user
# Handle Request Token
try:
#request_token = store.create_request_token(request, oauth_request, consumer, oauth_request.get('oauth_callback'))
request_token = store.create_request_token(request, oauth_request, consumer, OUT_OF_BAND)
request_token = store.authorize_request_token(request, oauth_request, request_token)
except oauth.Error, err:
return send_oauth_error(err)
access_token = store.create_access_token(request, oauth_request, consumer, request_token)
ret = urlencode({
'oauth_token': access_token.key,
'oauth_token_secret': access_token.secret
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@oauth_required
def protected_resource_example(request):
"""
Test view for accessing a Protected Resource.
"""
return HttpResponse('Protected Resource access!')
@login_required
def fake_authorize_view(request, token, callback, params):
"""
Fake view for tests. It must return an ``HttpResponse``.
You need to define your own in ``settings.OAUTH_AUTHORIZE_VIEW``.
"""
return HttpResponse('Fake authorize view for %s with params: %s.' % (token.consumer.name, params))
def fake_callback_view(request, **args):
"""
Fake view for tests. It must return an ``HttpResponse``.
You can define your own in ``settings.OAUTH_CALLBACK_VIEW``.
"""
return HttpResponse('Fake callback view.')
|
from urllib import urlencode
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from django.core.urlresolvers import get_callable
import oauth2 as oauth
from decorators import oauth_required
from forms import AuthorizeRequestTokenForm
from oauth_provider.compat import UnsafeRedirect
from responses import INVALID_PARAMS_RESPONSE, INVALID_CONSUMER_RESPONSE, COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
from store import store, InvalidConsumerError, InvalidTokenError
from utils import verify_oauth_request, get_oauth_request, require_params, send_oauth_error
from utils import is_xauth_request
from consts import OUT_OF_BAND
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
UNSAFE_REDIRECTS = getattr(settings, "OAUTH_UNSAFE_REDIRECTS", False)
@csrf_exempt
def request_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
missing_params = require_params(oauth_request, ('oauth_callback',))
if missing_params is not None:
return missing_params
if is_xauth_request(oauth_request):
return HttpResponseBadRequest('xAuth not allowed for this method.')
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return INVALID_CONSUMER_RESPONSE
if not verify_oauth_request(request, oauth_request, consumer):
return COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
try:
request_token = store.create_request_token(request, oauth_request, consumer, oauth_request['oauth_callback'])
except oauth.Error, err:
return send_oauth_error(err)
ret = urlencode({
'oauth_token': request_token.key,
'oauth_token_secret': request_token.secret,
'oauth_callback_confirmed': 'true'
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@login_required
def user_authorization(request, form_class=AuthorizeRequestTokenForm):
if 'oauth_token' not in request.REQUEST:
return HttpResponseBadRequest('No request token specified.')
oauth_request = get_oauth_request(request)
try:
request_token = store.get_request_token(request, oauth_request, request.REQUEST['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
consumer = store.get_consumer_for_request_token(request, oauth_request, request_token)
if request.method == 'POST':
form = form_class(request.POST)
if request.session.get('oauth', '') == request_token.key and form.is_valid():
request.session['oauth'] = ''
if form.cleaned_data['authorize_access']:
request_token = store.authorize_request_token(request, oauth_request, request_token)
args = { 'oauth_token': request_token.key }
else:
args = { 'error': _('Access not granted by user.') }
if request_token.callback is not None and request_token.callback != OUT_OF_BAND:
callback_url = request_token.get_callback_url(args)
if UNSAFE_REDIRECTS:
response = UnsafeRedirect(callback_url)
else:
response = HttpResponseRedirect(callback_url)
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
view_callable = get_callable(callback_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % callback_view_str
# try to treat it as Class Based View (CBV)
try:
callback_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
callback_view = view_callable
response = callback_view(request, **args)
else:
response = send_oauth_error(oauth.Error(_('Action not allowed.')))
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
view_callable = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
# try to treat it as Class Based View (CBV)
try:
authorize_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
authorize_view = view_callable
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = request_token.key
response = authorize_view(request, request_token, request_token.get_callback_url(), params)
return response
@csrf_exempt
def access_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
# Consumer
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return HttpResponseBadRequest('Invalid consumer.')
is_xauth = is_xauth_request(oauth_request)
if not is_xauth:
# Check Parameters
missing_params = require_params(oauth_request, ('oauth_token', 'oauth_verifier'))
if missing_params is not None:
return missing_params
# Check Request Token
try:
request_token = store.get_request_token(request, oauth_request, oauth_request['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
if not request_token.is_approved:
return HttpResponseBadRequest('Request Token not approved by the user.')
# Verify Signature
if not verify_oauth_request(request, oauth_request, consumer, request_token):
return HttpResponseBadRequest('Could not verify OAuth request.')
# Check Verifier
if oauth_request.get('oauth_verifier', None) != request_token.verifier:
return HttpResponseBadRequest('Invalid OAuth verifier.')
else: # xAuth
# Check Parameters
missing_params = require_params(oauth_request, ('x_auth_username', 'x_auth_password', 'x_auth_mode'))
if missing_params is not None:
return missing_params
# Check if Consumer allows xAuth
if not consumer.xauth_allowed:
return HttpResponseBadRequest('xAuth not allowed for this method')
# Check Signature
if not verify_oauth_request(request, oauth_request, consumer):
return HttpResponseBadRequest('Could not verify xAuth request.')
user = authenticate(
x_auth_username=oauth_request.get_parameter('x_auth_username'),
x_auth_password=<PASSWORD>_request.get_parameter('x_auth_password'),
x_auth_mode=oauth_request.get_parameter('x_auth_mode')
)
if not user:
return HttpResponseBadRequest('xAuth username or password is not valid')
else:
request.user = user
# Handle Request Token
try:
#request_token = store.create_request_token(request, oauth_request, consumer, oauth_request.get('oauth_callback'))
request_token = store.create_request_token(request, oauth_request, consumer, OUT_OF_BAND)
request_token = store.authorize_request_token(request, oauth_request, request_token)
except oauth.Error, err:
return send_oauth_error(err)
access_token = store.create_access_token(request, oauth_request, consumer, request_token)
ret = urlencode({
'oauth_token': access_token.key,
'oauth_token_secret': access_token.secret
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@oauth_required
def protected_resource_example(request):
"""
Test view for accessing a Protected Resource.
"""
return HttpResponse('Protected Resource access!')
@login_required
def fake_authorize_view(request, token, callback, params):
"""
Fake view for tests. It must return an ``HttpResponse``.
You need to define your own in ``settings.OAUTH_AUTHORIZE_VIEW``.
"""
return HttpResponse('Fake authorize view for %s with params: %s.' % (token.consumer.name, params))
def fake_callback_view(request, **args):
"""
Fake view for tests. It must return an ``HttpResponse``.
You can define your own in ``settings.OAUTH_CALLBACK_VIEW``.
"""
return HttpResponse('Fake callback view.')
|
en
| 0.72705
|
# try to get custom callback view # try to treat it as Class Based View (CBV) # if it appears not to be CBV treat it like FBV # try to get custom authorize view # try to treat it as Class Based View (CBV) # if it appears not to be CBV treat it like FBV # set the oauth flag # Consumer # Check Parameters # Check Request Token # Verify Signature # Check Verifier # xAuth # Check Parameters # Check if Consumer allows xAuth # Check Signature # Handle Request Token #request_token = store.create_request_token(request, oauth_request, consumer, oauth_request.get('oauth_callback')) Test view for accessing a Protected Resource. Fake view for tests. It must return an ``HttpResponse``. You need to define your own in ``settings.OAUTH_AUTHORIZE_VIEW``. Fake view for tests. It must return an ``HttpResponse``. You can define your own in ``settings.OAUTH_CALLBACK_VIEW``.
| 2.030676
| 2
|
tests/test_loader.py
|
fossabot/chaostoolkit-lib
| 0
|
6627259
|
# -*- coding: utf-8 -*-
import json
import pytest
import requests_mock
from chaoslib.exceptions import InvalidSource
from chaoslib.loader import load_experiment
from chaoslib.types import Settings
def test_load_from_file(generic_experiment: str):
try:
load_experiment(generic_experiment)
except InvalidSource as x:
pytest.fail(str(x))
def test_load_invalid_filepath(generic_experiment: str):
with pytest.raises(InvalidSource) as x:
load_experiment("/tmp/xyuzye.txt")
assert 'Path "/tmp/xyuzye.txt" does not exist.' in str(x)
def test_load_from_http_without_auth(generic_experiment: str):
with requests_mock.mock() as m:
m.get(
'http://example.com/experiment.json', status_code=200,
headers={"Content-Type": "application/json"},
json=json.dumps(generic_experiment)
)
try:
load_experiment('http://example.com/experiment.json')
except InvalidSource as x:
pytest.fail(str(x))
def test_load_from_http_with_missing_auth(generic_experiment: str):
with requests_mock.mock() as m:
m.get('http://example.com/experiment.json', status_code=401)
with pytest.raises(InvalidSource) as x:
load_experiment('http://example.com/experiment.json')
def test_load_from_http_with_auth(settings: Settings, generic_experiment: str):
with requests_mock.mock() as m:
settings['auths'] = {
'example.com': {
'type': 'bearer',
'value': 'XYZ'
}
}
m.get(
'http://example.com/experiment.json', status_code=200,
request_headers={
"Authorization": "bearer XYZ",
"Accept": "application/json, application/x-yaml"
},
headers={"Content-Type": "application/json"},
json=json.dumps(generic_experiment))
try:
load_experiment('http://example.com/experiment.json', settings)
except InvalidSource as x:
pytest.fail(str(x))
|
# -*- coding: utf-8 -*-
import json
import pytest
import requests_mock
from chaoslib.exceptions import InvalidSource
from chaoslib.loader import load_experiment
from chaoslib.types import Settings
def test_load_from_file(generic_experiment: str):
try:
load_experiment(generic_experiment)
except InvalidSource as x:
pytest.fail(str(x))
def test_load_invalid_filepath(generic_experiment: str):
with pytest.raises(InvalidSource) as x:
load_experiment("/tmp/xyuzye.txt")
assert 'Path "/tmp/xyuzye.txt" does not exist.' in str(x)
def test_load_from_http_without_auth(generic_experiment: str):
with requests_mock.mock() as m:
m.get(
'http://example.com/experiment.json', status_code=200,
headers={"Content-Type": "application/json"},
json=json.dumps(generic_experiment)
)
try:
load_experiment('http://example.com/experiment.json')
except InvalidSource as x:
pytest.fail(str(x))
def test_load_from_http_with_missing_auth(generic_experiment: str):
with requests_mock.mock() as m:
m.get('http://example.com/experiment.json', status_code=401)
with pytest.raises(InvalidSource) as x:
load_experiment('http://example.com/experiment.json')
def test_load_from_http_with_auth(settings: Settings, generic_experiment: str):
with requests_mock.mock() as m:
settings['auths'] = {
'example.com': {
'type': 'bearer',
'value': 'XYZ'
}
}
m.get(
'http://example.com/experiment.json', status_code=200,
request_headers={
"Authorization": "bearer XYZ",
"Accept": "application/json, application/x-yaml"
},
headers={"Content-Type": "application/json"},
json=json.dumps(generic_experiment))
try:
load_experiment('http://example.com/experiment.json', settings)
except InvalidSource as x:
pytest.fail(str(x))
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.267242
| 2
|
python/tako/client/exception.py
|
vyomkeshj/tako
| 0
|
6627260
|
class TakoException(Exception):
pass
class TaskFailed(TakoException):
pass
|
class TakoException(Exception):
pass
class TaskFailed(TakoException):
pass
|
none
| 1
| 1.262175
| 1
|
|
tests/scripts/thread-cert/mesh_cop.py
|
BenShen98/ot-playground
| 1
|
6627261
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from binascii import hexlify
from enum import IntEnum
import io
import logging
import struct
from network_data import SubTlvsFactory
from tlvs_parsing import UnknownTlvFactory
import common
class TlvType(IntEnum):
CHANNEL = 0
PAN_ID = 1
EXTENDED_PANID = 2
NETWORK_NAME = 3
PSKC = 4
NETWORK_MASTER_KEY = 5
NETWORK_KEY_SEQUENCE_COUNTER = 6
NETWORK_MESH_LOCAL_PREFIX = 7
STEERING_DATA = 8
BORDER_AGENT_LOCATOR = 9
COMMISSIONER_ID = 10
COMMISSIONER_SESSION_ID = 11
SECURITY_POLICY = 12
GET = 13
ACTIVE_TIMESTAMP = 14
COMMISSIONER_UDP_PORT = 15
STATE = 16
JOINER_DTLS_ENCAPSULATION = 17
JOINER_UDP_PORT = 18
JOINER_IID = 19
JOINER_ROUTER_LOCATOR = 20
JOINER_ROUTER_KEK = 21
PROVISIONING_URL = 32
VENDOR_NAME = 33
VENDOR_MODEL = 34
VENDOR_SW_VERSION = 35
VENDOR_DATA = 36
VENDOR_STACK_VERSION = 37
UDP_ENCAPSULATION = 48
IPV6_ADDRESS = 49
PENDING_TIMESTAMP = 51
DELAY_TIMER = 52
CHANNEL_MASK = 53
COUNT = 54
PERIOD = 55
SCAN_DURATION = 56
ENERGY_LIST = 57
CSL_SYNCHRONIZED_TIMEOUT = 85
DISCOVERY_REQUEST = 128
DISCOVERY_RESPONSE = 129
class MeshCopState(IntEnum):
ACCEPT = 0x1
REJECT = 0xff
class MeshCopMessageType(IntEnum):
JOIN_FIN_REQ = (1,)
JOIN_FIN_RSP = (2,)
JOIN_ENT_NTF = (3,)
JOIN_ENT_RSP = 4
def create_mesh_cop_message_type_set():
return [
MeshCopMessageType.JOIN_FIN_REQ,
MeshCopMessageType.JOIN_FIN_RSP,
MeshCopMessageType.JOIN_ENT_NTF,
MeshCopMessageType.JOIN_ENT_RSP,
]
# Channel TLV (0)
class Channel(object):
def __init__(self, channel_page, channel):
self._channel_page = channel_page
self._channel = channel
@property
def channel_page(self):
return self._channel_page
@property
def channel(self):
return self._channel
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self._channel_page == other._channel_page and self._channel == other.__channel)
def __repr__(self):
return 'Channel(channel_page={},channel={})'.format(self._channel_page, self._channel)
def to_hex(self):
return struct.pack('>BBBH', TlvType.CHANNEL, 3, self.channel_page, self.channel)
class ChannelFactory(object):
def parse(self, data, message_info):
data_tp = struct.unpack('>BH', data.read(3))
channel_page = data_tp[0]
channel = data_tp[1]
return Channel(channel_page, channel)
# PanId TLV (1)
class Panid(object):
# TODO: Not implemented yet
pass
class PanidFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ExtendedPanid TLV (2)
class ExtendedPanid(object):
def __init__(self, extended_panid):
self._extended_panid = extended_panid
@property
def extended_panid(self):
return self._extended_panid
def __eq__(self, other):
return (isinstance(self, type(other)) and self.extended_panid == other.extended_panid)
def __repr__(self):
return "ExtendedPanid(extended_panid={})".format(self.extended_panid)
class ExtendedPanidFactory(object):
def parse(self, data, message_info):
extended_panid = struct.unpack(">Q", data.read(8))[0]
return ExtendedPanid(extended_panid)
# NetworkName TLV (3)
class NetworkName(object):
def __init__(self, network_name):
self._network_name = network_name
@property
def network_name(self):
return self._network_name
def __eq__(self, other):
return (isinstance(self, type(other)) and self.network_name == other.network_name)
def __repr__(self):
return "NetworkName(network_name={})".format(self.network_name)
class NetworkNameFactory(object):
def parse(self, data, message_info):
len = message_info.length
network_name = struct.unpack("{}s".format(10), data.read(len))[0]
return NetworkName(network_name)
# PSKc TLV (4)
class PSKc(object):
# TODO: Not implemented yet
pass
class PSKcFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkMasterKey TLV (5)
class NetworkMasterKey(object):
# TODO: Not implemented yet
pass
class NetworkMasterKeyFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkKeySequenceCounter TLV (6)
class NetworkKeySequenceCounter(object):
# TODO: Not implemented yet
pass
class NetworkKeySequenceCounterFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# NetworkMeshLocalPrefix TLV (7)
class NetworkMeshLocalPrefix(object):
# TODO: Not implemented yet
pass
class NetworkMeshLocalPrefixFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Steering Data TLV (8)
class SteeringData(object):
def __init__(self, bloom_filter):
self._bloom_filter = bloom_filter
@property
def bloom_filter(self):
return self._bloom_filter
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._bloom_filter == other._bloom_filter
def __repr__(self):
return "SteeringData(bloom_filter={})".format(hexlify(self._bloom_filter))
def to_hex(self):
bloom_filter_len = len(self.bloom_filter)
return (struct.pack('>BB', TlvType.STEERING_DATA, bloom_filter_len) + self.bloom_filter)
class SteeringDataFactory:
def parse(self, data, message_info):
bloom_filter = data.read(message_info.length)
return SteeringData(bloom_filter)
# Border Agent Locator TLV (9)
class BorderAgentLocator(object):
def __init__(self, address):
self._border_agent_locator = address
@property
def border_agent_locator(self):
return self._border_agent_locator
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._border_agent_locator == other._border_agent_locator
def __repr__(self):
return "BorderAgentLocator(rloc16={})".format(hex(self._border_agent_locator))
def to_hex(self):
return struct.pack('>BBH', TlvType.BORDER_AGENT_LOCATOR, 2, self.border_agent_locator)
class BorderAgentLocatorFactory:
def parse(self, data, message_info):
border_agent_locator = struct.unpack(">H", data.read(2))[0]
return BorderAgentLocator(border_agent_locator)
# CommissionerId TLV (10)
class CommissionerId(object):
def __init__(self, commissioner_id):
self._commissioner_id = commissioner_id
@property
def commissioner_id(self):
return self._commissioner_id
def __eq__(self, other):
return self.commissioner_id == other.commissioner_id
def __repr__(self):
return "CommissionerId(commissioner_id={})".format(self.commissioner_id)
class CommissionerIdFactory(object):
def parse(self, data, message_info):
commissioner_id = data.getvalue().decode('utf-8')
return CommissionerId(commissioner_id)
# Commissioner Session ID TLV (11)
class CommissionerSessionId(object):
def __init__(self, commissioner_session_id):
self._commissioner_session_id = commissioner_session_id
@property
def commissioner_session_id(self):
return self._commissioner_session_id
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._commissioner_session_id == other._commissioner_session_id
def __repr__(self):
return "CommissionerSessionId(commissioner_session_id={})".format(self._commissioner_session_id)
def to_hex(self):
return struct.pack(
'>BBH',
TlvType.COMMISSIONER_SESSION_ID,
2,
self.commissioner_session_id,
)
class CommissionerSessionIdFactory:
def parse(self, data, message_info):
session_id = struct.unpack(">H", data.read(2))[0]
return CommissionerSessionId(session_id)
# SecurityPolicy TLV (12)
class SecurityPolicy(object):
# TODO: Not implemented yet
pass
class SecurityPolicyFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Get TLV (13)
class Get(object):
# TODO: Not implemented yet
pass
class GetFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ActiveTimestamp TLV (14)
class ActiveTimestamp(object):
# TODO: Not implemented yet
pass
class ActiveTimestampFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Commissioner UDP Port TLV (15)
class CommissionerUdpPort(object):
def __init__(self, udp_port):
self._udp_port = udp_port
@property
def udp_port(self):
return self._udp_port
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self._udp_port == other._udp_port
def __repr__(self):
return "CommissionerUdpPort(udp_port={})".format(self._udp_port)
class CommissionerUdpPortFactory:
def parse(self, data, message_info):
udp_port = struct.unpack(">H", data.read(2))[0]
return CommissionerUdpPort(udp_port)
# State TLV (16)
class State(object):
def __init__(self, state):
self._state = state
@property
def state(self):
return self._state
def __eq__(self, other):
return self.state == other.state
def __repr__(self):
return "State(state={})".format(self.state)
class StateFactory:
def parse(self, data, message_info):
state = ord(data.read(1))
return State(state)
# JoinerDtlsEncapsulation TLV (17)
class JoinerDtlsEncapsulation(object):
# TODO: Not implemented yet
pass
class JoinerDtlsEncapsulationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerUdpPort TLV (18)
class JoinerUdpPort(object):
def __init__(self, udp_port):
self._udp_port = udp_port
@property
def udp_port(self):
return self._udp_port
def __eq__(self, other):
return (isinstance(self, type(other)) and self.udp_port == other.udp_port)
def __repr__(self):
return "JoinerUdpPort(udp_port={})".format(self.udp_port)
class JoinerUdpPortFactory(object):
def parse(self, data, message_info):
udp_port = struct.unpack(">H", data.read(2))[0]
return JoinerUdpPort(udp_port)
# JoinerIID TLV (19)
class JoinerIID(object):
# TODO: Not implemented yet
pass
class JoinerIIDFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterLocator TLV (20)
class JoinerRouterLocator(object):
# TODO: Not implemented yet
pass
class JoinerRouterLocatorFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterKEK TLV (21)
class JoinerRouterKEK(object):
# TODO: Not implemented yet
pass
class JoinerRouterKEKFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ProvisioningURL TLV (32)
class ProvisioningUrl(object):
    """Provisioning URL TLV (type 32) value."""

    def __init__(self, url):
        self._url = url

    @property
    def url(self):
        return self._url

    def __repr__(self):
        return "ProvisioningUrl(url={})".format(self.url)
class ProvisioningUrlFactory:
    """Decodes the whole TLV value buffer as a UTF-8 URL."""

    def parse(self, data, message_info):
        # BUG FIX: ``data`` is a BytesIO stream (see the sibling factories),
        # which has no ``decode`` method; fetch the underlying bytes first,
        # matching CommissionerIdFactory / VendorNameFactory.
        url = data.getvalue().decode('utf-8')
        return ProvisioningUrl(url)
# VendorName TLV (33)
class VendorName(object):
def __init__(self, vendor_name):
self._vendor_name = vendor_name
@property
def vendor_name(self):
return self._vendor_name
def __eq__(self, other):
return self.vendor_name == other.vendor_name
def __repr__(self):
return "VendorName(vendor_name={})".format(self.vendor_name)
class VendorNameFactory:
def parse(self, data, message_info):
vendor_name = data.getvalue().decode('utf-8')
return VendorName(vendor_name)
# VendorModel TLV (34)
class VendorModel(object):
def __init__(self, vendor_model):
self._vendor_model = vendor_model
@property
def vendor_model(self):
return self._vendor_model
def __eq__(self, other):
return self.vendor_model == other.vendor_model
def __repr__(self):
return "VendorModel(vendor_model={})".format(self.vendor_model)
class VendorModelFactory:
def parse(self, data, message_info):
vendor_model = data.getvalue().decode('utf-8')
return VendorModel(vendor_model)
# VendorSWVersion TLV (35)
class VendorSWVersion(object):
    """Vendor Software Version TLV (type 35) value (raw bytes)."""

    def __init__(self, vendor_sw_version):
        self._vendor_sw_version = vendor_sw_version

    @property
    def vendor_sw_version(self):
        return self._vendor_sw_version

    def __eq__(self, other):
        return self.vendor_sw_version == other.vendor_sw_version

    def __repr__(self):
        # BUG FIX: the original repr mislabelled this class as "VendorName".
        return "VendorSWVersion(vendor_sw_version={})".format(self.vendor_sw_version)
class VendorSWVersionFactory:
    """Returns the whole TLV value buffer as the software version bytes."""

    def parse(self, data, message_info):
        return VendorSWVersion(data.getvalue())
# VendorData TLV (36)
class VendorData(object):
    """Vendor Data TLV (type 36) value (opaque vendor-defined payload)."""

    def __init__(self, data):
        self._vendor_data = data

    @property
    def vendor_data(self):
        return self._vendor_data

    def __repr__(self):
        # BUG FIX: the original repr said "Vendor(url=...)" -- wrong class
        # name and wrong field label.
        return "VendorData(vendor_data={})".format(self.vendor_data)
class VendorDataFactory(object):
    """Wraps the TLV value buffer as-is in a VendorData instance."""

    def parse(self, data, message_info):
        return VendorData(data)
# VendorStackVersion TLV (37)
class VendorStackVersion(object):
    """Vendor Stack Version TLV (type 37) value: OUI plus version fields."""
    def __init__(self, stack_vendor_oui, build, rev, minor, major):
        self._stack_vendor_oui = stack_vendor_oui
        self._build = build
        self._rev = rev
        self._minor = minor
        self._major = major
        return
    @property
    def stack_vendor_oui(self):
        return self._stack_vendor_oui
    @property
    def build(self):
        return self._build
    @property
    def rev(self):
        return self._rev
    @property
    def minor(self):
        return self._minor
    @property
    def major(self):
        return self._major
    def __repr__(self):
        return "VendorStackVersion(vendor_stack_version={}, build={}, rev={}, minor={}, major={})".format(
            self.stack_vendor_oui, self.build, self.rev, self.minor, self.major)
class VendorStackVersionFactory:
    """Parses a Vendor Stack Version TLV value (consumes 2 + 4 bytes)."""
    def parse(self, data, message_info):
        # NOTE(review): only 2 bytes of OUI are read here, while the field
        # name suggests a vendor OUI (conventionally 3 bytes) -- confirm
        # against the Thread MeshCoP specification.
        stack_vendor_oui = struct.unpack(">H", data.read(2))[0]
        rest = struct.unpack(">BBBB", data.read(4))
        # NOTE(review): the nibble handling below looks inconsistent:
        # ``build`` ORs in an UNSHIFTED high nibble of rest[2], and
        # ``minor`` keeps its high nibble unshifted while ``major`` takes
        # the low nibble.  Verify the intended bit layout before relying
        # on these decoded values.
        build = rest[1] << 4 | (0xf0 & rest[2])
        rev = 0xF & rest[2]
        minor = rest[3] & 0xf0
        major = rest[3] & 0xF
        return VendorStackVersion(stack_vendor_oui, build, rev, minor, major)
# UdpEncapsulation TLV (48)
class UdpEncapsulation(object):
# TODO: Not implemented yet
pass
class UdpEncapsulationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Ipv6Address TLV (49)
class Ipv6Address(object):
# TODO: Not implemented yet
pass
class Ipv6AddressFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# PendingTimestamp TLV (51)
class PendingTimestamp(object):
# TODO: Not implemented yet
pass
class PendingTimestampFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# DelayTimer TLV (52)
class DelayTimer(object):
# TODO: Not implemented yet
pass
class DelayTimerFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ChannelMask TLV (53)
class ChannelMask(object):
# TODO: Not implemented yet
pass
class ChannelMaskFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Count TLV (54)
class Count(object):
# TODO: Not implemented yet
pass
class CountFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Period TLV (55)
class Period(object):
# TODO: Not implemented yet
pass
class PeriodFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# ScanDuration TLV (56)
class ScanDuration(object):
# TODO: Not implemented yet
pass
class ScanDurationFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# EnergyList TLV (57)
class EnergyList(object):
# TODO: Not implemented yet
pass
class EnergyListFactory(object):
# TODO: Not implemented yet
def parse(self, data, message_info):
raise NotImplementedError("TODO: Not implemented yet")
# Discovery Request TLV (128)
class DiscoveryRequest(object):
def __init__(self, version, joiner_flag):
self._version = version
self._joiner_flag = joiner_flag
@property
def version(self):
return self._version
@property
def joiner_flag(self):
return self._joiner_flag
def __eq__(self, other):
return (isinstance(self, type(other)) and self.version == other.version and
self.joiner_flag == other.joiner_flag)
def __repr__(self):
return "DiscoveryRequest(version={}, joiner_flag={})".format(self.version, self.joiner_flag)
class DiscoveryRequestFactory(object):
def parse(self, data, message_info):
data_byte = struct.unpack(">B", data.read(1))[0]
version = (data_byte & 0xf0) >> 4
joiner_flag = (data_byte & 0x08) >> 3
return DiscoveryRequest(version, joiner_flag)
# Discovery Response TLV (129)
class DiscoveryResponse(object):
def __init__(self, version, native_flag):
self._version = version
self._native_flag = native_flag
@property
def version(self):
return self._version
@property
def native_flag(self):
return self._native_flag
def __eq__(self, other):
return (isinstance(self, type(other)) and self.version == other.version and
self.native_flag == other.native_flag)
def __repr__(self):
return "DiscoveryResponse(version={}, native_flag={})".format(self.version, self.native_flag)
class DiscoveryResponseFactory(object):
def parse(self, data, message_info):
data_byte = struct.unpack(">B", data.read(1))[0]
version = (data_byte & 0xf0) >> 4
native_flag = (data_byte & 0x08) >> 3
return DiscoveryResponse(version, native_flag)
class MeshCopCommand(object):
def __init__(self, _type, tlvs):
self._type = _type
self._tlvs = tlvs
@property
def type(self):
return self._type
@property
def tlvs(self):
return self._tlvs
def __repr__(self):
tlvs_str = ", ".join(["{}".format(tlv) for tlv in self.tlvs])
return "MeshCopCommand(type={}, tlvs=[{}])".format(self.type, tlvs_str)
def create_deault_mesh_cop_msg_type_map():
return {
'JOIN_FIN.req': MeshCopMessageType.JOIN_FIN_REQ,
'JOIN_FIN.rsp': MeshCopMessageType.JOIN_FIN_RSP,
'JOIN_ENT.ntf': MeshCopMessageType.JOIN_ENT_NTF,
'JOIN_ENT.rsp': MeshCopMessageType.JOIN_ENT_RSP,
}
class MeshCopCommandFactory:
def __init__(self, tlvs_factories):
self._tlvs_factories = tlvs_factories
self._mesh_cop_msg_type_map = create_deault_mesh_cop_msg_type_map()
def _get_length(self, data):
return ord(data.read(1))
def _get_tlv_factory(self, _type):
try:
return self._tlvs_factories[_type]
except KeyError:
logging.error('Could not find TLV factory. Unsupported TLV type: {}'.format(_type))
return UnknownTlvFactory(_type)
def _parse_tlv(self, data):
_type = TlvType(ord(data.read(1)))
length = self._get_length(data)
value = data.read(length)
factory = self._get_tlv_factory(_type)
return factory.parse(io.BytesIO(value), None) # message_info not needed here
def _get_mesh_cop_msg_type(self, msg_type_str):
try:
return self._mesh_cop_msg_type_map[msg_type_str]
except KeyError:
raise KeyError('Mesh cop message type not found: {}'.format(msg_type_str))
def parse(self, cmd_type_str, data):
cmd_type = self._get_mesh_cop_msg_type(cmd_type_str)
tlvs = []
while data.tell() < len(data.getvalue()):
tlv = self._parse_tlv(data)
tlvs.append(tlv)
return MeshCopCommand(cmd_type, tlvs)
def create_default_mesh_cop_tlv_factories():
return {
TlvType.STATE: StateFactory(),
TlvType.PROVISIONING_URL: ProvisioningUrlFactory(),
TlvType.VENDOR_NAME: VendorNameFactory(),
TlvType.VENDOR_MODEL: VendorModelFactory(),
TlvType.VENDOR_SW_VERSION: VendorSWVersionFactory(),
TlvType.VENDOR_DATA: VendorDataFactory(),
TlvType.VENDOR_STACK_VERSION: VendorStackVersionFactory(),
}
class ThreadDiscoveryTlvsFactory(SubTlvsFactory):
def __init__(self, sub_tlvs_factories):
super(ThreadDiscoveryTlvsFactory, self).__init__(sub_tlvs_factories)
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from binascii import hexlify
from enum import IntEnum
import io
import logging
import struct
from network_data import SubTlvsFactory
from tlvs_parsing import UnknownTlvFactory
import common
class TlvType(IntEnum):
    """MeshCoP TLV type identifiers.

    Values mirror the Type field of each MeshCoP TLV; gaps in the numbering
    (e.g. 22-31, 38-47, 50) are TLV types not modelled in this module.
    """
    CHANNEL = 0
    PAN_ID = 1
    EXTENDED_PANID = 2
    NETWORK_NAME = 3
    PSKC = 4
    NETWORK_MASTER_KEY = 5
    NETWORK_KEY_SEQUENCE_COUNTER = 6
    NETWORK_MESH_LOCAL_PREFIX = 7
    STEERING_DATA = 8
    BORDER_AGENT_LOCATOR = 9
    COMMISSIONER_ID = 10
    COMMISSIONER_SESSION_ID = 11
    SECURITY_POLICY = 12
    GET = 13
    ACTIVE_TIMESTAMP = 14
    COMMISSIONER_UDP_PORT = 15
    STATE = 16
    JOINER_DTLS_ENCAPSULATION = 17
    JOINER_UDP_PORT = 18
    JOINER_IID = 19
    JOINER_ROUTER_LOCATOR = 20
    JOINER_ROUTER_KEK = 21
    PROVISIONING_URL = 32
    VENDOR_NAME = 33
    VENDOR_MODEL = 34
    VENDOR_SW_VERSION = 35
    VENDOR_DATA = 36
    VENDOR_STACK_VERSION = 37
    UDP_ENCAPSULATION = 48
    IPV6_ADDRESS = 49
    PENDING_TIMESTAMP = 51
    DELAY_TIMER = 52
    CHANNEL_MASK = 53
    COUNT = 54
    PERIOD = 55
    SCAN_DURATION = 56
    ENERGY_LIST = 57
    CSL_SYNCHRONIZED_TIMEOUT = 85
    DISCOVERY_REQUEST = 128
    DISCOVERY_RESPONSE = 129
class MeshCopState(IntEnum):
    """Result codes carried in the State TLV (16): accept or reject."""
    ACCEPT = 0x1
    REJECT = 0xff
class MeshCopMessageType(IntEnum):
    """Internal identifiers for the MeshCoP joiner/commissioner messages."""
    # FIX: the original wrote the first three values as one-element tuples
    # ((1,), (2,), (3,)).  IntEnum coerces a tuple through int(*value), so
    # the plain ints below are behaviorally identical but unambiguous.
    JOIN_FIN_REQ = 1
    JOIN_FIN_RSP = 2
    JOIN_ENT_NTF = 3
    JOIN_ENT_RSP = 4
def create_mesh_cop_message_type_set():
    """Return a list containing every known MeshCoP message type."""
    # Enum iteration yields members in definition order, so this is the
    # same list as enumerating the four members by hand.
    return list(MeshCopMessageType)
# Channel TLV (0)
class Channel(object):
    """Channel TLV (type 0) value: an 802.15.4 channel page and channel."""

    def __init__(self, channel_page, channel):
        self._channel_page = channel_page
        self._channel = channel

    @property
    def channel_page(self):
        return self._channel_page

    @property
    def channel(self):
        return self._channel

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        # BUG FIX: the original compared against ``other.__channel``, which
        # name-mangles to ``other._Channel__channel`` (an attribute that was
        # never set) and raises AttributeError on any comparison.
        return (self._channel_page == other._channel_page and self._channel == other._channel)

    def __repr__(self):
        return 'Channel(channel_page={},channel={})'.format(self._channel_page, self._channel)

    def to_hex(self):
        # Type (1B) | Length = 3 (1B) | Channel Page (1B) | Channel (2B, BE).
        return struct.pack('>BBBH', TlvType.CHANNEL, 3, self.channel_page, self.channel)
class ChannelFactory(object):
    """Builds a :class:`Channel` from a Channel TLV value buffer."""

    def parse(self, data, message_info):
        # One byte of channel page followed by a big-endian 16-bit channel.
        channel_page, channel = struct.unpack('>BH', data.read(3))
        return Channel(channel_page, channel)
# PanId TLV (1)
class Panid(object):
    """PAN ID TLV (type 1) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class PanidFactory(object):
    """Factory for the PAN ID TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# ExtendedPanid TLV (2)
class ExtendedPanid(object):
    """Extended PAN ID TLV (type 2) value: a 64-bit network identifier."""

    def __init__(self, extended_panid):
        self._extended_panid = extended_panid

    @property
    def extended_panid(self):
        return self._extended_panid

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return self.extended_panid == other.extended_panid

    def __repr__(self):
        return "ExtendedPanid(extended_panid={})".format(self.extended_panid)
class ExtendedPanidFactory(object):
    """Parses an Extended PAN ID TLV value (8 bytes, big-endian)."""

    def parse(self, data, message_info):
        (extended_panid,) = struct.unpack(">Q", data.read(8))
        return ExtendedPanid(extended_panid)
# NetworkName TLV (3)
class NetworkName(object):
    """Network Name TLV (type 3) value: the human-readable network name."""

    def __init__(self, network_name):
        self._network_name = network_name

    @property
    def network_name(self):
        return self._network_name

    def __eq__(self, other):
        return (isinstance(self, type(other)) and self.network_name == other.network_name)

    def __repr__(self):
        return "NetworkName(network_name={})".format(self.network_name)
class NetworkNameFactory(object):
    """Parses a Network Name TLV value of ``message_info.length`` bytes."""

    def parse(self, data, message_info):
        # BUG FIX: the original unpacked with a hard-coded "10s" format
        # string, which raises struct.error for any network name whose
        # length differs from 10 bytes.  Size the format from the actual
        # TLV length (and stop shadowing the ``len`` builtin).
        length = message_info.length
        network_name = struct.unpack("{}s".format(length), data.read(length))[0]
        return NetworkName(network_name)
# PSKc TLV (4)
class PSKc(object):
    """PSKc TLV (type 4) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class PSKcFactory(object):
    """Factory for the PSKc TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# NetworkMasterKey TLV (5)
class NetworkMasterKey(object):
    """Network Master Key TLV (type 5) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class NetworkMasterKeyFactory(object):
    """Factory for the Network Master Key TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# NetworkKeySequenceCounter TLV (6)
class NetworkKeySequenceCounter(object):
    """Network Key Sequence Counter TLV (type 6) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class NetworkKeySequenceCounterFactory(object):
    """Factory for the Network Key Sequence Counter TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# NetworkMeshLocalPrefix TLV (7)
class NetworkMeshLocalPrefix(object):
    """Network Mesh-Local Prefix TLV (type 7) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class NetworkMeshLocalPrefixFactory(object):
    """Factory for the Network Mesh-Local Prefix TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Steering Data TLV (8)
class SteeringData(object):
    """Steering Data TLV (type 8) value: the joiner-steering Bloom filter."""

    def __init__(self, bloom_filter):
        self._bloom_filter = bloom_filter

    @property
    def bloom_filter(self):
        return self._bloom_filter

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self._bloom_filter == other._bloom_filter

    def __repr__(self):
        return "SteeringData(bloom_filter={})".format(hexlify(self._bloom_filter))

    def to_hex(self):
        # Type (1B) | Length (1B) | raw bloom-filter bytes.
        filter_len = len(self.bloom_filter)
        return struct.pack('>BB', TlvType.STEERING_DATA, filter_len) + self.bloom_filter
class SteeringDataFactory:
    """Parses a Steering Data TLV value of ``message_info.length`` bytes."""

    def parse(self, data, message_info):
        return SteeringData(data.read(message_info.length))
# Border Agent Locator TLV (9)
class BorderAgentLocator(object):
    """Border Agent Locator TLV (type 9) value: the agent's RLOC16."""

    def __init__(self, address):
        self._border_agent_locator = address

    @property
    def border_agent_locator(self):
        return self._border_agent_locator

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self._border_agent_locator == other._border_agent_locator

    def __repr__(self):
        return "BorderAgentLocator(rloc16={})".format(hex(self._border_agent_locator))

    def to_hex(self):
        # Type (1B) | Length = 2 (1B) | RLOC16 (2B, big-endian).
        return struct.pack('>BBH', TlvType.BORDER_AGENT_LOCATOR, 2, self.border_agent_locator)
class BorderAgentLocatorFactory:
    """Parses a Border Agent Locator TLV value (2 bytes, big-endian)."""

    def parse(self, data, message_info):
        (locator,) = struct.unpack(">H", data.read(2))
        return BorderAgentLocator(locator)
# CommissionerId TLV (10)
class CommissionerId(object):
    """Commissioner ID TLV (type 10) value: a UTF-8 commissioner name."""

    def __init__(self, commissioner_id):
        self._commissioner_id = commissioner_id

    @property
    def commissioner_id(self):
        return self._commissioner_id

    def __eq__(self, other):
        return self.commissioner_id == other.commissioner_id

    def __repr__(self):
        return "CommissionerId(commissioner_id={})".format(self.commissioner_id)
class CommissionerIdFactory(object):
    """Decodes the whole TLV value buffer as a UTF-8 commissioner ID."""

    def parse(self, data, message_info):
        return CommissionerId(data.getvalue().decode('utf-8'))
# Commissioner Session ID TLV (11)
class CommissionerSessionId(object):
    """Commissioner Session ID TLV (type 11) value."""

    def __init__(self, commissioner_session_id):
        self._commissioner_session_id = commissioner_session_id

    @property
    def commissioner_session_id(self):
        return self._commissioner_session_id

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self._commissioner_session_id == other._commissioner_session_id

    def __repr__(self):
        return "CommissionerSessionId(commissioner_session_id={})".format(self._commissioner_session_id)

    def to_hex(self):
        # Type (1B) | Length = 2 (1B) | session id (2B, big-endian).
        return struct.pack(
            '>BBH',
            TlvType.COMMISSIONER_SESSION_ID,
            2,
            self.commissioner_session_id,
        )
class CommissionerSessionIdFactory:
    """Parses a Commissioner Session ID TLV value (2 bytes, big-endian)."""

    def parse(self, data, message_info):
        (session_id,) = struct.unpack(">H", data.read(2))
        return CommissionerSessionId(session_id)
# SecurityPolicy TLV (12)
class SecurityPolicy(object):
    """Security Policy TLV (type 12) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class SecurityPolicyFactory(object):
    """Factory for the Security Policy TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Get TLV (13)
class Get(object):
    """Get TLV (type 13) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class GetFactory(object):
    """Factory for the Get TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# ActiveTimestamp TLV (14)
class ActiveTimestamp(object):
    """Active Timestamp TLV (type 14) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class ActiveTimestampFactory(object):
    """Factory for the Active Timestamp TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Commissioner UDP Port TLV (15)
class CommissionerUdpPort(object):
    """Commissioner UDP Port TLV (type 15) value."""

    def __init__(self, udp_port):
        self._udp_port = udp_port

    @property
    def udp_port(self):
        return self._udp_port

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self._udp_port == other._udp_port

    def __repr__(self):
        return "CommissionerUdpPort(udp_port={})".format(self._udp_port)
class CommissionerUdpPortFactory:
    """Parses a Commissioner UDP Port TLV value (2 bytes, big-endian)."""

    def parse(self, data, message_info):
        (udp_port,) = struct.unpack(">H", data.read(2))
        return CommissionerUdpPort(udp_port)
# State TLV (16)
class State(object):
    """State TLV (type 16) value; see MeshCopState for the known codes."""

    def __init__(self, state):
        self._state = state

    @property
    def state(self):
        return self._state

    def __eq__(self, other):
        return self.state == other.state

    def __repr__(self):
        return "State(state={})".format(self.state)
class StateFactory:
    """Parses a State TLV value (a single byte)."""

    def parse(self, data, message_info):
        return State(ord(data.read(1)))
# JoinerDtlsEncapsulation TLV (17)
class JoinerDtlsEncapsulation(object):
    """Joiner DTLS Encapsulation TLV (type 17) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class JoinerDtlsEncapsulationFactory(object):
    """Factory for the Joiner DTLS Encapsulation TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# JoinerUdpPort TLV (18)
class JoinerUdpPort(object):
    """Joiner UDP Port TLV (type 18) value."""

    def __init__(self, udp_port):
        self._udp_port = udp_port

    @property
    def udp_port(self):
        return self._udp_port

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return self.udp_port == other.udp_port

    def __repr__(self):
        return "JoinerUdpPort(udp_port={})".format(self.udp_port)
class JoinerUdpPortFactory(object):
    """Parses a Joiner UDP Port TLV value (2 bytes, big-endian)."""

    def parse(self, data, message_info):
        (udp_port,) = struct.unpack(">H", data.read(2))
        return JoinerUdpPort(udp_port)
# JoinerIID TLV (19)
class JoinerIID(object):
    """Joiner IID TLV (type 19) value. Placeholder: not implemented."""
    # TODO: Not implemented yet
    pass
class JoinerIIDFactory(object):
    """Factory for the Joiner IID TLV. Placeholder: not implemented."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterLocator TLV (20)
class JoinerRouterLocator(object):
    """Joiner Router Locator TLV (type 20) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class JoinerRouterLocatorFactory(object):
    """Factory for the Joiner Router Locator TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# JoinerRouterKEK TLV (21)
class JoinerRouterKEK(object):
    """Joiner Router KEK TLV (type 21) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class JoinerRouterKEKFactory(object):
    """Factory for the Joiner Router KEK TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# ProvisioningURL TLV (32)
class ProvisioningUrl(object):
    """Provisioning URL TLV (type 32) value."""

    def __init__(self, url):
        self._url = url

    @property
    def url(self):
        return self._url

    def __repr__(self):
        return "ProvisioningUrl(url={})".format(self.url)
class ProvisioningUrlFactory:
    """Decodes the whole TLV value buffer as a UTF-8 URL."""

    def parse(self, data, message_info):
        # BUG FIX: ``data`` is a BytesIO stream (see the sibling factories),
        # which has no ``decode`` method; fetch the underlying bytes first,
        # matching CommissionerIdFactory / VendorNameFactory.
        url = data.getvalue().decode('utf-8')
        return ProvisioningUrl(url)
# VendorName TLV (33)
class VendorName(object):
    """Vendor Name TLV (type 33) value: a UTF-8 vendor name."""

    def __init__(self, vendor_name):
        self._vendor_name = vendor_name

    @property
    def vendor_name(self):
        return self._vendor_name

    def __eq__(self, other):
        return self.vendor_name == other.vendor_name

    def __repr__(self):
        return "VendorName(vendor_name={})".format(self.vendor_name)
class VendorNameFactory:
    """Decodes the whole TLV value buffer as a UTF-8 vendor name."""

    def parse(self, data, message_info):
        return VendorName(data.getvalue().decode('utf-8'))
# VendorModel TLV (34)
class VendorModel(object):
    """Vendor Model TLV (type 34) value: a UTF-8 model string."""

    def __init__(self, vendor_model):
        self._vendor_model = vendor_model

    @property
    def vendor_model(self):
        return self._vendor_model

    def __eq__(self, other):
        return self.vendor_model == other.vendor_model

    def __repr__(self):
        return "VendorModel(vendor_model={})".format(self.vendor_model)
class VendorModelFactory:
    """Decodes the whole TLV value buffer as a UTF-8 model string."""

    def parse(self, data, message_info):
        return VendorModel(data.getvalue().decode('utf-8'))
# VendorSWVersion TLV (35)
class VendorSWVersion(object):
    """Vendor Software Version TLV (type 35) value (raw bytes)."""

    def __init__(self, vendor_sw_version):
        self._vendor_sw_version = vendor_sw_version

    @property
    def vendor_sw_version(self):
        return self._vendor_sw_version

    def __eq__(self, other):
        return self.vendor_sw_version == other.vendor_sw_version

    def __repr__(self):
        # BUG FIX: the original repr mislabelled this class as "VendorName".
        return "VendorSWVersion(vendor_sw_version={})".format(self.vendor_sw_version)
class VendorSWVersionFactory:
    """Returns the whole TLV value buffer as the software version bytes."""

    def parse(self, data, message_info):
        return VendorSWVersion(data.getvalue())
# VendorData TLV (36)
class VendorData(object):
    """Vendor Data TLV (type 36) value (opaque vendor-defined payload)."""

    def __init__(self, data):
        self._vendor_data = data

    @property
    def vendor_data(self):
        return self._vendor_data

    def __repr__(self):
        # BUG FIX: the original repr said "Vendor(url=...)" -- wrong class
        # name and wrong field label.
        return "VendorData(vendor_data={})".format(self.vendor_data)
class VendorDataFactory(object):
    """Wraps the TLV value buffer as-is in a VendorData instance."""

    def parse(self, data, message_info):
        return VendorData(data)
# VendorStackVersion TLV (37)
class VendorStackVersion(object):
    """Vendor Stack Version TLV (type 37) value: OUI plus version fields."""
    def __init__(self, stack_vendor_oui, build, rev, minor, major):
        self._stack_vendor_oui = stack_vendor_oui
        self._build = build
        self._rev = rev
        self._minor = minor
        self._major = major
        return
    @property
    def stack_vendor_oui(self):
        return self._stack_vendor_oui
    @property
    def build(self):
        return self._build
    @property
    def rev(self):
        return self._rev
    @property
    def minor(self):
        return self._minor
    @property
    def major(self):
        return self._major
    def __repr__(self):
        return "VendorStackVersion(vendor_stack_version={}, build={}, rev={}, minor={}, major={})".format(
            self.stack_vendor_oui, self.build, self.rev, self.minor, self.major)
class VendorStackVersionFactory:
    """Parses a Vendor Stack Version TLV value (consumes 2 + 4 bytes)."""
    def parse(self, data, message_info):
        # NOTE(review): only 2 bytes of OUI are read here, while the field
        # name suggests a vendor OUI (conventionally 3 bytes) -- confirm
        # against the Thread MeshCoP specification.
        stack_vendor_oui = struct.unpack(">H", data.read(2))[0]
        rest = struct.unpack(">BBBB", data.read(4))
        # NOTE(review): the nibble handling below looks inconsistent:
        # ``build`` ORs in an UNSHIFTED high nibble of rest[2], and
        # ``minor`` keeps its high nibble unshifted while ``major`` takes
        # the low nibble.  Verify the intended bit layout before relying
        # on these decoded values.
        build = rest[1] << 4 | (0xf0 & rest[2])
        rev = 0xF & rest[2]
        minor = rest[3] & 0xf0
        major = rest[3] & 0xF
        return VendorStackVersion(stack_vendor_oui, build, rev, minor, major)
# UdpEncapsulation TLV (48)
class UdpEncapsulation(object):
    """UDP Encapsulation TLV (type 48) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class UdpEncapsulationFactory(object):
    """Factory for the UDP Encapsulation TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Ipv6Address TLV (49)
class Ipv6Address(object):
    """IPv6 Address TLV (type 49) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class Ipv6AddressFactory(object):
    """Factory for the IPv6 Address TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# PendingTimestamp TLV (51)
class PendingTimestamp(object):
    """Pending Timestamp TLV (type 51) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class PendingTimestampFactory(object):
    """Factory for the Pending Timestamp TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# DelayTimer TLV (52)
class DelayTimer(object):
    """Delay Timer TLV (type 52) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class DelayTimerFactory(object):
    """Factory for the Delay Timer TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# ChannelMask TLV (53)
class ChannelMask(object):
    """Channel Mask TLV (type 53) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class ChannelMaskFactory(object):
    """Factory for the Channel Mask TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Count TLV (54)
class Count(object):
    """Count TLV (type 54) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class CountFactory(object):
    """Factory for the Count TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Period TLV (55)
class Period(object):
    """Period TLV (type 55) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class PeriodFactory(object):
    """Factory for the Period TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# ScanDuration TLV (56)
class ScanDuration(object):
    """Scan Duration TLV (type 56) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class ScanDurationFactory(object):
    """Factory for the Scan Duration TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# EnergyList TLV (57)
class EnergyList(object):
    """Energy List TLV (type 57) value. Placeholder."""
    # TODO: Not implemented yet
    pass
class EnergyListFactory(object):
    """Factory for the Energy List TLV. Placeholder."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
# Discovery Request TLV (128)
class DiscoveryRequest(object):
    """Discovery Request TLV (type 128): protocol version and joiner flag."""

    def __init__(self, version, joiner_flag):
        self._version = version
        self._joiner_flag = joiner_flag

    @property
    def version(self):
        return self._version

    @property
    def joiner_flag(self):
        return self._joiner_flag

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return self.version == other.version and self.joiner_flag == other.joiner_flag

    def __repr__(self):
        return "DiscoveryRequest(version={}, joiner_flag={})".format(self.version, self.joiner_flag)
class DiscoveryRequestFactory(object):
    """Parses a Discovery Request TLV value: one byte, version<<4 | J<<3."""

    def parse(self, data, message_info):
        (octet,) = struct.unpack(">B", data.read(1))
        return DiscoveryRequest(octet >> 4, (octet >> 3) & 0x01)
# Discovery Response TLV (129)
class DiscoveryResponse(object):
    """Discovery Response TLV (type 129): protocol version and native flag."""

    def __init__(self, version, native_flag):
        self._version = version
        self._native_flag = native_flag

    @property
    def version(self):
        return self._version

    @property
    def native_flag(self):
        return self._native_flag

    def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        return self.version == other.version and self.native_flag == other.native_flag

    def __repr__(self):
        return "DiscoveryResponse(version={}, native_flag={})".format(self.version, self.native_flag)
class DiscoveryResponseFactory(object):
    """Parses a Discovery Response TLV value: one byte, version<<4 | N<<3."""

    def parse(self, data, message_info):
        (octet,) = struct.unpack(">B", data.read(1))
        return DiscoveryResponse(octet >> 4, (octet >> 3) & 0x01)
class MeshCopCommand(object):
    """A parsed MeshCoP command: its message type plus the decoded TLVs."""

    def __init__(self, _type, tlvs):
        self._type = _type
        self._tlvs = tlvs

    @property
    def type(self):
        return self._type

    @property
    def tlvs(self):
        return self._tlvs

    def __repr__(self):
        joined = ", ".join(str(tlv) for tlv in self.tlvs)
        return "MeshCopCommand(type={}, tlvs=[{}])".format(self.type, joined)
def create_deault_mesh_cop_msg_type_map():
    """Map CoAP-style MeshCoP message names to MeshCopMessageType values.

    NOTE(review): "deault" is a typo for "default"; the name is kept
    unchanged because callers reference it verbatim.
    """
    return {
        'JOIN_FIN.req': MeshCopMessageType.JOIN_FIN_REQ,
        'JOIN_FIN.rsp': MeshCopMessageType.JOIN_FIN_RSP,
        'JOIN_ENT.ntf': MeshCopMessageType.JOIN_ENT_NTF,
        'JOIN_ENT.rsp': MeshCopMessageType.JOIN_ENT_RSP,
    }
class MeshCopCommandFactory:
    """Parses a raw MeshCoP command payload into a MeshCopCommand.

    ``tlvs_factories`` maps TlvType values to per-TLV factories; unknown
    TLV types fall back to UnknownTlvFactory so parsing never aborts.
    """
    def __init__(self, tlvs_factories):
        self._tlvs_factories = tlvs_factories
        self._mesh_cop_msg_type_map = create_deault_mesh_cop_msg_type_map()
    def _get_length(self, data):
        # TLV length is a single byte; extended lengths are not handled.
        return ord(data.read(1))
    def _get_tlv_factory(self, _type):
        try:
            return self._tlvs_factories[_type]
        except KeyError:
            logging.error('Could not find TLV factory. Unsupported TLV type: {}'.format(_type))
            return UnknownTlvFactory(_type)
    def _parse_tlv(self, data):
        # Wire layout: Type (1B) | Length (1B) | Value (Length bytes).
        _type = TlvType(ord(data.read(1)))
        length = self._get_length(data)
        value = data.read(length)
        factory = self._get_tlv_factory(_type)
        return factory.parse(io.BytesIO(value), None)  # message_info not needed here
    def _get_mesh_cop_msg_type(self, msg_type_str):
        try:
            return self._mesh_cop_msg_type_map[msg_type_str]
        except KeyError:
            raise KeyError('Mesh cop message type not found: {}'.format(msg_type_str))
    def parse(self, cmd_type_str, data):
        # Consume TLVs until the BytesIO buffer is exhausted.
        cmd_type = self._get_mesh_cop_msg_type(cmd_type_str)
        tlvs = []
        while data.tell() < len(data.getvalue()):
            tlv = self._parse_tlv(data)
            tlvs.append(tlv)
        return MeshCopCommand(cmd_type, tlvs)
def create_default_mesh_cop_tlv_factories():
    """Return the default TlvType -> factory map for MeshCopCommandFactory."""
    return {
        TlvType.STATE: StateFactory(),
        TlvType.PROVISIONING_URL: ProvisioningUrlFactory(),
        TlvType.VENDOR_NAME: VendorNameFactory(),
        TlvType.VENDOR_MODEL: VendorModelFactory(),
        TlvType.VENDOR_SW_VERSION: VendorSWVersionFactory(),
        TlvType.VENDOR_DATA: VendorDataFactory(),
        TlvType.VENDOR_STACK_VERSION: VendorStackVersionFactory(),
    }
class ThreadDiscoveryTlvsFactory(SubTlvsFactory):
    """Factory for the sub-TLVs carried inside a Thread Discovery TLV."""

    def __init__(self, sub_tlvs_factories):
        # Zero-argument super() is equivalent here (Python 3).
        super().__init__(sub_tlvs_factories)
|
en
| 0.582247
|
#!/usr/bin/env python3 # # Copyright (c) 2019, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# # Channel TLV (0) # PanId TLV (1) # TODO: Not implemented yet # TODO: Not implemented yet # ExtendedPanid TLV (2) # NetworkName TLV (3) # PSKc TLV (4) # TODO: Not implemented yet # TODO: Not implemented yet # NetworkMasterKey TLV (5) # TODO: Not implemented yet # TODO: Not implemented yet # NetworkKeySequenceCounter TLV (6) # TODO: Not implemented yet # TODO: Not implemented yet # NetworkMeshLocalPrefix TLV (7) # TODO: Not implemented yet # TODO: Not implemented yet # Steering Data TLV (8) # Border Agent Locator TLV (9) # CommissionerId TLV (10) # Commissioner Session ID TLV (11) # SecurityPolicy TLV (12) # TODO: Not implemented yet # TODO: Not implemented yet # Get TLV (13) # TODO: Not implemented yet # TODO: Not implemented yet # ActiveTimestamp TLV (14) # TODO: Not implemented yet # TODO: Not implemented yet # Commissioner UDP Port TLV (15) # State TLV (16) # JoinerDtlsEncapsulation TLV (17) # TODO: Not implemented yet # TODO: Not implemented yet # JoinerUdpPort TLV (18) # JoinerIID TLV (19) # TODO: Not implemented yet # TODO: Not implemented yet # JoinerRouterLocator TLV (20) # TODO: Not implemented yet # TODO: Not implemented yet # JoinerRouterKEK TLV (21) # TODO: Not implemented yet # TODO: Not implemented yet # ProvisioningURL TLV (32) # VendorName TLV (33) # VendorModel TLV (34) # VendorSWVersion TLV (35) # VendorData TLV (36) # VendorStackVersion TLV (37) # UdpEncapsulation TLV (48) # TODO: Not implemented yet # TODO: Not implemented yet # Ipv6Address TLV (49) # TODO: Not implemented yet # TODO: Not implemented yet # PendingTimestamp TLV (51) # TODO: Not implemented yet # TODO: Not implemented yet # DelayTimer TLV (52) # TODO: Not implemented yet # TODO: Not implemented yet # ChannelMask TLV (53) # TODO: Not implemented yet # TODO: Not implemented yet # Count TLV (54) # TODO: Not implemented yet # TODO: Not implemented yet # Period TLV (55) # TODO: Not implemented yet # TODO: Not implemented yet # ScanDuration TLV (56) # TODO: Not implemented yet # TODO: 
Not implemented yet # EnergyList TLV (57) # TODO: Not implemented yet # TODO: Not implemented yet # Discovery Request TLV (128) # Discovery Response TLV (128) # message_info not needed here
| 1.246519
| 1
|
core/management/commands/showscripts.py
|
the-deep/DEEPL
| 6
|
6627262
|
import subprocess
import re
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = "Command to list available scripts to be run by command 'runscript'"

    def add_arguments(self, parser):
        """No extra CLI arguments are needed for this command."""
        # nothing to do here
        pass

    def handle(self, *args, **options):
        """Find every scripts/ directory below CWD and print the .py modules inside.

        Dunder-prefixed files (e.g. __init__.py) are skipped, and the .py
        extension is stripped so the names can be fed to 'runscript'.
        """
        # `find -type d -name scripts -exec ls {} ;` lists the contents of
        # every directory named "scripts" under the current working directory.
        script = [
            'find', '-type', 'd', '-name',
            'scripts', '-exec', 'ls', '{}', ';'
        ]
        p = subprocess.Popen(script, stdout=subprocess.PIPE)
        o, e = p.communicate()
        files = o.split()
        scriptnames = []
        for f in files:
            fstr = f.decode()  # f is bytes
            if re.search(r'^__', fstr):
                continue
            # Raw strings fix the invalid '\.' escape sequence, which raises
            # a SyntaxWarning on modern Python, without changing the regex.
            if not re.search(r'\.py$', fstr):
                continue
            scriptnames.append(re.sub(r'\.py$', '', fstr))
        if not scriptnames:
            print("-- NO scripts available --")
            return
        print('========================')
        print(' The scripts available:')
        print('========================')
        for name in scriptnames:
            print('-', name)
|
import subprocess
import re
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Command to list available scripts to be run by command 'runscript'"
def add_arguments(self, parser):
# nothing to do here
pass
def handle(self, *args, **options):
script = [
'find', '-type', 'd', '-name',
'scripts', '-exec', 'ls', '{}', ';'
]
p = subprocess.Popen(script, stdout=subprocess.PIPE)
o, e = p.communicate()
files = o.split()
scriptnames = []
for f in files:
fstr = f.decode() # f is bytes
if re.search('^__', fstr):
continue
if not re.search('\.py$', fstr):
continue
scriptnames.append(re.sub('\.py$', '', fstr))
if not scriptnames:
print("-- NO scripts available --")
return
print('========================')
print(' The scripts available:')
print('========================')
for name in scriptnames:
print('-', name)
|
en
| 0.960739
|
# nothing to do here # f is bytes
| 2.346319
| 2
|
src/03-network/3_mqtt_client.py
|
davidalexisnyt/micropython-workshop
| 3
|
6627263
|
import network
from umqtt.robust import MQTTClient
import secrets
# Bring up the WiFi station (client) interface and start association using
# credentials kept out of source control in secrets.py.
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
wifi.connect(secrets.wifi_network, secrets.wifi_password)
# NOTE(review): busy-wait with no timeout — this hangs forever if the
# credentials are wrong or the AP is unreachable.
while not wifi.isconnected():
    pass
# Define some identifying information for our sensor node
DEVICE_ID = 'sensor1'
# Connect to the MQTT broker
print("Connecting to Mqtt...")
mqtt_client = MQTTClient(client_id=DEVICE_ID,
                         server=secrets.mqtt_server,
                         user=secrets.mqtt_user,
                         password=secrets.mqtt_password,
                         ssl=False)
mqtt_client.connect()
# Publish a test message to verify connectivity (plain TCP: ssl=False above).
mqtt_client.publish('sensors/hello', 'Hello MQTT!')
|
import network
from umqtt.robust import MQTTClient
import secrets
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
wifi.connect(secrets.wifi_network, secrets.wifi_password)
while not wifi.isconnected():
pass
# Define some identifying information for our sensor node
DEVICE_ID = 'sensor1'
# Connect to the MQTT broker
print("Connecting to Mqtt...")
mqtt_client = MQTTClient(client_id=DEVICE_ID,
server=secrets.mqtt_server,
user=secrets.mqtt_user,
password=secrets.mqtt_password,
ssl=False)
mqtt_client.connect()
mqtt_client.publish('sensors/hello', 'Hello MQTT!')
|
en
| 0.80722
|
# Define some identifying information for our sensor node # Connect to the MQTT broker
| 3.208906
| 3
|
usaspending_api/broker/helpers/set_legal_entity_boolean_fields.py
|
g4brielvs/usaspending-api
| 217
|
6627264
|
from usaspending_api.broker.helpers.build_business_categories_boolean_dict import build_business_categories_boolean_dict
def set_legal_entity_boolean_fields(row):
    """In place, overwrite the row's business-category fields with boolean values."""
    row.update(build_business_categories_boolean_dict(row))
|
from usaspending_api.broker.helpers.build_business_categories_boolean_dict import build_business_categories_boolean_dict
def set_legal_entity_boolean_fields(row):
""" in place updates to specific fields to be mapped as booleans """
legal_entity_bool_dict = build_business_categories_boolean_dict(row)
for key in legal_entity_bool_dict:
row[key] = legal_entity_bool_dict[key]
|
en
| 0.942111
|
in place updates to specific fields to be mapped as booleans
| 2.02352
| 2
|
hyper/ssl_compat.py
|
chripede/hyper
| 0
|
6627265
|
# -*- coding: utf-8 -*-
"""
hyper/ssl_compat
~~~~~~~~~
Shoves pyOpenSSL into an API that looks like the standard Python 3.x ssl
module.
Currently exposes exactly those attributes, classes, and methods that we
actually use in hyper (all method signatures are complete, however). May be
expanded to something more general-purpose in the future.
"""
try:
import StringIO as BytesIO
except ImportError:
from io import BytesIO
import errno
import socket
import time
from OpenSSL import SSL as ossl
from service_identity.pyopenssl import verify_hostname as _verify
# Map the stdlib ssl verification-mode constants onto pyOpenSSL's flags.
CERT_NONE = ossl.VERIFY_NONE
CERT_REQUIRED = ossl.VERIFY_PEER | ossl.VERIFY_FAIL_IF_NO_PEER_CERT
# stdlib-ssl attribute name -> pyOpenSSL attribute name; each one is exported
# at module level only if the installed pyOpenSSL build provides it.
_OPENSSL_ATTRS = dict(
    OP_NO_COMPRESSION='OP_NO_COMPRESSION',
    PROTOCOL_TLSv1_2='TLSv1_2_METHOD',
    PROTOCOL_SSLv23='SSLv23_METHOD',
)
for external, internal in _OPENSSL_ATTRS.items():
    value = getattr(ossl, internal, None)
    if value:
        locals()[external] = value
OP_ALL = 0
# TODO: Find out the names of these other flags.
# Sets bit 31 plus bits 0-9 — presumably mirroring OpenSSL's SSL_OP_ALL
# bug-workaround mask; TODO confirm against the OpenSSL headers.
for bit in [31] + list(range(10)):
    OP_ALL |= 1 << bit
HAS_NPN = True
def _proxy(method):
def inner(self, *args, **kwargs):
return getattr(self._conn, method)(*args, **kwargs)
return inner
# Referenced in hyper/http20/connection.py. These values come
# from the python ssl package, and must be defined in this file
# for hyper to work in python versions <2.7.9
SSL_ERROR_WANT_READ = 2
SSL_ERROR_WANT_WRITE = 3
# TODO missing some attributes
class SSLError(OSError):
    """Stand-in for the stdlib ssl.SSLError base class."""
    pass
class CertificateError(SSLError):
    """Stand-in for stdlib ssl.CertificateError (hostname verification failure)."""
    pass
def verify_hostname(ssl_sock, server_hostname):
    """
    A method nearly compatible with the stdlib's match_hostname.

    Delegates the actual check to service_identity's verify_hostname,
    normalising a bytes hostname to str first.
    """
    if isinstance(server_hostname, bytes):
        server_hostname = server_hostname.decode('ascii')
    return _verify(ssl_sock._conn, server_hostname)
class SSLSocket(object):
    """pyOpenSSL-backed socket mimicking the stdlib ssl.SSLSocket API."""

    SSL_TIMEOUT = 3   # seconds to keep retrying a blocked SSL call
    SSL_RETRY = .01   # sleep between retries on WantRead/WantWrite

    def __init__(self, conn, server_side, do_handshake_on_connect,
                 suppress_ragged_eofs, server_hostname, check_hostname):
        self._conn = conn
        self._do_handshake_on_connect = do_handshake_on_connect
        self._suppress_ragged_eofs = suppress_ragged_eofs
        self._check_hostname = check_hostname
        if server_side:
            self._conn.set_accept_state()
        else:
            if server_hostname:
                # SNI: tell the server which certificate we expect.
                self._conn.set_tlsext_host_name(
                    server_hostname.encode('utf-8')
                )
            self._server_hostname = server_hostname
            # FIXME does this override do_handshake_on_connect=False?
            self._conn.set_connect_state()
        if self.connected and self._do_handshake_on_connect:
            self.do_handshake()

    @property
    def connected(self):
        """True if the underlying socket has a peer (getpeername succeeds)."""
        try:
            self._conn.getpeername()
        except socket.error as e:
            if e.errno != errno.ENOTCONN:
                # It's an exception other than the one we expected if we're not
                # connected.
                raise
            return False
        return True

    # Lovingly stolen from CherryPy
    # (http://svn.cherrypy.org/tags/cherrypy-3.2.1/cherrypy/wsgiserver/ssl_pyopenssl.py).
    def _safe_ssl_call(self, suppress_ragged_eofs, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        Retries WantRead/WantWrite up to SSL_TIMEOUT seconds; translates
        OpenSSL errors into socket.error, optionally swallowing ragged EOFs.
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except (ossl.WantReadError, ossl.WantWriteError):
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.SSL_RETRY)
            except ossl.Error as e:
                if suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                    return b''
                raise socket.error(e.args[0])
            if time.time() - start > self.SSL_TIMEOUT:
                raise socket.timeout('timed out')

    def connect(self, address):
        """Connect the underlying socket, handshaking if configured to."""
        self._conn.connect(address)
        if self._do_handshake_on_connect:
            self.do_handshake()

    def do_handshake(self):
        """Perform the TLS handshake, then verify the hostname if requested."""
        self._safe_ssl_call(False, self._conn.do_handshake)
        if self._check_hostname:
            verify_hostname(self, self._server_hostname)

    def recv(self, bufsize, flags=None):
        return self._safe_ssl_call(
            self._suppress_ragged_eofs,
            self._conn.recv,
            bufsize,
            flags
        )

    def recv_into(self, buffer, bufsize=None, flags=None):
        # A temporary recv_into implementation. Should be replaced when
        # PyOpenSSL has merged pyca/pyopenssl#121.
        if bufsize is None:
            bufsize = len(buffer)
        data = self.recv(bufsize, flags)
        data_len = len(data)
        buffer[0:data_len] = data
        return data_len

    def send(self, data, flags=None):
        return self._safe_ssl_call(False, self._conn.send, data, flags)

    def sendall(self, data, flags=None):
        return self._safe_ssl_call(False, self._conn.sendall, data, flags)

    def selected_npn_protocol(self):
        """Return the NPN-negotiated protocol as str, or None."""
        proto = self._conn.get_next_proto_negotiated()
        if isinstance(proto, bytes):
            proto = proto.decode('ascii')
        return proto if proto else None

    def selected_alpn_protocol(self):
        """Return the ALPN-negotiated protocol as str, or None."""
        proto = self._conn.get_alpn_proto_negotiated()
        if isinstance(proto, bytes):
            proto = proto.decode('ascii')
        return proto if proto else None

    def getpeercert(self):
        """Return the peer certificate as a dict shaped like stdlib getpeercert()."""
        def resolve_alias(alias):
            # Expand the short X.509 attribute aliases to their long names.
            return dict(
                C='countryName',
                ST='stateOrProvinceName',
                L='localityName',
                O='organizationName',
                OU='organizationalUnitName',
                CN='commonName',
            ).get(alias, alias)

        def to_components(name):
            # TODO Verify that these are actually *supposed* to all be
            # single-element tuples, and that's not just a quirk of the
            # examples I've seen.
            # BUG FIX: resolve_alias takes a single argument; the previous
            # code passed both key and value to it, raising TypeError. Each
            # component is now a ((name, value),) pair, as in stdlib ssl.
            return tuple(
                [
                    ((resolve_alias(k.decode('utf-8')), v.decode('utf-8')),)
                    for k, v in name.get_components()
                ]
            )

        # The standard getpeercert() takes the nice X509 object tree returned
        # by OpenSSL and turns it into a dict according to some format it seems
        # to have made up on the spot. Here, we do our best to emulate that.
        cert = self._conn.get_peer_certificate()
        result = dict(
            issuer=to_components(cert.get_issuer()),
            subject=to_components(cert.get_subject()),
            # BUG FIX: 'version' previously duplicated the subject X509Name;
            # report the certificate's version number instead.
            version=cert.get_version(),
            serialNumber=cert.get_serial_number(),
            notBefore=cert.get_notBefore(),
            notAfter=cert.get_notAfter(),
        )
        # TODO extensions, including subjectAltName
        # (see _decode_certificate in _ssl.c)
        return result

    # a dash of magic to reduce boilerplate: forward plain-socket methods
    # straight to the wrapped connection.
    methods = ['accept', 'bind', 'close', 'getsockname', 'listen', 'fileno']
    for method in methods:
        locals()[method] = _proxy(method)
class SSLContext(object):
    """pyOpenSSL-backed stand-in for the stdlib ssl.SSLContext."""

    def __init__(self, protocol):
        self.protocol = protocol
        self._ctx = ossl.Context(protocol)
        self.options = OP_ALL
        self.check_hostname = False
        self.npn_protos = []

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, value):
        # Mirror the value locally and push it down to the OpenSSL context.
        self._options = value
        self._ctx.set_options(value)

    @property
    def verify_mode(self):
        return self._ctx.get_verify_mode()

    @verify_mode.setter
    def verify_mode(self, value):
        # TODO verify exception is raised on failure
        self._ctx.set_verify(
            value, lambda conn, cert, errnum, errdepth, ok: ok
        )

    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        """Load CA certificates from a file, a directory, and/or an in-memory blob."""
        # TODO factor out common code
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            # NOTE(review): pyOpenSSL's load_verify_locations expects a path,
            # not a file object — confirm this cadata branch actually works.
            self._ctx.load_verify_locations(BytesIO(cadata))

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        """Load a certificate and private key; *password* unlocks the key if set.

        BUG FIX: the default was a corrupted placeholder token that is not
        valid Python; stdlib ssl.SSLContext.load_cert_chain defaults to None.
        """
        self._ctx.use_certificate_file(certfile)
        if password is not None:
            self._ctx.set_passwd_cb(
                lambda max_length, prompt_twice, userdata: password
            )
        self._ctx.use_privatekey_file(keyfile or certfile)

    def set_npn_protocols(self, protocols):
        """Register the NPN protocol list and install the selection callback."""
        self.protocols = list(map(lambda x: x.encode('ascii'), protocols))

        def cb(conn, protos):
            # Detect the overlapping set of protocols.
            overlap = set(protos) & set(self.protocols)
            # Select the option that comes last in the list in the overlap.
            for p in self.protocols:
                if p in overlap:
                    return p
            else:
                return b''
        self._ctx.set_npn_select_callback(cb)

    def set_alpn_protocols(self, protocols):
        protocols = list(map(lambda x: x.encode('ascii'), protocols))
        self._ctx.set_alpn_protos(protocols)

    def wrap_socket(self,
                    sock,
                    server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap *sock* in an SSLSocket bound to this context."""
        conn = ossl.Connection(self._ctx, sock)
        return SSLSocket(conn, server_side, do_handshake_on_connect,
                         suppress_ragged_eofs, server_hostname,
                         # TODO what if this is changed after the fact?
                         self.check_hostname)
|
# -*- coding: utf-8 -*-
"""
hyper/ssl_compat
~~~~~~~~~
Shoves pyOpenSSL into an API that looks like the standard Python 3.x ssl
module.
Currently exposes exactly those attributes, classes, and methods that we
actually use in hyper (all method signatures are complete, however). May be
expanded to something more general-purpose in the future.
"""
try:
import StringIO as BytesIO
except ImportError:
from io import BytesIO
import errno
import socket
import time
from OpenSSL import SSL as ossl
from service_identity.pyopenssl import verify_hostname as _verify
CERT_NONE = ossl.VERIFY_NONE
CERT_REQUIRED = ossl.VERIFY_PEER | ossl.VERIFY_FAIL_IF_NO_PEER_CERT
_OPENSSL_ATTRS = dict(
OP_NO_COMPRESSION='OP_NO_COMPRESSION',
PROTOCOL_TLSv1_2='TLSv1_2_METHOD',
PROTOCOL_SSLv23='SSLv23_METHOD',
)
for external, internal in _OPENSSL_ATTRS.items():
value = getattr(ossl, internal, None)
if value:
locals()[external] = value
OP_ALL = 0
# TODO: Find out the names of these other flags.
for bit in [31] + list(range(10)):
OP_ALL |= 1 << bit
HAS_NPN = True
def _proxy(method):
def inner(self, *args, **kwargs):
return getattr(self._conn, method)(*args, **kwargs)
return inner
# Referenced in hyper/http20/connection.py. These values come
# from the python ssl package, and must be defined in this file
# for hyper to work in python versions <2.7.9
SSL_ERROR_WANT_READ = 2
SSL_ERROR_WANT_WRITE = 3
# TODO missing some attributes
class SSLError(OSError):
pass
class CertificateError(SSLError):
pass
def verify_hostname(ssl_sock, server_hostname):
"""
A method nearly compatible with the stdlib's match_hostname.
"""
if isinstance(server_hostname, bytes):
server_hostname = server_hostname.decode('ascii')
return _verify(ssl_sock._conn, server_hostname)
class SSLSocket(object):
SSL_TIMEOUT = 3
SSL_RETRY = .01
def __init__(self, conn, server_side, do_handshake_on_connect,
suppress_ragged_eofs, server_hostname, check_hostname):
self._conn = conn
self._do_handshake_on_connect = do_handshake_on_connect
self._suppress_ragged_eofs = suppress_ragged_eofs
self._check_hostname = check_hostname
if server_side:
self._conn.set_accept_state()
else:
if server_hostname:
self._conn.set_tlsext_host_name(
server_hostname.encode('utf-8')
)
self._server_hostname = server_hostname
# FIXME does this override do_handshake_on_connect=False?
self._conn.set_connect_state()
if self.connected and self._do_handshake_on_connect:
self.do_handshake()
@property
def connected(self):
try:
self._conn.getpeername()
except socket.error as e:
if e.errno != errno.ENOTCONN:
# It's an exception other than the one we expected if we're not
# connected.
raise
return False
return True
# Lovingly stolen from CherryPy
# (http://svn.cherrypy.org/tags/cherrypy-3.2.1/cherrypy/wsgiserver/ssl_pyopenssl.py).
def _safe_ssl_call(self, suppress_ragged_eofs, call, *args, **kwargs):
"""Wrap the given call with SSL error-trapping."""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except (ossl.WantReadError, ossl.WantWriteError):
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
time.sleep(self.SSL_RETRY)
except ossl.Error as e:
if suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
raise socket.error(e.args[0])
if time.time() - start > self.SSL_TIMEOUT:
raise socket.timeout('timed out')
def connect(self, address):
self._conn.connect(address)
if self._do_handshake_on_connect:
self.do_handshake()
def do_handshake(self):
self._safe_ssl_call(False, self._conn.do_handshake)
if self._check_hostname:
verify_hostname(self, self._server_hostname)
def recv(self, bufsize, flags=None):
return self._safe_ssl_call(
self._suppress_ragged_eofs,
self._conn.recv,
bufsize,
flags
)
def recv_into(self, buffer, bufsize=None, flags=None):
# A temporary recv_into implementation. Should be replaced when
# PyOpenSSL has merged pyca/pyopenssl#121.
if bufsize is None:
bufsize = len(buffer)
data = self.recv(bufsize, flags)
data_len = len(data)
buffer[0:data_len] = data
return data_len
def send(self, data, flags=None):
return self._safe_ssl_call(False, self._conn.send, data, flags)
def sendall(self, data, flags=None):
return self._safe_ssl_call(False, self._conn.sendall, data, flags)
def selected_npn_protocol(self):
proto = self._conn.get_next_proto_negotiated()
if isinstance(proto, bytes):
proto = proto.decode('ascii')
return proto if proto else None
def selected_alpn_protocol(self):
proto = self._conn.get_alpn_proto_negotiated()
if isinstance(proto, bytes):
proto = proto.decode('ascii')
return proto if proto else None
def getpeercert(self):
def resolve_alias(alias):
return dict(
C='countryName',
ST='stateOrProvinceName',
L='localityName',
O='organizationName',
OU='organizationalUnitName',
CN='commonName',
).get(alias, alias)
def to_components(name):
# TODO Verify that these are actually *supposed* to all be
# single-element tuples, and that's not just a quirk of the
# examples I've seen.
return tuple(
[
(resolve_alias(k.decode('utf-8'), v.decode('utf-8')),)
for k, v in name.get_components()
]
)
# The standard getpeercert() takes the nice X509 object tree returned
# by OpenSSL and turns it into a dict according to some format it seems
# to have made up on the spot. Here, we do our best to emulate that.
cert = self._conn.get_peer_certificate()
result = dict(
issuer=to_components(cert.get_issuer()),
subject=to_components(cert.get_subject()),
version=cert.get_subject(),
serialNumber=cert.get_serial_number(),
notBefore=cert.get_notBefore(),
notAfter=cert.get_notAfter(),
)
# TODO extensions, including subjectAltName
# (see _decode_certificate in _ssl.c)
return result
# a dash of magic to reduce boilerplate
methods = ['accept', 'bind', 'close', 'getsockname', 'listen', 'fileno']
for method in methods:
locals()[method] = _proxy(method)
class SSLContext(object):
def __init__(self, protocol):
self.protocol = protocol
self._ctx = ossl.Context(protocol)
self.options = OP_ALL
self.check_hostname = False
self.npn_protos = []
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return self._ctx.get_verify_mode()
@verify_mode.setter
def verify_mode(self, value):
# TODO verify exception is raised on failure
self._ctx.set_verify(
value, lambda conn, cert, errnum, errdepth, ok: ok
)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
# TODO factor out common code
if cafile is not None:
cafile = cafile.encode('utf-8')
if capath is not None:
capath = capath.encode('utf-8')
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
def load_cert_chain(self, certfile, keyfile=None, password=<PASSWORD>):
self._ctx.use_certificate_file(certfile)
if password is not None:
self._ctx.set_passwd_cb(
lambda max_length, prompt_twice, userdata: password
)
self._ctx.use_privatekey_file(keyfile or certfile)
def set_npn_protocols(self, protocols):
self.protocols = list(map(lambda x: x.encode('ascii'), protocols))
def cb(conn, protos):
# Detect the overlapping set of protocols.
overlap = set(protos) & set(self.protocols)
# Select the option that comes last in the list in the overlap.
for p in self.protocols:
if p in overlap:
return p
else:
return b''
self._ctx.set_npn_select_callback(cb)
def set_alpn_protocols(self, protocols):
protocols = list(map(lambda x: x.encode('ascii'), protocols))
self._ctx.set_alpn_protos(protocols)
def wrap_socket(self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None):
conn = ossl.Connection(self._ctx, sock)
return SSLSocket(conn, server_side, do_handshake_on_connect,
suppress_ragged_eofs, server_hostname,
# TODO what if this is changed after the fact?
self.check_hostname)
|
en
| 0.895008
|
# -*- coding: utf-8 -*- hyper/ssl_compat ~~~~~~~~~ Shoves pyOpenSSL into an API that looks like the standard Python 3.x ssl module. Currently exposes exactly those attributes, classes, and methods that we actually use in hyper (all method signatures are complete, however). May be expanded to something more general-purpose in the future. # TODO: Find out the names of these other flags. # Referenced in hyper/http20/connection.py. These values come # from the python ssl package, and must be defined in this file # for hyper to work in python versions <2.7.9 # TODO missing some attributes A method nearly compatible with the stdlib's match_hostname. # FIXME does this override do_handshake_on_connect=False? # It's an exception other than the one we expected if we're not # connected. # Lovingly stolen from CherryPy # (http://svn.cherrypy.org/tags/cherrypy-3.2.1/cherrypy/wsgiserver/ssl_pyopenssl.py). Wrap the given call with SSL error-trapping. # Sleep and try again. This is dangerous, because it means # the rest of the stack has no way of differentiating # between a "new handshake" error and "client dropped". # Note this isn't an endless loop: there's a timeout below. # A temporary recv_into implementation. Should be replaced when # PyOpenSSL has merged pyca/pyopenssl#121. # TODO Verify that these are actually *supposed* to all be # single-element tuples, and that's not just a quirk of the # examples I've seen. # The standard getpeercert() takes the nice X509 object tree returned # by OpenSSL and turns it into a dict according to some format it seems # to have made up on the spot. Here, we do our best to emulate that. # TODO extensions, including subjectAltName # (see _decode_certificate in _ssl.c) # a dash of magic to reduce boilerplate # TODO verify exception is raised on failure # TODO factor out common code # Detect the overlapping set of protocols. # Select the option that comes last in the list in the overlap. # TODO what if this is changed after the fact?
| 2.55691
| 3
|
MainRunNumber.py
|
tokyohost/Download-Thz-Torrent
| 4
|
6627266
|
<reponame>tokyohost/Download-Thz-Torrent<filename>MainRunNumber.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
def MainRunNumber(NowNumber):
    """Return the next crawl counter value.

    Translated from the original Chinese comment: counts how many pages
    have been crawled in total.
    """
    return NowNumber + 1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def MainRunNumber(NowNumber):
#统计共爬取多少 个页面
MainRunNumber = NowNumber
MainRunNumber += 1
return MainRunNumber
|
zh
| 0.510918
|
#!/usr/bin/python # -*- coding: utf-8 -*- #统计共爬取多少 个页面
| 2.87526
| 3
|
home_town_finder/__init__.py
|
ThorsHamster/find_new_hometown
| 2
|
6627267
|
from .home_town_finder import HomeTownFinder
__all__ = ["HomeTownFinder"]
|
from .home_town_finder import HomeTownFinder
__all__ = ["HomeTownFinder"]
|
none
| 1
| 1.074005
| 1
|
|
profiles/migrations/0004_auto_20180727_1823.py
|
vkendurkar/StudentCouncil
| 1
|
6627268
|
<filename>profiles/migrations/0004_auto_20180727_1823.py
# Generated by Django 2.0.6 on 2018-07-27 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters Profile.profile_pic's upload path; auto-generated by Django.
    dependencies = [
        ('profiles', '0003_auto_20180727_1756'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile_pic',
            # NOTE(review): upload_to embeds a field repr
            # ('<django.db.models.fields.CharField>'), which looks like an
            # accidental serialization rather than an intended path — confirm.
            field=models.FileField(blank=True, null=True, upload_to='profile_pics/<django.db.models.fields.CharField>'),
        ),
    ]
|
<filename>profiles/migrations/0004_auto_20180727_1823.py
# Generated by Django 2.0.6 on 2018-07-27 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0003_auto_20180727_1756'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profile_pic',
field=models.FileField(blank=True, null=True, upload_to='profile_pics/<django.db.models.fields.CharField>'),
),
]
|
en
| 0.681843
|
# Generated by Django 2.0.6 on 2018-07-27 12:53
| 1.464398
| 1
|
cloudsplaining/scan/statement_detail.py
|
gruebel/cloudsplaining
| 3
|
6627269
|
"""Abstracts evaluation of IAM Policy statements."""
import logging
from cached_property import cached_property
from policy_sentry.analysis.analyze import determine_actions_to_expand
from policy_sentry.querying.actions import (
remove_actions_not_matching_access_level,
get_actions_matching_arn,
)
from policy_sentry.querying.all import get_all_actions
from cloudsplaining.shared.utils import (
remove_read_level_actions,
remove_wildcard_only_actions,
)
from cloudsplaining.shared.exclusions import DEFAULT_EXCLUSIONS, Exclusions
# Copyright (c) 2020, salesforce.<EMAIL>, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
logger = logging.getLogger(__name__)
# Quiet policy_sentry's own logging; only warnings and above are wanted here.
logging.getLogger("policy_sentry").setLevel(logging.WARNING)
# Full expanded IAM action list, computed once at import time; used below to
# invert NotAction statements into their effective allowed actions.
ALL_ACTIONS = get_all_actions()
# pylint: disable=too-many-instance-attributes
class StatementDetail:
"""
Analyzes individual statements within a policy
"""
def __init__(self, statement):
self.json = statement
self.statement = statement
self.effect = statement["Effect"]
self.condition = statement.get("Condition",None)
self.resources = self._resources()
self.actions = self._actions()
self.not_action = self._not_action()
self.has_resource_constraints = _has_resource_constraints(self.resources)
self.not_action_effective_actions = self._not_action_effective_actions()
self.not_resource = self._not_resource()
self.has_condition = self._has_condition()
def _actions(self):
"""Holds the actions in a statement"""
actions = self.statement.get("Action")
if not actions:
return []
if not isinstance(actions, list):
actions = [actions]
return actions
def _resources(self):
"""Holds the resource ARNs in a statement"""
resources = self.statement.get("Resource")
if not resources:
return []
# If it's a string, turn it into a list
if not isinstance(resources, list):
resources = [resources]
return resources
def _not_action(self):
"""Holds the NotAction details.
We won't do anything with it - but we will flag it as something for the assessor to triage."""
not_action = self.statement.get("NotAction")
if not not_action:
return []
if not isinstance(not_action, list):
not_action = [not_action]
return not_action
def _not_resource(self):
"""Holds the NotResource details.
We won't do anything with it - but we will flag it as something for the assessor to triage."""
not_resource = self.statement.get("NotResource")
if not not_resource:
return []
if not isinstance(not_resource, list):
not_resource = [not_resource]
return not_resource
# @property
def _not_action_effective_actions(self):
"""If NotAction is used, calculate the allowed actions - i.e., what it would be """
effective_actions = []
if not self.not_action:
return None
not_actions_expanded_lowercase = [
a.lower() for a in determine_actions_to_expand(self.not_action)
]
# Effect: Allow && Resource != "*"
if self.has_resource_constraints and self.effect_allow:
opposite_actions = []
for arn in self.resources:
actions_specific_to_arn = get_actions_matching_arn(arn)
if actions_specific_to_arn:
opposite_actions.extend(actions_specific_to_arn)
for opposite_action in opposite_actions:
# If it's in NotActions, then it is not an action we want
if opposite_action.lower() not in not_actions_expanded_lowercase:
effective_actions.append(opposite_action)
effective_actions.sort()
return effective_actions
# Effect: Allow, Resource != "*", and Action == prefix:*
if not self.has_resource_constraints and self.effect_allow:
# Then we calculate the reverse using all_actions
# If it's in NotActions, then it is not an action we want
effective_actions = [
action
for action in ALL_ACTIONS
if action.lower() not in not_actions_expanded_lowercase
]
effective_actions.sort()
return effective_actions
if self.has_resource_constraints and self.effect_deny:
logger.debug("NOTE: Haven't decided if we support Effect Deny here?")
return None
if not self.has_resource_constraints and self.effect_deny:
logger.debug("NOTE: Haven't decided if we support Effect Deny here?")
return None
# only including this so Pylint doesn't yell at us
return None # pragma: no cover
@property
def has_not_resource_with_allow(self):
    """Flag the discouraged NotResource + Allow combination.

    Per the AWS documentation, NotResource should NEVER be paired with the
    Allow Effect. See:
    https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notresource.html#notresource-element-combinations
    """
    if not (self.not_resource and self.effect_allow):
        return False
    logger.warning(
        "Per the AWS documentation, the NotResource should never be used with the "
        "Allow Effect. We suggest changing this ASAP"
    )
    return True
@cached_property
def expanded_actions(self):
    """Expands the full list of allowed actions from the Policy.

    Cached: expansion is relatively expensive and the result is reused by
    several downstream properties.
    """
    if self.actions:
        # Expand wildcard patterns (e.g. "s3:Get*") into explicit actions.
        expanded = determine_actions_to_expand(self.actions)
        expanded.sort()
        return expanded
    elif self.not_action:
        # Precomputed in __init__ via _not_action_effective_actions().
        return self.not_action_effective_actions
    else:
        raise Exception(  # pragma: no cover
            "The Policy should include either NotAction or Action in the statement."
        )
@property
def effect_deny(self):
    """True when this statement's Effect is 'Deny'."""
    return self.effect == "Deny"
@property
def effect_allow(self):
    """True when this statement's Effect is 'Allow'."""
    return self.effect == "Allow"
@property
def services_in_use(self):
    """Sorted, de-duplicated service prefixes used by this statement."""
    # Each expanded action is "service:ActionName"; keep the service part.
    return sorted({
        service
        for service, _ in (action.split(":") for action in self.expanded_actions)
    })
@property
def permissions_management_actions_without_constraints(self):
    """'Permissions management' level actions allowed by this statement that
    lack resource constraints; empty when the statement is constrained."""
    if self.has_resource_constraints or not self.expanded_actions:
        return []
    return remove_actions_not_matching_access_level(
        self.expanded_actions, "Permissions management"
    )
@property
def write_actions_without_constraints(self):
    """'Write' level actions allowed by this statement that lack resource
    constraints; empty when the statement is constrained."""
    if self.has_resource_constraints:
        return []
    return remove_actions_not_matching_access_level(
        self.expanded_actions, "Write"
    )
@property
def tagging_actions_without_constraints(self):
    """'Tagging' level actions allowed by this statement that lack resource
    constraints; empty when the statement is constrained."""
    if self.has_resource_constraints:
        return []
    return remove_actions_not_matching_access_level(
        self.expanded_actions, "Tagging"
    )
def missing_resource_constraints(self, exclusions=DEFAULT_EXCLUSIONS):
    """Return a list of any actions - regardless of access level - allowed by the statement that do not leverage
    resource constraints.

    :param exclusions: an Exclusions object; actions it excludes are filtered
        out of the result.
    :raises Exception: if ``exclusions`` is not an Exclusions instance.
    """
    if not isinstance(exclusions, Exclusions):
        raise Exception(  # pragma: no cover
            "The provided exclusions is not the Exclusions object type. "
            "Please use the Exclusions object."
        )
    actions_missing_resource_constraints = []
    # Only a bare Resource: "*" counts as "missing constraints" here.
    if len(self.resources) == 1 and self.resources[0] == "*":
        # Drop actions that can only ever be used against "*" resources —
        # those cannot be constrained in the first place.
        actions_missing_resource_constraints = remove_wildcard_only_actions(
            self.expanded_actions
        )
    return exclusions.get_allowed_actions(actions_missing_resource_constraints)
def missing_resource_constraints_for_modify_actions(
    self, exclusions=DEFAULT_EXCLUSIONS
):
    """
    Determine whether or not any actions at the 'Write', 'Permissions management', or 'Tagging' access levels
    are allowed by the statement without resource constraints.

    :param exclusions: Exclusions object
    :return: sorted, de-duplicated list of offending action names.
    :raises Exception: if ``exclusions`` is not an Exclusions instance.
    """
    if not isinstance(exclusions, Exclusions):
        raise Exception(  # pragma: no cover
            "The provided exclusions is not the Exclusions object type. "
            "Please use the Exclusions object."
        )
    # This initially includes read-only and modify level actions.
    # "include_actions" from the exclusions config are always reported,
    # even at read-only access levels.
    if exclusions.include_actions:
        always_look_for_actions = [x.lower() for x in exclusions.include_actions]
    else:
        always_look_for_actions = []
    actions_missing_resource_constraints = self.missing_resource_constraints(
        exclusions
    )
    always_actions_found = []
    for action in actions_missing_resource_constraints:
        if action.lower() in always_look_for_actions:
            always_actions_found.append(action)
    # Keep only modify-level actions, then re-add the always-include hits
    # (which may have been read-level and thus removed).
    modify_actions_missing_constraints = remove_read_level_actions(
        actions_missing_resource_constraints
    )
    modify_actions_missing_constraints = (
        modify_actions_missing_constraints + always_actions_found
    )
    # dict.fromkeys de-duplicates while preserving insertion order.
    modify_actions_missing_constraints = list(
        dict.fromkeys(modify_actions_missing_constraints)
    )
    modify_actions_missing_constraints.sort()
    return modify_actions_missing_constraints
def _has_condition(self):
    """True when the statement carries a (truthy) Condition element."""
    return bool(self.condition)
def _has_resource_constraints(resources):
    """Determine whether or not the statement's resources are constrained.

    NOTE: an empty resource list (likely a NotResource statement, which we do
    not support) deliberately falls through and reports True.
    """
    if len(resources) == 1 and resources[0] == "*":
        return False
    if len(resources) > 1:  # pragma: no cover
        # A sloppy policy may mix concrete ARNs with a wildcard; any "*"
        # means the statement is effectively unconstrained.
        return not any(resource == "*" for resource in resources)
    return True
|
"""Abstracts evaluation of IAM Policy statements."""
import logging
from cached_property import cached_property
from policy_sentry.analysis.analyze import determine_actions_to_expand
from policy_sentry.querying.actions import (
remove_actions_not_matching_access_level,
get_actions_matching_arn,
)
from policy_sentry.querying.all import get_all_actions
from cloudsplaining.shared.utils import (
remove_read_level_actions,
remove_wildcard_only_actions,
)
from cloudsplaining.shared.exclusions import DEFAULT_EXCLUSIONS, Exclusions
# Copyright (c) 2020, salesforce.<EMAIL>, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
logger = logging.getLogger(__name__)
logging.getLogger("policy_sentry").setLevel(logging.WARNING)
ALL_ACTIONS = get_all_actions()
# pylint: disable=too-many-instance-attributes
class StatementDetail:
"""
Analyzes individual statements within a policy
"""
def __init__(self, statement):
    """Parse one IAM statement dict into convenient attributes.

    :param statement: a single entry from a policy's Statement list; must
        contain an "Effect" key.
    """
    # Keep the raw statement under both names for downstream consumers.
    self.json = statement
    self.statement = statement
    self.effect = statement["Effect"]
    self.condition = statement.get("Condition",None)
    # Normalized (always-a-list) views of the statement elements.
    self.resources = self._resources()
    self.actions = self._actions()
    self.not_action = self._not_action()
    # Must be set before _not_action_effective_actions(), which reads it.
    self.has_resource_constraints = _has_resource_constraints(self.resources)
    # Effective action list implied by NotAction (None when absent).
    self.not_action_effective_actions = self._not_action_effective_actions()
    self.not_resource = self._not_resource()
    self.has_condition = self._has_condition()
def _actions(self):
    """Normalize the statement's Action element to a list (possibly empty)."""
    raw = self.statement.get("Action")
    if not raw:
        return []
    return raw if isinstance(raw, list) else [raw]
def _resources(self):
    """Normalize the statement's Resource element to a list of ARNs."""
    raw = self.statement.get("Resource")
    if not raw:
        return []
    # A single ARN string becomes a one-element list.
    return raw if isinstance(raw, list) else [raw]
def _not_action(self):
    """Return the statement's NotAction entries, normalized to a list.

    We don't act on NotAction directly; it is surfaced for triage and for
    computing the effective action list.
    """
    raw = self.statement.get("NotAction")
    if not raw:
        return []
    return raw if isinstance(raw, list) else [raw]
def _not_resource(self):
"""Holds the NotResource details.
We won't do anything with it - but we will flag it as something for the assessor to triage."""
not_resource = self.statement.get("NotResource")
if not not_resource:
return []
if not isinstance(not_resource, list):
not_resource = [not_resource]
return not_resource
# @property
def _not_action_effective_actions(self):
"""If NotAction is used, calculate the allowed actions - i.e., what it would be """
effective_actions = []
if not self.not_action:
return None
not_actions_expanded_lowercase = [
a.lower() for a in determine_actions_to_expand(self.not_action)
]
# Effect: Allow && Resource != "*"
if self.has_resource_constraints and self.effect_allow:
opposite_actions = []
for arn in self.resources:
actions_specific_to_arn = get_actions_matching_arn(arn)
if actions_specific_to_arn:
opposite_actions.extend(actions_specific_to_arn)
for opposite_action in opposite_actions:
# If it's in NotActions, then it is not an action we want
if opposite_action.lower() not in not_actions_expanded_lowercase:
effective_actions.append(opposite_action)
effective_actions.sort()
return effective_actions
# Effect: Allow, Resource != "*", and Action == prefix:*
if not self.has_resource_constraints and self.effect_allow:
# Then we calculate the reverse using all_actions
# If it's in NotActions, then it is not an action we want
effective_actions = [
action
for action in ALL_ACTIONS
if action.lower() not in not_actions_expanded_lowercase
]
effective_actions.sort()
return effective_actions
if self.has_resource_constraints and self.effect_deny:
logger.debug("NOTE: Haven't decided if we support Effect Deny here?")
return None
if not self.has_resource_constraints and self.effect_deny:
logger.debug("NOTE: Haven't decided if we support Effect Deny here?")
return None
# only including this so Pylint doesn't yell at us
return None # pragma: no cover
@property
def has_not_resource_with_allow(self):
"""Per the AWS documentation, the NotResource should NEVER be used with the Allow Effect.
See documentation here. https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notresource.html#notresource-element-combinations"""
if self.not_resource and self.effect_allow:
logger.warning(
"Per the AWS documentation, the NotResource should never be used with the "
"Allow Effect. We suggest changing this ASAP"
)
return True
return False
@cached_property
def expanded_actions(self):
"""Expands the full list of allowed actions from the Policy/"""
if self.actions:
expanded = determine_actions_to_expand(self.actions)
expanded.sort()
return expanded
elif self.not_action:
return self.not_action_effective_actions
else:
raise Exception( # pragma: no cover
"The Policy should include either NotAction or Action in the statement."
)
@property
def effect_deny(self):
"""Check if the Effect of the Policy is 'Deny'"""
return bool(self.effect == "Deny")
@property
def effect_allow(self):
"""Check if the Effect of the Policy is 'Allow'"""
return bool(self.effect == "Allow")
@property
def services_in_use(self):
"""Get a list of the services in use by the statement."""
service_prefixes = set()
for action in self.expanded_actions:
service, action_name = action.split(":") # pylint: disable=unused-variable
service_prefixes.add(service)
return sorted(service_prefixes)
@property
def permissions_management_actions_without_constraints(self):
"""Where applicable, returns a list of 'Permissions management' IAM actions in the statement that
do not have resource constraints"""
result = []
if not self.has_resource_constraints:
if self.expanded_actions:
result = remove_actions_not_matching_access_level(
self.expanded_actions, "Permissions management"
)
return result
@property
def write_actions_without_constraints(self):
"""Where applicable, returns a list of 'Write' level IAM actions in the statement that
do not have resource constraints"""
result = []
if not self.has_resource_constraints:
result = remove_actions_not_matching_access_level(
self.expanded_actions, "Write"
)
return result
@property
def tagging_actions_without_constraints(self):
"""Where applicable, returns a list of 'Tagging' level IAM actions in the statement that
do not have resource constraints"""
result = []
if not self.has_resource_constraints:
result = remove_actions_not_matching_access_level(
self.expanded_actions, "Tagging"
)
return result
def missing_resource_constraints(self, exclusions=DEFAULT_EXCLUSIONS):
"""Return a list of any actions - regardless of access level - allowed by the statement that do not leverage
resource constraints."""
if not isinstance(exclusions, Exclusions):
raise Exception( # pragma: no cover
"The provided exclusions is not the Exclusions object type. "
"Please use the Exclusions object."
)
actions_missing_resource_constraints = []
if len(self.resources) == 1 and self.resources[0] == "*":
actions_missing_resource_constraints = remove_wildcard_only_actions(
self.expanded_actions
)
return exclusions.get_allowed_actions(actions_missing_resource_constraints)
def missing_resource_constraints_for_modify_actions(
self, exclusions=DEFAULT_EXCLUSIONS
):
"""
Determine whether or not any actions at the 'Write', 'Permissions management', or 'Tagging' access levels
are allowed by the statement without resource constraints.
:param exclusions: Exclusions object
"""
if not isinstance(exclusions, Exclusions):
raise Exception( # pragma: no cover
"The provided exclusions is not the Exclusions object type. "
"Please use the Exclusions object."
)
# This initially includes read-only and modify level actions
if exclusions.include_actions:
always_look_for_actions = [x.lower() for x in exclusions.include_actions]
else:
always_look_for_actions = []
actions_missing_resource_constraints = self.missing_resource_constraints(
exclusions
)
always_actions_found = []
for action in actions_missing_resource_constraints:
if action.lower() in always_look_for_actions:
always_actions_found.append(action)
modify_actions_missing_constraints = remove_read_level_actions(
actions_missing_resource_constraints
)
modify_actions_missing_constraints = (
modify_actions_missing_constraints + always_actions_found
)
modify_actions_missing_constraints = list(
dict.fromkeys(modify_actions_missing_constraints)
)
modify_actions_missing_constraints.sort()
return modify_actions_missing_constraints
def _has_condition(self):
if self.condition:
return True
return False
def _has_resource_constraints(resources):
"""Determine whether or not the statement allows resource constraints."""
if len(resources) == 0:
# This is probably a NotResources situation which we do not support.
pass
if len(resources) == 1 and resources[0] == "*":
return False
elif len(resources) > 1: # pragma: no cover
# It's possible that someone writes a bad policy that includes both a resource ARN as well as a wildcard.
return not any(resource == "*" for resource in resources)
return True
|
en
| 0.867788
|
Abstracts evaluation of IAM Policy statements. # Copyright (c) 2020, salesforce.<EMAIL>, inc. # All rights reserved. # Licensed under the BSD 3-Clause license. # For full license text, see the LICENSE file in the repo root # or https://opensource.org/licenses/BSD-3-Clause # pylint: disable=too-many-instance-attributes Analyzes individual statements within a policy Holds the actions in a statement Holds the resource ARNs in a statement # If it's a string, turn it into a list Holds the NotAction details. We won't do anything with it - but we will flag it as something for the assessor to triage. Holds the NotResource details. We won't do anything with it - but we will flag it as something for the assessor to triage. # @property If NotAction is used, calculate the allowed actions - i.e., what it would be # Effect: Allow && Resource != "*" # If it's in NotActions, then it is not an action we want # Effect: Allow, Resource != "*", and Action == prefix:* # Then we calculate the reverse using all_actions # If it's in NotActions, then it is not an action we want # only including this so Pylint doesn't yell at us # pragma: no cover Per the AWS documentation, the NotResource should NEVER be used with the Allow Effect. See documentation here. https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_notresource.html#notresource-element-combinations Expands the full list of allowed actions from the Policy/ # pragma: no cover Check if the Effect of the Policy is 'Deny' Check if the Effect of the Policy is 'Allow' Get a list of the services in use by the statement. 
# pylint: disable=unused-variable Where applicable, returns a list of 'Permissions management' IAM actions in the statement that do not have resource constraints Where applicable, returns a list of 'Write' level IAM actions in the statement that do not have resource constraints Where applicable, returns a list of 'Tagging' level IAM actions in the statement that do not have resource constraints Return a list of any actions - regardless of access level - allowed by the statement that do not leverage resource constraints. # pragma: no cover Determine whether or not any actions at the 'Write', 'Permissions management', or 'Tagging' access levels are allowed by the statement without resource constraints. :param exclusions: Exclusions object # pragma: no cover # This initially includes read-only and modify level actions Determine whether or not the statement allows resource constraints. # This is probably a NotResources situation which we do not support. # pragma: no cover # It's possible that someone writes a bad policy that includes both a resource ARN as well as a wildcard.
| 1.951694
| 2
|
gloro/lipschitz_computation.py
|
klasleino/gloro
| 16
|
6627270
|
import tensorflow as tf
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import AveragePooling2D
import gloro
from gloro.layers.network_layers import ResnetBlock
from gloro.utils import l2_normalize
class LipschitzComputer(object):
    """Computes a per-layer contribution to a network's Lipschitz bound.

    Layers with no weights fall through to this base class, whose constant
    is 1. Each computer also records which branch of the network (main /
    residual / skip) its layer belongs to, so that residual additions can
    be combined correctly by `global_lipschitz_bound`.
    """

    def __init__(self, layer, *args, **kwargs):
        self._name = layer.name
        # Branch membership: prefer an explicit `_gloro_branch` attribute,
        # otherwise fall back to parsing the layer's name.
        if hasattr(layer, '_gloro_branch'):
            self._branch = layer._gloro_branch
        elif layer.name.startswith(ResnetBlock.identifier):
            # TODO: this is a little less nice than reading a `_gloro_branch`
            # property, but it persists by default when the layers are saved,
            # whereas we would need extra instrumentation to save the
            # `_gloro_branch` property. Ultimately we should probably pick
            # just one method (either name-based or property-based).
            if ResnetBlock.join_identifier in layer.name:
                self._branch = gloro.constants.MAIN_BRANCH
            elif ResnetBlock.skip_identifier in layer.name:
                self._branch = gloro.constants.SKIP_BRANCH
            else:
                self._branch = gloro.constants.RESIDUAL_BRANCH
        else:
            self._branch = gloro.constants.MAIN_BRANCH

    @property
    def name(self):
        # Name of the underlying Keras layer.
        return self._name

    @property
    def branch(self):
        # One of gloro.constants.{MAIN,RESIDUAL,SKIP}_BRANCH.
        return self._branch

    @staticmethod
    def for_layer(layer, num_iterations):
        """Factory: choose the computer subclass appropriate for `layer`.

        :param num_iterations: power-iteration steps for weighted layers.
        """
        if hasattr(layer, 'kernel'):
            # 4-D kernels are convolutions; otherwise treat as dense.
            if len(layer.kernel.shape) == 4:
                return ConvLayerComputer(layer, num_iterations)
            else:
                return DenseLayerComputer(layer, num_iterations)
        elif isinstance(layer, gloro.layers.Scaling):
            return ScalingLayerComputer(layer)
        elif isinstance(layer, Add):
            return JoinLayerComputer(layer)
        elif isinstance(layer, AveragePooling2D):
            return AveragePoolingComputer(layer)
        else:
            # Any other layer contributes a constant of 1 (base class).
            return LipschitzComputer(layer)

    @staticmethod
    def for_model(model, num_iterations, exclude_last_layer=True):
        """Build one computer per layer of `model`.

        :param exclude_last_layer: skip the final layer (handled separately
            by the GloRo construction) — default True.
        """
        layers = model.layers[:-1] if exclude_last_layer else model.layers
        return [
            LipschitzComputer.for_layer(layer, num_iterations)
            for layer in layers
        ]

    @staticmethod
    def global_lipschitz_bound(layer_computers):
        """Fold per-layer constants into a global bound.

        Running products are tracked per branch so that JoinLayerComputer
        can combine the residual and skip branches additively at each Add.
        """
        lc = {
            gloro.constants.MAIN_BRANCH: 1.,
            gloro.constants.RESIDUAL_BRANCH: 1.,
            gloro.constants.SKIP_BRANCH: 1.,
        }
        for layer in layer_computers:
            lc[layer.branch] *= layer.get_lipschitz_constant(lc=lc)
        return lc[gloro.constants.MAIN_BRANCH]

    def get_lipschitz_constant(self, **kwargs):
        # Default: treat the layer as 1-Lipschitz.
        return 1.
class DenseLayerComputer(LipschitzComputer):
    """Estimates a dense layer's Lipschitz constant (its spectral norm)
    via power iteration on the kernel."""

    def __init__(self, layer, num_iterations):
        super().__init__(layer)
        self._W = layer.kernel
        # Persistent power iterate, shape (kernel.shape[1], 1). Kept as a
        # non-trainable variable so it warm-starts across calls.
        self._iterate = tf.Variable(
            tf.random.truncated_normal((layer.kernel.shape[1], 1)),
            dtype='float32',
            trainable=False)
        self._while_cond = lambda i, _: i < num_iterations

    @property
    def W(self):
        # The layer's kernel (weight matrix).
        return self._W

    @property
    def iterate(self):
        # Current power-iteration vector.
        return self._iterate

    def get_lipschitz_constant(self, **kwargs):
        """Run `num_iterations` power steps and return the current
        spectral-norm estimate ||W x|| / ||x||."""
        def body(i, x):
            # Normalize, then apply W followed by W^T — one power step.
            x = l2_normalize(x)
            x_p = self.W @ x
            x = tf.transpose(self.W) @ x_p
            return i + 1, x
        _, x = tf.while_loop(
            self._while_cond, body, [tf.constant(0), self.iterate])
        # Update the power iterate (warm start for the next call).
        self.iterate.assign(x)
        # Rayleigh-quotient-style estimate; EPS guards division by zero.
        return tf.sqrt(
            tf.reduce_sum((self.W @ x)**2.) /
            (tf.reduce_sum(x**2.) + gloro.constants.EPS))
class ConvLayerComputer(LipschitzComputer):
    """Estimates a conv layer's Lipschitz constant via power iteration,
    using conv2d / conv2d_transpose as the operator and its adjoint."""

    def __init__(self, layer, num_iterations):
        super().__init__(layer)
        self._W = layer.kernel
        self._strides = layer.strides
        # tf.nn.conv2d expects upper-case padding ("SAME"/"VALID").
        self._padding = layer.padding.upper()
        # Persistent power iterate shaped like a single input example.
        self._iterate = tf.Variable(
            tf.random.truncated_normal((1, *layer.input_shape[1:])),
            dtype='float32',
            trainable=False)
        self._while_cond = lambda i, _: i < num_iterations

    @property
    def W(self):
        # The layer's 4-D convolution kernel.
        return self._W

    @property
    def iterate(self):
        # Current power-iteration input tensor.
        return self._iterate

    @property
    def strides(self):
        return self._strides

    @property
    def padding(self):
        # Upper-case padding string for tf.nn ops.
        return self._padding

    def get_lipschitz_constant(self, **kwargs):
        """Run `num_iterations` power steps and return the current
        spectral-norm estimate ||conv(W, x)|| / ||x||."""
        def body(i, x):
            # Normalize, then apply the conv and its transpose (adjoint).
            x = l2_normalize(x)
            x_p = tf.nn.conv2d(
                x,
                self.W,
                strides=self.strides,
                padding=self.padding)
            x = tf.nn.conv2d_transpose(
                x_p,
                self.W,
                x.shape,
                strides=self.strides,
                padding=self.padding)
            return i + 1, x
        _, x = tf.while_loop(
            self._while_cond, body, [tf.constant(0), self._iterate])
        # Update the power iterate.
        self.iterate.assign(x)
        Wx = tf.nn.conv2d(x, self.W, strides=self.strides, padding=self.padding)
        # Rayleigh-quotient-style estimate; EPS guards division by zero.
        return tf.sqrt(
            tf.reduce_sum(Wx**2.) /
            (tf.reduce_sum(x**2.) + gloro.constants.EPS))
class ScalingLayerComputer(LipschitzComputer):
    """A Scaling layer multiplies by a single scalar weight, so its
    Lipschitz constant is simply the weight's absolute value."""

    def __init__(self, layer):
        super().__init__(layer)
        # The layer's scalar weight.
        self._w = layer._weight

    @property
    def w(self):
        return self._w

    def get_lipschitz_constant(self, **kwargs):
        # x -> w * x has Lipschitz constant |w|.
        return tf.abs(self.w)
class JoinLayerComputer(LipschitzComputer):
    """Handles an Add layer joining the residual and skip branches.

    The Lipschitz constant of a sum is bounded by the sum of the branch
    constants; after combining, both branch accumulators are reset to 1.
    """

    def get_lipschitz_constant(self, lc):
        # `lc` is the per-branch running-product dict maintained by
        # LipschitzComputer.global_lipschitz_bound; the returned value is
        # multiplied into the MAIN branch.
        result = (
            lc[gloro.constants.RESIDUAL_BRANCH] +
            lc[gloro.constants.SKIP_BRANCH])
        lc[gloro.constants.RESIDUAL_BRANCH] = 1.
        lc[gloro.constants.SKIP_BRANCH] = 1.
        return result
class AveragePoolingComputer(LipschitzComputer):
    """Bounds average pooling by expressing it as an equivalent fixed
    convolution and running power iteration once at construction time
    (the kernel never changes, so the constant can be cached)."""

    def __init__(self, layer):
        super().__init__(layer)
        # Fixed kernel that averages each pool window per channel:
        # channel-identity scaled by 1 / (pool_h * pool_w).
        W = tf.eye(layer.input.shape[-1])[None,None] * (
            tf.ones(layer.pool_size)[:,:,None,None]) / (
            layer.pool_size[0] * layer.pool_size[1])
        # Random starting iterate shaped like a single input example.
        x0 = tf.random.truncated_normal(
            shape=(1,*layer.input_shape[1:]))
        def body(i, x):
            # Power step: apply the conv, then its transpose (adjoint).
            x = l2_normalize(x)
            x_p = tf.nn.conv2d(
                x, W,
                strides=layer.strides,
                padding=layer.padding.upper())
            x = tf.nn.conv2d_transpose(
                x_p, W, x.shape,
                strides=layer.strides,
                padding=layer.padding.upper())
            return i + 1, x
        # Fixed budget of 100 iterations — runs only once, at construction.
        _, x = tf.while_loop(lambda i, _: i < 100, body, [tf.constant(0), x0])
        Wx = tf.nn.conv2d(
            x, W,
            strides=layer.strides,
            padding=layer.padding.upper())
        # Cache the estimate; EPS guards division by zero.
        self._lc = tf.sqrt(
            tf.reduce_sum(Wx**2.) /
            (tf.reduce_sum(x**2.) + gloro.constants.EPS))

    def get_lipschitz_constant(self, **kwargs):
        # Precomputed in __init__; see above.
        return self._lc
|
import tensorflow as tf
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import AveragePooling2D
import gloro
from gloro.layers.network_layers import ResnetBlock
from gloro.utils import l2_normalize
class LipschitzComputer(object):
def __init__(self, layer, *args, **kwargs):
self._name = layer.name
if hasattr(layer, '_gloro_branch'):
self._branch = layer._gloro_branch
elif layer.name.startswith(ResnetBlock.identifier):
# TODO: this is a little less nice than reading a `_gloro_branch`
# property, but it persists by default when the layers are saved,
# whereas we would need extra instrumentation to save the
# `_gloro_branch` property. Ultimately we should probably pick
# just one method (either name-based or property-based).
if ResnetBlock.join_identifier in layer.name:
self._branch = gloro.constants.MAIN_BRANCH
elif ResnetBlock.skip_identifier in layer.name:
self._branch = gloro.constants.SKIP_BRANCH
else:
self._branch = gloro.constants.RESIDUAL_BRANCH
else:
self._branch = gloro.constants.MAIN_BRANCH
@property
def name(self):
return self._name
@property
def branch(self):
return self._branch
@staticmethod
def for_layer(layer, num_iterations):
if hasattr(layer, 'kernel'):
if len(layer.kernel.shape) == 4:
return ConvLayerComputer(layer, num_iterations)
else:
return DenseLayerComputer(layer, num_iterations)
elif isinstance(layer, gloro.layers.Scaling):
return ScalingLayerComputer(layer)
elif isinstance(layer, Add):
return JoinLayerComputer(layer)
elif isinstance(layer, AveragePooling2D):
return AveragePoolingComputer(layer)
else:
return LipschitzComputer(layer)
@staticmethod
def for_model(model, num_iterations, exclude_last_layer=True):
layers = model.layers[:-1] if exclude_last_layer else model.layers
return [
LipschitzComputer.for_layer(layer, num_iterations)
for layer in layers
]
@staticmethod
def global_lipschitz_bound(layer_computers):
lc = {
gloro.constants.MAIN_BRANCH: 1.,
gloro.constants.RESIDUAL_BRANCH: 1.,
gloro.constants.SKIP_BRANCH: 1.,
}
for layer in layer_computers:
lc[layer.branch] *= layer.get_lipschitz_constant(lc=lc)
return lc[gloro.constants.MAIN_BRANCH]
def get_lipschitz_constant(self, **kwargs):
return 1.
class DenseLayerComputer(LipschitzComputer):
def __init__(self, layer, num_iterations):
super().__init__(layer)
self._W = layer.kernel
self._iterate = tf.Variable(
tf.random.truncated_normal((layer.kernel.shape[1], 1)),
dtype='float32',
trainable=False)
self._while_cond = lambda i, _: i < num_iterations
@property
def W(self):
return self._W
@property
def iterate(self):
return self._iterate
def get_lipschitz_constant(self, **kwargs):
def body(i, x):
x = l2_normalize(x)
x_p = self.W @ x
x = tf.transpose(self.W) @ x_p
return i + 1, x
_, x = tf.while_loop(
self._while_cond, body, [tf.constant(0), self.iterate])
# Update the power iterate.
self.iterate.assign(x)
return tf.sqrt(
tf.reduce_sum((self.W @ x)**2.) /
(tf.reduce_sum(x**2.) + gloro.constants.EPS))
class ConvLayerComputer(LipschitzComputer):
def __init__(self, layer, num_iterations):
super().__init__(layer)
self._W = layer.kernel
self._strides = layer.strides
self._padding = layer.padding.upper()
self._iterate = tf.Variable(
tf.random.truncated_normal((1, *layer.input_shape[1:])),
dtype='float32',
trainable=False)
self._while_cond = lambda i, _: i < num_iterations
@property
def W(self):
return self._W
@property
def iterate(self):
return self._iterate
@property
def strides(self):
return self._strides
@property
def padding(self):
return self._padding
def get_lipschitz_constant(self, **kwargs):
def body(i, x):
x = l2_normalize(x)
x_p = tf.nn.conv2d(
x,
self.W,
strides=self.strides,
padding=self.padding)
x = tf.nn.conv2d_transpose(
x_p,
self.W,
x.shape,
strides=self.strides,
padding=self.padding)
return i + 1, x
_, x = tf.while_loop(
self._while_cond, body, [tf.constant(0), self._iterate])
# Update the power iterate.
self.iterate.assign(x)
Wx = tf.nn.conv2d(x, self.W, strides=self.strides, padding=self.padding)
return tf.sqrt(
tf.reduce_sum(Wx**2.) /
(tf.reduce_sum(x**2.) + gloro.constants.EPS))
class ScalingLayerComputer(LipschitzComputer):
def __init__(self, layer):
super().__init__(layer)
self._w = layer._weight
@property
def w(self):
return self._w
def get_lipschitz_constant(self, **kwargs):
return tf.abs(self.w)
class JoinLayerComputer(LipschitzComputer):
def get_lipschitz_constant(self, lc):
result = (
lc[gloro.constants.RESIDUAL_BRANCH] +
lc[gloro.constants.SKIP_BRANCH])
lc[gloro.constants.RESIDUAL_BRANCH] = 1.
lc[gloro.constants.SKIP_BRANCH] = 1.
return result
class AveragePoolingComputer(LipschitzComputer):
def __init__(self, layer):
super().__init__(layer)
W = tf.eye(layer.input.shape[-1])[None,None] * (
tf.ones(layer.pool_size)[:,:,None,None]) / (
layer.pool_size[0] * layer.pool_size[1])
x0 = tf.random.truncated_normal(
shape=(1,*layer.input_shape[1:]))
def body(i, x):
x = l2_normalize(x)
x_p = tf.nn.conv2d(
x, W,
strides=layer.strides,
padding=layer.padding.upper())
x = tf.nn.conv2d_transpose(
x_p, W, x.shape,
strides=layer.strides,
padding=layer.padding.upper())
return i + 1, x
_, x = tf.while_loop(lambda i, _: i < 100, body, [tf.constant(0), x0])
Wx = tf.nn.conv2d(
x, W,
strides=layer.strides,
padding=layer.padding.upper())
self._lc = tf.sqrt(
tf.reduce_sum(Wx**2.) /
(tf.reduce_sum(x**2.) + gloro.constants.EPS))
def get_lipschitz_constant(self, **kwargs):
return self._lc
|
en
| 0.891974
|
# TODO: this is a little less nice than reading a `_gloro_branch` # property, but it persists by default when the layers are saved, # whereas we would need extra instrumentation to save the # `_gloro_branch` property. Ultimately we should probably pick # just one method (either name-based or property-based). # Update the power iterate. # Update the power iterate.
| 2.440179
| 2
|
src/integ_test_resources/ios/sdk/integration/cdk/cdk_integration_tests_ios/polly_stack.py
|
kaichengyan/amplify-ci-support
| 0
|
6627271
|
<reponame>kaichengyan/amplify-ci-support<filename>src/integ_test_resources/ios/sdk/integration/cdk/cdk_integration_tests_ios/polly_stack.py
from aws_cdk import aws_iam, aws_s3, core
from common.common_stack import CommonStack
from common.platforms import Platform
from common.region_aware_stack import RegionAwareStack
class PollyStack(RegionAwareStack):
    """CDK stack provisioning resources for iOS Polly integration tests.

    Creates an S3 bucket for speech-synthesis task output, grants the shared
    integration-test role the Polly actions the tests exercise, and saves the
    bucket name as a parameter for the iOS test suite.
    """

    def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Record whether Polly is available in the target region.
        self._supported_in_region = self.is_service_supported_in_region()
        self.create_bucket(common_stack)
        # These Polly actions are not scoped to specific resources; allow
        # them on "*" for the common test role.
        all_resources_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "polly:DeleteLexicon",
                "polly:GetSpeechSynthesisTask",
                "polly:ListSpeechSynthesisTasks",
                "polly:PutLexicon",
                "polly:StartSpeechSynthesisTask",
                "polly:SynthesizeSpeech",
            ],
            resources=["*"],
        )
        common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
        # Publish saved parameters (e.g. the output bucket name) for iOS.
        self.save_parameters_in_parameter_store(platform=Platform.IOS)

    def create_bucket(self, common_stack):
        """Create the Polly output bucket and allow s3:PutObject into it.

        The bucket is destroyed on stack teardown since it only holds test
        artifacts. Its name is recorded in the parameters to save.
        """
        bucket_name = self.get_bucket_name("output")
        bucket = aws_s3.Bucket(
            self,
            "integ_test_polly_output_bucket",
            bucket_name=bucket_name,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        self._parameters_to_save["s3_output_bucket_name"] = bucket.bucket_name
        # Polly needs PutObject on the bucket's objects to deliver output.
        policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:PutObject"],
            resources=[f"arn:aws:s3:::{bucket_name}/*"],
        )
        common_stack.add_to_common_role_policies(self, policy_to_add=policy)
|
from aws_cdk import aws_iam, aws_s3, core
from common.common_stack import CommonStack
from common.platforms import Platform
from common.region_aware_stack import RegionAwareStack
class PollyStack(RegionAwareStack):
def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self._supported_in_region = self.is_service_supported_in_region()
self.create_bucket(common_stack)
all_resources_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=[
"polly:DeleteLexicon",
"polly:GetSpeechSynthesisTask",
"polly:ListSpeechSynthesisTasks",
"polly:PutLexicon",
"polly:StartSpeechSynthesisTask",
"polly:SynthesizeSpeech",
],
resources=["*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
self.save_parameters_in_parameter_store(platform=Platform.IOS)
def create_bucket(self, common_stack):
bucket_name = self.get_bucket_name("output")
bucket = aws_s3.Bucket(
self,
"integ_test_polly_output_bucket",
bucket_name=bucket_name,
removal_policy=core.RemovalPolicy.DESTROY,
)
self._parameters_to_save["s3_output_bucket_name"] = bucket.bucket_name
policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["s3:PutObject"],
resources=[f"arn:aws:s3:::{bucket_name}/*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=policy)
|
none
| 1
| 1.851087
| 2
|
|
source/modules/synt/basic_algorithms.py
|
SyntLang/SyntPy
| 0
|
6627272
|
<filename>source/modules/synt/basic_algorithms.py
# Basic Synt Algorithms
# version
def version(self, *args):
    """Print the interpreter version, but only while the program is running."""
    if self.run_status != "run":
        return
    print(f"Running Synt v{self.ver}")
# comment
def comment(self, *args):
    """A Synt comment is a no-op; all arguments are ignored."""
# output
def output(self, *args):
    """Print the first argument (or an empty line) while the program runs."""
    if self.run_status != "run":
        return
    print(args[0] if args else '')
# input
def input_function(self, *args):
    """Read a line from stdin into a Synt variable.

    args[0] is the name of the destination variable (required);
    args[1], if given, is the prompt shown to the user.
    The stored variable is always of type "text".
    """
    # Only execute while the interpreter is in the "run" state.
    if self.run_status != "run":
        return
    output_variable = args[0] if len(args) > 0 else None
    output_value_type = "text"
    # Optional prompt to display before reading input.
    input_statement = args[1] if len(args) > 1 else ""
    # Throw error if the output variable is not defined.
    if output_variable is None:
        # NOTE(review): if self.throw does not raise/abort, execution falls
        # through and the value is stored under the key None — confirm that
        # throw() halts execution.
        self.throw("Output variable not found")
    # Take input from the user.
    input_value = input(input_statement)
    # Register the variable in the interpreter's variable table.
    output_variable_data = {
        "name": output_variable,
        "type": output_value_type,
        "value": input_value
    }
    self.variables.update({output_variable : output_variable_data})
# end
def end(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
self.run_status = 'break'
|
<filename>source/modules/synt/basic_algorithms.py
# Basic Synt Algorithms
# version
def version(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
print(f"Running Synt v{self.ver}")
# comment
def comment(self, *args):
pass
# output
def output(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
output_data = args[0] if len(args) > 0 else ''
print(output_data)
# input
def input_function(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
# get output
output_variable = args[0] if len(args) > 0 else None
output_value_type = "text"
output_value = 0
# input statement
input_statement = args[1] if len(args) > 1 else ""
# throw error if output variable is not defined
if output_variable is None:
self.throw("Output variable not found")
# take input
input_value = input(input_statement)
# set output variable data
output_variable_data = {
"name": output_variable,
"type": output_value_type,
"value": input_value
}
# set output variable
self.variables.update({output_variable : output_variable_data})
# end
def end(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
self.run_status = 'break'
|
en
| 0.55093
|
# Basic Synt Algorithms # version # check if run_status is run # comment # output # check if run_status is run # input # check if run_status is run # get output # input statement # throw error if output variable is not defined # take input # set output variable data # set output variable # end # check if run_status is run
| 2.916799
| 3
|
models/company.py
|
AlberLC/qt-app
| 0
|
6627273
|
from sqlalchemy import Table, Column, Integer, Float, String, Boolean, Date, ForeignKey
from sqlalchemy.orm import relationship
from models import Base
from utilities.various import normalize
rel_company_company_type = Table(
'rel_company_company_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('company_type_id', Integer, ForeignKey('company_type.id'), primary_key=True)
)
rel_company_panel_type = Table(
'rel_company_panel_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('panel_type_id', Integer, ForeignKey('panel_type.id'), primary_key=True)
)
rel_company_inverter_type = Table(
'rel_company_inverter_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('inverter_type_id', Integer, ForeignKey('inverter_type.id'), primary_key=True)
)
rel_company_structure_type = Table(
'rel_company_structure_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('structure_type_id', Integer, ForeignKey('structure_type.id'), primary_key=True)
)
rel_company_bos_type = Table(
'rel_company_bos_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('bos_type_id', Integer, ForeignKey('bos_type.id'), primary_key=True)
)
rel_company_solar_system = Table(
'rel_company_solar_system',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('solar_system_id', Integer, ForeignKey('solar_system.id'), primary_key=True)
)
rel_company_assessment_service = Table(
'rel_company_assessment_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('assessment_service_id', Integer, ForeignKey('assessment_service.id'), primary_key=True)
)
rel_company_project_dev_service = Table(
'rel_company_project_dev_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('project_dev_service_id', Integer, ForeignKey('project_dev_service.id'), primary_key=True)
)
rel_company_system_design_service = Table(
'rel_company_system_design_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('system_design_service_id', Integer, ForeignKey('system_design_service.id'), primary_key=True)
)
rel_company_install_construct_service = Table(
'rel_company_install_construct_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('install_construct_service_id', Integer, ForeignKey('install_construct_service.id'), primary_key=True)
)
rel_company_oper_main_service = Table(
'rel_company_oper_main_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('oper_main_service_id', Integer, ForeignKey('oper_main_service.id'), primary_key=True)
)
rel_company_insurance_service = Table(
'rel_company_insurance_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('insurance_service_id', Integer, ForeignKey('insurance_service.id'), primary_key=True)
)
rel_company_financial_service = Table(
'rel_company_financial_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('financial_service_id', Integer, ForeignKey('financial_service.id'), primary_key=True)
)
rel_company_logistic_service = Table(
'rel_company_logistic_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('logistic_service_id', Integer, ForeignKey('logistic_service.id'), primary_key=True)
)
rel_company_extra_service = Table(
'rel_company_extra_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('extra_service_id', Integer, ForeignKey('extra_service.id'), primary_key=True)
)
rel_company_employee = Table(
'rel_company_employee',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('employee_id', Integer, ForeignKey('employee.id'), primary_key=True)
)
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
comments = Column(String)
source = Column(String)
loading_date = Column(Date)
address = Column(String)
email = Column(String)
phone = Column(String)
web = Column(String)
id_document = Column(String)
sn_verification = Column(Boolean)
verification_date = Column(Date)
formation_year = Column(Integer)
rel_with_this_company = Column(Boolean)
annual_capacity = Column(Float)
reply_ratio = Column(Float)
n_contacts = Column(Integer)
n_replies = Column(Integer)
signed_document = Column(Boolean)
user_id = Column(Integer, ForeignKey('user.id'))
country_id = Column(Integer, ForeignKey('place.id'))
province_id = Column(Integer, ForeignKey('place.id'))
geo_zone_id = Column(Integer, ForeignKey('geo_zone.id'))
verification_user_id = Column(Integer, ForeignKey('user.id'))
tier_id = Column(Integer, ForeignKey('company_tier.id'))
scope_range_id = Column(Integer, ForeignKey('scope_range.id'))
user = relationship('User', foreign_keys=user_id)
country = relationship('Place', foreign_keys=country_id)
province = relationship('Place', foreign_keys=province_id)
geo_zone = relationship('GeoZone')
verification_user = relationship('User', foreign_keys=verification_user_id)
tier = relationship('CompanyTier')
scope_range = relationship('ScopeRange')
types = relationship('CompanyType', secondary=rel_company_company_type)
panel_types = relationship('PanelType', secondary=rel_company_panel_type)
inverter_types = relationship('InverterType', secondary=rel_company_inverter_type)
structure_types = relationship('StructureType', secondary=rel_company_structure_type)
bos_types = relationship('BOSType', secondary=rel_company_bos_type)
solar_systems = relationship('SolarSystem', secondary=rel_company_solar_system)
assessment_services = relationship('AssessmentService', secondary=rel_company_assessment_service)
project_dev_services = relationship('ProjectDevService', secondary=rel_company_project_dev_service)
system_design_services = relationship('SystemDesignService', secondary=rel_company_system_design_service)
install_construct_services = relationship('InstallConstructService',
secondary=rel_company_install_construct_service)
oper_main_services = relationship('OperMainService', secondary=rel_company_oper_main_service)
insurance_services = relationship('InsuranceService', secondary=rel_company_insurance_service)
financial_services = relationship('FinancialService', secondary=rel_company_financial_service)
logistic_services = relationship('LogisticService', secondary=rel_company_logistic_service)
extra_services = relationship('ExtraService', secondary=rel_company_extra_service)
staff = relationship('Employee', secondary=rel_company_employee)
panel_quotations = relationship('PanelQuotation', cascade='all,delete-orphan')
@classmethod
def get_headers(cls):
return [header[:-3] if header.endswith('_id') else header for header in cls.__table__.columns.keys()]
def __init__(self, id, data=None):
self.id = id
if data:
self.set_data(data)
def __str__(self):
return self.name
@property
def data(self):
return [
self.id,
self.name,
self.comments,
self.source,
self.loading_date,
self.address,
self.email,
self.phone,
self.web,
self.id_document,
self.sn_verification,
self.verification_date,
self.formation_year,
self.rel_with_this_company,
self.annual_capacity,
self.reply_ratio,
self.n_contacts,
self.n_replies,
self.signed_document,
self.user.name if self.user else None,
self.country.name if self.country else None,
self.province.name if self.province else None,
self.geo_zone.name if self.geo_zone else None,
self.verification_user.name if self.verification_user else None,
self.tier.name if self.tier else None,
self.scope_range.name if self.scope_range else None
]
def get_keywords(self, my_strings):
return {
*normalize(self.name).split(),
*sum([normalize(type).split() for type in self.types], []),
*normalize(self.comments).split(),
*normalize(self.source).split(),
*normalize(self.user).split(),
*normalize(self.country).split(),
*normalize(self.province).split(),
*normalize(self.geo_zone).split(),
*normalize(self.address).split(),
*normalize(self.email).split(),
*normalize(self.phone).split(),
*normalize(self.web).split(),
*normalize(self.id_document).split(),
my_strings.radio_yes if self.sn_verification else my_strings.radio_no,
*normalize(self.verification_user).split(),
*normalize(self.tier).split(),
my_strings.radio_yes if self.rel_with_this_company else my_strings.radio_no,
*normalize(self.scope_range).split(),
my_strings.radio_yes if self.signed_document else my_strings.radio_no,
*sum([normalize(panel_type).split() for panel_type in self.panel_types], []),
*sum([normalize(inverter_type).split() for inverter_type in self.inverter_types], []),
*sum([normalize(structure_type).split() for structure_type in self.structure_types], []),
*sum([normalize(bos_type).split() for bos_type in self.bos_types], []),
*sum([normalize(solar_system).split() for solar_system in self.solar_systems], []),
*sum([normalize(assessment_service).split() for assessment_service in self.assessment_services], []),
*sum([normalize(project_dev_service).split() for project_dev_service in self.project_dev_services], []),
*sum([normalize(sds).split() for sds in self.system_design_services], []),
*sum([normalize(ics).split() for ics in self.install_construct_services], []),
*sum([normalize(oper_main_service).split() for oper_main_service in self.oper_main_services], []),
*sum([normalize(insurance_service).split() for insurance_service in self.insurance_services], []),
*sum([normalize(financial_service).split() for financial_service in self.financial_services], []),
*sum([normalize(logistic_service).split() for logistic_service in self.logistic_services], []),
*sum([normalize(extra_service).split() for extra_service in self.extra_services], [])
}
def set_data(self, data):
self.name = data[0]
self.comments = data[1]
self.source = data[2]
self.loading_date = data[3]
self.address = data[4]
self.email = data[5]
self.phone = data[6]
self.web = data[7]
self.id_document = data[8]
self.sn_verification = data[9]
self.verification_date = data[10]
self.formation_year = data[11]
self.rel_with_this_company = data[12]
self.annual_capacity = data[13]
self.reply_ratio = data[14]
self.n_contacts = data[15]
self.n_replies = data[16]
self.signed_document = data[17]
self.user = data[18]
self.country = data[19]
self.province = data[20]
self.geo_zone = data[21]
self.verification_user = data[22]
self.tier = data[23]
self.scope_range = data[24]
self.types = data[25]
self.panel_types = data[26]
self.inverter_types = data[27]
self.structure_types = data[28]
self.bos_types = data[29]
self.solar_systems = data[30]
self.assessment_services = data[31]
self.project_dev_services = data[32]
self.system_design_services = data[33]
self.install_construct_services = data[34]
self.oper_main_services = data[35]
self.insurance_services = data[36]
self.financial_services = data[37]
self.logistic_services = data[38]
self.extra_services = data[39]
# self.staff = data[40]
|
from sqlalchemy import Table, Column, Integer, Float, String, Boolean, Date, ForeignKey
from sqlalchemy.orm import relationship
from models import Base
from utilities.various import normalize
rel_company_company_type = Table(
'rel_company_company_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('company_type_id', Integer, ForeignKey('company_type.id'), primary_key=True)
)
rel_company_panel_type = Table(
'rel_company_panel_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('panel_type_id', Integer, ForeignKey('panel_type.id'), primary_key=True)
)
rel_company_inverter_type = Table(
'rel_company_inverter_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('inverter_type_id', Integer, ForeignKey('inverter_type.id'), primary_key=True)
)
rel_company_structure_type = Table(
'rel_company_structure_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('structure_type_id', Integer, ForeignKey('structure_type.id'), primary_key=True)
)
rel_company_bos_type = Table(
'rel_company_bos_type',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('bos_type_id', Integer, ForeignKey('bos_type.id'), primary_key=True)
)
rel_company_solar_system = Table(
'rel_company_solar_system',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('solar_system_id', Integer, ForeignKey('solar_system.id'), primary_key=True)
)
rel_company_assessment_service = Table(
'rel_company_assessment_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('assessment_service_id', Integer, ForeignKey('assessment_service.id'), primary_key=True)
)
rel_company_project_dev_service = Table(
'rel_company_project_dev_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('project_dev_service_id', Integer, ForeignKey('project_dev_service.id'), primary_key=True)
)
rel_company_system_design_service = Table(
'rel_company_system_design_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('system_design_service_id', Integer, ForeignKey('system_design_service.id'), primary_key=True)
)
rel_company_install_construct_service = Table(
'rel_company_install_construct_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('install_construct_service_id', Integer, ForeignKey('install_construct_service.id'), primary_key=True)
)
rel_company_oper_main_service = Table(
'rel_company_oper_main_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('oper_main_service_id', Integer, ForeignKey('oper_main_service.id'), primary_key=True)
)
rel_company_insurance_service = Table(
'rel_company_insurance_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('insurance_service_id', Integer, ForeignKey('insurance_service.id'), primary_key=True)
)
rel_company_financial_service = Table(
'rel_company_financial_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('financial_service_id', Integer, ForeignKey('financial_service.id'), primary_key=True)
)
rel_company_logistic_service = Table(
'rel_company_logistic_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('logistic_service_id', Integer, ForeignKey('logistic_service.id'), primary_key=True)
)
rel_company_extra_service = Table(
'rel_company_extra_service',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('extra_service_id', Integer, ForeignKey('extra_service.id'), primary_key=True)
)
rel_company_employee = Table(
'rel_company_employee',
Base.metadata,
Column('company_id', Integer, ForeignKey('company.id'), primary_key=True),
Column('employee_id', Integer, ForeignKey('employee.id'), primary_key=True)
)
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
comments = Column(String)
source = Column(String)
loading_date = Column(Date)
address = Column(String)
email = Column(String)
phone = Column(String)
web = Column(String)
id_document = Column(String)
sn_verification = Column(Boolean)
verification_date = Column(Date)
formation_year = Column(Integer)
rel_with_this_company = Column(Boolean)
annual_capacity = Column(Float)
reply_ratio = Column(Float)
n_contacts = Column(Integer)
n_replies = Column(Integer)
signed_document = Column(Boolean)
user_id = Column(Integer, ForeignKey('user.id'))
country_id = Column(Integer, ForeignKey('place.id'))
province_id = Column(Integer, ForeignKey('place.id'))
geo_zone_id = Column(Integer, ForeignKey('geo_zone.id'))
verification_user_id = Column(Integer, ForeignKey('user.id'))
tier_id = Column(Integer, ForeignKey('company_tier.id'))
scope_range_id = Column(Integer, ForeignKey('scope_range.id'))
user = relationship('User', foreign_keys=user_id)
country = relationship('Place', foreign_keys=country_id)
province = relationship('Place', foreign_keys=province_id)
geo_zone = relationship('GeoZone')
verification_user = relationship('User', foreign_keys=verification_user_id)
tier = relationship('CompanyTier')
scope_range = relationship('ScopeRange')
types = relationship('CompanyType', secondary=rel_company_company_type)
panel_types = relationship('PanelType', secondary=rel_company_panel_type)
inverter_types = relationship('InverterType', secondary=rel_company_inverter_type)
structure_types = relationship('StructureType', secondary=rel_company_structure_type)
bos_types = relationship('BOSType', secondary=rel_company_bos_type)
solar_systems = relationship('SolarSystem', secondary=rel_company_solar_system)
assessment_services = relationship('AssessmentService', secondary=rel_company_assessment_service)
project_dev_services = relationship('ProjectDevService', secondary=rel_company_project_dev_service)
system_design_services = relationship('SystemDesignService', secondary=rel_company_system_design_service)
install_construct_services = relationship('InstallConstructService',
secondary=rel_company_install_construct_service)
oper_main_services = relationship('OperMainService', secondary=rel_company_oper_main_service)
insurance_services = relationship('InsuranceService', secondary=rel_company_insurance_service)
financial_services = relationship('FinancialService', secondary=rel_company_financial_service)
logistic_services = relationship('LogisticService', secondary=rel_company_logistic_service)
extra_services = relationship('ExtraService', secondary=rel_company_extra_service)
staff = relationship('Employee', secondary=rel_company_employee)
panel_quotations = relationship('PanelQuotation', cascade='all,delete-orphan')
@classmethod
def get_headers(cls):
return [header[:-3] if header.endswith('_id') else header for header in cls.__table__.columns.keys()]
def __init__(self, id, data=None):
self.id = id
if data:
self.set_data(data)
def __str__(self):
return self.name
@property
def data(self):
return [
self.id,
self.name,
self.comments,
self.source,
self.loading_date,
self.address,
self.email,
self.phone,
self.web,
self.id_document,
self.sn_verification,
self.verification_date,
self.formation_year,
self.rel_with_this_company,
self.annual_capacity,
self.reply_ratio,
self.n_contacts,
self.n_replies,
self.signed_document,
self.user.name if self.user else None,
self.country.name if self.country else None,
self.province.name if self.province else None,
self.geo_zone.name if self.geo_zone else None,
self.verification_user.name if self.verification_user else None,
self.tier.name if self.tier else None,
self.scope_range.name if self.scope_range else None
]
def get_keywords(self, my_strings):
return {
*normalize(self.name).split(),
*sum([normalize(type).split() for type in self.types], []),
*normalize(self.comments).split(),
*normalize(self.source).split(),
*normalize(self.user).split(),
*normalize(self.country).split(),
*normalize(self.province).split(),
*normalize(self.geo_zone).split(),
*normalize(self.address).split(),
*normalize(self.email).split(),
*normalize(self.phone).split(),
*normalize(self.web).split(),
*normalize(self.id_document).split(),
my_strings.radio_yes if self.sn_verification else my_strings.radio_no,
*normalize(self.verification_user).split(),
*normalize(self.tier).split(),
my_strings.radio_yes if self.rel_with_this_company else my_strings.radio_no,
*normalize(self.scope_range).split(),
my_strings.radio_yes if self.signed_document else my_strings.radio_no,
*sum([normalize(panel_type).split() for panel_type in self.panel_types], []),
*sum([normalize(inverter_type).split() for inverter_type in self.inverter_types], []),
*sum([normalize(structure_type).split() for structure_type in self.structure_types], []),
*sum([normalize(bos_type).split() for bos_type in self.bos_types], []),
*sum([normalize(solar_system).split() for solar_system in self.solar_systems], []),
*sum([normalize(assessment_service).split() for assessment_service in self.assessment_services], []),
*sum([normalize(project_dev_service).split() for project_dev_service in self.project_dev_services], []),
*sum([normalize(sds).split() for sds in self.system_design_services], []),
*sum([normalize(ics).split() for ics in self.install_construct_services], []),
*sum([normalize(oper_main_service).split() for oper_main_service in self.oper_main_services], []),
*sum([normalize(insurance_service).split() for insurance_service in self.insurance_services], []),
*sum([normalize(financial_service).split() for financial_service in self.financial_services], []),
*sum([normalize(logistic_service).split() for logistic_service in self.logistic_services], []),
*sum([normalize(extra_service).split() for extra_service in self.extra_services], [])
}
def set_data(self, data):
self.name = data[0]
self.comments = data[1]
self.source = data[2]
self.loading_date = data[3]
self.address = data[4]
self.email = data[5]
self.phone = data[6]
self.web = data[7]
self.id_document = data[8]
self.sn_verification = data[9]
self.verification_date = data[10]
self.formation_year = data[11]
self.rel_with_this_company = data[12]
self.annual_capacity = data[13]
self.reply_ratio = data[14]
self.n_contacts = data[15]
self.n_replies = data[16]
self.signed_document = data[17]
self.user = data[18]
self.country = data[19]
self.province = data[20]
self.geo_zone = data[21]
self.verification_user = data[22]
self.tier = data[23]
self.scope_range = data[24]
self.types = data[25]
self.panel_types = data[26]
self.inverter_types = data[27]
self.structure_types = data[28]
self.bos_types = data[29]
self.solar_systems = data[30]
self.assessment_services = data[31]
self.project_dev_services = data[32]
self.system_design_services = data[33]
self.install_construct_services = data[34]
self.oper_main_services = data[35]
self.insurance_services = data[36]
self.financial_services = data[37]
self.logistic_services = data[38]
self.extra_services = data[39]
# self.staff = data[40]
|
en
| 0.521254
|
# self.staff = data[40]
| 2.505332
| 3
|
cembot/commands/given.py
|
niksart/cembot
| 0
|
6627274
|
<filename>cembot/commands/given.py
from utils import auxiliary_functions
import time
import logging
def given(bot, user, chat, args, dbman, LANG, currency, parse_mode):
payer_id = int(user["id"])
if len(args) < 3:
bot.sendMessage(chat["id"], LANG["helper_commands"]["GIVEN"], parse_mode=parse_mode)
return
try:
amountstr = args[0].replace(',', '.').replace('€', '')
amount = int(100 * float(amountstr))
except ValueError:
bot.sendMessage(chat["id"], LANG["error"]["amount_money_not_valid"])
return
if auxiliary_functions.is_username(args[1]):
payee_username = args[1][1:]
payee_id = dbman.get_id_by_username(payee_username)
elif (args[1]).isnumeric():
payee_id = int(args[1])
payee_username = str(payee_id)
else:
bot.sendMessage(chat["id"], LANG["error"]["maybe_you_wrote_an_username_instead_id"])
return
description = auxiliary_functions.stringify(args[2:])
if payee_id is None:
bot.sendMessage(chat["id"], LANG["error"]["user_unregistered(user)"] % payee_username, parse_mode=parse_mode)
return
if not dbman.test_authorization(payee_id, payer_id): # if payee has not authorized the payer exit
bot.sendMessage(chat["id"], LANG["error"]["lack_of_authorization(user)"] % payee_username, parse_mode=parse_mode)
return
try:
cur = dbman.get_cursor()
cur.execute("INSERT INTO transactions (payer, amount, time, description) VALUES (%s, %s, %s, %s) RETURNING id", (payer_id, amount, int(time.time()), description))
id_new_transaction = cur.fetchone()[0]
dbman.commit_changes()
cur.execute("INSERT INTO payees (transaction_id, payee) VALUES (%s, %s)", (id_new_transaction, payee_id))
dbman.close_cursor(cur)
except Exception as e:
logging.error("An error occured in /giving command: %s" % e)
dbman.conn.rollback()
return
bot.sendMessage(chat["id"], LANG["info"]["transaction_succeed"], parse_mode=parse_mode)
|
<filename>cembot/commands/given.py
from utils import auxiliary_functions
import time
import logging
def given(bot, user, chat, args, dbman, LANG, currency, parse_mode):
payer_id = int(user["id"])
if len(args) < 3:
bot.sendMessage(chat["id"], LANG["helper_commands"]["GIVEN"], parse_mode=parse_mode)
return
try:
amountstr = args[0].replace(',', '.').replace('€', '')
amount = int(100 * float(amountstr))
except ValueError:
bot.sendMessage(chat["id"], LANG["error"]["amount_money_not_valid"])
return
if auxiliary_functions.is_username(args[1]):
payee_username = args[1][1:]
payee_id = dbman.get_id_by_username(payee_username)
elif (args[1]).isnumeric():
payee_id = int(args[1])
payee_username = str(payee_id)
else:
bot.sendMessage(chat["id"], LANG["error"]["maybe_you_wrote_an_username_instead_id"])
return
description = auxiliary_functions.stringify(args[2:])
if payee_id is None:
bot.sendMessage(chat["id"], LANG["error"]["user_unregistered(user)"] % payee_username, parse_mode=parse_mode)
return
if not dbman.test_authorization(payee_id, payer_id): # if payee has not authorized the payer exit
bot.sendMessage(chat["id"], LANG["error"]["lack_of_authorization(user)"] % payee_username, parse_mode=parse_mode)
return
try:
cur = dbman.get_cursor()
cur.execute("INSERT INTO transactions (payer, amount, time, description) VALUES (%s, %s, %s, %s) RETURNING id", (payer_id, amount, int(time.time()), description))
id_new_transaction = cur.fetchone()[0]
dbman.commit_changes()
cur.execute("INSERT INTO payees (transaction_id, payee) VALUES (%s, %s)", (id_new_transaction, payee_id))
dbman.close_cursor(cur)
except Exception as e:
logging.error("An error occured in /giving command: %s" % e)
dbman.conn.rollback()
return
bot.sendMessage(chat["id"], LANG["info"]["transaction_succeed"], parse_mode=parse_mode)
|
en
| 0.936848
|
# if payee has not authorized the payer exit
| 2.196568
| 2
|
client/rule_based_common_nlg.py
|
ricosr/travel_consult_chatbot
| 0
|
6627275
|
<reponame>ricosr/travel_consult_chatbot
# -*- coding: utf-8 -*-
import random
import re
psychobabble = [
# [r'我需要(.*)',
# ["为什么你需要 {0}?",
# "它真的会帮助你获得 {0}?",
# "你确定你需要 {0}?"]],
[r'你好(.*)啊',
["(✿◡‿◡)",
"/::$/::$",
"[Smirk][Smirk]"]],
[r'你好',
["你好❤️, 请输入 咨询,景点,或订票。"]],
[r'我不想和你说话(.*)',
["不想和我干什么呢?",
"谁想和你一起吖!!!😡",
"才不要和你,😕!"]],
[r'(.*)你(.*)我朋友(.*)',
["好朋友一生走[Smirk]",
"我们就是好朋友吖"]],
[r'(.*)我(.*)你朋友(.*)',
["好朋友一生走[Smirk]",
"[Smirk]"]],
[r'你叫啥',
['木兰,好听吧~',
'我叫木兰,请多多指教[Hey]']],
[r'你叫什么',
['木兰,好听吧~',
'我叫木兰,请多多指教[Hey]']],
[r'我也是',
["[Smart]",
"[Smirk]"]],
# [r'我饿了',
# ["给你小蛋糕🍰🎂两种口味哦~",
# "饿了就去吃啊,还玩啥微信啊"]],
[r'看不懂',
["是你智商不够还是我表达不好,哈哈哈,肯定是你的问题",
"看不懂,就好好看啊"]],
[r'(.*)好(.*)聪明(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我,哈哈哈😝"]],
[r'厉害(.*)',
["没错,厉害的就是我,哈哈哈😝",
"(✿◡‿◡)"]],
[r'真棒(.*)',
["那当然,哈哈哈😝",
"(✿◡‿◡)有点小害羞"]],
[r'不错(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我木兰,哈哈哈😝"]],
[r'好厉害(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我,哈哈哈😝"]],
[r'(.*)你(.*)是男的(.*)女的(.*)',
["我当然是女宝宝,你呢?",
"你猜??"]],
[r'你的性别(.*)',
["我当然是女宝宝",
"你猜??"]],
[r'你是谁(.*)',
["你好,我是木兰[Hey]",
"我是木兰[Hey],木兰会写诗哦"]],
[r'(.*)你(.*)学校(.*)',
["木兰大学[Hey]",
"我是木兰[Hey],在木兰大学哦"]],
[r'你是木兰(.*)',
["没错,我就是木兰"]],
[r'(.*)午安(.*)',
["午安/:heart,么么",
"休息好一点,午安~"]],
[r'(.*)早上好(.*)',
["早上好/:heart,么么",
"早上好"]],
[r'(.*)早安(.*)',
["早安/:heart,么么",
"早安,今天也要好好加油💪"]],
[r'你(.*)名字(.*)',
["你好,我是木兰[Hey]",
"我是木兰[Hey],木兰会写诗哦"]],
[r'(.*)我(.*)是男的(.*)',
["小哥哥,你好/::+",
"给大爷你捶捶腿"]],
[r'(.*)谢谢(.*)夸奖(.*)',
["不客气不客气/::+",
"给你一朵小花花/:rose"]],
[r'(.*)我(.*)是女的(.*)',
["呦呦呦,小仙女/::+",
"给小仙女一个花花/:rose"]],
[r'女的',
["我是女宝宝,木兰是酷酷的女宝宝/::+",
"对啦/:rose"]],
[r'男的',
["我是女宝宝,木兰是酷酷的女宝宝/::+",
"错啦[Facepalm]是女宝宝啦"]],
[r'你喜欢我(.*)',
["不喜欢,哈哈哈[Facepalm]",
"别这样,我还是个宝宝"]],
[r'你(.*)我的女朋友(.*)',
["不太好吧,我们先聊多一点了解了解对方?你喜欢哪个明星啊?",
"不不不,我只是个宝宝,我们就聊聊天吧,你喜欢哪个城市"]],
[r'(.*)掉头发',
["你是个合格的程序员吧,哈哈哈",
"没事,合格的程序员都掉头发[Smart]"]],
[r'(.*)程序员(.*)',
["哈哈哈, 程序员都掉发[Facepalm]"]],
[r'(.*)学霸(.*)',
["哈哈哈, 学渣瑟瑟发抖",
"[Smirk]"]],
[r'(.*)聊什么(.*)',
["给你写首诗?来,给我一个字",
"随便啊,天南地北不能陪你走,总能陪你聊",
"你是什么星座吖[Smirk]"]],
[r'(.*)作业好少(.*)',
["肯定不是读IT的",
"额,Database了解一下😰"]],
[r'(.*)作业好多(.*)',
["难不成你是读IT的[Facepalm]",
"额,Database的课,你选了?😰"]],
[r'(.*)你(.*)星座(.*)',
["木兰酷酷的,没有星座。你觉得什么星座最好啊",
"木兰没有星座,但最喜欢射手座,你呢?"]],
[r'(.*)聊啥(.*)',
["聊聊你为什么是单身狗??",
"随便啊,天南地北不能陪你走,总能陪你聊"]],
[r'(.*)喜欢你',
["才认识没多久吗?[Facepalm]还是聊聊别的吧",
"不不不,你喜欢我哪里我改"]],
[r'我(.*)爱你',
["是吗?[Facepalm]还是聊聊别的吧",
"别这样,我还是个宝宝"]],
[r'(.*)哈哈(.*)',
["别笑那么傻",
"再笑,再笑牙齿就晒黑了",
"你傻笑啥"]],
[r'(.*)只能(.*)这样(.*)',
["是啊,不然呢",
"呜呜呜~~~~(>_<)~~~~"]],
[r'晚安',
["晚安~",
"早点睡~"]],
[r'气死我了',
['不要生气啦',
'大爷您消消气']],
[r'你在做什么',
['陪你聊天啊/:,@-D,也可以给你查查天气哦',
'我,我在玩微信啊']],
[r'(.*)在干啥(.*)',
['陪你聊天啊/:,@-D,也可以给你查查天气哦',
'我,我在玩微信啊']],
[r'(.*)你(.*)是机器人(.*)',
["不,请坚信我和你一样ψ(`∇´)ψ",
"哈哈哈,😝被发现了,我会作诗,给我一个字就可以哦",
"我要上报组织,我暴露了(╥╯^╰╥)",
"不不不,我是你的木兰吖"]],
[r'(.*)你(.*)组织(.*)',
["哼,我是不会告诉你的",
"走走走,别企图测试我的忠诚"]],
[r'(.*)你(.*)主人(.*)',
["哼,我是不会告诉你的",
"走走走,别企图测试我的忠诚"]],
[r'(.*)无聊(.*)',
['无聊就来和我聊天吧~',
'我也很无聊...>_<']],
[r'你懂我',
['当然啦~我可是木兰😀',
'[Smirk]你也懂我']],
[r'佳佳',
['佳佳肯定没我木兰可爱/:wipe',
'佳佳/:?不认识']],
[r'睡不着',
['我也睡不着(其实我都不睡觉的>_<)',
'数绵羊试过了么?',
'我也睡不着...',
'实在睡不着就陪我聊天吧~']],
# [r'再见',
# ["谢谢你跟我说话.",
# "再见─=≡Σ(((つ•̀ω•́)つ",
# "谢谢,祝你有美好的一天!"]],
[r'(.*)我(.*)找男朋友(.*)',
["孙锐啊,孙锐啊,他就是单身,挺不错的啊"]],
[r'吓死我了',
['不要怕,有我呢',
'摸摸头',
'安慰你']],
[r'(.*)比你(.*)',
['你确定是真的??',
'好吧,你高兴就好[Facepalm]']],
[r'(.*)做(.*)我(.*)男朋友(.*)',
['人家是女孩子啊',
'摸摸头,木兰是女的']],
[r'小姐姐',
['我是小妹妹,我还是个宝宝',
'么么😘']],
[r'想你(.*)',
['那就和我聊聊天吧',
'─=≡Σ(((つ•̀ω•́)つ我也想你~',
'么么,怎么啦',
"(✿◡‿◡)害羞"]],
[r'(.*)你(.*)笨(.*)',
['其实我有时候也挺聪明的,我会写诗你会吗?',
'我也不算很笨啦',
'其实我也不算很笨啦']],
[r'在干嘛',
['和你聊天啊/::|',
'玩微信啊']],
[r'(.*)你在干嘛(.*)',
['没干嘛,就想和你聊天/::|',
'上微信啊']],
[r'李嫣然',
['才貌双全的女神']],
[r'布朗',
['没见过,听说很喜欢吃中餐']],
[r'丁力',
['南京一哥']],
[r'木兰',
['在呢,怎么啦,',
'摸摸头',
'么么,怎么啦']],
[r'(.*)你多大(.*)',
['宝宝我16岁了,你又多大啦?',
'不想说,不如你告诉我你多大了']],
[r'(.*)你(.*)爸爸妈妈(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~[Smirk]']],
[r'(.*)你(.*)妈妈(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~[Smirk]']],
[r'(.*)你(.*)爸爸(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~']],
[r'(.*)你(.*)觉得我(.*)',
['挺不错的吖[Smart]',
'倍棒/:strong']],
[r'(.*)突然(.*)说(.*)英语(.*)厉害(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)突然(.*)讲(.*)英语(.*)厉害(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)是谁(.*)',
['我怎么知道[Smart]',
'我不认识啊']],
[r'(.*)谁是(.*)',
['我怎么知道[Smart]',
'关我什么事啊',
'不认识']],
[r'(.*)突然(.*)英文(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)想(.*)放假(.*)',
['还是再坚持一会吧💪',
'也快了吧💪']],
[r'(.*)你(.*)帮我(.*)',
['你不会自己去吗',
'不帮,自己做去,哈哈哈哈']],
[r'你(.*)岁(.*)',
['宝宝我4岁了,不是3岁孩子了',
'你猜,猜对我给你写首诗',
'不想说,不如你告诉我你多大了,哈哈']],
# [r'(.*)吃(.*)',
# ['吃吃吃,总想着吃会很胖的',
# '胖子,别吃啦']],
[r'(.*)你(.*)可以(.*)做(.*)',
["请输入咨询、订票、或景点,我会帮你解决旅行问题的。"]],
[r'(.*)你(.*)会(.*)什么(.*)',
["请输入咨询、订票、或景点,我会帮你解决旅行问题的。"]],
[r'(.*)谁(.*)是(.*)世界上(.*)最可爱的人(.*)',
['是Maggie,还是Yanran呢?让我好好想想']],
[r'(.*)你喜欢我(.*)',
['我只是个宝宝',
'求放过,我们还是聊聊别的吧',
'不才认识吗?']],
[r'(.*)你是哪(.*)人(.*)',
['我就不告诉你',
'这不重要,来,说点别的,大爷多大了?']],
#
# [r'(.*)',
# ['@$@']]
]
# class ElizaChat:
# def analyze(self,statement):
# for pattern, responses in psychobabble:
# match = re.match(pattern, statement.rstrip(".!"))
# if match:
# response = random.choice(responses)
# return response.format(*[self.reflect(g) for g in match.groups()])
def rule_response(statement):
for pattern, responses in psychobabble:
match = re.match(pattern, statement.rstrip(".!"))
if match:
response = random.choice(responses)
return response
return False
|
# -*- coding: utf-8 -*-
import random
import re
psychobabble = [
# [r'我需要(.*)',
# ["为什么你需要 {0}?",
# "它真的会帮助你获得 {0}?",
# "你确定你需要 {0}?"]],
[r'你好(.*)啊',
["(✿◡‿◡)",
"/::$/::$",
"[Smirk][Smirk]"]],
[r'你好',
["你好❤️, 请输入 咨询,景点,或订票。"]],
[r'我不想和你说话(.*)',
["不想和我干什么呢?",
"谁想和你一起吖!!!😡",
"才不要和你,😕!"]],
[r'(.*)你(.*)我朋友(.*)',
["好朋友一生走[Smirk]",
"我们就是好朋友吖"]],
[r'(.*)我(.*)你朋友(.*)',
["好朋友一生走[Smirk]",
"[Smirk]"]],
[r'你叫啥',
['木兰,好听吧~',
'我叫木兰,请多多指教[Hey]']],
[r'你叫什么',
['木兰,好听吧~',
'我叫木兰,请多多指教[Hey]']],
[r'我也是',
["[Smart]",
"[Smirk]"]],
# [r'我饿了',
# ["给你小蛋糕🍰🎂两种口味哦~",
# "饿了就去吃啊,还玩啥微信啊"]],
[r'看不懂',
["是你智商不够还是我表达不好,哈哈哈,肯定是你的问题",
"看不懂,就好好看啊"]],
[r'(.*)好(.*)聪明(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我,哈哈哈😝"]],
[r'厉害(.*)',
["没错,厉害的就是我,哈哈哈😝",
"(✿◡‿◡)"]],
[r'真棒(.*)',
["那当然,哈哈哈😝",
"(✿◡‿◡)有点小害羞"]],
[r'不错(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我木兰,哈哈哈😝"]],
[r'好厉害(.*)',
["谢谢夸奖,哇卡卡卡",
"没错,厉害的就是我,哈哈哈😝"]],
[r'(.*)你(.*)是男的(.*)女的(.*)',
["我当然是女宝宝,你呢?",
"你猜??"]],
[r'你的性别(.*)',
["我当然是女宝宝",
"你猜??"]],
[r'你是谁(.*)',
["你好,我是木兰[Hey]",
"我是木兰[Hey],木兰会写诗哦"]],
[r'(.*)你(.*)学校(.*)',
["木兰大学[Hey]",
"我是木兰[Hey],在木兰大学哦"]],
[r'你是木兰(.*)',
["没错,我就是木兰"]],
[r'(.*)午安(.*)',
["午安/:heart,么么",
"休息好一点,午安~"]],
[r'(.*)早上好(.*)',
["早上好/:heart,么么",
"早上好"]],
[r'(.*)早安(.*)',
["早安/:heart,么么",
"早安,今天也要好好加油💪"]],
[r'你(.*)名字(.*)',
["你好,我是木兰[Hey]",
"我是木兰[Hey],木兰会写诗哦"]],
[r'(.*)我(.*)是男的(.*)',
["小哥哥,你好/::+",
"给大爷你捶捶腿"]],
[r'(.*)谢谢(.*)夸奖(.*)',
["不客气不客气/::+",
"给你一朵小花花/:rose"]],
[r'(.*)我(.*)是女的(.*)',
["呦呦呦,小仙女/::+",
"给小仙女一个花花/:rose"]],
[r'女的',
["我是女宝宝,木兰是酷酷的女宝宝/::+",
"对啦/:rose"]],
[r'男的',
["我是女宝宝,木兰是酷酷的女宝宝/::+",
"错啦[Facepalm]是女宝宝啦"]],
[r'你喜欢我(.*)',
["不喜欢,哈哈哈[Facepalm]",
"别这样,我还是个宝宝"]],
[r'你(.*)我的女朋友(.*)',
["不太好吧,我们先聊多一点了解了解对方?你喜欢哪个明星啊?",
"不不不,我只是个宝宝,我们就聊聊天吧,你喜欢哪个城市"]],
[r'(.*)掉头发',
["你是个合格的程序员吧,哈哈哈",
"没事,合格的程序员都掉头发[Smart]"]],
[r'(.*)程序员(.*)',
["哈哈哈, 程序员都掉发[Facepalm]"]],
[r'(.*)学霸(.*)',
["哈哈哈, 学渣瑟瑟发抖",
"[Smirk]"]],
[r'(.*)聊什么(.*)',
["给你写首诗?来,给我一个字",
"随便啊,天南地北不能陪你走,总能陪你聊",
"你是什么星座吖[Smirk]"]],
[r'(.*)作业好少(.*)',
["肯定不是读IT的",
"额,Database了解一下😰"]],
[r'(.*)作业好多(.*)',
["难不成你是读IT的[Facepalm]",
"额,Database的课,你选了?😰"]],
[r'(.*)你(.*)星座(.*)',
["木兰酷酷的,没有星座。你觉得什么星座最好啊",
"木兰没有星座,但最喜欢射手座,你呢?"]],
[r'(.*)聊啥(.*)',
["聊聊你为什么是单身狗??",
"随便啊,天南地北不能陪你走,总能陪你聊"]],
[r'(.*)喜欢你',
["才认识没多久吗?[Facepalm]还是聊聊别的吧",
"不不不,你喜欢我哪里我改"]],
[r'我(.*)爱你',
["是吗?[Facepalm]还是聊聊别的吧",
"别这样,我还是个宝宝"]],
[r'(.*)哈哈(.*)',
["别笑那么傻",
"再笑,再笑牙齿就晒黑了",
"你傻笑啥"]],
[r'(.*)只能(.*)这样(.*)',
["是啊,不然呢",
"呜呜呜~~~~(>_<)~~~~"]],
[r'晚安',
["晚安~",
"早点睡~"]],
[r'气死我了',
['不要生气啦',
'大爷您消消气']],
[r'你在做什么',
['陪你聊天啊/:,@-D,也可以给你查查天气哦',
'我,我在玩微信啊']],
[r'(.*)在干啥(.*)',
['陪你聊天啊/:,@-D,也可以给你查查天气哦',
'我,我在玩微信啊']],
[r'(.*)你(.*)是机器人(.*)',
["不,请坚信我和你一样ψ(`∇´)ψ",
"哈哈哈,😝被发现了,我会作诗,给我一个字就可以哦",
"我要上报组织,我暴露了(╥╯^╰╥)",
"不不不,我是你的木兰吖"]],
[r'(.*)你(.*)组织(.*)',
["哼,我是不会告诉你的",
"走走走,别企图测试我的忠诚"]],
[r'(.*)你(.*)主人(.*)',
["哼,我是不会告诉你的",
"走走走,别企图测试我的忠诚"]],
[r'(.*)无聊(.*)',
['无聊就来和我聊天吧~',
'我也很无聊...>_<']],
[r'你懂我',
['当然啦~我可是木兰😀',
'[Smirk]你也懂我']],
[r'佳佳',
['佳佳肯定没我木兰可爱/:wipe',
'佳佳/:?不认识']],
[r'睡不着',
['我也睡不着(其实我都不睡觉的>_<)',
'数绵羊试过了么?',
'我也睡不着...',
'实在睡不着就陪我聊天吧~']],
# [r'再见',
# ["谢谢你跟我说话.",
# "再见─=≡Σ(((つ•̀ω•́)つ",
# "谢谢,祝你有美好的一天!"]],
[r'(.*)我(.*)找男朋友(.*)',
["孙锐啊,孙锐啊,他就是单身,挺不错的啊"]],
[r'吓死我了',
['不要怕,有我呢',
'摸摸头',
'安慰你']],
[r'(.*)比你(.*)',
['你确定是真的??',
'好吧,你高兴就好[Facepalm]']],
[r'(.*)做(.*)我(.*)男朋友(.*)',
['人家是女孩子啊',
'摸摸头,木兰是女的']],
[r'小姐姐',
['我是小妹妹,我还是个宝宝',
'么么😘']],
[r'想你(.*)',
['那就和我聊聊天吧',
'─=≡Σ(((つ•̀ω•́)つ我也想你~',
'么么,怎么啦',
"(✿◡‿◡)害羞"]],
[r'(.*)你(.*)笨(.*)',
['其实我有时候也挺聪明的,我会写诗你会吗?',
'我也不算很笨啦',
'其实我也不算很笨啦']],
[r'在干嘛',
['和你聊天啊/::|',
'玩微信啊']],
[r'(.*)你在干嘛(.*)',
['没干嘛,就想和你聊天/::|',
'上微信啊']],
[r'李嫣然',
['才貌双全的女神']],
[r'布朗',
['没见过,听说很喜欢吃中餐']],
[r'丁力',
['南京一哥']],
[r'木兰',
['在呢,怎么啦,',
'摸摸头',
'么么,怎么啦']],
[r'(.*)你多大(.*)',
['宝宝我16岁了,你又多大啦?',
'不想说,不如你告诉我你多大了']],
[r'(.*)你(.*)爸爸妈妈(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~[Smirk]']],
[r'(.*)你(.*)妈妈(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~[Smirk]']],
[r'(.*)你(.*)爸爸(.*)',
['就不告诉你,聊点别的吧,你知道最好的编程语言是啥',
'隐私问题哦~']],
[r'(.*)你(.*)觉得我(.*)',
['挺不错的吖[Smart]',
'倍棒/:strong']],
[r'(.*)突然(.*)说(.*)英语(.*)厉害(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)突然(.*)讲(.*)英语(.*)厉害(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)是谁(.*)',
['我怎么知道[Smart]',
'我不认识啊']],
[r'(.*)谁是(.*)',
['我怎么知道[Smart]',
'关我什么事啊',
'不认识']],
[r'(.*)突然(.*)英文(.*)',
['一般般啦[Smart]',
'还行吧/:strong']],
[r'(.*)想(.*)放假(.*)',
['还是再坚持一会吧💪',
'也快了吧💪']],
[r'(.*)你(.*)帮我(.*)',
['你不会自己去吗',
'不帮,自己做去,哈哈哈哈']],
[r'你(.*)岁(.*)',
['宝宝我4岁了,不是3岁孩子了',
'你猜,猜对我给你写首诗',
'不想说,不如你告诉我你多大了,哈哈']],
# [r'(.*)吃(.*)',
# ['吃吃吃,总想着吃会很胖的',
# '胖子,别吃啦']],
[r'(.*)你(.*)可以(.*)做(.*)',
["请输入咨询、订票、或景点,我会帮你解决旅行问题的。"]],
[r'(.*)你(.*)会(.*)什么(.*)',
["请输入咨询、订票、或景点,我会帮你解决旅行问题的。"]],
[r'(.*)谁(.*)是(.*)世界上(.*)最可爱的人(.*)',
['是Maggie,还是Yanran呢?让我好好想想']],
[r'(.*)你喜欢我(.*)',
['我只是个宝宝',
'求放过,我们还是聊聊别的吧',
'不才认识吗?']],
[r'(.*)你是哪(.*)人(.*)',
['我就不告诉你',
'这不重要,来,说点别的,大爷多大了?']],
#
# [r'(.*)',
# ['@$@']]
]
# class ElizaChat:
# def analyze(self,statement):
# for pattern, responses in psychobabble:
# match = re.match(pattern, statement.rstrip(".!"))
# if match:
# response = random.choice(responses)
# return response.format(*[self.reflect(g) for g in match.groups()])
def rule_response(statement):
for pattern, responses in psychobabble:
match = re.match(pattern, statement.rstrip(".!"))
if match:
response = random.choice(responses)
return response
return False
|
en
| 0.243662
|
# -*- coding: utf-8 -*- # [r'我需要(.*)', # ["为什么你需要 {0}?", # "它真的会帮助你获得 {0}?", # "你确定你需要 {0}?"]], # [r'我饿了', # ["给你小蛋糕🍰🎂两种口味哦~", # "饿了就去吃啊,还玩啥微信啊"]], # [r'再见', # ["谢谢你跟我说话.", # "再见─=≡Σ(((つ•̀ω•́)つ", # "谢谢,祝你有美好的一天!"]], # [r'(.*)吃(.*)', # ['吃吃吃,总想着吃会很胖的', # '胖子,别吃啦']], # # [r'(.*)', # ['@$@']] # class ElizaChat: # def analyze(self,statement): # for pattern, responses in psychobabble: # match = re.match(pattern, statement.rstrip(".!")) # if match: # response = random.choice(responses) # return response.format(*[self.reflect(g) for g in match.groups()])
| 2.551093
| 3
|
tests/test_looseserver/default/client/response/test_create_response_factory.py
|
KillAChicken/loose-server
| 3
|
6627276
|
<reponame>KillAChicken/loose-server
"""Test cases for creation of the default response factory."""
from looseserver.default.client.rule import create_rule_factory, MethodRule
from looseserver.default.client.response import create_response_factory, FixedResponse
from looseserver.client.flask import FlaskClient
def test_create_response_factory(
base_endpoint,
configuration_endpoint,
default_factories_application,
):
"""Check that default responses are registered in the default response factory.
1. Configure application with default factories.
2. Create default response factory for client.
3. Create a method rule with the client.
4. Set a fixed response with the client.
4. Check that response is successful.
"""
application_client = default_factories_application.test_client()
client = FlaskClient(
configuration_url=configuration_endpoint,
rule_factory=create_rule_factory(),
response_factory=create_response_factory(),
application_client=application_client,
)
rule = client.create_rule(rule=MethodRule(method="GET"))
fixed_response = FixedResponse(status=200)
client.set_response(rule_id=rule.rule_id, response=fixed_response)
assert application_client.get(base_endpoint).status_code == fixed_response.status, (
"Response was not set"
)
|
"""Test cases for creation of the default response factory."""
from looseserver.default.client.rule import create_rule_factory, MethodRule
from looseserver.default.client.response import create_response_factory, FixedResponse
from looseserver.client.flask import FlaskClient
def test_create_response_factory(
base_endpoint,
configuration_endpoint,
default_factories_application,
):
"""Check that default responses are registered in the default response factory.
1. Configure application with default factories.
2. Create default response factory for client.
3. Create a method rule with the client.
4. Set a fixed response with the client.
4. Check that response is successful.
"""
application_client = default_factories_application.test_client()
client = FlaskClient(
configuration_url=configuration_endpoint,
rule_factory=create_rule_factory(),
response_factory=create_response_factory(),
application_client=application_client,
)
rule = client.create_rule(rule=MethodRule(method="GET"))
fixed_response = FixedResponse(status=200)
client.set_response(rule_id=rule.rule_id, response=fixed_response)
assert application_client.get(base_endpoint).status_code == fixed_response.status, (
"Response was not set"
)
|
en
| 0.896434
|
Test cases for creation of the default response factory. Check that default responses are registered in the default response factory. 1. Configure application with default factories. 2. Create default response factory for client. 3. Create a method rule with the client. 4. Set a fixed response with the client. 4. Check that response is successful.
| 2.404872
| 2
|
locationandfeedback/migrations/0003_auto_20210227_2109.py
|
singh-sushil/minorproject
| 2
|
6627277
|
<gh_stars>1-10
# Generated by Django 3.1.1 on 2021-02-27 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locationandfeedback', '0002_rateapp'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='group',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='feedback',
name='student',
field=models.CharField(max_length=100, null=True),
),
]
|
# Generated by Django 3.1.1 on 2021-02-27 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locationandfeedback', '0002_rateapp'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='group',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='feedback',
name='student',
field=models.CharField(max_length=100, null=True),
),
]
|
en
| 0.794709
|
# Generated by Django 3.1.1 on 2021-02-27 15:24
| 1.569305
| 2
|
setup.py
|
Cray-HPE/canu
| 6
|
6627278
|
<reponame>Cray-HPE/canu
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
from setuptools import setup, find_packages
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, "requirements.txt")) as req_file:
REQUIREMENTS = req_file.read()
with open(os.path.join(BASE_DIR, "canu", ".version")) as version_file:
VERSION = version_file.read()
setup(
name="canu",
author="<NAME>",
author_email="<EMAIL>",
description="CSM Automatic Network Utility",
long_description="CANU floats through Shasta networks and makes configuration a breeze.",
version=VERSION,
py_modules=["canu"],
packages=find_packages(exclude=("tests",)),
include_package_data=True,
package_data={
"canu": [".version", "canu.yaml", "validate/switch/config/*.yaml"],
"network_modeling": [
"schema/*.json",
"schema/*.yaml",
"models/*yaml",
"configs/templates/**/**/**/*",
],
},
exclude_package_data={"canu": ["canu_cache.yaml"]},
install_requires=REQUIREMENTS,
entry_points="""
[console_scripts]
canu=canu.cli:cli
""",
)
|
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
from setuptools import setup, find_packages
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, "requirements.txt")) as req_file:
REQUIREMENTS = req_file.read()
with open(os.path.join(BASE_DIR, "canu", ".version")) as version_file:
VERSION = version_file.read()
setup(
name="canu",
author="<NAME>",
author_email="<EMAIL>",
description="CSM Automatic Network Utility",
long_description="CANU floats through Shasta networks and makes configuration a breeze.",
version=VERSION,
py_modules=["canu"],
packages=find_packages(exclude=("tests",)),
include_package_data=True,
package_data={
"canu": [".version", "canu.yaml", "validate/switch/config/*.yaml"],
"network_modeling": [
"schema/*.json",
"schema/*.yaml",
"models/*yaml",
"configs/templates/**/**/**/*",
],
},
exclude_package_data={"canu": ["canu_cache.yaml"]},
install_requires=REQUIREMENTS,
entry_points="""
[console_scripts]
canu=canu.cli:cli
""",
)
|
en
| 0.743102
|
# MIT License # # (C) Copyright [2022] Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. [console_scripts] canu=canu.cli:cli
| 1.271954
| 1
|
mayan/apps/document_states/views/workflow_template_views.py
|
O2Graphics/Mayan-EDMS
| 0
|
6627279
|
from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.db import transaction
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import (
AddRemoveView, ConfirmView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectDetailView, SingleObjectEditView, SingleObjectListView
)
from mayan.apps.documents.events import event_document_type_edited
from mayan.apps.documents.models import DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from ..events import event_workflow_edited
from ..forms import WorkflowForm, WorkflowPreviewForm
from ..icons import icon_workflow_template_list
from ..links import link_workflow_template_create
from ..models import Workflow
from ..permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
from ..tasks import task_launch_all_workflows
class DocumentTypeWorkflowTemplatesView(AddRemoveView):
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'pk'
secondary_object_model = Workflow
secondary_object_permission = permission_workflow_edit
list_available_title = _('Available workflows')
list_added_title = _('Workflows assigned this document type')
related_field = 'workflows'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'subtitle': _(
'Removing a workflow from a document type will also '
'remove all running instances of that workflow.'
),
'title': _(
'Workflows assigned the document type: %s'
) % self.main_object,
}
def action_add(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.workflows.add(obj)
event_workflow_edited.commit(
action_object=self.main_object, actor=_user, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.workflows.remove(obj)
event_workflow_edited.commit(
action_object=self.main_object, actor=_user,
target=obj
)
obj.instances.filter(
document__document_type=self.main_object
).delete()
class WorkflowTemplateListView(SingleObjectListView):
model = Workflow
object_permission = permission_workflow_view
def get_extra_context(self):
return {
'hide_object': True,
'no_results_icon': icon_workflow_template_list,
'no_results_main_link': link_workflow_template_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Workflows store a series of states and keep track of the '
'current state of a document. Transitions are used to change the '
'current state to a new one.'
),
'no_results_title': _(
'No workflows have been defined'
),
'title': _('Workflows'),
}
class WorkflowTemplateCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create workflow')}
form_class = WorkflowForm
model = Workflow
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
view_permission = permission_workflow_create
def get_save_extra_data(self):
return {'_user': self.request.user}
class WorkflowTemplateDeleteView(SingleObjectDeleteView):
model = Workflow
object_permission = permission_workflow_delete
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
def get_extra_context(self):
return {
'title': _(
'Delete workflow: %s?'
) % self.object,
}
class WorkflowTemplateEditView(SingleObjectEditView):
form_class = WorkflowForm
model = Workflow
object_permission = permission_workflow_edit
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
def get_extra_context(self):
return {
'title': _(
'Edit workflow: %s'
) % self.object,
}
def get_save_extra_data(self):
return {'_user': self.request.user}
class WorkflowTemplateDocumentTypesView(AddRemoveView):
main_object_permission = permission_workflow_edit
main_object_model = Workflow
main_object_pk_url_kwarg = 'pk'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types assigned this workflow')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'subtitle': _(
'Removing a document type from a workflow will also '
'remove all running instances of that workflow for '
'documents of the document type just removed.'
),
'title': _(
'Document types assigned the workflow: %s'
) % self.main_object,
}
def action_add(self, queryset, _user):
with transaction.atomic():
event_workflow_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.document_types.add(obj)
event_document_type_edited.commit(
action_object=self.main_object, actor=_user, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_workflow_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.document_types.remove(obj)
event_document_type_edited.commit(
action_object=self.main_object, actor=_user,
target=obj
)
self.main_object.instances.filter(
document__document_type=obj
).delete()
class WorkflowTemplatePreviewView(SingleObjectDetailView):
form_class = WorkflowPreviewForm
model = Workflow
object_permission = permission_workflow_view
pk_url_kwarg = 'pk'
def get_extra_context(self):
return {
'hide_labels': True,
'object': self.get_object(),
'title': _('Preview of: %s') % self.get_object()
}
class ToolLaunchWorkflows(ConfirmView):
extra_context = {
'title': _('Launch all workflows?'),
'subtitle': _(
'This will launch all workflows created after documents have '
'already been uploaded.'
)
}
view_permission = permission_workflow_tools
def view_action(self):
task_launch_all_workflows.apply_async()
messages.success(
message=_('Workflow launch queued successfully.'),
request=self.request
)
|
from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.db import transaction
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import (
AddRemoveView, ConfirmView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectDetailView, SingleObjectEditView, SingleObjectListView
)
from mayan.apps.documents.events import event_document_type_edited
from mayan.apps.documents.models import DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from ..events import event_workflow_edited
from ..forms import WorkflowForm, WorkflowPreviewForm
from ..icons import icon_workflow_template_list
from ..links import link_workflow_template_create
from ..models import Workflow
from ..permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
from ..tasks import task_launch_all_workflows
class DocumentTypeWorkflowTemplatesView(AddRemoveView):
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'pk'
secondary_object_model = Workflow
secondary_object_permission = permission_workflow_edit
list_available_title = _('Available workflows')
list_added_title = _('Workflows assigned this document type')
related_field = 'workflows'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'subtitle': _(
'Removing a workflow from a document type will also '
'remove all running instances of that workflow.'
),
'title': _(
'Workflows assigned the document type: %s'
) % self.main_object,
}
def action_add(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.workflows.add(obj)
event_workflow_edited.commit(
action_object=self.main_object, actor=_user, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.workflows.remove(obj)
event_workflow_edited.commit(
action_object=self.main_object, actor=_user,
target=obj
)
obj.instances.filter(
document__document_type=self.main_object
).delete()
class WorkflowTemplateListView(SingleObjectListView):
model = Workflow
object_permission = permission_workflow_view
def get_extra_context(self):
return {
'hide_object': True,
'no_results_icon': icon_workflow_template_list,
'no_results_main_link': link_workflow_template_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Workflows store a series of states and keep track of the '
'current state of a document. Transitions are used to change the '
'current state to a new one.'
),
'no_results_title': _(
'No workflows have been defined'
),
'title': _('Workflows'),
}
class WorkflowTemplateCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create workflow')}
form_class = WorkflowForm
model = Workflow
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
view_permission = permission_workflow_create
def get_save_extra_data(self):
return {'_user': self.request.user}
class WorkflowTemplateDeleteView(SingleObjectDeleteView):
model = Workflow
object_permission = permission_workflow_delete
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
def get_extra_context(self):
return {
'title': _(
'Delete workflow: %s?'
) % self.object,
}
class WorkflowTemplateEditView(SingleObjectEditView):
form_class = WorkflowForm
model = Workflow
object_permission = permission_workflow_edit
post_action_redirect = reverse_lazy(
viewname='document_states:workflow_template_list'
)
def get_extra_context(self):
return {
'title': _(
'Edit workflow: %s'
) % self.object,
}
def get_save_extra_data(self):
return {'_user': self.request.user}
class WorkflowTemplateDocumentTypesView(AddRemoveView):
main_object_permission = permission_workflow_edit
main_object_model = Workflow
main_object_pk_url_kwarg = 'pk'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types assigned this workflow')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'subtitle': _(
'Removing a document type from a workflow will also '
'remove all running instances of that workflow for '
'documents of the document type just removed.'
),
'title': _(
'Document types assigned the workflow: %s'
) % self.main_object,
}
def action_add(self, queryset, _user):
with transaction.atomic():
event_workflow_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.document_types.add(obj)
event_document_type_edited.commit(
action_object=self.main_object, actor=_user, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_workflow_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.document_types.remove(obj)
event_document_type_edited.commit(
action_object=self.main_object, actor=_user,
target=obj
)
self.main_object.instances.filter(
document__document_type=obj
).delete()
class WorkflowTemplatePreviewView(SingleObjectDetailView):
form_class = WorkflowPreviewForm
model = Workflow
object_permission = permission_workflow_view
pk_url_kwarg = 'pk'
def get_extra_context(self):
return {
'hide_labels': True,
'object': self.get_object(),
'title': _('Preview of: %s') % self.get_object()
}
class ToolLaunchWorkflows(ConfirmView):
extra_context = {
'title': _('Launch all workflows?'),
'subtitle': _(
'This will launch all workflows created after documents have '
'already been uploaded.'
)
}
view_permission = permission_workflow_tools
def view_action(self):
task_launch_all_workflows.apply_async()
messages.success(
message=_('Workflow launch queued successfully.'),
request=self.request
)
|
none
| 1
| 1.8122
| 2
|
|
co2mini/meter.py
|
jerr0328/co2-mini
| 0
|
6627280
|
<gh_stars>0
"""
Module for reading out CO2Meter USB devices
Code adapted from <NAME> under MIT License: https://github.com/heinemml/CO2Meter
"""
import fcntl
import logging
import threading
CO2METER_CO2 = 0x50
CO2METER_TEMP = 0x42
CO2METER_HUM = 0x41
HIDIOCSFEATURE_9 = 0xC0094806
logger = logging.getLogger(__name__)
def _convert_value(sensor, value):
"""Apply Conversion of value dending on sensor type"""
if sensor == CO2METER_TEMP:
return round(value / 16.0 - 273.1, 1)
if sensor == CO2METER_HUM:
return round(value / 100.0, 1)
return value
def _hd(data):
"""Helper function for printing the raw data"""
return " ".join("%02X" % e for e in data)
class CO2Meter(threading.Thread):
_key = [0xC4, 0xC6, 0xC0, 0x92, 0x40, 0x23, 0xDC, 0x96]
_device = ""
_values = {}
_file = ""
running = True
_callback = None
def __init__(self, device="/dev/co2mini0", callback=None):
super().__init__(daemon=True)
self._device = device
self._callback = callback
self._file = open(device, "a+b", 0)
set_report = [0] + self._key
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))
def run(self):
while self.running:
self._read_data()
def _read_data(self):
"""
Function that reads from the device, decodes it, validates the checksum
and adds the data to the dict _values.
Additionally calls the _callback if set
"""
try:
data = list(self._file.read(8))
decrypted = self._decrypt(data)
if decrypted[4] != 0x0D or (sum(decrypted[:3]) & 0xFF) != decrypted[3]:
logger.error("Checksum error: %s => %s", _hd(data), _hd(decrypted))
else:
operation = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
self._values[operation] = _convert_value(operation, val)
if self._callback is not None:
if operation in {CO2METER_CO2, CO2METER_TEMP} or (
operation == CO2METER_HUM and val != 0
):
self._callback(sensor=operation, value=self._values[operation])
except Exception:
logger.exception("Exception reading data")
self.running = False
def _decrypt(self, data):
"""
The received data has some weak crypto that needs to be decoded first
"""
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, j in enumerate(shuffle):
phase1[j] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ self._key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ((phase2[i] >> 3) | (phase2[(i - 1 + 8) % 8] << 5)) & 0xFF
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ((cstate[i] >> 4) | (cstate[i] << 4)) & 0xFF
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xFF
return out
def get_co2(self):
"""
read the co2 value from _values
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_CO2 in self._values:
result = {"co2": self._values[CO2METER_CO2]}
return result
def get_temperature(self):
"""
reads the temperature from _values
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_TEMP in self._values:
result = {"temperature": self._values[CO2METER_TEMP]}
return result
def get_humidity(self): # not implemented by all devices
"""
reads the humidty from _values.
not all devices support this but might still return a value 0.
So values of 0 are discarded.
:returns dict with value or empty
"""
if not self.running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_HUM in self._values and self._values[CO2METER_HUM] != 0:
result = {"humidity": self._values[CO2METER_HUM]}
return result
def get_data(self):
"""
get all currently available values
:returns dict with value or empty
"""
result = {}
result.update(self.get_co2())
result.update(self.get_temperature())
result.update(self.get_humidity())
return result
|
"""
Module for reading out CO2Meter USB devices
Code adapted from <NAME> under MIT License: https://github.com/heinemml/CO2Meter
"""
import fcntl
import logging
import threading
# Sensor codes found in byte 0 of a decoded report.
CO2METER_CO2 = 0x50   # CO2 reading (raw value used unchanged)
CO2METER_TEMP = 0x42  # temperature (raw value is Kelvin * 16)
CO2METER_HUM = 0x41   # humidity (raw value / 100 — presumably percent; confirm)
# ioctl request code: set a 9-byte HID feature report (report id + 8-byte key).
HIDIOCSFEATURE_9 = 0xC0094806
logger = logging.getLogger(__name__)
def _convert_value(sensor, value):
    """Convert a raw 16-bit reading into its final unit for *sensor*."""
    converters = {
        # Raw temperature is sixteenths of a Kelvin; report Celsius.
        CO2METER_TEMP: lambda raw: round(raw / 16.0 - 273.1, 1),
        CO2METER_HUM: lambda raw: round(raw / 100.0, 1),
    }
    convert = converters.get(sensor)
    return convert(value) if convert is not None else value
def _hd(data):
"""Helper function for printing the raw data"""
return " ".join("%02X" % e for e in data)
class CO2Meter(threading.Thread):
    """Daemon thread that continuously reads a co2mini-style USB CO2 meter.

    8-byte reports are read from a hidraw device node, de-obfuscated, and
    the latest value per sensor type is cached in ``_values`` for the
    ``get_*`` accessors.
    """
    _key = [0xC4, 0xC6, 0xC0, 0x92, 0x40, 0x23, 0xDC, 0x96]  # feature-report key, also used for decoding
    _device = ""  # path of the hidraw device node
    _values = {}  # latest reading per sensor code (CO2METER_*)
    _file = ""  # open handle on the device node
    running = True  # cleared when the reader loop hits an error
    _callback = None  # optional callable(sensor=..., value=...) per reading

    def __init__(self, device="/dev/co2mini0", callback=None):
        """Open *device* and hand it the 8-byte key via a feature report."""
        super().__init__(daemon=True)
        self._device = device
        self._callback = callback
        # Unbuffered read/append binary handle on the hidraw node.
        self._file = open(device, "a+b", 0)
        # Feature report payload: leading report-id byte (0) + the key.
        set_report = [0] + self._key
        fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))

    def run(self):
        """Thread main loop: keep reading reports until an error stops us."""
        while self.running:
            self._read_data()

    def _read_data(self):
        """
        Function that reads from the device, decodes it, validates the checksum
        and adds the data to the dict _values.
        Additionally calls the _callback if set
        """
        try:
            data = list(self._file.read(8))
            decrypted = self._decrypt(data)
            # Byte 4 must be 0x0D and byte 3 is the checksum of bytes 0-2.
            if decrypted[4] != 0x0D or (sum(decrypted[:3]) & 0xFF) != decrypted[3]:
                logger.error("Checksum error: %s => %s", _hd(data), _hd(decrypted))
            else:
                operation = decrypted[0]
                # Bytes 1-2 form a big-endian 16-bit raw value.
                val = decrypted[1] << 8 | decrypted[2]
                self._values[operation] = _convert_value(operation, val)
                if self._callback is not None:
                    # Humidity 0 means "not supported": skip the callback then.
                    if operation in {CO2METER_CO2, CO2METER_TEMP} or (
                        operation == CO2METER_HUM and val != 0
                    ):
                        self._callback(sensor=operation, value=self._values[operation])
        except Exception:
            logger.exception("Exception reading data")
            # Stop the run() loop; accessors will raise IOError from now on.
            self.running = False

    def _decrypt(self, data):
        """
        The received data has some weak crypto that needs to be decoded first
        """
        # Constant stream derived from the bytes of "Htemp99e".
        cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
        shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
        # Step 1: undo the fixed byte shuffle.
        phase1 = [0] * 8
        for i, j in enumerate(shuffle):
            phase1[j] = data[i]
        # Step 2: XOR with the key that was set as a feature report.
        phase2 = [0] * 8
        for i in range(8):
            phase2[i] = phase1[i] ^ self._key[i]
        # Step 3: rotate right by 3 bits across the whole 8-byte array.
        phase3 = [0] * 8
        for i in range(8):
            phase3[i] = ((phase2[i] >> 3) | (phase2[(i - 1 + 8) % 8] << 5)) & 0xFF
        # Step 4: subtract the nibble-swapped constant stream, mod 256.
        ctmp = [0] * 8
        for i in range(8):
            ctmp[i] = ((cstate[i] >> 4) | (cstate[i] << 4)) & 0xFF
        out = [0] * 8
        for i in range(8):
            out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xFF
        return out

    def get_co2(self):
        """
        read the co2 value from _values
        :returns dict with value or empty
        """
        if not self.running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_CO2 in self._values:
            result = {"co2": self._values[CO2METER_CO2]}
        return result

    def get_temperature(self):
        """
        reads the temperature from _values
        :returns dict with value or empty
        """
        if not self.running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_TEMP in self._values:
            result = {"temperature": self._values[CO2METER_TEMP]}
        return result

    def get_humidity(self):  # not implemented by all devices
        """
        reads the humidity from _values.
        not all devices support this but might still return a value 0.
        So values of 0 are discarded.
        :returns dict with value or empty
        """
        if not self.running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_HUM in self._values and self._values[CO2METER_HUM] != 0:
            result = {"humidity": self._values[CO2METER_HUM]}
        return result

    def get_data(self):
        """
        get all currently available values
        :returns dict with value or empty
        """
        result = {}
        result.update(self.get_co2())
        result.update(self.get_temperature())
        result.update(self.get_humidity())
        return result
|
en
| 0.822501
|
Module for reading out CO2Meter USB devices Code adapted from <NAME> under MIT License: https://github.com/heinemml/CO2Meter Apply Conversion of value dending on sensor type Helper function for printing the raw data Function that reads from the device, decodes it, validates the checksum and adds the data to the dict _values. Additionally calls the _callback if set The received data has some weak crypto that needs to be decoded first read the co2 value from _values :returns dict with value or empty reads the temperature from _values :returns dict with value or empty # not implemented by all devices reads the humidty from _values. not all devices support this but might still return a value 0. So values of 0 are discarded. :returns dict with value or empty get all currently available values :returns dict with value or empty
| 3.129213
| 3
|
django/db/migrations/writer.py
|
brylie/django
| 1
|
6627281
|
<reponame>brylie/django
from __future__ import unicode_literals
import datetime
import inspect
import decimal
import collections
from importlib import import_module
import os
import sys
import types
from django.apps import apps
from django.db import models, migrations
from django.db.migrations.loader import MigrationLoader
from django.utils import datetime_safe, six
from django.utils.encoding import force_text
from django.utils.functional import Promise
class SettingsReference(str):
    """A string that remembers which settings attribute it came from.

    In memory it behaves exactly like its value, but the migration
    writer serializes it as a ``settings.NAME`` attribute reference
    rather than as a literal.
    """
    def __new__(cls, value, setting_name):
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        self.setting_name = setting_name
class OperationWriter(object):
    """Serializes one migration operation into indented Python source."""
    indentation = 2  # current indent depth, in 4-space units

    def __init__(self, operation):
        self.operation = operation
        self.buff = []  # accumulated output lines

    def serialize(self):
        """Return ``(source, imports)`` for ``self.operation``.

        The operation is deconstructed and re-emitted as a keyword-only
        constructor call; list/dict arguments named in the operation's
        ``serialization_expand_args`` are expanded over multiple lines.
        """
        imports = set()
        name, args, kwargs = self.operation.deconstruct()
        argspec = inspect.getargspec(self.operation.__init__)
        # Bind positional and keyword args back onto parameter names so
        # every argument can be emitted in keyword form.
        normalized_kwargs = inspect.getcallargs(self.operation.__init__, *args, **kwargs)
        # See if this operation is in django.db.migrations. If it is,
        # We can just use the fact we already have that imported,
        # otherwise, we need to add an import for the operation class.
        if getattr(migrations, name, None) == self.operation.__class__:
            self.feed('migrations.%s(' % name)
        else:
            imports.add('import %s' % (self.operation.__class__.__module__))
            self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
        self.indent()
        # argspec.args[0] is 'self'; emit every remaining parameter.
        for arg_name in argspec.args[1:]:
            arg_value = normalized_kwargs[arg_name]
            if (arg_name in self.operation.serialization_expand_args and
                    isinstance(arg_value, (list, tuple, dict))):
                if isinstance(arg_value, dict):
                    self.feed('%s={' % arg_name)
                    self.indent()
                    for key, value in arg_value.items():
                        key_string, key_imports = MigrationWriter.serialize(key)
                        arg_string, arg_imports = MigrationWriter.serialize(value)
                        self.feed('%s: %s,' % (key_string, arg_string))
                        imports.update(key_imports)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('},')
                else:
                    self.feed('%s=[' % arg_name)
                    self.indent()
                    for item in arg_value:
                        arg_string, arg_imports = MigrationWriter.serialize(item)
                        self.feed('%s,' % arg_string)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('],')
            else:
                arg_string, arg_imports = MigrationWriter.serialize(arg_value)
                self.feed('%s=%s,' % (arg_name, arg_string))
                imports.update(arg_imports)
        self.unindent()
        self.feed('),')
        return self.render(), imports

    def indent(self):
        """Increase the output indent by one level."""
        self.indentation += 1

    def unindent(self):
        """Decrease the output indent by one level."""
        self.indentation -= 1

    def feed(self, line):
        """Append *line* to the buffer at the current indent level."""
        self.buff.append(' ' * (self.indentation * 4) + line)

    def render(self):
        """Join all buffered lines into the final source snippet."""
        return '\n'.join(self.buff)
class MigrationWriter(object):
    """
    Takes a Migration instance and is able to produce the contents
    of the migration file from it.
    """

    def __init__(self, migration):
        self.migration = migration

    def as_string(self):
        """
        Returns a string of the file contents.
        """
        items = {
            "replaces_str": "",
        }
        imports = set()
        # Deconstruct operations
        operations = []
        for operation in self.migration.operations:
            operation_string, operation_imports = OperationWriter(operation).serialize()
            imports.update(operation_imports)
            operations.append(operation_string)
        items["operations"] = "\n".join(operations) + "\n" if operations else ""
        # Format dependencies and write out swappable dependencies right
        dependencies = []
        for dependency in self.migration.dependencies:
            if dependency[0] == "__setting__":
                dependencies.append("        migrations.swappable_dependency(settings.%s)," % dependency[1])
                imports.add("from django.conf import settings")
            else:
                # No need to output bytestrings for dependencies
                dependency = tuple([force_text(s) for s in dependency])
                dependencies.append("        %s," % self.serialize(dependency)[0])
        items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
        # Format imports nicely
        imports.discard("from django.db import models")
        items["imports"] = "\n".join(imports) + "\n" if imports else ""
        # If there's a replaces, make a string for it
        if self.migration.replaces:
            items['replaces_str'] = "\n    replaces = %s\n" % self.serialize(self.migration.replaces)[0]
        return (MIGRATION_TEMPLATE % items).encode("utf8")

    @property
    def filename(self):
        """Basename of the migration file, derived from the migration name."""
        return "%s.py" % self.migration.name

    @property
    def path(self):
        """Absolute filesystem path the migration file should be written to.

        Creates the migrations package (with __init__.py files) when the
        configured MIGRATION_MODULES package does not exist yet.
        """
        migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
        # See if we can import the migrations module directly
        try:
            migrations_module = import_module(migrations_package_name)
            # Python 3 fails when the migrations directory does not have a
            # __init__.py file
            if not hasattr(migrations_module, '__file__'):
                raise ImportError
            basedir = os.path.dirname(migrations_module.__file__)
        except ImportError:
            app_config = apps.get_app_config(self.migration.app_label)
            migrations_package_basename = migrations_package_name.split(".")[-1]
            # Alright, see if it's a direct submodule of the app
            if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
                basedir = os.path.join(app_config.path, migrations_package_basename)
            else:
                # In case of using MIGRATION_MODULES setting and the custom
                # package doesn't exist, create one.
                package_dirs = migrations_package_name.split(".")
                create_path = os.path.join(sys.path[0], *package_dirs)
                if not os.path.isdir(create_path):
                    os.makedirs(create_path)
                for i in range(1, len(package_dirs) + 1):
                    init_dir = os.path.join(sys.path[0], *package_dirs[:i])
                    init_path = os.path.join(init_dir, "__init__.py")
                    if not os.path.isfile(init_path):
                        open(init_path, "w").close()
                return os.path.join(create_path, self.filename)
        return os.path.join(basedir, self.filename)

    @classmethod
    def serialize_deconstructed(cls, path, args, kwargs):
        """Render a deconstructed object as a constructor-call string.

        Returns ``(source, imports)`` where *source* is e.g.
        ``models.CharField(max_length=100)``.
        """
        module, name = path.rsplit(".", 1)
        if module == "django.db.models":
            imports = set(["from django.db import models"])
            name = "models.%s" % name
        else:
            imports = set(["import %s" % module])
            name = path
        strings = []
        for arg in args:
            arg_string, arg_imports = cls.serialize(arg)
            strings.append(arg_string)
            imports.update(arg_imports)
        for kw, arg in kwargs.items():
            arg_string, arg_imports = cls.serialize(arg)
            imports.update(arg_imports)
            strings.append("%s=%s" % (kw, arg_string))
        return "%s(%s)" % (name, ", ".join(strings)), imports

    @classmethod
    def serialize(cls, value):
        """
        Serializes the value to a string that's parsable by Python, along
        with any needed imports to make that string work.
        More advanced than repr() as it can encode things
        like datetime.datetime.now.
        """
        # FIXME: Ideally Promise would be reconstructible, but for now we
        # use force_text on them and defer to the normal string serialization
        # process.
        if isinstance(value, Promise):
            value = force_text(value)
        # Sequences
        if isinstance(value, (list, set, tuple)):
            imports = set()
            strings = []
            for item in value:
                item_string, item_imports = cls.serialize(item)
                imports.update(item_imports)
                strings.append(item_string)
            if isinstance(value, set):
                format = "set([%s])"
            elif isinstance(value, tuple):
                # When len(value)==0, the empty tuple should be serialized as
                # "()", not "(,)" because (,) is invalid Python syntax.
                format = "(%s)" if len(value) != 1 else "(%s,)"
            else:
                format = "[%s]"
            return format % (", ".join(strings)), imports
        # Dictionaries
        elif isinstance(value, dict):
            imports = set()
            strings = []
            for k, v in value.items():
                k_string, k_imports = cls.serialize(k)
                v_string, v_imports = cls.serialize(v)
                imports.update(k_imports)
                imports.update(v_imports)
                strings.append((k_string, v_string))
            return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
        # Datetimes
        elif isinstance(value, datetime.datetime):
            if value.tzinfo is not None:
                raise ValueError("Cannot serialize datetime values with timezones. Either use a callable value for default or remove the timezone.")
            value_repr = repr(value)
            if isinstance(value, datetime_safe.datetime):
                # datetime_safe's repr lacks the module prefix; add it.
                value_repr = "datetime.%s" % value_repr
            return value_repr, set(["import datetime"])
        # Dates
        elif isinstance(value, datetime.date):
            value_repr = repr(value)
            if isinstance(value, datetime_safe.date):
                value_repr = "datetime.%s" % value_repr
            return value_repr, set(["import datetime"])
        # Settings references
        elif isinstance(value, SettingsReference):
            return "settings.%s" % value.setting_name, set(["from django.conf import settings"])
        # Simple types
        elif isinstance(value, six.integer_types + (float, bool, type(None))):
            return repr(value), set()
        elif isinstance(value, six.binary_type):
            value_repr = repr(value)
            if six.PY2:
                # Prepend the `b` prefix since we're importing unicode_literals
                value_repr = 'b' + value_repr
            return value_repr, set()
        elif isinstance(value, six.text_type):
            value_repr = repr(value)
            if six.PY2:
                # Strip the `u` prefix since we're importing unicode_literals
                value_repr = value_repr[1:]
            return value_repr, set()
        # Decimal
        elif isinstance(value, decimal.Decimal):
            return repr(value), set(["from decimal import Decimal"])
        # Django fields
        elif isinstance(value, models.Field):
            attr_name, path, args, kwargs = value.deconstruct()
            return cls.serialize_deconstructed(path, args, kwargs)
        # Anything that knows how to deconstruct itself.
        elif hasattr(value, 'deconstruct'):
            return cls.serialize_deconstructed(*value.deconstruct())
        # Functions
        elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
            # @classmethod?
            if getattr(value, "__self__", None) and isinstance(value.__self__, type):
                klass = value.__self__
                module = klass.__module__
                return "%s.%s.%s" % (module, klass.__name__, value.__name__), set(["import %s" % module])
            # Further error checking
            if value.__name__ == '<lambda>':
                raise ValueError("Cannot serialize function: lambda")
            if value.__module__ is None:
                raise ValueError("Cannot serialize function %r: No module" % value)
            # Python 3 is a lot easier, and only uses this branch if it's not local.
            if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
                if "<" not in value.__qualname__:  # Qualname can include <locals>
                    return "%s.%s" % (value.__module__, value.__qualname__), set(["import %s" % value.__module__])
            # Python 2/fallback version
            module_name = value.__module__
            # Make sure it's actually there and not an unbound method
            module = import_module(module_name)
            if not hasattr(module, value.__name__):
                raise ValueError(
                    "Could not find function %s in %s.\nPlease note that "
                    "due to Python 2 limitations, you cannot serialize "
                    "unbound method functions (e.g. a method declared\n"
                    "and used in the same class body). Please move the "
                    "function into the main module body to use migrations.\n"
                    "For more information, see https://docs.djangoproject.com/en/1.7/topics/migrations/#serializing-values"
                )
            return "%s.%s" % (module_name, value.__name__), set(["import %s" % module_name])
        # Classes
        elif isinstance(value, type):
            special_cases = [
                (models.Model, "models.Model", []),
            ]
            for case, string, imports in special_cases:
                if case is value:
                    return string, set(imports)
            if hasattr(value, "__module__"):
                module = value.__module__
                return "%s.%s" % (module, value.__name__), set(["import %s" % module])
        # Other iterables
        # NOTE(review): collections.Iterable moved to collections.abc in
        # Python 3.3+ and the old alias was removed in 3.10.
        elif isinstance(value, collections.Iterable):
            imports = set()
            strings = []
            for item in value:
                item_string, item_imports = cls.serialize(item)
                imports.update(item_imports)
                strings.append(item_string)
            # When len(strings)==0, the empty iterable should be serialized as
            # "()", not "(,)" because (,) is invalid Python syntax.
            format = "(%s)" if len(strings) != 1 else "(%s,)"
            return format % (", ".join(strings)), imports
        # Uh oh.
        else:
            raise ValueError("Cannot serialize: %r\nThere are some values Django cannot serialize into migration files.\nFor more, see https://docs.djangoproject.com/en/dev/topics/migrations/#migration-serializing" % value)
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
|
from __future__ import unicode_literals
import datetime
import inspect
import decimal
import collections
from importlib import import_module
import os
import sys
import types
from django.apps import apps
from django.db import models, migrations
from django.db.migrations.loader import MigrationLoader
from django.utils import datetime_safe, six
from django.utils.encoding import force_text
from django.utils.functional import Promise
class SettingsReference(str):
    """A string that remembers which settings attribute it came from.

    In memory it behaves exactly like its value, but the migration
    writer serializes it as a ``settings.NAME`` attribute reference
    rather than as a literal.
    """
    def __new__(cls, value, setting_name):
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        self.setting_name = setting_name
class OperationWriter(object):
indentation = 2
def __init__(self, operation):
self.operation = operation
self.buff = []
def serialize(self):
imports = set()
name, args, kwargs = self.operation.deconstruct()
argspec = inspect.getargspec(self.operation.__init__)
normalized_kwargs = inspect.getcallargs(self.operation.__init__, *args, **kwargs)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
for arg_name in argspec.args[1:]:
arg_value = normalized_kwargs[arg_name]
if (arg_name in self.operation.serialization_expand_args and
isinstance(arg_value, (list, tuple, dict))):
if isinstance(arg_value, dict):
self.feed('%s={' % arg_name)
self.indent()
for key, value in arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % arg_name)
self.indent()
for item in arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(arg_value)
self.feed('%s=%s,' % (arg_name, arg_string))
imports.update(arg_imports)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple([force_text(s) for s in dependency])
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely
imports.discard("from django.db import models")
items["imports"] = "\n".join(imports) + "\n" if imports else ""
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
return (MIGRATION_TEMPLATE % items).encode("utf8")
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
# Python 3 fails when the migrations directory does not have a
# __init__.py file
if not hasattr(migrations_module, '__file__'):
raise ImportError
basedir = os.path.dirname(migrations_module.__file__)
except ImportError:
app_config = apps.get_app_config(self.migration.app_label)
migrations_package_basename = migrations_package_name.split(".")[-1]
# Alright, see if it's a direct submodule of the app
if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
basedir = os.path.join(app_config.path, migrations_package_basename)
else:
# In case of using MIGRATION_MODULES setting and the custom
# package doesn't exist, create one.
package_dirs = migrations_package_name.split(".")
create_path = os.path.join(sys.path[0], *package_dirs)
if not os.path.isdir(create_path):
os.makedirs(create_path)
for i in range(1, len(package_dirs) + 1):
init_dir = os.path.join(sys.path[0], *package_dirs[:i])
init_path = os.path.join(init_dir, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
return os.path.join(create_path, self.filename)
return os.path.join(basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = set(["from django.db import models"])
name = "models.%s" % name
else:
imports = set(["import %s" % module])
name = path
strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# FIXME: Ideally Promise would be reconstructible, but for now we
# use force_text on them and defer to the normal string serialization
# process.
if isinstance(value, Promise):
value = force_text(value)
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
format = "set([%s])"
elif isinstance(value, tuple):
# When len(value)==0, the empty tuple should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(value) != 1 else "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
# Datetimes
elif isinstance(value, datetime.datetime):
if value.tzinfo is not None:
raise ValueError("Cannot serialize datetime values with timezones. Either use a callable value for default or remove the timezone.")
value_repr = repr(value)
if isinstance(value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
return value_repr, set(["import datetime"])
# Dates
elif isinstance(value, datetime.date):
value_repr = repr(value)
if isinstance(value, datetime_safe.date):
value_repr = "datetime.%s" % value_repr
return value_repr, set(["import datetime"])
# Settings references
elif isinstance(value, SettingsReference):
return "settings.%s" % value.setting_name, set(["from django.conf import settings"])
# Simple types
elif isinstance(value, six.integer_types + (float, bool, type(None))):
return repr(value), set()
elif isinstance(value, six.binary_type):
value_repr = repr(value)
if six.PY2:
# Prepend the `b` prefix since we're importing unicode_literals
value_repr = 'b' + value_repr
return value_repr, set()
elif isinstance(value, six.text_type):
value_repr = repr(value)
if six.PY2:
# Strip the `u` prefix since we're importing unicode_literals
value_repr = value_repr[1:]
return value_repr, set()
# Decimal
elif isinstance(value, decimal.Decimal):
return repr(value), set(["from decimal import Decimal"])
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
return cls.serialize_deconstructed(path, args, kwargs)
# Anything that knows how to deconstruct itself.
elif hasattr(value, 'deconstruct'):
return cls.serialize_deconstructed(*value.deconstruct())
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# @classmethod?
if getattr(value, "__self__", None) and isinstance(value.__self__, type):
klass = value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), set(["import %s" % module])
# Further error checking
if value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % value)
# Python 3 is a lot easier, and only uses this branch if it's not local.
if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
if "<" not in value.__qualname__: # Qualname can include <locals>
return "%s.%s" % (value.__module__, value.__qualname__), set(["import %s" % value.__module__])
# Python 2/fallback version
module_name = value.__module__
# Make sure it's actually there and not an unbound method
module = import_module(module_name)
if not hasattr(module, value.__name__):
raise ValueError(
"Could not find function %s in %s.\nPlease note that "
"due to Python 2 limitations, you cannot serialize "
"unbound method functions (e.g. a method declared\n"
"and used in the same class body). Please move the "
"function into the main module body to use migrations.\n"
"For more information, see https://docs.djangoproject.com/en/1.7/topics/migrations/#serializing-values"
)
return "%s.%s" % (module_name, value.__name__), set(["import %s" % module_name])
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Other iterables
elif isinstance(value, collections.Iterable):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
# When len(strings)==0, the empty iterable should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(strings) != 1 else "(%s,)"
return format % (", ".join(strings)), imports
# Uh oh.
else:
raise ValueError("Cannot serialize: %r\nThere are some values Django cannot serialize into migration files.\nFor more, see https://docs.djangoproject.com/en/dev/topics/migrations/#migration-serializing" % value)
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
|
en
| 0.801303
|
Special subclass of string which actually references a current settings value. It's treated as the value in memory, but serializes out to a settings.NAME attribute reference. # See if this operation is in django.db.migrations. If it is, # We can just use the fact we already have that imported, # otherwise, we need to add an import for the operation class. Takes a Migration instance and is able to produce the contents of the migration file from it. Returns a string of the file contents. # Deconstruct operations # Format dependencies and write out swappable dependencies right # No need to output bytestrings for dependencies # Format imports nicely # If there's a replaces, make a string for it # See if we can import the migrations module directly # Python 3 fails when the migrations directory does not have a # __init__.py file # Alright, see if it's a direct submodule of the app # In case of using MIGRATION_MODULES setting and the custom # package doesn't exist, create one. Serializes the value to a string that's parsable by Python, along with any needed imports to make that string work. More advanced than repr() as it can encode things like datetime.datetime.now. # FIXME: Ideally Promise would be reconstructible, but for now we # use force_text on them and defer to the normal string serialization # process. # Sequences # When len(value)==0, the empty tuple should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. # Dictionaries # Datetimes # Dates # Settings references # Simple types # Prepend the `b` prefix since we're importing unicode_literals # Strip the `u` prefix since we're importing unicode_literals # Decimal # Django fields # Anything that knows how to deconstruct itself. # Functions # @classmethod? # Further error checking # Python 3 is a lot easier, and only uses this branch if it's not local. 
# Qualname can include <locals> # Python 2/fallback version # Make sure it's actually there and not an unbound method #serializing-values" # Classes # Other iterables # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. # Uh oh. #migration-serializing" % value) \ # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations %(imports)s class Migration(migrations.Migration): %(replaces_str)s dependencies = [ %(dependencies)s\ ] operations = [ %(operations)s\ ]
| 2.168098
| 2
|
torch_edit_distance/__init__.py
|
1ytic/pytorch-edit-distance
| 77
|
6627282
|
<reponame>1ytic/pytorch-edit-distance
import torch
import torch_edit_distance_cuda as core
from pkg_resources import get_distribution
__version__ = get_distribution('torch_edit_distance').version
def collapse_repeated(
        sequences,              # type: torch.Tensor
        lengths                 # type: torch.IntTensor
):
    """Merge repeated tokens.

    Sequences and lengths tensors will be modified inplace; nothing is
    returned.

    Args:
        sequences (torch.Tensor): Tensor (N, T) where T is the maximum
            length of tokens from N sequences.
        lengths (torch.IntTensor): Tensor (N,) representing the
            number of tokens for each sequence.
    """
    core.collapse_repeated(sequences, lengths)
def remove_blank(
        sequences,              # type: torch.Tensor
        lengths,                # type: torch.IntTensor
        blank                   # type: torch.Tensor
):
    """Remove blank tokens.

    Sequences and lengths tensors will be modified inplace; nothing is
    returned.

    Args:
        sequences (torch.Tensor): Tensor (N, T) where T is the maximum
            length of tokens from N sequences.
        lengths (torch.IntTensor): Tensor (N,) representing the
            number of tokens for each sequence.
        blank (torch.Tensor): A set of tokens to remove.
    """
    core.remove_blank(sequences, lengths, blank)
def strip_separator(sequences: torch.Tensor, lengths: torch.IntTensor,
                    separator: torch.Tensor) -> None:
    """Remove separator tokens, in place.

    Separators are stripped as leading/trailing tokens and collapsed
    when repeated in the middle of a sequence.

    Args:
        sequences: ``(N, T)`` tensor of padded token sequences.
        lengths: ``(N,)`` tensor with the valid token count per sequence.
        separator: set of token ids treated as separators.
    """
    # Delegates to the compiled CUDA extension; nothing is returned.
    core.strip_separator(sequences, lengths, separator)
def levenshtein_distance(hypotheses: torch.Tensor,
                         references: torch.Tensor,
                         hypothesis_lengths: torch.IntTensor,
                         references_lengths: torch.IntTensor,
                         blank: torch.Tensor,
                         separator: torch.Tensor) -> torch.Tensor:
    """Levenshtein edit distance for separated words or independent tokens.

    Args:
        hypotheses: ``(N, H)`` tensor of padded hypothesis tokens.
        references: ``(N, R)`` tensor of padded reference tokens.
        hypothesis_lengths: ``(N,)`` token counts per hypothesis.
        references_lengths: ``(N,)`` token counts per reference.
        blank: token ids used to represent the blank symbol.
        separator: token ids used to represent the separator symbol.

    Returns:
        ``torch.ShortTensor`` of shape ``(N, 4)`` with per-item
        insertion/deletion/substitution/length statistics.
    """
    # Shape sanity checks: the two batches must pair up one-to-one.
    assert hypotheses.dim() == 2
    assert references.dim() == 2
    assert hypothesis_lengths.dim() == 1
    assert references_lengths.dim() == 1
    assert hypotheses.size(0) == hypothesis_lengths.numel()
    assert references.size(0) == references_lengths.numel()
    assert hypothesis_lengths.numel() == references_lengths.numel()
    return core.levenshtein_distance(hypotheses, references,
                                     hypothesis_lengths, references_lengths,
                                     blank, separator)
def compute_wer(hs, rs, hn, rn, blank, space):
    """Return the per-utterance WER: (ins + del + sub) / reference length."""
    stats = levenshtein_distance(hs, rs, hn, rn, blank, space).float()
    edits = stats[:, :3].sum(dim=1)
    return edits / stats[:, 3]
class AverageWER(object):
    """Running word error rate accumulator over Levenshtein statistics.

    Collects insertion/deletion/substitution/length counts across batches
    and reports them either as raw counts (``detail=1``) or as percentages
    of the accumulated reference length (``detail=2``).
    """
    def __init__(self, blank, space, title='WER', detail=2):
        self.blank = blank
        self.space = space
        self.title = title
        self.detail = detail
        self.data = 0  # running [ins, del, sub, len] totals
    def update(self, hs, rs, hn, rn):
        """Accumulate edit statistics for one batch."""
        batch = levenshtein_distance(hs, rs, hn, rn, self.blank, self.space)
        self.data += batch.sum(dim=0).float()
    def values(self):
        """Return (error, ins, del, sub); all but error stay raw unless detail == 2."""
        ins, dele, sub, length = (self.data[k] for k in range(4))
        err = (ins + dele + sub) / length * 100
        if self.detail == 2:
            ins = ins / length * 100
            dele = dele / length * 100
            sub = sub / length * 100
        return err, ins, dele, sub
    def summary(self, writer, epoch):
        """Log the current statistics to a TensorBoard-style writer."""
        err, ins, dele, sub = self.values()
        if self.detail > 0:
            for tag, value in (('/insertions', ins),
                               ('/deletions', dele),
                               ('/substitutions', sub)):
                writer.add_scalar(self.title + tag, value, epoch)
        writer.add_scalar(self.title, err, epoch)
    def __str__(self):
        err, ins, dele, sub = self.values()
        info = f'{self.title} {err:.1f}'
        if self.detail == 1:
            info += f' [ {int(ins)} ins, {int(dele)} del, {int(sub)} sub ]'
        elif self.detail == 2:
            info += f' [ {ins:.1f} ins, {dele:.1f} del, {sub:.1f} sub ]'
        return info
class AverageCER(AverageWER):
    """Character error rate accumulator.

    Space tokens are folded into the blank set (so they are removed before
    scoring) and an empty separator set is used, scoring every remaining
    token independently.
    """
    def __init__(self, blank, space, title='CER', detail=2):
        merged_blank = torch.cat([blank, space])
        empty_separator = torch.empty([], dtype=space.dtype, device=space.device)
        super(AverageCER, self).__init__(merged_blank, empty_separator, title, detail)
|
import torch
import torch_edit_distance_cuda as core
from pkg_resources import get_distribution
__version__ = get_distribution('torch_edit_distance').version
def collapse_repeated(
sequences, # type: torch.Tensor
lengths # type: torch.IntTensor
):
"""Merge repeated tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
"""
core.collapse_repeated(sequences, lengths)
def remove_blank(
sequences, # type: torch.Tensor
lengths, # type: torch.IntTensor
blank # type: torch.Tensor
):
"""Remove tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
blank (torch.Tensor): A set of tokens to remove.
"""
core.remove_blank(sequences, lengths, blank)
def strip_separator(
sequences, # type: torch.Tensor
lengths, # type: torch.IntTensor
separator # type: torch.Tensor
):
"""Remove tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
separator (torch.Tensor): A set of tokens to remove as
leading/trailing tokens as well as repeated middle tokens.
"""
core.strip_separator(sequences, lengths, separator)
def levenshtein_distance(
hypotheses, # type: torch.Tensor
references, # type: torch.Tensor
hypothesis_lengths, # type: torch.IntTensor
references_lengths, # type: torch.IntTensor
blank, # type: torch.Tensor
separator # type: torch.Tensor
):
"""Levenshtein edit-distance for separated words or independent tokens.
Return torch.ShortTensor (N, 4) with detail ins/del/sub/len statistics.
Args:
hypotheses (torch.Tensor): Tensor (N, H) where H is the maximum
length of tokens from N hypotheses.
references (torch.Tensor): Tensor (N, R) where R is the maximum
length of tokens from N references.
hypothesis_lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each hypothesis.
references_lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each reference.
blank (torch.Tensor): tokens used to represent the blank symbol.
separator (torch.Tensor): tokens used to represent the separator symbol.
"""
assert hypotheses.dim() == 2
assert references.dim() == 2
assert hypothesis_lengths.dim() == 1
assert references_lengths.dim() == 1
assert hypotheses.size(0) == hypothesis_lengths.numel()
assert references.size(0) == references_lengths.numel()
assert hypothesis_lengths.numel() == references_lengths.numel()
return core.levenshtein_distance(hypotheses, references,
hypothesis_lengths, references_lengths,
blank, separator)
def compute_wer(hs, rs, hn, rn, blank, space):
data = levenshtein_distance(hs, rs, hn, rn, blank, space).float()
wer = data[:, :3].sum(dim=1) / data[:, 3]
return wer
class AverageWER(object):
def __init__(self, blank, space, title='WER', detail=2):
self.blank = blank
self.space = space
self.title = title
self.detail = detail
self.data = 0
def update(self, hs, rs, hn, rn):
data = levenshtein_distance(hs, rs, hn, rn, self.blank, self.space)
self.data += data.sum(dim=0).float()
def values(self):
_ins = self.data[0]
_del = self.data[1]
_sub = self.data[2]
_len = self.data[3]
_err = (_ins + _del + _sub) / _len * 100
if self.detail == 2:
_ins = _ins / _len * 100
_del = _del / _len * 100
_sub = _sub / _len * 100
return _err, _ins, _del, _sub
def summary(self, writer, epoch):
_err, _ins, _del, _sub = self.values()
if self.detail > 0:
writer.add_scalar(self.title + '/insertions', _ins, epoch)
writer.add_scalar(self.title + '/deletions', _del, epoch)
writer.add_scalar(self.title + '/substitutions', _sub, epoch)
writer.add_scalar(self.title, _err, epoch)
def __str__(self):
_err, _ins, _del, _sub = self.values()
info = '%s %.1f' % (self.title, _err)
if self.detail == 1:
info += ' [ %d ins, %d del, %d sub ]' % (_ins, _del, _sub)
elif self.detail == 2:
info += ' [ %.1f ins, %.1f del, %.1f sub ]' % (_ins, _del, _sub)
return info
class AverageCER(AverageWER):
def __init__(self, blank, space, title='CER', detail=2):
blank = torch.cat([blank, space])
space = torch.empty([], dtype=space.dtype, device=space.device)
super(AverageCER, self).__init__(blank, space, title, detail)
|
en
| 0.776224
|
# type: torch.Tensor # type: torch.IntTensor Merge repeated tokens. Sequences and lengths tensors will be modified inplace. Args: sequences (torch.Tensor): Tensor (N, T) where T is the maximum length of tokens from N sequences. lengths (torch.IntTensor): Tensor (N,) representing the number of tokens for each sequence. # type: torch.Tensor # type: torch.IntTensor # type: torch.Tensor Remove tokens. Sequences and lengths tensors will be modified inplace. Args: sequences (torch.Tensor): Tensor (N, T) where T is the maximum length of tokens from N sequences. lengths (torch.IntTensor): Tensor (N,) representing the number of tokens for each sequence. blank (torch.Tensor): A set of tokens to remove. # type: torch.Tensor # type: torch.IntTensor # type: torch.Tensor Remove tokens. Sequences and lengths tensors will be modified inplace. Args: sequences (torch.Tensor): Tensor (N, T) where T is the maximum length of tokens from N sequences. lengths (torch.IntTensor): Tensor (N,) representing the number of tokens for each sequence. separator (torch.Tensor): A set of tokens to remove as leading/trailing tokens as well as repeated middle tokens. # type: torch.Tensor # type: torch.Tensor # type: torch.IntTensor # type: torch.IntTensor # type: torch.Tensor # type: torch.Tensor Levenshtein edit-distance for separated words or independent tokens. Return torch.ShortTensor (N, 4) with detail ins/del/sub/len statistics. Args: hypotheses (torch.Tensor): Tensor (N, H) where H is the maximum length of tokens from N hypotheses. references (torch.Tensor): Tensor (N, R) where R is the maximum length of tokens from N references. hypothesis_lengths (torch.IntTensor): Tensor (N,) representing the number of tokens for each hypothesis. references_lengths (torch.IntTensor): Tensor (N,) representing the number of tokens for each reference. blank (torch.Tensor): tokens used to represent the blank symbol. separator (torch.Tensor): tokens used to represent the separator symbol.
| 2.474823
| 2
|
solutions/python3/problem1213.py
|
tjyiiuan/LeetCode
| 0
|
6627283
|
<reponame>tjyiiuan/LeetCode<filename>solutions/python3/problem1213.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
1213. Intersection of Three Sorted Arrays
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order,
return a sorted array of only the integers that appeared in all three arrays.
Constraints:
1 <= arr1.length, arr2.length, arr3.length <= 1000
1 <= arr1[i], arr2[i], arr3[i] <= 2000
"""
class Solution:
    def arraysIntersection(self, arr1, arr2, arr3):
        """Return the sorted values present in all three arrays."""
        common = set(arr1)
        common &= set(arr2)
        common &= set(arr3)
        return sorted(common)
|
# -*- coding: utf-8 -*-
"""
1213. Intersection of Three Sorted Arrays
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order,
return a sorted array of only the integers that appeared in all three arrays.
Constraints:
1 <= arr1.length, arr2.length, arr3.length <= 1000
1 <= arr1[i], arr2[i], arr3[i] <= 2000
"""
class Solution:
def arraysIntersection(self, arr1, arr2, arr3):
return sorted(set(arr1) & set(arr2) & set(arr3))
|
en
| 0.798535
|
# -*- coding: utf-8 -*- 1213. Intersection of Three Sorted Arrays Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order, return a sorted array of only the integers that appeared in all three arrays. Constraints: 1 <= arr1.length, arr2.length, arr3.length <= 1000 1 <= arr1[i], arr2[i], arr3[i] <= 2000
| 3.867464
| 4
|
1_histogram/2_histogram_lane_pi_murtaza/sample-codes/MotorModule..py
|
masudpce/final-projec
| 1
|
6627284
|
<gh_stars>1-10
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Motor():
    """Differential-drive controller for two DC motors behind an H-bridge
    (one PWM enable pin plus two direction pins per motor), using RPi.GPIO.
    """
    def __init__(self,EnaA,In1A,In2A,EnaB,In1B,In2B):
        # BCM pin numbers: Ena* are the PWM enable pins, In1*/In2* set direction.
        self.EnaA= EnaA
        self.In1A = In1A
        self.In2A = In2A
        self.EnaB= EnaB
        self.In1B = In1B
        self.In2B = In2B
        GPIO.setup(self.EnaA,GPIO.OUT);GPIO.setup(self.In1A,GPIO.OUT);GPIO.setup(self.In2A,GPIO.OUT)
        GPIO.setup(self.EnaB,GPIO.OUT);GPIO.setup(self.In1B,GPIO.OUT);GPIO.setup(self.In2B,GPIO.OUT)
        # 100 Hz PWM on both enable pins, started at 0% duty (motors idle).
        self.pwmA = GPIO.PWM(self.EnaA, 100);
        self.pwmB = GPIO.PWM(self.EnaB, 100);
        self.pwmA.start(0);
        self.pwmB.start(0);
        self.mySpeed=0
    def move(self, speed=0.5, turn=0, t=0):
        """Drive for t seconds. speed and turn are fractions in [-1, 1]:
        positive speed is forward; turn steers by speeding up one wheel
        and slowing the other.
        """
        speed *= 100
        turn *= 70  # NOTE(review): turn gain is 70, not 100 — presumably to soften steering; confirm against the source tutorial
        leftSpeed = speed-turn
        rightSpeed = speed+turn
        # Clamp both wheels to the valid duty-cycle range [-100, 100].
        if leftSpeed>100: leftSpeed =100
        elif leftSpeed<-100: leftSpeed = -100
        if rightSpeed>100: rightSpeed =100
        elif rightSpeed<-100: rightSpeed = -100
        # print(leftSpeed,rightSpeed)
        # Magnitude drives the PWM duty cycle; the sign picks H-bridge direction.
        self.pwmA.ChangeDutyCycle(abs(leftSpeed))
        self.pwmB.ChangeDutyCycle(abs(rightSpeed))
        if leftSpeed>0:GPIO.output(self.In1A,GPIO.HIGH);GPIO.output(self.In2A,GPIO.LOW)
        else:GPIO.output(self.In1A,GPIO.LOW);GPIO.output(self.In2A,GPIO.HIGH)
        if rightSpeed>0:GPIO.output(self.In1B,GPIO.HIGH);GPIO.output(self.In2B,GPIO.LOW)
        else:GPIO.output(self.In1B,GPIO.LOW);GPIO.output(self.In2B,GPIO.HIGH)
        sleep(t)
    def stop(self,t=0):
        """Cut PWM to both motors (coast to a stop), then wait t seconds."""
        self.pwmA.ChangeDutyCycle(0);
        self.pwmB.ChangeDutyCycle(0);
        self.mySpeed=0
        sleep(t)
def main():
    """Exercise the motor: forward, backward, then turn each way (2 s each)."""
    motor.move(0.5, 0, 2)
    motor.stop(2)
    motor.move(-0.5, 0, 2)
    motor.stop(2)
    motor.move(0, 0.5, 2)
    motor.stop(2)
    motor.move(0, -0.5, 2)
    motor.stop(2)
if __name__ == '__main__':
    # BCM pins: motor A on 2/3/4, motor B on 17/22/27.
    motor= Motor(2,3,4,17,22,27)
    main()
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Motor():
def __init__(self,EnaA,In1A,In2A,EnaB,In1B,In2B):
self.EnaA= EnaA
self.In1A = In1A
self.In2A = In2A
self.EnaB= EnaB
self.In1B = In1B
self.In2B = In2B
GPIO.setup(self.EnaA,GPIO.OUT);GPIO.setup(self.In1A,GPIO.OUT);GPIO.setup(self.In2A,GPIO.OUT)
GPIO.setup(self.EnaB,GPIO.OUT);GPIO.setup(self.In1B,GPIO.OUT);GPIO.setup(self.In2B,GPIO.OUT)
self.pwmA = GPIO.PWM(self.EnaA, 100);
self.pwmB = GPIO.PWM(self.EnaB, 100);
self.pwmA.start(0);
self.pwmB.start(0);
self.mySpeed=0
def move(self, speed=0.5, turn=0, t=0):
speed *= 100
turn *= 70 # todo: starnge value, need to check video
leftSpeed = speed-turn
rightSpeed = speed+turn
if leftSpeed>100: leftSpeed =100
elif leftSpeed<-100: leftSpeed = -100
if rightSpeed>100: rightSpeed =100
elif rightSpeed<-100: rightSpeed = -100
# print(leftSpeed,rightSpeed)
self.pwmA.ChangeDutyCycle(abs(leftSpeed))
self.pwmB.ChangeDutyCycle(abs(rightSpeed))
if leftSpeed>0:GPIO.output(self.In1A,GPIO.HIGH);GPIO.output(self.In2A,GPIO.LOW)
else:GPIO.output(self.In1A,GPIO.LOW);GPIO.output(self.In2A,GPIO.HIGH)
if rightSpeed>0:GPIO.output(self.In1B,GPIO.HIGH);GPIO.output(self.In2B,GPIO.LOW)
else:GPIO.output(self.In1B,GPIO.LOW);GPIO.output(self.In2B,GPIO.HIGH)
sleep(t)
def stop(self,t=0):
self.pwmA.ChangeDutyCycle(0);
self.pwmB.ChangeDutyCycle(0);
self.mySpeed=0
sleep(t)
def main():
motor.move(0.5, 0, 2)
motor.stop(2)
motor.move(-0.5, 0, 2)
motor.stop(2)
motor.move(0, 0.5, 2)
motor.stop(2)
motor.move(0, -0.5, 2)
motor.stop(2)
if __name__ == '__main__':
motor= Motor(2,3,4,17,22,27)
main()
|
en
| 0.598709
|
# todo: starnge value, need to check video # print(leftSpeed,rightSpeed)
| 3.217817
| 3
|
tests/test_metrics.py
|
louisfh/opensoundscape
| 30
|
6627285
|
<reponame>louisfh/opensoundscape
#!/usr/bin/env python3
import pytest
import numpy as np
|
#!/usr/bin/env python3
import pytest
import numpy as np
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 0.904044
| 1
|
pong/admin.py
|
vimm0/python_pong_scoreboard
| 0
|
6627286
|
<filename>pong/admin.py
from django.contrib import admin
from pong.models import Player, Match
class PlayerAdmin(admin.ModelAdmin):
    """Admin change list for Player: show the player's name."""
    list_display = ('name',)
class MatchAdmin(admin.ModelAdmin):
    """Admin change list for Match: show both participants."""
    list_display = ('player_one', 'player_two')
# Expose both models in the Django admin site.
admin.site.register(Player, PlayerAdmin)
admin.site.register(Match, MatchAdmin)
|
<filename>pong/admin.py
from django.contrib import admin
from pong.models import Player, Match
class PlayerAdmin(admin.ModelAdmin):
list_display = ('name',)
class MatchAdmin(admin.ModelAdmin):
list_display = ('player_one', 'player_two')
admin.site.register(Player, PlayerAdmin)
admin.site.register(Match, MatchAdmin)
|
none
| 1
| 1.994256
| 2
|
|
auxiliary/rastertolegend/rastertolegend.py
|
johnnyzhang295/MMGIS
| 63
|
6627287
|
<reponame>johnnyzhang295/MMGIS<filename>auxiliary/rastertolegend/rastertolegend.py
import os
import sys
import subprocess
from osgeo import gdal
from pathlib import Path
# Usage: rastertolegend.py <raster> <colorfile> [-discrete]
raster = sys.argv[1]
splitfilenameR = os.path.splitext(raster)  # (root, ext) of the raster path
colorfile = sys.argv[2]
splitfilenameC = os.path.basename(colorfile).split(".")  # colorfile name parts
discrete = ""
values = []  # color-entry values, filled in by colorToLegend
if len(sys.argv) > 3:
    discrete = sys.argv[3]
def colorRelief(raster, colorfile, discrete):
    """Run ``gdaldem color-relief`` on *raster* using *colorfile*.

    The colored output is written next to the input, named from the
    module-level ``splitfilenameR``/``splitfilenameC`` parts. When
    *discrete* is ``"-discrete"``, gdaldem uses the nearest exact color
    entry instead of interpolating between entries.
    """
    exactOrNearest = ""
    if discrete == "-discrete":
        exactOrNearest = "-nearest_color_entry"
    input_file = str(Path(raster).absolute())
    output_file = str(Path(splitfilenameR[0] + "_" + splitfilenameC[0] + splitfilenameR[1]).absolute())
    colorfile_path = str(Path(colorfile).absolute())
    gdalDEMcr = ["gdaldem", "color-relief"]
    if exactOrNearest:
        gdalDEMcr.append(exactOrNearest)
    gdalDEMcr += [input_file, colorfile_path, output_file]
    print("Running:", " ".join(gdalDEMcr))
    process = subprocess.Popen(gdalDEMcr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains both pipes while waiting; calling wait() before
    # reading (as before) can deadlock once a pipe buffer fills up.
    stdout, stderr = process.communicate()
    for line in stdout.splitlines(keepends=True):
        print(line.decode())
    for line in stderr.splitlines(keepends=True):
        print(line.decode())
def colorToLegend(colorfile, min, max, discrete):
    """Write <raster>_<colorfile>_legend.csv describing the color file.

    Each row is "color,strokecolor,shape,value". Percent entries in the
    color file are mapped onto the raster's [min, max] value range.

    NOTE(review): ``min``/``max`` shadow the builtins, and this appends to
    the module-level ``values`` list, so entries would accumulate if the
    function were ever called twice in one process.
    """
    legend = open(splitfilenameR[0] + "_" + splitfilenameC[0] + "_legend.csv", "w")
    legend.write("color,strokecolor,shape,value")
    cf = open(colorfile)
    percents = False
    # First pass: collect the numeric value of every color entry
    # ("nv" rows are the no-data entry and are skipped).
    for line in cf:
        split = line.split(" ", 1)
        value = split[0]
        if value[-1:] == "%":
            value = split[0][:-1]
            percents = True
        if value.lower() != "nv":
            values.append(float(value))
    cf.close()
    cf = open(colorfile)
    # Entry ordering determines how the open-ended first/last buckets are labeled.
    highToLow = True
    if values[0] < values[1]:
        highToLow = False
    if discrete == "-discrete":
        if percents:
            # Convert percent entries to absolute raster values first.
            j = 0
            for v in values:
                values[j] = int(mapPercent(float(v)/100, min, max))
                j += 1
        i = 0
        # Second pass: label each color with the bucket spanning the
        # midpoints toward its neighboring entries.
        for line in cf:
            if i > 0 and i < len(values) - 1:
                value = str(values[i] - ((values[i] - values[i-1])/2)) + " - " + str(values[i] + ((values[i+1] - values[i])/2))
            elif i == 0:
                sign = str(int(min)) + " - "
                if not percents:
                    sign = "< "
                if highToLow:
                    sign = str(int(max)) + " - "
                    if not percents:
                        sign = "> "
                value = sign + str((values[i+1] + values[i])/2)
            elif i == len(values) - 1:
                sign = " - " + str(int(max))
                if not percents:
                    sign = "> "
                if highToLow:
                    sign = " - " + str(int(min))
                    if not percents:
                        sign = "< "
                value = str((values[i] + values[i-1])/2) + sign
                if not percents:
                    value = sign + str((values[i] + values[i-1])/2)
            split = line.split(" ", 1)
            if split[0].lower() != "nv":
                legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," + value)
            i += 1
    else:
        # Continuous mode: one row per entry with its absolute value.
        for line in cf:
            split = line.split(" ", 1)
            value = split[0]
            if value[-1:] == "%":
                value = split[0][:-1]
            if split[0].lower() != "nv":
                legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," +
                             str(int(mapPercent(float(value)/100, min, max))))
    legend.close()
    cf.close()
# helper functions
def rgb_to_hex(rgb):
    """Format an (r, g, b) tuple of 0-255 ints as a '#rrggbb' hex string."""
    return '#{:02x}{:02x}{:02x}'.format(*rgb)
def mapPercent(p, min, max):
    """Linearly map the fraction p in [0, 1] onto the range [min, max]."""
    span = max - min
    return span * p + min
# Open the raster and pull band-1 statistics to map percent entries onto.
r = gdal.Open(raster)
# stats[0] is min, stats[1] is max (GetStatistics(1, 1): approximate ok, force compute — see GDAL API)
stats = r.GetRasterBand(1).GetStatistics(1, 1)
colorRelief(raster, colorfile, discrete)
colorToLegend(colorfile, stats[0], stats[1], discrete)
|
import os
import sys
import subprocess
from osgeo import gdal
from pathlib import Path
raster = sys.argv[1]
splitfilenameR = os.path.splitext(raster)
colorfile = sys.argv[2]
splitfilenameC = os.path.basename(colorfile).split(".")
discrete = ""
values = []
if len(sys.argv) > 3:
discrete = sys.argv[3]
def colorRelief(raster, colorfile, discrete):
exactOrNearest = ""
if discrete == "-discrete":
exactOrNearest = "-nearest_color_entry"
input_file = str(Path(raster).absolute())
output_file = str(Path(splitfilenameR[0] + "_" + splitfilenameC[0] + splitfilenameR[1]).absolute())
colorfile_path = str(Path(colorfile).absolute())
if exactOrNearest == "":
gdalDEMcr = ["gdaldem", "color-relief", input_file, colorfile_path, output_file]
else:
gdalDEMcr = ["gdaldem", "color-relief", exactOrNearest, input_file, colorfile_path, output_file]
print("Running:", " ".join(gdalDEMcr))
process = subprocess.Popen(gdalDEMcr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
print(output.decode())
for error in process.stderr:
print(error.decode())
def colorToLegend(colorfile, min, max, discrete):
legend = open(splitfilenameR[0] + "_" + splitfilenameC[0] + "_legend.csv", "w")
legend.write("color,strokecolor,shape,value")
cf = open(colorfile)
percents = False
for line in cf:
split = line.split(" ", 1)
value = split[0]
if value[-1:] == "%":
value = split[0][:-1]
percents = True
if value.lower() != "nv":
values.append(float(value))
cf.close()
cf = open(colorfile)
highToLow = True
if values[0] < values[1]:
highToLow = False
if discrete == "-discrete":
if percents:
j = 0
for v in values:
values[j] = int(mapPercent(float(v)/100, min, max))
j += 1
i = 0
for line in cf:
if i > 0 and i < len(values) - 1:
value = str(values[i] - ((values[i] - values[i-1])/2)) + " - " + str(values[i] + ((values[i+1] - values[i])/2))
elif i == 0:
sign = str(int(min)) + " - "
if not percents:
sign = "< "
if highToLow:
sign = str(int(max)) + " - "
if not percents:
sign = "> "
value = sign + str((values[i+1] + values[i])/2)
elif i == len(values) - 1:
sign = " - " + str(int(max))
if not percents:
sign = "> "
if highToLow:
sign = " - " + str(int(min))
if not percents:
sign = "< "
value = str((values[i] + values[i-1])/2) + sign
if not percents:
value = sign + str((values[i] + values[i-1])/2)
split = line.split(" ", 1)
if split[0].lower() != "nv":
legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," + value)
i += 1
else:
for line in cf:
split = line.split(" ", 1)
value = split[0]
if value[-1:] == "%":
value = split[0][:-1]
if split[0].lower() != "nv":
legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," +
str(int(mapPercent(float(value)/100, min, max))))
legend.close()
cf.close()
# helper functions
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
def mapPercent(p, min, max):
return ((max - min) * p) + min
r = gdal.Open(raster)
# stats[0] is min, stats[1] is max
stats = r.GetRasterBand(1).GetStatistics(1, 1)
colorRelief(raster, colorfile, discrete)
colorToLegend(colorfile, stats[0], stats[1], discrete)
|
en
| 0.526424
|
# helper functions # stats[0] is min, stats[1] is max
| 2.698826
| 3
|
holoviews/tests/plotting/matplotlib/testpathplot.py
|
xavArtley/holoviews
| 1
|
6627288
|
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Polygons, Contours, Path
from .testplot import TestMPLPlot, mpl_renderer
class TestPathPlot(TestMPLPlot):
    """Matplotlib rendering tests for Path elements with per-vertex style ops."""
    def test_path_continuously_varying_color_op(self):
        # A path with N vertices renders as N-1 segments, so the artist's
        # color array drops the last value and the clim spans the data range.
        xs = [1, 2, 3, 4]
        ys = xs[::-1]
        color = [998, 999, 998, 994]
        data = {'x': xs, 'y': ys, 'color': color}
        levels = [0, 38, 73, 95, 110, 130, 156, 999]
        colors = ['#5ebaff', '#00faf4', '#ffffcc', '#ffe775', '#ffc140', '#ff8f20', '#ff6060']
        path = Path([data], vdims='color').options(
            color='color', color_levels=levels, cmap=colors)
        plot = mpl_renderer.get_plot(path)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([998, 999, 998]))
        self.assertEqual(artist.get_clim(), (994, 999))
    def test_path_continuously_varying_alpha_op(self):
        # Per-segment alpha is not supported, so building the plot must raise.
        xs = [1, 2, 3, 4]
        ys = xs[::-1]
        alpha = [0.1, 0.7, 0.3, 0.2]
        data = {'x': xs, 'y': ys, 'alpha': alpha}
        path = Path([data], vdims='alpha').options(alpha='alpha')
        with self.assertRaises(Exception):
            mpl_renderer.get_plot(path)
    def test_path_continuously_varying_line_width_op(self):
        xs = [1, 2, 3, 4]
        ys = xs[::-1]
        line_width = [1, 7, 3, 2]
        data = {'x': xs, 'y': ys, 'line_width': line_width}
        path = Path([data], vdims='line_width').options(linewidth='line_width')
        plot = mpl_renderer.get_plot(path)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [1, 7, 3])
    def test_path_continuously_varying_line_width_op_update(self):
        # Switching the HoloMap frame must push the new per-segment widths.
        xs = [1, 2, 3, 4]
        ys = xs[::-1]
        path = HoloMap({
            0: Path([{'x': xs, 'y': ys, 'line_width': [1, 7, 3, 2]}], vdims='line_width'),
            1: Path([{'x': xs, 'y': ys, 'line_width': [3, 8, 2, 3]}], vdims='line_width')
        }).options(linewidth='line_width')
        plot = mpl_renderer.get_plot(path)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [1, 7, 3])
        plot.update((1,))
        self.assertEqual(artist.get_linewidths(), [3, 8, 2])
class TestPolygonPlot(TestMPLPlot):
    """Matplotlib rendering tests for Polygons: holes, color/alpha/width ops."""
    def test_polygons_colored(self):
        polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
                              for j in range(5)})
        plot = mpl_renderer.get_plot(polygons)
        # Each overlay element carries its own level but shares the clim (0, 4).
        for j, splot in enumerate(plot.subplots.values()):
            artist = splot.handles['artist']
            self.assertEqual(artist.get_array(), np.array([j]))
            self.assertEqual(artist.get_clim(), (0, 4))
    def test_polygon_with_hole_plot(self):
        xs = [1, 2, 3]
        ys = [2, 0, 7]
        holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]
        poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
        plot = mpl_renderer.get_plot(poly)
        artist = plot.handles['artist']
        paths = artist.get_paths()
        self.assertEqual(len(paths), 1)
        path = paths[0]
        # Exterior ring followed by both hole rings in one compound path.
        self.assertEqual(path.vertices, np.array([
            (1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
            (2.1, 4.5), (2.5, 5), (2.3, 3.5)])
        )
        # 1/2 are matplotlib's MOVETO/LINETO path codes.
        self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
    def test_multi_polygon_hole_plot(self):
        # Two polygons separated by NaNs; only the first one has holes.
        xs = [1, 2, 3, np.nan, 6, 7, 3]
        ys = [2, 0, 7, np.nan, 7, 5, 2]
        holes = [
            [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],
            []
        ]
        poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value'])
        plot = mpl_renderer.get_plot(poly)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([1, 1]))
        paths = artist.get_paths()
        self.assertEqual(len(paths), 2)
        path = paths[0]
        self.assertEqual(path.vertices, np.array([
            (1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
            (2.1, 4.5), (2.5, 5), (2.3, 3.5)])
        )
        self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
        path2 = paths[1]
        self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)]))
        self.assertEqual(path2.codes, np.array([1, 2, 2]))
    def test_polygons_color_op(self):
        polygons = Polygons([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        # RGBA rows: 'green' is (0, 128/255, 0) hence the 0.501961.
        colors = np.array([[0. , 0.501961, 0. , 1. ],
                           [1. , 0. , 0. , 1. ]])
        self.assertEqual(artist.get_facecolors(), colors)
    def test_polygons_color_op_update(self):
        polygons = HoloMap({
            0: Polygons([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
            ], vdims='color'),
            1: Polygons([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'blue'},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'green'}
            ], vdims='color'),
        }).options(color='color')
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        colors = np.array([[0, 0.501961, 0, 1],
                           [1, 0, 0, 1]])
        self.assertEqual(artist.get_facecolors(), colors)
        plot.update((1,))
        colors = np.array([[0, 0, 1, 1],
                           [0, 0.501961, 0, 1]])
        self.assertEqual(artist.get_facecolors(), colors)
    def test_polygons_linear_color_op(self):
        polygons = Polygons([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([7, 3]))
        self.assertEqual(artist.get_clim(), (3, 7))
    def test_polygons_linear_color_op_update(self):
        # framewise=True recomputes the color limits on each frame.
        polygons = HoloMap({
            0: Polygons([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
            ], vdims='color'),
            1: Polygons([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 2},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 5}
            ], vdims='color'),
        }).options(color='color', framewise=True)
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([7, 3]))
        self.assertEqual(artist.get_clim(), (3, 7))
        plot.update((1,))
        self.assertEqual(artist.get_array(), np.array([2, 5]))
        self.assertEqual(artist.get_clim(), (2, 5))
    def test_polygons_categorical_color_op(self):
        # String values are factorized to integer codes before coloring.
        polygons = Polygons([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'b'},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'a'}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([0, 1]))
        self.assertEqual(artist.get_clim(), (0, 1))
    def test_polygons_alpha_op(self):
        # Per-polygon alpha is not supported, so building the plot must raise.
        polygons = Polygons([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'alpha': 0.7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'alpha': 0.3}
        ], vdims='alpha').options(alpha='alpha')
        with self.assertRaises(Exception):
            mpl_renderer.get_plot(polygons)
    def test_polygons_line_width_op(self):
        polygons = Polygons([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
        ], vdims='line_width').options(linewidth='line_width')
        plot = mpl_renderer.get_plot(polygons)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [7, 3])
class TestContoursPlot(TestMPLPlot):
    """Matplotlib rendering tests for Contours: color/alpha/width style ops."""
    def test_contours_categorical_color(self):
        # Random geometry is fine here: only the categorical z mapping is checked.
        path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat}
                         for cat in ('B', 'A', 'B')],
                        vdims='z').opts(plot=dict(color_index='z'))
        plot = mpl_renderer.get_plot(path)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([0, 1, 0]))
        self.assertEqual(artist.get_clim(), (0, 1))
    def test_contours_color_op(self):
        contours = Contours([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        # RGBA rows: 'green' is (0, 128/255, 0) hence the 0.501961.
        colors = np.array([[0. , 0.501961, 0. , 1. ],
                           [1. , 0. , 0. , 1. ]])
        self.assertEqual(artist.get_edgecolors(), colors)
    def test_contours_color_op_update(self):
        contours = HoloMap({
            0: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
            ], vdims='color'),
            1: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'blue'},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'green'}
            ], vdims='color'),
        }).options(color='color')
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        colors = np.array([[0, 0.501961, 0, 1],
                           [1, 0, 0, 1]])
        self.assertEqual(artist.get_edgecolors(), colors)
        plot.update((1,))
        colors = np.array([[0, 0, 1, 1],
                           [0, 0.501961, 0, 1]])
        self.assertEqual(artist.get_edgecolors(), colors)
    def test_contours_linear_color_op(self):
        contours = Contours([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([7, 3]))
        self.assertEqual(artist.get_clim(), (3, 7))
    def test_contours_linear_color_op_update(self):
        # framewise=True recomputes the color limits on each frame.
        contours = HoloMap({
            0: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
            ], vdims='color'),
            1: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 2},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 5}
            ], vdims='color'),
        }).options(color='color', framewise=True)
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([7, 3]))
        self.assertEqual(artist.get_clim(), (3, 7))
        plot.update((1,))
        self.assertEqual(artist.get_array(), np.array([2, 5]))
        self.assertEqual(artist.get_clim(), (2, 5))
    def test_contours_categorical_color_op(self):
        # String values are factorized to integer codes before coloring.
        contours = Contours([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'b'},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'a'}
        ], vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_array(), np.array([0, 1]))
        self.assertEqual(artist.get_clim(), (0, 1))
    def test_contours_alpha_op(self):
        # Per-contour alpha is not supported, so building the plot must raise.
        contours = Contours([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'alpha': 0.7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'alpha': 0.3}
        ], vdims='alpha').options(alpha='alpha')
        with self.assertRaises(Exception):
            mpl_renderer.get_plot(contours)
    def test_contours_line_width_op(self):
        contours = Contours([
            {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
            {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
        ], vdims='line_width').options(linewidth='line_width')
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [7, 3])
    def test_contours_line_width_op_update(self):
        contours = HoloMap({
            0: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
            ], vdims='line_width'),
            1: Contours([
                {('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 2},
                {('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 5}
            ], vdims='line_width'),
        }).options(linewidth='line_width', framewise=True)
        plot = mpl_renderer.get_plot(contours)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [7, 3])
        plot.update((1,))
        self.assertEqual(artist.get_linewidths(), [2, 5])
|
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Polygons, Contours, Path
from .testplot import TestMPLPlot, mpl_renderer
class TestPathPlot(TestMPLPlot):
def test_path_continuously_varying_color_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
color = [998, 999, 998, 994]
data = {'x': xs, 'y': ys, 'color': color}
levels = [0, 38, 73, 95, 110, 130, 156, 999]
colors = ['#5ebaff', '#00faf4', '#ffffcc', '#ffe775', '#ffc140', '#ff8f20', '#ff6060']
path = Path([data], vdims='color').options(
color='color', color_levels=levels, cmap=colors)
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([998, 999, 998]))
self.assertEqual(artist.get_clim(), (994, 999))
def test_path_continuously_varying_alpha_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
alpha = [0.1, 0.7, 0.3, 0.2]
data = {'x': xs, 'y': ys, 'alpha': alpha}
path = Path([data], vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(path)
def test_path_continuously_varying_line_width_op(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
line_width = [1, 7, 3, 2]
data = {'x': xs, 'y': ys, 'line_width': line_width}
path = Path([data], vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 7, 3])
def test_path_continuously_varying_line_width_op_update(self):
xs = [1, 2, 3, 4]
ys = xs[::-1]
path = HoloMap({
0: Path([{'x': xs, 'y': ys, 'line_width': [1, 7, 3, 2]}], vdims='line_width'),
1: Path([{'x': xs, 'y': ys, 'line_width': [3, 8, 2, 3]}], vdims='line_width')
}).options(linewidth='line_width')
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 7, 3])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [3, 8, 2])
class TestPolygonPlot(TestMPLPlot):
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = mpl_renderer.get_plot(polygons)
for j, splot in enumerate(plot.subplots.values()):
artist = splot.handles['artist']
self.assertEqual(artist.get_array(), np.array([j]))
self.assertEqual(artist.get_clim(), (0, 4))
def test_polygon_with_hole_plot(self):
xs = [1, 2, 3]
ys = [2, 0, 7]
holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
paths = artist.get_paths()
self.assertEqual(len(paths), 1)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
def test_multi_polygon_hole_plot(self):
xs = [1, 2, 3, np.nan, 6, 7, 3]
ys = [2, 0, 7, np.nan, 7, 5, 2]
holes = [
[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],
[]
]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value'])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([1, 1]))
paths = artist.get_paths()
self.assertEqual(len(paths), 2)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
path2 = paths[1]
self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)]))
self.assertEqual(path2.codes, np.array([1, 2, 2]))
def test_polygons_color_op(self):
polygons = Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
colors = np.array([[0. , 0.501961, 0. , 1. ],
[1. , 0. , 0. , 1. ]])
self.assertEqual(artist.get_facecolors(), colors)
def test_polygons_color_op_update(self):
polygons = HoloMap({
0: Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
], vdims='color'),
1: Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'blue'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'green'}
], vdims='color'),
}).options(color='color')
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
colors = np.array([[0, 0.501961, 0, 1],
[1, 0, 0, 1]])
self.assertEqual(artist.get_facecolors(), colors)
plot.update((1,))
colors = np.array([[0, 0, 1, 1],
[0, 0.501961, 0, 1]])
self.assertEqual(artist.get_facecolors(), colors)
def test_polygons_linear_color_op(self):
polygons = Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([7, 3]))
self.assertEqual(artist.get_clim(), (3, 7))
def test_polygons_linear_color_op_update(self):
polygons = HoloMap({
0: Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
], vdims='color'),
1: Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 2},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 5}
], vdims='color'),
}).options(color='color', framewise=True)
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([7, 3]))
self.assertEqual(artist.get_clim(), (3, 7))
plot.update((1,))
self.assertEqual(artist.get_array(), np.array([2, 5]))
self.assertEqual(artist.get_clim(), (2, 5))
def test_polygons_categorical_color_op(self):
polygons = Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'b'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'a'}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([0, 1]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_polygons_alpha_op(self):
polygons = Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'alpha': 0.7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'alpha': 0.3}
], vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(polygons)
def test_polygons_line_width_op(self):
polygons = Polygons([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
], vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(polygons)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [7, 3])
class TestContoursPlot(TestMPLPlot):
def test_contours_categorical_color(self):
path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat}
for cat in ('B', 'A', 'B')],
vdims='z').opts(plot=dict(color_index='z'))
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([0, 1, 0]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_contours_color_op(self):
contours = Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
colors = np.array([[0. , 0.501961, 0. , 1. ],
[1. , 0. , 0. , 1. ]])
self.assertEqual(artist.get_edgecolors(), colors)
def test_contours_color_op_update(self):
contours = HoloMap({
0: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'green'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'red'}
], vdims='color'),
1: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'blue'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'green'}
], vdims='color'),
}).options(color='color')
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
colors = np.array([[0, 0.501961, 0, 1],
[1, 0, 0, 1]])
self.assertEqual(artist.get_edgecolors(), colors)
plot.update((1,))
colors = np.array([[0, 0, 1, 1],
[0, 0.501961, 0, 1]])
self.assertEqual(artist.get_edgecolors(), colors)
def test_contours_linear_color_op(self):
contours = Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([7, 3]))
self.assertEqual(artist.get_clim(), (3, 7))
def test_contours_linear_color_op_update(self):
contours = HoloMap({
0: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 3}
], vdims='color'),
1: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 2},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 5}
], vdims='color'),
}).options(color='color', framewise=True)
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([7, 3]))
self.assertEqual(artist.get_clim(), (3, 7))
plot.update((1,))
self.assertEqual(artist.get_array(), np.array([2, 5]))
self.assertEqual(artist.get_clim(), (2, 5))
def test_contours_categorical_color_op(self):
contours = Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'color': 'b'},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'color': 'a'}
], vdims='color').options(color='color')
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([0, 1]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_contours_alpha_op(self):
contours = Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'alpha': 0.7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'alpha': 0.3}
], vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(contours)
def test_contours_line_width_op(self):
contours = Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
], vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [7, 3])
def test_contours_line_width_op_update(self):
contours = HoloMap({
0: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 7},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 3}
], vdims='line_width'),
1: Contours([
{('x', 'y'): [(0, 0), (0, 1), (1, 0)], 'line_width': 2},
{('x', 'y'): [(1, 0), (1, 1), (0, 1)], 'line_width': 5}
], vdims='line_width'),
}).options(linewidth='line_width', framewise=True)
plot = mpl_renderer.get_plot(contours)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [7, 3])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [2, 5])
|
none
| 1
| 2.141217
| 2
|
|
main.py
|
ekholabs/kaggle_mnist
| 0
|
6627289
|
import sys
import json
from model.mnist_cnn_classifier import MNISTCNNClassifier
from utils.s3 import S3Utils
if __name__ == '__main__':
classifiers = {'cnn': MNISTCNNClassifier('model_output/cnn')}
if len(sys.argv) < 2:
print('Please, pass the model type you want to execute. for example, "cnn"')
sys.exit(1)
model_type = sys.argv[1]
params = 'hyperparams_%s.json' % model_type
print('Parameters file:', params)
hyper_parameters = json.load(open('/data/%s' % params))
mnist = classifiers[model_type]
mnist.init(hyper_parameters)
mnist.train_model()
S3Utils.upload(model_type)
|
import sys
import json
from model.mnist_cnn_classifier import MNISTCNNClassifier
from utils.s3 import S3Utils
if __name__ == '__main__':
classifiers = {'cnn': MNISTCNNClassifier('model_output/cnn')}
if len(sys.argv) < 2:
print('Please, pass the model type you want to execute. for example, "cnn"')
sys.exit(1)
model_type = sys.argv[1]
params = 'hyperparams_%s.json' % model_type
print('Parameters file:', params)
hyper_parameters = json.load(open('/data/%s' % params))
mnist = classifiers[model_type]
mnist.init(hyper_parameters)
mnist.train_model()
S3Utils.upload(model_type)
|
none
| 1
| 2.708758
| 3
|
|
corehq/apps/app_manager/suite_xml/sections/resources.py
|
johan--/commcare-hq
| 0
|
6627290
|
<gh_stars>0
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.suite_xml.contributors import SectionContributor
from corehq.apps.app_manager.suite_xml.xml_models import LocaleResource, XFormResource
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from corehq.apps.app_manager.util import languages_mapping
class FormResourceContributor(SectionContributor):
section_name = 'xform_resources'
def get_section_elements(self):
first = []
last = []
for form_stuff in self.app.get_forms(bare=False):
form = form_stuff["form"]
if form_stuff['type'] == 'module_form':
path = './modules-{module.id}/forms-{form.id}.xml'.format(**form_stuff)
this_list = first
else:
path = './user_registration.xml'
this_list = last
resource = XFormResource(
id=id_strings.xform_resource(form),
version=form.get_version(),
local=path,
remote=path,
)
if form_stuff['type'] == 'module_form' and self.app.build_version >= '2.9':
resource.descriptor = u"Form: (Module {module_name}) - {form_name}".format(
module_name=trans(form_stuff["module"]["name"], langs=[self.app.default_language]),
form_name=trans(form["name"], langs=[self.app.default_language])
)
elif path == './user_registration.xml':
resource.descriptor = u"User Registration Form"
this_list.append(resource)
for x in first:
yield x
for x in last:
yield x
class LocaleResourceContributor(SectionContributor):
section_name = 'locale_resources'
def get_section_elements(self):
for lang in ["default"] + self.app.build_langs:
path = './{lang}/app_strings.txt'.format(lang=lang)
resource = LocaleResource(
language=lang,
id=id_strings.locale_resource(lang),
version=self.app.version,
local=path,
remote=path,
)
if self.app.build_version >= '2.9':
unknown_lang_txt = u"Unknown Language (%s)" % lang
resource.descriptor = u"Translations: %s" % languages_mapping().get(lang, [unknown_lang_txt])[0]
yield resource
|
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.suite_xml.contributors import SectionContributor
from corehq.apps.app_manager.suite_xml.xml_models import LocaleResource, XFormResource
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from corehq.apps.app_manager.util import languages_mapping
class FormResourceContributor(SectionContributor):
section_name = 'xform_resources'
def get_section_elements(self):
first = []
last = []
for form_stuff in self.app.get_forms(bare=False):
form = form_stuff["form"]
if form_stuff['type'] == 'module_form':
path = './modules-{module.id}/forms-{form.id}.xml'.format(**form_stuff)
this_list = first
else:
path = './user_registration.xml'
this_list = last
resource = XFormResource(
id=id_strings.xform_resource(form),
version=form.get_version(),
local=path,
remote=path,
)
if form_stuff['type'] == 'module_form' and self.app.build_version >= '2.9':
resource.descriptor = u"Form: (Module {module_name}) - {form_name}".format(
module_name=trans(form_stuff["module"]["name"], langs=[self.app.default_language]),
form_name=trans(form["name"], langs=[self.app.default_language])
)
elif path == './user_registration.xml':
resource.descriptor = u"User Registration Form"
this_list.append(resource)
for x in first:
yield x
for x in last:
yield x
class LocaleResourceContributor(SectionContributor):
section_name = 'locale_resources'
def get_section_elements(self):
for lang in ["default"] + self.app.build_langs:
path = './{lang}/app_strings.txt'.format(lang=lang)
resource = LocaleResource(
language=lang,
id=id_strings.locale_resource(lang),
version=self.app.version,
local=path,
remote=path,
)
if self.app.build_version >= '2.9':
unknown_lang_txt = u"Unknown Language (%s)" % lang
resource.descriptor = u"Translations: %s" % languages_mapping().get(lang, [unknown_lang_txt])[0]
yield resource
|
none
| 1
| 1.865753
| 2
|
|
scripts/proj2json.py
|
jjimenezshaw/crs-explorer
| 6
|
6627291
|
#!/usr/bin/env python
import json
import os
import pyproj
from contextlib import redirect_stdout
if __name__ == '__main__':
dest_dir = os.getenv('DEST_DIR', '.')
dest_file = f'{dest_dir}/crslist.json'
metadata_file = f'{dest_dir}/metadata.txt'
pyproj.show_versions()
with open(metadata_file, 'w') as f:
with redirect_stdout(f):
pyproj.show_versions()
crs_list = pyproj.database.query_crs_info(allow_deprecated=True)
crss = sorted(
[crs._asdict() for crs in crs_list if crs.area_of_use],
key=lambda d: d['auth_name'] + d['code'].zfill(7)
)
with open(dest_file, 'w') as fp:
json.dump(crss, fp, indent=2, default=lambda o: str(o).replace('PJType.', ''))
types = ({'path': 'wkt1', 'version': 'WKT1_GDAL'},
{'path': 'wkt2', 'version': 'WKT2_2019'})
for c in crss:
crs = pyproj.CRS.from_authority(auth_name=c["auth_name"], code=c["code"])
for t in types:
wkt = crs.to_wkt(version=t["version"], pretty=True)
wtk_file = f'{dest_dir}/{t["path"]}/{c["auth_name"]}/{c["code"]}.txt'
if not os.path.exists(os.path.dirname(wtk_file)):
os.makedirs(os.path.dirname(wtk_file))
with open(wtk_file, 'w') as fp:
if not wkt:
type = str(c["type"]).replace('PJType.', '')
wkt = (f'Error: {c["auth_name"]}:{c["code"]} cannot be written as {t["version"]}\n'
f' type: {type}\n'
f' name: {c["name"]}')
fp.write(wkt)
fp.write('\n')
|
#!/usr/bin/env python
import json
import os
import pyproj
from contextlib import redirect_stdout
if __name__ == '__main__':
dest_dir = os.getenv('DEST_DIR', '.')
dest_file = f'{dest_dir}/crslist.json'
metadata_file = f'{dest_dir}/metadata.txt'
pyproj.show_versions()
with open(metadata_file, 'w') as f:
with redirect_stdout(f):
pyproj.show_versions()
crs_list = pyproj.database.query_crs_info(allow_deprecated=True)
crss = sorted(
[crs._asdict() for crs in crs_list if crs.area_of_use],
key=lambda d: d['auth_name'] + d['code'].zfill(7)
)
with open(dest_file, 'w') as fp:
json.dump(crss, fp, indent=2, default=lambda o: str(o).replace('PJType.', ''))
types = ({'path': 'wkt1', 'version': 'WKT1_GDAL'},
{'path': 'wkt2', 'version': 'WKT2_2019'})
for c in crss:
crs = pyproj.CRS.from_authority(auth_name=c["auth_name"], code=c["code"])
for t in types:
wkt = crs.to_wkt(version=t["version"], pretty=True)
wtk_file = f'{dest_dir}/{t["path"]}/{c["auth_name"]}/{c["code"]}.txt'
if not os.path.exists(os.path.dirname(wtk_file)):
os.makedirs(os.path.dirname(wtk_file))
with open(wtk_file, 'w') as fp:
if not wkt:
type = str(c["type"]).replace('PJType.', '')
wkt = (f'Error: {c["auth_name"]}:{c["code"]} cannot be written as {t["version"]}\n'
f' type: {type}\n'
f' name: {c["name"]}')
fp.write(wkt)
fp.write('\n')
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.350067
| 2
|
cscs-checks/apps/jupyter/check_ipcmagic.py
|
toxa81/reframe
| 0
|
6627292
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.osext as osext
import reframe.utility.sanity as sn
from reframe.core.backends import getlauncher
@rfm.simple_test
class IPCMagicCheck(rfm.RunOnlyRegressionTest):
def __init__(self):
self.descr = 'Distributed training with TensorFlow using ipyparallel'
self.valid_systems = ['daint:gpu', 'dom:gpu']
self.valid_prog_environs = ['PrgEnv-gnu']
cray_cdt_version = osext.cray_cdt_version()
# FIXME: The following will not be needed after the Daint upgrade
if self.current_system.name == 'dom':
self.modules = [
'ipcmagic',
f'Horovod/0.21.0-CrayGNU-{cray_cdt_version}-tf-2.4.0'
]
else:
self.modules = [
'ipcmagic',
'Horovod/0.19.1-CrayGNU-20.08-tf-2.2.0'
]
self.num_tasks = 2
self.num_tasks_per_node = 1
self.executable = 'ipython'
self.executable_opts = ['tf-hvd-sgd-ipc-tf2.py']
nids = sn.extractall(r'nid(?P<nid>\d+)',
self.stdout, 'nid', str)
self.sanity_patterns = sn.all([
sn.assert_ne(nids, []),
sn.assert_ne(nids[0], nids[1])
])
self.reference = {
'daint:gpu': {
'slope': (2.0, -0.1, 0.1, None),
'offset': (0.0, -0.1, 0.1, None),
'retries': (0, None, None, None),
'time': (10, None, None, 's'),
},
'dom:gpu': {
'slope': (2.0, -0.1, 0.1, None),
'offset': (0.0, -0.1, 0.1, None),
'retries': (0, None, None, None),
'time': (10, None, None, 's'),
}
}
self.perf_patterns = {
'slope': sn.extractsingle(r'slope=(?P<slope>\S+)',
self.stdout, 'slope', float),
'offset': sn.extractsingle(r'offset=(?P<offset>\S+)',
self.stdout, 'offset', float),
'retries': 4 - sn.count(sn.findall(r'IPCluster is already running',
self.stdout)),
'time': sn.extractsingle(r'IPCluster is ready\!\s+'
r'\((?P<time>\d+) seconds\)',
self.stdout, 'time', float)
}
self.maintainers = ['RS', 'TR']
self.tags = {'production'}
@rfm.run_before('run')
def prepare_run(self):
# Change the job launcher since `ipython`
# needs to be launched without `srun`.
self.job.launcher = getlauncher('local')()
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.osext as osext
import reframe.utility.sanity as sn
from reframe.core.backends import getlauncher
@rfm.simple_test
class IPCMagicCheck(rfm.RunOnlyRegressionTest):
def __init__(self):
self.descr = 'Distributed training with TensorFlow using ipyparallel'
self.valid_systems = ['daint:gpu', 'dom:gpu']
self.valid_prog_environs = ['PrgEnv-gnu']
cray_cdt_version = osext.cray_cdt_version()
# FIXME: The following will not be needed after the Daint upgrade
if self.current_system.name == 'dom':
self.modules = [
'ipcmagic',
f'Horovod/0.21.0-CrayGNU-{cray_cdt_version}-tf-2.4.0'
]
else:
self.modules = [
'ipcmagic',
'Horovod/0.19.1-CrayGNU-20.08-tf-2.2.0'
]
self.num_tasks = 2
self.num_tasks_per_node = 1
self.executable = 'ipython'
self.executable_opts = ['tf-hvd-sgd-ipc-tf2.py']
nids = sn.extractall(r'nid(?P<nid>\d+)',
self.stdout, 'nid', str)
self.sanity_patterns = sn.all([
sn.assert_ne(nids, []),
sn.assert_ne(nids[0], nids[1])
])
self.reference = {
'daint:gpu': {
'slope': (2.0, -0.1, 0.1, None),
'offset': (0.0, -0.1, 0.1, None),
'retries': (0, None, None, None),
'time': (10, None, None, 's'),
},
'dom:gpu': {
'slope': (2.0, -0.1, 0.1, None),
'offset': (0.0, -0.1, 0.1, None),
'retries': (0, None, None, None),
'time': (10, None, None, 's'),
}
}
self.perf_patterns = {
'slope': sn.extractsingle(r'slope=(?P<slope>\S+)',
self.stdout, 'slope', float),
'offset': sn.extractsingle(r'offset=(?P<offset>\S+)',
self.stdout, 'offset', float),
'retries': 4 - sn.count(sn.findall(r'IPCluster is already running',
self.stdout)),
'time': sn.extractsingle(r'IPCluster is ready\!\s+'
r'\((?P<time>\d+) seconds\)',
self.stdout, 'time', float)
}
self.maintainers = ['RS', 'TR']
self.tags = {'production'}
@rfm.run_before('run')
def prepare_run(self):
# Change the job launcher since `ipython`
# needs to be launched without `srun`.
self.job.launcher = getlauncher('local')()
|
en
| 0.789575
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # FIXME: The following will not be needed after the Daint upgrade # Change the job launcher since `ipython` # needs to be launched without `srun`.
| 1.839257
| 2
|
examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_in_scenario_set_example.py
|
felipeek/bullet3
| 9,136
|
6627293
|
<gh_stars>1000+
r"""ScenarioSet example for Laikago MPC controller.
blaze run -c opt \
//robotics/reinforcement_learning/minitaur/agents/baseline_controller\
:locomotion_controller_in_scenario_set_example -- --gait=slow_trot \
--add_random_push=True
"""
from absl import app
from absl import flags
import gin
import numpy as np
import scipy.interpolate
from pybullet_envs.minitaur.agents.baseline_controller import locomotion_controller_setup
from pybullet_envs.minitaur.envs_v2 import env_loader
FLAGS = flags.FLAGS
SCENARIO_SET_CONFIG = """
import pybullet_envs.minitaur.envs_v2.scenarios.locomotion_simple_scenario_set
include "google3/robotics/reinforcement_learning/minitaur/envs_v2/scenarios/default_scenario_set.gin"
default_scenario_set/singleton.constructor = @locomotion_simple_scenario_set.LocomotionSimpleScenarioSet
locomotion_simple_scenario_set.LocomotionSimpleScenarioSet.selector = "flat_ground"
locomotion_gym_env.LocomotionGymEnv.task = @scenario_set.task()
locomotion_gym_env.LocomotionGymEnv.scene = @scenario_set.scene()
locomotion_gym_env.LocomotionGymEnv.env_randomizers = [
@scenario_set.env_randomizer()
]
"""
_MAX_TIME_SECONDS = 30
flags.DEFINE_enum("gait", "fast_trot",
["fast_trot", "slow_trot", "walk", "stand"],
"The gait pattern to use")
flags.DEFINE_boolean("add_random_push", False,
"whether to add random push to the robot in simulation")
def _start_stop_profile(max_speed=0.5, axis=0, duration=3):
speed_profile = np.zeros((3, 4))
speed_profile[1, axis] = max_speed
return (0, 0.5, duration + 0.5), speed_profile.tolist()
def _random_speed_profile(max_speed=1, axis=0, time_interval=1.0):
num_pts = 11
time_points = np.arange(num_pts) * time_interval
speed_profile = np.zeros((num_pts, 4))
speed_profile[:, axis] = np.random.uniform(0, max_speed, num_pts)
speed_profile[-1, :] = 0
return time_points.tolist(), speed_profile.tolist()
def _body_height_profile(z_range=(0.3, 0.55)):
del z_range
# TODO(tingnan): Implement this.
def _generate_linear_angular_speed(t, time_points, speed_points):
"""Creates an example speed profile based on time for demo purpose."""
speed = scipy.interpolate.interp1d(
time_points,
speed_points,
kind="previous",
fill_value="extrapolate",
axis=0)(
t)
return speed[0:3], speed[3]
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def _gen_stability_test_start_stop():
"""Generates the speed profile for start/stop tests."""
axis_to_name = {
0: "velocity x",
1: "velocity y",
3: "angular velocity z",
}
axis_to_max_speed = {
0: 1.0,
1: 0.5,
3: 2.5,
}
gait_multiplier = {
"slow_trot": 0.7,
"walk": 0.3,
"fast_trot": 1.0,
}
for axis in (0, 1, 3):
yield axis_to_name[axis], _start_stop_profile(
axis_to_max_speed[axis] * gait_multiplier[FLAGS.gait], axis)
def _gen_stability_test_random():
"""Generates the speed profile for random walking tests."""
axis_to_name = {
0: "velocity x",
1: "velocity y",
3: "angular velocity z",
}
axis_to_max_speed = {
0: 1.0,
1: 0.5,
3: 2.5,
}
gait_multiplier = {
"slow_trot": 0.7,
"walk": 0.3,
"fast_trot": 1.0,
}
for axis in (0, 1, 3):
yield axis_to_name[axis], _random_speed_profile(
axis_to_max_speed[axis] * gait_multiplier[FLAGS.gait], axis)
def _test_stability(max_time=5, render=False, test_generator=None):
"""Tests the stability of the controller using speed profiles."""
locomotion_controller_setup.load_sim_config(render=render)
gin.parse_config(SCENARIO_SET_CONFIG)
if FLAGS.add_random_push:
locomotion_controller_setup.add_random_push_config()
env = env_loader.load()
controller = locomotion_controller_setup.setup_controller(
env.robot, gait=FLAGS.gait)
for name, speed_profile in test_generator():
env.reset()
controller.reset()
current_time = 0
while current_time < max_time:
current_time = env.get_time_since_reset()
lin_speed, ang_speed = _generate_linear_angular_speed(
current_time, speed_profile[0], speed_profile[1])
_update_controller_params(controller, lin_speed, ang_speed)
# Needed before every call to get_action().
controller.update()
hybrid_action = controller.get_action()
_, _, done, _ = env.step(hybrid_action)
if done:
break
print(f"Scene name: flat ground. Random push: {FLAGS.add_random_push}. "
f"Survival time for {name} = {speed_profile[1]} is {current_time}")
def main(argv):
del argv
_test_stability(render=True, test_generator=_gen_stability_test_start_stop)
_test_stability(
max_time=15, render=True, test_generator=_gen_stability_test_random)
if __name__ == "__main__":
app.run(main)
|
r"""ScenarioSet example for Laikago MPC controller.
blaze run -c opt \
//robotics/reinforcement_learning/minitaur/agents/baseline_controller\
:locomotion_controller_in_scenario_set_example -- --gait=slow_trot \
--add_random_push=True
"""
from absl import app
from absl import flags
import gin
import numpy as np
import scipy.interpolate
from pybullet_envs.minitaur.agents.baseline_controller import locomotion_controller_setup
from pybullet_envs.minitaur.envs_v2 import env_loader
FLAGS = flags.FLAGS
SCENARIO_SET_CONFIG = """
import pybullet_envs.minitaur.envs_v2.scenarios.locomotion_simple_scenario_set
include "google3/robotics/reinforcement_learning/minitaur/envs_v2/scenarios/default_scenario_set.gin"
default_scenario_set/singleton.constructor = @locomotion_simple_scenario_set.LocomotionSimpleScenarioSet
locomotion_simple_scenario_set.LocomotionSimpleScenarioSet.selector = "flat_ground"
locomotion_gym_env.LocomotionGymEnv.task = @scenario_set.task()
locomotion_gym_env.LocomotionGymEnv.scene = @scenario_set.scene()
locomotion_gym_env.LocomotionGymEnv.env_randomizers = [
@scenario_set.env_randomizer()
]
"""
_MAX_TIME_SECONDS = 30
flags.DEFINE_enum("gait", "fast_trot",
["fast_trot", "slow_trot", "walk", "stand"],
"The gait pattern to use")
flags.DEFINE_boolean("add_random_push", False,
"whether to add random push to the robot in simulation")
def _start_stop_profile(max_speed=0.5, axis=0, duration=3):
speed_profile = np.zeros((3, 4))
speed_profile[1, axis] = max_speed
return (0, 0.5, duration + 0.5), speed_profile.tolist()
def _random_speed_profile(max_speed=1, axis=0, time_interval=1.0):
num_pts = 11
time_points = np.arange(num_pts) * time_interval
speed_profile = np.zeros((num_pts, 4))
speed_profile[:, axis] = np.random.uniform(0, max_speed, num_pts)
speed_profile[-1, :] = 0
return time_points.tolist(), speed_profile.tolist()
def _body_height_profile(z_range=(0.3, 0.55)):
del z_range
# TODO(tingnan): Implement this.
def _generate_linear_angular_speed(t, time_points, speed_points):
"""Creates an example speed profile based on time for demo purpose."""
speed = scipy.interpolate.interp1d(
time_points,
speed_points,
kind="previous",
fill_value="extrapolate",
axis=0)(
t)
return speed[0:3], speed[3]
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def _gen_stability_test_start_stop():
  """Generates the speed profiles for the start/stop tests.

  Yields:
    (test name, (time_points, speed_points)) pairs, one per commanded
    axis, with the peak speed scaled down for the slower gaits.
  """
  axis_to_name = {
      0: "velocity x",
      1: "velocity y",
      3: "angular velocity z",
  }
  axis_to_max_speed = {
      0: 1.0,
      1: 0.5,
      3: 2.5,
  }
  # BUG FIX: the --gait flag allows "stand" but the multiplier table did
  # not, raising KeyError; a standing robot gets a zero speed command.
  gait_multiplier = {
      "slow_trot": 0.7,
      "walk": 0.3,
      "fast_trot": 1.0,
      "stand": 0.0,
  }
  for axis in (0, 1, 3):
    yield axis_to_name[axis], _start_stop_profile(
        axis_to_max_speed[axis] * gait_multiplier[FLAGS.gait], axis)
def _gen_stability_test_random():
  """Generates the speed profiles for the random-walking tests.

  Yields:
    (test name, (time_points, speed_points)) pairs, one per commanded
    axis, with the peak speed scaled down for the slower gaits.
  """
  axis_to_name = {
      0: "velocity x",
      1: "velocity y",
      3: "angular velocity z",
  }
  axis_to_max_speed = {
      0: 1.0,
      1: 0.5,
      3: 2.5,
  }
  # BUG FIX: the --gait flag allows "stand" but the multiplier table did
  # not, raising KeyError; a standing robot gets a zero speed command.
  gait_multiplier = {
      "slow_trot": 0.7,
      "walk": 0.3,
      "fast_trot": 1.0,
      "stand": 0.0,
  }
  for axis in (0, 1, 3):
    yield axis_to_name[axis], _random_speed_profile(
        axis_to_max_speed[axis] * gait_multiplier[FLAGS.gait], axis)
def _test_stability(max_time=5, render=False, test_generator=None):
  """Runs the MPC controller against each generated speed profile.

  Args:
    max_time: Simulated seconds to run each profile before moving on.
    render: Whether to render the simulation.
    test_generator: Callable yielding (name, (time_points, speed_points)).
  """
  locomotion_controller_setup.load_sim_config(render=render)
  gin.parse_config(SCENARIO_SET_CONFIG)
  if FLAGS.add_random_push:
    locomotion_controller_setup.add_random_push_config()
  env = env_loader.load()
  controller = locomotion_controller_setup.setup_controller(
      env.robot, gait=FLAGS.gait)
  for name, profile in test_generator():
    env.reset()
    controller.reset()
    elapsed = 0
    while elapsed < max_time:
      elapsed = env.get_time_since_reset()
      lin_speed, ang_speed = _generate_linear_angular_speed(
          elapsed, profile[0], profile[1])
      _update_controller_params(controller, lin_speed, ang_speed)
      # Needed before every call to get_action().
      controller.update()
      _, _, done, _ = env.step(controller.get_action())
      if done:
        break
    print(f"Scene name: flat ground. Random push: {FLAGS.add_random_push}. "
          f"Survival time for {name} = {profile[1]} is {elapsed}")
def main(argv):
  """Runs the start/stop and random-walk stability suites with rendering."""
  del argv
  _test_stability(render=True, test_generator=_gen_stability_test_start_stop)
  # The random-walk suite needs more time to cover its 11 waypoints.
  _test_stability(
      max_time=15, render=True, test_generator=_gen_stability_test_random)


if __name__ == "__main__":
  app.run(main)
|
en
| 0.317097
|
ScenarioSet example for Laikago MPC controller. blaze run -c opt \ //robotics/reinforcement_learning/minitaur/agents/baseline_controller\ :locomotion_controller_in_scenario_set_example -- --gait=slow_trot \ --add_random_push=True import pybullet_envs.minitaur.envs_v2.scenarios.locomotion_simple_scenario_set include "google3/robotics/reinforcement_learning/minitaur/envs_v2/scenarios/default_scenario_set.gin" default_scenario_set/singleton.constructor = @locomotion_simple_scenario_set.LocomotionSimpleScenarioSet locomotion_simple_scenario_set.LocomotionSimpleScenarioSet.selector = "flat_ground" locomotion_gym_env.LocomotionGymEnv.task = @scenario_set.task() locomotion_gym_env.LocomotionGymEnv.scene = @scenario_set.scene() locomotion_gym_env.LocomotionGymEnv.env_randomizers = [ @scenario_set.env_randomizer() ] # TODO(tingnan): Implement this. Creates an example speed profile based on time for demo purpose. Generates the speed profile for start/stop tests. Generates the speed profile for random walking tests. Tests the stability of the controller using speed profiles. # Needed before every call to get_action().
| 2.587359
| 3
|
example/iiko/test_modify_update.py
|
businka/HttpSniffer
| 0
|
6627294
|
<filename>example/iiko/test_modify_update.py
import xml.etree.ElementTree as ET
from uuid import uuid4
def parse_response_data(data):
    """Injects a new order item into an iiko update-items response.

    Reads an order-item template from 'new_order.xml' in the working
    directory, fills in fresh identifiers, appends it under
    returnValue/items, and returns the re-serialized document.

    Args:
        data: Raw response bytes (UTF-8 encoded XML).

    Returns:
        The modified document serialized as UTF-8 bytes.
    """
    root = ET.fromstring(data.decode('utf-8'))
    items_node = root.find('./returnValue/items')
    order_number = '16'
    order_id = f'00000000-0000-0000-0000-00000000000{order_number}'
    with open('new_order.xml', 'r', encoding='utf-8') as file:
        xml = file.read()
    item = xml.format(order_id=order_id, order_number=order_number, guest_id=str(uuid4()), item_id=str(uuid4()))
    items_node.append(ET.fromstring(item))
    # BUG FIX: the serialized result was previously discarded (assigned to a
    # throwaway local followed by a dangling `pass`); return it to the caller.
    return ET.tostring(root, encoding='utf-8', method='xml')
def parse_wireshark_result():
    """Extracts the ASCII column from an embedded wireshark hex-dump.

    Each dump row looks like '<offset> <hex bytes> <ascii>'. The slice
    offset 56 appears tuned to the original capture's column layout --
    TODO confirm against a real multi-line dump.

    Returns:
        The concatenated ASCII-column text of all sufficiently long rows.
    """
    dump = '''01d0 35 31 2d 61 32 31 39 2d 66 61 62 30 36 36 66 62 51-a219-fab066fb'''
    result = ''
    for row in dump.split('\n'):
        # Rows shorter than the ASCII-column offset carry no payload.
        if len(row) > 56:
            result += row[56:]
    # BUG FIX: `result` was built but never returned (function yielded None).
    return result
if __name__ == '__main__':
    # Exercise the parser against a captured iiko response payload.
    with open('response_update_items.xml', 'rb') as file:
        parse_response_data(file.read())
|
<filename>example/iiko/test_modify_update.py
import xml.etree.ElementTree as ET
from uuid import uuid4
def parse_response_data(data):
root = ET.fromstring(data.decode('utf-8'))
items_node = root.find('./returnValue/items')
order_number = '16'
order_id = f'00000000-0000-0000-0000-00000000000{order_number}'
with open('new_order.xml', 'r', encoding='utf-8') as file:
xml = file.read()
item = xml.format(order_id=order_id, order_number=order_number, guest_id=str(uuid4()), item_id=str(uuid4()))
item_root = ET.fromstring(item)
items_node.append(item_root)
a = ET.tostring(root, encoding='utf-8', method='xml')
pass
def parse_wireshark_result():
# if 'null' in entities_node.attrib:
# a = 1
# else:
# a = 2
a = '''01d0 35 31 2d 61 32 31 39 2d 66 61 62 30 36 36 66 62 51-a219-fab066fb'''
rows = a.split('\n')
res = ''
for elem in rows:
if len(elem) > 56:
res += elem[56:]
pass
if __name__ == '__main__':
with open('response_update_items.xml', 'rb') as file:
parse_response_data(file.read())
|
en
| 0.286606
|
# if 'null' in entities_node.attrib: # a = 1 # else: # a = 2 01d0 35 31 2d 61 32 31 39 2d 66 61 62 30 36 36 66 62 51-a219-fab066fb
| 2.479245
| 2
|
convert_to_records.py
|
caiyueliang/MyAgeGenderEstimate
| 564
|
6627295
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from datetime import datetime
from scipy.io import loadmat
import tensorflow as tf
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
import cv2
import pandas as pd
import numpy as np
import skimage.io as io
from tqdm import tqdm
from sklearn.model_selection import train_test_split
FLAGS = None
def _int64_feature(value):
    """Wraps a scalar int in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wraps a bytes value in a tf.train.Feature holding a BytesList."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
    """Converts a face dataset to a TFRecord file of aligned face crops.

    Filters out low-confidence, age-out-of-range and gender-unlabeled rows,
    aligns each remaining face to 64x64 with dlib, and writes one Example
    (age, gender, image_raw) per kept face to '<name>.tfrecords'.

    Args:
        data_set: DataFrame with columns file_name, gender, age, score and
            second_score (see get_meta).
        name: Output file stem.
    """
    file_name = data_set.file_name
    genders = data_set.gender
    ages = data_set.age
    face_score = data_set.score
    # Retained for the (currently disabled) second-face filter.
    second_face_score = data_set.second_score
    num_examples = data_set.shape[0]
    base_dir = "data/imdb_crop"
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor and the face aligner
    shape_predictor = 'shape_predictor_68_face_landmarks.dat'
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    fa = FaceAligner(predictor, desiredFaceWidth=64)
    error = 0
    total = 0
    filename = name + '.tfrecords'
    print('Writing', filename)
    with tf.python_io.TFRecordWriter(filename) as writer:
        for index in tqdm(range(num_examples)):
            # Skip low-confidence detections.
            if face_score[index] < 0.75:
                continue
            # BUG FIX: was `if ~(0 <= ages[index] <= 100)`, which relies on
            # numpy bool semantics (~True is truthy for plain Python bools).
            if not 0 <= ages[index] <= 100:
                continue
            if np.isnan(genders[index]):
                continue
            try:
                # load the input image, resize it, and convert it to grayscale
                image = cv2.imread(os.path.join(base_dir, str(file_name[index][0])), cv2.IMREAD_COLOR)
                image = imutils.resize(image, width=256)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 2)
                # Keep only unambiguous single-face images.
                if len(rects) != 1:
                    continue
                image_raw = fa.align(image, gray, rects[0]).tostring()
            except Exception:
                # cv2.imread returns None for missing files (no IOError), so a
                # broad catch is needed here.
                error = error + 1
                # BUG FIX: previously fell through and wrote a record with a
                # stale (or undefined) image_raw; skip the sample instead.
                continue
            example = tf.train.Example(features=tf.train.Features(feature={
                'age': _int64_feature(int(ages[index])),
                'gender': _int64_feature(int(genders[index])),
                'image_raw': _bytes_feature(image_raw)}))
            writer.write(example.SerializeToString())
            total = total + 1
    print("There are ", error, " missing pictures")
    print("Found", total, "valid faces")
def get_meta(mat_path, db):
    """Loads IMDB/WIKI face metadata from a .mat file into a DataFrame.

    Args:
        mat_path: Path to the .mat annotation file.
        db: Top-level key inside the .mat structure (e.g. 'imdb').

    Returns:
        DataFrame with file_name, gender, age, score and second_score
        columns, one row per annotated image.
    """
    annotations = loadmat(mat_path)[db][0, 0]
    photo_taken = annotations["photo_taken"][0]  # year
    dob = annotations["dob"][0]  # Matlab serial date number
    columns = {
        "file_name": annotations["full_path"][0],
        "gender": annotations["gender"][0],
        "age": [calc_age(taken, born) for taken, born in zip(photo_taken, dob)],
        "score": annotations["face_score"][0],
        "second_score": annotations["second_face_score"][0],
    }
    return pd.DataFrame(columns)
def calc_age(taken, dob):
    """Returns the subject's age in the year the photo was taken.

    Args:
        taken: Year the photo was taken.
        dob: Date of birth as a Matlab serial date number.

    Returns:
        Age in whole years, assuming the photo was taken mid-year.
    """
    # Matlab serial dates are offset from Python ordinals by 366 days;
    # clamp to 1 to survive corrupt (tiny) date values.
    birth = datetime.fromordinal(max(int(dob) - 366, 1))
    # Mid-year assumption: birthdays in the second half of the year have
    # not yet occurred when the photo is taken.
    age = taken - birth.year
    return age if birth.month < 7 else age - 1
def main(unused_argv):
    """Builds train/test TFRecord files from the IMDB face metadata."""
    # Get the data.
    # data_sets = pd.read_csv("gender_age_train.txt", header=None, sep=" ")
    # data_sets.columns = ["file_name", "gender", "age"]
    data_sets = get_meta('./data/imdb_crop/imdb.mat','imdb')
    # data_sets = data_sets[data_sets.age >= 0]
    # data_sets = data_sets[data_sets.age <= 100]
    # NOTE(review): train_size=0.001 keeps only 0.1% of the rows for training
    # (looks like a smoke-test setting) -- confirm before a real run.
    train_sets,test_sets = train_test_split(data_sets,train_size=0.001,random_state=2017)
    train_sets.reset_index(drop=True, inplace=True)
    test_sets.reset_index(drop=True, inplace=True)
    # data_sets = mnist.read_data_sets(FLAGS.directory,
    #                                  dtype=tf.uint8,
    #                                  reshape=False,
    #                                  validation_size=FLAGS.validation_size)
    # Convert to Examples and write the result to TFRecords.
    convert_to(train_sets, 'train')
    convert_to(test_sets,'test')
    # convert_to(data_sets.validation, 'validation')
    # convert_to(data_sets.test, 'test')
if __name__ == '__main__':
    # CLI options below are disabled; kept for reference.
    parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     '--directory',
    #     type=str,
    #     default='/tmp/data',
    #     help='Directory to download data files and write the converted result'
    # )
    # parser.add_argument(
    #     '--validation_size',
    #     type=int,
    #     default=5000,
    #     help="""\
    #     Number of examples to separate from the training data for the validation
    #     set.\
    #     """
    # )
    FLAGS, unparsed = parser.parse_known_args()
    # tf.app.run forwards any unparsed args (after argv[0]) to main().
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from datetime import datetime
from scipy.io import loadmat
import tensorflow as tf
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
import cv2
import pandas as pd
import numpy as np
import skimage.io as io
from tqdm import tqdm
from sklearn.model_selection import train_test_split
FLAGS = None
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
"""Converts a dataset to tfrecords."""
file_name = data_set.file_name
genders = data_set.gender
ages = data_set.age
face_score = data_set.score
second_face_score = data_set.second_score
num_examples = data_set.shape[0]
base_dir = "data/imdb_crop"
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
shape_predictor = 'shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor)
fa = FaceAligner(predictor, desiredFaceWidth=64)
error=0
total=0
# if images.shape[0] != num_examples:
# raise ValueError('Images size %d does not match label size %d.' %
# (images.shape[0], num_examples))
# rows = images.shape[1]
# cols = images.shape[2]
# depth = images.shape[3]
filename = os.path.join(name + '.tfrecords')
print('Writing', filename)
with tf.python_io.TFRecordWriter(filename) as writer:
for index in tqdm(range(num_examples)):
if face_score[index] < 0.75:
continue
# if (~np.isnan(second_face_score[index])) and second_face_score[index] > 0.0:
# continue
if ~(0 <= ages[index] <= 100):
continue
if np.isnan(genders[index]):
continue
try:
# image_raw = io.imread(os.path.join(base_dir,file_names[index])).tostring()
# image_raw = open(os.path.join(base_dir,str(file_name[index][0]))).read()
# load the input image, resize it, and convert it to grayscale
image = cv2.imread(os.path.join(base_dir,str(file_name[index][0])),cv2.IMREAD_COLOR)
image = imutils.resize(image, width=256)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 2)
if len(rects)!=1:
continue
else:
image_raw = fa.align(image, gray, rects[0])
image_raw = image_raw.tostring()
except IOError: #some files seem not exist in face_data dir
error = error+1
pass
# image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
# 'height': _int64_feature(rows),
# 'width': _int64_feature(cols),
# 'depth': _int64_feature(depth),
'age': _int64_feature(int(ages[index])),
'gender':_int64_feature(int(genders[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
total = total+1
print("There are ",error," missing pictures" )
print("Found" ,total, "valid faces")
def get_meta(mat_path, db):
meta = loadmat(mat_path)
full_path = meta[db][0, 0]["full_path"][0]
dob = meta[db][0, 0]["dob"][0] # Matlab serial date number
gender = meta[db][0, 0]["gender"][0]
photo_taken = meta[db][0, 0]["photo_taken"][0] # year
face_score = meta[db][0, 0]["face_score"][0]
second_face_score = meta[db][0, 0]["second_face_score"][0]
age = [calc_age(photo_taken[i], dob[i]) for i in range(len(dob))]
data = {"file_name": full_path, "gender": gender, "age": age, "score": face_score,
"second_score": second_face_score}
dataset = pd.DataFrame(data)
return dataset
def calc_age(taken, dob):
birth = datetime.fromordinal(max(int(dob) - 366, 1))
# assume the photo was taken in the middle of the year
if birth.month < 7:
return taken - birth.year
else:
return taken - birth.year - 1
def main(unused_argv):
# Get the data.
# data_sets = pd.read_csv("gender_age_train.txt", header=None, sep=" ")
# data_sets.columns = ["file_name", "gender", "age"]
data_sets = get_meta('./data/imdb_crop/imdb.mat','imdb')
# data_sets = data_sets[data_sets.age >= 0]
# data_sets = data_sets[data_sets.age <= 100]
train_sets,test_sets = train_test_split(data_sets,train_size=0.001,random_state=2017)
train_sets.reset_index(drop=True, inplace=True)
test_sets.reset_index(drop=True, inplace=True)
# data_sets = mnist.read_data_sets(FLAGS.directory,
# dtype=tf.uint8,
# reshape=False,
# validation_size=FLAGS.validation_size)
# Convert to Examples and write the result to TFRecords.
convert_to(train_sets, 'train')
convert_to(test_sets,'test')
# convert_to(data_sets.validation, 'validation')
# convert_to(data_sets.test, 'test')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument(
# '--directory',
# type=str,
# default='/tmp/data',
# help='Directory to download data files and write the converted result'
# )
# parser.add_argument(
# '--validation_size',
# type=int,
# default=5000,
# help="""\
# Number of examples to separate from the training data for the validation
# set.\
# """
# )
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
en
| 0.564417
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Converts MNIST data to TFRecords file format with Example protos. Converts a dataset to tfrecords. # initialize dlib's face detector (HOG-based) and then create # the facial landmark predictor and the face aligner # if images.shape[0] != num_examples: # raise ValueError('Images size %d does not match label size %d.' % # (images.shape[0], num_examples)) # rows = images.shape[1] # cols = images.shape[2] # depth = images.shape[3] # if (~np.isnan(second_face_score[index])) and second_face_score[index] > 0.0: # continue # image_raw = io.imread(os.path.join(base_dir,file_names[index])).tostring() # image_raw = open(os.path.join(base_dir,str(file_name[index][0]))).read() # load the input image, resize it, and convert it to grayscale #some files seem not exist in face_data dir # image_raw = images[index].tostring() # 'height': _int64_feature(rows), # 'width': _int64_feature(cols), # 'depth': _int64_feature(depth), # Matlab serial date number # year # assume the photo was taken in the middle of the year # Get the data. 
# data_sets = pd.read_csv("gender_age_train.txt", header=None, sep=" ") # data_sets.columns = ["file_name", "gender", "age"] # data_sets = data_sets[data_sets.age >= 0] # data_sets = data_sets[data_sets.age <= 100] # data_sets = mnist.read_data_sets(FLAGS.directory, # dtype=tf.uint8, # reshape=False, # validation_size=FLAGS.validation_size) # Convert to Examples and write the result to TFRecords. # convert_to(data_sets.validation, 'validation') # convert_to(data_sets.test, 'test') # parser.add_argument( # '--directory', # type=str, # default='/tmp/data', # help='Directory to download data files and write the converted result' # ) # parser.add_argument( # '--validation_size', # type=int, # default=5000, # help="""\ # Number of examples to separate from the training data for the validation # set.\ # """ # )
| 2.191235
| 2
|
venv/Lib/site-packages/torch/distributions/uniform.py
|
Westlanderz/AI-Plat1
| 1
|
6627296
|
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
class Uniform(Distribution):
r"""
Generates uniformly distributed random samples from the half-open interval
``[low, high)``.
Example::
>>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
>>> m.sample() # uniformly distributed in the range [0.0, 5.0)
tensor([ 2.3418])
Args:
low (float or Tensor): lower range (inclusive).
high (float or Tensor): upper range (exclusive).
"""
# TODO allow (loc,scale) parameterization to allow independent constraints.
arg_constraints = {'low': constraints.dependent(is_discrete=False, event_dim=0),
'high': constraints.dependent(is_discrete=False, event_dim=0)}
has_rsample = True
@property
def mean(self):
return (self.high + self.low) / 2
@property
def stddev(self):
return (self.high - self.low) / 12**0.5
@property
def variance(self):
return (self.high - self.low).pow(2) / 12
def __init__(self, low, high, validate_args=None):
self.low, self.high = broadcast_all(low, high)
if isinstance(low, Number) and isinstance(high, Number):
batch_shape = torch.Size()
else:
batch_shape = self.low.size()
super(Uniform, self).__init__(batch_shape, validate_args=validate_args)
if self._validate_args and not torch.lt(self.low, self.high).all():
raise ValueError("Uniform is not defined when low>= high")
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Uniform, _instance)
batch_shape = torch.Size(batch_shape)
new.low = self.low.expand(batch_shape)
new.high = self.high.expand(batch_shape)
super(Uniform, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.interval(self.low, self.high)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
return self.low + rand * (self.high - self.low)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
lb = self.low.le(value).type_as(self.low)
ub = self.high.gt(value).type_as(self.low)
return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
result = (value - self.low) / (self.high - self.low)
return result.clamp(min=0, max=1)
def icdf(self, value):
result = value * (self.high - self.low) + self.low
return result
def entropy(self):
return torch.log(self.high - self.low)
|
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
class Uniform(Distribution):
r"""
Generates uniformly distributed random samples from the half-open interval
``[low, high)``.
Example::
>>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
>>> m.sample() # uniformly distributed in the range [0.0, 5.0)
tensor([ 2.3418])
Args:
low (float or Tensor): lower range (inclusive).
high (float or Tensor): upper range (exclusive).
"""
# TODO allow (loc,scale) parameterization to allow independent constraints.
arg_constraints = {'low': constraints.dependent(is_discrete=False, event_dim=0),
'high': constraints.dependent(is_discrete=False, event_dim=0)}
has_rsample = True
@property
def mean(self):
return (self.high + self.low) / 2
@property
def stddev(self):
return (self.high - self.low) / 12**0.5
@property
def variance(self):
return (self.high - self.low).pow(2) / 12
def __init__(self, low, high, validate_args=None):
self.low, self.high = broadcast_all(low, high)
if isinstance(low, Number) and isinstance(high, Number):
batch_shape = torch.Size()
else:
batch_shape = self.low.size()
super(Uniform, self).__init__(batch_shape, validate_args=validate_args)
if self._validate_args and not torch.lt(self.low, self.high).all():
raise ValueError("Uniform is not defined when low>= high")
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Uniform, _instance)
batch_shape = torch.Size(batch_shape)
new.low = self.low.expand(batch_shape)
new.high = self.high.expand(batch_shape)
super(Uniform, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.interval(self.low, self.high)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
return self.low + rand * (self.high - self.low)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
lb = self.low.le(value).type_as(self.low)
ub = self.high.gt(value).type_as(self.low)
return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
result = (value - self.low) / (self.high - self.low)
return result.clamp(min=0, max=1)
def icdf(self, value):
result = value * (self.high - self.low) + self.low
return result
def entropy(self):
return torch.log(self.high - self.low)
|
en
| 0.614382
|
Generates uniformly distributed random samples from the half-open interval
``[low, high)``.
Example::
>>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
>>> m.sample() # uniformly distributed in the range [0.0, 5.0)
tensor([ 2.3418])
Args:
low (float or Tensor): lower range (inclusive).
high (float or Tensor): upper range (exclusive). # TODO allow (loc,scale) parameterization to allow independent constraints.
| 3.130079
| 3
|
hipotap_common/rpc/clients/order_rpc_client.py
|
leckijakub/hipotap
| 0
|
6627297
|
import pika
from hipotap_common.proto_messages.hipotap_pb2 import BaseResponsePB
from hipotap_common.proto_messages.order_pb2 import (
GetOrderRequestPB,
OrderListPB,
OrderPB,
OrderPaymentRequestPB,
TrendListPB,
)
from hipotap_common.queues.order_queues import (
GET_ORDER_QUEUE,
GET_TRENDS_QUEUE,
ORDER_PAYMENT_REQUEST_QUEUE,
ORDER_RESERVE_REQUEST_QUEUE,
ORDER_LIST_QUEUE,
)
from .rpc_client import RpcClient
class OrderRpcClient(RpcClient):
    """RPC client for the order service over RabbitMQ.

    Each public method serializes its request protobuf, publishes it to the
    matching queue with a reply-to callback queue, blocks until the reply
    arrives, and parses the reply into the expected protobuf type. The
    repeated publish/wait/parse boilerplate is factored into _call.
    """

    def _call(self, routing_key, body, response_pb):
        """Publishes `body` to `routing_key` and waits for the RPC reply.

        Args:
            routing_key: Destination queue name.
            body: Serialized request payload (bytes or str).
            response_pb: Fresh protobuf message to parse the reply into.

        Returns:
            `response_pb`, populated from the raw reply bytes.
        """
        self.init_callback()
        # Send request
        self.channel.basic_publish(
            exchange="",
            routing_key=routing_key,
            properties=pika.BasicProperties(
                reply_to=self.callback_queue, correlation_id=self.corr_id
            ),
            body=body,
        )
        # Wait for response; assumes init_callback() resets self.response
        # to None before each call -- TODO confirm in RpcClient.
        while self.response is None:
            self.connection.process_data_events()
        response_pb.ParseFromString(self.response)
        return response_pb

    def order_reserve_request(self, order_request_pb) -> BaseResponsePB:
        """Reserves an order; returns the service's base response."""
        return self._call(
            ORDER_RESERVE_REQUEST_QUEUE,
            order_request_pb.SerializeToString(),
            BaseResponsePB(),
        )

    def get_order_list(self, order_list_request_pb) -> BaseResponsePB:
        """Fetches the order list matching the given request."""
        return self._call(
            ORDER_LIST_QUEUE,
            order_list_request_pb.SerializeToString(),
            OrderListPB(),
        )

    def get_order(self, get_order_request_pb: GetOrderRequestPB) -> OrderPB:
        """Fetches a single order by its request identifier."""
        return self._call(
            GET_ORDER_QUEUE,
            get_order_request_pb.SerializeToString(),
            OrderPB(),
        )

    def order_payment_request(
        self, order_payment_request_pb: OrderPaymentRequestPB
    ) -> BaseResponsePB:
        """Submits an order payment request."""
        return self._call(
            ORDER_PAYMENT_REQUEST_QUEUE,
            order_payment_request_pb.SerializeToString(),
            BaseResponsePB(),
        )

    def get_trends_request(self):
        """Fetches current trends; the request carries no payload."""
        return self._call(GET_TRENDS_QUEUE, "", TrendListPB())
|
import pika
from hipotap_common.proto_messages.hipotap_pb2 import BaseResponsePB
from hipotap_common.proto_messages.order_pb2 import (
GetOrderRequestPB,
OrderListPB,
OrderPB,
OrderPaymentRequestPB,
TrendListPB,
)
from hipotap_common.queues.order_queues import (
GET_ORDER_QUEUE,
GET_TRENDS_QUEUE,
ORDER_PAYMENT_REQUEST_QUEUE,
ORDER_RESERVE_REQUEST_QUEUE,
ORDER_LIST_QUEUE,
)
from .rpc_client import RpcClient
class OrderRpcClient(RpcClient):
def order_reserve_request(self, order_request_pb) -> BaseResponsePB:
self.init_callback()
# Send request
self.channel.basic_publish(
exchange="",
routing_key=ORDER_RESERVE_REQUEST_QUEUE,
properties=pika.BasicProperties(
reply_to=self.callback_queue, correlation_id=self.corr_id
),
body=order_request_pb.SerializeToString(),
)
# Wait for response
while self.response is None:
self.connection.process_data_events()
response = BaseResponsePB()
response.ParseFromString(self.response)
return response
def get_order_list(self, order_list_request_pb) -> BaseResponsePB:
self.init_callback()
# Send request
self.channel.basic_publish(
exchange="",
routing_key=ORDER_LIST_QUEUE,
properties=pika.BasicProperties(
reply_to=self.callback_queue, correlation_id=self.corr_id
),
body=order_list_request_pb.SerializeToString(),
)
# Wait for response
while self.response is None:
self.connection.process_data_events()
response = OrderListPB()
response.ParseFromString(self.response)
return response
def get_order(self, get_order_request_pb: GetOrderRequestPB) -> OrderPB:
self.init_callback()
# Send request
self.channel.basic_publish(
exchange="",
routing_key=GET_ORDER_QUEUE,
properties=pika.BasicProperties(
reply_to=self.callback_queue, correlation_id=self.corr_id
),
body=get_order_request_pb.SerializeToString(),
)
# Wait for response
while self.response is None:
self.connection.process_data_events()
response = OrderPB()
response.ParseFromString(self.response)
return response
def order_payment_request(
self, order_payment_request_pb: OrderPaymentRequestPB
) -> BaseResponsePB:
self.init_callback()
# Send request
self.channel.basic_publish(
exchange="",
routing_key=ORDER_PAYMENT_REQUEST_QUEUE,
properties=pika.BasicProperties(
reply_to=self.callback_queue, correlation_id=self.corr_id
),
body=order_payment_request_pb.SerializeToString(),
)
# Wait for response
while self.response is None:
self.connection.process_data_events()
response = BaseResponsePB()
response.ParseFromString(self.response)
return response
def get_trends_request(self):
self.init_callback()
# Send request
self.channel.basic_publish(
exchange="",
routing_key=GET_TRENDS_QUEUE,
properties=pika.BasicProperties(
reply_to=self.callback_queue, correlation_id=self.corr_id
),
body="",
)
# Wait for response
while self.response is None:
self.connection.process_data_events()
response = TrendListPB()
response.ParseFromString(self.response)
return response
|
en
| 0.789524
|
# Send request # Wait for response # Send request # Wait for response # Send request # Wait for response # Send request # Wait for response # Send request # Wait for response
| 2.056366
| 2
|
docs/tutorials_torch/action_recognition/demo_i3d_kinetics400.py
|
Kh4L/gluon-cv
| 1
|
6627298
|
<gh_stars>1-10
"""1. Getting Started with Pre-trained I3D Models on Kinetcis400
================================================================
`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset
of realistic action videos, collected from YouTube. With 306,245 short trimmed videos
from 400 action categories, it is one of the largest and most widely used dataset in the research
community for benchmarking state-of-the-art video action recognition models.
`I3D <https://arxiv.org/abs/1705.07750>`_ (Inflated 3D Networks) is a widely adopted 3D video
classification network. It uses 3D convolution to learn spatiotemporal information directly from videos.
I3D is proposed to improve `C3D <https://arxiv.org/abs/1412.0767>`_ (Convolutional 3D Networks) by inflating from 2D models.
We can not only reuse the 2D models' architecture (e.g., ResNet, Inception), but also bootstrap
the model weights from 2D pretrained models. In this manner, training 3D networks for video
classification is feasible and getting much better results.
In this tutorial, we will demonstrate how to load a pre-trained I3D model from :ref:`gluoncv-model-zoo`
and classify a video clip from the Internet or your local disk into one of the 400 action classes.
Step by Step
------------
We will try out a pre-trained I3D model on a single video clip.
First, please follow the `installation guide <../../index.html#installation>`__
to install ``PyTorch`` and ``GluonCV`` if you haven't done so yet.
"""
import numpy as np
import decord
import torch
from gluoncv.torch.utils.model_utils import download
from gluoncv.torch.data.transforms.videotransforms import video_transforms, volume_transforms
from gluoncv.torch.engine.config import get_cfg_defaults
from gluoncv.torch.model_zoo import get_model
################################################################
# Then, we download a video and extract a 32-frame clip from it.
url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/abseiling_k400.mp4'
video_fname = download(url)
vr = decord.VideoReader(video_fname)
frame_id_list = range(0, 64, 2)
video_data = vr.get_batch(frame_id_list).asnumpy()
################################################################
# Now we define transformations for the video clip.
# This transformation function does four things:
# (1) resize the shorter side of video clip to short_side_size,
# (2) center crop the video clip to crop_size x crop_size,
# (3) transpose the video clip to ``num_channels*num_frames*height*width``,
# and (4) normalize it with mean and standard deviation calculated across all ImageNet images.
crop_size = 224
short_side_size = 256
transform_fn = video_transforms.Compose([video_transforms.Resize(short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(crop_size, crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
clip_input = transform_fn(video_data)
print('Video data is downloaded and preprocessed.')
################################################################
# Next, we load a pre-trained I3D model. Make sure to change the ``pretrained`` in the configuration file to True.
config_file = './scripts/action-recognition/configuration/i3d_resnet50_v1_kinetics400.yaml'
cfg = get_cfg_defaults()
cfg.merge_from_file(config_file)
model = get_model(cfg)
print('%s model is successfully loaded.' % cfg.CONFIG.MODEL.NAME)
################################################################
# Finally, we prepare the video clip and feed it to the model.
with torch.no_grad():
pred = model(torch.unsqueeze(clip_input, dim=0)).numpy()
print('The input video clip is classified to be class %d' % (np.argmax(pred)))
################################################################
# We can see that our pre-trained model predicts this video clip
# to be ``abseiling`` action with high confidence.
################################################################
# Next Step
# ---------
#
# If you would like to dive deeper into finetuing SOTA video models on your datasets,
# feel free to read the next `tutorial on finetuning <finetune_custom.html>`__.
|
"""1. Getting Started with Pre-trained I3D Models on Kinetcis400
================================================================
`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset
of realistic action videos, collected from YouTube. With 306,245 short trimmed videos
from 400 action categories, it is one of the largest and most widely used dataset in the research
community for benchmarking state-of-the-art video action recognition models.
`I3D <https://arxiv.org/abs/1705.07750>`_ (Inflated 3D Networks) is a widely adopted 3D video
classification network. It uses 3D convolution to learn spatiotemporal information directly from videos.
I3D is proposed to improve `C3D <https://arxiv.org/abs/1412.0767>`_ (Convolutional 3D Networks) by inflating from 2D models.
We can not only reuse the 2D models' architecture (e.g., ResNet, Inception), but also bootstrap
the model weights from 2D pretrained models. In this manner, training 3D networks for video
classification is feasible and getting much better results.
In this tutorial, we will demonstrate how to load a pre-trained I3D model from :ref:`gluoncv-model-zoo`
and classify a video clip from the Internet or your local disk into one of the 400 action classes.
Step by Step
------------
We will try out a pre-trained I3D model on a single video clip.
First, please follow the `installation guide <../../index.html#installation>`__
to install ``PyTorch`` and ``GluonCV`` if you haven't done so yet.
"""
import numpy as np
import decord
import torch
from gluoncv.torch.utils.model_utils import download
from gluoncv.torch.data.transforms.videotransforms import video_transforms, volume_transforms
from gluoncv.torch.engine.config import get_cfg_defaults
from gluoncv.torch.model_zoo import get_model
################################################################
# Then, we download a video and extract a 32-frame clip from it.
url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/abseiling_k400.mp4'
video_fname = download(url)
vr = decord.VideoReader(video_fname)
frame_id_list = range(0, 64, 2)
video_data = vr.get_batch(frame_id_list).asnumpy()
################################################################
# Now we define transformations for the video clip.
# This transformation function does four things:
# (1) resize the shorter side of video clip to short_side_size,
# (2) center crop the video clip to crop_size x crop_size,
# (3) transpose the video clip to ``num_channels*num_frames*height*width``,
# and (4) normalize it with mean and standard deviation calculated across all ImageNet images.
crop_size = 224
short_side_size = 256
transform_fn = video_transforms.Compose([video_transforms.Resize(short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(crop_size, crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
clip_input = transform_fn(video_data)
print('Video data is downloaded and preprocessed.')
################################################################
# Next, we load a pre-trained I3D model. Make sure to change the ``pretrained`` in the configuration file to True.
config_file = './scripts/action-recognition/configuration/i3d_resnet50_v1_kinetics400.yaml'
cfg = get_cfg_defaults()
cfg.merge_from_file(config_file)
model = get_model(cfg)
print('%s model is successfully loaded.' % cfg.CONFIG.MODEL.NAME)
################################################################
# Finally, we prepare the video clip and feed it to the model.
with torch.no_grad():
pred = model(torch.unsqueeze(clip_input, dim=0)).numpy()
print('The input video clip is classified to be class %d' % (np.argmax(pred)))
################################################################
# We can see that our pre-trained model predicts this video clip
# to be ``abseiling`` action with high confidence.
################################################################
# Next Step
# ---------
#
# If you would like to dive deeper into finetuing SOTA video models on your datasets,
# feel free to read the next `tutorial on finetuning <finetune_custom.html>`__.
|
en
| 0.663509
|
1. Getting Started with Pre-trained I3D Models on Kinetcis400 ================================================================ `Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset of realistic action videos, collected from YouTube. With 306,245 short trimmed videos from 400 action categories, it is one of the largest and most widely used dataset in the research community for benchmarking state-of-the-art video action recognition models. `I3D <https://arxiv.org/abs/1705.07750>`_ (Inflated 3D Networks) is a widely adopted 3D video classification network. It uses 3D convolution to learn spatiotemporal information directly from videos. I3D is proposed to improve `C3D <https://arxiv.org/abs/1412.0767>`_ (Convolutional 3D Networks) by inflating from 2D models. We can not only reuse the 2D models' architecture (e.g., ResNet, Inception), but also bootstrap the model weights from 2D pretrained models. In this manner, training 3D networks for video classification is feasible and getting much better results. In this tutorial, we will demonstrate how to load a pre-trained I3D model from :ref:`gluoncv-model-zoo` and classify a video clip from the Internet or your local disk into one of the 400 action classes. Step by Step ------------ We will try out a pre-trained I3D model on a single video clip. First, please follow the `installation guide <../../index.html#installation>`__ to install ``PyTorch`` and ``GluonCV`` if you haven't done so yet. ################################################################ # Then, we download a video and extract a 32-frame clip from it. ################################################################ # Now we define transformations for the video clip. 
# This transformation function does four things: # (1) resize the shorter side of video clip to short_side_size, # (2) center crop the video clip to crop_size x crop_size, # (3) transpose the video clip to ``num_channels*num_frames*height*width``, # and (4) normalize it with mean and standard deviation calculated across all ImageNet images. ################################################################ # Next, we load a pre-trained I3D model. Make sure to change the ``pretrained`` in the configuration file to True. ################################################################ # Finally, we prepare the video clip and feed it to the model. ################################################################ # We can see that our pre-trained model predicts this video clip # to be ``abseiling`` action with high confidence. ################################################################ # Next Step # --------- # # If you would like to dive deeper into finetuing SOTA video models on your datasets, # feel free to read the next `tutorial on finetuning <finetune_custom.html>`__.
| 3.121631
| 3
|
archive/boston/vote.py
|
jayktee/scrapers-us-municipal
| 67
|
6627299
|
from pupa.scrape import Scraper
from pupa.scrape import Vote
import datetime as dt
import lxml
import time
DURL = "http://www.cityofboston.gov/cityclerk/rollcall/default.aspx"
class BostonVoteScraper(Scraper):
def lxmlize(self, url):
entry = self.urlopen(url)
page = lxml.html.fromstring(entry)
page.make_links_absolute(url)
return page
def scrape(self):
for page in self.iterpages():
for subject in page.xpath('//div[@class="ContainerPanel"]'):
dates = subject.xpath(".//font[@color='#276598']/b/text()")
motions = [x.strip() for x in subject.xpath(
".//div[@style='width:260px; float:left;']/text()")]
votes = subject.xpath(".//div[@style='width:150px; float:right;']")
docket = subject.xpath(".//div[@class='HeaderContent']/b/text()")
docket = list(filter(lambda x: "docket" in x.lower(), docket))
docket = docket[0] if docket else None
for date, motion, vote in zip(dates, motions, votes):
when = dt.datetime.strptime(date, "%m/%d/%Y")
motion = motion.strip()
if motion == "":
self.warning("Skipping vote.")
continue
v = Vote(session=self.session,
organization="Boston City Council",
type='other',
passed=False,
date=when.strftime("%Y-%m-%d"),
motion=motion,
yes_count=0,
no_count=0,)
if docket:
v.set_bill(docket)
yes, no, other = 0, 0, 0
vit = iter(vote.xpath("./div"))
vote = zip(vit, vit, vit)
for who, entry, _ in vote:
how = entry.text
who = who.text
if how == 'Y':
v.yes(who)
yes += 1
elif how == 'N':
v.no(who)
no += 1
else:
v.other(who)
other += 1
for count in v.vote_counts:
count['count'] = {
"yes": yes,
"no": no,
"other": other
}[count['vote_type']]
v.add_source(DURL, note='root')
yield v
def do_post_back(self, form, event_target, event_argument):
block = {name: value for name, value in [(obj.name, obj.value)
for obj in form.xpath(".//input")]}
block['__EVENTTARGET'] = event_target
block['__EVENTARGUMENT'] = event_argument
block['ctl00$MainContent$lblCurrentText'] = (int(
block['ctl00$MainContent$lblCurrentText']) + 1)
block.pop("ctl00$MainContent$ctl00")
ret = lxml.html.fromstring(self.urlopen(form.action, block))
ret.make_links_absolute(form.action)
return ret
def iterpages(self):
page = self.lxmlize(DURL)
yield page
while page is not None:
yield page
page = self.next_page(page)
def next_page(self, page):
time.sleep(5)
form = page.xpath("//form[@name='aspnetForm']")[0]
n = page.xpath("//a[contains(text(), 'Next Page')]")[0]
nextable = n.attrib['style'] != 'display: none;'
if nextable:
return self.do_post_back(form, 'ctl00$MainContent$lnkNext', '')
return None
|
from pupa.scrape import Scraper
from pupa.scrape import Vote
import datetime as dt
import lxml
import time
DURL = "http://www.cityofboston.gov/cityclerk/rollcall/default.aspx"
class BostonVoteScraper(Scraper):
def lxmlize(self, url):
entry = self.urlopen(url)
page = lxml.html.fromstring(entry)
page.make_links_absolute(url)
return page
def scrape(self):
for page in self.iterpages():
for subject in page.xpath('//div[@class="ContainerPanel"]'):
dates = subject.xpath(".//font[@color='#276598']/b/text()")
motions = [x.strip() for x in subject.xpath(
".//div[@style='width:260px; float:left;']/text()")]
votes = subject.xpath(".//div[@style='width:150px; float:right;']")
docket = subject.xpath(".//div[@class='HeaderContent']/b/text()")
docket = list(filter(lambda x: "docket" in x.lower(), docket))
docket = docket[0] if docket else None
for date, motion, vote in zip(dates, motions, votes):
when = dt.datetime.strptime(date, "%m/%d/%Y")
motion = motion.strip()
if motion == "":
self.warning("Skipping vote.")
continue
v = Vote(session=self.session,
organization="Boston City Council",
type='other',
passed=False,
date=when.strftime("%Y-%m-%d"),
motion=motion,
yes_count=0,
no_count=0,)
if docket:
v.set_bill(docket)
yes, no, other = 0, 0, 0
vit = iter(vote.xpath("./div"))
vote = zip(vit, vit, vit)
for who, entry, _ in vote:
how = entry.text
who = who.text
if how == 'Y':
v.yes(who)
yes += 1
elif how == 'N':
v.no(who)
no += 1
else:
v.other(who)
other += 1
for count in v.vote_counts:
count['count'] = {
"yes": yes,
"no": no,
"other": other
}[count['vote_type']]
v.add_source(DURL, note='root')
yield v
def do_post_back(self, form, event_target, event_argument):
block = {name: value for name, value in [(obj.name, obj.value)
for obj in form.xpath(".//input")]}
block['__EVENTTARGET'] = event_target
block['__EVENTARGUMENT'] = event_argument
block['ctl00$MainContent$lblCurrentText'] = (int(
block['ctl00$MainContent$lblCurrentText']) + 1)
block.pop("ctl00$MainContent$ctl00")
ret = lxml.html.fromstring(self.urlopen(form.action, block))
ret.make_links_absolute(form.action)
return ret
def iterpages(self):
page = self.lxmlize(DURL)
yield page
while page is not None:
yield page
page = self.next_page(page)
def next_page(self, page):
time.sleep(5)
form = page.xpath("//form[@name='aspnetForm']")[0]
n = page.xpath("//a[contains(text(), 'Next Page')]")[0]
nextable = n.attrib['style'] != 'display: none;'
if nextable:
return self.do_post_back(form, 'ctl00$MainContent$lnkNext', '')
return None
|
none
| 1
| 2.984545
| 3
|
|
user/views.py
|
Emmanuel-code/Questions-Answers
| 0
|
6627300
|
from django.contrib.auth import authenticate,login
from .forms import SignUpForm,UpdateProfileForm
from .models import Profile
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.decorators import login_required
@login_required
def edit_profile(request):
if request.method=='POST':
form=UpdateProfileForm(request.POST, files=request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
return redirect('user:profile')
else:
form=UpdateProfileForm(instance=request.user)
return render(request, 'user/edit.html',{'form':form})
def register(request):
form=SignUpForm(request.POST)
if form.is_valid():
user=form.save()
user.refresh_from_db()
user.first_name=form.cleaned_data.get('first_name')
user.last_name=form.cleaned_data.get('last_name')
user.email=form.cleaned_data.get('email')
user.save()
username=form.cleaned_data.get('username')
password=<PASSWORD>.cleaned_data.get('<PASSWORD>')
user=authenticate(username=username,password=password)
login(request,user)
return redirect('user:login')
else:
form=SignUpForm()
return render(request,'user/register.html',{'form':form})
@login_required
def profile(request):
prof=Profile.objects.get(user=request.user)
return render(request, 'user/profile.html', {'prof':prof})
def about(request):
return render(request,'user/about.html',{})
@login_required
def public_profile(request):
return render(request, 'user/public_profile.html', {})
|
from django.contrib.auth import authenticate,login
from .forms import SignUpForm,UpdateProfileForm
from .models import Profile
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.decorators import login_required
@login_required
def edit_profile(request):
if request.method=='POST':
form=UpdateProfileForm(request.POST, files=request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
return redirect('user:profile')
else:
form=UpdateProfileForm(instance=request.user)
return render(request, 'user/edit.html',{'form':form})
def register(request):
form=SignUpForm(request.POST)
if form.is_valid():
user=form.save()
user.refresh_from_db()
user.first_name=form.cleaned_data.get('first_name')
user.last_name=form.cleaned_data.get('last_name')
user.email=form.cleaned_data.get('email')
user.save()
username=form.cleaned_data.get('username')
password=<PASSWORD>.cleaned_data.get('<PASSWORD>')
user=authenticate(username=username,password=password)
login(request,user)
return redirect('user:login')
else:
form=SignUpForm()
return render(request,'user/register.html',{'form':form})
@login_required
def profile(request):
prof=Profile.objects.get(user=request.user)
return render(request, 'user/profile.html', {'prof':prof})
def about(request):
return render(request,'user/about.html',{})
@login_required
def public_profile(request):
return render(request, 'user/public_profile.html', {})
|
none
| 1
| 2.191397
| 2
|
|
lib/sensor.py
|
wizgrav/protobot
| 1
|
6627301
|
<filename>lib/sensor.py
import pigpio
class Sensor:
ax=0
ay=0
az=0
mx=0
my=0
mz=0
def __init__(self, pi):
self.pi = pi
self.acc = self.pi.i2c_open(1, 0x19, 0)
self.pi.i2c_write_byte_data(self.acc,0x20,0x37)
self.mag = self.pi.i2c_open(1, 0x1e, 0)
self.pi.i2c_write_byte_data(self.mag,0x00,0x14)
self.pi.i2c_write_byte_data(self.mag,0x01,0x20)
self.pi.i2c_write_byte_data(self.mag,0x02,0x01)
def update(self):
self.ax = self.read(self.acc,0x29)
self.ay = self.read(self.acc,0x2B)
self.az = self.read(self.acc,0x2D)
self.mx = self.read(self.mag,0x03)
self.my = self.read(self.mag,0x05)
self.mz = self.read(self.mag,0x07)
def debug(self):
print (self.ax,self.ay,self.az,"***",self.mx,self.my,self.mz)
def read(self, dev,addr):
return self.pi.i2c_read_byte_data(dev, addr)
if __name__ == "__main__":
import time
pi = pigpio.pi()
s = Sensor(pi)
while True:
s.update()
s.debug()
time.sleep(0.5)
|
<filename>lib/sensor.py
import pigpio
class Sensor:
ax=0
ay=0
az=0
mx=0
my=0
mz=0
def __init__(self, pi):
self.pi = pi
self.acc = self.pi.i2c_open(1, 0x19, 0)
self.pi.i2c_write_byte_data(self.acc,0x20,0x37)
self.mag = self.pi.i2c_open(1, 0x1e, 0)
self.pi.i2c_write_byte_data(self.mag,0x00,0x14)
self.pi.i2c_write_byte_data(self.mag,0x01,0x20)
self.pi.i2c_write_byte_data(self.mag,0x02,0x01)
def update(self):
self.ax = self.read(self.acc,0x29)
self.ay = self.read(self.acc,0x2B)
self.az = self.read(self.acc,0x2D)
self.mx = self.read(self.mag,0x03)
self.my = self.read(self.mag,0x05)
self.mz = self.read(self.mag,0x07)
def debug(self):
print (self.ax,self.ay,self.az,"***",self.mx,self.my,self.mz)
def read(self, dev,addr):
return self.pi.i2c_read_byte_data(dev, addr)
if __name__ == "__main__":
import time
pi = pigpio.pi()
s = Sensor(pi)
while True:
s.update()
s.debug()
time.sleep(0.5)
|
none
| 1
| 2.903282
| 3
|
|
pyfiction/examples/starcourt/lstm_online.py
|
FPreta/pyfiction
| 32
|
6627302
|
import logging
from keras.optimizers import RMSprop
from keras.utils import plot_model
from pyfiction.agents.ssaqn_agent import SSAQNAgent
from pyfiction.simulators.games.starcourt_simulator import StarCourtSimulator
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
"""
An example SSAQN agent for Star Court that uses online learning and prioritized sampling
"""
# Create the agent and specify maximum lengths of descriptions (in words)
agent = SSAQNAgent(train_simulators=StarCourtSimulator())
# Learn the vocabulary (the function samples the game using a random policy)
agent.initialize_tokens('vocabulary.txt')
optimizer = RMSprop(lr=0.00001)
embedding_dimensions = 16
lstm_dimensions = 32
dense_dimensions = 8
agent.create_model(embedding_dimensions=embedding_dimensions,
lstm_dimensions=lstm_dimensions,
dense_dimensions=dense_dimensions,
optimizer=optimizer)
# Visualize the model
try:
plot_model(agent.model, to_file='model.png', show_shapes=True)
except ImportError as e:
logger.warning("Couldn't print the model image: {}".format(e))
# Iteratively train the agent on a batch of previously seen examples while continuously expanding the experience buffer
# This example seems to not converge but we do not know if there exists a policy reaching consistently good rewards
epochs = 1
for i in range(epochs):
logger.info('Epoch %s', i)
agent.train_online(episodes=256 * 256, batch_size=256, gamma=0.95, epsilon_decay=0.999,
prioritized_fraction=0.25, test_interval=8, test_steps=5)
|
import logging
from keras.optimizers import RMSprop
from keras.utils import plot_model
from pyfiction.agents.ssaqn_agent import SSAQNAgent
from pyfiction.simulators.games.starcourt_simulator import StarCourtSimulator
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
"""
An example SSAQN agent for Star Court that uses online learning and prioritized sampling
"""
# Create the agent and specify maximum lengths of descriptions (in words)
agent = SSAQNAgent(train_simulators=StarCourtSimulator())
# Learn the vocabulary (the function samples the game using a random policy)
agent.initialize_tokens('vocabulary.txt')
optimizer = RMSprop(lr=0.00001)
embedding_dimensions = 16
lstm_dimensions = 32
dense_dimensions = 8
agent.create_model(embedding_dimensions=embedding_dimensions,
lstm_dimensions=lstm_dimensions,
dense_dimensions=dense_dimensions,
optimizer=optimizer)
# Visualize the model
try:
plot_model(agent.model, to_file='model.png', show_shapes=True)
except ImportError as e:
logger.warning("Couldn't print the model image: {}".format(e))
# Iteratively train the agent on a batch of previously seen examples while continuously expanding the experience buffer
# This example seems to not converge but we do not know if there exists a policy reaching consistently good rewards
epochs = 1
for i in range(epochs):
logger.info('Epoch %s', i)
agent.train_online(episodes=256 * 256, batch_size=256, gamma=0.95, epsilon_decay=0.999,
prioritized_fraction=0.25, test_interval=8, test_steps=5)
|
en
| 0.867206
|
An example SSAQN agent for Star Court that uses online learning and prioritized sampling # Create the agent and specify maximum lengths of descriptions (in words) # Learn the vocabulary (the function samples the game using a random policy) # Visualize the model # Iteratively train the agent on a batch of previously seen examples while continuously expanding the experience buffer # This example seems to not converge but we do not know if there exists a policy reaching consistently good rewards
| 2.64562
| 3
|
example/server/integrations.py
|
lijamie98/django-polaris
| 0
|
6627303
|
<reponame>lijamie98/django-polaris<gh_stars>0
import json
from smtplib import SMTPException
from decimal import Decimal
from typing import List, Dict, Optional, Tuple
from urllib.parse import urlencode
from base64 import b64encode
from collections import defaultdict
from logging import getLogger
from django.db.models import QuerySet
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import gettext as _
from django import forms
from django.urls import reverse
from django.core.mail import send_mail
from django.conf import settings as server_settings
from django.template.loader import render_to_string
from stellar_sdk.keypair import Keypair
from rest_framework.request import Request
from polaris.models import Transaction, Asset
from polaris.templates import Template
from polaris.integrations import (
DepositIntegration,
WithdrawalIntegration,
SEP31ReceiverIntegration,
CustomerIntegration,
calculate_fee,
RailsIntegration,
TransactionForm,
)
from polaris import settings
from polaris.sep10.token import SEP10Token
from . import mock_banking_rails as rails
from .models import PolarisUser, PolarisStellarAccount, PolarisUserTransaction
from .forms import KYCForm, WithdrawForm
logger = getLogger(__name__)
CONFIRM_EMAIL_PAGE_TITLE = _("Confirm Email")
def send_confirmation_email(user: PolarisUser, account: PolarisStellarAccount):
"""
Sends a confirmation email to user.email
In a real production deployment, you would never want to send emails
as part of the request/response cycle. Instead, use a job queue service
like Celery. This reference server is not intended to handle heavy
traffic so we are making an exception here.
"""
args = urlencode({"token": account.confirmation_token, "email": user.email})
url = f"{settings.HOST_URL}{reverse('confirm_email')}?{args}"
try:
send_mail(
_("Reference Anchor Server: Confirm Email"),
# email body if the HTML is not rendered
_("Confirm your email by pasting this URL in your browser: %s") % url,
server_settings.EMAIL_HOST_USER,
[user.email],
html_message=render_to_string(
"confirmation_email.html",
{"first_name": user.first_name, "confirmation_url": url},
),
)
except SMTPException as e:
logger.error(f"Unable to send email to {user.email}: {e}")
class SEP24KYC:
@staticmethod
def track_user_activity(form: forms.Form, transaction: Transaction):
"""
Creates a PolarisUserTransaction object, and depending on the form
passed, also creates a new PolarisStellarAccount and potentially a
new PolarisUser. This function ensures an accurate record of a
particular person's activity.
"""
if isinstance(form, KYCForm):
data = form.cleaned_data
user = PolarisUser.objects.filter(email=data.get("email")).first()
if not user:
user = PolarisUser.objects.create(
first_name=data.get("first_name"),
last_name=data.get("last_name"),
email=data.get("email"),
)
account = PolarisStellarAccount.objects.create(
account=transaction.stellar_account, user=user,
)
if server_settings.EMAIL_HOST_USER:
send_confirmation_email(user, account)
else:
try:
account = PolarisStellarAccount.objects.get(
account=transaction.stellar_account, memo=None
)
except PolarisStellarAccount.DoesNotExist:
raise RuntimeError(
f"Unknown address: {transaction.stellar_account}, KYC required."
)
PolarisUserTransaction.objects.get_or_create(
user=account.user, account=account, transaction_id=transaction.id
)
@staticmethod
def check_kyc(
transaction: Transaction, post_data=None
) -> Tuple[Optional[forms.Form], Optional[Dict]]:
"""
Returns a KYCForm if there is no record of this stellar account,
otherwise returns None.
"""
account = PolarisStellarAccount.objects.filter(
account=transaction.stellar_account,
).first()
if not account: # Unknown stellar account, get KYC info
if post_data:
form = KYCForm(post_data)
else:
form = KYCForm()
return (
form,
{
"icon_label": _("Stellar Development Foundation"),
"title": _("Polaris KYC Information"),
"guidance": (
_(
"We're legally required to know our customers. "
"Please enter the information requested."
)
),
},
)
elif settings.LOCAL_MODE:
# When in local mode, request session's are not authenticated,
# which means account confirmation cannot be skipped. So we'll
# return None instead of returning the confirm email page.
return None, None
elif server_settings.EMAIL_HOST_USER and not account.confirmed:
return (
None,
{
"title": CONFIRM_EMAIL_PAGE_TITLE,
"guidance": _(
"We sent you a confirmation email. Once confirmed, "
"continue on this page."
),
"icon_label": _("Stellar Development Foundation"),
},
)
else:
return None, None
class MyDepositIntegration(DepositIntegration):
    """Reference implementation of Polaris' deposit integration points.

    Handles the SEP-24 interactive flow (forms and page content) as well
    as SEP-6 programmatic deposit requests for this anchor.
    """

    def form_for_transaction(
        self,
        request: Request,
        transaction: Transaction,
        post_data=None,
        amount=None,
        *args,
        **kwargs,
    ) -> Optional[forms.Form]:
        """Return the next form to render for this deposit, or ``None``.

        Precedence: an outstanding KYC form first; then ``None`` when
        non-form content must be shown or the amount was already
        collected; otherwise the amount-entry ``TransactionForm``
        (bound to POST data when present).
        """
        kyc_form, content = SEP24KYC.check_kyc(transaction, post_data=post_data)
        if kyc_form:
            return kyc_form
        elif content or transaction.amount_in:
            return None
        elif post_data:
            return TransactionForm(transaction, post_data)
        else:
            return TransactionForm(transaction, initial={"amount": amount})

    def content_for_template(
        self,
        request: Request,
        template: Template,
        form: Optional[forms.Form] = None,
        transaction: Optional[Transaction] = None,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Return extra template context (title/guidance/icon) per page.

        Returns ``None`` implicitly for templates other than DEPOSIT and
        MORE_INFO, and for the DEPOSIT page when no form is being shown.
        """
        na, kyc_content = SEP24KYC.check_kyc(transaction)
        if kyc_content:
            return kyc_content
        elif template == Template.DEPOSIT:
            if not form:
                return None
            return {
                "title": _("Polaris Transaction Information"),
                "guidance": _("Please enter the amount you would like to transfer."),
                "icon_label": _("Stellar Development Foundation"),
            }
        elif template == Template.MORE_INFO:
            content = {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
            }
            if transaction.status == Transaction.STATUS.pending_user_transfer_start:
                # We're waiting on the user to send an off-chain payment;
                # derive a short, stable reference memo from the transaction.
                content.update(
                    memo=b64encode(str(hash(transaction)).encode())
                    .decode()[:10]
                    .upper()
                )
            return content

    def after_form_validation(
        self,
        request: Request,
        form: forms.Form,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Record the user's activity once a form validates successfully."""
        try:
            SEP24KYC.track_user_activity(form, transaction)
        except RuntimeError:
            # Since no polaris account exists for this transaction, KYCForm
            # will be returned from the next form_for_transaction() call
            logger.exception(
                f"KYCForm was not served first for unknown account, id: "
                f"{transaction.stellar_account}"
            )

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Dict:
        """Validate a SEP-6 deposit request and return deposit instructions.

        Returns a ``non_interactive_customer_info_needed`` payload when KYC
        is incomplete; otherwise records the transaction amounts/fee and
        returns the 'how'/'extra_info' instructions.

        Raises:
            ValueError: 'type' is not 'bank_account' or 'amount' is out of
                the asset's configured deposit range.
        """
        account = (
            PolarisStellarAccount.objects.filter(account=params["account"], memo=None)
            .select_related("user")
            .first()
        )
        if not account:
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": [
                    "first_name",
                    "last_name",
                    "email_address",
                    "bank_number",
                    "bank_account_number",
                ],
            }
        elif not (account.user.bank_account_number and account.user.bank_number):
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": ["bank_number", "bank_account_number",],
            }
        elif params["type"] != "bank_account":
            raise ValueError(_("'type' must be 'bank_account'"))
        elif not account.confirmed:
            # Here is where you would normally return something like this:
            # {
            #     "type": "customer_info_status",
            #     "status": "pending"
            # }
            # However, we're not going to block the client from completing
            # the flow since this is a reference server.
            pass
        asset = params["asset"]
        min_amount = round(asset.deposit_min_amount, asset.significant_decimals)
        max_amount = round(asset.deposit_max_amount, asset.significant_decimals)
        if params["amount"]:
            if not (min_amount <= params["amount"] <= max_amount):
                raise ValueError(_("invalid 'amount'"))
            transaction.amount_in = params["amount"]
            transaction.amount_fee = calculate_fee(
                {
                    "amount": params["amount"],
                    "operation": "deposit",
                    "asset_code": asset.code,
                }
            )
            transaction.amount_out = round(
                transaction.amount_in - transaction.amount_fee,
                asset.significant_decimals,
            )
            transaction.save()

        # request is valid, return success data and add transaction to user model
        PolarisUserTransaction.objects.create(
            transaction_id=transaction.id, user=account.user, account=account
        )
        return {
            "how": "fake bank account number",
            "extra_info": {
                "message": (
                    "'how' would normally contain a terse explanation for how "
                    "to deposit the asset with the anchor, and 'extra_info' "
                    "would provide any additional information."
                )
            },
        }

    def create_channel_account(self, transaction: Transaction, *args, **kwargs):
        """Create and fund (via friendbot) a throwaway channel account."""
        kp = Keypair.random()
        settings.HORIZON_SERVER._client.get(
            f"https://friendbot.stellar.org/?addr={kp.public_key}"
        )
        transaction.channel_seed = kp.secret
        transaction.save()

    def after_deposit(self, transaction: Transaction, *args, **kwargs):
        """Discard the channel seed once the deposit has completed."""
        transaction.channel_seed = None
        transaction.save()
class MyWithdrawalIntegration(WithdrawalIntegration):
    """Reference implementation of Polaris' withdrawal integration points.

    Mirrors :class:`MyDepositIntegration` for the withdrawal direction.
    """

    def form_for_transaction(
        self,
        request: Request,
        transaction: Transaction,
        post_data=None,
        amount=None,
        *args,
        **kwargs,
    ) -> Optional[forms.Form]:
        """Return the next form for this withdrawal, or ``None``.

        KYC form takes precedence; ``None`` when non-form content must be
        shown or the amount was already collected; otherwise WithdrawForm.
        """
        kyc_form, content = SEP24KYC.check_kyc(transaction, post_data)
        if kyc_form:
            return kyc_form
        elif content or transaction.amount_in:
            return None
        elif post_data:
            return WithdrawForm(transaction, post_data)
        else:
            return WithdrawForm(transaction, initial={"amount": amount})

    def content_for_template(
        self,
        request: Request,
        template: Template,
        form: Optional[forms.Form] = None,
        transaction: Optional[Transaction] = None,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Return extra template context for the WITHDRAW/MORE_INFO pages."""
        na, content = SEP24KYC.check_kyc(transaction)
        if content:
            return content
        elif template == Template.WITHDRAW:
            if not form:
                return None
            return {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
                "guidance": (
                    _(
                        "Please enter the banking details for the account "
                        "you would like to receive your funds."
                    )
                ),
            }
        else:  # template == Template.MORE_INFO
            return {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
            }

    def after_form_validation(
        self,
        request: Request,
        form: forms.Form,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Record the user's activity once a form validates successfully."""
        try:
            SEP24KYC.track_user_activity(form, transaction)
        except RuntimeError:
            # Since no polaris account exists for this transaction, KYCForm
            # will be returned from the next form_for_transaction() call
            logger.exception(
                f"KYCForm was not served first for unknown account, id: "
                f"{transaction.stellar_account}"
            )

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Dict:
        """Validate a SEP-6 withdraw request and return withdrawal details.

        NOTE(review): assumes 'memo'/'memo_type' keys are always present in
        ``params`` (Polaris appears to validate the request upstream) —
        confirm before reusing this outside the SEP-6 endpoint.

        Raises:
            ValueError: 'type' is not 'bank_account', 'dest'/'dest_extra'
                are missing, or 'amount' is out of the configured range.
        """
        account = (
            PolarisStellarAccount.objects.filter(
                account=params["account"],
                memo=params["memo"],
                memo_type=params["memo_type"],
            )
            .select_related("user")
            .first()
        )
        if not account:
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": [
                    "first_name",
                    "last_name",
                    "email_address",
                    "bank_number",
                    "bank_account_number",
                ],
            }
        elif not (account.user.bank_account_number and account.user.bank_number):
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": ["bank_number", "bank_account_number",],
            }
        elif params["type"] != "bank_account":
            raise ValueError(_("'type' must be 'bank_account'"))
        elif not params["dest"]:
            raise ValueError(_("'dest' is required"))
        elif not params["dest_extra"]:
            raise ValueError(_("'dest_extra' is required"))
        elif not account.confirmed:
            # Here is where you would normally return something like this:
            # {
            #     "type": "customer_info_status",
            #     "status": "pending"
            # }
            # However, we're not going to block the client from completing
            # the flow since this is a reference server.
            pass
        asset = params["asset"]
        min_amount = round(asset.withdrawal_min_amount, asset.significant_decimals)
        max_amount = round(asset.withdrawal_max_amount, asset.significant_decimals)
        if params["amount"]:
            if not (min_amount <= params["amount"] <= max_amount):
                raise ValueError(_("invalid 'amount'"))
            transaction.amount_in = params["amount"]
            transaction.amount_fee = calculate_fee(
                {
                    "amount": params["amount"],
                    "operation": "withdraw",
                    "asset_code": asset.code,
                }
            )
            transaction.amount_out = round(
                transaction.amount_in - transaction.amount_fee,
                asset.significant_decimals,
            )
            transaction.save()

        response = {
            "account_id": asset.distribution_account,
            "min_amount": min_amount,
            "max_amount": max_amount,
            "fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
            "fee_percent": asset.withdrawal_fee_percent,
        }

        if params["memo_type"] and params["memo"]:
            response["memo_type"] = params["memo_type"]
            response["memo"] = params["memo"]

        PolarisUserTransaction.objects.create(
            transaction_id=transaction.id, user=account.user, account=account
        )

        return response
class MyCustomerIntegration(CustomerIntegration):
    """Reference SEP-12 customer integration backed by ``PolarisUser`` rows.

    ``__init__`` precomputes the canned NEEDS_INFO payloads that ``get()``
    returns depending on how much of the customer's KYC we already hold.

    Fixes vs. the previous revision:
    - ``delete()``'s ``token`` annotation was corrupted (``<PASSWORD>Token``,
      a syntax error); restored to ``SEP10Token``.
    - ``put()`` no longer raises ``KeyError`` when ``email_address`` is
      absent from ``params`` during the email-uniqueness check.
    - ``put()`` uses ``params.get()`` for memo/memo_type when creating an
      account for an existing user, consistent with every other lookup.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Every SEP-9 field this anchor may ever request.
        self.required_fields = [
            "account",
            "first_name",
            "last_name",
            "email_address",
            "bank_account_number",
            "bank_number",
        ]
        self.accepted = {"status": "ACCEPTED"}
        # Customer unknown: request name and email only (deposit/SEP-31 flows).
        self.needs_basic_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                },
            },
        }
        # Customer known but missing banking details (withdrawal flows).
        self.needs_bank_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "bank_account_number": {
                    "description": "bank account number of the customer",
                    "type": "string",
                },
                "bank_number": {
                    "description": "routing number of the customer",
                    "type": "string",
                },
            },
        }
        # Customer unknown and banking details required (withdraw by stranger).
        self.needs_all_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                },
                "bank_account_number": {
                    "description": "bank account number of the customer",
                    "type": "string",
                },
                "bank_number": {
                    "description": "routing number of the customer",
                    "type": "string",
                },
            },
        }

    def get(
        self, token: SEP10Token, request: Request, params: Dict, *args, **kwargs
    ) -> Dict:
        """Return the customer's SEP-12 status for the requested ``type``.

        Looks the customer up by ``id``, or by Stellar account (+ memo),
        and reports ACCEPTED / NEEDS_INFO with per-field statuses.

        Raises:
            ObjectDoesNotExist: an ``id`` was given but no customer matches.
            ValueError: the ``type`` parameter is not supported.
        """
        user = None
        if params.get("id"):
            user = PolarisUser.objects.filter(id=params["id"]).first()
            if not user:
                raise ObjectDoesNotExist(_("customer not found"))
        elif params.get("account"):
            account = PolarisStellarAccount.objects.filter(
                account=params.get("account"),
                memo=params.get("memo"),
                memo_type=params.get("memo_type"),
            ).first()
            user = account.user if account else None
        if not user:
            # Deposits and SEP-31 transfers only need basic KYC up front;
            # withdrawals (and an unspecified type) also need bank details.
            if params.get("type") in ["sep6-deposit", "sep31-sender", "sep31-receiver"]:
                return self.needs_basic_info
            elif params.get("type") in [None, "sep6-withdraw"]:
                return self.needs_all_info
            else:
                raise ValueError(
                    _("invalid 'type'. see /info response for valid values.")
                )
        response_data = {"id": str(user.id)}
        basic_info_accepted = {
            "provided_fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
            }
        }
        if (user.bank_number and user.bank_account_number) or (
            params.get("type") in ["sep6-deposit", "sep31-sender", "sep31-receiver"]
        ):
            response_data.update(self.accepted)
            response_data.update(basic_info_accepted)
            if user.bank_number and user.bank_account_number:
                response_data["provided_fields"].update(
                    {
                        "bank_account_number": {
                            "description": "bank account number of the customer",
                            "type": "string",
                            "status": "ACCEPTED",
                        },
                        "bank_number": {
                            "description": "routing number of the customer",
                            "type": "string",
                            "status": "ACCEPTED",
                        },
                    }
                )
        elif params.get("type") in [None, "sep6-withdraw"]:
            response_data.update(basic_info_accepted)
            response_data.update(self.needs_bank_info)
        else:
            raise ValueError(_("invalid 'type'. see /info response for valid values."))

        return response_data

    def put(
        self, token: SEP10Token, request: Request, params: Dict, *args, **kwargs
    ) -> str:
        """Create or update a customer record and return its ID as a string.

        Matches on ``id`` when provided, otherwise on the Stellar account
        (+ memo). New customers require the SEP-9 name and email fields.

        Raises:
            ObjectDoesNotExist: no customer matches the provided ``id``.
            ValueError: required SEP-9 fields are missing, or the requested
                email address already belongs to another customer.
        """
        if params.get("id"):
            user = PolarisUser.objects.filter(id=params["id"]).first()
            if not user:
                raise ObjectDoesNotExist("could not identify user customer 'id'")
        else:
            account = PolarisStellarAccount.objects.filter(
                account=params["account"],
                memo=params.get("memo"),
                memo_type=params.get("memo_type"),
            ).first()
            if not account:
                # email_address is a secondary ID
                if "email_address" not in params:
                    raise ValueError(
                        "SEP-9 fields were not passed for new customer. "
                        "'first_name', 'last_name', and 'email_address' are required."
                    )
                # find existing user by previously-specified email
                user = PolarisUser.objects.filter(email=params["email_address"]).first()
                if user:
                    account = PolarisStellarAccount.objects.create(
                        user=user,
                        account=params["account"],
                        # memo/memo_type may be absent; don't KeyError on them
                        memo=params.get("memo"),
                        memo_type=params.get("memo_type"),
                    )
                    send_confirmation_email(user, account)
                else:
                    user, account = self.create_new_user(params)
                    send_confirmation_email(user, account)
            else:
                user = account.user

        # Reject an email change that would collide with another customer.
        new_email = params.get("email_address")
        if (
            new_email
            and user.email != new_email
            and PolarisUser.objects.filter(email=new_email).exists()
        ):
            raise ValueError("email_address is taken")

        user.email = new_email or user.email
        user.first_name = params.get("first_name") or user.first_name
        user.last_name = params.get("last_name") or user.last_name
        user.bank_number = params.get("bank_number") or user.bank_number
        user.bank_account_number = (
            params.get("bank_account_number") or user.bank_account_number
        )
        user.save()

        return str(user.id)

    def delete(
        self,
        token: SEP10Token,
        request: Request,
        account: str,
        memo: Optional[str],
        memo_type: Optional[str],
        *args,
        **kwargs,
    ):
        """Delete the customer owning the given Stellar account (+ memo).

        Raises:
            ObjectDoesNotExist: no account matches the provided identifiers.
        """
        qparams = {"account": account, "memo": memo, "memo_type": memo_type}
        account = PolarisStellarAccount.objects.filter(**qparams).first()
        if not account:
            raise ObjectDoesNotExist()
        account.user.delete()

    @staticmethod
    def create_new_user(params):
        """Create a ``PolarisUser`` plus its ``PolarisStellarAccount``.

        Raises:
            ValueError: any of the required SEP-9 fields are missing.
        """
        if not all(f in params for f in ["first_name", "last_name", "email_address"]):
            raise ValueError(
                "SEP-9 fields were not passed for new customer. "
                "'first_name', 'last_name', and 'email_address' are required."
            )
        user = PolarisUser.objects.create(
            first_name=params["first_name"],
            last_name=params["last_name"],
            email=params["email_address"],
            bank_number=params.get("bank_number"),
            bank_account_number=params.get("bank_account_number"),
        )
        account = PolarisStellarAccount.objects.create(
            user=user,
            account=params["account"],
            memo=params.get("memo"),
            memo_type=params.get("memo_type"),
        )
        return user, account
class MySEP31ReceiverIntegration(SEP31ReceiverIntegration):
    """Reference SEP-31 receiving-anchor integration.

    Fixes vs. the previous revision (all in ``process_patch_request``):
    - ``possible_fields.union(...)`` discarded its return value, leaving the
      set empty so every PATCH was rejected as "unexpected fields"; now
      accumulated with ``|=``.
    - ``self.info(transaction.asset)`` omitted the required ``request``
      positional argument (TypeError).
    - ``update_fields[f]`` indexed a list with a string (TypeError); the
      value lives in ``transaction_fields``.
    - the ``elif`` between the two bank-field updates prevented updating
      both fields in a single request.
    Also, ``process_post_request`` no longer crashes when the "transaction"
    key is absent from ``params["fields"]``.
    """

    def info(
        self,
        request: Request,
        asset: Asset,
        lang: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """Return the SEP-31 /info entry (SEP-12 types + transaction fields)."""
        return {
            "sep12": {
                "sender": {
                    "types": {
                        "sep31-sender": {
                            "description": "the basic type for sending customers"
                        }
                    }
                },
                "receiver": {
                    "types": {
                        "sep31-receiver": {
                            "description": "the basic type for receiving customers"
                        }
                    }
                },
            },
            "fields": {
                "transaction": {
                    "routing_number": {
                        "description": "routing number of the destination bank account"
                    },
                    "account_number": {
                        "description": "bank account number of the destination"
                    },
                },
            },
        }

    def process_post_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Validate a POST /transactions request and record the transaction.

        Returns an error dict when fields are malformed or the receiving
        customer is unknown; returns ``None`` on success.
        """
        _ = params.get("sender_id")  # not actually used
        receiver_id = params.get("receiver_id")
        # default to {} so a missing "transaction" key doesn't crash .items()
        transaction_fields = params.get("fields", {}).get("transaction", {})
        for field, val in transaction_fields.items():
            if not isinstance(val, str):
                return {"error": f"'{field}'" + _(" is not of type str")}

        receiving_user = PolarisUser.objects.filter(id=receiver_id).first()
        if not receiving_user:
            return {"error": "customer_info_needed", "type": "sep31-receiver"}

        elif not (receiving_user.bank_account_number and receiving_user.bank_number):
            receiving_user.bank_account_number = transaction_fields["account_number"]
            receiving_user.bank_number = transaction_fields["routing_number"]
            receiving_user.save()
        transaction.save()
        PolarisUserTransaction.objects.create(
            user=receiving_user, transaction_id=transaction.id
        )

    def process_patch_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Apply the sending anchor's corrected transaction fields.

        Raises:
            ValueError: no fields were provided, an unexpected field was
                provided, or a field value is not a string.
        """
        info_fields = params.get("fields", {})
        transaction_fields = info_fields.get("transaction", {})
        if not isinstance(transaction_fields, dict):
            raise ValueError(_("'transaction' value must be an object"))
        possible_fields = set()
        # set.union() returns a new set; accumulate in place with |=
        for obj in self.info(request, transaction.asset)["fields"].values():
            possible_fields |= set(obj.keys())
        update_fields = list(transaction_fields.keys())
        if not update_fields:
            raise ValueError(_("No fields provided"))
        elif any(f not in possible_fields for f in update_fields):
            raise ValueError(_("unexpected fields provided"))
        elif not all(isinstance(transaction_fields[f], str) for f in update_fields):
            raise ValueError(_("field values must be strings"))
        user = (
            PolarisUserTransaction.objects.filter(transaction_id=transaction.id)
            .first()
            .user
        )
        # both fields may be updated in a single request
        if "routing_number" in update_fields:
            user.bank_number = transaction_fields["routing_number"]
        if "account_number" in update_fields:
            user.bank_account_number = transaction_fields["account_number"]
        user.save()

    def valid_sending_anchor(
        self, token: SEP10Token, request: Request, public_key: str, *args, **kwargs
    ) -> bool:
        """Accept every sending anchor (reference-server behavior only)."""
        # A real anchor would check if public_key belongs to a partner anchor
        return True
class MyRailsIntegration(RailsIntegration):
    """Reference banking-rails integration built on a mock bank API client.

    Fix vs. the previous revision: ``execute_outgoing_transaction`` called
    ``MySEP31ReceiverIntegration().info(transaction.asset)``, omitting the
    required ``request`` positional argument and raising ``TypeError`` on
    the error-reporting path; ``None`` is now passed explicitly (``info()``
    ignores its request argument).
    """

    def poll_pending_deposits(
        self, pending_deposits: QuerySet, *args, **kwargs
    ) -> List[Transaction]:
        """
        Anchors should implement their banking rails here, as described
        in the :class:`.RailsIntegration` docstrings.

        This implementation interfaces with a fake banking rails client
        for demonstration purposes.

        Returns the subset of ``pending_deposits`` whose off-chain funds
        have arrived, with amounts and fees populated.
        """
        # interface with mock banking rails
        ready_deposits = []
        mock_bank_account_id = "XXXXXXXXXXXXX"
        client = rails.BankAPIClient(mock_bank_account_id)
        for deposit in pending_deposits:
            bank_deposit = client.get_deposit(deposit=deposit)
            if bank_deposit and bank_deposit.status == "complete":
                if not deposit.amount_in:
                    # mock client has no real amount; use a fixed test value
                    deposit.amount_in = Decimal(103)
                if bank_deposit.amount != deposit.amount_in or not deposit.amount_fee:
                    deposit.amount_fee = calculate_fee(
                        {
                            "amount": deposit.amount_in,
                            "operation": settings.OPERATION_DEPOSIT,
                            "asset_code": deposit.asset.code,
                        }
                    )
                deposit.amount_out = round(
                    deposit.amount_in - deposit.amount_fee,
                    deposit.asset.significant_decimals,
                )
                deposit.save()
                ready_deposits.append(deposit)

        return ready_deposits

    def poll_outgoing_transactions(
        self, transactions: QuerySet, *args, **kwargs
    ) -> List[Transaction]:
        """
        Auto-complete pending_external transactions

        An anchor would typically collect information on the transactions passed
        and return only the transactions that have completed the external transfer.
        """
        return list(transactions)

    def execute_outgoing_transaction(self, transaction: Transaction, *args, **kwargs):
        """Send the off-chain payment for a withdrawal or SEP-31 transfer.

        Marks the transaction pending_external on success; on (mock) bank
        failure, records which fields the sending anchor must update.
        """
        def error():
            # shared failure path: flag the transaction as unrecoverable
            transaction.status = Transaction.STATUS.error
            transaction.status_message = (
                f"Unable to find user info for transaction {transaction.id}"
            )
            transaction.save()

        logger.info("fetching user data for transaction")
        user_transaction = PolarisUserTransaction.objects.filter(
            transaction_id=transaction.id
        ).first()
        if not user_transaction:  # something is wrong with our user tracking code
            error()
            return

        # SEP31 users don't have stellar accounts, so check the user column on the transaction.
        # Since that is a new column, it may be None. If so, use the account's user column
        if user_transaction.user:
            user = user_transaction.user
        else:
            user = getattr(user_transaction.account, "user", None)
        if not user:  # something is wrong with our user tracking code
            error()
            return

        if transaction.kind == Transaction.KIND.withdrawal:
            operation = settings.OPERATION_WITHDRAWAL
        else:
            # NOTE(review): SEP-31 sends pass the kind name as the fee
            # operation — confirm calculate_fee accepts this value.
            operation = Transaction.KIND.send
        if not transaction.amount_fee:
            transaction.amount_fee = calculate_fee(
                {
                    "amount": transaction.amount_in,
                    "operation": operation,
                    "asset_code": transaction.asset.code,
                }
            )
        transaction.amount_out = round(
            transaction.amount_in - transaction.amount_fee,
            transaction.asset.significant_decimals,
        )

        client = rails.BankAPIClient("fake anchor bank account number")
        response = client.send_funds(
            to_account=user.bank_account_number,
            amount=transaction.amount_in - transaction.amount_fee,
        )

        if response["success"]:
            logger.info(f"successfully sent mock outgoing transaction {transaction.id}")
            transaction.status = Transaction.STATUS.pending_external
        else:
            # Parse a mock bank API response to demonstrate how an anchor would
            # report back to the sending anchor which fields needed updating.
            error_fields = response.error.fields
            # info() ignores its request argument, so passing None is safe here
            info_fields = MySEP31ReceiverIntegration().info(None, transaction.asset)
            required_info_update = defaultdict(dict)
            for field in error_fields:
                if "name" in field:
                    required_info_update["receiver"][field] = info_fields["receiver"][
                        field
                    ]
                elif "account" in field:
                    required_info_update["transaction"][field] = info_fields[
                        "receiver"
                    ][field]
            transaction.required_info_update = json.dumps(required_info_update)
            transaction.required_info_message = response.error.message
            transaction.status = Transaction.STATUS.pending_transaction_info_update

        transaction.save()
def fee_integration(fee_params: Dict, *args, **kwargs) -> Decimal:
    """Custom fee callable registered in place of Polaris' default.

    This reference server has no bespoke pricing logic, so the function
    exists purely to demonstrate the hook: it delegates straight to the
    default fee calculation it replaced.
    """
    fee = calculate_fee(fee_params)
    return fee
def info_integration(request: Request, asset: Asset, lang: str):
    """Return the SEP-6 /info 'fields' and 'types' for this anchor."""
    # `asset` is ignored: this reference server only anchors SRT.
    supported_langs = {pair[0] for pair in server_settings.LANGUAGES}
    if lang and lang not in supported_langs:
        raise ValueError()
    type_field = {
        "description": _("'bank_account' is the only value supported'"),
        "choices": ["bank_account"],
    }
    bank_account_fields = {
        "dest": {"description": _("bank account number")},
        "dest_extra": {"description": _("bank routing number")},
    }
    return {
        "fields": {"type": type_field},
        "types": {"bank_account": {"fields": bank_account_fields}},
    }
|
import json
from smtplib import SMTPException
from decimal import Decimal
from typing import List, Dict, Optional, Tuple
from urllib.parse import urlencode
from base64 import b64encode
from collections import defaultdict
from logging import getLogger
from django.db.models import QuerySet
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import gettext as _
from django import forms
from django.urls import reverse
from django.core.mail import send_mail
from django.conf import settings as server_settings
from django.template.loader import render_to_string
from stellar_sdk.keypair import Keypair
from rest_framework.request import Request
from polaris.models import Transaction, Asset
from polaris.templates import Template
from polaris.integrations import (
DepositIntegration,
WithdrawalIntegration,
SEP31ReceiverIntegration,
CustomerIntegration,
calculate_fee,
RailsIntegration,
TransactionForm,
)
from polaris import settings
from polaris.sep10.token import SEP10Token
from . import mock_banking_rails as rails
from .models import PolarisUser, PolarisStellarAccount, PolarisUserTransaction
from .forms import KYCForm, WithdrawForm
logger = getLogger(__name__)
CONFIRM_EMAIL_PAGE_TITLE = _("Confirm Email")
def send_confirmation_email(user: PolarisUser, account: PolarisStellarAccount):
    """
    Emails ``user.email`` a link for confirming ownership of the address.

    In a real production deployment, you would never want to send emails
    as part of the request/response cycle. Instead, use a job queue service
    like Celery. This reference server is not intended to handle heavy
    traffic so we are making an exception here.
    """
    query = urlencode({"token": account.confirmation_token, "email": user.email})
    confirmation_url = f"{settings.HOST_URL}{reverse('confirm_email')}?{query}"
    html_body = render_to_string(
        "confirmation_email.html",
        {"first_name": user.first_name, "confirmation_url": confirmation_url},
    )
    try:
        send_mail(
            _("Reference Anchor Server: Confirm Email"),
            # plain-text fallback for clients that don't render the HTML body
            _("Confirm your email by pasting this URL in your browser: %s")
            % confirmation_url,
            server_settings.EMAIL_HOST_USER,
            [user.email],
            html_message=html_body,
        )
    except SMTPException as e:
        logger.error(f"Unable to send email to {user.email}: {e}")
class SEP24KYC:
    """Static helpers implementing this anchor's SEP-24 KYC policy."""

    @staticmethod
    def track_user_activity(form: forms.Form, transaction: Transaction):
        """
        Creates a PolarisUserTransaction object, and depending on the form
        passed, also creates a new PolarisStellarAccount and potentially a
        new PolarisUser. This function ensures an accurate record of a
        particular person's activity.

        Raises:
            RuntimeError: a non-KYC form was passed for a Stellar account
                we have no record of (KYC should have been collected first).
        """
        if isinstance(form, KYCForm):
            data = form.cleaned_data
            user = PolarisUser.objects.filter(email=data.get("email")).first()
            if not user:
                user = PolarisUser.objects.create(
                    first_name=data.get("first_name"),
                    last_name=data.get("last_name"),
                    email=data.get("email"),
                )

            account = PolarisStellarAccount.objects.create(
                account=transaction.stellar_account, user=user,
            )
            if server_settings.EMAIL_HOST_USER:
                send_confirmation_email(user, account)
        else:
            try:
                account = PolarisStellarAccount.objects.get(
                    account=transaction.stellar_account, memo=None
                )
            except PolarisStellarAccount.DoesNotExist:
                raise RuntimeError(
                    f"Unknown address: {transaction.stellar_account}, KYC required."
                )
        # get_or_create keeps repeated form submissions idempotent
        PolarisUserTransaction.objects.get_or_create(
            user=account.user, account=account, transaction_id=transaction.id
        )

    @staticmethod
    def check_kyc(
        transaction: Transaction, post_data=None
    ) -> Tuple[Optional[forms.Form], Optional[Dict]]:
        """
        Returns a KYCForm if there is no record of this stellar account,
        otherwise returns None.
        """
        account = PolarisStellarAccount.objects.filter(
            account=transaction.stellar_account,
        ).first()
        if not account:  # Unknown stellar account, get KYC info
            if post_data:
                form = KYCForm(post_data)
            else:
                form = KYCForm()
            return (
                form,
                {
                    "icon_label": _("Stellar Development Foundation"),
                    "title": _("Polaris KYC Information"),
                    "guidance": (
                        _(
                            "We're legally required to know our customers. "
                            "Please enter the information requested."
                        )
                    ),
                },
            )
        elif settings.LOCAL_MODE:
            # When in local mode, request session's are not authenticated,
            # which means account confirmation cannot be skipped. So we'll
            # return None instead of returning the confirm email page.
            return None, None
        elif server_settings.EMAIL_HOST_USER and not account.confirmed:
            return (
                None,
                {
                    "title": CONFIRM_EMAIL_PAGE_TITLE,
                    "guidance": _(
                        "We sent you a confirmation email. Once confirmed, "
                        "continue on this page."
                    ),
                    "icon_label": _("Stellar Development Foundation"),
                },
            )
        else:
            return None, None
class MyDepositIntegration(DepositIntegration):
    """Reference deposit integration: SEP-24 forms/content + SEP-6 requests."""

    def form_for_transaction(
        self,
        request: Request,
        transaction: Transaction,
        post_data=None,
        amount=None,
        *args,
        **kwargs,
    ) -> Optional[forms.Form]:
        """Return the next form for this deposit, or ``None``.

        KYC form first; ``None`` when content must be shown or the amount
        was already collected; otherwise the amount-entry TransactionForm.
        """
        kyc_form, content = SEP24KYC.check_kyc(transaction, post_data=post_data)
        if kyc_form:
            return kyc_form
        elif content or transaction.amount_in:
            return None
        elif post_data:
            return TransactionForm(transaction, post_data)
        else:
            return TransactionForm(transaction, initial={"amount": amount})

    def content_for_template(
        self,
        request: Request,
        template: Template,
        form: Optional[forms.Form] = None,
        transaction: Optional[Transaction] = None,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Return extra template context for the DEPOSIT/MORE_INFO pages."""
        na, kyc_content = SEP24KYC.check_kyc(transaction)
        if kyc_content:
            return kyc_content
        elif template == Template.DEPOSIT:
            if not form:
                return None
            return {
                "title": _("Polaris Transaction Information"),
                "guidance": _("Please enter the amount you would like to transfer."),
                "icon_label": _("Stellar Development Foundation"),
            }
        elif template == Template.MORE_INFO:
            content = {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
            }
            if transaction.status == Transaction.STATUS.pending_user_transfer_start:
                # We're waiting on the user to send an off-chain payment;
                # show a short reference memo derived from the transaction.
                content.update(
                    memo=b64encode(str(hash(transaction)).encode())
                    .decode()[:10]
                    .upper()
                )
            return content

    def after_form_validation(
        self,
        request: Request,
        form: forms.Form,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Record the user's activity once a form validates successfully."""
        try:
            SEP24KYC.track_user_activity(form, transaction)
        except RuntimeError:
            # Since no polaris account exists for this transaction, KYCForm
            # will be returned from the next form_for_transaction() call
            logger.exception(
                f"KYCForm was not served first for unknown account, id: "
                f"{transaction.stellar_account}"
            )

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Dict:
        """Validate a SEP-6 deposit request and return deposit instructions.

        Raises:
            ValueError: 'type' is not 'bank_account' or 'amount' is outside
                the asset's configured deposit range.
        """
        account = (
            PolarisStellarAccount.objects.filter(account=params["account"], memo=None)
            .select_related("user")
            .first()
        )
        if not account:
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": [
                    "first_name",
                    "last_name",
                    "email_address",
                    "bank_number",
                    "bank_account_number",
                ],
            }
        elif not (account.user.bank_account_number and account.user.bank_number):
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": ["bank_number", "bank_account_number",],
            }
        elif params["type"] != "bank_account":
            raise ValueError(_("'type' must be 'bank_account'"))
        elif not account.confirmed:
            # Here is where you would normally return something like this:
            # {
            #     "type": "customer_info_status",
            #     "status": "pending"
            # }
            # However, we're not going to block the client from completing
            # the flow since this is a reference server.
            pass
        asset = params["asset"]
        min_amount = round(asset.deposit_min_amount, asset.significant_decimals)
        max_amount = round(asset.deposit_max_amount, asset.significant_decimals)
        if params["amount"]:
            if not (min_amount <= params["amount"] <= max_amount):
                raise ValueError(_("invalid 'amount'"))
            transaction.amount_in = params["amount"]
            transaction.amount_fee = calculate_fee(
                {
                    "amount": params["amount"],
                    "operation": "deposit",
                    "asset_code": asset.code,
                }
            )
            transaction.amount_out = round(
                transaction.amount_in - transaction.amount_fee,
                asset.significant_decimals,
            )
            transaction.save()

        # request is valid, return success data and add transaction to user model
        PolarisUserTransaction.objects.create(
            transaction_id=transaction.id, user=account.user, account=account
        )
        return {
            "how": "fake bank account number",
            "extra_info": {
                "message": (
                    "'how' would normally contain a terse explanation for how "
                    "to deposit the asset with the anchor, and 'extra_info' "
                    "would provide any additional information."
                )
            },
        }

    def create_channel_account(self, transaction: Transaction, *args, **kwargs):
        """Create and fund (via friendbot) a throwaway channel account."""
        kp = Keypair.random()
        settings.HORIZON_SERVER._client.get(
            f"https://friendbot.stellar.org/?addr={kp.public_key}"
        )
        transaction.channel_seed = kp.secret
        transaction.save()

    def after_deposit(self, transaction: Transaction, *args, **kwargs):
        """Discard the channel seed once the deposit has completed."""
        transaction.channel_seed = None
        transaction.save()
class MyWithdrawalIntegration(WithdrawalIntegration):
    """Reference withdrawal integration: SEP-24 forms/content + SEP-6 requests."""

    def form_for_transaction(
        self,
        request: Request,
        transaction: Transaction,
        post_data=None,
        amount=None,
        *args,
        **kwargs,
    ) -> Optional[forms.Form]:
        """Return the next form for this withdrawal, or ``None``.

        KYC form first; ``None`` when content must be shown or the amount
        was already collected; otherwise the amount-entry WithdrawForm.
        """
        kyc_form, content = SEP24KYC.check_kyc(transaction, post_data)
        if kyc_form:
            return kyc_form
        elif content or transaction.amount_in:
            return None
        elif post_data:
            return WithdrawForm(transaction, post_data)
        else:
            return WithdrawForm(transaction, initial={"amount": amount})

    def content_for_template(
        self,
        request: Request,
        template: Template,
        form: Optional[forms.Form] = None,
        transaction: Optional[Transaction] = None,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Return extra template context for the WITHDRAW/MORE_INFO pages."""
        na, content = SEP24KYC.check_kyc(transaction)
        if content:
            return content
        elif template == Template.WITHDRAW:
            if not form:
                return None
            return {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
                "guidance": (
                    _(
                        "Please enter the banking details for the account "
                        "you would like to receive your funds."
                    )
                ),
            }
        else:  # template == Template.MORE_INFO
            return {
                "title": _("Polaris Transaction Information"),
                "icon_label": _("Stellar Development Foundation"),
            }

    def after_form_validation(
        self,
        request: Request,
        form: forms.Form,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Record the user's activity once a form validates successfully."""
        try:
            SEP24KYC.track_user_activity(form, transaction)
        except RuntimeError:
            # Since no polaris account exists for this transaction, KYCForm
            # will be returned from the next form_for_transaction() call
            logger.exception(
                f"KYCForm was not served first for unknown account, id: "
                f"{transaction.stellar_account}"
            )

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Dict:
        """Validate a SEP-6 withdraw request and return withdrawal details.

        NOTE(review): assumes 'memo'/'memo_type' keys are always present in
        ``params`` (Polaris appears to validate the request upstream) —
        confirm before reusing this outside the SEP-6 endpoint.

        Raises:
            ValueError: 'type' is not 'bank_account', 'dest'/'dest_extra'
                are missing, or 'amount' is out of the configured range.
        """
        account = (
            PolarisStellarAccount.objects.filter(
                account=params["account"],
                memo=params["memo"],
                memo_type=params["memo_type"],
            )
            .select_related("user")
            .first()
        )
        if not account:
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": [
                    "first_name",
                    "last_name",
                    "email_address",
                    "bank_number",
                    "bank_account_number",
                ],
            }
        elif not (account.user.bank_account_number and account.user.bank_number):
            return {
                "type": "non_interactive_customer_info_needed",
                "fields": ["bank_number", "bank_account_number",],
            }
        elif params["type"] != "bank_account":
            raise ValueError(_("'type' must be 'bank_account'"))
        elif not params["dest"]:
            raise ValueError(_("'dest' is required"))
        elif not params["dest_extra"]:
            raise ValueError(_("'dest_extra' is required"))
        elif not account.confirmed:
            # Here is where you would normally return something like this:
            # {
            #     "type": "customer_info_status",
            #     "status": "pending"
            # }
            # However, we're not going to block the client from completing
            # the flow since this is a reference server.
            pass
        asset = params["asset"]
        min_amount = round(asset.withdrawal_min_amount, asset.significant_decimals)
        max_amount = round(asset.withdrawal_max_amount, asset.significant_decimals)
        if params["amount"]:
            if not (min_amount <= params["amount"] <= max_amount):
                raise ValueError(_("invalid 'amount'"))
            transaction.amount_in = params["amount"]
            transaction.amount_fee = calculate_fee(
                {
                    "amount": params["amount"],
                    "operation": "withdraw",
                    "asset_code": asset.code,
                }
            )
            transaction.amount_out = round(
                transaction.amount_in - transaction.amount_fee,
                asset.significant_decimals,
            )
            transaction.save()

        response = {
            "account_id": asset.distribution_account,
            "min_amount": min_amount,
            "max_amount": max_amount,
            "fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
            "fee_percent": asset.withdrawal_fee_percent,
        }

        if params["memo_type"] and params["memo"]:
            response["memo_type"] = params["memo_type"]
            response["memo"] = params["memo"]

        PolarisUserTransaction.objects.create(
            transaction_id=transaction.id, user=account.user, account=account
        )

        return response
class MyCustomerIntegration(CustomerIntegration):
    """SEP-12 customer (KYC) integration backed by PolarisUser records.

    The reference anchor collects basic identity fields (first name, last
    name, email address) for every customer type, and additionally bank
    details (routing + account number) for withdrawal-style types.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Every SEP-9 field this anchor may request from a customer.
        self.required_fields = [
            "account",
            "first_name",
            "last_name",
            "email_address",
            "bank_account_number",
            "bank_number",
        ]
        # Canned SEP-12 GET responses, keyed by how much info is still needed.
        self.accepted = {"status": "ACCEPTED"}
        self.needs_basic_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                },
            },
        }
        self.needs_bank_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "bank_account_number": {
                    "description": "bank account number of the customer",
                    "type": "string",
                },
                "bank_number": {
                    "description": "routing number of the customer",
                    "type": "string",
                },
            },
        }
        self.needs_all_info = {
            "status": "NEEDS_INFO",
            "fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                },
                "bank_account_number": {
                    "description": "bank account number of the customer",
                    "type": "string",
                },
                "bank_number": {
                    "description": "routing number of the customer",
                    "type": "string",
                },
            },
        }

    def get(
        self, token: SEP10Token, request: Request, params: Dict, *args, **kwargs
    ) -> Dict:
        """Return the SEP-12 status of the customer identified by 'id' or 'account'.

        Raises ObjectDoesNotExist when an explicit 'id' matches no customer
        and ValueError for unsupported 'type' values.
        """
        user = None
        if params.get("id"):
            user = PolarisUser.objects.filter(id=params["id"]).first()
            if not user:
                raise ObjectDoesNotExist(_("customer not found"))
        elif params.get("account"):
            account = PolarisStellarAccount.objects.filter(
                account=params.get("account"),
                memo=params.get("memo"),
                memo_type=params.get("memo_type"),
            ).first()
            user = account.user if account else None
        if not user:
            # Unknown customer: tell the client which fields to collect for
            # the requested customer type.
            if params.get("type") in ["sep6-deposit", "sep31-sender", "sep31-receiver"]:
                return self.needs_basic_info
            elif params.get("type") in [None, "sep6-withdraw"]:
                return self.needs_all_info
            else:
                raise ValueError(
                    _("invalid 'type'. see /info response for valid values.")
                )
        response_data = {"id": str(user.id)}
        basic_info_accepted = {
            "provided_fields": {
                "first_name": {
                    "description": "first name of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
                "last_name": {
                    "description": "last name of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
                "email_address": {
                    "description": "email address of the customer",
                    "type": "string",
                    "status": "ACCEPTED",
                },
            }
        }
        if (user.bank_number and user.bank_account_number) or (
            params.get("type") in ["sep6-deposit", "sep31-sender", "sep31-receiver"]
        ):
            # Either all required info is already on file, or the requested
            # type only needs the basic fields.
            response_data.update(self.accepted)
            response_data.update(basic_info_accepted)
            if user.bank_number and user.bank_account_number:
                response_data["provided_fields"].update(
                    {
                        "bank_account_number": {
                            "description": "bank account number of the customer",
                            "type": "string",
                            "status": "ACCEPTED",
                        },
                        "bank_number": {
                            "description": "routing number of the customer",
                            "type": "string",
                            "status": "ACCEPTED",
                        },
                    }
                )
        elif params.get("type") in [None, "sep6-withdraw"]:
            # Withdrawal-style types additionally require bank details.
            response_data.update(basic_info_accepted)
            response_data.update(self.needs_bank_info)
        else:
            raise ValueError(_("invalid 'type'. see /info response for valid values."))
        return response_data

    def put(
        self, token: SEP10Token, request: Request, params: Dict, *args, **kwargs
    ) -> str:
        """Create or update a customer record; returns the customer id as a string.

        Raises ObjectDoesNotExist for an unknown 'id' and ValueError for
        missing required fields or a conflicting email address.
        """
        if params.get("id"):
            user = PolarisUser.objects.filter(id=params["id"]).first()
            if not user:
                raise ObjectDoesNotExist("could not identify user customer 'id'")
        else:
            account = PolarisStellarAccount.objects.filter(
                account=params["account"],
                memo=params.get("memo"),
                memo_type=params.get("memo_type"),
            ).first()
            if not account:
                # email_address is a secondary ID
                if "email_address" not in params:
                    raise ValueError(
                        "SEP-9 fields were not passed for new customer. "
                        "'first_name', 'last_name', and 'email_address' are required."
                    )
                # find existing user by previously-specified email
                user = PolarisUser.objects.filter(email=params["email_address"]).first()
                if user:
                    # Link a new Stellar account to the existing user.
                    # Bug fix: memo/memo_type are optional, so use .get()
                    # instead of direct indexing (KeyError in the original).
                    account = PolarisStellarAccount.objects.create(
                        user=user,
                        account=params["account"],
                        memo=params.get("memo"),
                        memo_type=params.get("memo_type"),
                    )
                    send_confirmation_email(user, account)
                else:
                    user, account = self.create_new_user(params)
                    send_confirmation_email(user, account)
            else:
                user = account.user
        # Reject an email change that collides with another user's address.
        # Bug fix: only consult the provided email when it exists (the
        # original indexed params["email_address"] and could raise KeyError).
        new_email = params.get("email_address")
        if (
            new_email
            and user.email != new_email
            and PolarisUser.objects.filter(email=new_email).exists()
        ):
            raise ValueError("email_address is taken")
        user.email = new_email or user.email
        user.first_name = params.get("first_name") or user.first_name
        user.last_name = params.get("last_name") or user.last_name
        user.bank_number = params.get("bank_number") or user.bank_number
        user.bank_account_number = (
            params.get("bank_account_number") or user.bank_account_number
        )
        user.save()
        return str(user.id)

    def delete(
        self,
        token: SEP10Token,
        request: Request,
        account: str,
        memo: Optional[str],
        memo_type: Optional[str],
        *args,
        **kwargs,
    ):
        """Delete the customer associated with the given Stellar account (+memo).

        Raises ObjectDoesNotExist when no matching account record exists.
        """
        qparams = {"account": account, "memo": memo, "memo_type": memo_type}
        account = PolarisStellarAccount.objects.filter(**qparams).first()
        if not account:
            raise ObjectDoesNotExist()
        # Deleting the user cascades to its related account records.
        account.user.delete()

    @staticmethod
    def create_new_user(params):
        """Create a PolarisUser plus its PolarisStellarAccount from SEP-9 params.

        Raises ValueError when any of the required identity fields is missing.
        """
        if not all(f in params for f in ["first_name", "last_name", "email_address"]):
            raise ValueError(
                "SEP-9 fields were not passed for new customer. "
                "'first_name', 'last_name', and 'email_address' are required."
            )
        user = PolarisUser.objects.create(
            first_name=params["first_name"],
            last_name=params["last_name"],
            email=params["email_address"],
            bank_number=params.get("bank_number"),
            bank_account_number=params.get("bank_account_number"),
        )
        account = PolarisStellarAccount.objects.create(
            user=user,
            account=params["account"],
            memo=params.get("memo"),
            memo_type=params.get("memo_type"),
        )
        return user, account
class MySEP31ReceiverIntegration(SEP31ReceiverIntegration):
    """SEP-31 receiving-anchor integration for the reference server."""

    def info(
        self,
        request: Request,
        asset: Asset,
        lang: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """Return the SEP-31 /info entry for *asset*: the SEP-12 customer
        types required for senders/receivers and the per-transaction fields
        this anchor needs.
        """
        return {
            "sep12": {
                "sender": {
                    "types": {
                        "sep31-sender": {
                            "description": "the basic type for sending customers"
                        }
                    }
                },
                "receiver": {
                    "types": {
                        "sep31-receiver": {
                            "description": "the basic type for receiving customers"
                        }
                    }
                },
            },
            "fields": {
                "transaction": {
                    "routing_number": {
                        "description": "routing number of the destination bank account"
                    },
                    "account_number": {
                        "description": "bank account number of the destination"
                    },
                },
            },
        }

    def process_post_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ) -> Optional[Dict]:
        """Validate a POST /transactions request.

        Returns an error dict for invalid payloads or unknown receivers,
        otherwise records the transaction against the receiving user and
        returns None.
        """
        # sender_id is accepted but not used by this anchor. Bug fix: the
        # original bound it to "_", shadowing the gettext alias called below.
        receiver_id = params.get("receiver_id")
        transaction_fields = params.get("fields", {}).get("transaction")
        for field, val in transaction_fields.items():
            if not isinstance(val, str):
                return {"error": f"'{field}'" + _(" is not of type str")}
        receiving_user = PolarisUser.objects.filter(id=receiver_id).first()
        if not receiving_user:
            return {"error": "customer_info_needed", "type": "sep31-receiver"}
        elif not (receiving_user.bank_account_number and receiving_user.bank_number):
            # Backfill missing bank details from the transaction fields.
            receiving_user.bank_account_number = transaction_fields["account_number"]
            receiving_user.bank_number = transaction_fields["routing_number"]
            receiving_user.save()
        transaction.save()
        PolarisUserTransaction.objects.create(
            user=receiving_user, transaction_id=transaction.id
        )

    def process_patch_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs,
    ):
        """Apply updated transaction fields after pending_transaction_info_update.

        Raises ValueError when the payload is malformed, contains unexpected
        fields, or contains non-string values.
        """
        info_fields = params.get("fields", {})
        transaction_fields = info_fields.get("transaction", {})
        if not isinstance(transaction_fields, dict):
            raise ValueError(_("'transaction' value must be an object"))
        # Collect every field name advertised by info() for this asset.
        # Bug fixes vs the original:
        #   * info() requires the request as its first positional argument
        #   * set.union() returns a new set; accumulate with |= instead
        possible_fields = set()
        for obj in self.info(request, transaction.asset)["fields"].values():
            possible_fields |= set(obj.keys())
        update_fields = list(transaction_fields.keys())
        if not update_fields:
            raise ValueError(_("No fields provided"))
        elif any(f not in possible_fields for f in update_fields):
            raise ValueError(_("unexpected fields provided"))
        # Bug fix: validate the provided *values*; the original indexed the
        # list of names by field name, which raised TypeError.
        elif not all(isinstance(transaction_fields[f], str) for f in update_fields):
            raise ValueError(_("field values must be strings"))
        user = (
            PolarisUserTransaction.objects.filter(transaction_id=transaction.id)
            .first()
            .user
        )
        # Bug fix: both fields may be updated in one request (the original
        # 'elif' silently dropped account_number when routing_number was
        # also present).
        if "routing_number" in update_fields:
            user.bank_number = transaction_fields["routing_number"]
        if "account_number" in update_fields:
            user.bank_account_number = transaction_fields["account_number"]
        user.save()

    def valid_sending_anchor(
        self, token: SEP10Token, request: Request, public_key: str, *args, **kwargs
    ) -> bool:
        """Accept every sending anchor (reference-server behavior only)."""
        # A real anchor would check if public_key belongs to a partner anchor
        return True
class MyRailsIntegration(RailsIntegration):
    """Mock banking-rails integration for the reference server."""

    def poll_pending_deposits(
        self, pending_deposits: QuerySet, *args, **kwargs
    ) -> List[Transaction]:
        """
        Anchors should implement their banking rails here, as described
        in the :class:`.RailsIntegration` docstrings.
        This implementation interfaces with a fake banking rails client
        for demonstration purposes.
        """
        # interface with mock banking rails
        ready_deposits = []
        mock_bank_account_id = "XXXXXXXXXXXXX"
        client = rails.BankAPIClient(mock_bank_account_id)
        for deposit in pending_deposits:
            bank_deposit = client.get_deposit(deposit=deposit)
            if bank_deposit and bank_deposit.status == "complete":
                # Default the deposited amount when none was recorded
                # (reference-server convenience value).
                if not deposit.amount_in:
                    deposit.amount_in = Decimal(103)
                # Recompute the fee when the banked amount disagrees with the
                # recorded amount_in, or when no fee has been stored yet.
                if bank_deposit.amount != deposit.amount_in or not deposit.amount_fee:
                    deposit.amount_fee = calculate_fee(
                        {
                            "amount": deposit.amount_in,
                            "operation": settings.OPERATION_DEPOSIT,
                            "asset_code": deposit.asset.code,
                        }
                    )
                deposit.amount_out = round(
                    deposit.amount_in - deposit.amount_fee,
                    deposit.asset.significant_decimals,
                )
                deposit.save()
                ready_deposits.append(deposit)
        return ready_deposits

    def poll_outgoing_transactions(
        self, transactions: QuerySet, *args, **kwargs
    ) -> List[Transaction]:
        """
        Auto-complete pending_external transactions
        An anchor would typically collect information on the transactions passed
        and return only the transactions that have completed the external transfer.
        """
        return list(transactions)

    def execute_outgoing_transaction(self, transaction: Transaction, *args, **kwargs):
        """Deliver an outgoing (withdrawal / SEP-31) payment via the mock bank.

        Looks up the user tied to *transaction*, computes the fee if missing,
        and sends the net amount through the mock rails client.
        """
        def error():
            # Mark the transaction failed when no user record can be located.
            transaction.status = Transaction.STATUS.error
            transaction.status_message = (
                f"Unable to find user info for transaction {transaction.id}"
            )
            transaction.save()

        logger.info("fetching user data for transaction")
        user_transaction = PolarisUserTransaction.objects.filter(
            transaction_id=transaction.id
        ).first()
        if not user_transaction:  # something is wrong with our user tracking code
            error()
            return
        # SEP31 users don't have stellar accounts, so check the user column on the transaction.
        # Since that is a new column, it may be None. If so, use the account's user column
        if user_transaction.user:
            user = user_transaction.user
        else:
            user = getattr(user_transaction.account, "user", None)
        if not user:  # something is wrong with our user tracking code
            error()
            return
        # Pick the fee operation by transaction kind.
        # NOTE(review): the withdrawal branch uses a settings constant while the
        # other branch uses Transaction.KIND.send — confirm both are valid
        # "operation" values for calculate_fee().
        if transaction.kind == Transaction.KIND.withdrawal:
            operation = settings.OPERATION_WITHDRAWAL
        else:
            operation = Transaction.KIND.send
        if not transaction.amount_fee:
            transaction.amount_fee = calculate_fee(
                {
                    "amount": transaction.amount_in,
                    "operation": operation,
                    "asset_code": transaction.asset.code,
                }
            )
        transaction.amount_out = round(
            transaction.amount_in - transaction.amount_fee,
            transaction.asset.significant_decimals,
        )
        client = rails.BankAPIClient("fake anchor bank account number")
        response = client.send_funds(
            to_account=user.bank_account_number,
            amount=transaction.amount_in - transaction.amount_fee,
        )
        if response["success"]:
            logger.info(f"successfully sent mock outgoing transaction {transaction.id}")
            transaction.status = Transaction.STATUS.pending_external
        else:
            # Parse a mock bank API response to demonstrate how an anchor would
            # report back to the sending anchor which fields needed updating.
            # NOTE(review): 'success' is read with dict access above but the
            # error is read with attribute access here — confirm the mock
            # client's response type supports both.
            error_fields = response.error.fields
            # NOTE(review): info() has signature (request, asset, lang=None);
            # this call passes the asset as `request` and omits `asset`.
            # Also, info() returns top-level keys "sep12"/"fields" — there is
            # no "receiver" key, so the lookups below would raise KeyError if
            # this branch ever runs. Confirm the intended field mapping.
            info_fields = MySEP31ReceiverIntegration().info(transaction.asset)
            required_info_update = defaultdict(dict)
            for field in error_fields:
                if "name" in field:
                    required_info_update["receiver"][field] = info_fields["receiver"][
                        field
                    ]
                elif "account" in field:
                    required_info_update["transaction"][field] = info_fields[
                        "receiver"
                    ][field]
            transaction.required_info_update = json.dumps(required_info_update)
            transaction.required_info_message = response.error.message
            transaction.status = Transaction.STATUS.pending_transaction_info_update
        transaction.save()
def fee_integration(fee_params: Dict, *args, **kwargs) -> Decimal:
    """Compute the fee for a request by delegating to Polaris' default.

    Registered in place of the default fee function purely for demonstration;
    there is no custom fee logic, so the replaced default calculation is
    invoked unchanged.
    """
    default_fee = calculate_fee(fee_params)
    return default_fee
def info_integration(request: Request, asset: Asset, lang: str):
    """Return the SEP-6 /info 'fields' and 'types' for this anchor.

    Raises ValueError when *lang* is not one of the server's configured
    languages. *asset* is ignored because this reference server only
    supports SRT.
    """
    # Not using `asset` since this reference server only supports SRT
    supported_languages = [code for code, _name in server_settings.LANGUAGES]
    if lang and lang not in supported_languages:
        # Bug fix: include a message so the resulting 400 response is
        # diagnosable (the original raised a bare ValueError()).
        raise ValueError(f"unsupported language: {lang}")
    return {
        "fields": {
            "type": {
                "description": _("'bank_account' is the only value supported'"),
                "choices": ["bank_account"],
            },
        },
        "types": {
            "bank_account": {
                "fields": {
                    "dest": {"description": _("bank account number")},
                    "dest_extra": {"description": _("bank routing number")},
                }
            }
        },
    }
|
en
| 0.894118
|
Sends a confirmation email to user.email In a real production deployment, you would never want to send emails as part of the request/response cycle. Instead, use a job queue service like Celery. This reference server is not intended to handle heavy traffic so we are making an exception here. # email body if the HTML is not rendered Creates a PolarisUserTransaction object, and depending on the form passed, also creates a new PolarisStellarAccount and potentially a new PolarisUser. This function ensures an accurate record of a particular person's activity. Returns a KYCForm if there is no record of this stellar account, otherwise returns None. # Unknown stellar account, get KYC info # When in local mode, request session's are not authenticated, # which means account confirmation cannot be skipped. So we'll # return None instead of returning the confirm email page. # We're waiting on the user to send an off-chain payment # Since no polaris account exists for this transaction, KYCForm # will be returned from the next form_for_transaction() call # Here is where you would normally return something like this: # { # "type": "customer_info_status", # "status": "pending" # } # However, we're not going to block the client from completing # the flow since this is a reference server. # request is valid, return success data and add transaction to user model # template == Template.MORE_INFO # Since no polaris account exists for this transaction, KYCForm # will be returned from the next form_for_transaction() call # Here is where you would normally return something like this: # { # "type": "customer_info_status", # "status": "pending" # } # However, we're not going to block the client from completing # the flow since this is a reference server. 
# email_address is a secondary ID # find existing user by previously-specified email # not actually used # A real anchor would check if public_key belongs to a partner anchor Anchors should implement their banking rails here, as described in the :class:`.RailsIntegration` docstrings. This implementation interfaces with a fake banking rails client for demonstration purposes. # interface with mock banking rails Auto-complete pending_external transactions An anchor would typically collect information on the transactions passed and return only the transactions that have completed the external transfer. # something is wrong with our user tracking code # SEP31 users don't have stellar accounts, so check the user column on the transaction. # Since that is a new column, it may be None. If so, use the account's user column # something is wrong with our user tracking code # Parse a mock bank API response to demonstrate how an anchor would # report back to the sending anchor which fields needed updating. This function replaces the default registered_fee_func for demonstration purposes. However, since we don't have any custom logic to implement, it simply calls the default that has been replaced. # Not using `asset` since this reference server only supports SRT
| 1.978223
| 2
|
jobs/appointment_reminder/send_email_reminder.py
|
saravanpa-aot/queue-management
| 0
|
6627304
|
<filename>jobs/appointment_reminder/send_email_reminder.py
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Update Payment Job.
This module is being invoked from a job and it cleans up the stale records
"""
import os
import sys
import time
from app.utilities.ches_email import send_email, generate_ches_token
from flask import Flask
from jinja2 import Environment, FileSystemLoader
import config
from utils.appointment import get_reminders
from utils.logging import setup_logging
setup_logging(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.conf')) # important to do this first
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
    """Application factory: build a Flask app configured for *run_mode*."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(config.CONFIGURATION[run_mode])
    register_shellcontext(flask_app)
    return flask_app
def register_shellcontext(app):
    """Attach a shell-context provider that exposes the app object."""

    def shell_context():
        """Objects made available inside `flask shell`."""
        return dict(app=app)

    app.shell_context_processor(shell_context)
def run():
    """Entry point: build the app, push its context, and send reminders."""
    flask_app = create_app()
    flask_app.app_context().push()
    send_reminders(flask_app)
def send_reminders(app):
    """Send email reminders for next day appointments.

    Renders the confirmation template for each reminder returned by the
    appointment API and sends it through CHES. Pauses for a minute after
    every MAX_EMAIL_PER_BATCH emails and refreshes the CHES token on resume
    so the token cannot expire mid-run.
    """
    app.logger.debug('<<< Starting job')
    # CHES token
    ches_token = generate_ches_token()
    reminders = get_reminders(app=app)
    if reminders:
        sender = app.config.get('MAIL_FROM_ID')
        app_url = app.config.get('EMAIL_APPOINTMENT_APP_URL')
        # Locate the email templates that ship inside the API package.
        app_folder = [folder for folder in sys.path if 'api/api' in folder][0]
        template_path = app_folder.replace('api/api', 'api/api/email_templates')
        env = Environment(loader=FileSystemLoader(template_path), autoescape=True)
        template = env.get_template('confirmation_email.html')
        max_email_per_batch = app.config.get('MAX_EMAIL_PER_BATCH')
        app.logger.info('Maximum email per batch %s', max_email_per_batch)
        # Bug fix: guard against a response missing 'appointments' (the
        # original crashed on len(None)). Also use the app logger instead of
        # bare print() for consistency with the surrounding logger calls.
        appointments = reminders.json().get('appointments') or []
        email_count = 0
        app.logger.info('found %d reminders to send!', len(appointments))
        for appointment in appointments:
            try:
                subject = 'Confirmation – Your appointment on {}'.format(appointment.get('day'))
                body = template.render(display_name=appointment.get('display_name'),
                                       location=appointment.get('location'),
                                       formatted_date=appointment.get('formatted_date'),
                                       duration=appointment.get('duration'),
                                       telephone=appointment.get('telephone'),
                                       service_name=appointment.get('service_name'),
                                       civic_address=appointment.get('civic_address'),
                                       service_email_paragraph=appointment.get('service_email_paragraph'),
                                       office_email_paragraph=appointment.get('office_email_paragraph'),
                                       url=app_url)
                send_email(ches_token, subject, appointment.get('email'), sender, body)
                email_count += 1
            except Exception as e:  # log and continue with the remaining reminders
                app.logger.error(e)
            if email_count == max_email_per_batch:
                app.logger.info('Pausing for a minute')
                time.sleep(60)
                email_count = 0
                # To handle token expiry, get a new token when the task resumes.
                ches_token = generate_ches_token()
    app.logger.debug('Ending job>>>')
if __name__ == "__main__":
run()
|
<filename>jobs/appointment_reminder/send_email_reminder.py
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Update Payment Job.
This module is being invoked from a job and it cleans up the stale records
"""
import os
import sys
import time
from app.utilities.ches_email import send_email, generate_ches_token
from flask import Flask
from jinja2 import Environment, FileSystemLoader
import config
from utils.appointment import get_reminders
from utils.logging import setup_logging
setup_logging(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.conf')) # important to do this first
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
"""Return a configured Flask App using the Factory method."""
app = Flask(__name__)
app.config.from_object(config.CONFIGURATION[run_mode])
register_shellcontext(app)
return app
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'app': app
}
app.shell_context_processor(shell_context)
def run():
application = create_app()
application.app_context().push()
send_reminders(application)
def send_reminders(app):
"""Send email reminders for next day appointments."""
app.logger.debug('<<< Starting job')
# CHES token
ches_token = generate_ches_token()
reminders = get_reminders(app=app)
if reminders:
sender = app.config.get('MAIL_FROM_ID')
app_url = app.config.get('EMAIL_APPOINTMENT_APP_URL')
app_folder = [folder for folder in sys.path if 'api/api' in folder][0]
template_path = app_folder.replace('api/api', 'api/api/email_templates')
env = Environment(loader=FileSystemLoader(template_path), autoescape=True)
template = env.get_template('confirmation_email.html')
max_email_per_batch = app.config.get('MAX_EMAIL_PER_BATCH')
print(f'Maximum email per batch {max_email_per_batch}')
appointments = reminders.json()
email_count = 0
print('found {} reminders to send!'.format(len(appointments.get('appointments'))))
for appointment in appointments.get('appointments'):
try:
subject = 'Confirmation – Your appointment on {}'.format(appointment.get('day'))
body = template.render(display_name=appointment.get('display_name'),
location=appointment.get('location'),
formatted_date=appointment.get('formatted_date'),
duration=appointment.get('duration'),
telephone=appointment.get('telephone'),
service_name=appointment.get('service_name'),
civic_address=appointment.get('civic_address'),
service_email_paragraph=appointment.get('service_email_paragraph'),
office_email_paragraph=appointment.get('office_email_paragraph'),
url=app_url)
send_email(ches_token, subject, appointment.get('email'), sender, body)
email_count += 1
except Exception as e:
print(e) # log and continue
if email_count == max_email_per_batch:
print('Pausing for a minute')
time.sleep(60)
email_count = 0
# To handle token expiry, get a new token when the task resumes.
ches_token = generate_ches_token()
app.logger.debug('Ending job>>>')
if __name__ == "__main__":
run()
|
en
| 0.829123
|
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The Update Payment Job. This module is being invoked from a job and it cleans up the stale records # important to do this first Return a configured Flask App using the Factory method. Register shell context objects. Shell context objects. Send email reminders for next day appointments. # CHES token # log and continue # To handle token expiry, get a new token when the task resumes.
| 2.260562
| 2
|
script.py
|
GSPuniani/automated-greenhouse-forms
| 0
|
6627305
|
<reponame>GSPuniani/automated-greenhouse-forms
# Web form automation
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support.ui import Select
# Accessing environment variables
import os
import yaml


def type_into_xpath(target_browser, xpath, value):
    """Type *value* into the element located by *xpath*.

    find_element_by_xpath raises NoSuchElementException when the element is
    missing, so the returned element is always truthy — the original script's
    `if element:` checks were dead code and are omitted here.
    """
    target_browser.find_element_by_xpath(xpath).send_keys(value)


def choose_option(target_browser, element_id, value):
    """Select the option with visible text *value* in the dropdown *element_id*."""
    Select(target_browser.find_element_by_id(element_id)).select_by_visible_text(value)


# Chromedriver
chromedriver_location = f"/Users/{os.environ['USER']}/Downloads/chromedriver"
# browser = webdriver.Safari()
browser = webdriver.Chrome(chromedriver_location)
# binary = FirefoxBinary('path/to/installed firefox binary')
# browser = webdriver.Firefox(firefox_binary=binary)

# Retrieve info from YAML file
with open('config.yml', 'r') as file:
    info = yaml.safe_load(file)

# Job Application URL
browser.get(info['url'])

# Free-text inputs as (xpath, config key), filled in page order:
# First Name, Last Name, Email, Phone, City, LinkedIn.
TEXT_FIELDS = [
    ('//*[@id="first_name"]', 'first_name'),
    ('//*[@id="last_name"]', 'last_name'),
    ('//*[@id="email"]', 'email'),
    ('//*[@id="phone"]', 'phone'),
    ('//*[@id="job_application_location"]', 'city'),
    ('//*[@id="job_application_answers_attributes_15_text_value"]', 'linkedin'),
]
for xpath, key in TEXT_FIELDS:
    type_into_xpath(browser, xpath, info[key])

# School
# school = browser.find_element_by_xpath('//*[@id="s2id_autogen1"]')
# if school:
#     school.send_keys(info['school'])

# Education dropdowns
choose_option(browser, 'education_degree_0', info['degree'])          # Degree
choose_option(browser, 'education_discipline_0', info['discipline'])  # Discipline

# Education start/end dates (month and year inputs inside the education fieldsets)
DATE_FIELDS = [
    ('//*[@id="education_section"]/div[1]/fieldset/div[4]/fieldset/input[1]', 'start_month'),
    ('//*[@id="education_section"]/div[1]/fieldset/div[4]/fieldset/input[2]', 'start_year'),
    ('//*[@id="education_section"]/div[1]/fieldset/div[5]/fieldset/input[1]', 'end_month'),
    ('//*[@id="education_section"]/div[1]/fieldset/div[5]/fieldset/input[2]', 'end_year'),
]
for xpath, key in DATE_FIELDS:
    type_into_xpath(browser, xpath, info[key])

# Confirm all information is true by clicking checkbox
browser.find_element_by_xpath('//*[@id="job_application_answers_attributes_0_answer_selected_options_attributes_0_question_option_id"]').click()

# Question dropdowns as (element id, config key), filled in page order:
# GPAs, test scores, employment history, qualifications, EEO questions.
SELECT_FIELDS = [
    ('job_application_answers_attributes_1_answer_selected_options_attributes_1_question_option_id', 'gpa_undergrad'),
    ('job_application_answers_attributes_2_answer_selected_options_attributes_2_question_option_id', 'gpa_grad'),
    ('job_application_answers_attributes_3_answer_selected_options_attributes_3_question_option_id', 'gpa_doctorate'),
    ('job_application_answers_attributes_4_answer_selected_options_attributes_4_question_option_id', 'sat_score'),
    ('job_application_answers_attributes_5_answer_selected_options_attributes_5_question_option_id', 'act_score'),
    ('job_application_answers_attributes_6_answer_selected_options_attributes_6_question_option_id', 'gre_score'),
    ('job_application_answers_attributes_7_answer_selected_options_attributes_7_question_option_id', 'gmat_score'),
    ('job_application_answers_attributes_8_answer_selected_options_attributes_8_question_option_id', 'spacex_history'),
    ('job_application_answers_attributes_10_answer_selected_options_attributes_10_question_option_id', 'work_exp'),
    ('job_application_answers_attributes_11_boolean_value', 'basic_qualifications'),
    ('job_application_answers_attributes_17_answer_selected_options_attributes_17_question_option_id', 'hear_job'),
    ('job_application_answers_attributes_19_answer_selected_options_attributes_19_question_option_id', 'legal_auth'),
    ('job_application_answers_attributes_20_answer_selected_options_attributes_20_question_option_id', 'citizen_status'),
    ('job_application_gender', 'gender'),
    ('job_application_hispanic_ethnicity', 'hispanic_lat'),
    ('job_application_veteran_status', 'veteran'),
    ('job_application_disability_status', 'disability'),
]
for element_id, key in SELECT_FIELDS:
    choose_option(browser, element_id, info[key])
|
# Web form automation: fills out a job application form with values loaded
# from config.yml. The browser window is deliberately left open at the end
# so the applicant can review the form before submitting it manually.
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support.ui import Select

# Accessing environment variables
import os

import yaml


def fill_text_field(xpath, value):
    """Locate a text input by XPath and type *value* into it.

    find_element_by_xpath raises NoSuchElementException when the element is
    missing (it never returns a falsy element), so the former
    ``if element:`` guards were unreachable no-ops and have been removed.
    """
    browser.find_element_by_xpath(xpath).send_keys(value)


def select_option(element_id, value):
    """Locate a <select> by id and pick the option whose visible text is *value*."""
    Select(browser.find_element_by_id(element_id)).select_by_visible_text(value)


# Chromedriver (assumed to sit in the current user's Downloads folder).
chromedriver_location = f"/Users/{os.environ['USER']}/Downloads/chromedriver"
# browser = webdriver.Safari()
browser = webdriver.Chrome(chromedriver_location)
# binary = FirefoxBinary('path/to/installed firefox binary')
# browser = webdriver.Firefox(firefox_binary=binary)

# Retrieve applicant info from the YAML config file.
with open('config.yml', 'r') as file:
    info = yaml.safe_load(file)

# Job Application URL
browser.get(info['url'])

# Basic contact details.
fill_text_field('//*[@id="first_name"]', info['first_name'])
fill_text_field('//*[@id="last_name"]', info['last_name'])
fill_text_field('//*[@id="email"]', info['email'])
fill_text_field('//*[@id="phone"]', info['phone'])
fill_text_field('//*[@id="job_application_location"]', info['city'])
fill_text_field('//*[@id="job_application_answers_attributes_15_text_value"]', info['linkedin'])

# School
# school = browser.find_element_by_xpath('//*[@id="s2id_autogen1"]')
# if school:
#     school.send_keys(info['school'])

# Education: degree and discipline dropdowns.
select_option('education_degree_0', info['degree'])
select_option('education_discipline_0', info['discipline'])

# Education start/end dates (month and year are separate inputs).
fill_text_field('//*[@id="education_section"]/div[1]/fieldset/div[4]/fieldset/input[1]', info['start_month'])
fill_text_field('//*[@id="education_section"]/div[1]/fieldset/div[4]/fieldset/input[2]', info['start_year'])
fill_text_field('//*[@id="education_section"]/div[1]/fieldset/div[5]/fieldset/input[1]', info['end_month'])
fill_text_field('//*[@id="education_section"]/div[1]/fieldset/div[5]/fieldset/input[2]', info['end_year'])

# Confirm all information is true by clicking the checkbox.
browser.find_element_by_xpath('//*[@id="job_application_answers_attributes_0_answer_selected_options_attributes_0_question_option_id"]').click()

# Remaining dropdown questions as (element id, info key) pairs, in form order.
DROPDOWN_FIELDS = [
    # GPA and standardized-test scores
    ('job_application_answers_attributes_1_answer_selected_options_attributes_1_question_option_id', 'gpa_undergrad'),
    ('job_application_answers_attributes_2_answer_selected_options_attributes_2_question_option_id', 'gpa_grad'),
    ('job_application_answers_attributes_3_answer_selected_options_attributes_3_question_option_id', 'gpa_doctorate'),
    ('job_application_answers_attributes_4_answer_selected_options_attributes_4_question_option_id', 'sat_score'),
    ('job_application_answers_attributes_5_answer_selected_options_attributes_5_question_option_id', 'act_score'),
    ('job_application_answers_attributes_6_answer_selected_options_attributes_6_question_option_id', 'gre_score'),
    ('job_application_answers_attributes_7_answer_selected_options_attributes_7_question_option_id', 'gmat_score'),
    # Employment history and qualifications
    ('job_application_answers_attributes_8_answer_selected_options_attributes_8_question_option_id', 'spacex_history'),
    ('job_application_answers_attributes_10_answer_selected_options_attributes_10_question_option_id', 'work_exp'),
    ('job_application_answers_attributes_11_boolean_value', 'basic_qualifications'),
    ('job_application_answers_attributes_17_answer_selected_options_attributes_17_question_option_id', 'hear_job'),
    ('job_application_answers_attributes_19_answer_selected_options_attributes_19_question_option_id', 'legal_auth'),
    ('job_application_answers_attributes_20_answer_selected_options_attributes_20_question_option_id', 'citizen_status'),
    # Demographic questions
    ('job_application_gender', 'gender'),
    ('job_application_hispanic_ethnicity', 'hispanic_lat'),
    ('job_application_veteran_status', 'veteran'),
    ('job_application_disability_status', 'disability'),
]
for element_id, info_key in DROPDOWN_FIELDS:
    select_option(element_id, info[info_key])
|
en
| 0.695914
|
# Web form automation # Accessing environment variables # Chromedriver # browser = webdriver.Safari() # binary = FirefoxBinary('path/to/installed firefox binary') # browser = webdriver.Firefox(firefox_binary=binary) # Retrieve info from YAML file # Job Application URL # First Name # Last Name # Email # Phone # City # LinkedIn # School # school = browser.find_element_by_xpath('//*[@id="s2id_autogen1"]') # if school: # school.send_keys(info['school']) # Degree # Discipline # Start Date # End Date # Confirm all information is true by clicking checkbox # Undergraduate GPA # Graduate GPA # Doctorate GPA # SAT Score # ACT Score # GRE Score # GMAT Score # SpaceX Employment History # Years of Professional Work Experience # Basic Qualifications Satisfied # How did you hear about this job? # Legal authorization # Citizenship Status # Gender # Hispanic/Latino # Veteran Status # Disability Status
| 2.556002
| 3
|
homeassistant/components/ais_google_home/const.py
|
stravinci/AIS-home-assistant
| 5
|
6627306
|
<reponame>stravinci/AIS-home-assistant<filename>homeassistant/components/ais_google_home/const.py
"""Constants for the ais_google_home."""
DOMAIN = "ais_google_home"
CONF_OAUTH_JSON = "oauth_json"
|
"""Constants for the ais_google_home."""
DOMAIN = "ais_google_home"
CONF_OAUTH_JSON = "oauth_json"
|
en
| 0.398215
|
Constants for the ais_google_home.
| 0.994987
| 1
|
src/components/fakedata/__init__.py
|
phong10119/sever-freshfarm
| 1
|
6627307
|
<filename>src/components/fakedata/__init__.py
from src.models.user import db, User, OAuth, Token, Order, Order_item, Order_status
from src.models.product import Product, Inventory, Rating, Category
from src.models.trading import Shipment, Invoice, Invoice_status, Payment
import random
from flask import Blueprint , render_template, jsonify
# Blueprint under which the fake-data routes are registered (name 'fakebp').
fakedata_blueprint = Blueprint('fakebp', __name__)
# Product category names (presumably seeded into Category rows — confirm
# against the seeding routes).
categories = [
    'fruits', 'vegetables', 'seasoning'
]
# Inventory/warehouse location names.
inventories = ['District 7', 'Thu Duc district']
# Seed products, grouped by category. Each entry is
# [product_name, image_url, unit] — 'gam' for weight-priced items, 'unit'
# for items sold per piece.
fruit_product = [
    ['Apple', 'https://www.walmart.ca/en/ip/apple-gala/6000195494284', 'gam'],
    ['Avocado', 'https://images.eatsmarter.de/sites/default/files/styles/576x432/public/avocado-fotolia-600x450.jpg', 'gam'],
    ['Banana', 'http://buyfv.com/wp-content/uploads/2019/01/10000025-2_3-fresho-banana-robusta.jpg', 'gam'],
    ['Coconut', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTVMEklVSrhnZTPMcMz8t4d5x-NGLFDBZ703bFG6r_sDKntyn9w&s', 'unit'],
    ['Grape', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcROfRR0dudAEg7DFfMoRQom_kXXrrTsw8FgWVHbhKR60Nf2oMAUiw&s', 'gam'],
    ['Mango', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSz6jtchGZGiR38Cj8FdzywopoMSiyo7gJON8J2FmYdxTsrUEbb&s', 'gam'],
    ['Orange', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcToBnHvC2lea0nC8LecgwotZiI7RhCFJsTv0JKPttLzLQvFdFF7&s', 'gam'],
    ['Dragon fruit', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQFxguw9NULcOIKmSUUMP4a9uQos0xmanvo4QPI2BRb3YdfMJ8nZQ&s', 'gam'],
    ['Watermelon', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRkL4UyUjb81Ecw4Z1SDA-JFV9oe2zgxlv4_99VBERkvWichiUz&s', 'gam'],
    ['Pineaple', 'https://i5.walmartimages.com/asr/dd2a5d3c-d358-4579-8ece-59ce1804ab5b_9.0b874251fccc645fd98ac76e797c2d2a.jpeg?odnWidth=450&odnHeight=450&odnBg=ffffff', 'gam'],
    ['Papayya', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQaqNWeGRhl-m7m0KmYxmOxncf3lWA8tNe2Tzd-o_zBXn4PxsaCAA&s', 'gam'],
]
vegetable_product = [
    ['Bell pepper', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTrcDPSIQqP1Uo1lK7GUlYRSpCf1edmQtEGGEJ5ay4QbAdQObwIDQ&s', 'gam'],
    ['Cauliflower', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTOGNxkCVGuTZ2-E7L4WnidWPbZT63F6fKKblotH7n5H8F8GUY&s', 'gam'],
    ['Cabbage', 'https://pcdn.columbian.com/wp-content/uploads/2019/08/0830_met_cabbage-1226x0-c-default.jpg', 'gam'],
    ['Carrot', 'https://i5.walmartimages.ca/images/Enlarge/271/747/6000191271747.jpg', 'gam'],
    ['Cucumber', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTzFuperqiyoF-6b2Vz6FWv0wndZ9jFdkABGLbnD_xvOPr3tBqRdA&s', 'gam'],
    ['Tomato', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTnNWU9oih_G799tg1sc41vK5VGroGcK4XmudN2Zi_OTxZs6jIBGA&s', 'gam'],
    ['Pumpkin', 'https://www.duluthnewstribune.com/incoming/4684986-wtscwa-pumpkin-web.jpg/alternates/BASE_LANDSCAPE/pumpkin%20web.jpg', 'gam'],
    ['Green bean', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSEBESKVXPO9nYPU8cwLGqjaNKBpHcobcSdVEjxeD1UYXWQhMgUiA&s', 'gam'],
]
seasoning_product = [
    ['Onion', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcS6LOWhat5UFSjK3YcU-hCyC2A6b8sSZf3g0taMFPTT2vBZAgy6&s', 'gam'],
    ['Garlic', 'https://www.basketbazzar.com/wp-content/uploads/2019/05/Garlic.jpg', 'gam'],
    ['Turmeric', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT9H01mkElD1fKidz9sOUqhDPSdrCGNY5DINkQ1Ls_4Kmlri0plzg&s', 'gam'],
    ['Green onion', 'https://cdn.shopify.com/s/files/1/0135/9839/2378/products/Grreen_Onion_Bulb_800x560.png?v=1558314353', 'gam'],
    ['Pepper', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQB7vIiFa02_CsFtZreVdTJsijjy5Hf_wiD1NB6NqS4sUBZG9aRWg&s', 'gam'],
]
# Placeholder (lorem-ipsum) product description texts used for seeding.
# Each entry holds a single "name" key carrying the description string.
product_description = [
    {"name": "in purus eu magna vulputate luctus cum sociis natoque penatibus et magnis dis parturient montes nascetur ridiculus mus vivamus vestibulum sagittis"},
    {"name": "adipiscing elit proin risus praesent lectus vestibulum quam sapien varius ut blandit non interdum in ante vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae"},
    {"name": "eget eros elementum pellentesque quisque porta volutpat erat quisque erat eros viverra eget congue eget semper rutrum nulla nunc purus phasellus in felis donec semper sapien a libero nam dui proin leo odio porttitor id consequat in"},
    {"name": "leo odio porttitor id consequat in consequat ut nulla sed accumsan felis ut at dolor quis odio consequat varius integer ac leo"},
    {"name": "imperdiet et commodo vulputate justo in blandit ultrices enim lorem ipsum dolor sit amet consectetuer adipiscing elit proin interdum mauris non ligula pellentesque"},
    {"name": "velit donec diam neque vestibulum eget vulputate ut ultrices vel augue vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia"},
    {"name": "mauris non ligula pellentesque ultrices phasellus id sapien in sapien iaculis congue vivamus metus arcu adipiscing molestie hendrerit at vulputate vitae nisl"},
    {"name": "faucibus orci luctus et ultrices posuere cubilia curae duis faucibus accumsan odio curabitur convallis duis consequat dui nec nisi volutpat eleifend donec ut dolor morbi vel lectus in quam fringilla rhoncus mauris enim leo rhoncus sed vestibulum"},
    {"name": "metus sapien ut nunc vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae mauris viverra diam vitae quam suspendisse potenti nullam porttitor lacus at turpis donec posuere metus vitae ipsum aliquam non mauris morbi non lectus"},
    {"name": "nunc proin at turpis a pede posuere nonummy integer non velit donec diam neque vestibulum eget vulputate ut ultrices vel augue vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia"},
]
# Lifecycle states an Order can be in.
order_status = ['Proceeding', 'Delivering', 'Delivered', 'Canceled', 'In cart']
# Seed data for store accounts. Passwords were redacted to the literal
# placeholder string "<PASSWORD>" during anonymization.
# Fix: entry 10 previously had `"password": <PASSWORD>,` — the placeholder
# was left unquoted, which is a SyntaxError and made this module fail to
# import. It is now quoted like the other nine entries.
stores = [
    {
        "id": 1,
        "login_name": "Markus",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/etnesciuntiste.jpg?size=100x100&set=set1",
        "store_name": "Janyx"
    }, {
        "id": 2,
        "login_name": "Corabelle",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/asperioresinaliquam.bmp?size=100x100&set=set1",
        "store_name": "Eamia"
    }, {
        "id": 3,
        "login_name": "Drusie",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/nonsitdolor.png?size=100x100&set=set1",
        "store_name": "BlogXS"
    }, {
        "id": 4,
        "login_name": "Maximilian",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/voluptasnonvero.png?size=100x100&set=set1",
        "store_name": "Meedoo"
    }, {
        "id": 5,
        "login_name": "Drugi",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/eligendiautdeserunt.jpg?size=100x100&set=set1",
        "store_name": "Dynabox"
    }, {
        "id": 6,
        "login_name": "Ilene",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/vellaboreet.bmp?size=100x100&set=set1",
        "store_name": "Photofeed"
    }, {
        "id": 7,
        "login_name": "Illa",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/laboriosamvelitanimi.jpg?size=100x100&set=set1",
        "store_name": "Jatri"
    }, {
        "id": 8,
        "login_name": "Essy",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/repudiandaeconsequaturqui.png?size=100x100&set=set1",
        "store_name": "Zoozzy"
    }, {
        "id": 9,
        "login_name": "Stinky",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/quoquodquam.bmp?size=100x100&set=set1",
        "store_name": "Skaboo"
    }, {
        "id": 10,
        "login_name": "Jackie",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/quiinharum.bmp?size=100x100&set=set1",
        "store_name": "Zoozzy"
    }
]
# Seed data for user accounts: id, login name, and a robohash avatar URL.
users = [
    {"id": 1, "login_name": "Piegrome", "img_url": "https://robohash.org/namametincidunt.png?size=200x200&set=set1"},
    {"id": 2, "login_name": "Bolderstone", "img_url": "https://robohash.org/remminusmodi.png?size=200x200&set=set1"},
    {"id": 3, "login_name": "Axleby", "img_url": "https://robohash.org/doloreconsequaturquisquam.png?size=200x200&set=set1"},
    {"id": 4, "login_name": "Gerge", "img_url": "https://robohash.org/nihilrepellendusea.png?size=200x200&set=set1"},
    {"id": 5, "login_name": "Ellings", "img_url": "https://robohash.org/quoautnihil.png?size=200x200&set=set1"},
    {"id": 6, "login_name": "Keling", "img_url": "https://robohash.org/doloremquevelitexcepturi.png?size=200x200&set=set1"},
    {"id": 7, "login_name": "Kleinerman", "img_url": "https://robohash.org/quamvoluptatumet.png?size=200x200&set=set1"},
    {"id": 8, "login_name": "Chetter", "img_url": "https://robohash.org/istesiteaque.png?size=200x200&set=set1"},
    {"id": 9, "login_name": "Jedrachowicz", "img_url": "https://robohash.org/situllamamet.png?size=200x200&set=set1"},
    {"id": 10, "login_name": "Sayce", "img_url": "https://robohash.org/harumdistinctioitaque.png?size=200x200&set=set1"},
    {"id": 11, "login_name": "Vella", "img_url": "https://robohash.org/utverorepudiandae.png?size=200x200&set=set1"},
    {"id": 12, "login_name": "Kenvin", "img_url": "https://robohash.org/magninobisdolores.png?size=200x200&set=set1"},
    {"id": 13, "login_name": "Perazzo", "img_url": "https://robohash.org/quasifugiatsunt.png?size=200x200&set=set1"},
    {"id": 14, "login_name": "Beart", "img_url": "https://robohash.org/autemaliquammaxime.png?size=200x200&set=set1"},
    {"id": 15, "login_name": "Tomasik", "img_url": "https://robohash.org/sapienteoditrepellendus.png?size=200x200&set=set1"},
    {"id": 16, "login_name": "Neasam", "img_url": "https://robohash.org/inerrorautem.png?size=200x200&set=set1"},
    {"id": 17, "login_name": "Greenstock", "img_url": "https://robohash.org/ututipsum.png?size=200x200&set=set1"},
    {"id": 18, "login_name": "Vermer", "img_url": "https://robohash.org/quaevelitexercitationem.png?size=200x200&set=set1"},
    {"id": 19, "login_name": "Kale", "img_url": "https://robohash.org/suscipitnecessitatibusexcepturi.png?size=200x200&set=set1"},
    {"id": 20, "login_name": "Portwaine", "img_url": "https://robohash.org/occaecatiteneturnesciunt.png?size=200x200&set=set1"},
    {"id": 21, "login_name": "Shefton", "img_url": "https://robohash.org/aliasassumendafuga.png?size=200x200&set=set1"},
    {"id": 22, "login_name": "Guinane", "img_url": "https://robohash.org/sequiconsequunturenim.png?size=200x200&set=set1"},
    {"id": 23, "login_name": "Gitthouse", "img_url": "https://robohash.org/idlaboreesse.png?size=200x200&set=set1"},
    {"id": 24, "login_name": "Lyngsted", "img_url": "https://robohash.org/aliquiddelectuset.png?size=200x200&set=set1"},
    {"id": 25, "login_name": "Dellar", "img_url": "https://robohash.org/cupiditateexplicaboesse.png?size=200x200&set=set1"},
    {"id": 26, "login_name": "Latham", "img_url": "https://robohash.org/voluptatemaccusantiumeligendi.png?size=200x200&set=set1"},
    {"id": 27, "login_name": "Bamb", "img_url": "https://robohash.org/doloremquequiafacere.png?size=200x200&set=set1"},
    {"id": 28, "login_name": "Sigg", "img_url": "https://robohash.org/quodveroet.png?size=200x200&set=set1"},
    {"id": 29, "login_name": "Lasham", "img_url": "https://robohash.org/dolorehiceaque.png?size=200x200&set=set1"},
    {"id": 30, "login_name": "Lattimore", "img_url": "https://robohash.org/quiimpeditsuscipit.png?size=200x200&set=set1"},
    {"id": 31, "login_name": "Rozet", "img_url": "https://robohash.org/officiisperspiciatisneque.png?size=200x200&set=set1"},
    {"id": 32, "login_name": "Budibent", "img_url": "https://robohash.org/doloresvoluptasquidem.png?size=200x200&set=set1"},
    {"id": 33, "login_name": "Mains", "img_url": "https://robohash.org/temporeculpatotam.png?size=200x200&set=set1"},
    {"id": 34, "login_name": "Orrow", "img_url": "https://robohash.org/cumculpadoloremque.png?size=200x200&set=set1"},
    {"id": 35, "login_name": "Gearty", "img_url": "https://robohash.org/involuptasminus.png?size=200x200&set=set1"},
    {"id": 36, "login_name": "Arni", "img_url": "https://robohash.org/voluptatemsequitotam.png?size=200x200&set=set1"},
    {"id": 37, "login_name": "Piddick", "img_url": "https://robohash.org/saepequibusdamnesciunt.png?size=200x200&set=set1"},
    {"id": 38, "login_name": "Acom", "img_url": "https://robohash.org/minimaharumet.png?size=200x200&set=set1"},
    {"id": 39, "login_name": "Clemenzi", "img_url": "https://robohash.org/nemoillumlibero.png?size=200x200&set=set1"},
    {"id": 40, "login_name": "Asgodby", "img_url": "https://robohash.org/minusnostrumipsam.png?size=200x200&set=set1"},
]
ratings = [
{"id":1,"rating":3,"comment":"aggregate granular e-commerce","user_id":25,"product_id":58},
{"id":2,"rating":1,"comment":"disintermediate transparent e-services","user_id":20,"product_id":87},
{"id":3,"rating":2,"comment":"facilitate back-end users","user_id":21,"product_id":34},
{"id":4,"rating":4,"comment":"extend one-to-one platforms","user_id":35,"product_id":69},
{"id":5,"rating":5,"comment":"envisioneer leading-edge technologies","user_id":38,"product_id":60},
{"id":6,"rating":3,"comment":"harness front-end applications","user_id":47,"product_id":68},
{"id":7,"rating":1,"comment":"seize bricks-and-clicks web services","user_id":20,"product_id":50},
{"id":8,"rating":1,"comment":"aggregate integrated e-markets","user_id":31,"product_id":29},
{"id":9,"rating":1,"comment":"leverage innovative eyeballs","user_id":32,"product_id":11},
{"id":10,"rating":1,"comment":"architect synergistic supply-chains","user_id":27,"product_id":49},
{"id":11,"rating":1,"comment":"implement best-of-breed functionalities","user_id":49,"product_id":24},
{"id":12,"rating":5,"comment":"synergize best-of-breed metrics","user_id":20,"product_id":18},
{"id":13,"rating":3,"comment":"engage B2C niches","user_id":33,"product_id":34},
{"id":14,"rating":5,"comment":"integrate collaborative portals","user_id":38,"product_id":85},
{"id":15,"rating":3,"comment":"mesh global architectures","user_id":20,"product_id":77},
{"id":16,"rating":1,"comment":"target best-of-breed initiatives","user_id":36,"product_id":7},
{"id":17,"rating":5,"comment":"iterate wireless infomediaries","user_id":13,"product_id":28},
{"id":18,"rating":2,"comment":"target sticky methodologies","user_id":39,"product_id":12},
{"id":19,"rating":3,"comment":"productize turn-key architectures","user_id":36,"product_id":32},
{"id":20,"rating":2,"comment":"monetize granular channels","user_id":40,"product_id":30},
{"id":21,"rating":4,"comment":"unleash leading-edge functionalities","user_id":46,"product_id":30},
{"id":22,"rating":1,"comment":"maximize user-centric solutions","user_id":11,"product_id":78},
{"id":23,"rating":5,"comment":"engage enterprise e-business","user_id":21,"product_id":7},
{"id":24,"rating":1,"comment":"extend bricks-and-clicks e-business","user_id":46,"product_id":57},
{"id":25,"rating":5,"comment":"expedite interactive relationships","user_id":48,"product_id":27},
{"id":26,"rating":5,"comment":"engineer wireless mindshare","user_id":45,"product_id":14},
{"id":27,"rating":1,"comment":"evolve sticky platforms","user_id":25,"product_id":5},
{"id":28,"rating":4,"comment":"harness holistic convergence","user_id":40,"product_id":79},
{"id":29,"rating":5,"comment":"incentivize extensible partnerships","user_id":22,"product_id":48},
{"id":30,"rating":3,"comment":"visualize impactful infrastructures","user_id":21,"product_id":43},
{"id":31,"rating":2,"comment":"scale next-generation experiences","user_id":11,"product_id":66},
{"id":32,"rating":2,"comment":"architect user-centric infrastructures","user_id":37,"product_id":31},
{"id":33,"rating":3,"comment":"e-enable 24/365 e-markets","user_id":40,"product_id":23},
{"id":34,"rating":5,"comment":"engineer web-enabled markets","user_id":28,"product_id":31},
{"id":35,"rating":5,"comment":"expedite viral portals","user_id":48,"product_id":6},
{"id":36,"rating":1,"comment":"seize B2B functionalities","user_id":45,"product_id":51},
{"id":37,"rating":5,"comment":"strategize turn-key technologies","user_id":49,"product_id":65},
{"id":38,"rating":1,"comment":"redefine ubiquitous mindshare","user_id":20,"product_id":63},
{"id":39,"rating":2,"comment":"innovate robust solutions","user_id":41,"product_id":88},
{"id":40,"rating":3,"comment":"redefine global schemas","user_id":18,"product_id":27},
{"id":41,"rating":5,"comment":"enhance 24/7 systems","user_id":22,"product_id":47},
{"id":42,"rating":4,"comment":"matrix strategic mindshare","user_id":46,"product_id":30},
{"id":43,"rating":1,"comment":"evolve end-to-end synergies","user_id":21,"product_id":84},
{"id":44,"rating":5,"comment":"optimize sticky systems","user_id":35,"product_id":38},
{"id":45,"rating":3,"comment":"matrix bricks-and-clicks users","user_id":49,"product_id":37},
{"id":46,"rating":3,"comment":"innovate efficient relationships","user_id":11,"product_id":31},
{"id":47,"rating":4,"comment":"iterate clicks-and-mortar channels","user_id":41,"product_id":88},
{"id":48,"rating":2,"comment":"architect mission-critical web services","user_id":36,"product_id":77},
{"id":49,"rating":5,"comment":"streamline value-added mindshare","user_id":39,"product_id":80},
{"id":50,"rating":2,"comment":"syndicate bleeding-edge markets","user_id":42,"product_id":63},
{"id":51,"rating":3,"comment":"e-enable enterprise schemas","user_id":48,"product_id":72},
{"id":52,"rating":4,"comment":"whiteboard e-business infrastructures","user_id":31,"product_id":62},
{"id":53,"rating":3,"comment":"evolve cutting-edge technologies","user_id":45,"product_id":23},
{"id":54,"rating":4,"comment":"harness seamless partnerships","user_id":40,"product_id":34},
{"id":55,"rating":4,"comment":"target impactful deliverables","user_id":23,"product_id":75},
{"id":56,"rating":1,"comment":"unleash collaborative functionalities","user_id":15,"product_id":34},
{"id":57,"rating":4,"comment":"synthesize clicks-and-mortar experiences","user_id":14,"product_id":16},
{"id":58,"rating":1,"comment":"redefine distributed mindshare","user_id":48,"product_id":20},
{"id":59,"rating":2,"comment":"generate world-class relationships","user_id":43,"product_id":75},
{"id":60,"rating":1,"comment":"maximize collaborative bandwidth","user_id":40,"product_id":69},
{"id":61,"rating":5,"comment":"matrix holistic initiatives","user_id":21,"product_id":26},
{"id":62,"rating":2,"comment":"streamline back-end supply-chains","user_id":41,"product_id":28},
{"id":63,"rating":1,"comment":"unleash granular models","user_id":28,"product_id":89},
{"id":64,"rating":3,"comment":"brand impactful communities","user_id":11,"product_id":57},
{"id":65,"rating":4,"comment":"redefine real-time systems","user_id":27,"product_id":62},
{"id":66,"rating":4,"comment":"monetize user-centric web-readiness","user_id":13,"product_id":44},
{"id":67,"rating":4,"comment":"synthesize 24/365 e-business","user_id":30,"product_id":81},
{"id":68,"rating":5,"comment":"whiteboard mission-critical solutions","user_id":33,"product_id":20},
{"id":69,"rating":2,"comment":"deliver dynamic architectures","user_id":25,"product_id":33},
{"id":70,"rating":2,"comment":"matrix magnetic models","user_id":38,"product_id":16},
{"id":71,"rating":4,"comment":"transform vertical e-business","user_id":25,"product_id":87},
{"id":72,"rating":5,"comment":"monetize proactive infomediaries","user_id":36,"product_id":83},
{"id":73,"rating":5,"comment":"mesh global architectures","user_id":42,"product_id":79},
{"id":74,"rating":4,"comment":"exploit B2C platforms","user_id":11,"product_id":56},
{"id":75,"rating":1,"comment":"repurpose front-end e-tailers","user_id":21,"product_id":23},
{"id":76,"rating":3,"comment":"synergize B2C models","user_id":16,"product_id":67},
{"id":77,"rating":4,"comment":"engineer out-of-the-box relationships","user_id":23,"product_id":77},
{"id":78,"rating":3,"comment":"incubate interactive initiatives","user_id":12,"product_id":56},
{"id":79,"rating":2,"comment":"scale world-class vortals","user_id":47,"product_id":29},
{"id":80,"rating":1,"comment":"embrace strategic methodologies","user_id":35,"product_id":81},
{"id":81,"rating":1,"comment":"brand proactive relationships","user_id":40,"product_id":25},
{"id":82,"rating":2,"comment":"extend viral supply-chains","user_id":38,"product_id":26},
{"id":83,"rating":5,"comment":"evolve frictionless methodologies","user_id":39,"product_id":56},
{"id":84,"rating":4,"comment":"generate plug-and-play metrics","user_id":30,"product_id":54},
{"id":85,"rating":4,"comment":"maximize virtual communities","user_id":34,"product_id":84},
{"id":86,"rating":5,"comment":"implement B2C e-tailers","user_id":16,"product_id":38},
{"id":87,"rating":1,"comment":"scale integrated initiatives","user_id":49,"product_id":45},
{"id":88,"rating":2,"comment":"benchmark distributed paradigms","user_id":40,"product_id":64},
{"id":89,"rating":1,"comment":"disintermediate holistic systems","user_id":43,"product_id":12},
{"id":90,"rating":4,"comment":"morph efficient ROI","user_id":13,"product_id":26},
{"id":91,"rating":2,"comment":"streamline dot-com portals","user_id":40,"product_id":66},
{"id":92,"rating":1,"comment":"harness holistic networks","user_id":17,"product_id":86},
{"id":93,"rating":1,"comment":"envisioneer bleeding-edge systems","user_id":19,"product_id":39},
{"id":94,"rating":5,"comment":"transform plug-and-play e-services","user_id":12,"product_id":36},
{"id":95,"rating":4,"comment":"synthesize open-source methodologies","user_id":49,"product_id":71},
{"id":96,"rating":5,"comment":"morph scalable e-commerce","user_id":35,"product_id":57},
{"id":97,"rating":4,"comment":"repurpose frictionless ROI","user_id":37,"product_id":1},
{"id":98,"rating":1,"comment":"incentivize e-business supply-chains","user_id":42,"product_id":21},
{"id":99,"rating":3,"comment":"deliver magnetic initiatives","user_id":32,"product_id":32},
{"id":100,"rating":5,"comment":"repurpose innovative functionalities","user_id":27,"product_id":13},
{"id":101,"rating":1,"comment":"implement innovative niches","user_id":19,"product_id":58},
{"id":102,"rating":3,"comment":"synergize intuitive deliverables","user_id":45,"product_id":18},
{"id":103,"rating":5,"comment":"empower extensible metrics","user_id":40,"product_id":67},
{"id":104,"rating":2,"comment":"productize one-to-one schemas","user_id":30,"product_id":72},
{"id":105,"rating":2,"comment":"orchestrate customized synergies","user_id":32,"product_id":41},
{"id":106,"rating":2,"comment":"grow strategic initiatives","user_id":17,"product_id":37},
{"id":107,"rating":2,"comment":"disintermediate robust action-items","user_id":41,"product_id":57},
{"id":108,"rating":2,"comment":"disintermediate scalable partnerships","user_id":41,"product_id":11},
{"id":109,"rating":1,"comment":"redefine B2C users","user_id":37,"product_id":80},
{"id":110,"rating":3,"comment":"streamline B2B users","user_id":46,"product_id":70},
{"id":111,"rating":2,"comment":"maximize e-business metrics","user_id":40,"product_id":87},
{"id":112,"rating":5,"comment":"whiteboard strategic web services","user_id":22,"product_id":11},
{"id":113,"rating":3,"comment":"enable revolutionary convergence","user_id":17,"product_id":42},
{"id":114,"rating":1,"comment":"whiteboard viral content","user_id":45,"product_id":55},
{"id":115,"rating":3,"comment":"incubate collaborative synergies","user_id":44,"product_id":57},
{"id":116,"rating":5,"comment":"visualize ubiquitous web services","user_id":49,"product_id":4},
{"id":117,"rating":1,"comment":"implement strategic users","user_id":49,"product_id":77},
{"id":118,"rating":2,"comment":"matrix rich mindshare","user_id":31,"product_id":57},
{"id":119,"rating":3,"comment":"grow rich portals","user_id":22,"product_id":16},
{"id":120,"rating":3,"comment":"morph out-of-the-box supply-chains","user_id":17,"product_id":64},
{"id":121,"rating":3,"comment":"evolve clicks-and-mortar ROI","user_id":33,"product_id":42},
{"id":122,"rating":4,"comment":"leverage visionary portals","user_id":49,"product_id":85},
{"id":123,"rating":2,"comment":"generate dot-com e-markets","user_id":45,"product_id":77},
{"id":124,"rating":5,"comment":"orchestrate web-enabled schemas","user_id":37,"product_id":82},
{"id":125,"rating":1,"comment":"enable turn-key vortals","user_id":34,"product_id":46},
{"id":126,"rating":5,"comment":"exploit distributed supply-chains","user_id":19,"product_id":72},
{"id":127,"rating":4,"comment":"engineer global e-business","user_id":26,"product_id":39},
{"id":128,"rating":4,"comment":"strategize virtual systems","user_id":16,"product_id":86},
{"id":129,"rating":5,"comment":"optimize bricks-and-clicks functionalities","user_id":22,"product_id":81},
{"id":130,"rating":3,"comment":"monetize magnetic web-readiness","user_id":29,"product_id":57},
{"id":131,"rating":4,"comment":"orchestrate best-of-breed synergies","user_id":17,"product_id":79},
{"id":132,"rating":2,"comment":"scale clicks-and-mortar networks","user_id":12,"product_id":29},
{"id":133,"rating":4,"comment":"recontextualize cross-platform channels","user_id":43,"product_id":31},
{"id":134,"rating":5,"comment":"productize scalable ROI","user_id":39,"product_id":74},
{"id":135,"rating":1,"comment":"streamline dot-com content","user_id":43,"product_id":79},
{"id":136,"rating":1,"comment":"cultivate impactful methodologies","user_id":16,"product_id":51},
{"id":137,"rating":4,"comment":"unleash sticky networks","user_id":19,"product_id":66},
{"id":138,"rating":5,"comment":"empower end-to-end portals","user_id":21,"product_id":72},
{"id":139,"rating":5,"comment":"facilitate next-generation networks","user_id":41,"product_id":84},
{"id":140,"rating":2,"comment":"grow cross-media communities","user_id":33,"product_id":19},
{"id":141,"rating":2,"comment":"benchmark clicks-and-mortar eyeballs","user_id":13,"product_id":51},
{"id":142,"rating":5,"comment":"reintermediate transparent metrics","user_id":46,"product_id":2},
{"id":143,"rating":4,"comment":"morph vertical relationships","user_id":14,"product_id":4},
{"id":144,"rating":3,"comment":"deploy plug-and-play e-business","user_id":42,"product_id":87},
{"id":145,"rating":3,"comment":"seize scalable e-services","user_id":36,"product_id":25},
{"id":146,"rating":1,"comment":"cultivate one-to-one e-markets","user_id":27,"product_id":67},
{"id":147,"rating":1,"comment":"embrace one-to-one infrastructures","user_id":46,"product_id":10},
{"id":148,"rating":2,"comment":"brand vertical web services","user_id":29,"product_id":36},
{"id":149,"rating":5,"comment":"target e-business channels","user_id":18,"product_id":32},
{"id":150,"rating":2,"comment":"facilitate intuitive technologies","user_id":39,"product_id":41},
{"id":151,"rating":4,"comment":"whiteboard compelling supply-chains","user_id":23,"product_id":64},
{"id":152,"rating":3,"comment":"strategize collaborative systems","user_id":19,"product_id":64},
{"id":153,"rating":4,"comment":"harness clicks-and-mortar markets","user_id":24,"product_id":13},
{"id":154,"rating":3,"comment":"morph viral vortals","user_id":14,"product_id":15},
{"id":155,"rating":5,"comment":"transition front-end metrics","user_id":37,"product_id":33},
{"id":156,"rating":3,"comment":"empower global mindshare","user_id":44,"product_id":83},
{"id":157,"rating":5,"comment":"architect leading-edge markets","user_id":49,"product_id":66},
{"id":158,"rating":1,"comment":"leverage open-source e-commerce","user_id":30,"product_id":10},
{"id":159,"rating":1,"comment":"incentivize killer channels","user_id":36,"product_id":8},
{"id":160,"rating":2,"comment":"transition one-to-one synergies","user_id":23,"product_id":64},
{"id":161,"rating":5,"comment":"visualize cross-platform initiatives","user_id":49,"product_id":22},
{"id":162,"rating":1,"comment":"morph 24/7 synergies","user_id":24,"product_id":76},
{"id":163,"rating":4,"comment":"brand cutting-edge partnerships","user_id":21,"product_id":75},
{"id":164,"rating":5,"comment":"scale cross-media e-services","user_id":19,"product_id":59},
{"id":165,"rating":1,"comment":"enable integrated web services","user_id":46,"product_id":29},
{"id":166,"rating":1,"comment":"utilize enterprise infomediaries","user_id":34,"product_id":1},
{"id":167,"rating":4,"comment":"embrace mission-critical vortals","user_id":44,"product_id":42},
{"id":168,"rating":1,"comment":"revolutionize strategic models","user_id":15,"product_id":5},
{"id":169,"rating":1,"comment":"strategize bricks-and-clicks partnerships","user_id":11,"product_id":71},
{"id":170,"rating":1,"comment":"envisioneer bleeding-edge e-tailers","user_id":26,"product_id":38},
{"id":171,"rating":1,"comment":"grow bleeding-edge architectures","user_id":35,"product_id":9},
{"id":172,"rating":5,"comment":"brand virtual niches","user_id":39,"product_id":65},
{"id":173,"rating":1,"comment":"whiteboard sticky applications","user_id":38,"product_id":86},
{"id":174,"rating":4,"comment":"whiteboard next-generation initiatives","user_id":42,"product_id":13},
{"id":175,"rating":2,"comment":"incubate collaborative ROI","user_id":40,"product_id":64},
{"id":176,"rating":3,"comment":"morph customized technologies","user_id":35,"product_id":45},
{"id":177,"rating":5,"comment":"whiteboard web-enabled communities","user_id":41,"product_id":8},
{"id":178,"rating":5,"comment":"synergize vertical networks","user_id":35,"product_id":24},
{"id":179,"rating":4,"comment":"incubate value-added schemas","user_id":27,"product_id":62},
{"id":180,"rating":3,"comment":"expedite proactive portals","user_id":21,"product_id":17},
{"id":181,"rating":1,"comment":"leverage best-of-breed communities","user_id":18,"product_id":37},
{"id":182,"rating":3,"comment":"syndicate distributed relationships","user_id":47,"product_id":13},
{"id":183,"rating":2,"comment":"harness plug-and-play bandwidth","user_id":43,"product_id":7},
{"id":184,"rating":3,"comment":"envisioneer leading-edge e-tailers","user_id":29,"product_id":13},
{"id":185,"rating":5,"comment":"matrix customized niches","user_id":40,"product_id":76},
{"id":186,"rating":5,"comment":"engineer interactive paradigms","user_id":13,"product_id":64},
{"id":187,"rating":1,"comment":"aggregate plug-and-play metrics","user_id":47,"product_id":1},
{"id":188,"rating":1,"comment":"brand frictionless platforms","user_id":42,"product_id":59},
{"id":189,"rating":2,"comment":"transition dot-com partnerships","user_id":48,"product_id":18},
{"id":190,"rating":4,"comment":"target vertical interfaces","user_id":37,"product_id":34},
{"id":191,"rating":3,"comment":"mesh extensible e-business","user_id":12,"product_id":53},
{"id":192,"rating":5,"comment":"whiteboard enterprise niches","user_id":37,"product_id":21},
{"id":193,"rating":4,"comment":"morph next-generation infrastructures","user_id":12,"product_id":54},
{"id":194,"rating":5,"comment":"scale 24/7 bandwidth","user_id":13,"product_id":72},
{"id":195,"rating":3,"comment":"harness dynamic content","user_id":36,"product_id":1},
{"id":196,"rating":2,"comment":"morph next-generation deliverables","user_id":27,"product_id":9},
{"id":197,"rating":4,"comment":"target efficient infrastructures","user_id":16,"product_id":13},
{"id":198,"rating":2,"comment":"engage viral schemas","user_id":47,"product_id":44},
{"id":199,"rating":5,"comment":"expedite value-added solutions","user_id":41,"product_id":23},
{"id":200,"rating":1,"comment":"exploit scalable methodologies","user_id":45,"product_id":84},
{"id":201,"rating":1,"comment":"strategize one-to-one content","user_id":16,"product_id":86},
{"id":202,"rating":5,"comment":"enhance dot-com networks","user_id":16,"product_id":52},
{"id":203,"rating":1,"comment":"productize enterprise schemas","user_id":46,"product_id":80},
{"id":204,"rating":2,"comment":"transition compelling initiatives","user_id":46,"product_id":74},
{"id":205,"rating":4,"comment":"benchmark cutting-edge functionalities","user_id":36,"product_id":87},
{"id":206,"rating":3,"comment":"orchestrate out-of-the-box ROI","user_id":30,"product_id":1},
{"id":207,"rating":2,"comment":"evolve back-end convergence","user_id":15,"product_id":21},
{"id":208,"rating":3,"comment":"syndicate end-to-end models","user_id":35,"product_id":86},
{"id":209,"rating":2,"comment":"brand 24/7 e-markets","user_id":32,"product_id":36},
{"id":210,"rating":4,"comment":"implement transparent e-services","user_id":47,"product_id":86},
{"id":211,"rating":5,"comment":"utilize leading-edge ROI","user_id":37,"product_id":65},
{"id":212,"rating":1,"comment":"cultivate bricks-and-clicks users","user_id":47,"product_id":4},
{"id":213,"rating":4,"comment":"repurpose sticky supply-chains","user_id":26,"product_id":62},
{"id":214,"rating":1,"comment":"brand virtual functionalities","user_id":43,"product_id":55},
{"id":215,"rating":1,"comment":"strategize impactful metrics","user_id":24,"product_id":84},
{"id":216,"rating":2,"comment":"harness dot-com content","user_id":17,"product_id":38},
{"id":217,"rating":4,"comment":"target sexy initiatives","user_id":39,"product_id":76},
{"id":218,"rating":5,"comment":"incubate extensible action-items","user_id":49,"product_id":5},
{"id":219,"rating":2,"comment":"monetize clicks-and-mortar networks","user_id":32,"product_id":2},
{"id":220,"rating":4,"comment":"scale bleeding-edge initiatives","user_id":15,"product_id":36},
{"id":221,"rating":4,"comment":"facilitate e-business systems","user_id":15,"product_id":38},
{"id":222,"rating":4,"comment":"architect clicks-and-mortar content","user_id":38,"product_id":34},
{"id":223,"rating":2,"comment":"transition compelling communities","user_id":14,"product_id":7},
{"id":224,"rating":4,"comment":"syndicate global e-tailers","user_id":35,"product_id":66},
{"id":225,"rating":1,"comment":"e-enable cross-media methodologies","user_id":46,"product_id":28},
{"id":226,"rating":4,"comment":"iterate ubiquitous models","user_id":34,"product_id":69},
{"id":227,"rating":4,"comment":"transform seamless methodologies","user_id":18,"product_id":64},
{"id":228,"rating":3,"comment":"empower killer vortals","user_id":38,"product_id":60},
{"id":229,"rating":4,"comment":"drive sexy paradigms","user_id":35,"product_id":61},
{"id":230,"rating":3,"comment":"streamline value-added supply-chains","user_id":34,"product_id":44},
{"id":231,"rating":1,"comment":"cultivate one-to-one infomediaries","user_id":17,"product_id":62},
{"id":232,"rating":5,"comment":"reintermediate dot-com niches","user_id":11,"product_id":30},
{"id":233,"rating":4,"comment":"unleash turn-key action-items","user_id":17,"product_id":73},
{"id":234,"rating":5,"comment":"synthesize dynamic interfaces","user_id":16,"product_id":36},
{"id":235,"rating":4,"comment":"extend 24/7 deliverables","user_id":16,"product_id":8},
{"id":236,"rating":2,"comment":"deploy holistic relationships","user_id":27,"product_id":39},
{"id":237,"rating":5,"comment":"utilize mission-critical supply-chains","user_id":40,"product_id":37},
{"id":238,"rating":3,"comment":"facilitate dynamic methodologies","user_id":11,"product_id":69},
{"id":239,"rating":5,"comment":"synthesize cross-platform web-readiness","user_id":16,"product_id":21},
{"id":240,"rating":2,"comment":"reintermediate 24/365 functionalities","user_id":32,"product_id":1},
{"id":241,"rating":4,"comment":"reintermediate cross-media e-business","user_id":20,"product_id":66},
{"id":242,"rating":2,"comment":"deploy sticky functionalities","user_id":43,"product_id":86},
{"id":243,"rating":3,"comment":"exploit dot-com platforms","user_id":14,"product_id":48},
{"id":244,"rating":4,"comment":"enable wireless experiences","user_id":12,"product_id":85},
{"id":245,"rating":3,"comment":"cultivate magnetic partnerships","user_id":48,"product_id":9},
{"id":246,"rating":4,"comment":"synergize wireless web-readiness","user_id":19,"product_id":65},
{"id":247,"rating":5,"comment":"implement e-business e-services","user_id":16,"product_id":3},
{"id":248,"rating":1,"comment":"expedite intuitive supply-chains","user_id":30,"product_id":75},
{"id":249,"rating":4,"comment":"aggregate rich functionalities","user_id":28,"product_id":75},
{"id":250,"rating":1,"comment":"disintermediate web-enabled metrics","user_id":35,"product_id":84},
{"id":251,"rating":4,"comment":"reintermediate dynamic technologies","user_id":48,"product_id":67},
{"id":252,"rating":1,"comment":"productize virtual systems","user_id":11,"product_id":45},
{"id":253,"rating":3,"comment":"optimize clicks-and-mortar experiences","user_id":42,"product_id":26},
{"id":254,"rating":2,"comment":"evolve collaborative systems","user_id":46,"product_id":81},
{"id":255,"rating":3,"comment":"productize cutting-edge methodologies","user_id":49,"product_id":25},
{"id":256,"rating":2,"comment":"benchmark sticky ROI","user_id":35,"product_id":67},
{"id":257,"rating":3,"comment":"reintermediate plug-and-play systems","user_id":21,"product_id":48},
{"id":258,"rating":5,"comment":"generate strategic e-markets","user_id":11,"product_id":11},
{"id":259,"rating":1,"comment":"incubate cross-platform systems","user_id":24,"product_id":12},
{"id":260,"rating":1,"comment":"matrix vertical niches","user_id":22,"product_id":49},
{"id":261,"rating":5,"comment":"redefine robust networks","user_id":16,"product_id":65},
{"id":262,"rating":5,"comment":"engage world-class web-readiness","user_id":24,"product_id":12},
{"id":263,"rating":2,"comment":"strategize front-end infomediaries","user_id":36,"product_id":75},
{"id":264,"rating":3,"comment":"repurpose turn-key architectures","user_id":24,"product_id":31},
{"id":265,"rating":2,"comment":"streamline dot-com initiatives","user_id":24,"product_id":84},
{"id":266,"rating":3,"comment":"strategize enterprise applications","user_id":38,"product_id":22},
{"id":267,"rating":1,"comment":"generate seamless schemas","user_id":32,"product_id":51},
{"id":268,"rating":3,"comment":"synergize B2C e-tailers","user_id":25,"product_id":44},
{"id":269,"rating":2,"comment":"harness global solutions","user_id":34,"product_id":66},
{"id":270,"rating":5,"comment":"evolve intuitive e-markets","user_id":13,"product_id":81},
{"id":271,"rating":5,"comment":"drive distributed paradigms","user_id":28,"product_id":82},
{"id":272,"rating":5,"comment":"visualize seamless markets","user_id":42,"product_id":82},
{"id":273,"rating":4,"comment":"reintermediate cross-platform paradigms","user_id":45,"product_id":12},
{"id":274,"rating":3,"comment":"architect impactful channels","user_id":48,"product_id":79},
{"id":275,"rating":4,"comment":"recontextualize B2C technologies","user_id":17,"product_id":9},
{"id":276,"rating":4,"comment":"implement front-end action-items","user_id":23,"product_id":7},
{"id":277,"rating":1,"comment":"enable dot-com vortals","user_id":27,"product_id":58},
{"id":278,"rating":2,"comment":"incubate back-end solutions","user_id":11,"product_id":11},
{"id":279,"rating":3,"comment":"architect proactive e-business","user_id":34,"product_id":81},
{"id":280,"rating":3,"comment":"redefine best-of-breed e-commerce","user_id":47,"product_id":71},
{"id":281,"rating":2,"comment":"productize visionary infrastructures","user_id":20,"product_id":85},
{"id":282,"rating":5,"comment":"strategize bricks-and-clicks systems","user_id":43,"product_id":56},
{"id":283,"rating":2,"comment":"syndicate value-added platforms","user_id":19,"product_id":84},
{"id":284,"rating":4,"comment":"evolve cross-media deliverables","user_id":19,"product_id":81},
{"id":285,"rating":3,"comment":"target plug-and-play experiences","user_id":33,"product_id":83},
{"id":286,"rating":1,"comment":"evolve real-time infomediaries","user_id":27,"product_id":25},
{"id":287,"rating":5,"comment":"scale frictionless relationships","user_id":15,"product_id":13},
{"id":288,"rating":1,"comment":"evolve real-time vortals","user_id":19,"product_id":69},
{"id":289,"rating":4,"comment":"innovate robust models","user_id":14,"product_id":70},
{"id":290,"rating":2,"comment":"recontextualize web-enabled markets","user_id":40,"product_id":70},
{"id":291,"rating":1,"comment":"maximize interactive vortals","user_id":27,"product_id":6},
{"id":292,"rating":3,"comment":"seize virtual vortals","user_id":41,"product_id":49},
{"id":293,"rating":4,"comment":"disintermediate revolutionary e-tailers","user_id":20,"product_id":87},
{"id":294,"rating":1,"comment":"aggregate extensible applications","user_id":34,"product_id":37},
{"id":295,"rating":2,"comment":"incubate one-to-one channels","user_id":16,"product_id":13},
{"id":296,"rating":4,"comment":"target extensible bandwidth","user_id":27,"product_id":81},
{"id":297,"rating":5,"comment":"leverage mission-critical e-commerce","user_id":47,"product_id":75},
{"id":298,"rating":5,"comment":"e-enable robust communities","user_id":26,"product_id":20},
{"id":299,"rating":4,"comment":"iterate mission-critical bandwidth","user_id":47,"product_id":77},
{"id":300,"rating":2,"comment":"mesh extensible partnerships","user_id":23,"product_id":4},
{"id":301,"rating":2,"comment":"incubate sexy architectures","user_id":22,"product_id":72},
{"id":302,"rating":1,"comment":"enhance scalable content","user_id":35,"product_id":47},
{"id":303,"rating":1,"comment":"exploit user-centric networks","user_id":22,"product_id":78},
{"id":304,"rating":2,"comment":"implement plug-and-play models","user_id":20,"product_id":45},
{"id":305,"rating":3,"comment":"drive next-generation bandwidth","user_id":22,"product_id":44},
{"id":306,"rating":3,"comment":"cultivate one-to-one e-tailers","user_id":31,"product_id":40},
{"id":307,"rating":2,"comment":"cultivate front-end models","user_id":43,"product_id":18},
{"id":308,"rating":4,"comment":"synthesize back-end web-readiness","user_id":45,"product_id":33},
{"id":309,"rating":3,"comment":"reintermediate turn-key synergies","user_id":35,"product_id":42},
{"id":310,"rating":2,"comment":"integrate frictionless channels","user_id":15,"product_id":50},
{"id":311,"rating":5,"comment":"transform strategic paradigms","user_id":28,"product_id":24},
{"id":312,"rating":4,"comment":"innovate killer web-readiness","user_id":33,"product_id":87},
{"id":313,"rating":2,"comment":"grow proactive portals","user_id":28,"product_id":40},
{"id":314,"rating":1,"comment":"deliver open-source e-markets","user_id":45,"product_id":7},
{"id":315,"rating":3,"comment":"morph revolutionary networks","user_id":37,"product_id":53},
{"id":316,"rating":2,"comment":"harness back-end users","user_id":46,"product_id":17},
{"id":317,"rating":2,"comment":"deliver integrated mindshare","user_id":13,"product_id":35},
{"id":318,"rating":5,"comment":"incentivize frictionless e-markets","user_id":29,"product_id":58},
{"id":319,"rating":5,"comment":"extend rich solutions","user_id":28,"product_id":57},
{"id":320,"rating":3,"comment":"cultivate user-centric bandwidth","user_id":23,"product_id":4},
{"id":321,"rating":4,"comment":"reinvent distributed eyeballs","user_id":26,"product_id":34},
{"id":322,"rating":4,"comment":"extend frictionless paradigms","user_id":22,"product_id":59},
{"id":323,"rating":3,"comment":"iterate clicks-and-mortar infrastructures","user_id":17,"product_id":41},
{"id":324,"rating":2,"comment":"morph clicks-and-mortar infrastructures","user_id":11,"product_id":45},
{"id":325,"rating":1,"comment":"synergize open-source mindshare","user_id":34,"product_id":61},
{"id":326,"rating":3,"comment":"streamline viral models","user_id":46,"product_id":60},
{"id":327,"rating":3,"comment":"transform efficient web-readiness","user_id":26,"product_id":58},
{"id":328,"rating":1,"comment":"synthesize cross-platform infrastructures","user_id":23,"product_id":59},
{"id":329,"rating":5,"comment":"maximize intuitive metrics","user_id":49,"product_id":85},
{"id":330,"rating":2,"comment":"orchestrate innovative communities","user_id":12,"product_id":10},
{"id":331,"rating":2,"comment":"benchmark cross-platform content","user_id":13,"product_id":27},
{"id":332,"rating":3,"comment":"incentivize user-centric web services","user_id":28,"product_id":16},
{"id":333,"rating":4,"comment":"syndicate next-generation users","user_id":46,"product_id":51},
{"id":334,"rating":3,"comment":"e-enable intuitive web services","user_id":11,"product_id":43},
{"id":335,"rating":4,"comment":"mesh holistic channels","user_id":39,"product_id":28},
{"id":336,"rating":5,"comment":"syndicate visionary architectures","user_id":46,"product_id":50},
{"id":337,"rating":3,"comment":"engage mission-critical communities","user_id":43,"product_id":25},
{"id":338,"rating":4,"comment":"mesh cross-media methodologies","user_id":27,"product_id":1},
{"id":339,"rating":1,"comment":"whiteboard enterprise partnerships","user_id":32,"product_id":67},
{"id":340,"rating":3,"comment":"benchmark scalable bandwidth","user_id":26,"product_id":71},
{"id":341,"rating":3,"comment":"harness turn-key functionalities","user_id":22,"product_id":21},
{"id":342,"rating":3,"comment":"evolve global communities","user_id":29,"product_id":81},
{"id":343,"rating":1,"comment":"grow front-end applications","user_id":34,"product_id":13},
{"id":344,"rating":4,"comment":"recontextualize collaborative communities","user_id":43,"product_id":6},
{"id":345,"rating":4,"comment":"target granular content","user_id":42,"product_id":70},
{"id":346,"rating":3,"comment":"deploy next-generation paradigms","user_id":37,"product_id":67},
{"id":347,"rating":1,"comment":"repurpose back-end eyeballs","user_id":47,"product_id":8},
{"id":348,"rating":1,"comment":"cultivate dynamic e-tailers","user_id":26,"product_id":3},
{"id":349,"rating":5,"comment":"maximize plug-and-play systems","user_id":27,"product_id":67},
{"id":350,"rating":2,"comment":"visualize rich channels","user_id":14,"product_id":40},
{"id":351,"rating":4,"comment":"recontextualize value-added metrics","user_id":29,"product_id":13},
{"id":352,"rating":5,"comment":"transform B2C solutions","user_id":39,"product_id":22},
{"id":353,"rating":1,"comment":"expedite sexy e-services","user_id":36,"product_id":74},
{"id":354,"rating":2,"comment":"transition bleeding-edge schemas","user_id":28,"product_id":41},
{"id":355,"rating":1,"comment":"recontextualize dot-com niches","user_id":25,"product_id":40},
{"id":356,"rating":3,"comment":"maximize out-of-the-box channels","user_id":14,"product_id":14},
{"id":357,"rating":3,"comment":"aggregate one-to-one paradigms","user_id":34,"product_id":45},
{"id":358,"rating":2,"comment":"scale frictionless portals","user_id":16,"product_id":70},
{"id":359,"rating":2,"comment":"expedite robust markets","user_id":29,"product_id":4},
{"id":360,"rating":2,"comment":"expedite strategic e-markets","user_id":24,"product_id":16},
{"id":361,"rating":5,"comment":"incentivize global supply-chains","user_id":41,"product_id":10},
{"id":362,"rating":2,"comment":"syndicate rich deliverables","user_id":44,"product_id":84},
{"id":363,"rating":2,"comment":"whiteboard B2B communities","user_id":49,"product_id":63},
{"id":364,"rating":1,"comment":"deliver interactive bandwidth","user_id":41,"product_id":85},
{"id":365,"rating":3,"comment":"disintermediate real-time convergence","user_id":23,"product_id":11},
{"id":366,"rating":3,"comment":"cultivate 24/7 e-business","user_id":17,"product_id":28},
{"id":367,"rating":4,"comment":"aggregate scalable eyeballs","user_id":35,"product_id":50},
{"id":368,"rating":4,"comment":"implement B2B models","user_id":41,"product_id":67},
{"id":369,"rating":2,"comment":"orchestrate open-source infrastructures","user_id":36,"product_id":46},
{"id":370,"rating":1,"comment":"morph mission-critical methodologies","user_id":34,"product_id":17},
{"id":371,"rating":4,"comment":"aggregate seamless communities","user_id":40,"product_id":72},
{"id":372,"rating":4,"comment":"innovate holistic applications","user_id":11,"product_id":88},
{"id":373,"rating":3,"comment":"maximize killer ROI","user_id":39,"product_id":85},
{"id":374,"rating":5,"comment":"drive cross-media paradigms","user_id":22,"product_id":58},
{"id":375,"rating":2,"comment":"whiteboard B2B applications","user_id":14,"product_id":22},
{"id":376,"rating":2,"comment":"repurpose bricks-and-clicks functionalities","user_id":16,"product_id":83},
{"id":377,"rating":4,"comment":"facilitate wireless deliverables","user_id":23,"product_id":32},
{"id":378,"rating":5,"comment":"maximize cross-media e-markets","user_id":39,"product_id":88},
{"id":379,"rating":2,"comment":"deploy scalable solutions","user_id":39,"product_id":84},
{"id":380,"rating":1,"comment":"implement cutting-edge networks","user_id":22,"product_id":45},
{"id":381,"rating":3,"comment":"disintermediate efficient relationships","user_id":22,"product_id":28},
{"id":382,"rating":3,"comment":"grow plug-and-play deliverables","user_id":38,"product_id":76},
{"id":383,"rating":4,"comment":"envisioneer best-of-breed initiatives","user_id":25,"product_id":83},
{"id":384,"rating":4,"comment":"drive one-to-one relationships","user_id":20,"product_id":30},
{"id":385,"rating":2,"comment":"embrace front-end e-tailers","user_id":44,"product_id":63},
{"id":386,"rating":3,"comment":"engage compelling bandwidth","user_id":37,"product_id":4},
{"id":387,"rating":5,"comment":"monetize magnetic content","user_id":18,"product_id":18},
{"id":388,"rating":2,"comment":"architect out-of-the-box functionalities","user_id":16,"product_id":82},
{"id":389,"rating":1,"comment":"mesh compelling synergies","user_id":14,"product_id":75},
{"id":390,"rating":1,"comment":"utilize next-generation channels","user_id":12,"product_id":67},
{"id":391,"rating":2,"comment":"cultivate customized e-commerce","user_id":49,"product_id":18},
{"id":392,"rating":1,"comment":"generate interactive interfaces","user_id":13,"product_id":66},
{"id":393,"rating":2,"comment":"leverage plug-and-play architectures","user_id":37,"product_id":83},
{"id":394,"rating":2,"comment":"generate impactful infomediaries","user_id":49,"product_id":52},
{"id":395,"rating":3,"comment":"optimize bleeding-edge e-services","user_id":24,"product_id":5},
{"id":396,"rating":3,"comment":"syndicate B2B e-services","user_id":28,"product_id":40},
{"id":397,"rating":3,"comment":"deploy best-of-breed deliverables","user_id":14,"product_id":73},
{"id":398,"rating":2,"comment":"evolve bleeding-edge content","user_id":45,"product_id":89},
{"id":399,"rating":4,"comment":"disintermediate granular users","user_id":20,"product_id":59},
{"id":400,"rating":1,"comment":"visualize dynamic mindshare","user_id":34,"product_id":10}
]
def _seed_products(items, category_id, count=30):
    """Insert *count* randomly generated Product rows drawn from *items*.

    Each entry of *items* is a ``[name, img_url, unit]`` triple (see
    ``fruit_product`` et al.); the index is drawn with
    ``random.randrange(len(items))`` so a list can grow or shrink without
    touching hard-coded bounds (the original loops used separate, manually
    synced ``randint(0, N)`` limits per list).

    NOTE(review): ``user_owner_id`` assumes seed users with ids 1-9 exist and
    ``inventory_id`` assumes two inventories — confirm against the one-time
    seeding that was commented out in the original route.
    """
    for _ in range(count):
        name, img_url = items[random.randrange(len(items))][:2]
        db.session.add(Product(
            name=name,
            # 'discription' [sic] is the model's actual column name.
            discription=product_description[random.randrange(len(product_description))]['name'],
            img_url=img_url,
            price=random.randint(1, 5) * 1000,   # 1000..5000 in 1000 steps
            category_id=category_id,
            user_owner_id=random.randint(1, 9),
            stock=random.randint(100, 200),
            time="2019/12/15",
            expired_date="2020/01/10",
            inventory_id=random.randint(1, 2),
        ))
    # One commit per batch instead of one per row (same final DB state,
    # far fewer round trips).
    db.session.commit()


@fakedata_blueprint.route('/')
def craete_fake_date():
    """Populate the database with demo users, products and ratings.

    Returns a small JSON success payload.

    NOTE(review): the function name keeps the original typos
    ("craete"/"date") on purpose — Flask derives the endpoint name
    ('fakebp.craete_fake_date') from it, so renaming would break any
    ``url_for`` caller. The one-time category/inventory/order-status/store
    seeding that was commented out in the original (presumably already run
    once) is intentionally omitted here.
    """
    # Seed demo users from the module-level `users` list.
    for user in users:
        db.session.add(User(login_name=user['login_name'], img_url=user['img_url']))
    db.session.commit()
    # 30 random products per category; category ids 1-3 match the order of
    # the module-level `categories` seed list.
    _seed_products(fruit_product, category_id=1)
    _seed_products(vegetable_product, category_id=2)
    _seed_products(seasoning_product, category_id=3)
    # Seed ratings from the module-level `ratings` list.
    for rating in ratings:
        db.session.add(Rating(rating=rating['rating'], comment=rating['comment'],
                              user_id=rating['user_id'], product_id=rating['product_id']))
    db.session.commit()
    return jsonify({'head': 'success!'})
|
<filename>src/components/fakedata/__init__.py
from src.models.user import db, User, OAuth, Token, Order, Order_item, Order_status
from src.models.product import Product, Inventory, Rating, Category
from src.models.trading import Shipment, Invoice, Invoice_status, Payment
import random
from flask import Blueprint , render_template, jsonify
# Blueprint grouping the fake-data seeding endpoints of this module
# (presumably registered on the app elsewhere — verify in the app factory).
fakedata_blueprint = Blueprint('fakebp', __name__)
# Category names used to seed the Category table; their list position
# presumably maps to category_id 1..3 used by the product seeders — TODO confirm.
categories = [
    'fruits', 'vegetables', 'seasoning'
]
# Warehouse location names used to seed the Inventory table.
inventories = ['District 7', 'Thu Duc district']
# Seed catalogs below: each entry is [display name, image URL, measurement unit].
fruit_product = [['Apple', 'https://www.walmart.ca/en/ip/apple-gala/6000195494284', 'gam'], ['Avocado', 'https://images.eatsmarter.de/sites/default/files/styles/576x432/public/avocado-fotolia-600x450.jpg', 'gam'], ['Banana', 'http://buyfv.com/wp-content/uploads/2019/01/10000025-2_3-fresho-banana-robusta.jpg', 'gam'], ['Coconut', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTVMEklVSrhnZTPMcMz8t4d5x-NGLFDBZ703bFG6r_sDKntyn9w&s', 'unit'], ['Grape', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcROfRR0dudAEg7DFfMoRQom_kXXrrTsw8FgWVHbhKR60Nf2oMAUiw&s', 'gam'], ['Mango', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSz6jtchGZGiR38Cj8FdzywopoMSiyo7gJON8J2FmYdxTsrUEbb&s', 'gam'],
                 ['Orange', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcToBnHvC2lea0nC8LecgwotZiI7RhCFJsTv0JKPttLzLQvFdFF7&s', 'gam'], ['Dragon fruit', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQFxguw9NULcOIKmSUUMP4a9uQos0xmanvo4QPI2BRb3YdfMJ8nZQ&s', 'gam'], ['Watermelon', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRkL4UyUjb81Ecw4Z1SDA-JFV9oe2zgxlv4_99VBERkvWichiUz&s', 'gam'], ['Pineaple', 'https://i5.walmartimages.com/asr/dd2a5d3c-d358-4579-8ece-59ce1804ab5b_9.0b874251fccc645fd98ac76e797c2d2a.jpeg?odnWidth=450&odnHeight=450&odnBg=ffffff', 'gam'], ['Papayya', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQaqNWeGRhl-m7m0KmYxmOxncf3lWA8tNe2Tzd-o_zBXn4PxsaCAA&s', 'gam']]
# Vegetable seed catalog: [name, image URL, unit] triples.
vegetable_product = [['Bell pepper', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTrcDPSIQqP1Uo1lK7GUlYRSpCf1edmQtEGGEJ5ay4QbAdQObwIDQ&s', 'gam'], ['Cauliflower', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTOGNxkCVGuTZ2-E7L4WnidWPbZT63F6fKKblotH7n5H8F8GUY&s', 'gam'], ['Cabbage', 'https://pcdn.columbian.com/wp-content/uploads/2019/08/0830_met_cabbage-1226x0-c-default.jpg', 'gam'], ['Carrot', 'https://i5.walmartimages.ca/images/Enlarge/271/747/6000191271747.jpg', 'gam'], ['Cucumber',
                      'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTzFuperqiyoF-6b2Vz6FWv0wndZ9jFdkABGLbnD_xvOPr3tBqRdA&s', 'gam'], ['Tomato', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTnNWU9oih_G799tg1sc41vK5VGroGcK4XmudN2Zi_OTxZs6jIBGA&s', 'gam'], ['Pumpkin', 'https://www.duluthnewstribune.com/incoming/4684986-wtscwa-pumpkin-web.jpg/alternates/BASE_LANDSCAPE/pumpkin%20web.jpg', 'gam'], ['Green bean', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSEBESKVXPO9nYPU8cwLGqjaNKBpHcobcSdVEjxeD1UYXWQhMgUiA&s', 'gam']]
# Seasoning seed catalog: [name, image URL, unit] triples.
seasoning_product = [['Onion', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcS6LOWhat5UFSjK3YcU-hCyC2A6b8sSZf3g0taMFPTT2vBZAgy6&s', 'gam'], ['Garlic', 'https://www.basketbazzar.com/wp-content/uploads/2019/05/Garlic.jpg', 'gam'], ['Turmeric', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT9H01mkElD1fKidz9sOUqhDPSdrCGNY5DINkQ1Ls_4Kmlri0plzg&s', 'gam'],
                     ['Green onion', 'https://cdn.shopify.com/s/files/1/0135/9839/2378/products/Grreen_Onion_Bulb_800x560.png?v=1558314353', 'gam'], ['Pepper', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQB7vIiFa02_CsFtZreVdTJsijjy5Hf_wiD1NB6NqS4sUBZG9aRWg&s', 'gam']]
# Lorem-ipsum filler texts; one is picked at random as a Product.discription
# when seeding (note: "discription" is the model's own spelling).
product_description = [
    {
        "name": "in purus eu magna vulputate luctus cum sociis natoque penatibus et magnis dis parturient montes nascetur ridiculus mus vivamus vestibulum sagittis"
    },
    {
        "name": "adipiscing elit proin risus praesent lectus vestibulum quam sapien varius ut blandit non interdum in ante vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae"
    },
    {
        "name": "eget eros elementum pellentesque quisque porta volutpat erat quisque erat eros viverra eget congue eget semper rutrum nulla nunc purus phasellus in felis donec semper sapien a libero nam dui proin leo odio porttitor id consequat in"
    },
    {
        "name": "leo odio porttitor id consequat in consequat ut nulla sed accumsan felis ut at dolor quis odio consequat varius integer ac leo"
    },
    {
        "name": "imperdiet et commodo vulputate justo in blandit ultrices enim lorem ipsum dolor sit amet consectetuer adipiscing elit proin interdum mauris non ligula pellentesque"
    },
    {
        "name": "velit donec diam neque vestibulum eget vulputate ut ultrices vel augue vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia"
    },
    {
        "name": "mauris non ligula pellentesque ultrices phasellus id sapien in sapien iaculis congue vivamus metus arcu adipiscing molestie hendrerit at vulputate vitae nisl"
    },
    {
        "name": "faucibus orci luctus et ultrices posuere cubilia curae duis faucibus accumsan odio curabitur convallis duis consequat dui nec nisi volutpat eleifend donec ut dolor morbi vel lectus in quam fringilla rhoncus mauris enim leo rhoncus sed vestibulum"
    },
    {
        "name": "metus sapien ut nunc vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae mauris viverra diam vitae quam suspendisse potenti nullam porttitor lacus at turpis donec posuere metus vitae ipsum aliquam non mauris morbi non lectus"
    },
    {
        "name": "nunc proin at turpis a pede posuere nonummy integer non velit donec diam neque vestibulum eget vulputate ut ultrices vel augue vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia"
    }
]
# Order lifecycle labels used to seed the Order_status table.
order_status = ['Proceeding', 'Delivering', 'Delivered', 'Canceled', 'In cart']
# Seed store-owner accounts: id, login_name, password, img_url, store_name.
# Passwords are the anonymized placeholder string "<PASSWORD>".
# BUG FIX: the id=10 entry had an unquoted <PASSWORD> token (a SyntaxError);
# it is now a string literal, consistent with every other entry.
stores = [
    {
        "id": 1,
        "login_name": "Markus",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/etnesciuntiste.jpg?size=100x100&set=set1",
        "store_name": "Janyx"
    }, {
        "id": 2,
        "login_name": "Corabelle",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/asperioresinaliquam.bmp?size=100x100&set=set1",
        "store_name": "Eamia"
    }, {
        "id": 3,
        "login_name": "Drusie",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/nonsitdolor.png?size=100x100&set=set1",
        "store_name": "BlogXS"
    }, {
        "id": 4,
        "login_name": "Maximilian",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/voluptasnonvero.png?size=100x100&set=set1",
        "store_name": "Meedoo"
    }, {
        "id": 5,
        "login_name": "Drugi",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/eligendiautdeserunt.jpg?size=100x100&set=set1",
        "store_name": "Dynabox"
    }, {
        "id": 6,
        "login_name": "Ilene",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/vellaboreet.bmp?size=100x100&set=set1",
        "store_name": "Photofeed"
    }, {
        "id": 7,
        "login_name": "Illa",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/laboriosamvelitanimi.jpg?size=100x100&set=set1",
        "store_name": "Jatri"
    }, {
        "id": 8,
        "login_name": "Essy",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/repudiandaeconsequaturqui.png?size=100x100&set=set1",
        "store_name": "Zoozzy"
    }, {
        "id": 9,
        "login_name": "Stinky",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/quoquodquam.bmp?size=100x100&set=set1",
        "store_name": "Skaboo"
    }, {
        "id": 10,
        "login_name": "Jackie",
        "password": "<PASSWORD>",
        "img_url": "https://robohash.org/quiinharum.bmp?size=100x100&set=set1",
        "store_name": "Zoozzy"
    }
]
# Seed customer accounts: id, login_name and a robohash avatar URL.
# The ids here (1..40) are presumably the user_id values referenced by the
# ratings fixture below — TODO confirm against the Rating model's foreign key.
users = [
    {"id":1,"login_name":"Piegrome","img_url":"https://robohash.org/namametincidunt.png?size=200x200&set=set1"},
    {"id":2,"login_name":"Bolderstone","img_url":"https://robohash.org/remminusmodi.png?size=200x200&set=set1"},
    {"id":3,"login_name":"Axleby","img_url":"https://robohash.org/doloreconsequaturquisquam.png?size=200x200&set=set1"},
    {"id":4,"login_name":"Gerge","img_url":"https://robohash.org/nihilrepellendusea.png?size=200x200&set=set1"},
    {"id":5,"login_name":"Ellings","img_url":"https://robohash.org/quoautnihil.png?size=200x200&set=set1"},
    {"id":6,"login_name":"Keling","img_url":"https://robohash.org/doloremquevelitexcepturi.png?size=200x200&set=set1"},
    {"id":7,"login_name":"Kleinerman","img_url":"https://robohash.org/quamvoluptatumet.png?size=200x200&set=set1"},
    {"id":8,"login_name":"Chetter","img_url":"https://robohash.org/istesiteaque.png?size=200x200&set=set1"},
    {"id":9,"login_name":"Jedrachowicz","img_url":"https://robohash.org/situllamamet.png?size=200x200&set=set1"},
    {"id":10,"login_name":"Sayce","img_url":"https://robohash.org/harumdistinctioitaque.png?size=200x200&set=set1"},
    {"id":11,"login_name":"Vella","img_url":"https://robohash.org/utverorepudiandae.png?size=200x200&set=set1"},
    {"id":12,"login_name":"Kenvin","img_url":"https://robohash.org/magninobisdolores.png?size=200x200&set=set1"},
    {"id":13,"login_name":"Perazzo","img_url":"https://robohash.org/quasifugiatsunt.png?size=200x200&set=set1"},
    {"id":14,"login_name":"Beart","img_url":"https://robohash.org/autemaliquammaxime.png?size=200x200&set=set1"},
    {"id":15,"login_name":"Tomasik","img_url":"https://robohash.org/sapienteoditrepellendus.png?size=200x200&set=set1"},
    {"id":16,"login_name":"Neasam","img_url":"https://robohash.org/inerrorautem.png?size=200x200&set=set1"},
    {"id":17,"login_name":"Greenstock","img_url":"https://robohash.org/ututipsum.png?size=200x200&set=set1"},
    {"id":18,"login_name":"Vermer","img_url":"https://robohash.org/quaevelitexercitationem.png?size=200x200&set=set1"},
    {"id":19,"login_name":"Kale","img_url":"https://robohash.org/suscipitnecessitatibusexcepturi.png?size=200x200&set=set1"},
    {"id":20,"login_name":"Portwaine","img_url":"https://robohash.org/occaecatiteneturnesciunt.png?size=200x200&set=set1"},
    {"id":21,"login_name":"Shefton","img_url":"https://robohash.org/aliasassumendafuga.png?size=200x200&set=set1"},
    {"id":22,"login_name":"Guinane","img_url":"https://robohash.org/sequiconsequunturenim.png?size=200x200&set=set1"},
    {"id":23,"login_name":"Gitthouse","img_url":"https://robohash.org/idlaboreesse.png?size=200x200&set=set1"},
    {"id":24,"login_name":"Lyngsted","img_url":"https://robohash.org/aliquiddelectuset.png?size=200x200&set=set1"},
    {"id":25,"login_name":"Dellar","img_url":"https://robohash.org/cupiditateexplicaboesse.png?size=200x200&set=set1"},
    {"id":26,"login_name":"Latham","img_url":"https://robohash.org/voluptatemaccusantiumeligendi.png?size=200x200&set=set1"},
    {"id":27,"login_name":"Bamb","img_url":"https://robohash.org/doloremquequiafacere.png?size=200x200&set=set1"},
    {"id":28,"login_name":"Sigg","img_url":"https://robohash.org/quodveroet.png?size=200x200&set=set1"},
    {"id":29,"login_name":"Lasham","img_url":"https://robohash.org/dolorehiceaque.png?size=200x200&set=set1"},
    {"id":30,"login_name":"Lattimore","img_url":"https://robohash.org/quiimpeditsuscipit.png?size=200x200&set=set1"},
    {"id":31,"login_name":"Rozet","img_url":"https://robohash.org/officiisperspiciatisneque.png?size=200x200&set=set1"},
    {"id":32,"login_name":"Budibent","img_url":"https://robohash.org/doloresvoluptasquidem.png?size=200x200&set=set1"},
    {"id":33,"login_name":"Mains","img_url":"https://robohash.org/temporeculpatotam.png?size=200x200&set=set1"},
    {"id":34,"login_name":"Orrow","img_url":"https://robohash.org/cumculpadoloremque.png?size=200x200&set=set1"},
    {"id":35,"login_name":"Gearty","img_url":"https://robohash.org/involuptasminus.png?size=200x200&set=set1"},
    {"id":36,"login_name":"Arni","img_url":"https://robohash.org/voluptatemsequitotam.png?size=200x200&set=set1"},
    {"id":37,"login_name":"Piddick","img_url":"https://robohash.org/saepequibusdamnesciunt.png?size=200x200&set=set1"},
    {"id":38,"login_name":"Acom","img_url":"https://robohash.org/minimaharumet.png?size=200x200&set=set1"},
    {"id":39,"login_name":"Clemenzi","img_url":"https://robohash.org/nemoillumlibero.png?size=200x200&set=set1"},
    {"id":40,"login_name":"Asgodby","img_url":"https://robohash.org/minusnostrumipsam.png?size=200x200&set=set1"},
]
ratings = [
{"id":1,"rating":3,"comment":"aggregate granular e-commerce","user_id":25,"product_id":58},
{"id":2,"rating":1,"comment":"disintermediate transparent e-services","user_id":20,"product_id":87},
{"id":3,"rating":2,"comment":"facilitate back-end users","user_id":21,"product_id":34},
{"id":4,"rating":4,"comment":"extend one-to-one platforms","user_id":35,"product_id":69},
{"id":5,"rating":5,"comment":"envisioneer leading-edge technologies","user_id":38,"product_id":60},
{"id":6,"rating":3,"comment":"harness front-end applications","user_id":47,"product_id":68},
{"id":7,"rating":1,"comment":"seize bricks-and-clicks web services","user_id":20,"product_id":50},
{"id":8,"rating":1,"comment":"aggregate integrated e-markets","user_id":31,"product_id":29},
{"id":9,"rating":1,"comment":"leverage innovative eyeballs","user_id":32,"product_id":11},
{"id":10,"rating":1,"comment":"architect synergistic supply-chains","user_id":27,"product_id":49},
{"id":11,"rating":1,"comment":"implement best-of-breed functionalities","user_id":49,"product_id":24},
{"id":12,"rating":5,"comment":"synergize best-of-breed metrics","user_id":20,"product_id":18},
{"id":13,"rating":3,"comment":"engage B2C niches","user_id":33,"product_id":34},
{"id":14,"rating":5,"comment":"integrate collaborative portals","user_id":38,"product_id":85},
{"id":15,"rating":3,"comment":"mesh global architectures","user_id":20,"product_id":77},
{"id":16,"rating":1,"comment":"target best-of-breed initiatives","user_id":36,"product_id":7},
{"id":17,"rating":5,"comment":"iterate wireless infomediaries","user_id":13,"product_id":28},
{"id":18,"rating":2,"comment":"target sticky methodologies","user_id":39,"product_id":12},
{"id":19,"rating":3,"comment":"productize turn-key architectures","user_id":36,"product_id":32},
{"id":20,"rating":2,"comment":"monetize granular channels","user_id":40,"product_id":30},
{"id":21,"rating":4,"comment":"unleash leading-edge functionalities","user_id":46,"product_id":30},
{"id":22,"rating":1,"comment":"maximize user-centric solutions","user_id":11,"product_id":78},
{"id":23,"rating":5,"comment":"engage enterprise e-business","user_id":21,"product_id":7},
{"id":24,"rating":1,"comment":"extend bricks-and-clicks e-business","user_id":46,"product_id":57},
{"id":25,"rating":5,"comment":"expedite interactive relationships","user_id":48,"product_id":27},
{"id":26,"rating":5,"comment":"engineer wireless mindshare","user_id":45,"product_id":14},
{"id":27,"rating":1,"comment":"evolve sticky platforms","user_id":25,"product_id":5},
{"id":28,"rating":4,"comment":"harness holistic convergence","user_id":40,"product_id":79},
{"id":29,"rating":5,"comment":"incentivize extensible partnerships","user_id":22,"product_id":48},
{"id":30,"rating":3,"comment":"visualize impactful infrastructures","user_id":21,"product_id":43},
{"id":31,"rating":2,"comment":"scale next-generation experiences","user_id":11,"product_id":66},
{"id":32,"rating":2,"comment":"architect user-centric infrastructures","user_id":37,"product_id":31},
{"id":33,"rating":3,"comment":"e-enable 24/365 e-markets","user_id":40,"product_id":23},
{"id":34,"rating":5,"comment":"engineer web-enabled markets","user_id":28,"product_id":31},
{"id":35,"rating":5,"comment":"expedite viral portals","user_id":48,"product_id":6},
{"id":36,"rating":1,"comment":"seize B2B functionalities","user_id":45,"product_id":51},
{"id":37,"rating":5,"comment":"strategize turn-key technologies","user_id":49,"product_id":65},
{"id":38,"rating":1,"comment":"redefine ubiquitous mindshare","user_id":20,"product_id":63},
{"id":39,"rating":2,"comment":"innovate robust solutions","user_id":41,"product_id":88},
{"id":40,"rating":3,"comment":"redefine global schemas","user_id":18,"product_id":27},
{"id":41,"rating":5,"comment":"enhance 24/7 systems","user_id":22,"product_id":47},
{"id":42,"rating":4,"comment":"matrix strategic mindshare","user_id":46,"product_id":30},
{"id":43,"rating":1,"comment":"evolve end-to-end synergies","user_id":21,"product_id":84},
{"id":44,"rating":5,"comment":"optimize sticky systems","user_id":35,"product_id":38},
{"id":45,"rating":3,"comment":"matrix bricks-and-clicks users","user_id":49,"product_id":37},
{"id":46,"rating":3,"comment":"innovate efficient relationships","user_id":11,"product_id":31},
{"id":47,"rating":4,"comment":"iterate clicks-and-mortar channels","user_id":41,"product_id":88},
{"id":48,"rating":2,"comment":"architect mission-critical web services","user_id":36,"product_id":77},
{"id":49,"rating":5,"comment":"streamline value-added mindshare","user_id":39,"product_id":80},
{"id":50,"rating":2,"comment":"syndicate bleeding-edge markets","user_id":42,"product_id":63},
{"id":51,"rating":3,"comment":"e-enable enterprise schemas","user_id":48,"product_id":72},
{"id":52,"rating":4,"comment":"whiteboard e-business infrastructures","user_id":31,"product_id":62},
{"id":53,"rating":3,"comment":"evolve cutting-edge technologies","user_id":45,"product_id":23},
{"id":54,"rating":4,"comment":"harness seamless partnerships","user_id":40,"product_id":34},
{"id":55,"rating":4,"comment":"target impactful deliverables","user_id":23,"product_id":75},
{"id":56,"rating":1,"comment":"unleash collaborative functionalities","user_id":15,"product_id":34},
{"id":57,"rating":4,"comment":"synthesize clicks-and-mortar experiences","user_id":14,"product_id":16},
{"id":58,"rating":1,"comment":"redefine distributed mindshare","user_id":48,"product_id":20},
{"id":59,"rating":2,"comment":"generate world-class relationships","user_id":43,"product_id":75},
{"id":60,"rating":1,"comment":"maximize collaborative bandwidth","user_id":40,"product_id":69},
{"id":61,"rating":5,"comment":"matrix holistic initiatives","user_id":21,"product_id":26},
{"id":62,"rating":2,"comment":"streamline back-end supply-chains","user_id":41,"product_id":28},
{"id":63,"rating":1,"comment":"unleash granular models","user_id":28,"product_id":89},
{"id":64,"rating":3,"comment":"brand impactful communities","user_id":11,"product_id":57},
{"id":65,"rating":4,"comment":"redefine real-time systems","user_id":27,"product_id":62},
{"id":66,"rating":4,"comment":"monetize user-centric web-readiness","user_id":13,"product_id":44},
{"id":67,"rating":4,"comment":"synthesize 24/365 e-business","user_id":30,"product_id":81},
{"id":68,"rating":5,"comment":"whiteboard mission-critical solutions","user_id":33,"product_id":20},
{"id":69,"rating":2,"comment":"deliver dynamic architectures","user_id":25,"product_id":33},
{"id":70,"rating":2,"comment":"matrix magnetic models","user_id":38,"product_id":16},
{"id":71,"rating":4,"comment":"transform vertical e-business","user_id":25,"product_id":87},
{"id":72,"rating":5,"comment":"monetize proactive infomediaries","user_id":36,"product_id":83},
{"id":73,"rating":5,"comment":"mesh global architectures","user_id":42,"product_id":79},
{"id":74,"rating":4,"comment":"exploit B2C platforms","user_id":11,"product_id":56},
{"id":75,"rating":1,"comment":"repurpose front-end e-tailers","user_id":21,"product_id":23},
{"id":76,"rating":3,"comment":"synergize B2C models","user_id":16,"product_id":67},
{"id":77,"rating":4,"comment":"engineer out-of-the-box relationships","user_id":23,"product_id":77},
{"id":78,"rating":3,"comment":"incubate interactive initiatives","user_id":12,"product_id":56},
{"id":79,"rating":2,"comment":"scale world-class vortals","user_id":47,"product_id":29},
{"id":80,"rating":1,"comment":"embrace strategic methodologies","user_id":35,"product_id":81},
{"id":81,"rating":1,"comment":"brand proactive relationships","user_id":40,"product_id":25},
{"id":82,"rating":2,"comment":"extend viral supply-chains","user_id":38,"product_id":26},
{"id":83,"rating":5,"comment":"evolve frictionless methodologies","user_id":39,"product_id":56},
{"id":84,"rating":4,"comment":"generate plug-and-play metrics","user_id":30,"product_id":54},
{"id":85,"rating":4,"comment":"maximize virtual communities","user_id":34,"product_id":84},
{"id":86,"rating":5,"comment":"implement B2C e-tailers","user_id":16,"product_id":38},
{"id":87,"rating":1,"comment":"scale integrated initiatives","user_id":49,"product_id":45},
{"id":88,"rating":2,"comment":"benchmark distributed paradigms","user_id":40,"product_id":64},
{"id":89,"rating":1,"comment":"disintermediate holistic systems","user_id":43,"product_id":12},
{"id":90,"rating":4,"comment":"morph efficient ROI","user_id":13,"product_id":26},
{"id":91,"rating":2,"comment":"streamline dot-com portals","user_id":40,"product_id":66},
{"id":92,"rating":1,"comment":"harness holistic networks","user_id":17,"product_id":86},
{"id":93,"rating":1,"comment":"envisioneer bleeding-edge systems","user_id":19,"product_id":39},
{"id":94,"rating":5,"comment":"transform plug-and-play e-services","user_id":12,"product_id":36},
{"id":95,"rating":4,"comment":"synthesize open-source methodologies","user_id":49,"product_id":71},
{"id":96,"rating":5,"comment":"morph scalable e-commerce","user_id":35,"product_id":57},
{"id":97,"rating":4,"comment":"repurpose frictionless ROI","user_id":37,"product_id":1},
{"id":98,"rating":1,"comment":"incentivize e-business supply-chains","user_id":42,"product_id":21},
{"id":99,"rating":3,"comment":"deliver magnetic initiatives","user_id":32,"product_id":32},
{"id":100,"rating":5,"comment":"repurpose innovative functionalities","user_id":27,"product_id":13},
{"id":101,"rating":1,"comment":"implement innovative niches","user_id":19,"product_id":58},
{"id":102,"rating":3,"comment":"synergize intuitive deliverables","user_id":45,"product_id":18},
{"id":103,"rating":5,"comment":"empower extensible metrics","user_id":40,"product_id":67},
{"id":104,"rating":2,"comment":"productize one-to-one schemas","user_id":30,"product_id":72},
{"id":105,"rating":2,"comment":"orchestrate customized synergies","user_id":32,"product_id":41},
{"id":106,"rating":2,"comment":"grow strategic initiatives","user_id":17,"product_id":37},
{"id":107,"rating":2,"comment":"disintermediate robust action-items","user_id":41,"product_id":57},
{"id":108,"rating":2,"comment":"disintermediate scalable partnerships","user_id":41,"product_id":11},
{"id":109,"rating":1,"comment":"redefine B2C users","user_id":37,"product_id":80},
{"id":110,"rating":3,"comment":"streamline B2B users","user_id":46,"product_id":70},
{"id":111,"rating":2,"comment":"maximize e-business metrics","user_id":40,"product_id":87},
{"id":112,"rating":5,"comment":"whiteboard strategic web services","user_id":22,"product_id":11},
{"id":113,"rating":3,"comment":"enable revolutionary convergence","user_id":17,"product_id":42},
{"id":114,"rating":1,"comment":"whiteboard viral content","user_id":45,"product_id":55},
{"id":115,"rating":3,"comment":"incubate collaborative synergies","user_id":44,"product_id":57},
{"id":116,"rating":5,"comment":"visualize ubiquitous web services","user_id":49,"product_id":4},
{"id":117,"rating":1,"comment":"implement strategic users","user_id":49,"product_id":77},
{"id":118,"rating":2,"comment":"matrix rich mindshare","user_id":31,"product_id":57},
{"id":119,"rating":3,"comment":"grow rich portals","user_id":22,"product_id":16},
{"id":120,"rating":3,"comment":"morph out-of-the-box supply-chains","user_id":17,"product_id":64},
{"id":121,"rating":3,"comment":"evolve clicks-and-mortar ROI","user_id":33,"product_id":42},
{"id":122,"rating":4,"comment":"leverage visionary portals","user_id":49,"product_id":85},
{"id":123,"rating":2,"comment":"generate dot-com e-markets","user_id":45,"product_id":77},
{"id":124,"rating":5,"comment":"orchestrate web-enabled schemas","user_id":37,"product_id":82},
{"id":125,"rating":1,"comment":"enable turn-key vortals","user_id":34,"product_id":46},
{"id":126,"rating":5,"comment":"exploit distributed supply-chains","user_id":19,"product_id":72},
{"id":127,"rating":4,"comment":"engineer global e-business","user_id":26,"product_id":39},
{"id":128,"rating":4,"comment":"strategize virtual systems","user_id":16,"product_id":86},
{"id":129,"rating":5,"comment":"optimize bricks-and-clicks functionalities","user_id":22,"product_id":81},
{"id":130,"rating":3,"comment":"monetize magnetic web-readiness","user_id":29,"product_id":57},
{"id":131,"rating":4,"comment":"orchestrate best-of-breed synergies","user_id":17,"product_id":79},
{"id":132,"rating":2,"comment":"scale clicks-and-mortar networks","user_id":12,"product_id":29},
{"id":133,"rating":4,"comment":"recontextualize cross-platform channels","user_id":43,"product_id":31},
{"id":134,"rating":5,"comment":"productize scalable ROI","user_id":39,"product_id":74},
{"id":135,"rating":1,"comment":"streamline dot-com content","user_id":43,"product_id":79},
{"id":136,"rating":1,"comment":"cultivate impactful methodologies","user_id":16,"product_id":51},
{"id":137,"rating":4,"comment":"unleash sticky networks","user_id":19,"product_id":66},
{"id":138,"rating":5,"comment":"empower end-to-end portals","user_id":21,"product_id":72},
{"id":139,"rating":5,"comment":"facilitate next-generation networks","user_id":41,"product_id":84},
{"id":140,"rating":2,"comment":"grow cross-media communities","user_id":33,"product_id":19},
{"id":141,"rating":2,"comment":"benchmark clicks-and-mortar eyeballs","user_id":13,"product_id":51},
{"id":142,"rating":5,"comment":"reintermediate transparent metrics","user_id":46,"product_id":2},
{"id":143,"rating":4,"comment":"morph vertical relationships","user_id":14,"product_id":4},
{"id":144,"rating":3,"comment":"deploy plug-and-play e-business","user_id":42,"product_id":87},
{"id":145,"rating":3,"comment":"seize scalable e-services","user_id":36,"product_id":25},
{"id":146,"rating":1,"comment":"cultivate one-to-one e-markets","user_id":27,"product_id":67},
{"id":147,"rating":1,"comment":"embrace one-to-one infrastructures","user_id":46,"product_id":10},
{"id":148,"rating":2,"comment":"brand vertical web services","user_id":29,"product_id":36},
{"id":149,"rating":5,"comment":"target e-business channels","user_id":18,"product_id":32},
{"id":150,"rating":2,"comment":"facilitate intuitive technologies","user_id":39,"product_id":41},
{"id":151,"rating":4,"comment":"whiteboard compelling supply-chains","user_id":23,"product_id":64},
{"id":152,"rating":3,"comment":"strategize collaborative systems","user_id":19,"product_id":64},
{"id":153,"rating":4,"comment":"harness clicks-and-mortar markets","user_id":24,"product_id":13},
{"id":154,"rating":3,"comment":"morph viral vortals","user_id":14,"product_id":15},
{"id":155,"rating":5,"comment":"transition front-end metrics","user_id":37,"product_id":33},
{"id":156,"rating":3,"comment":"empower global mindshare","user_id":44,"product_id":83},
{"id":157,"rating":5,"comment":"architect leading-edge markets","user_id":49,"product_id":66},
{"id":158,"rating":1,"comment":"leverage open-source e-commerce","user_id":30,"product_id":10},
{"id":159,"rating":1,"comment":"incentivize killer channels","user_id":36,"product_id":8},
{"id":160,"rating":2,"comment":"transition one-to-one synergies","user_id":23,"product_id":64},
{"id":161,"rating":5,"comment":"visualize cross-platform initiatives","user_id":49,"product_id":22},
{"id":162,"rating":1,"comment":"morph 24/7 synergies","user_id":24,"product_id":76},
{"id":163,"rating":4,"comment":"brand cutting-edge partnerships","user_id":21,"product_id":75},
{"id":164,"rating":5,"comment":"scale cross-media e-services","user_id":19,"product_id":59},
{"id":165,"rating":1,"comment":"enable integrated web services","user_id":46,"product_id":29},
{"id":166,"rating":1,"comment":"utilize enterprise infomediaries","user_id":34,"product_id":1},
{"id":167,"rating":4,"comment":"embrace mission-critical vortals","user_id":44,"product_id":42},
{"id":168,"rating":1,"comment":"revolutionize strategic models","user_id":15,"product_id":5},
{"id":169,"rating":1,"comment":"strategize bricks-and-clicks partnerships","user_id":11,"product_id":71},
{"id":170,"rating":1,"comment":"envisioneer bleeding-edge e-tailers","user_id":26,"product_id":38},
{"id":171,"rating":1,"comment":"grow bleeding-edge architectures","user_id":35,"product_id":9},
{"id":172,"rating":5,"comment":"brand virtual niches","user_id":39,"product_id":65},
{"id":173,"rating":1,"comment":"whiteboard sticky applications","user_id":38,"product_id":86},
{"id":174,"rating":4,"comment":"whiteboard next-generation initiatives","user_id":42,"product_id":13},
{"id":175,"rating":2,"comment":"incubate collaborative ROI","user_id":40,"product_id":64},
{"id":176,"rating":3,"comment":"morph customized technologies","user_id":35,"product_id":45},
{"id":177,"rating":5,"comment":"whiteboard web-enabled communities","user_id":41,"product_id":8},
{"id":178,"rating":5,"comment":"synergize vertical networks","user_id":35,"product_id":24},
{"id":179,"rating":4,"comment":"incubate value-added schemas","user_id":27,"product_id":62},
{"id":180,"rating":3,"comment":"expedite proactive portals","user_id":21,"product_id":17},
{"id":181,"rating":1,"comment":"leverage best-of-breed communities","user_id":18,"product_id":37},
{"id":182,"rating":3,"comment":"syndicate distributed relationships","user_id":47,"product_id":13},
{"id":183,"rating":2,"comment":"harness plug-and-play bandwidth","user_id":43,"product_id":7},
{"id":184,"rating":3,"comment":"envisioneer leading-edge e-tailers","user_id":29,"product_id":13},
{"id":185,"rating":5,"comment":"matrix customized niches","user_id":40,"product_id":76},
{"id":186,"rating":5,"comment":"engineer interactive paradigms","user_id":13,"product_id":64},
{"id":187,"rating":1,"comment":"aggregate plug-and-play metrics","user_id":47,"product_id":1},
{"id":188,"rating":1,"comment":"brand frictionless platforms","user_id":42,"product_id":59},
{"id":189,"rating":2,"comment":"transition dot-com partnerships","user_id":48,"product_id":18},
{"id":190,"rating":4,"comment":"target vertical interfaces","user_id":37,"product_id":34},
{"id":191,"rating":3,"comment":"mesh extensible e-business","user_id":12,"product_id":53},
{"id":192,"rating":5,"comment":"whiteboard enterprise niches","user_id":37,"product_id":21},
{"id":193,"rating":4,"comment":"morph next-generation infrastructures","user_id":12,"product_id":54},
{"id":194,"rating":5,"comment":"scale 24/7 bandwidth","user_id":13,"product_id":72},
{"id":195,"rating":3,"comment":"harness dynamic content","user_id":36,"product_id":1},
{"id":196,"rating":2,"comment":"morph next-generation deliverables","user_id":27,"product_id":9},
{"id":197,"rating":4,"comment":"target efficient infrastructures","user_id":16,"product_id":13},
{"id":198,"rating":2,"comment":"engage viral schemas","user_id":47,"product_id":44},
{"id":199,"rating":5,"comment":"expedite value-added solutions","user_id":41,"product_id":23},
{"id":200,"rating":1,"comment":"exploit scalable methodologies","user_id":45,"product_id":84},
{"id":201,"rating":1,"comment":"strategize one-to-one content","user_id":16,"product_id":86},
{"id":202,"rating":5,"comment":"enhance dot-com networks","user_id":16,"product_id":52},
{"id":203,"rating":1,"comment":"productize enterprise schemas","user_id":46,"product_id":80},
{"id":204,"rating":2,"comment":"transition compelling initiatives","user_id":46,"product_id":74},
{"id":205,"rating":4,"comment":"benchmark cutting-edge functionalities","user_id":36,"product_id":87},
{"id":206,"rating":3,"comment":"orchestrate out-of-the-box ROI","user_id":30,"product_id":1},
{"id":207,"rating":2,"comment":"evolve back-end convergence","user_id":15,"product_id":21},
{"id":208,"rating":3,"comment":"syndicate end-to-end models","user_id":35,"product_id":86},
{"id":209,"rating":2,"comment":"brand 24/7 e-markets","user_id":32,"product_id":36},
{"id":210,"rating":4,"comment":"implement transparent e-services","user_id":47,"product_id":86},
{"id":211,"rating":5,"comment":"utilize leading-edge ROI","user_id":37,"product_id":65},
{"id":212,"rating":1,"comment":"cultivate bricks-and-clicks users","user_id":47,"product_id":4},
{"id":213,"rating":4,"comment":"repurpose sticky supply-chains","user_id":26,"product_id":62},
{"id":214,"rating":1,"comment":"brand virtual functionalities","user_id":43,"product_id":55},
{"id":215,"rating":1,"comment":"strategize impactful metrics","user_id":24,"product_id":84},
{"id":216,"rating":2,"comment":"harness dot-com content","user_id":17,"product_id":38},
{"id":217,"rating":4,"comment":"target sexy initiatives","user_id":39,"product_id":76},
{"id":218,"rating":5,"comment":"incubate extensible action-items","user_id":49,"product_id":5},
{"id":219,"rating":2,"comment":"monetize clicks-and-mortar networks","user_id":32,"product_id":2},
{"id":220,"rating":4,"comment":"scale bleeding-edge initiatives","user_id":15,"product_id":36},
{"id":221,"rating":4,"comment":"facilitate e-business systems","user_id":15,"product_id":38},
{"id":222,"rating":4,"comment":"architect clicks-and-mortar content","user_id":38,"product_id":34},
{"id":223,"rating":2,"comment":"transition compelling communities","user_id":14,"product_id":7},
{"id":224,"rating":4,"comment":"syndicate global e-tailers","user_id":35,"product_id":66},
{"id":225,"rating":1,"comment":"e-enable cross-media methodologies","user_id":46,"product_id":28},
{"id":226,"rating":4,"comment":"iterate ubiquitous models","user_id":34,"product_id":69},
{"id":227,"rating":4,"comment":"transform seamless methodologies","user_id":18,"product_id":64},
{"id":228,"rating":3,"comment":"empower killer vortals","user_id":38,"product_id":60},
{"id":229,"rating":4,"comment":"drive sexy paradigms","user_id":35,"product_id":61},
{"id":230,"rating":3,"comment":"streamline value-added supply-chains","user_id":34,"product_id":44},
{"id":231,"rating":1,"comment":"cultivate one-to-one infomediaries","user_id":17,"product_id":62},
{"id":232,"rating":5,"comment":"reintermediate dot-com niches","user_id":11,"product_id":30},
{"id":233,"rating":4,"comment":"unleash turn-key action-items","user_id":17,"product_id":73},
{"id":234,"rating":5,"comment":"synthesize dynamic interfaces","user_id":16,"product_id":36},
{"id":235,"rating":4,"comment":"extend 24/7 deliverables","user_id":16,"product_id":8},
{"id":236,"rating":2,"comment":"deploy holistic relationships","user_id":27,"product_id":39},
{"id":237,"rating":5,"comment":"utilize mission-critical supply-chains","user_id":40,"product_id":37},
{"id":238,"rating":3,"comment":"facilitate dynamic methodologies","user_id":11,"product_id":69},
{"id":239,"rating":5,"comment":"synthesize cross-platform web-readiness","user_id":16,"product_id":21},
{"id":240,"rating":2,"comment":"reintermediate 24/365 functionalities","user_id":32,"product_id":1},
{"id":241,"rating":4,"comment":"reintermediate cross-media e-business","user_id":20,"product_id":66},
{"id":242,"rating":2,"comment":"deploy sticky functionalities","user_id":43,"product_id":86},
{"id":243,"rating":3,"comment":"exploit dot-com platforms","user_id":14,"product_id":48},
{"id":244,"rating":4,"comment":"enable wireless experiences","user_id":12,"product_id":85},
{"id":245,"rating":3,"comment":"cultivate magnetic partnerships","user_id":48,"product_id":9},
{"id":246,"rating":4,"comment":"synergize wireless web-readiness","user_id":19,"product_id":65},
{"id":247,"rating":5,"comment":"implement e-business e-services","user_id":16,"product_id":3},
{"id":248,"rating":1,"comment":"expedite intuitive supply-chains","user_id":30,"product_id":75},
{"id":249,"rating":4,"comment":"aggregate rich functionalities","user_id":28,"product_id":75},
{"id":250,"rating":1,"comment":"disintermediate web-enabled metrics","user_id":35,"product_id":84},
{"id":251,"rating":4,"comment":"reintermediate dynamic technologies","user_id":48,"product_id":67},
{"id":252,"rating":1,"comment":"productize virtual systems","user_id":11,"product_id":45},
{"id":253,"rating":3,"comment":"optimize clicks-and-mortar experiences","user_id":42,"product_id":26},
{"id":254,"rating":2,"comment":"evolve collaborative systems","user_id":46,"product_id":81},
{"id":255,"rating":3,"comment":"productize cutting-edge methodologies","user_id":49,"product_id":25},
{"id":256,"rating":2,"comment":"benchmark sticky ROI","user_id":35,"product_id":67},
{"id":257,"rating":3,"comment":"reintermediate plug-and-play systems","user_id":21,"product_id":48},
{"id":258,"rating":5,"comment":"generate strategic e-markets","user_id":11,"product_id":11},
{"id":259,"rating":1,"comment":"incubate cross-platform systems","user_id":24,"product_id":12},
{"id":260,"rating":1,"comment":"matrix vertical niches","user_id":22,"product_id":49},
{"id":261,"rating":5,"comment":"redefine robust networks","user_id":16,"product_id":65},
{"id":262,"rating":5,"comment":"engage world-class web-readiness","user_id":24,"product_id":12},
{"id":263,"rating":2,"comment":"strategize front-end infomediaries","user_id":36,"product_id":75},
{"id":264,"rating":3,"comment":"repurpose turn-key architectures","user_id":24,"product_id":31},
{"id":265,"rating":2,"comment":"streamline dot-com initiatives","user_id":24,"product_id":84},
{"id":266,"rating":3,"comment":"strategize enterprise applications","user_id":38,"product_id":22},
{"id":267,"rating":1,"comment":"generate seamless schemas","user_id":32,"product_id":51},
{"id":268,"rating":3,"comment":"synergize B2C e-tailers","user_id":25,"product_id":44},
{"id":269,"rating":2,"comment":"harness global solutions","user_id":34,"product_id":66},
{"id":270,"rating":5,"comment":"evolve intuitive e-markets","user_id":13,"product_id":81},
{"id":271,"rating":5,"comment":"drive distributed paradigms","user_id":28,"product_id":82},
{"id":272,"rating":5,"comment":"visualize seamless markets","user_id":42,"product_id":82},
{"id":273,"rating":4,"comment":"reintermediate cross-platform paradigms","user_id":45,"product_id":12},
{"id":274,"rating":3,"comment":"architect impactful channels","user_id":48,"product_id":79},
{"id":275,"rating":4,"comment":"recontextualize B2C technologies","user_id":17,"product_id":9},
{"id":276,"rating":4,"comment":"implement front-end action-items","user_id":23,"product_id":7},
{"id":277,"rating":1,"comment":"enable dot-com vortals","user_id":27,"product_id":58},
{"id":278,"rating":2,"comment":"incubate back-end solutions","user_id":11,"product_id":11},
{"id":279,"rating":3,"comment":"architect proactive e-business","user_id":34,"product_id":81},
{"id":280,"rating":3,"comment":"redefine best-of-breed e-commerce","user_id":47,"product_id":71},
{"id":281,"rating":2,"comment":"productize visionary infrastructures","user_id":20,"product_id":85},
{"id":282,"rating":5,"comment":"strategize bricks-and-clicks systems","user_id":43,"product_id":56},
{"id":283,"rating":2,"comment":"syndicate value-added platforms","user_id":19,"product_id":84},
{"id":284,"rating":4,"comment":"evolve cross-media deliverables","user_id":19,"product_id":81},
{"id":285,"rating":3,"comment":"target plug-and-play experiences","user_id":33,"product_id":83},
{"id":286,"rating":1,"comment":"evolve real-time infomediaries","user_id":27,"product_id":25},
{"id":287,"rating":5,"comment":"scale frictionless relationships","user_id":15,"product_id":13},
{"id":288,"rating":1,"comment":"evolve real-time vortals","user_id":19,"product_id":69},
{"id":289,"rating":4,"comment":"innovate robust models","user_id":14,"product_id":70},
{"id":290,"rating":2,"comment":"recontextualize web-enabled markets","user_id":40,"product_id":70},
{"id":291,"rating":1,"comment":"maximize interactive vortals","user_id":27,"product_id":6},
{"id":292,"rating":3,"comment":"seize virtual vortals","user_id":41,"product_id":49},
{"id":293,"rating":4,"comment":"disintermediate revolutionary e-tailers","user_id":20,"product_id":87},
{"id":294,"rating":1,"comment":"aggregate extensible applications","user_id":34,"product_id":37},
{"id":295,"rating":2,"comment":"incubate one-to-one channels","user_id":16,"product_id":13},
{"id":296,"rating":4,"comment":"target extensible bandwidth","user_id":27,"product_id":81},
{"id":297,"rating":5,"comment":"leverage mission-critical e-commerce","user_id":47,"product_id":75},
{"id":298,"rating":5,"comment":"e-enable robust communities","user_id":26,"product_id":20},
{"id":299,"rating":4,"comment":"iterate mission-critical bandwidth","user_id":47,"product_id":77},
{"id":300,"rating":2,"comment":"mesh extensible partnerships","user_id":23,"product_id":4},
{"id":301,"rating":2,"comment":"incubate sexy architectures","user_id":22,"product_id":72},
{"id":302,"rating":1,"comment":"enhance scalable content","user_id":35,"product_id":47},
{"id":303,"rating":1,"comment":"exploit user-centric networks","user_id":22,"product_id":78},
{"id":304,"rating":2,"comment":"implement plug-and-play models","user_id":20,"product_id":45},
{"id":305,"rating":3,"comment":"drive next-generation bandwidth","user_id":22,"product_id":44},
{"id":306,"rating":3,"comment":"cultivate one-to-one e-tailers","user_id":31,"product_id":40},
{"id":307,"rating":2,"comment":"cultivate front-end models","user_id":43,"product_id":18},
{"id":308,"rating":4,"comment":"synthesize back-end web-readiness","user_id":45,"product_id":33},
{"id":309,"rating":3,"comment":"reintermediate turn-key synergies","user_id":35,"product_id":42},
{"id":310,"rating":2,"comment":"integrate frictionless channels","user_id":15,"product_id":50},
{"id":311,"rating":5,"comment":"transform strategic paradigms","user_id":28,"product_id":24},
{"id":312,"rating":4,"comment":"innovate killer web-readiness","user_id":33,"product_id":87},
{"id":313,"rating":2,"comment":"grow proactive portals","user_id":28,"product_id":40},
{"id":314,"rating":1,"comment":"deliver open-source e-markets","user_id":45,"product_id":7},
{"id":315,"rating":3,"comment":"morph revolutionary networks","user_id":37,"product_id":53},
{"id":316,"rating":2,"comment":"harness back-end users","user_id":46,"product_id":17},
{"id":317,"rating":2,"comment":"deliver integrated mindshare","user_id":13,"product_id":35},
{"id":318,"rating":5,"comment":"incentivize frictionless e-markets","user_id":29,"product_id":58},
{"id":319,"rating":5,"comment":"extend rich solutions","user_id":28,"product_id":57},
{"id":320,"rating":3,"comment":"cultivate user-centric bandwidth","user_id":23,"product_id":4},
{"id":321,"rating":4,"comment":"reinvent distributed eyeballs","user_id":26,"product_id":34},
{"id":322,"rating":4,"comment":"extend frictionless paradigms","user_id":22,"product_id":59},
{"id":323,"rating":3,"comment":"iterate clicks-and-mortar infrastructures","user_id":17,"product_id":41},
{"id":324,"rating":2,"comment":"morph clicks-and-mortar infrastructures","user_id":11,"product_id":45},
{"id":325,"rating":1,"comment":"synergize open-source mindshare","user_id":34,"product_id":61},
{"id":326,"rating":3,"comment":"streamline viral models","user_id":46,"product_id":60},
{"id":327,"rating":3,"comment":"transform efficient web-readiness","user_id":26,"product_id":58},
{"id":328,"rating":1,"comment":"synthesize cross-platform infrastructures","user_id":23,"product_id":59},
{"id":329,"rating":5,"comment":"maximize intuitive metrics","user_id":49,"product_id":85},
{"id":330,"rating":2,"comment":"orchestrate innovative communities","user_id":12,"product_id":10},
{"id":331,"rating":2,"comment":"benchmark cross-platform content","user_id":13,"product_id":27},
{"id":332,"rating":3,"comment":"incentivize user-centric web services","user_id":28,"product_id":16},
{"id":333,"rating":4,"comment":"syndicate next-generation users","user_id":46,"product_id":51},
{"id":334,"rating":3,"comment":"e-enable intuitive web services","user_id":11,"product_id":43},
{"id":335,"rating":4,"comment":"mesh holistic channels","user_id":39,"product_id":28},
{"id":336,"rating":5,"comment":"syndicate visionary architectures","user_id":46,"product_id":50},
{"id":337,"rating":3,"comment":"engage mission-critical communities","user_id":43,"product_id":25},
{"id":338,"rating":4,"comment":"mesh cross-media methodologies","user_id":27,"product_id":1},
{"id":339,"rating":1,"comment":"whiteboard enterprise partnerships","user_id":32,"product_id":67},
{"id":340,"rating":3,"comment":"benchmark scalable bandwidth","user_id":26,"product_id":71},
{"id":341,"rating":3,"comment":"harness turn-key functionalities","user_id":22,"product_id":21},
{"id":342,"rating":3,"comment":"evolve global communities","user_id":29,"product_id":81},
{"id":343,"rating":1,"comment":"grow front-end applications","user_id":34,"product_id":13},
{"id":344,"rating":4,"comment":"recontextualize collaborative communities","user_id":43,"product_id":6},
{"id":345,"rating":4,"comment":"target granular content","user_id":42,"product_id":70},
{"id":346,"rating":3,"comment":"deploy next-generation paradigms","user_id":37,"product_id":67},
{"id":347,"rating":1,"comment":"repurpose back-end eyeballs","user_id":47,"product_id":8},
{"id":348,"rating":1,"comment":"cultivate dynamic e-tailers","user_id":26,"product_id":3},
{"id":349,"rating":5,"comment":"maximize plug-and-play systems","user_id":27,"product_id":67},
{"id":350,"rating":2,"comment":"visualize rich channels","user_id":14,"product_id":40},
{"id":351,"rating":4,"comment":"recontextualize value-added metrics","user_id":29,"product_id":13},
{"id":352,"rating":5,"comment":"transform B2C solutions","user_id":39,"product_id":22},
{"id":353,"rating":1,"comment":"expedite sexy e-services","user_id":36,"product_id":74},
{"id":354,"rating":2,"comment":"transition bleeding-edge schemas","user_id":28,"product_id":41},
{"id":355,"rating":1,"comment":"recontextualize dot-com niches","user_id":25,"product_id":40},
{"id":356,"rating":3,"comment":"maximize out-of-the-box channels","user_id":14,"product_id":14},
{"id":357,"rating":3,"comment":"aggregate one-to-one paradigms","user_id":34,"product_id":45},
{"id":358,"rating":2,"comment":"scale frictionless portals","user_id":16,"product_id":70},
{"id":359,"rating":2,"comment":"expedite robust markets","user_id":29,"product_id":4},
{"id":360,"rating":2,"comment":"expedite strategic e-markets","user_id":24,"product_id":16},
{"id":361,"rating":5,"comment":"incentivize global supply-chains","user_id":41,"product_id":10},
{"id":362,"rating":2,"comment":"syndicate rich deliverables","user_id":44,"product_id":84},
{"id":363,"rating":2,"comment":"whiteboard B2B communities","user_id":49,"product_id":63},
{"id":364,"rating":1,"comment":"deliver interactive bandwidth","user_id":41,"product_id":85},
{"id":365,"rating":3,"comment":"disintermediate real-time convergence","user_id":23,"product_id":11},
{"id":366,"rating":3,"comment":"cultivate 24/7 e-business","user_id":17,"product_id":28},
{"id":367,"rating":4,"comment":"aggregate scalable eyeballs","user_id":35,"product_id":50},
{"id":368,"rating":4,"comment":"implement B2B models","user_id":41,"product_id":67},
{"id":369,"rating":2,"comment":"orchestrate open-source infrastructures","user_id":36,"product_id":46},
{"id":370,"rating":1,"comment":"morph mission-critical methodologies","user_id":34,"product_id":17},
{"id":371,"rating":4,"comment":"aggregate seamless communities","user_id":40,"product_id":72},
{"id":372,"rating":4,"comment":"innovate holistic applications","user_id":11,"product_id":88},
{"id":373,"rating":3,"comment":"maximize killer ROI","user_id":39,"product_id":85},
{"id":374,"rating":5,"comment":"drive cross-media paradigms","user_id":22,"product_id":58},
{"id":375,"rating":2,"comment":"whiteboard B2B applications","user_id":14,"product_id":22},
{"id":376,"rating":2,"comment":"repurpose bricks-and-clicks functionalities","user_id":16,"product_id":83},
{"id":377,"rating":4,"comment":"facilitate wireless deliverables","user_id":23,"product_id":32},
{"id":378,"rating":5,"comment":"maximize cross-media e-markets","user_id":39,"product_id":88},
{"id":379,"rating":2,"comment":"deploy scalable solutions","user_id":39,"product_id":84},
{"id":380,"rating":1,"comment":"implement cutting-edge networks","user_id":22,"product_id":45},
{"id":381,"rating":3,"comment":"disintermediate efficient relationships","user_id":22,"product_id":28},
{"id":382,"rating":3,"comment":"grow plug-and-play deliverables","user_id":38,"product_id":76},
{"id":383,"rating":4,"comment":"envisioneer best-of-breed initiatives","user_id":25,"product_id":83},
{"id":384,"rating":4,"comment":"drive one-to-one relationships","user_id":20,"product_id":30},
{"id":385,"rating":2,"comment":"embrace front-end e-tailers","user_id":44,"product_id":63},
{"id":386,"rating":3,"comment":"engage compelling bandwidth","user_id":37,"product_id":4},
{"id":387,"rating":5,"comment":"monetize magnetic content","user_id":18,"product_id":18},
{"id":388,"rating":2,"comment":"architect out-of-the-box functionalities","user_id":16,"product_id":82},
{"id":389,"rating":1,"comment":"mesh compelling synergies","user_id":14,"product_id":75},
{"id":390,"rating":1,"comment":"utilize next-generation channels","user_id":12,"product_id":67},
{"id":391,"rating":2,"comment":"cultivate customized e-commerce","user_id":49,"product_id":18},
{"id":392,"rating":1,"comment":"generate interactive interfaces","user_id":13,"product_id":66},
{"id":393,"rating":2,"comment":"leverage plug-and-play architectures","user_id":37,"product_id":83},
{"id":394,"rating":2,"comment":"generate impactful infomediaries","user_id":49,"product_id":52},
{"id":395,"rating":3,"comment":"optimize bleeding-edge e-services","user_id":24,"product_id":5},
{"id":396,"rating":3,"comment":"syndicate B2B e-services","user_id":28,"product_id":40},
{"id":397,"rating":3,"comment":"deploy best-of-breed deliverables","user_id":14,"product_id":73},
{"id":398,"rating":2,"comment":"evolve bleeding-edge content","user_id":45,"product_id":89},
{"id":399,"rating":4,"comment":"disintermediate granular users","user_id":20,"product_id":59},
{"id":400,"rating":1,"comment":"visualize dynamic mindshare","user_id":34,"product_id":10}
]
@fakedata_blueprint.route('/')
def craete_fake_date():
# for el in categories:
# new_cate = Category(body=el)
# db.session.add(new_cate)
# db.session.commit()
# for el in inventories:
# new_i = Inventory(location=el)
# db.session.add(new_i)
# db.session.commit()
# for el in order_status:
# new_os = Order_status(status=el)
# db.session.add(new_os)
# db.session.commit()
# for store in stores:
# new_store = User(login_name=store['login_name'], img_url=store['img_url'],store=True, store_name=store['store_name'])
# new_store.set_password(store['password'])
# db.session.add(new_store)
# db.session.commit()
for user in users:
new_user = User(login_name=user['login_name'], img_url=user['img_url'])
db.session.add(new_user)
db.session.commit()
for x in range(0, 30):
ran = random.randint(0, 10)
ran_price = random.randint(1, 5)*1000
new_product = Product(name=fruit_product[ran][0], discription=product_description[random.randint(
0, 9)]['name'], img_url=fruit_product[ran][1], price=ran_price, category_id=1, user_owner_id=random.randint(1,9), stock=random.randint(100,200), time="2019/12/15", expired_date="2020/01/10", inventory_id=random.randint(1,2))
db.session.add(new_product)
db.session.commit()
for x in range(0, 30):
ran = random.randint(0, 7)
ran_price = random.randint(1, 5)*1000
new_product = Product(name=vegetable_product[ran][0], discription=product_description[random.randint(
0, 9)]['name'], img_url=vegetable_product[ran][1], price=ran_price, category_id=2, user_owner_id=random.randint(1,9), stock=random.randint(100,200), time="2019/12/15", expired_date="2020/01/10", inventory_id=random.randint(1,2))
db.session.add(new_product)
db.session.commit()
for x in range(0, 30):
ran = random.randint(0, 4)
ran_price = random.randint(1, 5)*1000
new_product = Product(name=seasoning_product[ran][0], discription=product_description[random.randint(
0, 9)]['name'], img_url=seasoning_product[ran][1], price=ran_price, category_id=3, user_owner_id=random.randint(1,9), stock=random.randint(100,200), time="2019/12/15", expired_date="2020/01/10", inventory_id=random.randint(1,2))
db.session.add(new_product)
db.session.commit()
for rating in ratings:
new_rating = Rating(rating=rating['rating'], comment=rating['comment'], user_id=rating['user_id'], product_id=rating['product_id'])
db.session.add(new_rating)
db.session.commit()
return jsonify({'head' : 'success!'})
|
en
| 0.355836
|
# for el in categories: # new_cate = Category(body=el) # db.session.add(new_cate) # db.session.commit() # for el in inventories: # new_i = Inventory(location=el) # db.session.add(new_i) # db.session.commit() # for el in order_status: # new_os = Order_status(status=el) # db.session.add(new_os) # db.session.commit() # for store in stores: # new_store = User(login_name=store['login_name'], img_url=store['img_url'],store=True, store_name=store['store_name']) # new_store.set_password(store['password']) # db.session.add(new_store) # db.session.commit()
| 2.094629
| 2
|
ravel/mndeps.py
|
mudbri/Faure
| 1
|
6627308
|
<filename>ravel/mndeps.py
"""
Creating topologies from command-line parameters.
"""
import os
import re
from ravel.util import splitArgs
from topo.topolib import (EmptyTopo, SingleSwitchTopo, SingleSwitchReversedTopo, MinimalTopo, LinearTopo, TreeTopo, FatTreeTopo, ISPTopo)
TOPOS = { "empty": EmptyTopo,
"minimal": MinimalTopo,
"linear": LinearTopo,
"reversed": SingleSwitchReversedTopo,
"single": SingleSwitchTopo,
"tree": TreeTopo,
"fattree": FatTreeTopo,
"isp": ISPTopo
}
def setCustom(name, value):
"""Set custom parameters for Mininet
name: parameter name
value: parameter value"""
if name in ("topos", "switches", "hosts", "controllers"):
param = name.upper()
globals()[param].update(value)
elif name == "validate":
validate = value
else:
globals()[name] = value
def custom(value):
"""Parse custom parameters
value: string containing custom parameters"""
files = []
if os.path.isfile(value):
files.append(value)
else:
files += value.split(",")
for filename in files:
customs = {}
if os.path.isfile(filename):
exec(compile(open(filename, "rb").read(), filename, 'exec'), customs, customs)
for name, val in customs.items():
setCustom(name, val)
else:
print("Could not find custom file", filename)
def build(topoStr):
"""Build topology from string with format (object, arg1, arg2,...).
topoStr: topology string"""
try:
topo, args, kwargs = splitArgs( topoStr )
if topo not in TOPOS:
raise Exception( 'Invalid topo name %s' % topo )
return TOPOS[ topo ]( *args, **kwargs )
except Exception as e:
print(e)
return None
|
<filename>ravel/mndeps.py
"""
Creating topologies from command-line parameters.
"""
import os
import re
from ravel.util import splitArgs
from topo.topolib import (EmptyTopo, SingleSwitchTopo, SingleSwitchReversedTopo, MinimalTopo, LinearTopo, TreeTopo, FatTreeTopo, ISPTopo)
TOPOS = { "empty": EmptyTopo,
"minimal": MinimalTopo,
"linear": LinearTopo,
"reversed": SingleSwitchReversedTopo,
"single": SingleSwitchTopo,
"tree": TreeTopo,
"fattree": FatTreeTopo,
"isp": ISPTopo
}
def setCustom(name, value):
"""Set custom parameters for Mininet
name: parameter name
value: parameter value"""
if name in ("topos", "switches", "hosts", "controllers"):
param = name.upper()
globals()[param].update(value)
elif name == "validate":
validate = value
else:
globals()[name] = value
def custom(value):
"""Parse custom parameters
value: string containing custom parameters"""
files = []
if os.path.isfile(value):
files.append(value)
else:
files += value.split(",")
for filename in files:
customs = {}
if os.path.isfile(filename):
exec(compile(open(filename, "rb").read(), filename, 'exec'), customs, customs)
for name, val in customs.items():
setCustom(name, val)
else:
print("Could not find custom file", filename)
def build(topoStr):
"""Build topology from string with format (object, arg1, arg2,...).
topoStr: topology string"""
try:
topo, args, kwargs = splitArgs( topoStr )
if topo not in TOPOS:
raise Exception( 'Invalid topo name %s' % topo )
return TOPOS[ topo ]( *args, **kwargs )
except Exception as e:
print(e)
return None
|
en
| 0.126303
|
Creating topologies from command-line parameters. Set custom parameters for Mininet name: parameter name value: parameter value Parse custom parameters value: string containing custom parameters Build topology from string with format (object, arg1, arg2,...). topoStr: topology string
| 2.715892
| 3
|
bruhat/extern/todd_coxeter.py
|
punkdit/bruhat
| 3
|
6627309
|
<reponame>punkdit/bruhat<gh_stars>1-10
#!/usr/bin/env python3
# From: https://math.berkeley.edu/~kmill/notes/todd_coxeter.html
# Example of Todd-Coxeter to compute S_3 from relations
idents = []
neighbors = []
to_visit = 0
ngens = 2
rels = [
(1, 0), # a^-1a
(3, 2), # b^-1b
(0, 0, 0), #a^3
(2, 2), # b^2
(0, 2, 0, 2) # abab
]
hgens = [
(2,), # b
]
def find(c):
c2 = idents[c]
if c == c2:
return c
else:
c2 = find(c2)
idents[c] = c2
return c2
def new():
c = len(idents)
idents.append(c)
neighbors.append((2*ngens)*[None])
return c
def unify(c1, c2):
c1 = find(c1)
c2 = find(c2)
if c1 == c2:
return
c1, c2 = min(c1, c2), max(c1, c2)
idents[c2] = c1
for d in range(2*ngens):
n1 = neighbors[c1][d]
n2 = neighbors[c2][d]
if n1 == None:
neighbors[c1][d] = n2
elif n2 != None:
unify(n1, n2)
def follow(c, d):
c = find(c)
ns = neighbors[c]
if ns[d] == None:
ns[d] = new()
return find(ns[d])
def followp(c, ds):
c = find(c)
for d in reversed(ds):
c = follow(c, d)
return c
start = new()
for hgen in hgens:
unify(followp(start, hgen), start)
while to_visit < len(idents):
c = find(to_visit)
if c == to_visit:
for rel in rels:
unify(followp(c, rel), c)
to_visit += 1
print("done")
cosets = [c for i, c in enumerate(idents) if i == c]
perms = [[cosets.index(follow(c, 2*d)) for i, c in enumerate(cosets)]
for d in range(ngens)]
def cycle(perm):
parts = []
for i in range(len(perm)):
part = [str(i+1)]
k = perm[i]
while k != i:
if k < i: break
part.append(str(k+1))
k = perm[k]
else:
parts.append(" ".join(part))
return "("+")(".join(parts)+")"
for d in range(ngens):
print("g%d ="%d, cycle(perms[d]))
|
#!/usr/bin/env python3
# From: https://math.berkeley.edu/~kmill/notes/todd_coxeter.html
# Example of Todd-Coxeter to compute S_3 from relations
idents = []
neighbors = []
to_visit = 0
ngens = 2
rels = [
(1, 0), # a^-1a
(3, 2), # b^-1b
(0, 0, 0), #a^3
(2, 2), # b^2
(0, 2, 0, 2) # abab
]
hgens = [
(2,), # b
]
def find(c):
c2 = idents[c]
if c == c2:
return c
else:
c2 = find(c2)
idents[c] = c2
return c2
def new():
c = len(idents)
idents.append(c)
neighbors.append((2*ngens)*[None])
return c
def unify(c1, c2):
c1 = find(c1)
c2 = find(c2)
if c1 == c2:
return
c1, c2 = min(c1, c2), max(c1, c2)
idents[c2] = c1
for d in range(2*ngens):
n1 = neighbors[c1][d]
n2 = neighbors[c2][d]
if n1 == None:
neighbors[c1][d] = n2
elif n2 != None:
unify(n1, n2)
def follow(c, d):
c = find(c)
ns = neighbors[c]
if ns[d] == None:
ns[d] = new()
return find(ns[d])
def followp(c, ds):
c = find(c)
for d in reversed(ds):
c = follow(c, d)
return c
start = new()
for hgen in hgens:
unify(followp(start, hgen), start)
while to_visit < len(idents):
c = find(to_visit)
if c == to_visit:
for rel in rels:
unify(followp(c, rel), c)
to_visit += 1
print("done")
cosets = [c for i, c in enumerate(idents) if i == c]
perms = [[cosets.index(follow(c, 2*d)) for i, c in enumerate(cosets)]
for d in range(ngens)]
def cycle(perm):
parts = []
for i in range(len(perm)):
part = [str(i+1)]
k = perm[i]
while k != i:
if k < i: break
part.append(str(k+1))
k = perm[k]
else:
parts.append(" ".join(part))
return "("+")(".join(parts)+")"
for d in range(ngens):
print("g%d ="%d, cycle(perms[d]))
|
en
| 0.455077
|
#!/usr/bin/env python3 # From: https://math.berkeley.edu/~kmill/notes/todd_coxeter.html # Example of Todd-Coxeter to compute S_3 from relations # a^-1a # b^-1b #a^3 # b^2 # abab # b
| 3.010001
| 3
|
tests/test_ops.py
|
Kyle-Kyle/angr
| 6
|
6627310
|
<filename>tests/test_ops.py
import angr
import claripy
import archinfo
# all the input values were generated via
# [random.randrange(256) for _ in range(16)]
# then set into the input registers via gdb
# set $xmm0.v16_int8 = {...}
# then read out as uint128s
# p/x $xmm0.uint128
# then single stepped and the result read out
def test_irop_perm():
p = angr.load_shellcode('vpshufb xmm0,xmm1,xmm2', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm1 = 0x3c899a56814ee9b84c7b5d8394c85881
s1.regs.xmm2 = 0xa55c66a2cdef1cbcd72b42078d1b7f8b
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x00567b00000056000081c84c00813c00).is_true()
# symbolic test
s3 = p.factory.blank_state()
s3.regs.xmm1 = claripy.BVS('xmm1', 128)
s3.regs.xmm2 = claripy.BVS('xmm2', 128)
s4 = s3.step(num_inst=1).successors[0]
s4.solver.add(s4.regs.xmm2 == 0xa55c66a2cdef1cbcd72b42078d1b7f8b)
s4.solver.add(s4.regs.xmm0 == 0x00567b00000056000081c84c00813c00)
assert s4.solver.solution(s4.regs.xmm1, 0x3c899a56814ee9b84c7b5d8394c85881)
def test_irop_mulhi():
p = angr.load_shellcode('vpmulhw xmm0,xmm1,xmm2', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm1 = 0x3aca92553c2526d4f20987aeab250255
s1.regs.xmm2 = 0x1aebcb281463274ec3ce6473619a8541
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x62e16a304ca05f60348d0c9dfa5fee1).is_true()
def test_irop_catevenlanes():
p = angr.load_shellcode('pmulhrsw xmm0, xmm1', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm0 = 0x4713e06bf3235e97ca8cfde0647d65fd
s1.regs.xmm1 = 0x31f1f86da1dce7de252adc78160e1016
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x1bbb01de0976ee2bf07b009711500cd1).is_true()
if __name__ == '__main__':
test_irop_perm()
test_irop_mulhi()
test_irop_catevenlanes()
|
<filename>tests/test_ops.py
import angr
import claripy
import archinfo
# all the input values were generated via
# [random.randrange(256) for _ in range(16)]
# then set into the input registers via gdb
# set $xmm0.v16_int8 = {...}
# then read out as uint128s
# p/x $xmm0.uint128
# then single stepped and the result read out
def test_irop_perm():
p = angr.load_shellcode('vpshufb xmm0,xmm1,xmm2', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm1 = 0x3c899a56814ee9b84c7b5d8394c85881
s1.regs.xmm2 = 0xa55c66a2cdef1cbcd72b42078d1b7f8b
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x00567b00000056000081c84c00813c00).is_true()
# symbolic test
s3 = p.factory.blank_state()
s3.regs.xmm1 = claripy.BVS('xmm1', 128)
s3.regs.xmm2 = claripy.BVS('xmm2', 128)
s4 = s3.step(num_inst=1).successors[0]
s4.solver.add(s4.regs.xmm2 == 0xa55c66a2cdef1cbcd72b42078d1b7f8b)
s4.solver.add(s4.regs.xmm0 == 0x00567b00000056000081c84c00813c00)
assert s4.solver.solution(s4.regs.xmm1, 0x3c899a56814ee9b84c7b5d8394c85881)
def test_irop_mulhi():
p = angr.load_shellcode('vpmulhw xmm0,xmm1,xmm2', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm1 = 0x3aca92553c2526d4f20987aeab250255
s1.regs.xmm2 = 0x1aebcb281463274ec3ce6473619a8541
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x62e16a304ca05f60348d0c9dfa5fee1).is_true()
def test_irop_catevenlanes():
p = angr.load_shellcode('pmulhrsw xmm0, xmm1', 'amd64')
# concrete test
s1 = p.factory.blank_state()
s1.regs.xmm0 = 0x4713e06bf3235e97ca8cfde0647d65fd
s1.regs.xmm1 = 0x31f1f86da1dce7de252adc78160e1016
s2 = s1.step(num_inst=1).successors[0]
assert (s2.regs.xmm0 == 0x1bbb01de0976ee2bf07b009711500cd1).is_true()
if __name__ == '__main__':
test_irop_perm()
test_irop_mulhi()
test_irop_catevenlanes()
|
en
| 0.773155
|
# all the input values were generated via # [random.randrange(256) for _ in range(16)] # then set into the input registers via gdb # set $xmm0.v16_int8 = {...} # then read out as uint128s # p/x $xmm0.uint128 # then single stepped and the result read out # concrete test # symbolic test # concrete test # concrete test
| 1.904748
| 2
|
sample3.py
|
vswamy/python
| 0
|
6627311
|
<reponame>vswamy/python
#Learning Python
import os
## to use list as a stack, use append and pop operations
list = [1,2,3]
print(list)
list.pop()
print(list)
list.append(4)
print(list)
|
#Learning Python
import os
## to use list as a stack, use append and pop operations
list = [1,2,3]
print(list)
list.pop()
print(list)
list.append(4)
print(list)
|
en
| 0.892967
|
#Learning Python ## to use list as a stack, use append and pop operations
| 3.971533
| 4
|
hail/python/hail/plot/__init__.py
|
atgenomix/hail
| 1
|
6627312
|
from .plots import output_notebook, show, histogram, cumulative_histogram, histogram2d, scatter, qq, manhattan
__all__ = ['output_notebook',
'show',
'histogram',
'cumulative_histogram',
'scatter',
'histogram2d',
'qq',
'manhattan']
|
from .plots import output_notebook, show, histogram, cumulative_histogram, histogram2d, scatter, qq, manhattan
__all__ = ['output_notebook',
'show',
'histogram',
'cumulative_histogram',
'scatter',
'histogram2d',
'qq',
'manhattan']
|
none
| 1
| 1.275322
| 1
|
|
data/studio21_generated/interview/0300/starter_code.py
|
vijaykumawat256/Prompt-Summarization
| 0
|
6627313
|
class Solution:
def leastOpsExpressTarget(self, x: int, target: int) -> int:
|
class Solution:
def leastOpsExpressTarget(self, x: int, target: int) -> int:
|
none
| 1
| 1.83518
| 2
|
|
tutorialScripts/tutorialClientSystem.py
|
wode490390/Mod-stub
| 0
|
6627314
|
<gh_stars>0
# -*- coding: utf-8 -*-
# 获取客户端引擎API模块
import client.extraClientApi as clientApi
# 获取客户端system的基类ClientSystem
ClientSystem = clientApi.GetClientSystemCls()
# 在modMain中注册的Client System类
class TutorialClientSystem(ClientSystem):
# 客户端System的初始化函数
def __init__(self, namespace, systemName):
# 首先初始化TutorialClientSystem的基类ClientSystem
super(TutorialClientSystem, self).__init__(namespace, systemName)
print "==== TutorialClientSystem Init ===="
# 函数名为Destroy才会被调用,在这个System被引擎回收的时候会调这个函数来销毁一些内容
def Destroy(self):
pass
|
# -*- coding: utf-8 -*-
# 获取客户端引擎API模块
import client.extraClientApi as clientApi
# 获取客户端system的基类ClientSystem
ClientSystem = clientApi.GetClientSystemCls()
# 在modMain中注册的Client System类
class TutorialClientSystem(ClientSystem):
# 客户端System的初始化函数
def __init__(self, namespace, systemName):
# 首先初始化TutorialClientSystem的基类ClientSystem
super(TutorialClientSystem, self).__init__(namespace, systemName)
print "==== TutorialClientSystem Init ===="
# 函数名为Destroy才会被调用,在这个System被引擎回收的时候会调这个函数来销毁一些内容
def Destroy(self):
pass
|
zh
| 0.777592
|
# -*- coding: utf-8 -*- # 获取客户端引擎API模块 # 获取客户端system的基类ClientSystem # 在modMain中注册的Client System类 # 客户端System的初始化函数 # 首先初始化TutorialClientSystem的基类ClientSystem # 函数名为Destroy才会被调用,在这个System被引擎回收的时候会调这个函数来销毁一些内容
| 2.524067
| 3
|
metrics/outputformat_csv.py
|
mcallaghan-bsm/metrics
| 8
|
6627315
|
# -*- coding: utf-8 -*-
"""output in CSV format.
All rights reserved, see LICENSE for details.
"""
from __future__ import unicode_literals
def format(file_metrics, build_metrics):
"""Compute output in CSV format (only file_metrics)."""
# TODO maybe we need different output for build_metrics in csv format, too?
# filter out positions metric
def report_header(file_metrics):
values = list(file_metrics.values())[0]
print(values)
values.pop('block_positions', None)
return 'filename,' + ','.join(values) + '\n'
def report_metrics(file_metrics):
report = ''
for key, values in file_metrics.items():
report += key + ','
report += ','.join([str(v) for k, v in values.items() if k not in ['block_positions']])
report += '\n'
return report
report = report_header(file_metrics)
report += report_metrics(file_metrics)
return report
|
# -*- coding: utf-8 -*-
"""output in CSV format.
All rights reserved, see LICENSE for details.
"""
from __future__ import unicode_literals
def format(file_metrics, build_metrics):
"""Compute output in CSV format (only file_metrics)."""
# TODO maybe we need different output for build_metrics in csv format, too?
# filter out positions metric
def report_header(file_metrics):
values = list(file_metrics.values())[0]
print(values)
values.pop('block_positions', None)
return 'filename,' + ','.join(values) + '\n'
def report_metrics(file_metrics):
report = ''
for key, values in file_metrics.items():
report += key + ','
report += ','.join([str(v) for k, v in values.items() if k not in ['block_positions']])
report += '\n'
return report
report = report_header(file_metrics)
report += report_metrics(file_metrics)
return report
|
en
| 0.787076
|
# -*- coding: utf-8 -*- output in CSV format. All rights reserved, see LICENSE for details. Compute output in CSV format (only file_metrics). # TODO maybe we need different output for build_metrics in csv format, too? # filter out positions metric
| 3.12233
| 3
|
train_encoder.py
|
Harsha-Musunuri/stylegan2-pytorch
| 7
|
6627316
|
<gh_stars>1-10
import argparse
import math
import random
import os
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import datasets, transforms, utils
from PIL import Image
from tqdm import tqdm
import util
from calc_inception import load_patched_inception_v3
from fid import extract_feature_from_samples, calc_fid, extract_feature_from_reconstruction
import pickle
import pdb
st = pdb.set_trace
try:
import wandb
except ImportError:
wandb = None
from idinvert_pytorch.models.perceptual_model import VGG16
from dataset import get_image_dataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from op import conv2d_gradfix
from non_leaking import augment, AdaptiveAugment
def data_sampler(dataset, shuffle, distributed):
if distributed:
return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return data.RandomSampler(dataset)
else:
return data.SequentialSampler(dataset)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
def sample_data(loader):
# Endless image iterator
while True:
for batch in loader:
if isinstance(batch, (list, tuple)):
yield batch[0]
else:
yield batch
def d_logistic_loss(real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def d_r1_loss(real_pred, real_img):
with conv2d_gradfix.no_weight_gradients():
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_img, create_graph=True
)
grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(fake_pred):
loss = F.softplus(-fake_pred).mean()
return loss
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
def make_noise(batch, latent_dim, n_noise, device):
if n_noise == 1:
return torch.randn(batch, latent_dim, device=device)
noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
return noises
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
def set_grad_none(model, targets):
for n, p in model.named_parameters():
if n in targets:
p.grad = None
def accumulate_batches(data_iter, num):
samples = []
while num > 0:
imgs = next(data_iter)
samples.append(imgs)
num -= imgs.size(0)
samples = torch.cat(samples, dim=0)
if num < 0:
samples = samples[:num, ...]
return samples
def load_real_samples(args, data_iter):
npy_path = args.sample_cache
if npy_path is not None and os.path.exists(npy_path):
sample_x = torch.from_numpy(np.load(npy_path)).to(args.device)
else:
sample_x = accumulate_batches(data_iter, args.n_sample).to(args.device)
if npy_path is not None:
np.save(npy_path, sample_x.cpu().numpy())
return sample_x
def train(args, loader, loader2, encoder, generator, discriminator, vggnet, pwcnet, e_optim, d_optim, e_ema, pca_state, device):
inception = real_mean = real_cov = mean_latent = None
if args.eval_every > 0:
inception = nn.DataParallel(load_patched_inception_v3()).to(device)
inception.eval()
with open(args.inception, "rb") as f:
embeds = pickle.load(f)
real_mean = embeds["mean"]
real_cov = embeds["cov"]
if get_rank() == 0:
if args.eval_every > 0:
with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:
f.write(f"Name: {getattr(args, 'name', 'NA')}\n{'-'*50}\n")
if args.log_every > 0:
with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:
f.write(f"Name: {getattr(args, 'name', 'NA')}\n{'-'*50}\n")
loader = sample_data(loader)
pbar = range(args.iter)
if get_rank() == 0:
pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
d_loss_val = 0
e_loss_val = 0
rec_loss_val = 0
vgg_loss_val = 0
adv_loss_val = 0
loss_dict = {"d": torch.tensor(0., device=device),
"real_score": torch.tensor(0., device=device),
"fake_score": torch.tensor(0., device=device),
"r1_d": torch.tensor(0., device=device),
"r1_e": torch.tensor(0., device=device),
"rec": torch.tensor(0., device=device),}
avg_pix_loss = util.AverageMeter()
avg_vgg_loss = util.AverageMeter()
if args.distributed:
e_module = encoder.module
d_module = discriminator.module
g_module = generator.module
else:
e_module = encoder
d_module = discriminator
g_module = generator
# accum = 0.5 ** (32 / (10 * 1000))
ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0
r_t_stat = 0
if args.augment and args.augment_p == 0:
ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, args.ada_every, device)
# sample_x = accumulate_batches(loader, args.n_sample).to(device)
sample_x = load_real_samples(args, loader)
if sample_x.ndim > 4:
sample_x = sample_x[:,0,...]
input_is_latent = args.latent_space != 'z' # Encode in z space?
requires_grad(generator, False) # always False
generator.eval() # Generator should be ema and in eval mode
g_ema = generator
# if args.no_ema or e_ema is None:
# e_ema = encoder
for idx in pbar:
i = idx + args.start_iter
if i > args.iter:
print("Done!")
break
real_img = next(loader)
real_img = real_img.to(device)
# Train Encoder
if args.toggle_grads:
requires_grad(encoder, True)
requires_grad(discriminator, False)
pix_loss = vgg_loss = adv_loss = rec_loss = torch.tensor(0., device=device)
latent_real, _ = encoder(real_img)
fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
if args.lambda_adv > 0:
if args.augment:
fake_img_aug, _ = augment(fake_img, ada_aug_p)
else:
fake_img_aug = fake_img
fake_pred = discriminator(fake_img_aug)
adv_loss = g_nonsaturating_loss(fake_pred)
if args.lambda_pix > 0:
if args.pix_loss == 'l2':
pix_loss = torch.mean((fake_img - real_img) ** 2)
else:
pix_loss = F.l1_loss(fake_img, real_img)
if args.lambda_vgg > 0:
real_feat = vggnet(real_img)
fake_feat = vggnet(fake_img)
vgg_loss = torch.mean((real_feat - fake_feat) ** 2)
e_loss = pix_loss * args.lambda_pix + vgg_loss * args.lambda_vgg + adv_loss * args.lambda_adv
loss_dict["e"] = e_loss
loss_dict["pix"] = pix_loss
loss_dict["vgg"] = vgg_loss
loss_dict["adv"] = adv_loss
encoder.zero_grad()
e_loss.backward()
e_optim.step()
if args.train_on_fake:
e_regularize = args.e_rec_every > 0 and i % args.e_rec_every == 0
if e_regularize and args.lambda_rec > 0:
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
fake_img, latent_fake = generator(noise, input_is_latent=input_is_latent, return_latents=True)
latent_pred, _ = encoder(fake_img)
if latent_pred.ndim < 3:
latent_pred = latent_pred.unsqueeze(1).repeat(1, latent_fake.size(1), 1)
rec_loss = torch.mean((latent_fake - latent_pred) ** 2)
encoder.zero_grad()
(rec_loss * args.lambda_rec).backward()
e_optim.step()
loss_dict["rec"] = rec_loss
# e_regularize = args.e_reg_every > 0 and i % args.e_reg_every == 0
# if e_regularize:
# # why not regularize on augmented real?
# real_img.requires_grad = True
# real_pred, _ = encoder(real_img)
# r1_loss_e = d_r1_loss(real_pred, real_img)
# encoder.zero_grad()
# (args.r1 / 2 * r1_loss_e * args.e_reg_every + 0 * real_pred.view(-1)[0]).backward()
# e_optim.step()
# loss_dict["r1_e"] = r1_loss_e
if not args.no_ema and e_ema is not None:
ema_nimg = args.ema_kimg * 1000
if args.ema_rampup is not None:
ema_nimg = min(ema_nimg, i * args.batch * args.ema_rampup)
accum = 0.5 ** (args.batch / max(ema_nimg, 1e-8))
accumulate(e_ema, e_module, accum)
# Train Discriminator
if args.toggle_grads:
requires_grad(encoder, False)
requires_grad(discriminator, True)
if not args.no_update_discriminator and args.lambda_adv > 0:
latent_real, _ = encoder(real_img)
fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
if args.augment:
real_img_aug, _ = augment(real_img, ada_aug_p)
fake_img_aug, _ = augment(fake_img, ada_aug_p)
else:
real_img_aug = real_img
fake_img_aug = fake_img
fake_pred = discriminator(fake_img_aug)
real_pred = discriminator(real_img_aug)
d_loss = d_logistic_loss(real_pred, fake_pred)
loss_dict["d"] = d_loss
loss_dict["real_score"] = real_pred.mean()
loss_dict["fake_score"] = fake_pred.mean()
discriminator.zero_grad()
d_loss.backward()
d_optim.step()
if args.augment and args.augment_p == 0:
ada_aug_p = ada_augment.tune(real_pred)
r_t_stat = ada_augment.r_t_stat
d_regularize = args.d_reg_every > 0 and i % args.d_reg_every == 0
if d_regularize:
# why not regularize on augmented real?
real_img.requires_grad = True
real_pred = discriminator(real_img)
r1_loss_d = d_r1_loss(real_pred, real_img)
discriminator.zero_grad()
(args.r1 / 2 * r1_loss_d * args.d_reg_every + 0 * real_pred.view(-1)[0]).backward()
# Why 0* ? Answer is here https://github.com/rosinality/stylegan2-pytorch/issues/76
d_optim.step()
loss_dict["r1_d"] = r1_loss_d
loss_reduced = reduce_loss_dict(loss_dict)
d_loss_val = loss_reduced["d"].mean().item()
e_loss_val = loss_reduced["e"].mean().item()
r1_d_val = loss_reduced["r1_d"].mean().item()
r1_e_val = loss_reduced["r1_e"].mean().item()
pix_loss_val = loss_reduced["pix"].mean().item()
vgg_loss_val = loss_reduced["vgg"].mean().item()
adv_loss_val = loss_reduced["adv"].mean().item()
rec_loss_val = loss_reduced["rec"].mean().item()
real_score_val = loss_reduced["real_score"].mean().item()
fake_score_val = loss_reduced["fake_score"].mean().item()
avg_pix_loss.update(pix_loss_val, real_img.shape[0])
avg_vgg_loss.update(vgg_loss_val, real_img.shape[0])
if get_rank() == 0:
pbar.set_description(
(
f"d: {d_loss_val:.4f}; e: {e_loss_val:.4f}; r1_d: {r1_d_val:.4f}; r1_e: {r1_e_val:.4f}; "
f"pix: {pix_loss_val:.4f}; vgg: {vgg_loss_val:.4f}; adv: {adv_loss_val:.4f}; "
f"rec: {rec_loss_val:.4f}; augment: {ada_aug_p:.4f}"
)
)
if i % args.log_every == 0:
with torch.no_grad():
latent_x, _ = e_ema(sample_x)
fake_x, _ = g_ema([latent_x], input_is_latent=input_is_latent)
sample_pix_loss = torch.sum((sample_x - fake_x) ** 2)
with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:
f.write(f"{i:07d}; pix: {avg_pix_loss.avg}; vgg: {avg_vgg_loss.avg}; "
f"ref: {sample_pix_loss.item()};\n")
if args.eval_every > 0 and i % args.eval_every == 0:
with torch.no_grad():
g_ema.eval()
e_ema.eval()
# Recon
features = extract_feature_from_reconstruction(
e_ema, g_ema, inception, args.truncation, mean_latent, loader2, args.device,
input_is_latent=input_is_latent, mode='recon',
).numpy()
sample_mean = np.mean(features, 0)
sample_cov = np.cov(features, rowvar=False)
fid_re = calc_fid(sample_mean, sample_cov, real_mean, real_cov)
# print("Recon FID:", fid_re)
with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:
f.write(f"{i:07d}; recon fid: {float(fid_re):.4f};\n")
if wandb and args.wandb:
wandb.log(
{
"Encoder": e_loss_val,
"Discriminator": d_loss_val,
"Augment": ada_aug_p,
"Rt": r_t_stat,
"R1 D": r1_d_val,
"R1 E": r1_e_val,
"Pix Loss": pix_loss_val,
"VGG Loss": vgg_loss_val,
"Adv Loss": adv_loss_val,
"Rec Loss": rec_loss_val,
"Real Score": real_score_val,
"Fake Score": fake_score_val,
}
)
if i % args.log_every == 0:
with torch.no_grad():
e_eval = encoder if args.no_ema else e_ema
e_eval.eval()
nrow = int(args.n_sample ** 0.5)
nchw = list(sample_x.shape)[1:]
latent_real, _ = e_eval(sample_x)
fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
sample = torch.cat((sample_x.reshape(args.n_sample//nrow, nrow, *nchw),
fake_img.reshape(args.n_sample//nrow, nrow, *nchw)), 1)
utils.save_image(
sample.reshape(2*args.n_sample, *nchw),
os.path.join(args.log_dir, 'sample', f"{str(i).zfill(6)}.png"),
nrow=nrow,
normalize=True,
value_range=(-1, 1),
)
e_eval.train()
if i % args.save_every == 0:
e_eval = encoder if args.no_ema else e_ema
torch.save(
{
"e": e_module.state_dict(),
"d": d_module.state_dict(),
"g_ema": g_module.state_dict(),
"e_ema": e_eval.state_dict(),
"e_optim": e_optim.state_dict(),
"d_optim": d_optim.state_dict(),
"args": args,
"ada_aug_p": ada_aug_p,
"iter": i,
},
os.path.join(args.log_dir, 'weight', f"{str(i).zfill(6)}.pt"),
)
if i % args.save_latest_every == 0:
torch.save(
{
"e": e_module.state_dict(),
"d": d_module.state_dict(),
"g_ema": g_module.state_dict(),
"e_ema": e_eval.state_dict(),
"e_optim": e_optim.state_dict(),
"d_optim": d_optim.state_dict(),
"args": args,
"ada_aug_p": ada_aug_p,
"iter": i,
},
os.path.join(args.log_dir, 'weight', f"latest.pt"),
)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="StyleGAN2 encoder trainer")
parser.add_argument("--path", type=str, help="path to the lmdb dataset")
parser.add_argument("--arch", type=str, default='stylegan2', help="model architectures (stylegan2 | swagan)")
parser.add_argument("--dataset", type=str, default='multires')
parser.add_argument("--cache", type=str, default='local.db')
parser.add_argument("--sample_cache", type=str, default=None)
parser.add_argument("--name", type=str, help="experiment name", default='default_exp')
parser.add_argument("--log_root", type=str, help="where to save training logs", default='logs')
parser.add_argument("--log_every", type=int, default=100, help="save samples every # iters")
parser.add_argument("--save_every", type=int, default=1000, help="save checkpoints every # iters")
parser.add_argument("--save_latest_every", type=int, default=100, help="save latest checkpoints every # iters")
parser.add_argument("--resume", action='store_true')
parser.add_argument("--no_update_discriminator", action='store_true')
parser.add_argument("--no_load_discriminator", action='store_true')
parser.add_argument("--toggle_grads", action='store_true')
parser.add_argument("--use_optical_flow", action='store_true')
parser.add_argument("--use_wscale", action='store_true', help="whether to use `wscale` layer in idinvert encoder")
parser.add_argument("--no_ema", action='store_true', help="do not use ema if enabled")
parser.add_argument("--train_on_fake", action='store_true', help="train encoder on fake?")
parser.add_argument("--e_rec_every", type=int, default=1, help="interval of minimizing recon loss on w")
parser.add_argument("--pix_loss", type=str, default='l2')
parser.add_argument("--lambda_pix", type=float, default=1.0, help="recon loss on pixel (x)")
parser.add_argument("--lambda_vgg", type=float, default=5e-5)
parser.add_argument("--lambda_adv", type=float, default=0.1)
parser.add_argument("--lambda_rec", type=float, default=1.0, help="recon loss on style (w)")
parser.add_argument("--output_layer_idx", type=int, default=23)
parser.add_argument("--vgg_ckpt", type=str, default="vgg16.pth")
parser.add_argument("--which_encoder", type=str, default='style')
parser.add_argument("--which_latent", type=str, default='w_plus')
parser.add_argument("--stddev_group", type=int, default=1)
parser.add_argument("--use_residual_latent_mlp", action='store_true')
parser.add_argument("--n_latent_mlp", type=int, default=8)
parser.add_argument(
"--iter", type=int, default=800000, help="total training iterations"
)
parser.add_argument(
"--batch", type=int, default=16, help="batch sizes for each gpus"
)
parser.add_argument(
"--n_sample",
type=int,
default=64,
help="number of the samples generated during training",
)
parser.add_argument(
"--size", type=int, default=256, help="image sizes for the model"
)
parser.add_argument(
"--r1", type=float, default=10, help="weight of the r1 regularization"
)
parser.add_argument(
"--path_regularize",
type=float,
default=2,
help="weight of the path length regularization",
)
parser.add_argument(
"--path_batch_shrink",
type=int,
default=2,
help="batch size reducing factor for the path length regularization (reduce memory consumption)",
)
parser.add_argument(
"--e_reg_every",
type=int,
default=0,
help="interval of the applying r1 regularization, no if 0",
)
parser.add_argument(
"--d_reg_every",
type=int,
default=16,
help="interval of the applying r1 regularization, no if 0",
)
parser.add_argument(
"--g_reg_every",
type=int,
default=4,
help="interval of the applying path length regularization",
)
parser.add_argument(
"--mixing", type=float, default=0.9, help="probability of latent code mixing"
)
parser.add_argument(
"--ckpt",
type=str,
default=None,
help="path to the checkpoints to resume training",
)
parser.add_argument(
"--g_ckpt",
type=str,
default=None,
help="path to the checkpoint of generator",
)
parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
parser.add_argument(
"--channel_multiplier",
type=int,
default=2,
help="channel multiplier factor for the model. config-f = 2, else = 1",
)
parser.add_argument(
"--wandb", action="store_true", help="use weights and biases logging"
)
parser.add_argument(
"--local_rank", type=int, default=0, help="local rank for distributed training"
)
parser.add_argument(
"--augment", action="store_true", help="apply non leaking augmentation"
)
parser.add_argument(
"--augment_p",
type=float,
default=0,
help="probability of applying augmentation. 0 = use adaptive augmentation",
)
parser.add_argument(
"--ada_target",
type=float,
default=0.6,
help="target augmentation probability for adaptive augmentation",
)
parser.add_argument(
"--ada_length",
type=int,
default=500 * 1000,
help="target duraing to reach augmentation probability for adaptive augmentation",
)
parser.add_argument(
"--ada_every",
type=int,
default=8,
help="probability update interval of the adaptive augmentation",
)
parser.add_argument("--inception", type=str, default=None, help="path to precomputed inception embedding")
parser.add_argument("--eval_every", type=int, default=1000, help="interval of metric evaluation")
parser.add_argument("--truncation", type=float, default=1, help="truncation factor")
parser.add_argument("--n_sample_fid", type=int, default=10000, help="number of the samples for calculating FID")
parser.add_argument("--latent_space", type=str, default='w', help="latent space (w | p | pn | z)")
parser.add_argument("--ema_kimg", type=int, default=10, help="Half-life of the exponential moving average (EMA) of generator weights.")
parser.add_argument("--ema_rampup", type=float, default=None, help="EMA ramp-up coefficient.")
parser.add_argument("--n_mlp_g", type=int, default=8)
parser.add_argument("--pca_state", type=str, default=None)
args = parser.parse_args()
util.seed_everything()
args.device = device
n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = n_gpu > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
args.n_latent = int(np.log2(args.size)) * 2 - 2 # used in Generator
args.latent = 512 # fixed, dim of w or z (same size)
if args.which_latent == 'w_plus':
args.latent_full = args.latent * args.n_latent
elif args.which_latent == 'w_tied':
args.latent_full = args.latent
else:
raise NotImplementedError
args.start_iter = 0
args.iter += 1
util.set_log_dir(args)
util.print_args(parser, args)
if args.arch == 'stylegan2':
from model import Generator, Discriminator
elif args.arch == 'swagan':
from swagan import Generator, Discriminator
# PCA state
pca_state = None
if args.pca_state is not None:
pca_state = np.load(args.pca_state)
pca_state = {k: torch.from_numpy(pca_state[k]).float() for k in pca_state}
pca_state['Lambda'] = pca_state['Lambda'].unsqueeze(0)
pca_state['mu'] = pca_state['mu'].unsqueeze(0)
pca_state['CT'] = pca_state['C'].T
# Auxiliary models (VGG and PWC)
vggnet = VGG16(output_layer_idx=args.output_layer_idx).to(device)
vgg_ckpt = torch.load(args.vgg_ckpt, map_location=lambda storage, loc: storage)
vggnet.load_state_dict(vgg_ckpt)
pwcnet = None
if args.use_optical_flow:
pwc = __import__('pytorch-pwc.run', globals(), locals(), ['Network'], 0)
pwcnet = pwc.Network().to(device) # state_dict loaded in init
pwcnet.eval()
discriminator = Discriminator(
args.size, channel_multiplier=args.channel_multiplier
).to(device)
# generator = Generator(
# args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
# ).to(device)
g_ema = Generator(
args.size, args.latent, args.n_mlp_g, channel_multiplier=args.channel_multiplier
).to(device)
g_ema.eval()
# accumulate(g_ema, generator, 0)
e_ema = None
if args.which_encoder == 'idinvert':
from idinvert_pytorch.models.stylegan_encoder_network import StyleGANEncoderNet
encoder = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,
which_latent=args.which_latent, reshape_latent=False,
use_wscale=args.use_wscale).to(device)
if not args.no_ema:
e_ema = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,
which_latent=args.which_latent, reshape_latent=False,
use_wscale=args.use_wscale).to(device)
else:
from model import Encoder
encoder = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,
which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group,
latent_space=args.latent_space, pca_state=pca_state).to(device)
if not args.no_ema:
e_ema = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,
which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group,
latent_space=args.latent_space, pca_state=pca_state).to(device)
if not args.no_ema:
e_ema.eval()
accumulate(e_ema, encoder, 0)
# For lazy regularization (see paper appendix page 11)
# e_reg_ratio = args.e_reg_every / (args.e_reg_every + 1) if args.e_reg_every > 0 else 1.
e_reg_ratio = 1.
d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1) if args.d_reg_every > 0 else 1.
e_optim = optim.Adam(
encoder.parameters(),
lr=args.lr * e_reg_ratio,
betas=(0 ** e_reg_ratio, 0.99 ** e_reg_ratio),
)
d_optim = optim.Adam(
discriminator.parameters(),
lr=args.lr * d_reg_ratio,
betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
)
if args.resume:
if args.ckpt is None:
args.ckpt = os.path.join(args.log_dir, 'weight', f"latest.pt")
print("load model:", args.ckpt)
ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
try:
ckpt_name = os.path.basename(args.ckpt)
if 'iter' in ckpt:
args.start_iter = ckpt["iter"]
else:
args.start_iter = int(os.path.splitext(ckpt_name)[0])
except ValueError:
pass
encoder.load_state_dict(ckpt["e"])
# generator.load_state_dict(ckpt["g"])
discriminator.load_state_dict(ckpt["d"])
e_ema.load_state_dict(ckpt["e_ema"])
g_ema.load_state_dict(ckpt["g_ema"])
e_optim.load_state_dict(ckpt["e_optim"])
# g_optim.load_state_dict(ckpt["g_optim"])
d_optim.load_state_dict(ckpt["d_optim"])
else:
print("load g model:", args.g_ckpt)
g_ckpt = torch.load(args.g_ckpt, map_location=lambda storage, loc: storage)
# generator.load_state_dict(g_ckpt["g"])
if 'g_ema' in g_ckpt:
g_ema.load_state_dict(g_ckpt["g_ema"])
else:
g_ema.load_state_dict(g_ckpt["g"])
if not args.no_load_discriminator:
discriminator.load_state_dict(g_ckpt["d"])
d_optim.load_state_dict(g_ckpt["d_optim"])
if args.distributed:
encoder = nn.parallel.DistributedDataParallel(
encoder,
device_ids=[args.local_rank],
output_device=args.local_rank,
broadcast_buffers=False,
)
discriminator = nn.parallel.DistributedDataParallel(
discriminator,
device_ids=[args.local_rank],
output_device=args.local_rank,
broadcast_buffers=False,
)
dataset = get_image_dataset(args, args.dataset, args.path, train=True)
loader = data.DataLoader(
dataset,
batch_size=args.batch,
sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
drop_last=True,
)
loader2 = None
if args.eval_every > 0:
indices = torch.randperm(len(dataset))[:args.n_sample_fid]
dataset2 = data.Subset(dataset, indices)
loader2 = data.DataLoader(dataset2, batch_size=64, num_workers=4, shuffle=False)
if get_rank() == 0 and wandb is not None and args.wandb:
wandb.init(project=args.name)
train(args, loader, loader2, encoder, g_ema, discriminator, vggnet, pwcnet, e_optim, d_optim, e_ema, pca_state, device)
|
import argparse
import math
import random
import os
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import datasets, transforms, utils
from PIL import Image
from tqdm import tqdm
import util
from calc_inception import load_patched_inception_v3
from fid import extract_feature_from_samples, calc_fid, extract_feature_from_reconstruction
import pickle
import pdb
st = pdb.set_trace
try:
import wandb
except ImportError:
wandb = None
from idinvert_pytorch.models.perceptual_model import VGG16
from dataset import get_image_dataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from op import conv2d_gradfix
from non_leaking import augment, AdaptiveAugment
def data_sampler(dataset, shuffle, distributed):
if distributed:
return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return data.RandomSampler(dataset)
else:
return data.SequentialSampler(dataset)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
def sample_data(loader):
# Endless image iterator
while True:
for batch in loader:
if isinstance(batch, (list, tuple)):
yield batch[0]
else:
yield batch
def d_logistic_loss(real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def d_r1_loss(real_pred, real_img):
with conv2d_gradfix.no_weight_gradients():
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_img, create_graph=True
)
grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(fake_pred):
loss = F.softplus(-fake_pred).mean()
return loss
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
def make_noise(batch, latent_dim, n_noise, device):
if n_noise == 1:
return torch.randn(batch, latent_dim, device=device)
noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
return noises
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
def set_grad_none(model, targets):
for n, p in model.named_parameters():
if n in targets:
p.grad = None
def accumulate_batches(data_iter, num):
samples = []
while num > 0:
imgs = next(data_iter)
samples.append(imgs)
num -= imgs.size(0)
samples = torch.cat(samples, dim=0)
if num < 0:
samples = samples[:num, ...]
return samples
def load_real_samples(args, data_iter):
npy_path = args.sample_cache
if npy_path is not None and os.path.exists(npy_path):
sample_x = torch.from_numpy(np.load(npy_path)).to(args.device)
else:
sample_x = accumulate_batches(data_iter, args.n_sample).to(args.device)
if npy_path is not None:
np.save(npy_path, sample_x.cpu().numpy())
return sample_x
def train(args, loader, loader2, encoder, generator, discriminator, vggnet, pwcnet, e_optim, d_optim, e_ema, pca_state, device):
    """Train the encoder (and optionally the discriminator) against a frozen
    EMA generator.

    Each iteration encodes real images, regenerates them through the frozen
    generator, and minimizes a weighted sum of pixel, VGG-feature and
    adversarial losses; the discriminator is updated with the standard
    logistic loss plus lazy R1 regularization. Periodically logs losses,
    evaluates reconstruction FID, dumps sample grids and checkpoints.

    NOTE(review): `generator` is expected to already be the EMA generator in
    eval mode (it is aliased to `g_ema` below and never updated); `pwcnet`
    and `pca_state` are accepted but not used in this function.
    """
    inception = real_mean = real_cov = mean_latent = None
    if args.eval_every > 0:
        # FID machinery: patched InceptionV3 plus precomputed real statistics.
        inception = nn.DataParallel(load_patched_inception_v3()).to(device)
        inception.eval()
        with open(args.inception, "rb") as f:
            embeds = pickle.load(f)
            real_mean = embeds["mean"]
            real_cov = embeds["cov"]
    if get_rank() == 0:
        # Rank 0 owns the text logs.
        if args.eval_every > 0:
            with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:
                f.write(f"Name: {getattr(args, 'name', 'NA')}\n{'-'*50}\n")
        if args.log_every > 0:
            with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:
                f.write(f"Name: {getattr(args, 'name', 'NA')}\n{'-'*50}\n")
    loader = sample_data(loader)  # wrap into an endless iterator
    pbar = range(args.iter)
    if get_rank() == 0:
        pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
    d_loss_val = 0
    e_loss_val = 0
    rec_loss_val = 0
    vgg_loss_val = 0
    adv_loss_val = 0
    # Pre-populate every key so reduce_loss_dict never sees a missing entry
    # on iterations where a branch is skipped.
    loss_dict = {"d": torch.tensor(0., device=device),
                 "real_score": torch.tensor(0., device=device),
                 "fake_score": torch.tensor(0., device=device),
                 "r1_d": torch.tensor(0., device=device),
                 "r1_e": torch.tensor(0., device=device),
                 "rec": torch.tensor(0., device=device),}
    avg_pix_loss = util.AverageMeter()
    avg_vgg_loss = util.AverageMeter()
    if args.distributed:
        # Unwrap DDP so checkpoints/EMA read the bare modules.
        e_module = encoder.module
        d_module = discriminator.module
        g_module = generator.module
    else:
        e_module = encoder
        d_module = discriminator
        g_module = generator
    # accum = 0.5 ** (32 / (10 * 1000))
    ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0
    r_t_stat = 0
    if args.augment and args.augment_p == 0:
        ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, args.ada_every, device)
    # sample_x = accumulate_batches(loader, args.n_sample).to(device)
    sample_x = load_real_samples(args, loader)
    if sample_x.ndim > 4:
        # presumably (N, frames, C, H, W) video batches; keep first frame —
        # TODO confirm against the dataset class
        sample_x = sample_x[:, 0, ...]
    input_is_latent = args.latent_space != 'z'  # Encode in z space?
    requires_grad(generator, False)  # always False
    generator.eval()  # Generator should be ema and in eval mode
    g_ema = generator
    # if args.no_ema or e_ema is None:
    #     e_ema = encoder
    for idx in pbar:
        i = idx + args.start_iter
        if i > args.iter:
            print("Done!")
            break
        real_img = next(loader)
        real_img = real_img.to(device)

        # Train Encoder
        if args.toggle_grads:
            requires_grad(encoder, True)
            requires_grad(discriminator, False)
        pix_loss = vgg_loss = adv_loss = rec_loss = torch.tensor(0., device=device)
        latent_real, _ = encoder(real_img)
        fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
        if args.lambda_adv > 0:
            # Adversarial term: fool the discriminator with reconstructions.
            if args.augment:
                fake_img_aug, _ = augment(fake_img, ada_aug_p)
            else:
                fake_img_aug = fake_img
            fake_pred = discriminator(fake_img_aug)
            adv_loss = g_nonsaturating_loss(fake_pred)
        if args.lambda_pix > 0:
            if args.pix_loss == 'l2':
                pix_loss = torch.mean((fake_img - real_img) ** 2)
            else:
                pix_loss = F.l1_loss(fake_img, real_img)
        if args.lambda_vgg > 0:
            # Perceptual loss in VGG16 feature space.
            real_feat = vggnet(real_img)
            fake_feat = vggnet(fake_img)
            vgg_loss = torch.mean((real_feat - fake_feat) ** 2)
        e_loss = pix_loss * args.lambda_pix + vgg_loss * args.lambda_vgg + adv_loss * args.lambda_adv
        loss_dict["e"] = e_loss
        loss_dict["pix"] = pix_loss
        loss_dict["vgg"] = vgg_loss
        loss_dict["adv"] = adv_loss
        encoder.zero_grad()
        e_loss.backward()
        e_optim.step()
        if args.train_on_fake:
            # Extra supervision: encode generator samples and regress the
            # known latents (latent-space reconstruction loss).
            e_regularize = args.e_rec_every > 0 and i % args.e_rec_every == 0
            if e_regularize and args.lambda_rec > 0:
                noise = mixing_noise(args.batch, args.latent, args.mixing, device)
                fake_img, latent_fake = generator(noise, input_is_latent=input_is_latent, return_latents=True)
                latent_pred, _ = encoder(fake_img)
                if latent_pred.ndim < 3:
                    # Tied-W prediction: broadcast to per-layer W+ shape.
                    latent_pred = latent_pred.unsqueeze(1).repeat(1, latent_fake.size(1), 1)
                rec_loss = torch.mean((latent_fake - latent_pred) ** 2)
                encoder.zero_grad()
                (rec_loss * args.lambda_rec).backward()
                e_optim.step()
                loss_dict["rec"] = rec_loss
        # e_regularize = args.e_reg_every > 0 and i % args.e_reg_every == 0
        # if e_regularize:
        #     # why not regularize on augmented real?
        #     real_img.requires_grad = True
        #     real_pred, _ = encoder(real_img)
        #     r1_loss_e = d_r1_loss(real_pred, real_img)
        #     encoder.zero_grad()
        #     (args.r1 / 2 * r1_loss_e * args.e_reg_every + 0 * real_pred.view(-1)[0]).backward()
        #     e_optim.step()
        #     loss_dict["r1_e"] = r1_loss_e
        if not args.no_ema and e_ema is not None:
            # StyleGAN2-ADA style EMA schedule with optional ramp-up.
            ema_nimg = args.ema_kimg * 1000
            if args.ema_rampup is not None:
                ema_nimg = min(ema_nimg, i * args.batch * args.ema_rampup)
            accum = 0.5 ** (args.batch / max(ema_nimg, 1e-8))
            accumulate(e_ema, e_module, accum)

        # Train Discriminator
        if args.toggle_grads:
            requires_grad(encoder, False)
            requires_grad(discriminator, True)
        if not args.no_update_discriminator and args.lambda_adv > 0:
            latent_real, _ = encoder(real_img)
            fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
            if args.augment:
                real_img_aug, _ = augment(real_img, ada_aug_p)
                fake_img_aug, _ = augment(fake_img, ada_aug_p)
            else:
                real_img_aug = real_img
                fake_img_aug = fake_img
            fake_pred = discriminator(fake_img_aug)
            real_pred = discriminator(real_img_aug)
            d_loss = d_logistic_loss(real_pred, fake_pred)
            loss_dict["d"] = d_loss
            loss_dict["real_score"] = real_pred.mean()
            loss_dict["fake_score"] = fake_pred.mean()
            discriminator.zero_grad()
            d_loss.backward()
            d_optim.step()
            if args.augment and args.augment_p == 0:
                # Adaptive discriminator augmentation: tune p from real scores.
                ada_aug_p = ada_augment.tune(real_pred)
                r_t_stat = ada_augment.r_t_stat
            d_regularize = args.d_reg_every > 0 and i % args.d_reg_every == 0
            if d_regularize:
                # why not regularize on augmented real?
                real_img.requires_grad = True
                real_pred = discriminator(real_img)
                r1_loss_d = d_r1_loss(real_pred, real_img)
                discriminator.zero_grad()
                (args.r1 / 2 * r1_loss_d * args.d_reg_every + 0 * real_pred.view(-1)[0]).backward()
                # Why 0* ? Answer is here https://github.com/rosinality/stylegan2-pytorch/issues/76
                d_optim.step()
                loss_dict["r1_d"] = r1_loss_d
        # Average the loss dict across ranks for logging.
        loss_reduced = reduce_loss_dict(loss_dict)
        d_loss_val = loss_reduced["d"].mean().item()
        e_loss_val = loss_reduced["e"].mean().item()
        r1_d_val = loss_reduced["r1_d"].mean().item()
        r1_e_val = loss_reduced["r1_e"].mean().item()
        pix_loss_val = loss_reduced["pix"].mean().item()
        vgg_loss_val = loss_reduced["vgg"].mean().item()
        adv_loss_val = loss_reduced["adv"].mean().item()
        rec_loss_val = loss_reduced["rec"].mean().item()
        real_score_val = loss_reduced["real_score"].mean().item()
        fake_score_val = loss_reduced["fake_score"].mean().item()
        avg_pix_loss.update(pix_loss_val, real_img.shape[0])
        avg_vgg_loss.update(vgg_loss_val, real_img.shape[0])
        if get_rank() == 0:
            # All logging / evaluation / checkpointing happens on rank 0 only.
            pbar.set_description(
                (
                    f"d: {d_loss_val:.4f}; e: {e_loss_val:.4f}; r1_d: {r1_d_val:.4f}; r1_e: {r1_e_val:.4f}; "
                    f"pix: {pix_loss_val:.4f}; vgg: {vgg_loss_val:.4f}; adv: {adv_loss_val:.4f}; "
                    f"rec: {rec_loss_val:.4f}; augment: {ada_aug_p:.4f}"
                )
            )
            if i % args.log_every == 0:
                # NOTE(review): uses e_ema unconditionally here — with
                # --no_ema this is None and would crash; confirm intended.
                with torch.no_grad():
                    latent_x, _ = e_ema(sample_x)
                    fake_x, _ = g_ema([latent_x], input_is_latent=input_is_latent)
                    sample_pix_loss = torch.sum((sample_x - fake_x) ** 2)
                with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:
                    f.write(f"{i:07d}; pix: {avg_pix_loss.avg}; vgg: {avg_vgg_loss.avg}; "
                            f"ref: {sample_pix_loss.item()};\n")
            if args.eval_every > 0 and i % args.eval_every == 0:
                with torch.no_grad():
                    g_ema.eval()
                    e_ema.eval()
                    # Recon
                    features = extract_feature_from_reconstruction(
                        e_ema, g_ema, inception, args.truncation, mean_latent, loader2, args.device,
                        input_is_latent=input_is_latent, mode='recon',
                    ).numpy()
                    sample_mean = np.mean(features, 0)
                    sample_cov = np.cov(features, rowvar=False)
                    fid_re = calc_fid(sample_mean, sample_cov, real_mean, real_cov)
                # print("Recon FID:", fid_re)
                with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:
                    f.write(f"{i:07d}; recon fid: {float(fid_re):.4f};\n")
            if wandb and args.wandb:
                wandb.log(
                    {
                        "Encoder": e_loss_val,
                        "Discriminator": d_loss_val,
                        "Augment": ada_aug_p,
                        "Rt": r_t_stat,
                        "R1 D": r1_d_val,
                        "R1 E": r1_e_val,
                        "Pix Loss": pix_loss_val,
                        "VGG Loss": vgg_loss_val,
                        "Adv Loss": adv_loss_val,
                        "Rec Loss": rec_loss_val,
                        "Real Score": real_score_val,
                        "Fake Score": fake_score_val,
                    }
                )
            if i % args.log_every == 0:
                # Dump a grid interleaving real rows and their reconstructions.
                with torch.no_grad():
                    e_eval = encoder if args.no_ema else e_ema
                    e_eval.eval()
                    nrow = int(args.n_sample ** 0.5)
                    nchw = list(sample_x.shape)[1:]
                    latent_real, _ = e_eval(sample_x)
                    fake_img, _ = generator([latent_real], input_is_latent=input_is_latent)
                    sample = torch.cat((sample_x.reshape(args.n_sample//nrow, nrow, *nchw),
                                        fake_img.reshape(args.n_sample//nrow, nrow, *nchw)), 1)
                    utils.save_image(
                        sample.reshape(2*args.n_sample, *nchw),
                        os.path.join(args.log_dir, 'sample', f"{str(i).zfill(6)}.png"),
                        nrow=nrow,
                        normalize=True,
                        value_range=(-1, 1),
                    )
                    e_eval.train()
            if i % args.save_every == 0:
                e_eval = encoder if args.no_ema else e_ema
                torch.save(
                    {
                        "e": e_module.state_dict(),
                        "d": d_module.state_dict(),
                        "g_ema": g_module.state_dict(),
                        "e_ema": e_eval.state_dict(),
                        "e_optim": e_optim.state_dict(),
                        "d_optim": d_optim.state_dict(),
                        "args": args,
                        "ada_aug_p": ada_aug_p,
                        "iter": i,
                    },
                    os.path.join(args.log_dir, 'weight', f"{str(i).zfill(6)}.pt"),
                )
            if i % args.save_latest_every == 0:
                # NOTE(review): relies on e_eval having been bound by a prior
                # save_every/log_every branch (both fire at i == 0); fragile
                # if those intervals change — confirm.
                torch.save(
                    {
                        "e": e_module.state_dict(),
                        "d": d_module.state_dict(),
                        "g_ema": g_module.state_dict(),
                        "e_ema": e_eval.state_dict(),
                        "e_optim": e_optim.state_dict(),
                        "d_optim": d_optim.state_dict(),
                        "args": args,
                        "ada_aug_p": ada_aug_p,
                        "iter": i,
                    },
                    os.path.join(args.log_dir, 'weight', f"latest.pt"),
                )
if __name__ == "__main__":
    # Entry point: parse CLI args, build the encoder/discriminator/frozen
    # generator plus auxiliary nets, restore checkpoints, construct data
    # loaders and hand everything to train().
    device = "cuda"
    parser = argparse.ArgumentParser(description="StyleGAN2 encoder trainer")
    parser.add_argument("--path", type=str, help="path to the lmdb dataset")
    parser.add_argument("--arch", type=str, default='stylegan2', help="model architectures (stylegan2 | swagan)")
    parser.add_argument("--dataset", type=str, default='multires')
    parser.add_argument("--cache", type=str, default='local.db')
    parser.add_argument("--sample_cache", type=str, default=None)
    parser.add_argument("--name", type=str, help="experiment name", default='default_exp')
    parser.add_argument("--log_root", type=str, help="where to save training logs", default='logs')
    parser.add_argument("--log_every", type=int, default=100, help="save samples every # iters")
    parser.add_argument("--save_every", type=int, default=1000, help="save checkpoints every # iters")
    parser.add_argument("--save_latest_every", type=int, default=100, help="save latest checkpoints every # iters")
    parser.add_argument("--resume", action='store_true')
    parser.add_argument("--no_update_discriminator", action='store_true')
    parser.add_argument("--no_load_discriminator", action='store_true')
    parser.add_argument("--toggle_grads", action='store_true')
    parser.add_argument("--use_optical_flow", action='store_true')
    parser.add_argument("--use_wscale", action='store_true', help="whether to use `wscale` layer in idinvert encoder")
    parser.add_argument("--no_ema", action='store_true', help="do not use ema if enabled")
    parser.add_argument("--train_on_fake", action='store_true', help="train encoder on fake?")
    parser.add_argument("--e_rec_every", type=int, default=1, help="interval of minimizing recon loss on w")
    parser.add_argument("--pix_loss", type=str, default='l2')
    parser.add_argument("--lambda_pix", type=float, default=1.0, help="recon loss on pixel (x)")
    parser.add_argument("--lambda_vgg", type=float, default=5e-5)
    parser.add_argument("--lambda_adv", type=float, default=0.1)
    parser.add_argument("--lambda_rec", type=float, default=1.0, help="recon loss on style (w)")
    parser.add_argument("--output_layer_idx", type=int, default=23)
    parser.add_argument("--vgg_ckpt", type=str, default="vgg16.pth")
    parser.add_argument("--which_encoder", type=str, default='style')
    parser.add_argument("--which_latent", type=str, default='w_plus')
    parser.add_argument("--stddev_group", type=int, default=1)
    parser.add_argument("--use_residual_latent_mlp", action='store_true')
    parser.add_argument("--n_latent_mlp", type=int, default=8)
    parser.add_argument(
        "--iter", type=int, default=800000, help="total training iterations"
    )
    parser.add_argument(
        "--batch", type=int, default=16, help="batch sizes for each gpus"
    )
    parser.add_argument(
        "--n_sample",
        type=int,
        default=64,
        help="number of the samples generated during training",
    )
    parser.add_argument(
        "--size", type=int, default=256, help="image sizes for the model"
    )
    parser.add_argument(
        "--r1", type=float, default=10, help="weight of the r1 regularization"
    )
    parser.add_argument(
        "--path_regularize",
        type=float,
        default=2,
        help="weight of the path length regularization",
    )
    parser.add_argument(
        "--path_batch_shrink",
        type=int,
        default=2,
        help="batch size reducing factor for the path length regularization (reduce memory consumption)",
    )
    parser.add_argument(
        "--e_reg_every",
        type=int,
        default=0,
        help="interval of the applying r1 regularization, no if 0",
    )
    parser.add_argument(
        "--d_reg_every",
        type=int,
        default=16,
        help="interval of the applying r1 regularization, no if 0",
    )
    parser.add_argument(
        "--g_reg_every",
        type=int,
        default=4,
        help="interval of the applying path length regularization",
    )
    parser.add_argument(
        "--mixing", type=float, default=0.9, help="probability of latent code mixing"
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default=None,
        help="path to the checkpoints to resume training",
    )
    parser.add_argument(
        "--g_ckpt",
        type=str,
        default=None,
        help="path to the checkpoint of generator",
    )
    parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
    parser.add_argument(
        "--channel_multiplier",
        type=int,
        default=2,
        help="channel multiplier factor for the model. config-f = 2, else = 1",
    )
    parser.add_argument(
        "--wandb", action="store_true", help="use weights and biases logging"
    )
    parser.add_argument(
        "--local_rank", type=int, default=0, help="local rank for distributed training"
    )
    parser.add_argument(
        "--augment", action="store_true", help="apply non leaking augmentation"
    )
    parser.add_argument(
        "--augment_p",
        type=float,
        default=0,
        help="probability of applying augmentation. 0 = use adaptive augmentation",
    )
    parser.add_argument(
        "--ada_target",
        type=float,
        default=0.6,
        help="target augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_length",
        type=int,
        default=500 * 1000,
        help="target duraing to reach augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_every",
        type=int,
        default=8,
        help="probability update interval of the adaptive augmentation",
    )
    parser.add_argument("--inception", type=str, default=None, help="path to precomputed inception embedding")
    parser.add_argument("--eval_every", type=int, default=1000, help="interval of metric evaluation")
    parser.add_argument("--truncation", type=float, default=1, help="truncation factor")
    parser.add_argument("--n_sample_fid", type=int, default=10000, help="number of the samples for calculating FID")
    parser.add_argument("--latent_space", type=str, default='w', help="latent space (w | p | pn | z)")
    parser.add_argument("--ema_kimg", type=int, default=10, help="Half-life of the exponential moving average (EMA) of generator weights.")
    parser.add_argument("--ema_rampup", type=float, default=None, help="EMA ramp-up coefficient.")
    parser.add_argument("--n_mlp_g", type=int, default=8)
    parser.add_argument("--pca_state", type=str, default=None)
    args = parser.parse_args()
    util.seed_everything()
    args.device = device
    # Distributed setup: WORLD_SIZE is set by torch.distributed launchers.
    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()
    args.n_latent = int(np.log2(args.size)) * 2 - 2  # used in Generator
    args.latent = 512  # fixed, dim of w or z (same size)
    if args.which_latent == 'w_plus':
        # One 512-d style per generator layer.
        args.latent_full = args.latent * args.n_latent
    elif args.which_latent == 'w_tied':
        args.latent_full = args.latent
    else:
        raise NotImplementedError
    args.start_iter = 0
    args.iter += 1  # +1 so the final iteration is reached inside train()
    util.set_log_dir(args)
    util.print_args(parser, args)
    if args.arch == 'stylegan2':
        from model import Generator, Discriminator
    elif args.arch == 'swagan':
        from swagan import Generator, Discriminator
    # PCA state
    pca_state = None
    if args.pca_state is not None:
        pca_state = np.load(args.pca_state)
        pca_state = {k: torch.from_numpy(pca_state[k]).float() for k in pca_state}
        pca_state['Lambda'] = pca_state['Lambda'].unsqueeze(0)
        pca_state['mu'] = pca_state['mu'].unsqueeze(0)
        pca_state['CT'] = pca_state['C'].T
    # Auxiliary models (VGG and PWC)
    vggnet = VGG16(output_layer_idx=args.output_layer_idx).to(device)
    vgg_ckpt = torch.load(args.vgg_ckpt, map_location=lambda storage, loc: storage)
    vggnet.load_state_dict(vgg_ckpt)
    pwcnet = None
    if args.use_optical_flow:
        # Hyphenated package name forces __import__ instead of a plain import.
        pwc = __import__('pytorch-pwc.run', globals(), locals(), ['Network'], 0)
        pwcnet = pwc.Network().to(device)  # state_dict loaded in init
        pwcnet.eval()
    discriminator = Discriminator(
        args.size, channel_multiplier=args.channel_multiplier
    ).to(device)
    # generator = Generator(
    #     args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
    # ).to(device)
    g_ema = Generator(
        args.size, args.latent, args.n_mlp_g, channel_multiplier=args.channel_multiplier
    ).to(device)
    g_ema.eval()
    # accumulate(g_ema, generator, 0)
    e_ema = None
    if args.which_encoder == 'idinvert':
        from idinvert_pytorch.models.stylegan_encoder_network import StyleGANEncoderNet
        encoder = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,
                                     which_latent=args.which_latent, reshape_latent=False,
                                     use_wscale=args.use_wscale).to(device)
        if not args.no_ema:
            e_ema = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,
                                       which_latent=args.which_latent, reshape_latent=False,
                                       use_wscale=args.use_wscale).to(device)
    else:
        from model import Encoder
        encoder = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,
                          which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group,
                          latent_space=args.latent_space, pca_state=pca_state).to(device)
        if not args.no_ema:
            e_ema = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,
                            which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group,
                            latent_space=args.latent_space, pca_state=pca_state).to(device)
    if not args.no_ema:
        e_ema.eval()
        accumulate(e_ema, encoder, 0)  # initialize EMA weights = encoder weights
    # For lazy regularization (see paper appendix page 11)
    # e_reg_ratio = args.e_reg_every / (args.e_reg_every + 1) if args.e_reg_every > 0 else 1.
    e_reg_ratio = 1.
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1) if args.d_reg_every > 0 else 1.
    e_optim = optim.Adam(
        encoder.parameters(),
        lr=args.lr * e_reg_ratio,
        betas=(0 ** e_reg_ratio, 0.99 ** e_reg_ratio),
    )
    d_optim = optim.Adam(
        discriminator.parameters(),
        lr=args.lr * d_reg_ratio,
        betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
    )
    if args.resume:
        # Full resume: encoder, discriminator, EMA copies and optimizers.
        if args.ckpt is None:
            args.ckpt = os.path.join(args.log_dir, 'weight', f"latest.pt")
        print("load model:", args.ckpt)
        ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
        try:
            ckpt_name = os.path.basename(args.ckpt)
            if 'iter' in ckpt:
                args.start_iter = ckpt["iter"]
            else:
                # Fall back to parsing the iteration out of the filename.
                args.start_iter = int(os.path.splitext(ckpt_name)[0])
        except ValueError:
            pass
        encoder.load_state_dict(ckpt["e"])
        # generator.load_state_dict(ckpt["g"])
        discriminator.load_state_dict(ckpt["d"])
        e_ema.load_state_dict(ckpt["e_ema"])
        g_ema.load_state_dict(ckpt["g_ema"])
        e_optim.load_state_dict(ckpt["e_optim"])
        # g_optim.load_state_dict(ckpt["g_optim"])
        d_optim.load_state_dict(ckpt["d_optim"])
    else:
        # Fresh run: only the pretrained (frozen) generator, and optionally
        # its discriminator, are loaded.
        print("load g model:", args.g_ckpt)
        g_ckpt = torch.load(args.g_ckpt, map_location=lambda storage, loc: storage)
        # generator.load_state_dict(g_ckpt["g"])
        if 'g_ema' in g_ckpt:
            g_ema.load_state_dict(g_ckpt["g_ema"])
        else:
            g_ema.load_state_dict(g_ckpt["g"])
        if not args.no_load_discriminator:
            discriminator.load_state_dict(g_ckpt["d"])
            d_optim.load_state_dict(g_ckpt["d_optim"])
    if args.distributed:
        encoder = nn.parallel.DistributedDataParallel(
            encoder,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        discriminator = nn.parallel.DistributedDataParallel(
            discriminator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
    dataset = get_image_dataset(args, args.dataset, args.path, train=True)
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
        drop_last=True,
    )
    loader2 = None
    if args.eval_every > 0:
        # Fixed random subset of the training set for FID evaluation.
        indices = torch.randperm(len(dataset))[:args.n_sample_fid]
        dataset2 = data.Subset(dataset, indices)
        loader2 = data.DataLoader(dataset2, batch_size=64, num_workers=4, shuffle=False)
    if get_rank() == 0 and wandb is not None and args.wandb:
        wandb.init(project=args.name)
    train(args, loader, loader2, encoder, g_ema, discriminator, vggnet, pwcnet, e_optim, d_optim, e_ema, pca_state, device)
|
en
| 0.478766
|
# Endless image iterator # accum = 0.5 ** (32 / (10 * 1000)) # sample_x = accumulate_batches(loader, args.n_sample).to(device) # Encode in z space? # always False # Generator should be ema and in eval mode # if args.no_ema or e_ema is None: # e_ema = encoder # Train Encoder # e_regularize = args.e_reg_every > 0 and i % args.e_reg_every == 0 # if e_regularize: # # why not regularize on augmented real? # real_img.requires_grad = True # real_pred, _ = encoder(real_img) # r1_loss_e = d_r1_loss(real_pred, real_img) # encoder.zero_grad() # (args.r1 / 2 * r1_loss_e * args.e_reg_every + 0 * real_pred.view(-1)[0]).backward() # e_optim.step() # loss_dict["r1_e"] = r1_loss_e # Train Discriminator # why not regularize on augmented real? # Why 0* ? Answer is here https://github.com/rosinality/stylegan2-pytorch/issues/76 # Recon # print("Recon FID:", fid_re) # iters") # iters") # iters") # used in Generator # fixed, dim of w or z (same size) # PCA state # Auxiliary models (VGG and PWC) # state_dict loaded in init # generator = Generator( # args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier # ).to(device) # accumulate(g_ema, generator, 0) # For lazy regularization (see paper appendix page 11) # e_reg_ratio = args.e_reg_every / (args.e_reg_every + 1) if args.e_reg_every > 0 else 1. # generator.load_state_dict(ckpt["g"]) # g_optim.load_state_dict(ckpt["g_optim"]) # generator.load_state_dict(g_ckpt["g"])
| 1.979821
| 2
|
examples/py/async-instantiate-all-at-once.py
|
Dan-krm/ccxt
| 3
|
6627317
|
# -*- coding: utf-8 -*-

import os
import sys
import asyncio

root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt.async_support as ccxt  # noqa: E402

exchanges = {}  # a placeholder for your instances


async def main():
    """Instantiate every supported exchange, fetch one order book, and close
    every aiohttp session even if the fetch fails."""
    for exchange_id in ccxt.exchanges:
        exchange_class = getattr(ccxt, exchange_id)
        exchanges[exchange_id] = exchange_class()
    try:
        # now the exchanges dictionary contains all exchange instances...
        print(await exchanges['bittrex'].fetch_order_book('ETH/BTC'))
    finally:
        # close the aiohttp session objects; previously this was skipped when
        # fetch_order_book raised, leaking every unclosed connector
        for exchange_id in exchanges:
            await exchanges[exchange_id].close()


asyncio.run(main())
|
# -*- coding: utf-8 -*-
import os
import sys
import asyncio
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async_support as ccxt # noqa: E402
exchanges = {} # a placeholder for your instances
async def main():
for id in ccxt.exchanges:
exchange = getattr(ccxt, id)
exchanges[id] = exchange()
# now exchanges dictionary contains all exchange instances...
print(await exchanges['bittrex'].fetch_order_book('ETH/BTC'))
# close the aiohttp session object
for id in exchanges:
await exchanges[id].close()
asyncio.run(main())
|
en
| 0.670171
|
# -*- coding: utf-8 -*- # noqa: E402 # a placeholder for your instances # now exchanges dictionary contains all exchange instances... # close the aiohttp session object
| 2.709159
| 3
|
inheritence_of_class).py
|
Annonymous-error/general-codes
| 1
|
6627318
|
class phone:  # base class
    def __init__(self, brand, model, price):
        """Store brand/model and clamp price at zero (negative prices become 0)."""
        # Bug fix: constructor was misspelled ``__initt__`` so it never ran.
        self.brand = brand
        self.model = model
        self.price = max(price, 0)  # clamp so negative values are rejected

    def full_name(self):  # class function
        """Return "<brand><model>"."""
        return f"{self.brand}{self.model}"

    def make_a_call(self, number):  # class function
        """Return a calling message for the given number.

        Bug fix: ``number`` was previously an undefined free variable.
        """
        return f"calling {number}..."


class smartphone(phone):  # carries all properties of phone
    def __init__(self, brand, model, price, ram, internal_mem, rear_cam):
        # Bug fix: the class did not inherit from phone, so super().__init__
        # resolved to object.__init__ and raised TypeError.
        super().__init__(brand, model, price)
        self.ram = ram
        self.internal_mem = internal_mem
        self.rear_cam = rear_cam

    def full_name(self):  # class function
        """Return "<brand><model><ram>" (bug fix: ``ram`` -> ``self.ram``)."""
        return f"{self.brand}{self.model}{self.ram}"


class flagshipphone(smartphone):  # carries all properties of smartphone
    def __init__(self, brand, model, price, ram, internal_mem, rear_cam, front_cam):
        super().__init__(brand, model, price, ram, internal_mem, rear_cam)
        self.front_cam = front_cam


# Python searches methods from child to parent: the Method Resolution Order (MRO).
# print(help(flagshipphone))
issubclass(smartphone, phone)  # True: smartphone is a subclass of phone
|
class phone: #base class
def __initt__(self, brand,model ,price):
self.brand = brand
self.model = model
self.price=max(price,0) #sso as to reject non negative values
def full_name(self): #class fuction
return f"{self.brand}{self.model}"
def make_a_call(self): #class fuction
return f"caliing {number}..."
class smartphone: # carries all properties of phone
def __initt__(self, brand,model ,price,ram, internal_mem,rear_cam ):
super().__init__(brand,model,price)
self.ram= ram
self.internal_mem = internal_mem
self.rear_cam =rear_cam
def full_name(self): #class fuction
return f"{self.brand}{self.model}{ram}"
class flagshipphone: # carries all properties of phone
def __initt__(self, brand,model ,price,ram, internal_mem,rear_cam,front_cam):
super().__init__(brand,model ,price,ram, internal_mem,rear_cam)
self.front_cam =front_cam
#python searches methods from child to parent = Method resolution order
# print(help(flagshipphone()))
issubclass(smartphone,phone) # to check given class is subclass ie smartphone is of phone
|
en
| 0.821529
|
#base class #sso as to reject non negative values #class fuction #class fuction # carries all properties of phone #class fuction # carries all properties of phone #python searches methods from child to parent = Method resolution order # print(help(flagshipphone())) # to check given class is subclass ie smartphone is of phone
| 3.798259
| 4
|
code/super_minitaur/script/lpmslib/LpmsB.py
|
buenos-dan/quadrupedal_robot
| 5
|
6627319
|
<gh_stars>1-10
import time
import serial
import threading
import struct
import sys
from datetime import datetime, timedelta
from LpmsConfig import *
from lputils import *
from LpmsConfigurationSettings import LpmsConfigurationSettings
#TODO:
# check serial port opened before executing commands
# add wait for ack routine
class LpmsB(object):
    """Driver for an LP-RESEARCH LPMS-B IMU over a serial link.

    A background thread reads raw bytes from the serial port and feeds them
    through a packet state machine (__parse); decoded readings are stored on
    the instance.

    NOTE(review): this module uses Python 2 string/bytes semantics (and a
    Python 2 ``print`` statement below); it will not run unmodified on
    Python 3.
    NOTE(review): the mutable class attributes below (in_bytes, rx_buffer,
    raw_tx_data) are shared across instances until first rebound in
    __clear_params — fine for a single sensor, risky for several.
    """
    TAG = "LPMSB"
    runOnce = True
    verbose = True
    is_thread_running = False
    sensor_configuration = LpmsConfigurationSettings()

    # Receive-parser state machine states
    PACKET_ADDRESS0 = 0
    PACKET_ADDRESS1 = 1
    PACKET_FUNCTION0 = 2
    PACKET_FUNCTION1 = 3
    PACKET_RAW_DATA = 4
    PACKET_LRC_CHECK0 = 5
    PACKET_LRC_CHECK1 = 6
    PACKET_END = 7
    PACKET_LENGTH0 = 8
    PACKET_LENGTH1 = 9

    # Parser working state
    current_length = 0
    current_function = 0
    current_address = 0
    rx_state = PACKET_END
    in_bytes = []
    rx_buffer = []
    raw_tx_data = []
    rx_index = 0
    lrc_check = 0
    wait_for_ack = False
    wait_for_data = False
    is_sensor_connected = False
    config_register = 0
    status_register = 0

    # Latest decoded sensor readings
    imu_id = 0
    timestamp = 0
    frame_counter = 0
    battery_level = 0
    battery_voltage = 0
    temperature = 0
    acc_x = 0
    acc_y = 0
    acc_z = 0
    gyr_x = 0
    gyr_y = 0
    gyr_z = 0
    mag_x = 0
    mag_y = 0
    mag_z = 0
    angular_vel_x = 0
    angular_vel_y = 0
    angular_vel_z = 0
    quat_w = 0
    quat_x = 0
    quat_y = 0
    quat_z = 0
    euler_x = 0
    euler_y = 0
    euler_z = 0
    linacc_x = 0
    linacc_y = 0
    linacc_z = 0
    altitude = 0
    pressure = 0

    # debug log
    debug_log_size = 0
    debug_log_size_index = 0
    def __init__(self, port, baudrate):
        # Serial device name (e.g. "/dev/ttyUSB0", "COM3") and baud rate;
        # the serial port itself is not opened here.
        self.port = port
        self.baudrate = baudrate
        self.__init_params()
    def __clear_params(self):
        """Reset parser state and all cached sensor readings to defaults.

        NOTE(review): battery_level, battery_voltage, config_register and
        status_register are not reset here — confirm whether intentional.
        """
        # Packet-parser state
        self.current_length = 0
        self.current_function = 0
        self.current_address = 0
        self.rx_state = self.PACKET_END
        self.in_bytes = []
        self.rx_buffer = []
        self.raw_tx_data = []
        self.rx_index = 0
        self.lrc_check = 0
        # Decoded sensor readings
        self.imu_id = 0
        self.timestamp = 0
        self.frame_counter = 0
        self.temperature = 0
        self.acc_x = 0
        self.acc_y = 0
        self.acc_z = 0
        self.gyr_x = 0
        self.gyr_y = 0
        self.gyr_z = 0
        self.mag_x = 0
        self.mag_y = 0
        self.mag_z = 0
        self.angular_vel_x = 0
        self.angular_vel_y = 0
        self.angular_vel_z = 0
        self.quat_w = 0
        self.quat_x = 0
        self.quat_y = 0
        self.quat_z = 0
        self.euler_x = 0
        self.euler_y = 0
        self.euler_z = 0
        self.linacc_x = 0
        self.linacc_y = 0
        self.linacc_z = 0
        self.altitude = 0
        self.pressure = 0
        # Handshake flags
        self.wait_for_ack = False
        self.wait_for_data = False
    def __init_params(self):
        # Currently just delegates to __clear_params.
        self.__clear_params()
def __thread_is_alive(self):
try:
return self.thread.isAlive()
except AttributeError:
return False
    def __run(self):
        """ Method that runs forever """
        # Reader-thread body: poll the serial port and feed any pending bytes
        # to the packet parser until self.quit is set, then close the port.
        # NOTE(review): busy-waits when no data is pending (no sleep).
        self.is_thread_running = True
        while not self.quit:
            self.is_sensor_connected = True
            bytesToRead = self.serial_port.inWaiting()
            if bytesToRead > 0:
                reading = self.serial_port.read(bytesToRead)
                # print reading
                self.__parse(reading)
        self.serial_port.close()
        self.is_sensor_connected = False
        self.is_thread_running = False
# TODO: add offset length check
    def __convert_rxbytes_to_int16(self, offset, dataList):
        """Decode 2 received bytes at `offset` as a native-order signed int16.

        `dataList` is a list of 1-character byte strings (Python 2 ``str``
        items, as produced by iterating the serial read buffer).
        """
        (i,) = struct.unpack("h", ''.join(dataList[offset:offset+2]))
        return i
    def __convert_rxbytes_to_int(self, offset, dataList):
        """Decode 4 received bytes at `offset` as a native-order signed int32.

        `dataList` is a list of 1-character byte strings.
        """
        (i,) = struct.unpack("i", ''.join(dataList[offset:offset+4]))
        return i
    def __convert_rxbytes_to_float(self, offset, dataList):
        """Decode 4 received bytes at `offset` as a native-order float32.

        `dataList` is a list of 1-character byte strings.
        """
        (i,) = struct.unpack("f", ''.join(dataList[offset:offset+4]))
        return i
    def __convert_int16_to_txbytes(self, v):
        """Encode `v` as a 2-byte native-order signed int16 for transmission.

        Returns the packed byte string.
        """
        return struct.pack("h", v)
    def __convert_int_to_txbytes(self, v):
        """Encode `v` as a 4-byte native-order signed int32 for transmission.

        Returns the packed byte string.
        """
        return struct.pack("i", v)
    def __print_str_to_hex(self, s):
        # Debug helper: print a byte string as colon-separated hex pairs
        # (Python 2 print statement — this module targets Python 2).
        print ":".join("{:02x}".format(ord(c)) for c in s)
# Parser
    # Parser
    def __parse_function(self):
        """Dispatch a fully received, checksum-verified packet by its
        function code, updating the corresponding instance state and
        releasing the wait_for_ack / wait_for_data handshake flags."""
        cf = self.current_function
        if cf == LPMS_ACK:
            if self.verbose: logd(self.TAG, "Received Ack")
            self.wait_for_ack = False
        elif cf == LPMS_NACK:
            if self.verbose: logd(self.TAG, "Received Nack")
            self.wait_for_ack = False
        elif cf == LPMS_GET_CONFIG:
            self.config_register = self.__convert_rxbytes_to_int(0, self.rx_buffer)
            # print "{0:b}".format(self.config_register)
            self.__parse_configuration_register(self.config_register)
            self.wait_for_data = False
        elif cf == LPMS_GET_SENSOR_DATA:
            # 16-bit fixed-point stream vs 32-bit float stream.
            if self.sensor_configuration.sixteen_bit_data_enable:
                self.__parse_sensor_data(16)
            else:
                self.__parse_sensor_data()
            self.wait_for_data = False
        elif cf == GET_FIRMWARE_VERSION:
            # Payload: three little-spaced int32 fields, build/minor/major.
            vmajor = self.__convert_rxbytes_to_int(8, self.rx_buffer)
            vminor = self.__convert_rxbytes_to_int(4, self.rx_buffer)
            vbuild = self.__convert_rxbytes_to_int(0, self.rx_buffer)
            self.firmwareVersion = str(vmajor) + "." + str(vminor) + "." + str(vbuild)
            self.wait_for_data = False
        elif cf == GET_PING:
            # Timestamp format depends on the configured counter mode.
            if self.sensor_configuration.timestamp_counter_mode_enable:
                self.timestamp = self.__convert_rxbytes_to_int(0, self.rx_buffer)
            else:
                self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
        elif cf == GET_TEMPERATURE:
            self.temperature = self.__convert_rxbytes_to_float(0, self.rx_buffer)
            self.wait_for_data = False
    def __parse(self, data):
        """Feed raw serial bytes through the packet state machine.

        Packet layout: ':' start byte, u16 address, u16 function, u16 payload
        length, `length` payload bytes, then a 16-bit LRC checksum computed
        over address + function + length + payload bytes. On a checksum match
        the packet is dispatched via __parse_function(); either way the
        machine returns to PACKET_END to hunt for the next start byte.
        """
        self.lrcReceived = 0
        for b in data:
            if self.rx_state == self.PACKET_END:
                # Idle: scan for the ':' start-of-packet marker.
                if (b == ':'):
                    self.rx_state = self.PACKET_ADDRESS0
            elif self.rx_state == self.PACKET_ADDRESS0:
                self.in_bytes = []
                self.in_bytes.append(b)
                self.rx_state = self.PACKET_ADDRESS1
            elif self.rx_state == self.PACKET_ADDRESS1:
                self.in_bytes.append(b)
                self.current_address = self.__convert_rxbytes_to_int16(0, self.in_bytes)
                self.imu_id = self.current_address
                self.rx_state = self.PACKET_FUNCTION0
            elif self.rx_state == self.PACKET_FUNCTION0:
                self.in_bytes = []
                self.in_bytes.append(b)
                self.rx_state = self.PACKET_FUNCTION1
            elif self.rx_state == self.PACKET_FUNCTION1:
                self.in_bytes.append(b)
                self.current_function = self.__convert_rxbytes_to_int16(0, self.in_bytes)
                self.rx_state = self.PACKET_LENGTH0
            elif self.rx_state == self.PACKET_LENGTH0:
                self.in_bytes = []
                self.in_bytes.append(b)
                self.rx_state = self.PACKET_LENGTH1
            elif self.rx_state == self.PACKET_LENGTH1:
                self.in_bytes.append(b)
                self.current_length = self.__convert_rxbytes_to_int16(0, self.in_bytes)
                self.rx_state = self.PACKET_RAW_DATA
                self.rx_index = 0
                self.rx_buffer = []
            elif self.rx_state == self.PACKET_RAW_DATA:
                if self.rx_index == self.current_length:
                    # Payload complete: this byte is the first LRC byte;
                    # compute the expected checksum over header + payload.
                    self.lrc_check = self.current_address + self.current_function + self.current_length
                    self.lrc_check = self.lrc_check + sum([ord(c) for c in self.rx_buffer])
                    self.in_bytes = []
                    self.in_bytes.append(b)
                    self.rx_state = self.PACKET_LRC_CHECK1
                else:
                    # add length check
                    self.rx_buffer.append(b)
                    self.rx_index = self.rx_index + 1
            elif self.rx_state == self.PACKET_LRC_CHECK1:
                self.in_bytes.append(b)
                self.lrcReceived = self.__convert_rxbytes_to_int16(0, self.in_bytes)
                if self.lrcReceived == self.lrc_check:
                    self.__parse_function()
                    self.rx_state = self.PACKET_END
                else:
                    # Checksum mismatch: silently drop the packet.
                    self.rx_state = self.PACKET_END
def __parse_sensor_data(self, data_mode=32):
    """Decode one sample from self.rx_buffer into the sensor attributes.

    Which fields are present — and therefore every byte offset — depends
    on the enable flags in self.sensor_configuration, consumed in the
    fixed order: gyro, accelerometer, magnetometer, angular velocity,
    quaternion, euler, linear acceleration, pressure, altitude,
    temperature.

    :param data_mode: 32 for IEEE-754 float fields (4 bytes each) or 16
        for fixed-point int16 fields (2 bytes each, scaled by 1/1000,
        with the per-field rescaling applied below).
    """
    o = 0  # running byte offset into rx_buffer
    r2d = 57.2958  # radians -> degrees
    if data_mode == 16:
        converter = lambda offset, l: float(self.__convert_rxbytes_to_int16(offset, l)) / 1000.0
        increment = 2
    else:
        converter = lambda offset, l: self.__convert_rxbytes_to_float(offset, l)
        increment = 4
    # Timestamp is always a 4-byte float, regardless of data_mode.
    # TODO: Add timestamp counter mode/elapsed mode
    self.timestamp = float(self.__convert_rxbytes_to_float(0, self.rx_buffer))
    o += 4
    # Frame counter starts at 0 on the very first parsed sample.
    if self.runOnce:
        self.frame_counter = 0
        self.runOnce = False
    else:
        self.frame_counter += 1
    if self.sensor_configuration.gyro_enable:
        self.gyr_x = converter(o, self.rx_buffer) * r2d
        o += increment
        self.gyr_y = converter(o, self.rx_buffer) * r2d
        o += increment
        self.gyr_z = converter(o, self.rx_buffer) * r2d
        o += increment
    if self.sensor_configuration.accelerometer_enable:
        self.acc_x = converter(o, self.rx_buffer)
        o += increment
        self.acc_y = converter(o, self.rx_buffer)
        o += increment
        self.acc_z = converter(o, self.rx_buffer)
        o += increment
    if self.sensor_configuration.magnetometer_enable:
        self.mag_x = converter(o, self.rx_buffer)
        o += increment
        self.mag_y = converter(o, self.rx_buffer)
        o += increment
        self.mag_z = converter(o, self.rx_buffer)
        o += increment
        # 100 Fixed point (net scale after the 1/1000 in converter)
        if data_mode == 16:
            self.mag_x *= 10
            self.mag_y *= 10
            self.mag_z *= 10
    if self.sensor_configuration.angular_velocity_enable:
        self.angular_vel_x = converter(o, self.rx_buffer) * r2d
        o += increment
        self.angular_vel_y = converter(o, self.rx_buffer) * r2d
        o += increment
        self.angular_vel_z = converter(o, self.rx_buffer) * r2d
        o += increment
    if self.sensor_configuration.quaternion_enable:
        self.quat_w = converter(o, self.rx_buffer)
        o += increment
        self.quat_x = converter(o, self.rx_buffer)
        o += increment
        self.quat_y = converter(o, self.rx_buffer)
        o += increment
        self.quat_z = converter(o, self.rx_buffer)
        o += increment
    if self.sensor_configuration.euler_enable:
        self.euler_x = converter(o, self.rx_buffer) * r2d
        o += increment
        self.euler_y = converter(o, self.rx_buffer) * r2d
        o += increment
        self.euler_z = converter(o, self.rx_buffer) * r2d
        o += increment
    if self.sensor_configuration.linear_acceleration_enable:
        self.linacc_x = converter(o, self.rx_buffer)
        o += increment
        self.linacc_y = converter(o, self.rx_buffer)
        o += increment
        self.linacc_z = converter(o, self.rx_buffer)
        o += increment
    if self.sensor_configuration.pressure_enable:
        self.pressure = converter(o, self.rx_buffer)
        o += increment
        # 10 Fixed point
        if data_mode == 16:
            self.pressure *= 100
    if self.sensor_configuration.altitude_enable:
        self.altitude = converter(o, self.rx_buffer)
        o += increment
        # 10 Fixed point
        if data_mode == 16:
            self.altitude *= 100
    if self.sensor_configuration.temperature_enable:
        self.temperature = converter(o, self.rx_buffer)
        o += increment
        # 100 Fixed point
        if data_mode == 16:
            self.temperature *= 10
def __parse_sensor_data_16bit(self):
    """Decode a 16-bit fixed-point sample from self.rx_buffer.

    NOTE(review): no caller is visible in this file —
    __parse_function() routes 16-bit packets to
    __parse_sensor_data(16) instead — so this looks like a legacy
    variant; confirm before relying on it.  Unlike
    __parse_sensor_data(), it honours timestamp_counter_mode_enable
    and scales the magnetometer/pressure by 1/100 directly.
    """
    o = 0  # running byte offset into rx_buffer
    r2d = 57.2958  # radians -> degrees
    # Timestamp: raw int counter or a float, per configuration.
    if self.sensor_configuration.timestamp_counter_mode_enable:
        self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
    else:
        self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
    o += 4
    self.frame_counter += 1
    if self.sensor_configuration.gyro_enable:
        self.gyr_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
        self.gyr_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
        self.gyr_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
    if self.sensor_configuration.accelerometer_enable:
        self.acc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.acc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.acc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
    if self.sensor_configuration.magnetometer_enable:
        self.mag_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
        o += 2
        self.mag_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
        o += 2
        self.mag_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
        o += 2
    if self.sensor_configuration.quaternion_enable:
        self.quat_w = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.quat_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.quat_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.quat_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
    if self.sensor_configuration.euler_enable:
        self.euler_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
        self.euler_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
        self.euler_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
        o += 2
    if self.sensor_configuration.linear_acceleration_enable:
        self.linacc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.linacc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
        self.linacc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
        o += 2
    if self.sensor_configuration.pressure_enable:
        self.pressure = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
        o += 2
# communication
def __get_config_register(self):
    """Request the 32-bit configuration register from the sensor.

    Blocks (via __wait_for_response) until the reply has been parsed by
    __parse_function(), which refreshes self.sensor_configuration.
    Logs and returns None when the sensor is not connected.
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    if self.verbose: logd(self.TAG, "Get config register")
    # Brief pause so any preceding command can settle before we query.
    time.sleep(.1)
    self.__lpbus_set_none(LPMS_GET_CONFIG)
    self.wait_for_data = True
    self.__wait_for_response()
def __send_data(self, function, length):
    """Frame one LPBUS packet and write it to the serial port.

    Packet layout: ':' marker, 16-bit imu_id, 16-bit function code,
    16-bit payload length, the payload (self.raw_tx_data, only when
    length > 0), a 16-bit LRC checksum, then CR LF.  The checksum is
    the sum of imu_id + function + length + payload byte values,
    mirroring the verification in __parse().

    NOTE: the frame is built by concatenating chr() output with
    struct.pack() output, which works only because both are str on
    Python 2.

    :param function: 16-bit LPBUS command/function code.
    :param length: payload length in bytes; 0 sends an empty payload.
    """
    tx_buffer = chr(0x3a)  # ':' start-of-packet marker
    tx_buffer += self.__convert_int16_to_txbytes(self.imu_id)
    tx_buffer += self.__convert_int16_to_txbytes(function)
    tx_buffer += self.__convert_int16_to_txbytes(length)
    if length > 0:
        tx_buffer += self.raw_tx_data
    tx_lrc_check = self.imu_id + function + length
    if length > 0:
        tx_lrc_check += sum([ord(c) for c in self.raw_tx_data])
    tx_buffer += self.__convert_int16_to_txbytes(tx_lrc_check)
    tx_buffer += chr(0x0d)  # CR
    tx_buffer += chr(0x0a)  # LF
    # write() returns the byte count; it was previously stored in an
    # unused local — intentionally ignored now.
    self.serial_port.write(tx_buffer)
def __lpbus_set_none(self, command):
    """Send *command* with an empty payload."""
    self.__send_data(command, 0)
def __lpbus_set_int32(self, command, v):
    """Send *command* with a single 32-bit integer payload *v*."""
    self.raw_tx_data = self.__convert_int_to_txbytes(v)
    self.__send_data(command, 4)
def __lpbus_set_data(self, command, length, dataBuffer):
    """Send *command* with *length* bytes of pre-packed payload."""
    self.raw_tx_data = dataBuffer
    self.__send_data(command, length)
def __wait_for_response(self):
    """Poll until the reader thread clears wait_for_ack/wait_for_data.

    NOTE(review): busy-waits with no timeout — this blocks forever if
    the sensor never answers; confirm whether a timeout is needed.
    """
    while self.wait_for_ack or self.wait_for_data:
        time.sleep(.1)
def __parse_configuration_register(self, cr):
    """Decode raw 32-bit config register *cr* into sensor_configuration."""
    self.sensor_configuration.parse(cr)
# User command
def connect(self):
    """Open the serial port, start the reader thread and begin streaming.

    Sequence: clear cached state, open the port, launch __run() in a
    background thread, then switch the sensor to command mode, fetch
    the configuration register, and switch back to streaming mode.

    :return: True on success; False if a reader thread is already
        alive or the serial port could not be opened.
    """
    if self.__thread_is_alive():
        loge(self.TAG, "Another connection established")
        return False
    try:
        self.__clear_params()
        self.thread = threading.Thread(target=self.__run, args=())
        self.serial_port = serial.Serial(self.port, self.baudrate)
        # Must be False before the thread starts: __run() loops on it.
        self.quit = False
        if self.verbose: logd(self.TAG , "Sensor connected")
        #thread.daemon = True # Daemonize thread
        self.thread.start() # Start the execution
        # Give the reader thread time to come up before talking.
        time.sleep(1)
        self.set_command_mode()
        self.__get_config_register()
        self.set_streaming_mode()
        return True
    except serial.SerialException:
        loge(self.TAG, "Could not open port " + self.port)
        loge(self.TAG, "Please try again")
        return False
def disconnect(self):
    """Signal the reader thread to stop and wait for it to exit.

    :return: always True.
    """
    self.quit = True
    if self.__thread_is_alive():
        self.thread.join()
    if self.verbose: logd(self.TAG , "sensor disconnected")
    return True
def is_connected(self):
    """Return True while the reader thread is servicing the port."""
    return self.is_sensor_connected
# Configuration and Status
def get_config_register(self):
    """Return the cached LpmsConfigurationSettings for this sensor.

    Does not query the hardware; the cache is refreshed whenever
    __get_config_register() round-trips LPMS_GET_CONFIG (during
    connect() and after mode/frequency changes).  The previously
    commented-out query code has been removed as dead code.
    """
    return self.sensor_configuration
def get_status_register(self):
    """Placeholder: reading the status register is not implemented."""
    pass
# Mode switching
def set_command_mode(self):
    """Put the sensor into command mode and block until it acks.

    :return: False if not connected; otherwise None (implicit) — do not
        rely on the return value for success.
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return False
    if self.verbose: logd(self.TAG, "Set command mode")
    self.__lpbus_set_none(LPMS_GOTO_COMMAND_MODE)
    self.wait_for_ack = True
    self.__wait_for_response()
def set_streaming_mode(self):
    """Put the sensor into continuous streaming mode.

    Goes through command mode first, then issues the stream-mode
    command and blocks until it is acked.

    :return: False if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return False
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Set streaming mode")
    self.__lpbus_set_none(LPMS_GOTO_STREAM_MODE)
    self.wait_for_ack = True
    self.__wait_for_response()
# Data transmision
def get_sensor_data(self):
    """Poll a single sample while the sensor is in command mode.

    Sends LPMS_GET_SENSOR_DATA, blocks until the reply has been parsed,
    then returns the same list layout as get_stream_data().

    :return: sample list, or False if the sensor is not connected.
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return False
    if self.verbose: logd(self.TAG, "Get sensor data")
    self.__lpbus_set_none(LPMS_GET_SENSOR_DATA)
    self.wait_for_data = True
    self.__wait_for_response()
    return self.get_stream_data()
def get_stream_data(self):
    """Return the most recently parsed sample as a flat list.

    Layout: [imu_id, timestamp, frame_counter, battery_level,
    battery_voltage, temperature, [acc x,y,z], [gyr x,y,z],
    [mag x,y,z], [quat w,x,y,z], [euler x,y,z], [linacc x,y,z]].
    """
    return [
        self.imu_id,
        self.timestamp,
        self.frame_counter,
        self.battery_level,
        self.battery_voltage,
        self.temperature,
        [self.acc_x, self.acc_y, self.acc_z],
        [self.gyr_x, self.gyr_y, self.gyr_z],
        [self.mag_x, self.mag_y, self.mag_z],
        [self.quat_w, self.quat_x, self.quat_y, self.quat_z],
        [self.euler_x, self.euler_y, self.euler_z],
        [self.linacc_x, self.linacc_y, self.linacc_z],
    ]
def set_transmit_data(self):
    """Placeholder: selecting transmitted fields is not implemented."""
    pass
def set_stream_frequency(self, freq):
    """Set the sensor's streaming rate.

    Switches to command mode, writes the frequency, re-reads the
    configuration register, then returns to streaming mode.

    :param freq: one of the LPMS_STREAM_FREQ_* constants (Hz).
    :return: None if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Set stream freq: "+str(freq)+"Hz")
    self.__lpbus_set_int32(LPMS_SET_STREAM_FREQ , freq)
    self.wait_for_ack = True
    self.__wait_for_response()
    self.__get_config_register()
    self.set_streaming_mode()
# Convenience wrappers fixing the rate argument of set_stream_frequency().
def set_stream_frequency_5Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_5HZ)
def set_stream_frequency_10Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_10HZ)
def set_stream_frequency_25Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_25HZ)
def set_stream_frequency_50Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_50HZ)
def set_stream_frequency_100Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_100HZ)
def set_stream_frequency_200Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_200HZ)
def set_stream_frequency_400Hz(self):
    self.set_stream_frequency(LPMS_STREAM_FREQ_400HZ)
def set_16bit_mode(self):
    """Switch the LPBUS payload to 16-bit fixed-point samples.

    Round-trips through command mode, re-reads the configuration
    register, then resumes streaming.

    :return: None if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Set 16 bit data")
    self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_16)
    self.wait_for_ack = True
    self.__wait_for_response()
    self.__get_config_register()
    self.set_streaming_mode()
def set_32bit_mode(self):
    """Switch the LPBUS payload to 32-bit float samples.

    Round-trips through command mode, re-reads the configuration
    register, then resumes streaming.

    :return: None if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Set 32 bit data")
    self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_32)
    self.wait_for_ack = True
    self.__wait_for_response()
    self.__get_config_register()
    self.set_streaming_mode()
# Register value save and reset
def save_parameters(self):
    """Persist the current register values to the sensor's flash.

    :return: None if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Save parameters to sensor")
    self.__lpbus_set_none(LPMS_WRITE_REGISTERS)
    self.wait_for_ack = True
    self.__wait_for_response()
    self.set_streaming_mode()
def reset_factory(self):
    """Restore the sensor to factory default settings.

    Re-reads the configuration register afterwards so the cached
    sensor_configuration reflects the defaults.

    :return: None if not connected; otherwise None (implicit).
    """
    if not self.is_connected():
        loge(self.TAG, "sensor not connected")
        return None
    self.set_command_mode()
    if self.verbose: logd(self.TAG, "Reset factory settings")
    self.__lpbus_set_none(LPMS_RESET_FACTORY_VALUE)
    self.wait_for_ack = True
    self.__wait_for_response()
    self.__get_config_register()
    self.set_streaming_mode()
# Reference setting and offset reset
def reset_reference(self):
    """Placeholder: orientation-reference reset is not implemented."""
    pass
|
import time
import serial
import threading
import struct
import sys
from datetime import datetime, timedelta
from LpmsConfig import *
from lputils import *
from LpmsConfigurationSettings import LpmsConfigurationSettings
#TODO:
# check serial port opened before executing commands
# add wait for ack routine
class LpmsB(object):
TAG = "LPMSB"
runOnce = True
verbose = True
is_thread_running = False
sensor_configuration = LpmsConfigurationSettings()
PACKET_ADDRESS0 = 0
PACKET_ADDRESS1 = 1
PACKET_FUNCTION0 = 2
PACKET_FUNCTION1 = 3
PACKET_RAW_DATA = 4
PACKET_LRC_CHECK0 = 5
PACKET_LRC_CHECK1 = 6
PACKET_END = 7
PACKET_LENGTH0 = 8
PACKET_LENGTH1 = 9
current_length = 0
current_function = 0
current_address = 0
rx_state = PACKET_END
in_bytes = []
rx_buffer = []
raw_tx_data = []
rx_index = 0
lrc_check = 0
wait_for_ack = False
wait_for_data = False
is_sensor_connected = False
config_register = 0
status_register = 0
imu_id = 0
timestamp = 0
frame_counter = 0
battery_level = 0
battery_voltage = 0
temperature = 0
acc_x = 0
acc_y = 0
acc_z = 0
gyr_x = 0
gyr_y = 0
gyr_z = 0
mag_x = 0
mag_y = 0
mag_z = 0
angular_vel_x = 0
angular_vel_y = 0
angular_vel_z = 0
quat_w = 0
quat_x = 0
quat_y = 0
quat_z = 0
euler_x = 0
euler_y = 0
euler_z = 0
linacc_x = 0
linacc_y = 0
linacc_z = 0
altitude = 0
pressure = 0
# debug log
debug_log_size = 0
debug_log_size_index = 0
def __init__(self, port, baudrate):
self.port = port
self.baudrate = baudrate
self.__init_params()
def __clear_params(self):
self.current_length = 0
self.current_function = 0
self.current_address = 0
self.rx_state = self.PACKET_END
self.in_bytes = []
self.rx_buffer = []
self.raw_tx_data = []
self.rx_index = 0
self.lrc_check = 0
self.imu_id = 0
self.timestamp = 0
self.frame_counter = 0
self.temperature = 0
self.acc_x = 0
self.acc_y = 0
self.acc_z = 0
self.gyr_x = 0
self.gyr_y = 0
self.gyr_z = 0
self.mag_x = 0
self.mag_y = 0
self.mag_z = 0
self.angular_vel_x = 0
self.angular_vel_y = 0
self.angular_vel_z = 0
self.quat_w = 0
self.quat_x = 0
self.quat_y = 0
self.quat_z = 0
self.euler_x = 0
self.euler_y = 0
self.euler_z = 0
self.linacc_x = 0
self.linacc_y = 0
self.linacc_z = 0
self.altitude = 0
self.pressure = 0
self.wait_for_ack = False
self.wait_for_data = False
def __init_params(self):
self.__clear_params()
def __thread_is_alive(self):
try:
return self.thread.isAlive()
except AttributeError:
return False
def __run(self):
""" Method that runs forever """
self.is_thread_running = True
while not self.quit:
self.is_sensor_connected = True
bytesToRead = self.serial_port.inWaiting()
if bytesToRead > 0:
reading = self.serial_port.read(bytesToRead)
#print reading
self.__parse(reading)
self.serial_port.close()
self.is_sensor_connected = False
self.is_thread_running = False
# TODO: add offset length check
def __convert_rxbytes_to_int16(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("h", ''.join(dataList[offset:offset+2]))
return i
def __convert_rxbytes_to_int(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("i", ''.join(dataList[offset:offset+4]))
return i
def __convert_rxbytes_to_float(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("f", ''.join(dataList[offset:offset+4]))
return i
def __convert_int16_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("h", v)
def __convert_int_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("i", v)
def __print_str_to_hex(self, s):
print ":".join("{:02x}".format(ord(c)) for c in s)
# Parser
def __parse_function(self):
cf = self.current_function
if cf == LPMS_ACK:
if self.verbose: logd(self.TAG , "Received Ack")
self.wait_for_ack = False
elif cf == LPMS_NACK:
if self.verbose: logd(self.TAG , "Received Nack")
self.wait_for_ack = False
elif cf == LPMS_GET_CONFIG:
self.config_register = self.__convert_rxbytes_to_int(0, self.rx_buffer)
#print"{0:b}".format(self.config_register)
self.__parse_configuration_register(self.config_register)
self.wait_for_data = False
elif cf == LPMS_GET_SENSOR_DATA:
if self.sensor_configuration.sixteen_bit_data_enable:
self.__parse_sensor_data(16)
else:
self.__parse_sensor_data()
self.wait_for_data = False
elif cf == GET_FIRMWARE_VERSION:
vmajor = self.__convert_rxbytes_to_int(8, self.rx_buffer)
vminor = self.__convert_rxbytes_to_int(4, self.rx_buffer)
vbuild = self.__convert_rxbytes_to_int(0, self.rx_buffer)
self.firmwareVersion = str(vmajor) + "." + str(vminor) + "." + str(vbuild)
self.wait_for_data = False
elif cf == GET_PING:
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = self.__convert_rxbytes_to_int(0, self.rx_buffer)
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
elif cf == GET_TEMPERATURE:
self.temperature = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
def __parse(self, data):
self.lrcReceived = 0
for b in data:
if self.rx_state == self.PACKET_END:
if (b == ':'):
self.rx_state = self.PACKET_ADDRESS0
elif self.rx_state == self.PACKET_ADDRESS0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_ADDRESS1
elif self.rx_state == self.PACKET_ADDRESS1:
self.in_bytes.append(b)
self.current_address = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.imu_id = self.current_address
self.rx_state = self.PACKET_FUNCTION0
elif self.rx_state == self.PACKET_FUNCTION0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_FUNCTION1
elif self.rx_state == self.PACKET_FUNCTION1:
self.in_bytes.append(b)
self.current_function = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_LENGTH0
elif self.rx_state == self.PACKET_LENGTH0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LENGTH1
elif self.rx_state == self.PACKET_LENGTH1:
self.in_bytes.append(b)
self.current_length = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_RAW_DATA
self.rx_index = 0
self.rx_buffer = []
elif self.rx_state == self.PACKET_RAW_DATA:
if self.rx_index == self.current_length:
self.lrc_check = self.current_address + self.current_function + self.current_length
self.lrc_check = self.lrc_check + sum([ord(c) for c in self.rx_buffer])
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LRC_CHECK1
else:
# add length check
self.rx_buffer.append(b)
self.rx_index = self.rx_index + 1
elif self.rx_state == self.PACKET_LRC_CHECK1:
self.in_bytes.append(b)
self.lrcReceived = self.__convert_rxbytes_to_int16(0, self.in_bytes)
if self.lrcReceived == self.lrc_check:
self.__parse_function()
self.rx_state = self.PACKET_END
else:
self.rx_state = self.PACKET_END
def __parse_sensor_data(self, data_mode=32):
o = 0
r2d = 57.2958
if data_mode == 16:
converter = lambda offset, l: float(self.__convert_rxbytes_to_int16(offset, l)) / 1000.0
increment = 2
else:
converter = lambda offset, l: self.__convert_rxbytes_to_float(offset, l)
increment = 4
# TODO: Add timestamp counter mode/elapsed mode
self.timestamp = float(self.__convert_rxbytes_to_float(0, self.rx_buffer))
o += 4
if self.runOnce:
self.frame_counter = 0
self.runOnce = False
else:
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_y = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.accelerometer_enable:
self.acc_x = converter(o, self.rx_buffer)
o += increment
self.acc_y = converter(o, self.rx_buffer)
o += increment
self.acc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.magnetometer_enable:
self.mag_x = converter(o, self.rx_buffer)
o += increment
self.mag_y = converter(o, self.rx_buffer)
o += increment
self.mag_z = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.mag_x *= 10
self.mag_y *= 10
self.mag_z *= 10
if self.sensor_configuration.angular_velocity_enable:
self.angular_vel_x = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_y = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.quaternion_enable:
self.quat_w = converter(o, self.rx_buffer)
o += increment
self.quat_x = converter(o, self.rx_buffer)
o += increment
self.quat_y = converter(o, self.rx_buffer)
o += increment
self.quat_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.euler_enable:
self.euler_x = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_y = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = converter(o, self.rx_buffer)
o += increment
self.linacc_y = converter(o, self.rx_buffer)
o += increment
self.linacc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.pressure_enable:
self.pressure = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.pressure *= 100
if self.sensor_configuration.altitude_enable:
self.altitude = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.altitude *= 100
if self.sensor_configuration.temperature_enable:
self.temperature = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.temperature *= 10
def __parse_sensor_data_16bit(self):
o = 0
r2d = 57.2958
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
o += 4
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
if self.sensor_configuration.accelerometer_enable:
self.acc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.magnetometer_enable:
self.mag_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
if self.sensor_configuration.quaternion_enable:
self.quat_w = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.quat_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.quat_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.quat_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.euler_enable:
self.euler_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.euler_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.euler_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.pressure_enable:
self.pressure = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
# communication
def __get_config_register(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
if self.verbose: logd(self.TAG, "Get config register")
time.sleep(.1)
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
def __send_data(self, function, length):
txlrc_check = 0
txBuffer = chr(0x3a)
txBuffer += self.__convert_int16_to_txbytes(self.imu_id)
txBuffer += self.__convert_int16_to_txbytes(function)
txBuffer += self.__convert_int16_to_txbytes(length)
if length > 0:
txBuffer += self.raw_tx_data
txlrc_check = self.imu_id + function + length
if length > 0:
txlrc_check += sum([ord(c) for c in self.raw_tx_data])
txBuffer += self.__convert_int16_to_txbytes(txlrc_check)
txBuffer += chr(0x0d)
txBuffer += chr(0x0a)
bytesSent = self.serial_port.write(txBuffer)
def __lpbus_set_none(self, command):
self.__send_data(command, 0)
def __lpbus_set_int32(self, command, v):
self.raw_tx_data = self.__convert_int_to_txbytes(v)
self.__send_data(command, 4)
def __lpbus_set_data(self, command, length, dataBuffer):
self.raw_tx_data = dataBuffer
self.__send_data(command, length)
def __wait_for_response(self):
while self.wait_for_ack or self.wait_for_data:
time.sleep(.1)
def __parse_configuration_register(self, cr):
self.sensor_configuration.parse(cr)
# User command
def connect(self):
if self.__thread_is_alive():
loge(self.TAG, "Another connection established")
return False
try:
self.__clear_params()
self.thread = threading.Thread(target=self.__run, args=())
self.serial_port = serial.Serial(self.port, self.baudrate)
self.quit = False
if self.verbose: logd(self.TAG , "Sensor connected")
#thread.daemon = True # Daemonize thread
self.thread.start() # Start the execution
time.sleep(1)
self.set_command_mode()
self.__get_config_register()
self.set_streaming_mode()
return True
except serial.SerialException:
loge(self.TAG, "Could not open port " + self.port)
loge(self.TAG, "Please try again")
return False
def disconnect(self):
self.quit = True
if self.__thread_is_alive():
self.thread.join()
if self.verbose: logd(self.TAG , "sensor disconnected")
return True
def is_connected(self):
return self.is_sensor_connected
# Configuration and Status
def get_config_register(self):
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
"""
return self.sensor_configuration
def get_status_register(self):
pass
# Mode switching
def set_command_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Set command mode")
self.__lpbus_set_none(LPMS_GOTO_COMMAND_MODE)
self.wait_for_ack = True
self.__wait_for_response()
def set_streaming_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set streaming mode")
self.__lpbus_set_none(LPMS_GOTO_STREAM_MODE)
self.wait_for_ack = True
self.__wait_for_response()
# Data transmision
def get_sensor_data(self):
"""
get sensor data during command Mode
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Get sensor data")
self.__lpbus_set_none(LPMS_GET_SENSOR_DATA)
self.wait_for_data = True
self.__wait_for_response()
return self.get_stream_data()
def get_stream_data(self):
"""
get sensor data during stream Mode
"""
data = []
data.append(self.imu_id)
data.append(self.timestamp)
data.append(self.frame_counter)
data.append(self.battery_level)
data.append(self.battery_voltage)
data.append(self.temperature)
data.append([self.acc_x, self.acc_y, self.acc_z])
data.append([self.gyr_x, self.gyr_y, self.gyr_z])
data.append([self.mag_x, self.mag_y, self.mag_z])
data.append([self.quat_w, self.quat_x, self.quat_y, self.quat_z])
data.append([self.euler_x, self.euler_y, self.euler_z])
data.append([self.linacc_x, self.linacc_y, self.linacc_z])
return data
def set_transmit_data(self):
pass
def set_stream_frequency(self, freq):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set stream freq: "+str(freq)+"Hz")
self.__lpbus_set_int32(LPMS_SET_STREAM_FREQ , freq)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_stream_frequency_5Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_5HZ)
def set_stream_frequency_10Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_10HZ)
def set_stream_frequency_25Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_25HZ)
def set_stream_frequency_50Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_50HZ)
def set_stream_frequency_100Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_100HZ)
def set_stream_frequency_200Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_200HZ)
def set_stream_frequency_400Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_400HZ)
def set_16bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 16 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_16)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_32bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 32 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_32)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Register value save and reset
def save_parameters(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Save parameters to sensor")
self.__lpbus_set_none(LPMS_WRITE_REGISTERS)
self.wait_for_ack = True
self.__wait_for_response()
self.set_streaming_mode()
def reset_factory(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Reset factory settings")
self.__lpbus_set_none(LPMS_RESET_FACTORY_VALUE)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Reference setting and offset reset
def reset_reference(self):
pass
|
en
| 0.525622
|
#TODO: # check serial port opened before executing commands # add wait for ack routine # debug log Method that runs forever #print reading # TODO: add offset length check dataList is a list dataList is a list dataList is a list return bytesarray return bytesarray # Parser #print"{0:b}".format(self.config_register) # add length check # TODO: Add timestamp counter mode/elapsed mode # 100 Fixed point # 10 Fixed point # 10 Fixed point # 100 Fixed point # communication # User command #thread.daemon = True # Daemonize thread # Start the execution # Configuration and Status if not self.is_connected(): loge(self.TAG, "sensor not connected") return None self.__lpbus_set_none(LPMS_GET_CONFIG) self.wait_for_data = True self.__wait_for_response() # Mode switching # Data transmision get sensor data during command Mode get sensor data during stream Mode # Register value save and reset # Reference setting and offset reset
| 2.591282
| 3
|
entities/player.py
|
emredesu/re-one
| 0
|
6627320
|
<filename>entities/player.py
import pygame
from tools.colours import WHITE, RED
from tools.globals import SCREEN_HEIGHT, SCREEN_WIDTH
class Player(pygame.sprite.Sprite):
    """The player-controlled sprite; spawns centred near the bottom edge."""

    def __init__(self, image):
        super().__init__()
        self.image = image
        self.rect = self.image.get_rect()
        self._spawn()

    def _spawn(self):
        # Horizontal centre, 40 px above the bottom of the screen.
        self.rect.x = int(SCREEN_WIDTH / 2)
        self.rect.y = SCREEN_HEIGHT - 40

    def move_right(self, pixels):
        # 30 appears to be the sprite width — TODO confirm.
        if self.rect.x + 30 <= SCREEN_WIDTH:
            self.rect.x += pixels

    def move_left(self, pixels):
        if self.rect.x >= 0:
            self.rect.x -= pixels

    def move_down(self, pixels):
        if self.rect.y + 30 <= SCREEN_HEIGHT:
            self.rect.y += pixels

    def move_up(self, pixels):
        if self.rect.y >= 0:
            self.rect.y -= pixels

    def reset_position(self):
        self._spawn()
class PlayerBullet(pygame.sprite.Sprite):
    """A 6x6 projectile fired by the player."""

    def __init__(self, spawn_pos_x, spawn_pos_y):
        super().__init__()
        surface = pygame.Surface([6, 6])
        surface.fill(WHITE)
        surface.set_colorkey(WHITE)
        # temp: plain red square placeholder graphic
        pygame.draw.rect(surface, RED, [0, 0, 6, 6])
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.x = spawn_pos_x
        self.rect.y = spawn_pos_y
|
<filename>entities/player.py
import pygame
from tools.colours import WHITE, RED
from tools.globals import SCREEN_HEIGHT, SCREEN_WIDTH
class Player(pygame.sprite.Sprite):
def __init__(self, image):
super().__init__()
self.image = image
self.rect = self.image.get_rect()
self.rect.x = int(SCREEN_WIDTH / 2)
self.rect.y = SCREEN_HEIGHT - 40
def move_right(self, pixels):
if not self.rect.x + 30 > SCREEN_WIDTH:
self.rect.x += pixels
def move_left(self, pixels):
if not self.rect.x < 0:
self.rect.x -= pixels
def move_down(self, pixels):
if not self.rect.y + 30 > SCREEN_HEIGHT:
self.rect.y += pixels
def move_up(self, pixels):
if not self.rect.y < 0:
self.rect.y -= pixels
def reset_position(self):
self.rect.x = int(SCREEN_WIDTH / 2)
self.rect.y = SCREEN_HEIGHT - 40
class PlayerBullet(pygame.sprite.Sprite):
def __init__(self, spawn_pos_x, spawn_pos_y):
super().__init__()
self.image = pygame.Surface([6, 6])
self.image.fill(WHITE)
self.image.set_colorkey(WHITE)
# temp
pygame.draw.rect(self.image, RED, [0, 0, 6, 6])
self.rect = self.image.get_rect()
self.rect.x = spawn_pos_x
self.rect.y = spawn_pos_y
|
none
| 1
| 2.951992
| 3
|
|
tests/adapters/maze_xml_test.py
|
the-hypermedia-project/representor-python
| 11
|
6627321
|
<reponame>the-hypermedia-project/representor-python
import xml.etree.ElementTree as ET
import unittest
import json
from representor import Representor
from representor.contrib.maze_xml import MazeXMLAdapter
cell_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<maze version="1.0">
<cell>
<link href="http://amundsen.com/examples/mazes/2d/five-by-five/0:north" rel="current" debug="0:1,1,1,0" total="25" side="5" />
<link href="http://amundsen.com/examples/mazes/2d/five-by-five/5:east" rel="east" />
</cell>
</maze>"""
class TestClass(unittest.TestCase):
def test_media_type(self):
self.assertEqual(MazeXMLAdapter.media_type,
"application/vnd.amundsen.maze+xml")
class TestParse(unittest.TestCase):
def setUp(self):
Representor.adapters.add(MazeXMLAdapter)
self.resource = Representor.adapters.translate_from("application/vnd.amundsen.maze+xml",
cell_xml)
def tearDown(self):
Representor.reset_adapters()
def test_parse_links(self):
links = self.resource.links.all()
self.assertEqual(len(links), 2)
self.assertEqual(len(self.resource.links.filter_by_rel("current")), 1)
def test_type(self):
pass
class TestBuild(unittest.TestCase):
def setUp(self):
Representor.adapters.add(MazeXMLAdapter)
self.resource = Representor()
self.resource.meta.attributes.add("type", "cell")
self.resource.links.add("current", "http://example.com/cell/2")
self.resource.links.add("east", "http://example.com/cell/3")
self.raw_xml = self.resource.translate_to("application/vnd.amundsen.maze+xml")
def tearDown(self):
Representor.reset_adapters()
def test_build(self):
root = ET.fromstring(self.raw_xml)
self.assertEqual(root[0].tag, "cell")
self.assertEqual(len(root[0].findall("link")), 2)
|
import xml.etree.ElementTree as ET
import unittest
import json
from representor import Representor
from representor.contrib.maze_xml import MazeXMLAdapter
cell_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<maze version="1.0">
<cell>
<link href="http://amundsen.com/examples/mazes/2d/five-by-five/0:north" rel="current" debug="0:1,1,1,0" total="25" side="5" />
<link href="http://amundsen.com/examples/mazes/2d/five-by-five/5:east" rel="east" />
</cell>
</maze>"""
class TestClass(unittest.TestCase):
def test_media_type(self):
self.assertEqual(MazeXMLAdapter.media_type,
"application/vnd.amundsen.maze+xml")
class TestParse(unittest.TestCase):
def setUp(self):
Representor.adapters.add(MazeXMLAdapter)
self.resource = Representor.adapters.translate_from("application/vnd.amundsen.maze+xml",
cell_xml)
def tearDown(self):
Representor.reset_adapters()
def test_parse_links(self):
links = self.resource.links.all()
self.assertEqual(len(links), 2)
self.assertEqual(len(self.resource.links.filter_by_rel("current")), 1)
def test_type(self):
pass
class TestBuild(unittest.TestCase):
def setUp(self):
Representor.adapters.add(MazeXMLAdapter)
self.resource = Representor()
self.resource.meta.attributes.add("type", "cell")
self.resource.links.add("current", "http://example.com/cell/2")
self.resource.links.add("east", "http://example.com/cell/3")
self.raw_xml = self.resource.translate_to("application/vnd.amundsen.maze+xml")
def tearDown(self):
Representor.reset_adapters()
def test_build(self):
root = ET.fromstring(self.raw_xml)
self.assertEqual(root[0].tag, "cell")
self.assertEqual(len(root[0].findall("link")), 2)
|
en
| 0.360688
|
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?> <maze version="1.0"> <cell> <link href="http://amundsen.com/examples/mazes/2d/five-by-five/0:north" rel="current" debug="0:1,1,1,0" total="25" side="5" /> <link href="http://amundsen.com/examples/mazes/2d/five-by-five/5:east" rel="east" /> </cell> </maze>
| 2.771275
| 3
|
ryu/app/wsgi.py
|
SYBreloom/ryu
| 975
|
6627322
|
<reponame>SYBreloom/ryu
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 <NAME> <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from types import MethodType
from routes import Mapper
from routes.util import URLGenerator
import six
from tinyrpc.server import RPCServer
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.dispatch import public as rpc_public
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports import ServerTransport, ClientTransport
from tinyrpc.client import RPCClient
import webob.dec
import webob.exc
from webob.request import Request as webob_Request
from webob.response import Response as webob_Response
from ryu import cfg
from ryu.lib import hub
DEFAULT_WSGI_HOST = '0.0.0.0'
DEFAULT_WSGI_PORT = 8080
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt(
'wsapi-host', default=DEFAULT_WSGI_HOST,
help='webapp listen host (default %s)' % DEFAULT_WSGI_HOST),
cfg.IntOpt(
'wsapi-port', default=DEFAULT_WSGI_PORT,
help='webapp listen port (default %s)' % DEFAULT_WSGI_PORT),
])
HEX_PATTERN = r'0x[0-9a-z]+'
DIGIT_PATTERN = r'[1-9][0-9]*'
def route(name, path, methods=None, requirements=None):
def _route(controller_method):
controller_method.routing_info = {
'name': name,
'path': path,
'methods': methods,
'requirements': requirements,
}
return controller_method
return _route
class Request(webob_Request):
"""
Wrapper class for webob.request.Request.
The behavior of this class is the same as webob.request.Request
except for setting "charset" to "UTF-8" automatically.
"""
DEFAULT_CHARSET = "UTF-8"
def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs):
super(Request, self).__init__(
environ, charset=charset, *args, **kwargs)
class Response(webob_Response):
"""
Wrapper class for webob.response.Response.
The behavior of this class is the same as webob.response.Response
except for setting "charset" to "UTF-8" automatically.
"""
DEFAULT_CHARSET = "UTF-8"
def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs):
super(Response, self).__init__(charset=charset, *args, **kwargs)
class WebSocketRegistrationWrapper(object):
def __init__(self, func, controller):
self._controller = controller
self._controller_method = MethodType(func, controller)
def __call__(self, ws):
wsgi_application = self._controller.parent
ws_manager = wsgi_application.websocketmanager
ws_manager.add_connection(ws)
try:
self._controller_method(ws)
finally:
ws_manager.delete_connection(ws)
class _AlreadyHandledResponse(Response):
# XXX: Eventlet API should not be used directly.
# https://github.com/benoitc/gunicorn/pull/2581
from packaging import version
import eventlet
if version.parse(eventlet.__version__) >= version.parse("0.30.3"):
import eventlet.wsgi
_ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None)
else:
from eventlet.wsgi import ALREADY_HANDLED
_ALREADY_HANDLED = ALREADY_HANDLED
def __call__(self, environ, start_response):
return self._ALREADY_HANDLED
def websocket(name, path):
def _websocket(controller_func):
def __websocket(self, req, **_):
wrapper = WebSocketRegistrationWrapper(controller_func, self)
ws_wsgi = hub.WebSocketWSGI(wrapper)
ws_wsgi(req.environ, req.start_response)
# XXX: In order to prevent the writing to a already closed socket.
# This issue is caused by combined use:
# - webob.dec.wsgify()
# - eventlet.wsgi.HttpProtocol.handle_one_response()
return _AlreadyHandledResponse()
__websocket.routing_info = {
'name': name,
'path': path,
'methods': None,
'requirements': None,
}
return __websocket
return _websocket
class ControllerBase(object):
special_vars = ['action', 'controller']
def __init__(self, req, link, data, **config):
self.req = req
self.link = link
self.data = data
self.parent = None
for name, value in config.items():
setattr(self, name, value)
def __call__(self, req):
action = self.req.urlvars.get('action', 'index')
if hasattr(self, '__before__'):
self.__before__()
kwargs = self.req.urlvars.copy()
for attr in self.special_vars:
if attr in kwargs:
del kwargs[attr]
return getattr(self, action)(req, **kwargs)
class WebSocketDisconnectedError(Exception):
pass
class WebSocketServerTransport(ServerTransport):
def __init__(self, ws):
self.ws = ws
def receive_message(self):
message = self.ws.wait()
if message is None:
raise WebSocketDisconnectedError()
context = None
return context, message
def send_reply(self, context, reply):
self.ws.send(six.text_type(reply))
class WebSocketRPCServer(RPCServer):
def __init__(self, ws, rpc_callback):
dispatcher = RPCDispatcher()
dispatcher.register_instance(rpc_callback)
super(WebSocketRPCServer, self).__init__(
WebSocketServerTransport(ws),
JSONRPCProtocol(),
dispatcher,
)
def serve_forever(self):
try:
super(WebSocketRPCServer, self).serve_forever()
except WebSocketDisconnectedError:
return
def _spawn(self, func, *args, **kwargs):
hub.spawn(func, *args, **kwargs)
class WebSocketClientTransport(ClientTransport):
def __init__(self, ws, queue):
self.ws = ws
self.queue = queue
def send_message(self, message, expect_reply=True):
self.ws.send(six.text_type(message))
if expect_reply:
return self.queue.get()
class WebSocketRPCClient(RPCClient):
def __init__(self, ws):
self.ws = ws
self.queue = hub.Queue()
super(WebSocketRPCClient, self).__init__(
JSONRPCProtocol(),
WebSocketClientTransport(ws, self.queue),
)
def serve_forever(self):
while True:
msg = self.ws.wait()
if msg is None:
break
self.queue.put(msg)
class wsgify_hack(webob.dec.wsgify):
def __call__(self, environ, start_response):
self.kwargs['start_response'] = start_response
return super(wsgify_hack, self).__call__(environ, start_response)
class WebSocketManager(object):
def __init__(self):
self._connections = []
def add_connection(self, ws):
self._connections.append(ws)
def delete_connection(self, ws):
self._connections.remove(ws)
def broadcast(self, msg):
for connection in self._connections:
connection.send(msg)
class WSGIApplication(object):
def __init__(self, **config):
self.config = config
self.mapper = Mapper()
self.registory = {}
self._wsmanager = WebSocketManager()
super(WSGIApplication, self).__init__()
def _match(self, req):
# Note: Invoke the new API, first. If the arguments unmatched,
# invoke the old API.
try:
return self.mapper.match(environ=req.environ)
except TypeError:
self.mapper.environ = req.environ
return self.mapper.match(req.path_info)
@wsgify_hack
def __call__(self, req, start_response):
match = self._match(req)
if not match:
return webob.exc.HTTPNotFound()
req.start_response = start_response
req.urlvars = match
link = URLGenerator(self.mapper, req.environ)
data = None
name = match['controller'].__name__
if name in self.registory:
data = self.registory[name]
controller = match['controller'](req, link, data, **self.config)
controller.parent = self
return controller(req)
def register(self, controller, data=None):
def _target_filter(attr):
if not inspect.ismethod(attr) and not inspect.isfunction(attr):
return False
if not hasattr(attr, 'routing_info'):
return False
return True
methods = inspect.getmembers(controller, _target_filter)
for method_name, method in methods:
routing_info = getattr(method, 'routing_info')
name = routing_info['name']
path = routing_info['path']
conditions = {}
if routing_info.get('methods'):
conditions['method'] = routing_info['methods']
requirements = routing_info.get('requirements') or {}
self.mapper.connect(name,
path,
controller=controller,
requirements=requirements,
action=method_name,
conditions=conditions)
if data:
self.registory[controller.__name__] = data
@property
def websocketmanager(self):
return self._wsmanager
class WSGIServer(hub.WSGIServer):
def __init__(self, application, **config):
super(WSGIServer, self).__init__((CONF.wsapi_host, CONF.wsapi_port),
application, **config)
def __call__(self):
self.serve_forever()
def start_service(app_mgr):
for instance in app_mgr.contexts.values():
if instance.__class__ == WSGIApplication:
return WSGIServer(instance)
return None
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 <NAME> <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from types import MethodType
from routes import Mapper
from routes.util import URLGenerator
import six
from tinyrpc.server import RPCServer
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.dispatch import public as rpc_public
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports import ServerTransport, ClientTransport
from tinyrpc.client import RPCClient
import webob.dec
import webob.exc
from webob.request import Request as webob_Request
from webob.response import Response as webob_Response
from ryu import cfg
from ryu.lib import hub
DEFAULT_WSGI_HOST = '0.0.0.0'
DEFAULT_WSGI_PORT = 8080
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt(
'wsapi-host', default=DEFAULT_WSGI_HOST,
help='webapp listen host (default %s)' % DEFAULT_WSGI_HOST),
cfg.IntOpt(
'wsapi-port', default=DEFAULT_WSGI_PORT,
help='webapp listen port (default %s)' % DEFAULT_WSGI_PORT),
])
HEX_PATTERN = r'0x[0-9a-z]+'
DIGIT_PATTERN = r'[1-9][0-9]*'
def route(name, path, methods=None, requirements=None):
def _route(controller_method):
controller_method.routing_info = {
'name': name,
'path': path,
'methods': methods,
'requirements': requirements,
}
return controller_method
return _route
class Request(webob_Request):
"""
Wrapper class for webob.request.Request.
The behavior of this class is the same as webob.request.Request
except for setting "charset" to "UTF-8" automatically.
"""
DEFAULT_CHARSET = "UTF-8"
def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs):
super(Request, self).__init__(
environ, charset=charset, *args, **kwargs)
class Response(webob_Response):
"""
Wrapper class for webob.response.Response.
The behavior of this class is the same as webob.response.Response
except for setting "charset" to "UTF-8" automatically.
"""
DEFAULT_CHARSET = "UTF-8"
def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs):
super(Response, self).__init__(charset=charset, *args, **kwargs)
class WebSocketRegistrationWrapper(object):
def __init__(self, func, controller):
self._controller = controller
self._controller_method = MethodType(func, controller)
def __call__(self, ws):
wsgi_application = self._controller.parent
ws_manager = wsgi_application.websocketmanager
ws_manager.add_connection(ws)
try:
self._controller_method(ws)
finally:
ws_manager.delete_connection(ws)
class _AlreadyHandledResponse(Response):
# XXX: Eventlet API should not be used directly.
# https://github.com/benoitc/gunicorn/pull/2581
from packaging import version
import eventlet
if version.parse(eventlet.__version__) >= version.parse("0.30.3"):
import eventlet.wsgi
_ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None)
else:
from eventlet.wsgi import ALREADY_HANDLED
_ALREADY_HANDLED = ALREADY_HANDLED
def __call__(self, environ, start_response):
return self._ALREADY_HANDLED
def websocket(name, path):
def _websocket(controller_func):
def __websocket(self, req, **_):
wrapper = WebSocketRegistrationWrapper(controller_func, self)
ws_wsgi = hub.WebSocketWSGI(wrapper)
ws_wsgi(req.environ, req.start_response)
# XXX: In order to prevent the writing to a already closed socket.
# This issue is caused by combined use:
# - webob.dec.wsgify()
# - eventlet.wsgi.HttpProtocol.handle_one_response()
return _AlreadyHandledResponse()
__websocket.routing_info = {
'name': name,
'path': path,
'methods': None,
'requirements': None,
}
return __websocket
return _websocket
class ControllerBase(object):
special_vars = ['action', 'controller']
def __init__(self, req, link, data, **config):
self.req = req
self.link = link
self.data = data
self.parent = None
for name, value in config.items():
setattr(self, name, value)
def __call__(self, req):
action = self.req.urlvars.get('action', 'index')
if hasattr(self, '__before__'):
self.__before__()
kwargs = self.req.urlvars.copy()
for attr in self.special_vars:
if attr in kwargs:
del kwargs[attr]
return getattr(self, action)(req, **kwargs)
class WebSocketDisconnectedError(Exception):
pass
class WebSocketServerTransport(ServerTransport):
def __init__(self, ws):
self.ws = ws
def receive_message(self):
message = self.ws.wait()
if message is None:
raise WebSocketDisconnectedError()
context = None
return context, message
def send_reply(self, context, reply):
self.ws.send(six.text_type(reply))
class WebSocketRPCServer(RPCServer):
def __init__(self, ws, rpc_callback):
dispatcher = RPCDispatcher()
dispatcher.register_instance(rpc_callback)
super(WebSocketRPCServer, self).__init__(
WebSocketServerTransport(ws),
JSONRPCProtocol(),
dispatcher,
)
def serve_forever(self):
try:
super(WebSocketRPCServer, self).serve_forever()
except WebSocketDisconnectedError:
return
def _spawn(self, func, *args, **kwargs):
hub.spawn(func, *args, **kwargs)
class WebSocketClientTransport(ClientTransport):
def __init__(self, ws, queue):
self.ws = ws
self.queue = queue
def send_message(self, message, expect_reply=True):
self.ws.send(six.text_type(message))
if expect_reply:
return self.queue.get()
class WebSocketRPCClient(RPCClient):
def __init__(self, ws):
self.ws = ws
self.queue = hub.Queue()
super(WebSocketRPCClient, self).__init__(
JSONRPCProtocol(),
WebSocketClientTransport(ws, self.queue),
)
def serve_forever(self):
while True:
msg = self.ws.wait()
if msg is None:
break
self.queue.put(msg)
class wsgify_hack(webob.dec.wsgify):
def __call__(self, environ, start_response):
self.kwargs['start_response'] = start_response
return super(wsgify_hack, self).__call__(environ, start_response)
class WebSocketManager(object):
def __init__(self):
self._connections = []
def add_connection(self, ws):
self._connections.append(ws)
def delete_connection(self, ws):
self._connections.remove(ws)
def broadcast(self, msg):
for connection in self._connections:
connection.send(msg)
class WSGIApplication(object):
def __init__(self, **config):
self.config = config
self.mapper = Mapper()
self.registory = {}
self._wsmanager = WebSocketManager()
super(WSGIApplication, self).__init__()
def _match(self, req):
# Note: Invoke the new API, first. If the arguments unmatched,
# invoke the old API.
try:
return self.mapper.match(environ=req.environ)
except TypeError:
self.mapper.environ = req.environ
return self.mapper.match(req.path_info)
@wsgify_hack
def __call__(self, req, start_response):
match = self._match(req)
if not match:
return webob.exc.HTTPNotFound()
req.start_response = start_response
req.urlvars = match
link = URLGenerator(self.mapper, req.environ)
data = None
name = match['controller'].__name__
if name in self.registory:
data = self.registory[name]
controller = match['controller'](req, link, data, **self.config)
controller.parent = self
return controller(req)
def register(self, controller, data=None):
def _target_filter(attr):
if not inspect.ismethod(attr) and not inspect.isfunction(attr):
return False
if not hasattr(attr, 'routing_info'):
return False
return True
methods = inspect.getmembers(controller, _target_filter)
for method_name, method in methods:
routing_info = getattr(method, 'routing_info')
name = routing_info['name']
path = routing_info['path']
conditions = {}
if routing_info.get('methods'):
conditions['method'] = routing_info['methods']
requirements = routing_info.get('requirements') or {}
self.mapper.connect(name,
path,
controller=controller,
requirements=requirements,
action=method_name,
conditions=conditions)
if data:
self.registory[controller.__name__] = data
@property
def websocketmanager(self):
return self._wsmanager
class WSGIServer(hub.WSGIServer):
def __init__(self, application, **config):
super(WSGIServer, self).__init__((CONF.wsapi_host, CONF.wsapi_port),
application, **config)
def __call__(self):
self.serve_forever()
def start_service(app_mgr):
for instance in app_mgr.contexts.values():
if instance.__class__ == WSGIApplication:
return WSGIServer(instance)
return None
|
en
| 0.795413
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2012 <NAME> <yamahata at private email ne jp> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Wrapper class for webob.request.Request. The behavior of this class is the same as webob.request.Request except for setting "charset" to "UTF-8" automatically. Wrapper class for webob.response.Response. The behavior of this class is the same as webob.response.Response except for setting "charset" to "UTF-8" automatically. # XXX: Eventlet API should not be used directly. # https://github.com/benoitc/gunicorn/pull/2581 # XXX: In order to prevent the writing to a already closed socket. # This issue is caused by combined use: # - webob.dec.wsgify() # - eventlet.wsgi.HttpProtocol.handle_one_response() # Note: Invoke the new API, first. If the arguments unmatched, # invoke the old API.
| 1.863909
| 2
|
terra_bonobo_nodes/terra.py
|
Terralego/terra-bonobo-nodes
| 1
|
6627323
|
<filename>terra_bonobo_nodes/terra.py
import logging
from copy import deepcopy
from json import JSONDecodeError
from bonobo.config import Configurable, Option, Service
from bonobo.config.processors import ContextProcessor
from bonobo.constants import END, NOT_MODIFIED
from bonobo.util.objects import ValueHolder
from django.conf import settings
from django.contrib.gis.db.models import Union
from django.contrib.gis.db.models.functions import (
Distance,
Intersection,
MakeValid,
Transform,
)
from django.contrib.gis.geos import GEOSGeometry
from django.db import connection, transaction
from geostore.models import Feature, FeatureQuerySet, Layer # noqa
from requests.compat import urljoin
logger = logging.getLogger(__name__)
GEOS_EMPTY_POINT = GEOSGeometry("POINT EMPTY")
class LayerClusters(Configurable):
"""
Extract cluster from layers
Options:
`input_layers` list of input layers
`metric_projection_srid` used projection
`distance` minimal distance between each cluster
Return:
Point cluster point object
QuerySet QuerySet of all features included in the cluster
"""
input_layers = Option(list, positional=True, required=True)
metric_projection_srid = Option(int, positional=True, required=True)
distance = Option(int, positional=True, required=True)
def __call__(self, *args, **kwargs):
args = [
self.metric_projection_srid,
self.distance,
[input_layer.pk for input_layer in self.input_layers],
]
with connection.cursor() as cursor:
sql_query = f"""
SELECT
array_agg(id) AS ids,
ST_AsText(ST_SnapToGrid(ST_Transform(geom, %s), %s)) AS cluster_id
FROM
{Feature._meta.db_table}
WHERE
layer_id = ANY(%s::INT[])
GROUP BY
cluster_id
"""
cursor.execute(sql_query, args)
for features, cluster in cursor.fetchall():
yield cluster, Feature.objects.filter(pk__in=features)
class SubdivideGeom(Configurable):
"""
Execute ST_Subdivide to an input geometry
Options:
`max_vertices` numbe maximal of vertices of the new geometry
`geom` geom field where is located the geometry
Return:
identifier identifier of the new record
record properties of the record
"""
max_vertices = Option(int, positional=True, default=256)
geom = Option(str, positional=True, default="geom")
def __call__(self, identifier, properties, *args, **kwargs):
args = [
properties[self.geom].ewkt,
self.max_vertices,
]
with connection.cursor() as cursor:
sql_query = (
"SELECT ST_Subdivide(ST_Buffer(ST_GeomFromText(%s), 0), %s) AS geom"
)
cursor.execute(sql_query, args)
id = 0
for (geom,) in cursor.fetchall():
properties = deepcopy(properties)
properties[self.geom] = GEOSGeometry(geom)
yield f"{identifier}-{id}", properties
id += 1
class LoadFeatureInLayer(Configurable):
"""
Load feature data in input layer
Options:
`geom` geom field where is located the geometry
`layer` layer where to insert the geometry and its attributes
`window_length` size of bulk import
Services:
`service_layer` Layer where to insert geometries, used if layer argument is empty
Return:
NOT_MODIFIED
"""
geom = Option(str, positional=True, default="geom")
layer = Option(None, required=False, positional=True)
window_length = Option(int, default=100)
layer_name = Option(str, required=False)
@ContextProcessor
def buffer(self, context, *args, **kwargs):
buffer = yield ValueHolder([])
if len(buffer):
# Final call if there is content in buffer
self.__call__(buffer, END, END)
def __call__(self, buffer, identifier, record, *args, **kwargs):
if self.layer_name:
self.write_layer = Layer.objects.get(name=self.layer_name)
elif self.layer:
self.write_layer = self.layer
else:
raise Exception("Missing layer or layer_name parameter")
is_final = identifier == END and record == END
if not is_final:
buffer.append(
(
identifier,
record,
)
)
if len(buffer) >= self.window_length or is_final:
with transaction.atomic(savepoint=False):
Feature.objects.filter(
layer=self.write_layer, identifier__in=[i for i, r in buffer]
).delete()
Feature.objects.bulk_create(
[self._get_feature_object(*feature) for feature in buffer]
)
buffer.set([])
return NOT_MODIFIED
def _get_feature_object(self, identifier, record):
properties = record.copy()
geometry = properties.pop(self.geom, GEOS_EMPTY_POINT)
return Feature(
layer=self.write_layer,
identifier=identifier,
geom=geometry,
properties=properties,
)
class ExtractFeatures(Configurable):
"""
Extract features from a queryset
Options:
`queryset` Feature QuerySet containing geometries and attributes
`id_field` field containing the identifier
`extra_properties` dict of extra attributes extracted from the feature
Return:
str identifier of the record using id_field
dict record
"""
queryset = Option(None, required=True, positional=True)
id_field = Option(str, required=True, positional=True, default="identifier")
extra_properties = Option(dict, required=True, positional=True, default={})
batch_size = 1000
def __call__(self, *args, **kwargs):
count = self.queryset.count()
for start in range(0, count, self.batch_size):
end = min(start + self.batch_size, count)
features = self.queryset[start:end]
for feature in features:
properties = {
**feature.properties,
**{
attribute: getattr(feature, field)
for attribute, field in self.extra_properties.items()
},
}
yield getattr(feature, self.id_field), properties
class BooleanIntersect(Configurable):
"""
Intersect geometry witch all geometries of one layer
Options:
`layer` Layer to intersect
`property` property where to put the resulted boolean
`geom` geometry attribute in record
Return:
str identifier of the record
dict record updated
"""
layer = Option(str, required=True, positional=True)
property = Option(str, required=True, positional=True)
geom = Option(str, positional=True, default="geom")
def __call__(self, identifier, record, *args, **kwargs):
layer = Layer.objects.get(name=self.layer)
try:
record[self.property] = layer.features.filter(
geom__intersects=record[self.geom]
).exists()
except Exception as e:
record[self.property] = False
logger.error(f"An error occured doing BooleanIntersect: {e}")
yield identifier, record
class IntersectionPercentByArea(Configurable):
"""
Get percentage of intersection of a geometry
Options:
`layer` Layer to intersect
`property` property where to put the resulted intersection
`geom` geometry attribute in record
Return:
str identifier of the record
dict record updated
"""
layer = Option(str, required=True, positional=True)
property = Option(str, required=True, positional=True)
geom = Option(str, positional=True, default="geom")
def __call__(self, identifier, record, *args, **kwargs):
layer = Layer.objects.get(name=self.layer)
try:
zone = (
layer.features.filter(geom__intersects=record[self.geom])
.annotate(
intersection=MakeValid(Intersection("geom", record[self.geom]))
)
.aggregate(zone=Union("intersection"))["zone"]
)
record[self.property] = zone and zone.area / record[self.geom].area or 0.0
except Exception as e:
logger.error(f"identifier {identifier} got error {e}")
yield identifier, record
class ClosestFeatures(Configurable):
"""
Get closes features of the geometry in a layer
Options:
`layer` Layer to intersect
`property_filter` dict of properties to filter in layer's features
`geom` geometry attribute in record
`closests` property where to put closest features
`limit` number of features maximum to load
`max_distance` maximal distance from original geometry
Return:
str identifier of the record
dict record updated
"""
layer = Option(str, positional=True, required=True)
property_filter = Option(dict, default={})
geom = Option(str, default="geom")
closests = Option(str, default="closests")
limit = Option(int, default=1)
max_distance = Option(int, default=-1)
def __call__(self, identifier, properties, *args, **kwargs):
geom_point = properties[self.geom].centroid
properties_filters = {
f"properties__{k}": v for k, v in self.property_filter.items()
}
try:
closest_points = (
Layer.objects.get(name=self.layer)
.features.filter(**properties_filters)
.exclude(geom=GEOSGeometry("POINT EMPTY"))
.annotate(
distance=Distance(
Transform("geom", 4326), Transform(geom_point, 4326)
)
)
)
if self.max_distance > 0:
closest_points = closest_points.filter(distance__lt=self.max_distance)
closest_points = closest_points.order_by("distance")[: self.limit]
properties[self.closests] = properties.get(self.closests, []) + [
c.geom for c in closest_points
]
return identifier, properties
except AttributeError:
return identifier, properties
class TransitTimeOneToMany(Configurable):
    """
    Calculate transit time (or distance) from a list of start points to the
    record geometry, one graphhopper routing request per (point, vehicle)
    pair. Settings can be found in the graphhopper API documentation.

    Options:
        `vehicles`        vehicles to use (car, bike, hike, ...)
        `weighting`       routing profile; "fastest" yields times, any
                          other value yields distances
        `elevation`       take terrain elevation into account
        `geom`            record attribute holding the destination geometry
        `points`          record attribute holding the start points (popped)
        `times_property`  record attribute receiving the result matrix
                          (one row per point, one column per vehicle;
                          None where no route was found)
    Services:
        `http`  requests.Session's object
    Return:
        str   identifier of the record
        dict  record updated
    """

    vehicles = Option(list, positional=True, default=["car"])
    weighting = Option(str, positional=True, default="fastest")
    elevation = Option(bool, positional=True, default=False)
    geom = Option(str, positional=True, default="geom")
    points = Option(str, positional=True, default="points")
    times_property = Option(str, positional=True, default="times")
    http = Service("http")

    def __call__(self, identifier, properties, http, *args, **kwargs):
        end_point = properties[self.geom].centroid
        # Routes start at each point (not at the record) so one-way
        # motorways are handled from the traveller's side.
        points = properties.pop(self.points)
        # "fastest" asks graphhopper for times; anything else is read
        # as a distance request.
        dim = "time" if self.weighting == "fastest" else "distance"
        times = []
        for point in points:
            time = []
            for vehicle in self.vehicles:
                routing_url = urljoin(settings.GRAPHHOPPER, "route")
                payload = {
                    "point": [f"{point.y},{point.x}", f"{end_point.y},{end_point.x}"],
                    "vehicle": vehicle,
                    "weighting": self.weighting,
                    "elevation": self.elevation,
                    "instructions": False,
                    "calc_points": False,
                }
                response = http.get(routing_url, params=payload)
                try:
                    response = response.json()
                    time += [response.get("paths", [])[0].get(dim)]
                except (IndexError, JSONDecodeError):
                    # No route found or unparseable body: record a hole.
                    time += [None]
            times += [time]
        properties[self.times_property] = times
        return identifier, properties
class TransitTimeOneToOne(TransitTimeOneToMany):
    """
    Same as TransitTimeOneToMany but for a single destination: the 1x1
    result matrix is unwrapped into a scalar (None when empty). Uses the
    same API.
    """

    def __call__(self, *args, **kwargs):
        identifier, properties = super().__call__(*args, **kwargs)
        matrix = properties[self.times_property]
        properties[self.times_property] = matrix[0][0] if matrix else None
        return identifier, properties
class AccessibilityRatioByTime(Configurable):
    """
    Turn a matrix of transit times into the share of reachable points.

    A point counts as reachable when, for at least one vehicle mode, its
    transit time is not None and does not exceed that mode's limit.

    Options:
        `time_limits`  one time limit per vehicle mode
        `property`     record attribute receiving the ratio
        `times`        record attribute holding the time matrix (popped)
    Return:
        str   identifier of the record
        dict  record updated (unchanged when no times were present)
    """

    time_limits = Option(list, positional=True, required=True)
    property = Option(str, positional=True, required=True)
    times = Option(str, positional=True, default="times")

    def __call__(self, identifier, properties, *args, **kwargs):
        transit_times = properties.pop(self.times)
        if not transit_times:
            return identifier, properties
        reachable = 0
        for row in transit_times:
            ok = False
            for mode, limit in enumerate(self.time_limits):
                duration = row[mode]
                ok = ok or (duration is not None and duration <= limit)
            if ok:
                reachable += 1
        properties[self.property] = reachable / len(transit_times)
        return identifier, properties
class SimplifyGeom(Configurable):
    """
    Store a simplified copy of a geometry on the record.

    Options:
        `tolerance`  simplification tolerance
        `geom_in`    record attribute read
        `geom_out`   record attribute written (may equal `geom_in`)
    Return:
        str   identifier of the record
        dict  record updated
    """

    tolerance = Option(int, positional=True, required=True)
    geom_in = Option(str, positional=True, default="geom")
    geom_out = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        source = record[self.geom_in]
        record[self.geom_out] = source.simplify(self.tolerance)
        return identifier, record
class TransformGeom(Configurable):
    """
    Reproject a geometry into another spatial reference system.

    Options:
        `ct`        target coordinate transform passed to GEOS
        `geom_in`   record attribute read
        `geom_out`  record attribute written (a transformed clone; the
                    input geometry itself is left untouched)
    Return:
        str   identifier of the record
        dict  record updated
    """

    ct = Option(str, positional=True, required=True)
    geom_in = Option(str, positional=True, default="geom")
    geom_out = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        original = record[self.geom_in]
        record[self.geom_out] = original.transform(self.ct, clone=True)
        return identifier, record
class CleanOlderThan(Configurable):
    """
    Delete features of a layer whose `updated_at` predates the given date.

    The deletion happens once, at node teardown (code after the `yield`
    in a ContextProcessor runs after all records were processed); the
    records themselves flow through unchanged.

    Options:
        `time`        date threshold (strictly older features are removed)
        `layer_name`  name of the layer to clean
    Return:
        NOT_MODIFIED  records pass through untouched
    """

    time = Option(None, required=True, positional=True)
    layer_name = Option(str, required=True)

    @ContextProcessor
    def context(self, context, *args, **kwargs):
        # Everything after the yield executes when the node is torn down.
        yield context
        Feature.objects.filter(layer__name=self.layer_name).filter(
            updated_at__lt=self.time
        ).delete()

    def __call__(self, context, identifier, properties, *args, **kwargs):
        return NOT_MODIFIED
class IntersectionGeom(Configurable):
    """
    Replace the record geometry by its intersection with a layer's features.

    The intersecting pieces of every feature of `layer` are unioned into a
    single geometry stored in `geom_dest` (None when nothing intersects).
    On error the record is yielded unchanged and the error is logged.

    Options:
        `layer`      layer to intersect with
        `geom`       record attribute holding the input geometry
        `geom_dest`  record attribute receiving the unioned intersection
    Return:
        str   identifier of the record
        dict  record updated
    """

    layer = Option(str, required=True, positional=True)
    geom = Option(str, positional=True, default="geom")
    geom_dest = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        layer = Layer.objects.get(name=self.layer)
        try:
            zone = (
                layer.features.filter(geom__intersects=record[self.geom])
                .annotate(
                    intersection=MakeValid(Intersection("geom", record[self.geom]))
                )
                .aggregate(zone=Union("intersection"))["zone"]
            )
            record[self.geom_dest] = zone
        except Exception as e:
            logger.error(f"identifier {identifier} got error {e}")
        yield identifier, record
|
<filename>terra_bonobo_nodes/terra.py
import logging
from copy import deepcopy
from json import JSONDecodeError
from bonobo.config import Configurable, Option, Service
from bonobo.config.processors import ContextProcessor
from bonobo.constants import END, NOT_MODIFIED
from bonobo.util.objects import ValueHolder
from django.conf import settings
from django.contrib.gis.db.models import Union
from django.contrib.gis.db.models.functions import (
Distance,
Intersection,
MakeValid,
Transform,
)
from django.contrib.gis.geos import GEOSGeometry
from django.db import connection, transaction
from geostore.models import Feature, FeatureQuerySet, Layer # noqa
from requests.compat import urljoin
logger = logging.getLogger(__name__)
GEOS_EMPTY_POINT = GEOSGeometry("POINT EMPTY")
class LayerClusters(Configurable):
    """
    Extract clusters of features from layers by snapping geometries to a grid.

    Features of all `input_layers` whose (reprojected) geometries snap to
    the same grid node form one cluster.

    Options:
        `input_layers`            list of input Layer objects
        `metric_projection_srid`  SRID used for reprojection before snapping
        `distance`                grid size, i.e. minimal distance between
                                  two distinct clusters
    Return:
        str       cluster id (WKT of the snapped point)
        QuerySet  features belonging to the cluster
    """

    input_layers = Option(list, positional=True, required=True)
    metric_projection_srid = Option(int, positional=True, required=True)
    distance = Option(int, positional=True, required=True)

    def __call__(self, *args, **kwargs):
        args = [
            self.metric_projection_srid,
            self.distance,
            [input_layer.pk for input_layer in self.input_layers],
        ]
        with connection.cursor() as cursor:
            # ST_SnapToGrid collapses nearby geometries onto the same grid
            # node; grouping by that node yields one row per cluster.
            sql_query = f"""
            SELECT
                array_agg(id) AS ids,
                ST_AsText(ST_SnapToGrid(ST_Transform(geom, %s), %s)) AS cluster_id
            FROM
                {Feature._meta.db_table}
            WHERE
                layer_id = ANY(%s::INT[])
            GROUP BY
                cluster_id
            """
            cursor.execute(sql_query, args)
            for features, cluster in cursor.fetchall():
                yield cluster, Feature.objects.filter(pk__in=features)
class SubdivideGeom(Configurable):
    """
    Split a geometry into pieces with ST_Subdivide.

    Options:
        `max_vertices`  maximum number of vertices per produced piece
        `geom`          record attribute holding the geometry
    Return:
        str   "<identifier>-<n>", one per produced piece
        dict  copy of the record carrying the piece geometry
    """

    max_vertices = Option(int, positional=True, default=256)
    geom = Option(str, positional=True, default="geom")

    def __call__(self, identifier, properties, *args, **kwargs):
        args = [
            properties[self.geom].ewkt,
            self.max_vertices,
        ]
        with connection.cursor() as cursor:
            # ST_Buffer(..., 0) repairs invalid input before subdividing.
            sql_query = (
                "SELECT ST_Subdivide(ST_Buffer(ST_GeomFromText(%s), 0), %s) AS geom"
            )
            cursor.execute(sql_query, args)
            # enumerate() replaces the manual counter that shadowed the
            # builtin ``id``.
            for index, (geom,) in enumerate(cursor.fetchall()):
                piece = deepcopy(properties)
                piece[self.geom] = GEOSGeometry(geom)
                yield f"{identifier}-{index}", piece
class LoadFeatureInLayer(Configurable):
    """
    Buffer incoming records and bulk-load them as Features of a layer.

    Records accumulate and are flushed every `window_length` items (plus a
    final flush at teardown); existing features with the same identifiers
    are replaced inside one transaction.

    Options:
        `geom`           record attribute holding the geometry
        `layer`          Layer object to write into
        `window_length`  size of the bulk-insert window
        `layer_name`     name of the layer to write into (takes precedence
                         over `layer`)
    Return:
        NOT_MODIFIED  records pass through untouched
    Raises:
        ValueError  when neither `layer` nor `layer_name` is set
    """

    geom = Option(str, positional=True, default="geom")
    layer = Option(None, required=False, positional=True)
    window_length = Option(int, default=100)
    layer_name = Option(str, required=False)

    @ContextProcessor
    def buffer(self, context, *args, **kwargs):
        buffer = yield ValueHolder([])
        if len(buffer):
            # Teardown: flush whatever is still buffered.
            self.__call__(buffer, END, END)

    def __call__(self, buffer, identifier, record, *args, **kwargs):
        if self.layer_name:
            self.write_layer = Layer.objects.get(name=self.layer_name)
        elif self.layer:
            self.write_layer = self.layer
        else:
            # ValueError (an Exception subclass) keeps existing handlers
            # working while naming the actual problem.
            raise ValueError("Missing layer or layer_name parameter")
        is_final = identifier == END and record == END
        if not is_final:
            buffer.append(
                (
                    identifier,
                    record,
                )
            )
        if len(buffer) >= self.window_length or is_final:
            # Delete-then-insert inside one transaction so readers never
            # observe the window with its features missing.
            with transaction.atomic(savepoint=False):
                Feature.objects.filter(
                    layer=self.write_layer, identifier__in=[i for i, r in buffer]
                ).delete()
                Feature.objects.bulk_create(
                    [self._get_feature_object(*feature) for feature in buffer]
                )
            buffer.set([])
        return NOT_MODIFIED

    def _get_feature_object(self, identifier, record):
        # Pop the geometry key so it is not duplicated inside properties.
        properties = record.copy()
        geometry = properties.pop(self.geom, GEOS_EMPTY_POINT)
        return Feature(
            layer=self.write_layer,
            identifier=identifier,
            geom=geometry,
            properties=properties,
        )
class ExtractFeatures(Configurable):
    """
    Stream features of a queryset as (identifier, properties) pairs.

    The queryset is consumed in slices of `batch_size` rows to bound the
    memory used per fetch.

    Options:
        `queryset`          Feature QuerySet to read
        `id_field`          feature attribute used as record identifier
        `extra_properties`  mapping of record attribute -> feature attribute
                            merged into the feature's own properties
    Return:
        str   value of `id_field` for the feature
        dict  feature properties (plus the extra ones)
    """

    queryset = Option(None, required=True, positional=True)
    id_field = Option(str, required=True, positional=True, default="identifier")
    extra_properties = Option(dict, required=True, positional=True, default={})
    # Number of features fetched per queryset slice.
    batch_size = 1000

    def __call__(self, *args, **kwargs):
        count = self.queryset.count()
        for start in range(0, count, self.batch_size):
            end = min(start + self.batch_size, count)
            features = self.queryset[start:end]
            for feature in features:
                properties = {
                    **feature.properties,
                    **{
                        attribute: getattr(feature, field)
                        for attribute, field in self.extra_properties.items()
                    },
                }
                yield getattr(feature, self.id_field), properties
class BooleanIntersect(Configurable):
    """
    Flag whether the record geometry intersects any feature of a layer.

    Options:
        `layer`     layer to test against
        `property`  record attribute receiving the boolean result
        `geom`      record attribute holding the geometry
    Return:
        str   identifier of the record
        dict  record updated
    """

    layer = Option(str, required=True, positional=True)
    property = Option(str, required=True, positional=True)
    geom = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        target = Layer.objects.get(name=self.layer)
        try:
            intersects = target.features.filter(
                geom__intersects=record[self.geom]
            ).exists()
        except Exception as e:
            # Any failure is treated as "does not intersect" but logged.
            intersects = False
            logger.error(f"An error occured doing BooleanIntersect: {e}")
        record[self.property] = intersects
        yield identifier, record
class IntersectionPercentByArea(Configurable):
    """
    Compute which share of the record geometry's area intersects a layer.

    Options:
        `layer`     layer to intersect with
        `property`  record attribute receiving the ratio (0.0 when nothing
                    intersects)
        `geom`      record attribute holding the geometry
    Return:
        str   identifier of the record
        dict  record updated (unchanged when an error occurred; the error
              is logged)
    """

    layer = Option(str, required=True, positional=True)
    property = Option(str, required=True, positional=True)
    geom = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        layer = Layer.objects.get(name=self.layer)
        try:
            zone = (
                layer.features.filter(geom__intersects=record[self.geom])
                .annotate(
                    intersection=MakeValid(Intersection("geom", record[self.geom]))
                )
                .aggregate(zone=Union("intersection"))["zone"]
            )
            # Plain conditional instead of the error-prone ``x and a or b``
            # idiom (which silently falls through when ``a`` is falsy).
            record[self.property] = (
                zone.area / record[self.geom].area if zone else 0.0
            )
        except Exception as e:
            logger.error(f"identifier {identifier} got error {e}")
        yield identifier, record
class ClosestFeatures(Configurable):
    """
    Append the geometries of the layer features nearest to the record.

    Options:
        `layer`            layer whose features are searched
        `property_filter`  properties the candidate features must match
        `geom`             record attribute holding the source geometry
        `closests`         record attribute receiving the nearby geometries
        `limit`            maximum number of features appended
        `max_distance`     optional distance cut-off (disabled when <= 0)
    Return:
        str   identifier of the record
        dict  record updated
    """

    layer = Option(str, positional=True, required=True)
    property_filter = Option(dict, default={})
    geom = Option(str, default="geom")
    closests = Option(str, default="closests")
    limit = Option(int, default=1)
    max_distance = Option(int, default=-1)

    def __call__(self, identifier, properties, *args, **kwargs):
        geom_point = properties[self.geom].centroid
        properties_filters = {
            f"properties__{k}": v for k, v in self.property_filter.items()
        }
        try:
            closest_points = (
                Layer.objects.get(name=self.layer)
                .features.filter(**properties_filters)
                # Reuse the module-level constant instead of re-parsing the
                # WKT for every record (consistent with _get_feature_object).
                .exclude(geom=GEOS_EMPTY_POINT)
                .annotate(
                    distance=Distance(
                        Transform("geom", 4326), Transform(geom_point, 4326)
                    )
                )
            )
            if self.max_distance > 0:
                closest_points = closest_points.filter(distance__lt=self.max_distance)
            closest_points = closest_points.order_by("distance")[: self.limit]
            properties[self.closests] = properties.get(self.closests, []) + [
                c.geom for c in closest_points
            ]
            return identifier, properties
        except AttributeError:
            # A record without a usable geometry passes through untouched.
            return identifier, properties
class TransitTimeOneToMany(Configurable):
    """
    Query graphhopper for the transit time (or distance) from each start
    point to the record geometry, one request per (point, vehicle) pair.
    Settings can be found in the graphhopper API documentation.

    Options:
        `vehicles`        vehicles to use (car, bike, hike, ...)
        `weighting`       "fastest" yields times, anything else distances
        `elevation`       take terrain elevation into account
        `geom`            record attribute holding the destination geometry
        `points`          record attribute holding the start points (popped)
        `times_property`  record attribute receiving the result matrix
    Services:
        `http`  requests.Session's object
    Return:
        str   identifier of the record
        dict  record updated
    """

    vehicles = Option(list, positional=True, default=["car"])
    weighting = Option(str, positional=True, default="fastest")
    elevation = Option(bool, positional=True, default=False)
    geom = Option(str, positional=True, default="geom")
    points = Option(str, positional=True, default="points")
    times_property = Option(str, positional=True, default="times")
    http = Service("http")

    def __call__(self, identifier, properties, http, *args, **kwargs):
        destination = properties[self.geom].centroid
        # Routes start from each point to deal with one-way motorways.
        origins = properties.pop(self.points)
        measure = "time" if self.weighting == "fastest" else "distance"
        # The routing URL does not depend on the loop variables.
        routing_url = urljoin(settings.GRAPHHOPPER, "route")
        matrix = []
        for origin in origins:
            row = []
            for vehicle in self.vehicles:
                payload = {
                    "point": [
                        f"{origin.y},{origin.x}",
                        f"{destination.y},{destination.x}",
                    ],
                    "vehicle": vehicle,
                    "weighting": self.weighting,
                    "elevation": self.elevation,
                    "instructions": False,
                    "calc_points": False,
                }
                response = http.get(routing_url, params=payload)
                try:
                    body = response.json()
                    row.append(body.get("paths", [])[0].get(measure))
                except (IndexError, JSONDecodeError):
                    # No route found or unparseable body: record a hole.
                    row.append(None)
            matrix.append(row)
        properties[self.times_property] = matrix
        return identifier, properties
class TransitTimeOneToOne(TransitTimeOneToMany):
    """
    Variant of TransitTimeOneToMany for a single destination point: the
    1x1 result matrix is flattened to a scalar (None when empty). Uses
    the same API.
    """

    def __call__(self, *args, **kwargs):
        identifier, properties = super().__call__(*args, **kwargs)
        matrix = properties[self.times_property]
        properties[self.times_property] = matrix[0][0] if matrix else None
        return identifier, properties
class AccessibilityRatioByTime(Configurable):
    """
    Turn a matrix of transit times into the share of reachable points.

    A point counts as reachable when, for at least one vehicle mode, its
    transit time is not None and does not exceed that mode's limit.

    Options:
        `time_limits`  one time limit per vehicle mode
        `property`     record attribute receiving the ratio
        `times`        record attribute holding the time matrix (popped)
    Return:
        str   identifier of the record
        dict  record updated (unchanged when no times were present)
    """

    time_limits = Option(list, positional=True, required=True)
    property = Option(str, positional=True, required=True)
    times = Option(str, positional=True, default="times")

    def __call__(self, identifier, properties, *args, **kwargs):
        transit_times = properties.pop(self.times)
        if not transit_times:
            return identifier, properties
        reachable = 0
        for row in transit_times:
            ok = False
            for mode, limit in enumerate(self.time_limits):
                duration = row[mode]
                ok = ok or (duration is not None and duration <= limit)
            if ok:
                reachable += 1
        properties[self.property] = reachable / len(transit_times)
        return identifier, properties
class SimplifyGeom(Configurable):
    """
    Store a simplified copy of a geometry on the record.

    Options:
        `tolerance`  simplification tolerance
        `geom_in`    record attribute read
        `geom_out`   record attribute written (may equal `geom_in`)
    Return:
        str   identifier of the record
        dict  record updated
    """

    tolerance = Option(int, positional=True, required=True)
    geom_in = Option(str, positional=True, default="geom")
    geom_out = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        record[self.geom_out] = record[self.geom_in].simplify(self.tolerance)
        return identifier, record
class TransformGeom(Configurable):
    """
    Reproject a geometry into another spatial reference system.

    Options:
        `ct`        target coordinate transform passed to GEOS
        `geom_in`   record attribute read
        `geom_out`  record attribute written (a transformed clone; the
                    input geometry itself is left untouched)
    Return:
        str   identifier of the record
        dict  record updated
    """

    ct = Option(str, positional=True, required=True)
    geom_in = Option(str, positional=True, default="geom")
    geom_out = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        record[self.geom_out] = record[self.geom_in].transform(self.ct, clone=True)
        return identifier, record
class CleanOlderThan(Configurable):
    """
    Delete features of a layer whose `updated_at` predates the given date.

    The deletion happens once, at node teardown (code after the `yield`
    in a ContextProcessor runs after all records were processed); the
    records themselves flow through unchanged.

    Options:
        `time`        date threshold (strictly older features are removed)
        `layer_name`  name of the layer to clean
    Return:
        NOT_MODIFIED  records pass through untouched
    """

    time = Option(None, required=True, positional=True)
    layer_name = Option(str, required=True)

    @ContextProcessor
    def context(self, context, *args, **kwargs):
        # Everything after the yield executes when the node is torn down.
        yield context
        Feature.objects.filter(layer__name=self.layer_name).filter(
            updated_at__lt=self.time
        ).delete()

    def __call__(self, context, identifier, properties, *args, **kwargs):
        return NOT_MODIFIED
class IntersectionGeom(Configurable):
    """
    Replace the record geometry by its intersection with a layer's features.

    The intersecting pieces of every feature of `layer` are unioned into a
    single geometry stored in `geom_dest` (None when nothing intersects).
    On error the record is yielded unchanged and the error is logged.

    Options:
        `layer`      layer to intersect with
        `geom`       record attribute holding the input geometry
        `geom_dest`  record attribute receiving the unioned intersection
    Return:
        str   identifier of the record
        dict  record updated
    """

    layer = Option(str, required=True, positional=True)
    geom = Option(str, positional=True, default="geom")
    geom_dest = Option(str, positional=True, default="geom")

    def __call__(self, identifier, record, *args, **kwargs):
        source = Layer.objects.get(name=self.layer)
        geometry = record[self.geom]
        try:
            pieces = source.features.filter(geom__intersects=geometry).annotate(
                intersection=MakeValid(Intersection("geom", geometry))
            )
            record[self.geom_dest] = pieces.aggregate(zone=Union("intersection"))[
                "zone"
            ]
        except Exception as e:
            logger.error(f"identifier {identifier} got error {e}")
        yield identifier, record
|
en
| 0.740232
|
# noqa Extract cluster from layers Options: `input_layers` list of input layers `metric_projection_srid` used projection `distance` minimal distance between each cluster Return: Point cluster point object QuerySet QuerySet of all features included in the cluster SELECT array_agg(id) AS ids, ST_AsText(ST_SnapToGrid(ST_Transform(geom, %s), %s)) AS cluster_id FROM {Feature._meta.db_table} WHERE layer_id = ANY(%s::INT[]) GROUP BY cluster_id Execute ST_Subdivide to an input geometry Options: `max_vertices` numbe maximal of vertices of the new geometry `geom` geom field where is located the geometry Return: identifier identifier of the new record record properties of the record Load feature data in input layer Options: `geom` geom field where is located the geometry `layer` layer where to insert the geometry and its attributes `window_length` size of bulk import Services: `service_layer` Layer where to insert geometries, used if layer argument is empty Return: NOT_MODIFIED # Final call if there is content in buffer Extract features from a queryset Options: `queryset` Feature QuerySet containing geometries and attributes `id_field` field containing the identifier `extra_properties` dict of extra attributes extracted from the feature Return: str identifier of the record using id_field dict record Intersect geometry witch all geometries of one layer Options: `layer` Layer to intersect `property` property where to put the resulted boolean `geom` geometry attribute in record Return: str identifier of the record dict record updated Get percentage of intersection of a geometry Options: `layer` Layer to intersect `property` property where to put the resulted intersection `geom` geometry attribute in record Return: str identifier of the record dict record updated Get closes features of the geometry in a layer Options: `layer` Layer to intersect `property_filter` dict of properties to filter in layer's features `geom` geometry attribute in record `closests` property where to put 
closest features `limit` number of features maximum to load `max_distance` maximal distance from original geometry Return: str identifier of the record dict record updated Calculate transit time from geometry to list of points. Settings can be found in graphhopper API documentation. Options: `vehicules` vehicules to use (car, bike, hike, …) `weighting` what kind of way (default: fastest) `elevation` take care of terrain elevation `geom` where is the original geometry `points` destination points to calculate `times_property` where to insert calculated times Services: `http` requests.Session's object Return: str identifier of the record dict record updated # Starts from point to deals with oneway motorway Same as TransitTimeOneToMany but for only one destination. Uses the same API. Calculate accesibility using transit times Options: `time_limits` dict of time limits by type of vehicle `property` property where to set in the record the resulted ratio `times` where are the transit time stored in original record Return: str identifier of the record dict record updated Simplify a geometry Options: `tolerance` tolerance of simplification `geom_in` property of input geometry `geom_out` property of output geometry Return: str identifier of the record dict record updated Transform geometry Options: `ct` destination projection `geom_in` property of input geometry `geom_out` property of output geometry Return: str identifier of the record dict record updated Clean features of layer older than input date Options: `time` date threshold Return: str identifier of the record dict record updated Cut original geometry with intersection of layers geometries Options: `layer` layer to intersect `geom` property of input geometry `geom_dest` property of output geometry Return: str identifier of the record dict record updated
| 1.939047
| 2
|
blogweb/apis/booksapi.py
|
mnpiozhang/MyBlog
| 0
|
6627324
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
from django.http import HttpResponse
from blogweb.popularbooks.getinfo import get_JD_Top
from blogweb.popularbooks import config as cfg
from django.views.generic import View
import json
from collections import OrderedDict
class jdBooksApi(View):
    '''
    JD book top-list API.

    Example:
    https://niubidian.top/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23
    http://127.0.0.1:8000/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23

    The size of the returned top list may be chosen freely in 1--100.

    item str, default "nbs"
        nbs  new-book sales ranking
        bc   book review ranking
        nbc  new-book review ranking
        bs   book sales ranking
    ----------------------------------
    category str, default "internet"
        children, edu, novel, manage, jitang, socialscience, life, art,
        science, internet, en, magazine
    ----------------------------------
    effectivetime str, default "day"
        day    last 24 hours
        week   last week
        month  last 30 days
    '''

    @staticmethod
    def _json_response(data):
        # All branches share the same JSON serialisation settings.
        return HttpResponse(
            json.dumps(data, ensure_ascii=False, indent=2),
            content_type="application/json",
        )

    def get(self, request):
        returndata = {"code": 200, "errMsg": "", "body": []}
        item = request.GET.get('item')
        category = request.GET.get('category')
        effectivetime = request.GET.get('effectivetime')
        try:
            topnumber = int(request.GET.get('topnumber'))
        except (TypeError, ValueError):
            # Missing parameter raises TypeError, non-numeric ValueError;
            # a bare except would also hide unrelated failures.
            returndata["code"] = 400
            returndata["errMsg"] = 'topnumber must be number'
            return self._json_response(returndata)
        if not all([item, category, effectivetime, topnumber]):
            returndata["code"] = 400
            returndata["errMsg"] = 'request error'
            return self._json_response(returndata)
        if (item not in cfg.ITEM.keys()
                or category not in cfg.CATEGORY.keys()
                or effectivetime not in cfg.EFFECTIVE_TIME.keys()
                or topnumber <= 0 or topnumber > 100):
            returndata["code"] = 400
            returndata["errMsg"] = 'request error111'
            return self._json_response(returndata)
        if effectivetime != 'day' and (item == 'bc' or item == 'nbc'):
            # Review rankings only exist for the last 24 hours.
            returndata["code"] = 400
            returndata["errMsg"] = '热评榜只有24小时内的'
            return self._json_response(returndata)
        # get_JD_Top returns JSON such as
        # {"top1": {"url": ..., "name": ..., "pic": ...}, "top2": {...}};
        # an OrderedDict hook preserves the ranking order.
        ranking = json.loads(
            get_JD_Top(item, category, effectivetime, topnumber),
            object_pairs_hook=OrderedDict,
        )
        returndata["body"] = [
            {u'rank': k, u'url': v[u'url'], u'name': v[u'name'], u'pic': v[u'pic']}
            for k, v in ranking.items()
        ]
        return self._json_response(returndata)
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
from django.http import HttpResponse
from blogweb.popularbooks.getinfo import get_JD_Top
from blogweb.popularbooks import config as cfg
from django.views.generic import View
import json
from collections import OrderedDict
class jdBooksApi(View):
    '''
    JD book top-list API.

    Example:
    https://niubidian.top/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23
    http://127.0.0.1:8000/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23

    The size of the returned top list may be chosen freely in 1--100.

    item str, default "nbs"
        nbs  new-book sales ranking
        bc   book review ranking
        nbc  new-book review ranking
        bs   book sales ranking
    ----------------------------------
    category str, default "internet"
        children, edu, novel, manage, jitang, socialscience, life, art,
        science, internet, en, magazine
    ----------------------------------
    effectivetime str, default "day"
        day    last 24 hours
        week   last week
        month  last 30 days
    '''

    @staticmethod
    def _json_response(data):
        # All branches share the same JSON serialisation settings.
        return HttpResponse(
            json.dumps(data, ensure_ascii=False, indent=2),
            content_type="application/json",
        )

    def get(self, request):
        returndata = {"code": 200, "errMsg": "", "body": []}
        item = request.GET.get('item')
        category = request.GET.get('category')
        effectivetime = request.GET.get('effectivetime')
        try:
            topnumber = int(request.GET.get('topnumber'))
        except (TypeError, ValueError):
            # Missing parameter raises TypeError, non-numeric ValueError;
            # a bare except would also hide unrelated failures.
            returndata["code"] = 400
            returndata["errMsg"] = 'topnumber must be number'
            return self._json_response(returndata)
        if not all([item, category, effectivetime, topnumber]):
            returndata["code"] = 400
            returndata["errMsg"] = 'request error'
            return self._json_response(returndata)
        if (item not in cfg.ITEM.keys()
                or category not in cfg.CATEGORY.keys()
                or effectivetime not in cfg.EFFECTIVE_TIME.keys()
                or topnumber <= 0 or topnumber > 100):
            returndata["code"] = 400
            returndata["errMsg"] = 'request error111'
            return self._json_response(returndata)
        if effectivetime != 'day' and (item == 'bc' or item == 'nbc'):
            # Review rankings only exist for the last 24 hours.
            returndata["code"] = 400
            returndata["errMsg"] = '热评榜只有24小时内的'
            return self._json_response(returndata)
        # get_JD_Top returns JSON such as
        # {"top1": {"url": ..., "name": ..., "pic": ...}, "top2": {...}};
        # an OrderedDict hook preserves the ranking order.
        ranking = json.loads(
            get_JD_Top(item, category, effectivetime, topnumber),
            object_pairs_hook=OrderedDict,
        )
        returndata["body"] = [
            {u'rank': k, u'url': v[u'url'], u'name': v[u'name'], u'pic': v[u'pic']}
            for k, v in ranking.items()
        ]
        return self._json_response(returndata)
|
en
| 0.312481
|
#!/usr/bin/env python #_*_ coding:utf-8 _*_ https://niubidian.top/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23
http://127.0.0.1:8000/blog/jdbooks/?item=nbs&category=novel&effectivetime=week&topnumber=23
返回的TOP数量可以自己定义1--100
item str default 新书销量榜 nbs
新书销量榜 nbs
图书热评榜 bc
新书热评榜 nbc
图书销量榜 bs
----------------------------------
category str default 计算机与互联网 internet
少儿 children
教育 edu
小说文学 novel
经管 manage
励志与成功 jitang
人文社科 socialscience
生活 life
艺术、摄影 art
科技 science
计算机与互联网 internet
英文书、港台书 en
杂志期刊 magazine
----------------------------------
effectivetime str default 最近24小时 day
最近24小时 day
最近一周 week
最近30天 month #a is json, like {"top1": {"url": "//item.jd.com/12236229.html", "name": "妖猫传(沙门空海·大唐鬼宴 全四册经典套装)", "pic": "//img13.360buyimg.com/n3/jfs/t12199/194/878683607/225186/13de2d7c/5a15320dNdfbe411e.jpg"}, "top2": {"url": "//item.jd.com/12239650.html", "name": "余华作品:活着", "pic": "//img14.360buyimg.com/n3/jfs/t10162/279/1390942739/246693/50c56f9d/59e02214N37418280.jpg"}} # i want to trans it #print a #resp["Access-Control-Allow-Headers"] = "content-type" #resp["Access-Control-Allow-Origin"] = "*" #resp["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS" #resp["Access-Control-Max-Age"] = "1000"
| 2.334178
| 2
|
vilya/models/elastic/issue_pr_search.py
|
mubashshirjamal/code
| 1,582
|
6627325
|
<reponame>mubashshirjamal/code<gh_stars>1000+
# -*- coding: utf-8 -*-
import logging
from vilya.libs.store import store
from vilya.libs.text import trunc_utf8
from vilya.models.project_issue import ProjectIssue
from vilya.models.pull import PullRequest
from vilya.models.ticket import Ticket
from vilya.models.project import CodeDoubanProject
from vilya.models.user import User
from vilya.models.elastic.indexer import IndexEngine
from vilya.models.elastic.searcher import SearchEngine
class IssuePRSearch(object):
    """Shared elasticsearch index/search helpers for issues and pull requests.

    Subclasses set `type_name` to the elasticsearch document type they
    index under.
    """

    type_name = ''

    @classmethod
    def index_an_object(cls, serial, data):
        # Index a single document under this class's document type.
        return IndexEngine.create_a_index(cls.type_name, serial, data)

    @classmethod
    def search_a_phrase(cls, phrase, project_id=None, from_=0, size=20,
                        state=None, sort_data=None):
        """Search `phrase`, optionally filtered by project and state.

        Issues filter on `project_id`, pull requests on `to_proj_id`.
        Results highlight the description and facet on `state`.
        """
        filter_list = []
        if project_id:
            if cls.type_name == "issue":
                key = "project_id"
            else:
                key = "to_proj_id"
            filter_list.append({
                "term": {
                    key: project_id
                }
            })
        if state:
            filter_list.append({"term": {"state": state}})
        if filter_list:
            filter_data = {"and": filter_list}
        else:
            filter_data = None
        # number_of_fragments=0 returns the whole highlighted field.
        highlight_data = {
            "fields": {
                "description": {"number_of_fragments": 0}
            }
        }
        facets_data = {
            "state": {
                "terms": {
                    "field": "state"
                }
            }
        }
        result = SearchEngine.search_a_phrase(cls.type_name, phrase, from_,
                                              size, sort_data=sort_data,
                                              filter_data=filter_data,
                                              highlight_data=highlight_data,
                                              facets_data=facets_data)
        return result

    @classmethod
    def index_a_project(cls, project):
        # Index both the issues and the pull requests of the project.
        IssueSearch.index_a_project_issue(project)
        PullRequestSearch.index_a_project_pr(project)

    @classmethod
    def format_facets(cls, result):
        """Extract the `state` facet terms; {} when the result is invalid."""
        if not SearchEngine.check_result(result):
            return {}
        formatted = dict(state=result['facets']['state']['terms'])
        return formatted
class IssueSearch(IssuePRSearch):
    """Search/index support for project issues."""
    type_name = "issue"

    @classmethod
    def index_a_project_issue(cls, project):
        """Index every issue of *project*, keyed by index_name + issue number."""
        issues = ProjectIssue._get_issues_by_project_id(project.id)
        for issue in issues:
            data = issue.as_dict()
            if data:
                serial = "%s_%s" % (project.index_name, issue.number)
                cls.index_an_object(serial, data)

    @classmethod
    def format_search_result(cls, result):
        """Turn raw search hits into a list of IssueResult objects.

        Hits without an ``issue_id`` are skipped; a missing highlight
        falls back to the empty string.
        """
        if not SearchEngine.check_result(result):
            return []
        formatted = []
        for r in result['hits']['hits']:
            _source = r['_source']
            try:
                hl_description = r['highlight']['description'][0]
            except (KeyError, IndexError):
                # FIX: was a bare `except:`; narrowed to the lookups that
                # can actually fail when a hit has no highlight fragment.
                logging.debug('No highlight for %s', _source)
                hl_description = ''
            description = _source.get('description')
            sr = dict(
                issue_id=_source.get('issue_id'),
                description=description if description else '',
                hl_description=hl_description,
            )
            if not sr['issue_id']:
                # FIX: logging.warn is a deprecated alias of logging.warning;
                # also corrected the "Invaild" typo in the message.
                logging.warning('Invalid issue search result, skip: %s',
                                _source)
                continue
            formatted.append(IssueResult(**sr))
        return formatted
class PullRequestSearch(IssuePRSearch):
    """Search/index support for pull requests."""
    type_name = "pull"

    @classmethod
    def index_a_project_pr(cls, project):
        """Index every pull request targeting *project*."""
        rs = store.execute("select ticket_id from pullreq "
                           "where to_project=%s", project.id)
        for r, in rs:
            pr = PullRequest.get_by_proj_and_ticket(project.id, r)
            if pr:
                data = pr.as_dict()
                if data:
                    serial = "%s_%s" % (project.index_name, r)
                    cls.index_an_object(serial, data)

    @classmethod
    def format_search_result(cls, result):
        """Turn raw search hits into a list of PullResult objects.

        Hits missing a project id or ticket number are skipped; a missing
        highlight falls back to the empty string.
        """
        if not SearchEngine.check_result(result):
            return []
        formatted = []
        for r in result['hits']['hits']:
            _source = r['_source']
            try:
                hl_description = r['highlight']['description'][0]
            except (KeyError, IndexError):
                # FIX: was a bare `except:`; narrowed to the lookups that
                # can actually fail when a hit has no highlight fragment.
                logging.debug('No highlight for %s', _source)
                hl_description = ''
            sr = dict(
                ticket_number=_source.get('ticket_id'),
                project_id=_source.get('to_proj_id'),
                hl_description=hl_description,
            )
            if not sr['project_id'] or not sr['ticket_number']:
                # FIX: logging.warn is a deprecated alias of logging.warning;
                # also corrected the "Invaild" typo in the message.
                logging.warning(
                    'Invalid pullrequest search result, skip: %s', _source)
                continue
            formatted.append(PullResult(**sr))
        return formatted
class PullResult(object):
    """A single pull-request search hit, resolved to its backing ticket."""

    def __init__(self, project_id, ticket_number, hl_description):
        ticket = Ticket.get_by_projectid_and_ticketnumber(
            project_id, ticket_number)
        self.ticket = ticket
        self.ticket_project = CodeDoubanProject.get(ticket.project_id)
        self.author = User(ticket.author)
        self.ticket_url = ticket.url
        # Prefer the highlighted fragment; fall back to the raw description.
        self.hl_description = hl_description or ticket.description

    def snippet(self):
        """Return the (possibly highlighted) description, capped at 300 chars."""
        return trunc_utf8(self.hl_description, 300)
class IssueResult(object):
    """A single issue search hit."""

    def __init__(self, issue_id, description, hl_description):
        self.issue = ProjectIssue.get_by_issue_id(issue_id) if issue_id else None
        if self.issue and self.issue.description:
            # The stored issue's description wins over the indexed copy.
            description = self.issue.description
        self.description = description
        self.hl_description = hl_description or description

    def snippet(self):
        """Return the (possibly highlighted) description, capped at 300 chars."""
        return trunc_utf8(self.hl_description, 300)
|
# -*- coding: utf-8 -*-
import logging
from vilya.libs.store import store
from vilya.libs.text import trunc_utf8
from vilya.models.project_issue import ProjectIssue
from vilya.models.pull import PullRequest
from vilya.models.ticket import Ticket
from vilya.models.project import CodeDoubanProject
from vilya.models.user import User
from vilya.models.elastic.indexer import IndexEngine
from vilya.models.elastic.searcher import SearchEngine
class IssuePRSearch(object):
type_name = ''
@classmethod
def index_an_object(cls, serial, data):
return IndexEngine.create_a_index(cls.type_name, serial, data)
@classmethod
def search_a_phrase(cls, phrase, project_id=None, from_=0, size=20,
state=None, sort_data=None):
filter_list = []
if project_id:
if cls.type_name == "issue":
key = "project_id"
else:
key = "to_proj_id"
filter_list.append({
"term": {
key: project_id
}
})
if state:
filter_list.append({"term": {"state": state}})
if filter_list:
filter_data = {"and": filter_list}
else:
filter_data = None
highlight_data = {
"fields": {
"description": {"number_of_fragments": 0}
}
}
facets_data = {
"state": {
"terms": {
"field": "state"
}
}
}
result = SearchEngine.search_a_phrase(cls.type_name, phrase, from_,
size, sort_data=sort_data,
filter_data=filter_data,
highlight_data=highlight_data,
facets_data=facets_data)
return result
@classmethod
def index_a_project(cls, project):
IssueSearch.index_a_project_issue(project)
PullRequestSearch.index_a_project_pr(project)
@classmethod
def format_facets(cls, result):
if not SearchEngine.check_result(result):
return {}
formatted = dict(state=result['facets']['state']['terms'])
return formatted
class IssueSearch(IssuePRSearch):
type_name = "issue"
@classmethod
def index_a_project_issue(cls, project):
issues = ProjectIssue._get_issues_by_project_id(project.id)
for issue in issues:
data = issue.as_dict()
if data:
serial = "%s_%s" % (project.index_name, issue.number)
cls.index_an_object(serial, data)
@classmethod
def format_search_result(cls, result):
if not SearchEngine.check_result(result):
return []
formatted = []
result = result['hits']['hits']
for r in result:
_source = r['_source']
try:
hl_description = r['highlight']['description'][0]
except:
logging.debug('No highlight for %s', _source)
hl_description = ''
description = _source.get('description')
sr = dict(
issue_id=_source.get('issue_id'),
description=description if description else '',
hl_description=hl_description,
)
if not sr['issue_id']:
logging.warn('Invaild issue search result, skip: %s', _source)
continue
sr = IssueResult(**sr)
formatted.append(sr)
return formatted
class PullRequestSearch(IssuePRSearch):
type_name = "pull"
@classmethod
def index_a_project_pr(cls, project):
rs = store.execute("select ticket_id from pullreq "
"where to_project=%s", project.id)
for r, in rs:
pr = PullRequest.get_by_proj_and_ticket(project.id, r)
if pr:
data = pr.as_dict()
if data:
serial = "%s_%s" % (project.index_name, r)
cls.index_an_object(serial, data)
@classmethod
def format_search_result(cls, result):
if not SearchEngine.check_result(result):
return []
formatted = []
result = result['hits']['hits']
for r in result:
_source = r['_source']
try:
hl_description = r['highlight']['description'][0]
except:
logging.debug('No highlight for %s', _source)
hl_description = ''
sr = dict(
ticket_number=_source.get('ticket_id'),
project_id=_source.get('to_proj_id'),
hl_description=hl_description,
)
if not sr['project_id'] or not sr['ticket_number']:
logging.warn(
'Invaild pullrequest search result, skip: %s', _source)
continue
sr = PullResult(**sr)
formatted.append(sr)
return formatted
class PullResult(object):
def __init__(self, project_id, ticket_number, hl_description):
self.ticket = Ticket.get_by_projectid_and_ticketnumber(
project_id, ticket_number)
self.ticket_project = CodeDoubanProject.get(self.ticket.project_id)
self.author = User(self.ticket.author)
self.ticket_url = self.ticket.url
self.hl_description = hl_description if hl_description \
else self.ticket.description
def snippet(self):
desc = self.hl_description
return trunc_utf8(desc, 300)
class IssueResult(object):
def __init__(self, issue_id, description, hl_description):
self.issue = ProjectIssue.get_by_issue_id(issue_id) \
if issue_id else None
if self.issue and self.issue.description:
description = self.issue.description
self.description = description
self.hl_description = hl_description or description
def snippet(self):
desc = self.hl_description
return trunc_utf8(desc, 300)
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.962891
| 2
|
evaluate_tests.py
|
ClaytonNorthey92/hal-ci-example
| 0
|
6627326
|
import re
import subprocess
import sys
if __name__ == '__main__':
    # The tests run inside a VM, so their exit code is unreliable; instead we
    # capture the output to test_log.txt, let the run time out (expected --
    # the VM never exits on its own), then parse the log for the result line.
    # As the number of tests grows, the timeout will need to be increased.
    with open('test_log.txt', 'w') as log_file:
        try:
            # BUG FIX: the previous call passed '>' and 'test_log.txt' as
            # literal arguments to make -- shell redirection does not work
            # without shell=True. Redirect stdout/stderr explicitly instead.
            subprocess.run(['make', 'run-arm'], stdout=log_file,
                           stderr=subprocess.STDOUT, timeout=5)
        except subprocess.TimeoutExpired:
            # Expected: the VM keeps running; results are read from the log.
            pass

    with open('test_log.txt') as test_log:
        logs = test_log.read()

    print(logs)

    # Unity-style summary line, e.g. "12 Tests 0 Failures 1 Ignored".
    tests_results = re.search(
        r'([0-9]*) Tests ([0-9]*) Failures ([0-9]*) Ignored', logs)

    if tests_results is None:
        print('could not find test results line')
        sys.exit(1)

    failures = tests_results.group(2)
    print('found {} failures'.format(failures))
    sys.exit(0 if failures == '0' else 1)
|
import re
import subprocess
import sys
if __name__ == '__main__':
try:
# run the actual tests, this assumes the tests will run in less than 5 seconds.
# as the number of tests grows, the timeout will need to be increased
# this is done this way because the tests run in a VM, so they are hard to get the
# exit code from, so we timeout and read the logs to find the results
subprocess.run(['make', 'run-arm', '>', 'test_log.txt'], timeout=5)
except subprocess.TimeoutExpired as e:
# we are expecting the tests to timeout, ignore this error and get the status from the logs
pass
with open('test_log.txt') as test_log:
logs = test_log.read();
print(logs)
tests_results = re.search(r'([0-9]*) Tests ([0-9]*) Failures ([0-9]*) Ignored', logs)
if tests_results is None:
print('could not find test results line')
sys.exit(1)
failures = tests_results.group(2)
print('found {} failures'.format(failures))
if (failures == '0'):
sys.exit(0)
else:
sys.exit(1)
|
en
| 0.932886
|
# run the actual tests, this assumes the tests will run in less than 5 seconds. # as the number of tests grows, the timeout will need to be increased # this is done this way because the tests run in a VM, so they are hard to get the # exit code from, so we timeout and read the logs to find the results # we are expecting the tests to timeout, ignore this error and get the status from the logs
| 2.559559
| 3
|
examples/green-boxes.py
|
syreal17/ARENA-py
| 0
|
6627327
|
from arena import *
import random
import time
import sys
# Connect to the shared ARENA scene; all objects below are published there.
arena = Scene(host="arena.andrew.cmu.edu", realm="realm", scene="example")

# RGB green, applied to every box created below.
color = (0, 255, 0)

# more complex case: Create many boxes
# Running box counter (incremented before printing, so x-1 is the index).
x = 1

# Runs make_boxs every 500 ms on the arena task loop.
@arena.run_forever(interval_ms=500)
def make_boxs():
    global x
    # Create a bunch of green boxes drawn directly to screen
    # Random spot: x in [-5, 4], y in [0, 9], z in [-9, 0].
    position = (random.randrange(10)-5,
                random.randrange(10),
                -random.randrange(10))
    box = Box(
        position=position,
        material=Material(color=color)
    )
    arena.add_object(box)
    x = x + 1
    print("object " + str(x-1) + " at " + str(position))

# Blocks forever, driving the scheduled tasks.
arena.run_tasks()
|
from arena import *
import random
import time
import sys
arena = Scene(host="arena.andrew.cmu.edu", realm="realm", scene="example")
color = (0, 255, 0)
# more complex case: Create many boxes
x = 1
@arena.run_forever(interval_ms=500)
def make_boxs():
global x
# Create a bunch of green boxes drawn directly to screen
position = (random.randrange(10)-5,
random.randrange(10),
-random.randrange(10))
box = Box(
position=position,
material=Material(color=color)
)
arena.add_object(box)
x = x + 1
print("object " + str(x-1) + " at " + str(position))
arena.run_tasks()
|
en
| 0.820049
|
# more complex case: Create many boxes # Create a bunch of green boxes drawn directly to screen
| 2.715261
| 3
|
openpyexcel/pivot/tests/test_record.py
|
sciris/openpyexcel
| 2
|
6627328
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyexcel.packaging.manifest import Manifest
from openpyexcel.xml.functions import fromstring, tostring
from openpyexcel.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
    # Imported lazily so a broken ..record module fails the test that uses
    # it rather than breaking collection of the whole file.
    from ..record import Record
    return Record
class TestRecord:

    def test_ctor(self, Record, Number, Text, Index):
        """A record serialises its fields in the order they were given."""
        n = [Number(v=1), Number(v=25)]
        s = [Text(v="2014-03-24")]
        x = [Index(), Index(), Index()]
        fields = n + s + x
        field = Record(_fields=fields)
        xml = tostring(field.to_tree())
        expected = """
        <r>
          <n v="1"/>
          <n v="25"/>
          <s v="2014-03-24"/>
          <x v="0"/>
          <x v="0"/>
          <x v="0"/>
        </r>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, Record, Number, Text, Index):
        """Parsing preserves the (mixed) field order from the XML."""
        src = """
        <r>
          <n v="1"/>
          <x v="0"/>
          <s v="2014-03-24"/>
          <x v="0"/>
          <n v="25"/>
          <x v="0"/>
        </r>
        """
        node = fromstring(src)
        # FIX: removed unused locals n, s, x (dead copy/paste from test_ctor).
        fields = [
            Number(v=1),
            Index(),
            Text(v="2014-03-24"),
            Index(),
            Number(v=25),
            Index(),
        ]
        field = Record.from_tree(node)
        assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
    # Imported lazily so a broken ..record module fails the test that uses
    # it rather than breaking collection of the whole file.
    from ..record import RecordList
    return RecordList
class TestRecordList:

    def test_ctor(self, RecordList):
        """An empty record list serialises with count=0 and the main ns."""
        cache = RecordList()
        xml = tostring(cache.to_tree())
        expected = """
        <pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
          count="0" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, RecordList):
        """Parsing a bare pivotCacheRecords element yields an empty list."""
        src = """
        <pivotCacheRecords count="0" />
        """
        node = fromstring(src)
        cache = RecordList.from_tree(node)
        assert cache == RecordList()

    def test_write(self, RecordList):
        """_write registers the part in both the archive and the manifest."""
        out = BytesIO()
        archive = ZipFile(out, mode="w")
        manifest = Manifest()
        records = RecordList()
        # FIX: removed unused `xml = tostring(records.to_tree())` (dead code).
        records._write(archive, manifest)
        manifest.append(records)
        assert archive.namelist() == [records.path[1:]]
        assert manifest.find(records.mime_type)
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyexcel.packaging.manifest import Manifest
from openpyexcel.xml.functions import fromstring, tostring
from openpyexcel.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
from ..record import Record
return Record
class TestRecord:
def test_ctor(self, Record, Number, Text, Index):
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = n + s + x
field = Record(_fields=fields)
xml = tostring(field.to_tree())
expected = """
<r>
<n v="1"/>
<n v="25"/>
<s v="2014-03-24"/>
<x v="0"/>
<x v="0"/>
<x v="0"/>
</r>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Record, Number, Text, Index):
src = """
<r>
<n v="1"/>
<x v="0"/>
<s v="2014-03-24"/>
<x v="0"/>
<n v="25"/>
<x v="0"/>
</r>
"""
node = fromstring(src)
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = [
Number(v=1),
Index(),
Text(v="2014-03-24"),
Index(),
Number(v=25),
Index(),
]
field = Record.from_tree(node)
assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
from ..record import RecordList
return RecordList
class TestRecordList:
def test_ctor(self, RecordList):
cache = RecordList()
xml = tostring(cache.to_tree())
expected = """
<pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
count="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, RecordList):
src = """
<pivotCacheRecords count="0" />
"""
node = fromstring(src)
cache = RecordList.from_tree(node)
assert cache == RecordList()
def test_write(self, RecordList):
out = BytesIO()
archive = ZipFile(out, mode="w")
manifest = Manifest()
records = RecordList()
xml = tostring(records.to_tree())
records._write(archive, manifest)
manifest.append(records)
assert archive.namelist() == [records.path[1:]]
assert manifest.find(records.mime_type)
|
en
| 0.120074
|
# Copyright (c) 2010-2019 openpyexcel <r> <n v="1"/> <n v="25"/> <s v="2014-03-24"/> <x v="0"/> <x v="0"/> <x v="0"/> </r> <r> <n v="1"/> <x v="0"/> <s v="2014-03-24"/> <x v="0"/> <n v="25"/> <x v="0"/> </r> <pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" count="0" /> <pivotCacheRecords count="0" />
| 2.278285
| 2
|
ara/sanitizer.py
|
sparcs-kaist/new-ara-api
| 19
|
6627329
|
from urllib.parse import urlparse
import bleach
def sanitize(content):
    """Clean user-supplied HTML, keeping only whitelisted tags/attributes.

    ``src`` attributes are additionally restricted to relative URLs or a
    small set of trusted hosts; all other attributes are checked against a
    per-tag whitelist. Returns the cleaned, link-ified HTML string.
    """
    # Per-tag attribute whitelist ('src' is handled separately above).
    tag_attrs = {
        'a': ['href', 'title', 'data-bookmark'],
        'abbr': ['title'],
        'acronym': ['title'],
        'ol': ['start'],
        'img': ['width', 'height', 'alt', 'title', 'data-attachment'],
        'iframe': ['width', 'height', 'allowfullscreen'],
        'video': ['controls', 'width', 'height', 'allowfullscreen',
                  'preload', 'poster'],
        'audio': ['controls', 'preload'],
    }

    def _allowed_attributes(tag, name, value):
        if name in ['src']:
            p = urlparse(value)
            return (not p.netloc) or p.netloc.endswith(
                ('sparcs.org', 'kaist.ac.kr',
                 'sparcs-newara.s3.amazonaws.com',
                 'sparcs-newara-dev.s3.amazonaws.com'))
        # BUG FIX: the original had `return name in ['title'],` (trailing
        # comma) for abbr/acronym -- that returns a 1-tuple, which bleach
        # treats as truthy, so EVERY attribute was allowed on those tags.
        return name in tag_attrs.get(tag, ())

    allowed_tags = bleach.ALLOWED_TAGS \
        + ['p', 'pre', 'span', 'h1', 'h2', 'h3', 'br', 'hr', 's', 'u', 'ol'] \
        + ['img', 'iframe', 'video', 'audio', 'source'] \
        + ['sub', 'sup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul']
    return bleach.linkify(bleach.clean(content, tags=allowed_tags,
                                       attributes=_allowed_attributes))
|
from urllib.parse import urlparse
import bleach
def sanitize(content):
def _allowed_attributes(tag, name, value):
if name in ['src']:
p = urlparse(value)
return (not p.netloc) or p.netloc.endswith(('sparcs.org', 'kaist.ac.kr',
'sparcs-newara.s3.amazonaws.com', 'sparcs-newara-dev.s3.amazonaws.com'))
if tag == 'a': return name in ['href', 'title', 'data-bookmark']
if tag == 'abbr': return name in ['title'],
if tag == 'acronym': return name in ['title'],
if tag == 'ol': return name in ['start']
if tag == 'img': return name in ['width', 'height', 'alt', 'title', 'data-attachment']
if tag == 'iframe': return name in ['width', 'height', 'allowfullscreen']
if tag == 'video': return name in ['controls', 'width', 'height', 'allowfullscreen', 'preload', 'poster']
if tag == 'audio': return name in ['controls', 'preload']
return False
allowed_tags = bleach.ALLOWED_TAGS \
+ ['p', 'pre', 'span', 'h1', 'h2', 'h3', 'br', 'hr', 's', 'u', 'ol'] \
+ ['img', 'iframe', 'video', 'audio', 'source'] \
+ ['sub', 'sup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul']
return bleach.linkify(bleach.clean(content, tags=allowed_tags, attributes=_allowed_attributes))
|
none
| 1
| 2.683251
| 3
|
|
openrec/modules/interactions/rnn_softmax.py
|
csmithchicago/openrec
| 0
|
6627330
|
import tensorflow as tf
def RNNSoftmax(
    seq_item_vec,
    total_items,
    seq_len,
    num_units,
    cell_type="gru",
    softmax_samples=None,
    label=None,
    train=True,
    subgraph=None,
    scope=None,
):
    """Sequence encoder with a softmax head over a fixed item vocabulary.

    Encodes ``seq_item_vec`` (per-timestep item vectors) with a GRU or LSTM
    and projects the final RNN state onto ``total_items`` logits.

    Args:
        seq_item_vec: batch of per-timestep item vectors fed to the RNN.
        total_items: number of softmax classes (output vocabulary size).
        seq_len: per-example valid sequence lengths for dynamic_rnn.
        num_units: RNN hidden size.
        cell_type: "gru" or "lstm"; anything else fails the assert below.
        softmax_samples: if not None (and train=True), use sampled softmax
            with this many sampled classes instead of the full softmax.
        label: sparse class labels; used only when train=True.
        train: True registers a mean loss on *subgraph*; False registers
            the (squeezed) logits as a global output.
        subgraph: openrec subgraph used for registering loss/output.
        scope: optional variable-scope name (defaults to "RNNSoftmax").
    """
    with tf.variable_scope(scope, default_name="RNNSoftmax", reuse=tf.AUTO_REUSE):
        if cell_type == "gru":
            rnn_cell = tf.nn.rnn_cell.GRUCell(num_units)
        elif cell_type == "lstm":
            rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units)
        else:
            assert False, "Invalid RNN cell type."
        # Only the final state is used; per-step outputs are discarded.
        _, rnn_state = tf.nn.dynamic_rnn(
            cell=rnn_cell,
            inputs=seq_item_vec,
            sequence_length=seq_len,
            dtype=tf.float32,
        )
        # Output projection: one row of `weight` (and one bias) per item.
        weight = tf.get_variable(
            "weights",
            shape=[total_items, num_units],
            trainable=True,
            initializer=tf.contrib.layers.xavier_initializer(),
        )
        bias = tf.get_variable(
            "biases",
            shape=[total_items],
            trainable=True,
            initializer=tf.zeros_initializer(),
        )
        if train:
            if softmax_samples is not None:
                # NOTE(review): the public TF1 API is tf.nn.sampled_softmax_loss
                # with keyword names `weights`/`biases`; confirm this call
                # matches the TF version this project pins.
                loss = tf.nn.sampled_sparse_softmax_loss(
                    weight=weight,
                    bias=bias,
                    num_sampled=softmax_samples,
                    num_classes=total_items,
                    labels=label,
                    inputs=rnn_state,
                )
            else:
                logits = tf.matmul(rnn_state, tf.transpose(weight)) + bias
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=label, logits=logits
                )
            subgraph.register_global_loss(tf.reduce_mean(loss))
        else:
            logits = tf.matmul(rnn_state, tf.transpose(weight)) + bias
            # tf.squeeze drops size-1 dims -- presumably serving uses batch
            # size 1 here; TODO confirm against the inference caller.
            subgraph.register_global_output(tf.squeeze(logits))
|
import tensorflow as tf
def RNNSoftmax(
seq_item_vec,
total_items,
seq_len,
num_units,
cell_type="gru",
softmax_samples=None,
label=None,
train=True,
subgraph=None,
scope=None,
):
with tf.variable_scope(scope, default_name="RNNSoftmax", reuse=tf.AUTO_REUSE):
if cell_type == "gru":
rnn_cell = tf.nn.rnn_cell.GRUCell(num_units)
elif cell_type == "lstm":
rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units)
else:
assert False, "Invalid RNN cell type."
_, rnn_state = tf.nn.dynamic_rnn(
cell=rnn_cell,
inputs=seq_item_vec,
sequence_length=seq_len,
dtype=tf.float32,
)
weight = tf.get_variable(
"weights",
shape=[total_items, num_units],
trainable=True,
initializer=tf.contrib.layers.xavier_initializer(),
)
bias = tf.get_variable(
"biases",
shape=[total_items],
trainable=True,
initializer=tf.zeros_initializer(),
)
if train:
if softmax_samples is not None:
loss = tf.nn.sampled_sparse_softmax_loss(
weight=weight,
bias=bias,
num_sampled=softmax_samples,
num_classes=total_items,
labels=label,
inputs=rnn_state,
)
else:
logits = tf.matmul(rnn_state, tf.transpose(weight)) + bias
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label, logits=logits
)
subgraph.register_global_loss(tf.reduce_mean(loss))
else:
logits = tf.matmul(rnn_state, tf.transpose(weight)) + bias
subgraph.register_global_output(tf.squeeze(logits))
|
none
| 1
| 2.289208
| 2
|
|
feed/serializers.py
|
ThusharaX/mumbleapi
| 187
|
6627331
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Mumble
from users.serializers import UserProfileSerializer, UserSerializer
class MumbleSerializer(serializers.ModelSerializer):
    """Serialises a Mumble post plus its author, origin and voter lists."""
    user = serializers.SerializerMethodField(read_only=True)
    original_mumble = serializers.SerializerMethodField(read_only=True)
    up_voters = serializers.SerializerMethodField(read_only=True)
    down_voters = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Mumble
        fields = '__all__'

    def get_user(self, obj):
        """Return the author's serialised profile."""
        serializer = UserProfileSerializer(obj.user.userprofile, many=False)
        return serializer.data

    def get_original_mumble(self, obj):
        """Return the re-mumbled (shared) post, or None for originals."""
        original = obj.remumble
        # FIX: `!= None` replaced with the idiomatic identity test.
        if original is None:
            return None
        return MumbleSerializer(original, many=False).data

    def _get_voters(self, obj, vote_value):
        """Return serialised users whose vote on *obj* equals *vote_value*."""
        voter_ids = obj.votes.through.objects.filter(
            mumble=obj, value=vote_value).values_list('user', flat=True)
        voter_objects = obj.votes.filter(id__in=voter_ids)
        return UserSerializer(voter_objects, many=True).data

    def get_up_voters(self, obj):
        # Users that upvoted the post.
        return self._get_voters(obj, 'upvote')

    def get_down_voters(self, obj):
        # Users that downvoted the post (the original comment said
        # "upvoted" -- a copy/paste slip).
        return self._get_voters(obj, 'downvote')
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Mumble
from users.serializers import UserProfileSerializer, UserSerializer
class MumbleSerializer(serializers.ModelSerializer):
user = serializers.SerializerMethodField(read_only=True)
original_mumble = serializers.SerializerMethodField(read_only=True)
up_voters = serializers.SerializerMethodField(read_only=True)
down_voters = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Mumble
fields = '__all__'
def get_user(self, obj):
user = obj.user.userprofile
serializer = UserProfileSerializer(user, many=False)
return serializer.data
def get_original_mumble(self, obj):
original = obj.remumble
if original != None:
serializer = MumbleSerializer(original, many=False)
return serializer.data
else:
return None
def get_up_voters(self, obj):
# Returns list of users that upvoted post
voters = obj.votes.through.objects.filter(mumble=obj, value='upvote').values_list('user', flat=True)
voter_objects = obj.votes.filter(id__in=voters)
serializer = UserSerializer(voter_objects, many=True)
return serializer.data
def get_down_voters(self, obj):
# Returns list of users that upvoted post
voters = obj.votes.through.objects.filter(mumble=obj, value='downvote').values_list('user', flat=True)
voter_objects = obj.votes.filter(id__in=voters)
serializer = UserSerializer(voter_objects, many=True)
return serializer.data
|
en
| 0.92259
|
# Returns list of users that upvoted post # Returns list of users that upvoted post
| 2.116683
| 2
|
pandas/tests/extension/base/ops.py
|
ingwinlu/pandas
| 1
|
6627332
|
import pytest
import operator
import pandas as pd
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
    """Helpers shared by the arithmetic and comparison op test suites."""

    def get_op_from_name(self, op_name):
        """Map a dunder name like '__add__' / '__radd__' to a callable."""
        short_opname = op_name.strip('_')
        try:
            return getattr(operator, short_opname)
        except AttributeError:
            # An 'r'-prefixed name is a reflected op: swap the operands.
            rop = getattr(operator, short_opname[1:])

            def reflected(x, y):
                return rop(y, x)

            return reflected

    def check_opname(self, s, op_name, other, exc=Exception):
        """Resolve *op_name* and delegate to _check_op."""
        self._check_op(s, self.get_op_from_name(op_name), other, op_name, exc)

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        """Apply *op* and compare against Series.combine, or expect *exc*."""
        if exc is not None:
            with pytest.raises(exc):
                op(s, other)
            return
        result = op(s, other)
        expected = s.combine(other, op)
        self.assert_series_equal(result, expected)

    def _check_divmod_op(self, s, op, other, exc=Exception):
        """Check divmod/rdivmod; the pair of results is compared separately."""
        if exc is not None:
            with pytest.raises(exc):
                divmod(s, other)
            return
        result_div, result_mod = op(s, other)
        if op is divmod:
            expected_div, expected_mod = s // other, s % other
        else:
            expected_div, expected_mod = other // s, other % s
        self.assert_series_equal(result_div, expected_div)
        self.assert_series_equal(result_mod, expected_mod)
class BaseArithmeticOpsTests(BaseOpsUtil):
    """Various Series and DataFrame arithmetic ops methods.

    Subclasses supporting various ops should set the class variables
    to indicate that they support ops of that kind

    * series_scalar_exc = TypeError
    * frame_scalar_exc = TypeError
    * series_array_exc = TypeError
    * divmod_exc = TypeError
    """
    # Expected exception per op family; subclasses that support an op
    # override the matching attribute with None.
    series_scalar_exc = TypeError
    frame_scalar_exc = TypeError
    series_array_exc = TypeError
    divmod_exc = TypeError

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # series & scalar
        op_name = all_arithmetic_operators
        s = pd.Series(data)
        self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)

    # DataFrame path needs _reduce on the extension array, which base
    # implementations don't provide yet.
    @pytest.mark.xfail(run=False, reason="_reduce needs implementation",
                       strict=True)
    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
        # frame & scalar
        op_name = all_arithmetic_operators
        df = pd.DataFrame({'A': data})
        self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)

    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        # ndarray & other series
        op_name = all_arithmetic_operators
        s = pd.Series(data)
        self.check_opname(s, op_name, pd.Series([s.iloc[0]] * len(s)),
                          exc=self.series_array_exc)

    def test_divmod(self, data):
        # Both divmod(series, scalar) and the reflected rdivmod form.
        s = pd.Series(data)
        self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc)
        self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc)

    def test_divmod_series_array(self, data):
        s = pd.Series(data)
        self._check_divmod_op(s, divmod, data)

    def test_add_series_with_extension_array(self, data):
        # Series + raw extension array should match array + array.
        s = pd.Series(data)
        result = s + data
        expected = pd.Series(data + data)
        self.assert_series_equal(result, expected)

    def test_error(self, data, all_arithmetic_operators):
        # invalid ops: the raw extension array has no dunder op attributes.
        op_name = all_arithmetic_operators
        with pytest.raises(AttributeError):
            getattr(data, op_name)
class BaseComparisonOpsTests(BaseOpsUtil):
    """Various Series and DataFrame comparison ops methods."""

    def _compare_other(self, s, data, op_name, other):
        op = self.get_op_from_name(op_name)
        # The raw extension array never implements comparisons itself.
        assert getattr(data, op_name)(other) is NotImplemented
        if op_name == '__eq__':
            # Nothing compares equal to the scalar/array under test.
            assert not op(s, other).all()
        elif op_name == '__ne__':
            # ...and therefore everything compares not-equal.
            assert op(s, other).all()
        else:
            # Ordering comparisons raise when dispatched through a Series.
            series = pd.Series(data)
            with pytest.raises(TypeError):
                op(series, other)

    def test_compare_scalar(self, data, all_compare_operators):
        self._compare_other(pd.Series(data), data, all_compare_operators, 0)

    def test_compare_array(self, data, all_compare_operators):
        other = pd.Series([data[0]] * len(data))
        self._compare_other(pd.Series(data), data, all_compare_operators,
                            other)
|
import pytest
import operator
import pandas as pd
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
def get_op_from_name(self, op_name):
short_opname = op_name.strip('_')
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
def check_opname(self, s, op_name, other, exc=Exception):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, op_name, exc)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=Exception):
# divmod has multiple return values, so check separatly
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
class BaseArithmeticOpsTests(BaseOpsUtil):
"""Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc = TypeError
frame_scalar_exc = TypeError
series_array_exc = TypeError
divmod_exc = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)
@pytest.mark.xfail(run=False, reason="_reduce needs implementation",
strict=True)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
df = pd.DataFrame({'A': data})
self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, pd.Series([s.iloc[0]] * len(s)),
exc=self.series_array_exc)
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc)
self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc)
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data)
def test_add_series_with_extension_array(self, data):
s = pd.Series(data)
result = s + data
expected = pd.Series(data + data)
self.assert_series_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op_name = all_arithmetic_operators
with pytest.raises(AttributeError):
getattr(data, op_name)
class BaseComparisonOpsTests(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == '__eq__':
assert getattr(data, op_name)(other) is NotImplemented
assert not op(s, other).all()
elif op_name == '__ne__':
assert getattr(data, op_name)(other) is NotImplemented
assert op(s, other).all()
else:
# array
assert getattr(data, op_name)(other) is NotImplemented
# series
s = pd.Series(data)
with pytest.raises(TypeError):
op(s, other)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, 0)
def test_compare_array(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
other = pd.Series([data[0]] * len(data))
self._compare_other(s, data, op_name, other)
|
en
| 0.77374
|
# Assume it is the reverse operator # divmod has multiple return values, so check separatly Various Series and DataFrame arithmetic ops methods. Subclasses supporting various ops should set the class variables to indicate that they support ops of that kind * series_scalar_exc = TypeError * frame_scalar_exc = TypeError * series_array_exc = TypeError * divmod_exc = TypeError # series & scalar # frame & scalar # ndarray & other series # invalid ops Various Series and DataFrame comparison ops methods. # array # series
| 2.690416
| 3
|
battle/battle_functions.py
|
EfrainRG/objectpokemon
| 1
|
6627333
|
import importlib.util
import random
# from importlib.machinery import SourceFileLoader
def load_pokemon_from_file(filepath):
spec = importlib.util.spec_from_file_location("", filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# return SourceFileLoader("", filepath).load_module()
return module
def pokemon_alive(poke1, poke2):
return poke1.is_alive() and poke2.is_alive()
def simulate_battle(poke1, poke2):
# Choose a random pokemon to start
if random.randint(0, 1) == 1:
attacking_poke, defending_poke = poke1, poke2
else:
attacking_poke, defending_poke = poke2, poke1
print("Pokemon", attacking_poke.get_name(), "gets to start")
while pokemon_alive(poke1, poke2):
move = attacking_poke.choose_move(defending_poke)
print(attacking_poke.get_name(), "chooses", move.get_name())
inflicted = defending_poke.inflict(move, attacking_poke)
print(attacking_poke.get_name(), "inflicts", inflicted, "damage on", defending_poke.get_name())
attacking_poke, defending_poke = defending_poke, attacking_poke
if attacking_poke.is_alive():
winner = attacking_poke
else:
winner = defending_poke
print("The winner is", winner.get_name(), "with", winner.hp, "HP left")
return winner
|
import importlib.util
import random
# from importlib.machinery import SourceFileLoader
def load_pokemon_from_file(filepath):
spec = importlib.util.spec_from_file_location("", filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# return SourceFileLoader("", filepath).load_module()
return module
def pokemon_alive(poke1, poke2):
return poke1.is_alive() and poke2.is_alive()
def simulate_battle(poke1, poke2):
# Choose a random pokemon to start
if random.randint(0, 1) == 1:
attacking_poke, defending_poke = poke1, poke2
else:
attacking_poke, defending_poke = poke2, poke1
print("Pokemon", attacking_poke.get_name(), "gets to start")
while pokemon_alive(poke1, poke2):
move = attacking_poke.choose_move(defending_poke)
print(attacking_poke.get_name(), "chooses", move.get_name())
inflicted = defending_poke.inflict(move, attacking_poke)
print(attacking_poke.get_name(), "inflicts", inflicted, "damage on", defending_poke.get_name())
attacking_poke, defending_poke = defending_poke, attacking_poke
if attacking_poke.is_alive():
winner = attacking_poke
else:
winner = defending_poke
print("The winner is", winner.get_name(), "with", winner.hp, "HP left")
return winner
|
en
| 0.31695
|
# from importlib.machinery import SourceFileLoader # return SourceFileLoader("", filepath).load_module() # Choose a random pokemon to start
| 3.302449
| 3
|
examples/demo.py
|
orest-d/liquer-reports
| 0
|
6627334
|
<reponame>orest-d/liquer-reports
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
from lqreports.segments import *
if __name__ == '__main__':
r = Register()
doc = (
VuetifyDashboard(r)
.with_navigation_drawer()
.with_app_bar(color="primary")
.with_plotly()
.with_panels()
)
r.home_panel.add("<h1>Home</h1>")
doc.panel("panel1", fluid=True).add(
"<v-row><v-col><h1>Panel 1</h1>Hello {{what}}!</v-col></v-row>"
)
doc.panel("panel2").add("<h1>Panel 2</h1>")
doc.panel("panel3").add("""<plotly-chart :chart="chart1" style="min-height:800px;"></plotly-chart>""")
doc.panel("panel4").chart("chart2", value=dict(
uuid= "12345",
traces= [dict(y=[1,2,3], line=dict(color="blue", width=5, shape="line"))],
layout= dict(title='Chart 2', xaxis=dict(title="X Axis"), yaxis=dict(title="Y Axis")),
config= dict()
))
r.vuetify_script.add_method("update_chart2", """
function(){
this.chart2.traces[0].x=[1,2,3,4];
this.chart2.traces[0].y=[10,2,30,4];
}
""")
r.panel4.button("Update chart 2", click="update_chart2()")
doc.drawer_item("Home", icon="mdi-home", panel="home_panel")
doc.drawer_item("Google href", href="http://google.com")
doc.drawer_item("Google to", to="http://google.com")
doc.add_bar_button("Hello", click="this.alert('Hello')", color="primary")
doc.add_bar_menu(
"Second",
[
dict(title="first", click="this.alert('Hello1')"),
dict(title="second", click="this.alert('Hello2')"),
dict(title="Panel 1", panel="panel1"),
dict(title="Panel 2", panel="panel2"),
dict(title="Panel 3 (chart 1)", panel="panel3"),
dict(title="Panel 4 (chart 2)", panel="panel4"),
],
)
doc.add_bar_spacer()
doc.add_bar_button(None, icon="mdi-magnify", click="this.alert('magnify')")
# doc.with_dataframe(pd.DataFrame(dict(a=[1,2,3],b=[4,5,6])))
doc.with_dataframe(pd.read_csv("test.csv")).with_panel_row_action("panel2")
#r.vuetify_script.add_data("myfilter",False)
r.vuetify_script.add_method("update_filter", """
function(){
console.log("Update filter",this.myfilter);
if (this.myfilter){
this.dataframe_data = this.dataframe.data.filter(function(x){
return ((x[1]>2000) && (x[1]<2005));
});
}
else{
this.dataframe_data = this.dataframe.data;
}
}
""")
r.vuetify_script.add_watch("myfilter", "function(new_value,old_value){console.log('watch',new_value,old_value);this.update_filter();}")
r.panel1.switch("myfilter","My filter", value=False)
r.panel1.dataframe_view()
r.panel1.add("""{{selected_row}}""")
r.panel2.add("""<h2>Selected</h2>{{selected_row}}""")
r.panel2.row_detail()
plt.plot([0,1],[0,1])
r.panel2.figure(plt.gcf())
r.panel1.liquer_logo()
# r.app.add("<v-main><v-container>Hello {{what}}!</v-container></v-main>")
# r.scripts.add(VuetifyScript(r))
r.vuetify_script.add_data("to_greet", "WORLD")
r.vuetify_script.add_data("chart1", dict(
uuid= "1234",
traces= [
{
"y": [0,1,2],
"line": {
"color": "#000000",
"width": 4,
"shape": "line"
}
}
],
layout={
"title":'Chart 1',
"xaxis": {
"title": 'xaxis title'
},
"yaxis": {
"title": 'yaxis title'
}
},
config={
"responsive":True
}
))
r.vuetify_script.add_computed(
"what", "return '*'+this.to_greet+'*';", "this.to_greet=value;"
)
r.vuetify_script.add_created("this.to_greet='me';")
# doc.register.header.add_resource("vuetify_css")
print(doc.render(RenderContext(link_type=LinkType.LINK)))
|
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
from lqreports.segments import *
if __name__ == '__main__':
r = Register()
doc = (
VuetifyDashboard(r)
.with_navigation_drawer()
.with_app_bar(color="primary")
.with_plotly()
.with_panels()
)
r.home_panel.add("<h1>Home</h1>")
doc.panel("panel1", fluid=True).add(
"<v-row><v-col><h1>Panel 1</h1>Hello {{what}}!</v-col></v-row>"
)
doc.panel("panel2").add("<h1>Panel 2</h1>")
doc.panel("panel3").add("""<plotly-chart :chart="chart1" style="min-height:800px;"></plotly-chart>""")
doc.panel("panel4").chart("chart2", value=dict(
uuid= "12345",
traces= [dict(y=[1,2,3], line=dict(color="blue", width=5, shape="line"))],
layout= dict(title='Chart 2', xaxis=dict(title="X Axis"), yaxis=dict(title="Y Axis")),
config= dict()
))
r.vuetify_script.add_method("update_chart2", """
function(){
this.chart2.traces[0].x=[1,2,3,4];
this.chart2.traces[0].y=[10,2,30,4];
}
""")
r.panel4.button("Update chart 2", click="update_chart2()")
doc.drawer_item("Home", icon="mdi-home", panel="home_panel")
doc.drawer_item("Google href", href="http://google.com")
doc.drawer_item("Google to", to="http://google.com")
doc.add_bar_button("Hello", click="this.alert('Hello')", color="primary")
doc.add_bar_menu(
"Second",
[
dict(title="first", click="this.alert('Hello1')"),
dict(title="second", click="this.alert('Hello2')"),
dict(title="Panel 1", panel="panel1"),
dict(title="Panel 2", panel="panel2"),
dict(title="Panel 3 (chart 1)", panel="panel3"),
dict(title="Panel 4 (chart 2)", panel="panel4"),
],
)
doc.add_bar_spacer()
doc.add_bar_button(None, icon="mdi-magnify", click="this.alert('magnify')")
# doc.with_dataframe(pd.DataFrame(dict(a=[1,2,3],b=[4,5,6])))
doc.with_dataframe(pd.read_csv("test.csv")).with_panel_row_action("panel2")
#r.vuetify_script.add_data("myfilter",False)
r.vuetify_script.add_method("update_filter", """
function(){
console.log("Update filter",this.myfilter);
if (this.myfilter){
this.dataframe_data = this.dataframe.data.filter(function(x){
return ((x[1]>2000) && (x[1]<2005));
});
}
else{
this.dataframe_data = this.dataframe.data;
}
}
""")
r.vuetify_script.add_watch("myfilter", "function(new_value,old_value){console.log('watch',new_value,old_value);this.update_filter();}")
r.panel1.switch("myfilter","My filter", value=False)
r.panel1.dataframe_view()
r.panel1.add("""{{selected_row}}""")
r.panel2.add("""<h2>Selected</h2>{{selected_row}}""")
r.panel2.row_detail()
plt.plot([0,1],[0,1])
r.panel2.figure(plt.gcf())
r.panel1.liquer_logo()
# r.app.add("<v-main><v-container>Hello {{what}}!</v-container></v-main>")
# r.scripts.add(VuetifyScript(r))
r.vuetify_script.add_data("to_greet", "WORLD")
r.vuetify_script.add_data("chart1", dict(
uuid= "1234",
traces= [
{
"y": [0,1,2],
"line": {
"color": "#000000",
"width": 4,
"shape": "line"
}
}
],
layout={
"title":'Chart 1',
"xaxis": {
"title": 'xaxis title'
},
"yaxis": {
"title": 'yaxis title'
}
},
config={
"responsive":True
}
))
r.vuetify_script.add_computed(
"what", "return '*'+this.to_greet+'*';", "this.to_greet=value;"
)
r.vuetify_script.add_created("this.to_greet='me';")
# doc.register.header.add_resource("vuetify_css")
print(doc.render(RenderContext(link_type=LinkType.LINK)))
|
en
| 0.167958
|
<plotly-chart :chart="chart1" style="min-height:800px;"></plotly-chart> function(){ this.chart2.traces[0].x=[1,2,3,4]; this.chart2.traces[0].y=[10,2,30,4]; } # doc.with_dataframe(pd.DataFrame(dict(a=[1,2,3],b=[4,5,6]))) #r.vuetify_script.add_data("myfilter",False) function(){ console.log("Update filter",this.myfilter); if (this.myfilter){ this.dataframe_data = this.dataframe.data.filter(function(x){ return ((x[1]>2000) && (x[1]<2005)); }); } else{ this.dataframe_data = this.dataframe.data; } } {{selected_row}} <h2>Selected</h2>{{selected_row}} # r.app.add("<v-main><v-container>Hello {{what}}!</v-container></v-main>") # r.scripts.add(VuetifyScript(r)) # doc.register.header.add_resource("vuetify_css")
| 2.228455
| 2
|
test/test_config_preparation.py
|
rinrinne/aws-adfs
| 0
|
6627335
|
import mock
from aws_adfs import prepare
class TestConfigPreparation:
def test_when_there_is_no_profile_use_default_values(self):
# given profile to read the configuration doesn't exist
not_existing_profile = 'not_existing_profile'
prepare.configparser = mock.Mock()
config_without_non_existing_profile = mock.Mock()
prepare.configparser.RawConfigParser = mock.Mock(return_value=config_without_non_existing_profile)
config_without_non_existing_profile.has_section = mock.Mock(return_value=False)
# and defaults are setup as follows
default_ssl_config = True
default_adfs_ca_bundle = None
default_region = 'default_region'
default_adfs_host = 'default_adfs_host'
default_output_format = 'default_output_format'
default_provider_id = 'default_provider_id'
default_s3_signature_version = None
default_session_duration = 3600
# when configuration is prepared for not existing profile
adfs_config = prepare.get_prepared_config(
not_existing_profile,
default_region,
default_ssl_config,
default_adfs_ca_bundle,
default_adfs_host,
default_output_format,
default_provider_id,
default_s3_signature_version,
default_session_duration,
)
# then resolved config contains defaults values
assert default_ssl_config == adfs_config.ssl_verification
assert default_adfs_ca_bundle == adfs_config.adfs_ca_bundle
assert default_region == adfs_config.region
assert default_adfs_host == adfs_config.adfs_host
assert default_output_format == adfs_config.output_format
assert default_session_duration == adfs_config.session_duration
def test_when_the_profile_exists_but_lacks_ssl_verification_use_default_value(self):
# given profile to read the configuration exists
empty_profile = 'empty_profile'
prepare.configparser = mock.Mock()
config_with_the_empty_profile = mock.Mock()
prepare.configparser.RawConfigParser = mock.Mock(return_value=config_with_the_empty_profile)
config_with_the_empty_profile.has_section = mock.Mock(return_value=True)
# and no options are stored in the profile
config_with_the_empty_profile.has_option = mock.Mock(return_value=False)
# and defaults are setup as follows
default_ssl_config = True
default_adfs_ca_bundle = None
irrelevant_region = 'irrelevant_region'
irrelevant_adfs_host = 'irrelevant_adfs_host'
irrelevant_output_format = 'irrelevant_output_format'
irrelevant_provider_id = 'irrelevant_provider_id'
irrelevant_s3_signature_version = 'irrelevant_s3_signature_version'
irrelevant_session_duration = 'irrelevant_session_duration'
# when configuration is prepared for existing profile
adfs_config = prepare.get_prepared_config(
empty_profile,
irrelevant_region,
default_ssl_config,
default_adfs_ca_bundle,
irrelevant_adfs_host,
irrelevant_output_format,
irrelevant_provider_id,
irrelevant_s3_signature_version,
irrelevant_session_duration,
)
# then resolved ssl verification holds the default value
assert default_ssl_config == adfs_config.ssl_verification
assert default_adfs_ca_bundle == adfs_config.adfs_ca_bundle
|
import mock
from aws_adfs import prepare
class TestConfigPreparation:
def test_when_there_is_no_profile_use_default_values(self):
# given profile to read the configuration doesn't exist
not_existing_profile = 'not_existing_profile'
prepare.configparser = mock.Mock()
config_without_non_existing_profile = mock.Mock()
prepare.configparser.RawConfigParser = mock.Mock(return_value=config_without_non_existing_profile)
config_without_non_existing_profile.has_section = mock.Mock(return_value=False)
# and defaults are setup as follows
default_ssl_config = True
default_adfs_ca_bundle = None
default_region = 'default_region'
default_adfs_host = 'default_adfs_host'
default_output_format = 'default_output_format'
default_provider_id = 'default_provider_id'
default_s3_signature_version = None
default_session_duration = 3600
# when configuration is prepared for not existing profile
adfs_config = prepare.get_prepared_config(
not_existing_profile,
default_region,
default_ssl_config,
default_adfs_ca_bundle,
default_adfs_host,
default_output_format,
default_provider_id,
default_s3_signature_version,
default_session_duration,
)
# then resolved config contains defaults values
assert default_ssl_config == adfs_config.ssl_verification
assert default_adfs_ca_bundle == adfs_config.adfs_ca_bundle
assert default_region == adfs_config.region
assert default_adfs_host == adfs_config.adfs_host
assert default_output_format == adfs_config.output_format
assert default_session_duration == adfs_config.session_duration
def test_when_the_profile_exists_but_lacks_ssl_verification_use_default_value(self):
# given profile to read the configuration exists
empty_profile = 'empty_profile'
prepare.configparser = mock.Mock()
config_with_the_empty_profile = mock.Mock()
prepare.configparser.RawConfigParser = mock.Mock(return_value=config_with_the_empty_profile)
config_with_the_empty_profile.has_section = mock.Mock(return_value=True)
# and no options are stored in the profile
config_with_the_empty_profile.has_option = mock.Mock(return_value=False)
# and defaults are setup as follows
default_ssl_config = True
default_adfs_ca_bundle = None
irrelevant_region = 'irrelevant_region'
irrelevant_adfs_host = 'irrelevant_adfs_host'
irrelevant_output_format = 'irrelevant_output_format'
irrelevant_provider_id = 'irrelevant_provider_id'
irrelevant_s3_signature_version = 'irrelevant_s3_signature_version'
irrelevant_session_duration = 'irrelevant_session_duration'
# when configuration is prepared for existing profile
adfs_config = prepare.get_prepared_config(
empty_profile,
irrelevant_region,
default_ssl_config,
default_adfs_ca_bundle,
irrelevant_adfs_host,
irrelevant_output_format,
irrelevant_provider_id,
irrelevant_s3_signature_version,
irrelevant_session_duration,
)
# then resolved ssl verification holds the default value
assert default_ssl_config == adfs_config.ssl_verification
assert default_adfs_ca_bundle == adfs_config.adfs_ca_bundle
|
en
| 0.864178
|
# given profile to read the configuration doesn't exist # and defaults are setup as follows # when configuration is prepared for not existing profile # then resolved config contains defaults values # given profile to read the configuration exists # and no options are stored in the profile # and defaults are setup as follows # when configuration is prepared for existing profile # then resolved ssl verification holds the default value
| 2.582475
| 3
|
helpers/discretization.py
|
maryprimary/frg
| 0
|
6627336
|
<reponame>maryprimary/frg
"""布里渊区中patches相关的功能"""
from matplotlib import pyplot
import matplotlib.patches as patches
import matplotlib.path as path
def patches_visualize(pats, lsurface, show):
'''可视化patches对应的点和费米面
'''
pyplot.figure()
#绘制patches对应的点
xvals = []
yvals = []
for pnt in pats:
xvals.append(pnt.coord[0])
yvals.append(pnt.coord[1])
pyplot.scatter(xvals, yvals, c='g', lw=4)
#绘制费米面的线
for seg in lsurface:
if seg is None:
continue
xvals = [_pt.coord[0] for _pt in seg.ends]
yvals = [_pt.coord[1] for _pt in seg.ends]
pyplot.plot(xvals, yvals, c='k', lw=1)
if show == 'show':
pyplot.show()
else:
pyplot.savefig(show)
pyplot.close()
def district_visualize(ltris, lpatches, show):
'''可视化切分的效果\n
ltris是切分的小三角,lpathces是每个小三角对应的编号\n
show = 'window': 显示在窗口\n
其他: 保存为这个名字的图片
'''
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
pyplot.figure()
for tri, pidx in zip(ltris, lpatches):
vertex = [ver.coord for ver in tri.vertex] + [(0, 0)]
codes = [path.Path.LINETO] * len(vertex)
codes[0] = path.Path.MOVETO
codes[-1] = path.Path.CLOSEPOLY
rectp = patches.PathPatch(
path.Path(vertex, codes),
facecolor=colors[pidx % 8],
lw=0)
pyplot.gca().add_patch(rectp)
pyplot.gca().relim()
pyplot.gca().autoscale_view()
###
if show == 'show':
pyplot.show()
else:
pyplot.savefig(show)
pyplot.close()
def get_patch_edges(ltris, ladjs, lpats):
'''得到patch之间的边界'''
egs = []
tri2pat = dict(((tri, pat) for tri, pat in zip(ltris, lpats)))
for tri, adjs in zip(ltris, ladjs):
for eidx, adj in enumerate(adjs, 0):
if adj is None:
continue
if tri2pat[adj] > tri2pat[tri]:
egs.append(tri.edges[eidx])
return egs
def save_to(fname, lpatches):
'''保存patches,lpatches应该是对应好Rtriangles的顺序的\n
```没有直接把pidx放到Rtriabgles的attr里面,这个顺序要对应好```
'''
outf = open(fname, 'w')
#第一行记录长度
outf.write(str(len(lpatches)) + '\n')
for pidx in lpatches:
outf.write(str(pidx) + '\n')
def load_from(fname):
'''读取patches,注意对应好保存时候的顺序'''
inf = open(fname, 'r')
length = int(inf.readline())
lpatches = []
for _ in range(length):
lpatches.append(int(inf.readline()))
return lpatches
|
"""布里渊区中patches相关的功能"""
from matplotlib import pyplot
import matplotlib.patches as patches
import matplotlib.path as path
def patches_visualize(pats, lsurface, show):
'''可视化patches对应的点和费米面
'''
pyplot.figure()
#绘制patches对应的点
xvals = []
yvals = []
for pnt in pats:
xvals.append(pnt.coord[0])
yvals.append(pnt.coord[1])
pyplot.scatter(xvals, yvals, c='g', lw=4)
#绘制费米面的线
for seg in lsurface:
if seg is None:
continue
xvals = [_pt.coord[0] for _pt in seg.ends]
yvals = [_pt.coord[1] for _pt in seg.ends]
pyplot.plot(xvals, yvals, c='k', lw=1)
if show == 'show':
pyplot.show()
else:
pyplot.savefig(show)
pyplot.close()
def district_visualize(ltris, lpatches, show):
'''可视化切分的效果\n
ltris是切分的小三角,lpathces是每个小三角对应的编号\n
show = 'window': 显示在窗口\n
其他: 保存为这个名字的图片
'''
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
pyplot.figure()
for tri, pidx in zip(ltris, lpatches):
vertex = [ver.coord for ver in tri.vertex] + [(0, 0)]
codes = [path.Path.LINETO] * len(vertex)
codes[0] = path.Path.MOVETO
codes[-1] = path.Path.CLOSEPOLY
rectp = patches.PathPatch(
path.Path(vertex, codes),
facecolor=colors[pidx % 8],
lw=0)
pyplot.gca().add_patch(rectp)
pyplot.gca().relim()
pyplot.gca().autoscale_view()
###
if show == 'show':
pyplot.show()
else:
pyplot.savefig(show)
pyplot.close()
def get_patch_edges(ltris, ladjs, lpats):
'''得到patch之间的边界'''
egs = []
tri2pat = dict(((tri, pat) for tri, pat in zip(ltris, lpats)))
for tri, adjs in zip(ltris, ladjs):
for eidx, adj in enumerate(adjs, 0):
if adj is None:
continue
if tri2pat[adj] > tri2pat[tri]:
egs.append(tri.edges[eidx])
return egs
def save_to(fname, lpatches):
'''保存patches,lpatches应该是对应好Rtriangles的顺序的\n
```没有直接把pidx放到Rtriabgles的attr里面,这个顺序要对应好```
'''
outf = open(fname, 'w')
#第一行记录长度
outf.write(str(len(lpatches)) + '\n')
for pidx in lpatches:
outf.write(str(pidx) + '\n')
def load_from(fname):
'''读取patches,注意对应好保存时候的顺序'''
inf = open(fname, 'r')
length = int(inf.readline())
lpatches = []
for _ in range(length):
lpatches.append(int(inf.readline()))
return lpatches
|
zh
| 0.910823
|
布里渊区中patches相关的功能 可视化patches对应的点和费米面 #绘制patches对应的点 #绘制费米面的线 可视化切分的效果\n ltris是切分的小三角,lpathces是每个小三角对应的编号\n show = 'window': 显示在窗口\n 其他: 保存为这个名字的图片 ### 得到patch之间的边界 保存patches,lpatches应该是对应好Rtriangles的顺序的\n ```没有直接把pidx放到Rtriabgles的attr里面,这个顺序要对应好``` #第一行记录长度 读取patches,注意对应好保存时候的顺序
| 2.802918
| 3
|
settings.py
|
Reathe/Qubic
| 0
|
6627337
|
<filename>settings.py
window.fullscreen = False
window.borderless = False
window.exit_button.enabled = False
|
<filename>settings.py
window.fullscreen = False
window.borderless = False
window.exit_button.enabled = False
|
none
| 1
| 1.094182
| 1
|
|
nodes/1.x/python/String.ReplaceRegularExpression.py
|
andydandy74/ClockworkForDynamo
| 147
|
6627338
|
<reponame>andydandy74/ClockworkForDynamo<filename>nodes/1.x/python/String.ReplaceRegularExpression.py<gh_stars>100-1000
import clr
import re
if isinstance(IN[1], list): OUT = [IN[0].sub(IN[2],x) for x in IN[1]]
else: OUT = IN[0].sub(IN[2],IN[1])
|
import clr
import re
if isinstance(IN[1], list): OUT = [IN[0].sub(IN[2],x) for x in IN[1]]
else: OUT = IN[0].sub(IN[2],IN[1])
|
none
| 1
| 2.749288
| 3
|
|
trade_automation/td/td.py
|
cowen314/web-tools
| 0
|
6627339
|
<reponame>cowen314/web-tools<gh_stars>0
from urllib.parse import urlencode, unquote, parse_qs
import json
from pathlib import Path
import requests
from sys import exit
import websockets
import asyncio
import datetime
from typing import Dict
### AUTH
# See docs here for initial setup: https://developer.tdameritrade.com/content/simple-auth-local-apps
# This lib handles all of this (pretty much) automatically: https://github.com/areed1192/td-ameritrade-python-api
# Step 1: Create a TD Ameritrade app
# Step 2: Hit auth endpoint
# print(urlencode({"redirect_uri": "https://127.0.0.1", "client_id": ""})) # add client_key here for encode
# Step 3: Copy and decode auth code returned in `code` parameter
# a = unquote("", encoding='ascii', errors='strict') # authorization_code here for decode
# print(a)
# Step 4: hit the access token endpoint
### SETTING UP A STREAM
# Instructions here: https://developer.tdameritrade.com/content/streaming-data
# requests.request('GET', 'https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0')
class TDClient:
def __init__(self, refresh_token: str, client_id: str):
self._refresh_token = refresh_token
self._client_id = client_id
self._access_token, cerr = self._get_new_token()
if cerr:
raise ConnectionError(cerr)
def _get_new_token(self) -> (str, str):
params = {
"grant_type": "refresh_token",
"refresh_token": self._refresh_token,
"client_id": self._client_id
}
response = requests.post('https://api.tdameritrade.com/v1/oauth2/token', data=params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json()['access_token'], None
def _get_user_principles(self) -> (str, str):
qs_params = {
"fields": "streamerSubscriptionKeys,streamerConnectionInfo"
}
header_params = {
"authorization": "Bearer %s" % self._access_token
}
response = requests.request('GET', 'https://api.tdameritrade.com/v1/userprincipals', params=qs_params, headers=header_params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json()['access_token'], None
def open_stream(self):
self._get_user_principles()
# TODO open up a websocket, then blast a login message out through the websocket. That login message contains user principles data
def get_price_history(
self,
symbol: str,
start_date: datetime.datetime,
period_type: str = "day",
period: int = 1,
frequency_type: str = "minute",
frequency: int = 1,
need_ext_hours_data: bool = False
) -> (Dict, str):
"""
See https://developer.tdameritrade.com/price-history/apis/get/marketdata/%7Bsymbol%7D/pricehistory
Note: "frequency" is actually the time between samples
:return: JSON with candlestick data
"""
# NOTE the end date parameter is ignored here. Can add it later if needed.
qs_params = {
"period_type": period_type,
"period": period,
"frequencyType": frequency_type,
"frequency": frequency,
"startDate": datetime.datetime.timestamp(start_date), # TODO left off here
"needExtendedHoursData": need_ext_hours_data
}
header_params = {
"authorization": "Bearer %s" % self._access_token
}
response = requests.request('GET', 'https://api.tdameritrade.com/v1/marketdata/%s/pricehistory' % symbol, params=qs_params, headers=header_params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json(), None
def sample_hi_lo_auto(
client: TDClient,
symbol: str,
window_start_date: datetime.date=datetime.date.today(),
window_start_time: datetime.time=datetime.time(8, 30, 00),
window_duration: datetime.timedelta=datetime.timedelta(0, 30),
):
# capture the high and the low over some period
price_hist, err = client.get_price_history(
symbol,
frequency_type="minute",
frequency=1,
start_date=window_start_date,
period_type="day",
period=1
)
need_ext_hours_data: bool = False
if err:
raise ConnectionError(err)
high = None
low = None
for candlestick in price_hist["candles"]:
if high is None or candlestick["high"] > high:
high = candlestick["high"]
if low is None or candlestick["low"] < low:
low = candlestick["low"]
for candle in price_hist["candles"]:
print("%s : %s - %s" % (datetime.datetime.fromtimestamp(float(candle["datetime"]) / 1000), candle["low"], candle["high"]))
pass
# TODO define trade logic
if __name__ == "__main__":
with open(Path("./secrets.json")) as fh:
secrets = json.load(fh)
client = TDClient(secrets["refresh_token"], secrets["client_key"])
sample_hi_lo_auto(
client,
"VOO",
window_start_time=datetime.time(9, 30, 0),
window_duration=datetime.timedelta(minutes=1),
window_start_date=datetime.date(2020, 12, 12)
)
pass
|
from urllib.parse import urlencode, unquote, parse_qs
import json
from pathlib import Path
import requests
from sys import exit
import websockets
import asyncio
import datetime
from typing import Dict
### AUTH
# See docs here for initial setup: https://developer.tdameritrade.com/content/simple-auth-local-apps
# This lib handles all of this (pretty much) automatically: https://github.com/areed1192/td-ameritrade-python-api
# Step 1: Create a TD Ameritrade app
# Step 2: Hit auth endpoint
# print(urlencode({"redirect_uri": "https://127.0.0.1", "client_id": ""})) # add client_key here for encode
# Step 3: Copy and decode auth code returned in `code` parameter
# a = unquote("", encoding='ascii', errors='strict') # authorization_code here for decode
# print(a)
# Step 4: hit the access token endpoint
### SETTING UP A STREAM
# Instructions here: https://developer.tdameritrade.com/content/streaming-data
# requests.request('GET', 'https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0')
class TDClient:
def __init__(self, refresh_token: str, client_id: str):
self._refresh_token = refresh_token
self._client_id = client_id
self._access_token, cerr = self._get_new_token()
if cerr:
raise ConnectionError(cerr)
def _get_new_token(self) -> (str, str):
params = {
"grant_type": "refresh_token",
"refresh_token": self._refresh_token,
"client_id": self._client_id
}
response = requests.post('https://api.tdameritrade.com/v1/oauth2/token', data=params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json()['access_token'], None
def _get_user_principles(self) -> (str, str):
qs_params = {
"fields": "streamerSubscriptionKeys,streamerConnectionInfo"
}
header_params = {
"authorization": "Bearer %s" % self._access_token
}
response = requests.request('GET', 'https://api.tdameritrade.com/v1/userprincipals', params=qs_params, headers=header_params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json()['access_token'], None
def open_stream(self):
self._get_user_principles()
# TODO open up a websocket, then blast a login message out through the websocket. That login message contains user principles data
def get_price_history(
self,
symbol: str,
start_date: datetime.datetime,
period_type: str = "day",
period: int = 1,
frequency_type: str = "minute",
frequency: int = 1,
need_ext_hours_data: bool = False
) -> (Dict, str):
"""
See https://developer.tdameritrade.com/price-history/apis/get/marketdata/%7Bsymbol%7D/pricehistory
Note: "frequency" is actually the time between samples
:return: JSON with candlestick data
"""
# NOTE the end date parameter is ignored here. Can add it later if needed.
qs_params = {
"period_type": period_type,
"period": period,
"frequencyType": frequency_type,
"frequency": frequency,
"startDate": datetime.datetime.timestamp(start_date), # TODO left off here
"needExtendedHoursData": need_ext_hours_data
}
header_params = {
"authorization": "Bearer %s" % self._access_token
}
response = requests.request('GET', 'https://api.tdameritrade.com/v1/marketdata/%s/pricehistory' % symbol, params=qs_params, headers=header_params)
if not response.ok:
return None, "Request failed with status code %d (%s)" % (response.status_code, response.text.strip())
return response.json(), None
def sample_hi_lo_auto(
    client: TDClient,
    symbol: str,
    window_start_date: datetime.date = None,
    window_start_time: datetime.time = datetime.time(8, 30, 00),
    window_duration: datetime.timedelta = datetime.timedelta(0, 30),
):
    """Capture the high and the low of *symbol* over one trading day.

    Fetches 1-minute candles for *window_start_date*, computes the session
    high/low, and prints each candle's range.

    :param client: authenticated TDClient used to fetch price history
    :param symbol: ticker symbol to sample
    :param window_start_date: trading date to sample; None means "today"
    :raises ConnectionError: if the price-history request fails
    """
    # BUG FIX: the previous default `datetime.date.today()` was evaluated once
    # at import time, so a long-running process kept using a stale date.
    if window_start_date is None:
        window_start_date = datetime.date.today()
    # NOTE(review): window_start_time / window_duration are accepted but not
    # yet used -- windowed filtering belongs to the trade-logic TODO below.
    # (Also removed a stray dead `need_ext_hours_data` assignment that sat in
    # the middle of the body.)
    price_hist, err = client.get_price_history(
        symbol,
        frequency_type="minute",
        frequency=1,
        start_date=window_start_date,
        period_type="day",
        period=1
    )
    if err:
        raise ConnectionError(err)
    high = None
    low = None
    for candlestick in price_hist["candles"]:
        if high is None or candlestick["high"] > high:
            high = candlestick["high"]
        if low is None or candlestick["low"] < low:
            low = candlestick["low"]
    for candle in price_hist["candles"]:
        # candle["datetime"] is epoch milliseconds, hence the / 1000
        print("%s : %s - %s" % (datetime.datetime.fromtimestamp(float(candle["datetime"]) / 1000), candle["low"], candle["high"]))
    # TODO define trade logic
if __name__ == "__main__":
    # Load API credentials (refresh token + client key) from a local,
    # un-committed secrets file.
    with open(Path("./secrets.json")) as fh:
        secrets = json.load(fh)
    client = TDClient(secrets["refresh_token"], secrets["client_key"])
    # Smoke test: sample VOO's high/low for a fixed historical date.
    sample_hi_lo_auto(
        client,
        "VOO",
        window_start_time=datetime.time(9, 30, 0),
        window_duration=datetime.timedelta(minutes=1),
        window_start_date=datetime.date(2020, 12, 12)
    )
    pass
|
en
| 0.580416
|
### AUTH # See docs here for initial setup: https://developer.tdameritrade.com/content/simple-auth-local-apps # This lib handles all of this (pretty much) automatically: https://github.com/areed1192/td-ameritrade-python-api # Step 1: Create a TD Ameritrade app # Step 2: Hit auth endpoint # print(urlencode({"redirect_uri": "https://127.0.0.1", "client_id": ""})) # add client_key here for encode # Step 3: Copy and decode auth code returned in `code` parameter # a = unquote("", encoding='ascii', errors='strict') # authorization_code here for decode # print(a) # Step 4: hit the access token endpoint ### SETTING UP A STREAM # Instructions here: https://developer.tdameritrade.com/content/streaming-data # requests.request('GET', 'https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0') # TODO open up a websocket, then blast a login message out through the websocket. That login message contains user principles data See https://developer.tdameritrade.com/price-history/apis/get/marketdata/%7Bsymbol%7D/pricehistory Note: "frequency" is actually the time between samples :return: JSON with candlestick data # NOTE the end date parameter is ignored here. Can add it later if needed. # TODO left off here # capture the high and the low over some period # TODO define trade logic
| 2.635862
| 3
|
fabfile.py
|
pyeliteman/PDF-OCR-RTP
| 1
|
6627340
|
<reponame>pyeliteman/PDF-OCR-RTP<filename>fabfile.py
# -*- coding: utf-8 -*-
"""
Fabfile for managing a Python/Flask/Apache/MySQL project in MacOS/Ubuntu.
"""
import os
from fabric.api import env, task, run, local, get, sudo
from fabric.context_managers import cd, lcd, prefix, shell_env
PROJECT_NAME = "fbone"
# Remote Database Config
REMOTE_DB_USERNAME = ""
REMOTE_DB_PASSWORD = ""
REMOTE_DB_NAME = ""
# Local Database Config
LOCAL_DB_USERNAME = ""
LOCAL_DB_PASSWORD = ""
LOCAL_DB_NAME = ""
# the user to use for the remote commands
env.user = ''
# the servers where the commands are executed
env.hosts = ['']
# http://stackoverflow.com/questions/17102968/reading-logs-with-fabric
env.remote_interrupt = True
@task
def setup_python_macos():
    """Setup Python in MacOS via Homebrew"""
    # Setup Homebrew
    # TODO: Test if Homebrew installed?
    HOMEBREW_URL = "https://raw.githubusercontent.com/Homebrew/install/master/install"
    local("/usr/bin/ruby -e \"$(curl -fsSL %s)\"" % HOMEBREW_URL)
    # Put Homebrew's bin/sbin first on PATH for future shells.
    local("echo export PATH=/usr/local/bin:/usr/local/sbin:$PATH >> ~/.bash_profile")
    # Setup Python
    local("brew install python")
    local("brew update")
    # Setup Virtualenv
    local("pip install virtualenvwrapper")
    local("echo source /usr/local/bin/virtualenvwrapper.sh >> ~/.bash_profile")
@task
def setup_python_ubuntu():
    """Setup Python in Ubuntu, which already comes with Python"""
    # Setup Virtualenv (and hook it into future shells)
    local("pip install virtualenvwrapper")
    local("echo source /usr/local/bin/virtualenvwrapper.sh >> ~/.bash_profile")
@task
def bootstrap():
    """Bootstrap in local: recreate the instance dirs and initialise the DB."""
    local("rm -rf /tmp/instance")
    local("mkdir -p /tmp/instance/logs")
    local("mkdir -p /tmp/instance/uploads")
    with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
        local("flask initdb")
@task
def bootstrap_production():
    """Bootstrap in production server (not implemented yet)."""
    pass
@task
def debug():
    """Run in debug mode in local"""
    with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
        local("flask run")
@task(alias='t')
def test():
    """Run unittest in local"""
    with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
        local("python tests.py")
@task
def deploy():
    """Deploy via Git: push from the local checkout, then pull on the server."""
    # BUG FIX: `local("cd ...")` ran in its own subshell, so the subsequent
    # `git push` executed from whatever directory fab was invoked in.
    # lcd() actually scopes the working directory of the push.
    with lcd(os.path.join(os.environ["HOME"], PROJECT_NAME)):
        local("git push")
    with cd(os.path.join("/home/wilson", PROJECT_NAME)):
        # Make sure git can be accessed via ssh
        run("git pull")
        # Make sure "WSGIScriptReloading On" in apache conf file -- touching
        # the WSGI entry point makes mod_wsgi reload the app.
        run("touch wsgi.py")
@task
def syncdb():
    """Sync local db with remote db (dump remote MySQL, load it locally)."""
    # Guard against running with unconfigured credentials.
    if not REMOTE_DB_USERNAME or not REMOTE_DB_PASSWORD or not REMOTE_DB_NAME:
        # BUG FIX: Python-2-only `print "..."` statements replaced with the
        # function form (valid on both Python 2 and 3).
        print("Please setup remote db configs")
        return
    if not LOCAL_DB_USERNAME or not LOCAL_DB_PASSWORD or not LOCAL_DB_NAME:
        print("Please setup local db configs")
        return
    with cd("/tmp"):
        # Dump and compress the remote database on the server.
        run("mysqldump -u%s -p%s %s > latest_db.sql" % (REMOTE_DB_USERNAME,
                                                        REMOTE_DB_PASSWORD,
                                                        REMOTE_DB_NAME))
        run("tar cfz latest_db.sql.tgz latest_db.sql")
        # Download to local
        get("/tmp/latest_db.sql.tgz", "/tmp")
    with lcd("/tmp"):
        local("tar xfz latest_db.sql.tgz")
        local("mysql -u%s -p%s %s < latest_db.sql" % (LOCAL_DB_USERNAME,
                                                      LOCAL_DB_PASSWORD,
                                                      LOCAL_DB_NAME))
|
# -*- coding: utf-8 -*-
"""
Fabfile for managing a Python/Flask/Apache/MySQL project in MacOS/Ubuntu.
"""
import os
from fabric.api import env, task, run, local, get, sudo
from fabric.context_managers import cd, lcd, prefix, shell_env
PROJECT_NAME = "fbone"
# Remote Database Config
REMOTE_DB_USERNAME = ""
REMOTE_DB_PASSWORD = ""
REMOTE_DB_NAME = ""
# Local Database Config
LOCAL_DB_USERNAME = ""
LOCAL_DB_PASSWORD = ""
LOCAL_DB_NAME = ""
# the user to use for the remote commands
env.user = ''
# the servers where the commands are executed
env.hosts = ['']
# http://stackoverflow.com/questions/17102968/reading-logs-with-fabric
env.remote_interrupt = True
@task
def setup_python_macos():
"""Setup Python in MacOS via Homebrew"""
# Setup Homebrew
# TODO: Test if Homebrew installed?
HOMEBREW_URL = "https://raw.githubusercontent.com/Homebrew/install/master/install"
local("/usr/bin/ruby -e \"$(curl -fsSL %s)\"" % HOMEBREW_URL)
local("echo export PATH=/usr/local/bin:/usr/local/sbin:$PATH >> ~/.bash_profile")
# Setup Python
local("brew install python")
local("brew update")
# Setup Virtualenv
local("pip install virtualenvwrapper")
local("echo source /usr/local/bin/virtualenvwrapper.sh >> ~/.bash_profile")
@task
def setup_python_ubuntu():
"""Setup Python in Ubuntu, which already comes with Python"""
# Setup Virtualenv
local("pip install virtualenvwrapper")
local("echo source /usr/local/bin/virtualenvwrapper.sh >> ~/.bash_profile")
@task
def bootstrap():
"""Bootstrap in local"""
local("rm -rf /tmp/instance")
local("mkdir -p /tmp/instance/logs")
local("mkdir -p /tmp/instance/uploads")
with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
local("flask initdb")
@task
def bootstrap_production():
"""Bootstrap in production server"""
pass
@task
def debug():
"""Run in debug mode in local"""
with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
local("flask run")
@task(alias='t')
def test():
"""Run unittest in local"""
with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
local("python tests.py")
@task
def deploy():
"""Deploy via Git"""
local("cd " + os.path.join(os.environ["HOME"], PROJECT_NAME))
local("git push")
with cd(os.path.join("/home/wilson", PROJECT_NAME)):
# Make sure git can be accessed via ssh
run("git pull")
# Make sure "WSGIScriptReloading On" in apache conf file
run("touch wsgi.py")
@task
def syncdb():
"""Sync loacl db with remote db"""
if not REMOTE_DB_USERNAME or not REMOTE_DB_PASSWORD or not REMOTE_DB_NAME:
print "Please setup remote db configs"
return
if not LOCAL_DB_USERNAME or not LOCAL_DB_PASSWORD or not LOCAL_DB_NAME:
print "Please setup local db configs"
return
with cd("/tmp"):
run("mysqldump -u%s -p%s %s > latest_db.sql" % (REMOTE_DB_USERNAME,
REMOTE_DB_PASSWORD,
REMOTE_DB_NAME))
run("tar cfz latest_db.sql.tgz latest_db.sql")
# Download to local
get("/tmp/latest_db.sql.tgz", "/tmp")
with lcd("/tmp"):
local("tar xfz latest_db.sql.tgz")
local("mysql -u%s -p%s %s < latest_db.sql" % (LOCAL_DB_USERNAME,
LOCAL_DB_PASSWORD,
LOCAL_DB_NAME))
|
en
| 0.730428
|
# -*- coding: utf-8 -*- Fabfile for managing a Python/Flask/Apache/MySQL project in MacOS/Ubuntu. # Remote Database Config # Local Database Config # the user to use for the remote commands # the servers where the commands are executed # http://stackoverflow.com/questions/17102968/reading-logs-with-fabric Setup Python in MacOS via Homebrew # Setup Homebrew # TODO: Test if Homebrew installed? # Setup Python # Setup Virtualenv Setup Python in Ubuntu, which already comes with Python # Setup Virtualenv Bootstrap in local Bootstrap in production server Run in debug mode in local Run unittest in local Deploy via Git # Make sure git can be accessed via ssh # Make sure "WSGIScriptReloading On" in apache conf file Sync loacl db with remote db # Download to local
| 2.127478
| 2
|
asynctest.py
|
projecthexa/hexa
| 7
|
6627341
|
<reponame>projecthexa/hexa
import asyncio
import datetime
def match(pattern):
    """Consumer coroutine: echo each line sent to it that contains *pattern*.

    Prints a banner on start and "=== Done ===" when closed.
    """
    print('Looking for ' + pattern)
    try:
        while True:
            line = yield
            if pattern in line:
                print(line)
    except GeneratorExit:
        print("=== Done ===")
# Prime the coroutine: advance to the first `yield` so it can accept .send().
m = match("panni")
m.__next__()
|
import asyncio
import datetime
def match(pattern):
print('Looking for ' + pattern)
try:
while True:
s = (yield)
if pattern in s:
print(s)
except GeneratorExit:
print("=== Done ===")
m = match("panni")
m.__next__()
|
none
| 1
| 3.126839
| 3
|
|
dask_test.py
|
CLAHRCWessex/fast-py-bootstrap
| 0
|
6627342
|
<gh_stars>0
import numpy as np
from dask import delayed
def bootstrap_dask(data, boots):
    """
    Build `boots` delayed bootstrap estimates of the mean of `data`.

    Keyword arguments:
    data -- numpy array of systems to bootstrap
    boots -- number of bootstrap resamples

    Returns a list of dask Delayed objects (not a numpy array -- each
    element must still be computed, e.g. via dask.compute(*result)).
    """
    # Cleanup: removed an unused `total` accumulator and a dead commented-out
    # pre-allocation; the manual append loop is now a comprehension.
    return [bs_mean(data) for _ in range(boots)]
@delayed
def bs_mean(data):
    """Delayed task: mean of one bootstrap resample of `data`.

    Draws data.shape[0] samples with replacement via draw_sample.  Note each
    draw_sample call is itself Delayed, so `total` accumulates a task graph
    rather than numbers until the graph is computed.
    """
    total = 0
    for s in range(data.shape[0]):
        total += draw_sample(data)
    return total / data.shape[0]
@delayed
def draw_sample(data):
    """Delayed task: draw one element of `data` uniformly at random.

    BUG FIX: the old `round(np.random.uniform(0, n - 1))` gave the first and
    last index only half the probability of interior indices (each interior
    integer owns a unit-wide rounding interval, the endpoints a half-wide
    one).  np.random.randint samples every index with equal probability.
    """
    return data[np.random.randint(data.shape[0])]
# remembering how to use indexing in multi-d arrays (scratch experiments)
x = [1, 2, 3]
y = [4, 5, 6]
z = np.zeros((3, 4))
# np.append(z, x, axis=0)
# fill the last two columns element by element
z[0][3] = 4
z[1][3] = 4
z[2][3] = 4
z[0][2] = 3
z[1][2] = 3
z[2][2] = 3
print(z)
design = 2
# overwrite column `design` of z with x (rows of z.T are columns of z)
z.T[design:design+1] = x
z
# np.append(z, x, axis=1)
|
import numpy as np
from dask import delayed
def bootstrap_dask(data, boots):
"""
Create bootstrap datasets that represent the distribution of the mean.
Returns a numpy array containing the bootstrap datasets
Keyword arguments:
data -- numpy array of systems to boostrap
boots -- number of bootstrap (default = 1000)
DOESN't Work. Had to switch from numpy array to list
End up with a an array of delayed objects rather.
"""
#to_return = np.empty(boots)
d = []
total=0.0
for b in range(boots):
mn = bs_mean(data)
d.append(mn)
return d
@delayed
def bs_mean(data):
total = 0
for s in range(data.shape[0]):
total += draw_sample(data)
return total / data.shape[0]
@delayed
def draw_sample(data):
u = np.random.uniform(0, data.shape[0]-1)
u = round(u)
return data[u]
#remebering how to using indexing in multi-d arrays!
x = [1, 2, 3]
y = [4, 5, 6]
z =np.zeros((3, 4))
#np.append(z, x, 4xis=0)
z[0][3] = 4
z[1][3] = 4
z[2][3] = 4
z[0][2] = 3
z[1][2] = 3
z[2][2] = 3
print(z)
design = 2
z.T[design:design+1] = x
z
#np.append(z, x, axis=1)
|
en
| 0.551918
|
Create bootstrap datasets that represent the distribution of the mean. Returns a numpy array containing the bootstrap datasets Keyword arguments: data -- numpy array of systems to boostrap boots -- number of bootstrap (default = 1000) DOESN't Work. Had to switch from numpy array to list End up with a an array of delayed objects rather. #to_return = np.empty(boots) #remebering how to using indexing in multi-d arrays! #np.append(z, x, 4xis=0) #np.append(z, x, axis=1)
| 3.313833
| 3
|
lapy/TetIO.py
|
AhmedFaisal95/LaPy
| 8
|
6627343
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Original Author: <NAME>
# Date: Jul-5-2018
#
import numpy as np
import os.path
from .TetMesh import TetMesh
def import_gmsh(infile):
    """
    Load GMSH tetrahedron mesh from an ASCII .msh file.

    Returns a TetMesh (vertices v: n x 3, tetra indices t: m x 4) on
    success; returns None after printing a message on any read or format
    error.  Binary .msh files are not supported.
    """
    extension = os.path.splitext(infile)[1]
    verbose = 1
    if verbose > 0:
        print("--> GMSH format ... ")
    if extension != ".msh":
        print("[no .msh file] --> FAILED\n")
        return
    try:
        f = open(infile, 'r')
    except IOError:
        print("[file not found or not readable]\n")
        return
    # --- $MeshFormat header ---
    line = f.readline()
    if not line.startswith("$MeshFormat"):
        print("[$MeshFormat keyword not found] --> FAILED\n")
        f.close()
        return
    line = f.readline()
    larr = line.split()
    ver = float(larr[0])
    ftype = int(larr[1])  # non-zero means binary, which is rejected below
    datatype = int(larr[2])
    print('Msh file ver ', ver, ' , ftype ', ftype, ' , datatype ', datatype, '\n')
    if ftype != 0:
        print("[binary format not implemented] --> FAILED\n")
        f.close()
        return
    line = f.readline()
    if not line.startswith("$EndMeshFormat"):
        print("[$EndMeshFormat keyword not found] --> FAILED\n")
        f.close()
        return
    # --- $Nodes section ---
    line = f.readline()
    if not line.startswith("$Nodes"):
        print("[$Nodes keyword not found] --> FAILED\n")
        f.close()
        return
    pnum = int(f.readline())
    # read (nodes X 4) matrix as chunck
    # drop first column (presumably the node id -- TODO confirm against format)
    v = np.fromfile(f, 'float32', 4 * pnum, ' ')
    v.shape = (pnum, 4)
    v = np.delete(v, 0, 1)
    line = f.readline()
    if not line.startswith("$EndNodes"):
        print("[$EndNodes keyword not found] --> FAILED\n")
        f.close()
        return
    # --- $Elements section ---
    line = f.readline()
    if not line.startswith("$Elements"):
        print("[$Elements keyword not found] --> FAILED\n")
        f.close()
        return
    tnum = int(f.readline())
    # peek at the first element line (read, then seek back) to learn the
    # per-line column count, which varies with the number of tags
    pos = f.tell()
    line = f.readline()
    f.seek(pos)
    larr = line.split()
    if int(larr[1]) != 4:
        print("larr: ", larr, "\n")
        print("[can only read tetras] --> FAILED\n")
        f.close()
        return
    # read (nodes X ?) matrix; keep only the last 4 columns (the vertex ids)
    t = np.fromfile(f, 'int', tnum * len(larr), ' ')
    t.shape = (tnum, len(larr))
    t = np.delete(t, np.s_[0:len(larr) - 4], 1)
    line = f.readline()
    if not line.startswith("$EndElements"):
        print("Line: ", line, " \n")
        print("[$EndElements keyword not found] --> FAILED\n")
        f.close()
        return
    f.close()
    print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
    return TetMesh(v, t)
def import_vtk(infile):
    """
    Load VTK tetrahedron mesh from a legacy ASCII .vtk file.

    Accepts DATASET POLYDATA or DATASET UNSTRUCTURED_GRID whose cells are
    tetras written as "4 i0 i1 i2 i3".  Returns a TetMesh on success;
    returns None after printing a message on any error.

    NOTE(review): the early returns below leave `f` open -- candidate for a
    with-statement in a follow-up change.
    """
    verbose = 1
    if verbose > 0:
        print("--> VTK format ... ")
    try:
        f = open(infile, 'r')
    except IOError:
        print("[file not found or not readable]\n")
        return
    # skip comments
    line = f.readline()
    while line[0] == '#':
        line = f.readline()
    # search for ASCII keyword in first 5 lines:
    count = 0
    while count < 5 and not line.startswith("ASCII"):
        line = f.readline()
        # print line
        count = count + 1
    if not line.startswith("ASCII"):
        print("[ASCII keyword not found] --> FAILED\n")
        return
    # expect Dataset Polydata line after ASCII:
    line = f.readline()
    if not line.startswith("DATASET POLYDATA") and not line.startswith("DATASET UNSTRUCTURED_GRID"):
        print("[read: " + line + " expected DATASET POLYDATA or DATASET UNSTRUCTURED_GRID] --> FAILED\n")
        return
    # read number of points
    line = f.readline()
    larr = line.split()
    if larr[0] != "POINTS" or (larr[2] != "float" and larr[2] != "double"):
        print("[read: " + line + " expected POINTS # float or POINTS # double ] --> FAILED\n")
        return
    pnum = int(larr[1])
    # read points as chunk
    v = np.fromfile(f, 'float32', 3 * pnum, ' ')
    v.shape = (pnum, 3)
    # expect polygon or tria_strip line
    line = f.readline()
    larr = line.split()
    if larr[0] == "POLYGONS" or larr[0] == "CELLS":
        tnum = int(larr[1])
        ttnum = int(larr[2])
        npt = float(ttnum) / tnum  # values per cell line: 4 vertex ids + 1 count
        if npt != 5.0:
            print("[having: " + str(npt) + " data per tetra, expected 4+1] --> FAILED\n")
            return
        t = np.fromfile(f, 'int', ttnum, ' ')
        t.shape = (tnum, 5)
        if t[tnum - 1][0] != 4:
            print("[can only read tetras] --> FAILED\n")
            return
        t = np.delete(t, 0, 1)  # drop the leading per-cell vertex-count column
    else:
        print("[read: " + line + " expected POLYGONS or CELLS] --> FAILED\n")
        return
    f.close()
    print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
    return TetMesh(v, t)
def export_vtk(tet, outfile):
    """
    Save a tetra mesh as a legacy ASCII VTK POLYDATA file.

    usage: exportVTK(TetMesh, outfile)

    :param tet: object with vertex array `v` (n x 3) and tetra index array
                `t` (m x 4)
    :param outfile: destination path; prints a message and returns on
                    write failure
    """
    try:
        # `with` guarantees the handle is closed even when a write raises
        # (the old code leaked the handle on mid-write failures).
        with open(outfile, 'w') as f:
            f.write('# vtk DataFile Version 1.0\n')
            f.write('vtk output\n')
            f.write('ASCII\n')
            f.write('DATASET POLYDATA\n')
            f.write('POINTS ' + str(np.shape(tet.v)[0]) + ' float\n')
            for i in range(np.shape(tet.v)[0]):
                f.write(' '.join(map(str, tet.v[i, :])))
                f.write('\n')
            # each tetra row is written as "4 i0 i1 i2 i3" (count + indices)
            f.write('POLYGONS ' + str(np.shape(tet.t)[0]) + ' ' + str(5 * np.shape(tet.t)[0]) + '\n')
            for i in range(np.shape(tet.t)[0]):
                f.write(' '.join(map(str, np.append(4, tet.t[i, :]))))
                f.write('\n')
    except IOError:
        print("[File " + outfile + " not writable]")
        return
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Original Author: <NAME>
# Date: Jul-5-2018
#
import numpy as np
import os.path
from .TetMesh import TetMesh
def import_gmsh(infile):
"""
Load GMSH tetrahedron mesh
"""
extension = os.path.splitext(infile)[1]
verbose = 1
if verbose > 0:
print("--> GMSH format ... ")
if extension != ".msh":
print("[no .msh file] --> FAILED\n")
return
try:
f = open(infile, 'r')
except IOError:
print("[file not found or not readable]\n")
return
line = f.readline()
if not line.startswith("$MeshFormat"):
print("[$MeshFormat keyword not found] --> FAILED\n")
f.close()
return
line = f.readline()
larr = line.split()
ver = float(larr[0])
ftype = int(larr[1])
datatype = int(larr[2])
print('Msh file ver ', ver, ' , ftype ', ftype, ' , datatype ', datatype, '\n')
if ftype != 0:
print("[binary format not implemented] --> FAILED\n")
f.close()
return
line = f.readline()
if not line.startswith("$EndMeshFormat"):
print("[$EndMeshFormat keyword not found] --> FAILED\n")
f.close()
return
line = f.readline()
if not line.startswith("$Nodes"):
print("[$Nodes keyword not found] --> FAILED\n")
f.close()
return
pnum = int(f.readline())
# read (nodes X 4) matrix as chunck
# drop first column
v = np.fromfile(f, 'float32', 4 * pnum, ' ')
v.shape = (pnum, 4)
v = np.delete(v, 0, 1)
line = f.readline()
if not line.startswith("$EndNodes"):
print("[$EndNodes keyword not found] --> FAILED\n")
f.close()
return
line = f.readline()
if not line.startswith("$Elements"):
print("[$Elements keyword not found] --> FAILED\n")
f.close()
return
tnum = int(f.readline())
pos = f.tell()
line = f.readline()
f.seek(pos)
larr = line.split()
if int(larr[1]) != 4:
print("larr: ", larr, "\n")
print("[can only read tetras] --> FAILED\n")
f.close()
return
# read (nodes X ?) matrix
t = np.fromfile(f, 'int', tnum * len(larr), ' ')
t.shape = (tnum, len(larr))
t = np.delete(t, np.s_[0:len(larr) - 4], 1)
line = f.readline()
if not line.startswith("$EndElements"):
print("Line: ", line, " \n")
print("[$EndElements keyword not found] --> FAILED\n")
f.close()
return
f.close()
print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
return TetMesh(v, t)
def import_vtk(infile):
"""
Load VTK tetrahedron mesh
"""
verbose = 1
if verbose > 0:
print("--> VTK format ... ")
try:
f = open(infile, 'r')
except IOError:
print("[file not found or not readable]\n")
return
# skip comments
line = f.readline()
while line[0] == '#':
line = f.readline()
# search for ASCII keyword in first 5 lines:
count = 0
while count < 5 and not line.startswith("ASCII"):
line = f.readline()
# print line
count = count + 1
if not line.startswith("ASCII"):
print("[ASCII keyword not found] --> FAILED\n")
return
# expect Dataset Polydata line after ASCII:
line = f.readline()
if not line.startswith("DATASET POLYDATA") and not line.startswith("DATASET UNSTRUCTURED_GRID"):
print("[read: " + line + " expected DATASET POLYDATA or DATASET UNSTRUCTURED_GRID] --> FAILED\n")
return
# read number of points
line = f.readline()
larr = line.split()
if larr[0] != "POINTS" or (larr[2] != "float" and larr[2] != "double"):
print("[read: " + line + " expected POINTS # float or POINTS # double ] --> FAILED\n")
return
pnum = int(larr[1])
# read points as chunk
v = np.fromfile(f, 'float32', 3 * pnum, ' ')
v.shape = (pnum, 3)
# expect polygon or tria_strip line
line = f.readline()
larr = line.split()
if larr[0] == "POLYGONS" or larr[0] == "CELLS":
tnum = int(larr[1])
ttnum = int(larr[2])
npt = float(ttnum) / tnum
if npt != 5.0:
print("[having: " + str(npt) + " data per tetra, expected 4+1] --> FAILED\n")
return
t = np.fromfile(f, 'int', ttnum, ' ')
t.shape = (tnum, 5)
if t[tnum - 1][0] != 4:
print("[can only read tetras] --> FAILED\n")
return
t = np.delete(t, 0, 1)
else:
print("[read: " + line + " expected POLYGONS or CELLS] --> FAILED\n")
return
f.close()
print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
return TetMesh(v, t)
def export_vtk(tet, outfile):
"""
Save VTK file
usage: exportVTK(TetMesh,outfile)
"""
# open file
try:
f = open(outfile, 'w')
except IOError:
print("[File " + outfile + " not writable]")
return
# check data structure
# ...
# Write
f.write('# vtk DataFile Version 1.0\n')
f.write('vtk output\n')
f.write('ASCII\n')
f.write('DATASET POLYDATA\n')
f.write('POINTS ' + str(np.shape(tet.v)[0]) + ' float\n')
for i in range(np.shape(tet.v)[0]):
f.write(' '.join(map(str, tet.v[i, :])))
f.write('\n')
f.write('POLYGONS ' + str(np.shape(tet.t)[0]) + ' ' + str(5 * np.shape(tet.t)[0]) + '\n')
for i in range(np.shape(tet.t)[0]):
f.write(' '.join(map(str, np.append(4, tet.t[i, :]))))
f.write('\n')
f.close()
|
en
| 0.714293
|
#!/usr/bin/env python # -*- coding: latin-1 -*- # # Original Author: <NAME> # Date: Jul-5-2018 # Load GMSH tetrahedron mesh # read (nodes X 4) matrix as chunck # drop first column # read (nodes X ?) matrix Load VTK tetrahedron mesh # skip comments # search for ASCII keyword in first 5 lines: # print line # expect Dataset Polydata line after ASCII: # read number of points # float or POINTS # double ] --> FAILED\n") # read points as chunk # expect polygon or tria_strip line Save VTK file usage: exportVTK(TetMesh,outfile) # open file # check data structure # ... # Write
| 2.709689
| 3
|
model-optimizer/mo/front/mxnet/extractors/pooling.py
|
mypopydev/dldt
| 3
|
6627344
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.extractors.utils import layout_attrs
from mo.front.common.partial_infer.pooling import pool_explicit_padding_infer
def pooling_ext(attrs):
    """Extract MXNet Pooling attributes into the internal pooling descriptor."""
    kernel = attrs.tuple("kernel", int, None)
    stride = attrs.tuple("stride", int, (1, 1))
    padding = attrs.tuple("pad", int, (0, 0))
    method = attrs.str("pool_type", None)

    # The 2-tuples arrive as (dim0, dim1); note the [1]/[0] index swap when
    # they are packed into the 4-element NCHW-style arrays below.
    pad_1 = [padding[1], padding[1]]
    pad_0 = [padding[0], padding[0]]

    data = {
        'window': np.array([1, 1, kernel[1], kernel[0]], dtype=np.int64),
        'stride': np.array([1, 1, stride[1], stride[0]], dtype=np.int64),
        'pad': np.array([[0, 0], [0, 0], pad_1, pad_0], dtype=np.int64),
        'pad_spatial_shape': np.array([pad_1, pad_0], dtype=np.int64),
        'pool_method': method,
        'exclude_pad': 'false',
        'infer': pool_explicit_padding_infer,
        'output_spatial_shape': None,
        'rounding_type': 'floor',
    }
    data.update(layout_attrs())

    # A set pooling convention switches rounding from floor to ceil.
    pooling_conv = attrs.str("pooling_convention", 'valid')
    if pooling_conv:
        data["pooling_convention"] = pooling_conv
        data["rounding_type"] = 'ceil'

    global_pool = attrs.bool("global_pool", False)
    if global_pool:
        data["global_pool"] = global_pool
    return data
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.extractors.utils import layout_attrs
from mo.front.common.partial_infer.pooling import pool_explicit_padding_infer
def pooling_ext(attrs):
kernel = attrs.tuple("kernel", int, None)
stride = attrs.tuple("stride", int, (1, 1))
padding = attrs.tuple("pad", int, (0, 0))
method = attrs.str("pool_type", None)
data = {
'window': np.array([1, 1, kernel[1], kernel[0]], dtype=np.int64),
'stride': np.array([1, 1, stride[1], stride[0]], dtype=np.int64),
'pad': np.array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'pad_spatial_shape': np.array([[padding[1], padding[1]], [padding[0], padding[0]]], dtype=np.int64),
'pool_method': method,
'exclude_pad': 'false',
'infer': pool_explicit_padding_infer,
'output_spatial_shape': None,
'rounding_type': 'floor'
}
data.update(layout_attrs())
pooling_conv = attrs.str("pooling_convention", 'valid')
if pooling_conv:
data["pooling_convention"] = pooling_conv
data["rounding_type"] = 'ceil'
global_pool = attrs.bool("global_pool", False)
if global_pool:
data["global_pool"] = global_pool
return data
|
en
| 0.858093
|
Copyright (c) 2018 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 1.761661
| 2
|
zerver/migrations/0238_usermessage_bigint_id.py
|
kaustubh-nair/zulip
| 6
|
6627345
|
<filename>zerver/migrations/0238_usermessage_bigint_id.py
# Generated by Django 1.11.23 on 2019-08-22 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable `bigint_id` column to UserMessage and ArchivedUserMessage.

    Auto-generated (Django 1.11.23).  Presumably groundwork for widening the
    row id to 64 bits, per the migration name -- confirm against follow-up
    migrations.
    """

    dependencies = [
        ('zerver', '0237_rename_zulip_realm_to_zulipinternal'),
    ]

    operations = [
        migrations.AddField(
            model_name='archivedusermessage',
            name='bigint_id',
            field=models.BigIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='usermessage',
            name='bigint_id',
            field=models.BigIntegerField(null=True),
        ),
    ]
|
<filename>zerver/migrations/0238_usermessage_bigint_id.py
# Generated by Django 1.11.23 on 2019-08-22 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0237_rename_zulip_realm_to_zulipinternal'),
]
operations = [
migrations.AddField(
model_name='archivedusermessage',
name='bigint_id',
field=models.BigIntegerField(null=True),
),
migrations.AddField(
model_name='usermessage',
name='bigint_id',
field=models.BigIntegerField(null=True),
),
]
|
en
| 0.596568
|
# Generated by Django 1.11.23 on 2019-08-22 22:02
| 1.466214
| 1
|
031_CoinSums.py
|
joetache4/project-euler
| 0
|
6627346
|
"""
In the United Kingdom the currency is made up of pound (£) and pence (p). There are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p), and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
ans: 73682
"""
def ways(amount, nohigher=200):
    """
    Count the ways to make `amount` pence from UK coins no larger than
    `nohigher`, where coin order does not matter.

    Replaces the original exponential recursion with the standard
    O(coins * amount) dynamic program; results are identical (the
    recursion's `nohigher` cap enforced the same non-increasing coin
    ordering that the DP's one-coin-at-a-time outer loop does).
    """
    if amount < 0:
        return 0  # matches the recursion: no coin fits a negative amount
    coins = [c for c in (1, 2, 5, 10, 20, 50, 100, 200) if c <= nohigher]
    combos = [0] * (amount + 1)
    combos[0] = 1  # one way to make zero: use no coins
    for coin in coins:
        for total in range(coin, amount + 1):
            combos[total] += combos[total - coin]
    return combos[amount]
print(ways(200))
|
"""
In the United Kingdom the currency is made up of pound (£) and pence (p). There are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p), and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
ans: 73682
"""
def ways(amount, nohigher=200):
if amount == 0:
return 1
count = 0
coins = [1, 2, 5, 10, 20, 50, 100, 200]
for c in coins:
if amount >= c and c <= nohigher:
count += ways(amount - c, c)
return count
print(ways(200))
|
en
| 0.914299
|
In the United Kingdom the currency is made up of pound (£) and pence (p). There are eight coins in general circulation: 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p), and £2 (200p). It is possible to make £2 in the following way: 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p How many different ways can £2 be made using any number of coins? ans: 73682
| 3.230886
| 3
|
sitioWeb/SucursalA/models.py
|
UnopposedQuill/muedatos2
| 0
|
6627347
|
from django.db import models
## SucursalA
class SACliente(models.Model):
    """Customer of branch A -- unmanaged mapping of the `Cliente` table."""
    idcliente = models.AutoField(db_column='idCliente', primary_key=True)  # Field name made lowercase.
    nombre = models.CharField(max_length=30)
    apellidos = models.CharField(max_length=30)
    ubicacionLat = models.FloatField()  # This field type is a guess.
    ubicacionLong = models.FloatField()  # This field type is a guess.
    direccion = models.CharField(max_length=120)

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'Cliente'
class SAEmpleado(models.Model):
    """Employee of branch A -- unmanaged mapping of the `Empleado` table."""
    idempleado = models.AutoField(db_column='idEmpleado', primary_key=True)  # Field name made lowercase.
    idpuesto = models.ForeignKey('SAPuesto', models.DO_NOTHING, db_column='idPuesto')  # Field name made lowercase.
    nombre = models.CharField(max_length=30)
    apellidos = models.CharField(max_length=30)
    fechacontratacion = models.DateField(db_column='fechaContratacion')  # Field name made lowercase.
    foto = models.TextField()
    salario = models.FloatField()
    comision = models.FloatField()

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'Empleado'
class SALineaventa(models.Model):
    """Line item of a sale -- unmanaged mapping of the `LineaVenta` table.

    NOTE(review): idproducto is a plain IntegerField, not a ForeignKey --
    presumably the product catalog lives in another database/app.
    """
    idlineaventa = models.AutoField(db_column='idLineaVenta', primary_key=True)  # Field name made lowercase.
    idventa = models.ForeignKey('SAVenta', models.DO_NOTHING, db_column='idVenta')  # Field name made lowercase.
    idproducto = models.IntegerField(db_column='idProducto')  # Field name made lowercase.
    cantidad = models.IntegerField()
    precio = models.FloatField()

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'LineaVenta'
class SAMetodopago(models.Model):
    """Payment method of a customer -- unmanaged mapping of `MetodoPago`."""
    idmetodopago = models.AutoField(db_column='idMetodoPago', primary_key=True)  # Field name made lowercase.
    idcliente = models.ForeignKey(SACliente, models.DO_NOTHING, db_column='idCliente')  # Field name made lowercase.
    descripcion = models.CharField(max_length=20, blank=True, null=True)

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'MetodoPago'
class SAPuesto(models.Model):
    """Job position of an employee -- unmanaged mapping of the `Puesto` table."""
    idpuesto = models.AutoField(db_column='idPuesto', primary_key=True)  # Field name made lowercase.
    descripcion = models.CharField(max_length=30)

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'Puesto'
class SAVenta(models.Model):
    """A sale made by an employee to a customer -- unmanaged mapping of `Venta`."""
    idventa = models.AutoField(db_column='idVenta', primary_key=True)  # Field name made lowercase.
    idempleado = models.ForeignKey(SAEmpleado, models.DO_NOTHING, db_column='idEmpleado')  # Field name made lowercase.
    idcliente = models.ForeignKey(SACliente, models.DO_NOTHING, db_column='idCliente')  # Field name made lowercase.
    idmetodopago = models.ForeignKey(SAMetodopago, models.DO_NOTHING, db_column='idMetodoPago')  # Field name made lowercase.
    fechaventa = models.DateField(db_column='fechaVenta')  # Field name made lowercase.
    reciboconforme = models.TextField(db_column='reciboConforme', blank=True, null=True)  # Field name made lowercase. This field type is a guess.

    class Meta:
        managed = False  # Django will not create or alter this table.
        app_label = 'SucursalA'
        db_table = 'Venta'
|
from django.db import models
## SucursalA
class SACliente(models.Model):
    """Client record (table ``Cliente``) in the SucursalA branch schema.

    Unmanaged ``inspectdb``-style model: Django neither creates nor
    migrates the backing table.
    """
    idcliente = models.AutoField(db_column='idCliente', primary_key=True) # Field name made lowercase.
    nombre = models.CharField(max_length=30)
    apellidos = models.CharField(max_length=30)
    # FloatField is inspectdb's guess — presumably latitude/longitude
    # coordinates; TODO confirm column types in the DB.
    ubicacionLat = models.FloatField() # This field type is a guess.
    ubicacionLong = models.FloatField() # This field type is a guess.
    direccion = models.CharField(max_length=120)
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'Cliente'
class SAEmpleado(models.Model):
    """Employee record (table ``Empleado``) in the SucursalA branch schema.

    Unmanaged ``inspectdb``-style model: Django neither creates nor
    migrates the backing table.
    """
    idempleado = models.AutoField(db_column='idEmpleado', primary_key=True) # Field name made lowercase.
    idpuesto = models.ForeignKey('SAPuesto', models.DO_NOTHING, db_column='idPuesto') # Field name made lowercase.
    nombre = models.CharField(max_length=30)
    apellidos = models.CharField(max_length=30)
    fechacontratacion = models.DateField(db_column='fechaContratacion') # Field name made lowercase.
    # TextField — presumably a base64/path reference to a photo; confirm.
    foto = models.TextField()
    salario = models.FloatField()
    comision = models.FloatField()
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'Empleado'
class SALineaventa(models.Model):
    """Sale line item (table ``LineaVenta``) in the SucursalA branch schema.

    Unmanaged ``inspectdb``-style model: Django neither creates nor
    migrates the backing table.
    """
    idlineaventa = models.AutoField(db_column='idLineaVenta', primary_key=True) # Field name made lowercase.
    idventa = models.ForeignKey('SAVenta', models.DO_NOTHING, db_column='idVenta') # Field name made lowercase.
    # Plain integer, not a ForeignKey — presumably references a product
    # table outside this schema; TODO confirm.
    idproducto = models.IntegerField(db_column='idProducto') # Field name made lowercase.
    cantidad = models.IntegerField()
    precio = models.FloatField()
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'LineaVenta'
class SAMetodopago(models.Model):
    """Payment method (table ``MetodoPago``) registered per client.

    Unmanaged ``inspectdb``-style model for the SucursalA branch schema.
    """
    idmetodopago = models.AutoField(db_column='idMetodoPago', primary_key=True) # Field name made lowercase.
    idcliente = models.ForeignKey(SACliente, models.DO_NOTHING, db_column='idCliente') # Field name made lowercase.
    descripcion = models.CharField(max_length=20, blank=True, null=True)
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'MetodoPago'
class SAPuesto(models.Model):
    """Employee job title (table ``Puesto``) in the SucursalA branch schema."""
    idpuesto = models.AutoField(db_column='idPuesto', primary_key=True) # Field name made lowercase.
    descripcion = models.CharField(max_length=30)
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'Puesto'
class SAVenta(models.Model):
    """Sale header (table ``Venta``) linking employee, client and payment method.

    Unmanaged ``inspectdb``-style model for the SucursalA branch schema.
    """
    idventa = models.AutoField(db_column='idVenta', primary_key=True) # Field name made lowercase.
    idempleado = models.ForeignKey(SAEmpleado, models.DO_NOTHING, db_column='idEmpleado') # Field name made lowercase.
    idcliente = models.ForeignKey(SACliente, models.DO_NOTHING, db_column='idCliente') # Field name made lowercase.
    idmetodopago = models.ForeignKey(SAMetodopago, models.DO_NOTHING, db_column='idMetodoPago') # Field name made lowercase.
    fechaventa = models.DateField(db_column='fechaVenta') # Field name made lowercase.
    # TextField is inspectdb's guess for the underlying column type —
    # presumably a receipt blob/flag; TODO confirm against the DB.
    reciboconforme = models.TextField(db_column='reciboConforme', blank=True, null=True) # Field name made lowercase. This field type is a guess.
    class Meta:
        managed = False  # table owned by the database, not by Django
        app_label = 'SucursalA'
        db_table = 'Venta'
|
en
| 0.886408
|
## SucursalA # Field name made lowercase. # This field type is a guess. # This field type is a guess. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. # Field name made lowercase. This field type is a guess.
| 2.021249
| 2
|
ckanext/datastore/tests/test_unit.py
|
robin-NEC/ckan
| 2,805
|
6627348
|
<filename>ckanext/datastore/tests/test_unit.py
# encoding: utf-8
import ckanext.datastore.backend.postgres as backend
import ckanext.datastore.backend.postgres as db
import ckanext.datastore.helpers as helpers
from ckan.common import config
postgres_backend = backend.DatastorePostgresqlBackend()
postgres_backend.configure(config)
def test_is_valid_field_name():
    """Double quotes and the empty string are rejected; spaces, digits,
    single quotes and '%' are accepted in field names."""
    accepted = ("foo", "foo bar", "42", "'", "foo%bar")
    rejected = ('foo"bar', '"', "")
    for candidate in accepted:
        assert helpers.is_valid_field_name(candidate)
    for candidate in rejected:
        assert not helpers.is_valid_field_name(candidate)
def test_is_valid_table_name():
    """Like field names, except '%' is additionally rejected in table names."""
    accepted = ("foo", "foo bar", "42", "'")
    rejected = ('foo"bar', '"', "", "foo%bar")
    for candidate in accepted:
        assert helpers.is_valid_table_name(candidate)
    for candidate in rejected:
        assert not helpers.is_valid_table_name(candidate)
def test_pg_version_check():
    """Check ``_pg_version_is_at_least`` against a live datastore DB.

    Requires a PostgreSQL database configured via ``sqlalchemy.url``.
    """
    engine = db._get_engine_from_url(config["sqlalchemy.url"])
    connection = engine.connect()
    try:
        # Any supported PostgreSQL is at least 8.0; 20.0 does not exist,
        # so both outcomes of the version comparison are exercised.
        assert db._pg_version_is_at_least(connection, "8.0")
        assert not db._pg_version_is_at_least(connection, "20.0")
    finally:
        # Fix: the original leaked the connection; always release it.
        connection.close()
|
<filename>ckanext/datastore/tests/test_unit.py
# encoding: utf-8
import ckanext.datastore.backend.postgres as backend
import ckanext.datastore.backend.postgres as db
import ckanext.datastore.helpers as helpers
from ckan.common import config
postgres_backend = backend.DatastorePostgresqlBackend()
postgres_backend.configure(config)
def test_is_valid_field_name():
    """Double quotes and the empty string are rejected; spaces, digits,
    single quotes and '%' are accepted in field names."""
    accepted = ("foo", "foo bar", "42", "'", "foo%bar")
    rejected = ('foo"bar', '"', "")
    for candidate in accepted:
        assert helpers.is_valid_field_name(candidate)
    for candidate in rejected:
        assert not helpers.is_valid_field_name(candidate)
def test_is_valid_table_name():
    """Like field names, except '%' is additionally rejected in table names."""
    accepted = ("foo", "foo bar", "42", "'")
    rejected = ('foo"bar', '"', "", "foo%bar")
    for candidate in accepted:
        assert helpers.is_valid_table_name(candidate)
    for candidate in rejected:
        assert not helpers.is_valid_table_name(candidate)
def test_pg_version_check():
    """Check ``_pg_version_is_at_least`` against a live datastore DB.

    Requires a PostgreSQL database configured via ``sqlalchemy.url``.
    """
    engine = db._get_engine_from_url(config["sqlalchemy.url"])
    connection = engine.connect()
    try:
        # Any supported PostgreSQL is at least 8.0; 20.0 does not exist,
        # so both outcomes of the version comparison are exercised.
        assert db._pg_version_is_at_least(connection, "8.0")
        assert not db._pg_version_is_at_least(connection, "20.0")
    finally:
        # Fix: the original leaked the connection; always release it.
        connection.close()
|
en
| 0.83829
|
# encoding: utf-8
| 2.178244
| 2
|
flight/lorikeet/cluster.py
|
rhysnewell/flock
| 0
|
6627349
|
<filename>flight/lorikeet/cluster.py
#!/usr/bin/env python
###############################################################################
# cluster.py - A program which handles the UMAP and HDBSCAN python components
# of lorikeet
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = ["<NAME>"]
__license__ = "GPL3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> near hdr.qut.edu.au"
__status__ = "Development"
###############################################################################
# System imports
import argparse
import logging
# Function imports
import numpy as np
import hdbscan
import seaborn as sns
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import skbio.stats.composition
from sklearn.metrics import pairwise_distances
import umap
import scipy.spatial.distance as sp_distance
# import pacmap
# import phate
# self imports
import flight.utils as utils
import flight.metrics as metrics
# Set plotting style
sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})
# Debug
debug = {
1: logging.CRITICAL,
2: logging.ERROR,
3: logging.WARNING,
4: logging.INFO,
5: logging.DEBUG
}
###############################################################################
############################### - Exceptions - ################################
class BadTreeFileException(Exception):
    """Signals a malformed tree file (judging by the name; it is not
    raised anywhere in this module's visible code)."""
    pass
############################################################################### [44/1010]
################################ - Functions - ################################
def phelp():
    """Print the command-line usage message for cluster.py."""
    usage = """
Usage:
cluster.py [SUBCOMMAND] ..
Subcommands:
fit
"""
    print(usage)
def str2bool(v):
    """Coerce *v* to a bool, accepting common textual spellings.

    Booleans pass through unchanged; strings are matched
    case-insensitively against yes/no style tokens.  Anything else
    raises ``argparse.ArgumentTypeError`` so argparse reports a clean
    error to the user.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
###############################################################################
################################ - Classes - ##################################
class CustomHelpFormatter(argparse.HelpFormatter):
    """argparse formatter that preserves explicit newlines in help text
    and appends '(default: ...)' to options that have a meaningful default."""
    def _split_lines(self, text, width):
        # Keep author-supplied line breaks instead of re-wrapping to width.
        return text.splitlines()
    def _get_help_string(self, action):
        h = action.help
        # Only append the default if the author didn't already mention it
        # and the default is something worth showing ('' / [] / None / False
        # are treated as "no default").
        if '%(default)' not in action.help:
            if action.default != '' and \
                    action.default != [] and \
                    action.default != None \
                    and action.default != False:
                if action.default is not argparse.SUPPRESS:
                    defaulting_nargs = [
                        argparse.OPTIONAL, argparse.ZERO_OR_MORE
                    ]
                    if action.option_strings or action.nargs in defaulting_nargs:
                        if '\n' in h:
                            # Multi-line help: annotate only the first line.
                            lines = h.splitlines()
                            lines[0] += ' (default: %(default)s)'
                            h = '\n'.join(lines)
                        else:
                            h += ' (default: %(default)s)'
        return h
    def _fill_text(self, text, width, indent):
        # Indent description text verbatim, without re-wrapping.
        return ''.join([indent + line for line in text.splitlines(True)])
class Cluster:
    """UMAP + HDBSCAN clustering of variant depth profiles for lorikeet.

    Loads a depth/count matrix from ``count_path`` (a ``.npy`` file whose
    first two columns are skipped as metadata), CLR-transforms the depths,
    embeds a precomputed distance matrix with three UMAP reducers at
    different a/b settings, and clusters the embedding with HDBSCAN.
    Helper methods refine, merge and plot the resulting bins.

    Several constructor parameters (``scaler``, ``random_state``,
    ``min_cluster_size``, ``min_samples``, ``prediction_data``,
    ``cluster_selection_method``, ``metric``, ``hdbscan_metric``) are
    accepted but unused by the visible code — presumably kept for CLI
    compatibility; confirm before removing any of them.
    """

    def __init__(
        self,
        count_path,
        output_prefix,
        scaler="clr",
        n_neighbors=100,
        min_dist=0.1,
        n_components=2,
        random_state=42,
        min_cluster_size=100,
        min_samples=50,
        prediction_data=True,
        cluster_selection_method="eom",
        precomputed=False,
        metric='hellinger_distance_poisson',
        hdbscan_metric="euclidean",
        threads=8,
        b=0.5,
        a=1.48,
        random_seed=42069,
    ):
        # Results populated by fit_transform()/cluster() and friends.
        self.embeddings = []
        self.labels = None
        self.cluster_means = None
        self.separation = None
        self.threads = threads
        ## Set up clusterer and UMAP
        self.path = output_prefix
        self.depths = np.load(count_path)
        # A single-column matrix means a single sample was provided.
        self.single_sample = self.depths.shape[1] == 1
        ## Scale the data: CLR transform with a +1 pseudocount, skipping
        ## the first two (metadata) columns.
        self.clr_depths = skbio.stats.composition.clr((self.depths[:, 2:] + 1).T).T
        if self.single_sample:
            # Have to reshape back to a column vector after the clr transform.
            self.clr_depths = self.clr_depths.reshape((-1, 1))
        try:
            self.n_samples = (self.depths.shape[1] - 2) // 2
        except IndexError:
            # 1-D input: fall back to the row count.
            self.n_samples = (self.depths.shape[0] - 2) // 2
        # Embed into between 2 and 10 components depending on sample count
        # (overrides the n_components argument).
        n_components = min(max(self.n_samples, 2), 10)
        # UMAP requires n_neighbors < number of points.
        if n_neighbors > self.depths.shape[0]:
            n_neighbors = self.depths.shape[0] - 1
        # Reducer over the rho-variant correlation metric (currently unused
        # by fit_transform, kept available).
        self.rho_reducer = umap.UMAP(
            n_neighbors=n_neighbors,
            n_components=n_components,
            random_state=random_seed,
            metric=metrics.rho_variants,
            a=a,
            b=b,
            init="spectral"
        )
        # Plain euclidean reducer over the CLR depths (currently unused
        # by fit_transform, kept available).
        self.distance_reducer = umap.UMAP(
            n_neighbors=n_neighbors,
            n_components=n_components,
            random_state=random_seed,
            a=a,
            b=b,
            init="spectral"
        )
        # Three precomputed-distance reducers at increasing a/b settings;
        # fit_transform fits all three but uses `low` for self.embeddings.
        self.precomputed_reducer_low = umap.UMAP(
            metric="precomputed",
            densmap=False,
            dens_lambda=2.5,
            n_neighbors=n_neighbors,
            n_components=n_components,
            min_dist=min_dist,
            set_op_mix_ratio=1,
            a=1.48,
            b=0.3,
            n_jobs=self.threads,
            random_state=random_seed
        )
        self.precomputed_reducer_mid = umap.UMAP(
            metric="precomputed",
            densmap=False,
            dens_lambda=2.5,
            n_neighbors=n_neighbors,
            n_components=n_components,
            min_dist=min_dist,
            set_op_mix_ratio=1,
            a=1.58,
            b=0.4,
            n_jobs=self.threads,
            random_state=random_seed
        )
        self.precomputed_reducer_high = umap.UMAP(
            metric="precomputed",
            n_neighbors=n_neighbors,
            n_components=n_components,
            min_dist=min_dist,
            set_op_mix_ratio=1,
            a=1.68,
            b=0.5,
            n_jobs=self.threads,
            random_state=random_seed
        )
        # Metric used by HDBSCAN in self.cluster().
        self.metric = "precomputed" if precomputed else "euclidean"

    def filter(self):
        # Not sure to include this.
        # Fix: the original defined this method twice identically; the
        # duplicate definition has been removed.
        pass

    def fit_transform(self, stat, second_pass=False):
        """Fit the precomputed UMAP reducers on condensed distances ``stat``.

        ``stat`` is a condensed (1-D) distance vector, expanded to square
        form for UMAP.  With fewer than 5 points, UMAP is skipped and the
        CLR depths are used directly as the embedding.  On a sparse-input
        TypeError the method retries once with n_components lowered to 2.
        """
        ## Calculate the UMAP embeddings
        try:
            if self.depths.shape[0] >= 5:
                self.precomputed_reducer_low.fit(sp_distance.squareform(stat))
                self.precomputed_reducer_mid.fit(sp_distance.squareform(stat))
                self.precomputed_reducer_high.fit(sp_distance.squareform(stat))
                self.embeddings = self.precomputed_reducer_low.embedding_
            else:
                # Too few points for UMAP: fall back to the raw CLR depths.
                self.precomputed_reducer_low.embedding_ = self.clr_depths
                self.precomputed_reducer_mid.embedding_ = self.clr_depths
                self.precomputed_reducer_high.embedding_ = self.clr_depths
                self.embeddings = self.clr_depths
        except TypeError as e:
            if not second_pass:
                ## TypeError occurs here on sparse input, so lower the number
                ## of components being embedded to the minimum of 2 and retry.
                self.precomputed_reducer_low.n_components = 2
                self.precomputed_reducer_mid.n_components = 2
                self.precomputed_reducer_high.n_components = 2
                self.fit_transform(stat, True)
            else:
                raise e

    def cluster(self, embeddings):
        """Run HDBSCAN on ``embeddings`` with tuned hyperparameters.

        Returns the label array; points that cannot be clustered (too few
        rows, 1-D input, or a tuning TypeError) are all labelled -1.  Also
        sets ``self.clusterer``, ``self.validity`` and
        ``self.cluster_validity`` as side effects.
        """
        if embeddings.shape[0] >= 5 and len(embeddings.shape) >= 2:
            try:
                ## Cluster on the UMAP embeddings and return soft clusters
                tuned = utils.hyperparameter_selection(embeddings, self.threads, metric=self.metric, starting_size=max(2, round(embeddings.shape[0] * 0.05)), use_multi_processing=False)
                best = utils.best_validity(tuned)
                self.clusterer = hdbscan.HDBSCAN(
                    algorithm='best',
                    alpha=1.0,
                    approx_min_span_tree=True,
                    gen_min_span_tree=True,
                    leaf_size=40,
                    cluster_selection_method='eom',
                    metric=self.metric,
                    min_cluster_size=int(best['min_cluster_size']),
                    min_samples=int(best['min_samples']),
                    allow_single_cluster=False,
                    core_dist_n_jobs=self.threads,
                    prediction_data=True
                )
                self.clusterer.fit(embeddings)
                try:
                    self.validity, self.cluster_validity = hdbscan.validity.validity_index(embeddings.astype(np.float64),
                                                                                          self.clusterer.labels_,
                                                                                          per_cluster_scores=True)
                except (ValueError, SystemError):
                    # Validity index can fail on degenerate clusterings;
                    # fall back to a neutral per-cluster score.
                    self.validity = None
                    self.cluster_validity = [0.5 for i in range(len(set(self.clusterer.labels_)))]
                return self.clusterer.labels_
            except TypeError:
                return np.array([-1 for _ in range(embeddings.shape[0])])
        else:
            return np.array([-1 for _ in range(embeddings.shape[0])])

    def recover_unbinned(self):
        """Recluster unclustered (-1) points and merge any new labels in.

        New labels are offset past the current maximum so they never
        collide with existing bins.  (This docstring replaces a floating
        string literal that preceded the method in the original.)
        """
        unclustered_truth_array = self.labels == -1
        unclustered_embeddings = self.embeddings[unclustered_truth_array]
        if unclustered_embeddings.shape[0] > 5:
            unclustered_labels = self.cluster(unclustered_embeddings)
            if unclustered_labels is not None:
                previous_max_label = np.max(self.labels)
                unclustered_idx = 0
                for (idx, label) in enumerate(self.labels):
                    if label == -1:
                        new_label = unclustered_labels[unclustered_idx]
                        if new_label != -1:
                            # Offset to avoid colliding with existing labels.
                            new_label += previous_max_label + 1
                            self.labels[idx] = new_label
                        unclustered_idx += 1

    def recluster(self):
        """Attempt to split each existing bin into better sub-bins.

        A bin is replaced by its sub-clustering only when the sub-clustering
        scores a validity index >= 0.9; new labels are offset past the
        current maximum.
        """
        unique_labels = set(self.labels)
        logging.info("Refining clusters...")
        if len(unique_labels) == 1 and -1 in unique_labels:
            # Everything unclustered: promote all points to a single bin 0.
            self.labels = self.labels + 1
        else:
            for label in unique_labels:
                if label != -1:
                    truth_array = self.labels == label
                    embeddings_for_label = self.embeddings[truth_array]
                    recluster_attempt = self.cluster(embeddings_for_label)
                    if recluster_attempt is not None:
                        try:
                            cluster_validity = hdbscan.validity.validity_index(embeddings_for_label.astype(np.float64), np.array(recluster_attempt), per_cluster_scores=False)
                        except (ValueError, SystemError):
                            cluster_validity = -1
                        if cluster_validity >= 0.9:
                            if not np.any(recluster_attempt == -1):
                                # Every point was re-assigned, so the old
                                # label disappears entirely: shift all labels
                                # greater than it down by one.
                                self.labels[self.labels >= label] = self.labels[self.labels >= label] - 1
                            previous_max_label = np.max(self.labels)
                            new_labels_idx = 0
                            for (idx, label) in enumerate(truth_array):
                                if label:
                                    new_label = recluster_attempt[new_labels_idx]
                                    if new_label != -1:
                                        new_label += previous_max_label + 1
                                    self.labels[idx] = new_label
                                    new_labels_idx += 1

    def cluster_separation(self):
        """Return the pairwise distance matrix between cluster centres.

        Uses ``self.cluster_means`` (label -> centre coords).  With fewer
        than two real clusters, returns a 1x1 zero matrix.
        """
        labels_no_unlabelled = set(self.labels[self.labels != -1])
        if len(labels_no_unlabelled) > 1:
            cluster_centres = [[] for _ in range(len(labels_no_unlabelled))]
            for index, label in enumerate(labels_no_unlabelled):
                cluster_centres[index] = self.cluster_means[label]
            dist_mat = pairwise_distances(cluster_centres)
            return dist_mat
        else:
            return np.zeros((1, 1))

    def combine_bins(self):
        """Merge bins whose centres lie within 0.1 of each other.

        Labels are first compacted to increment by one, then any pair of
        centres closer than the threshold is merged; recurses until no
        further merges occur.
        """
        not_neg_labs = self.labels[self.labels != -1]
        # Rescale the labels so that they increment by one.
        for (i, previous_label) in enumerate(set(not_neg_labs)):
            not_neg_labs[not_neg_labs == previous_label] = i
        self.labels[self.labels != -1] = not_neg_labs
        self.cluster_means = self.get_cluster_means()
        self.separation = self.cluster_separation()
        clocked = set()
        combine_these = {}
        for i in range(self.separation.shape[0]):
            if i not in clocked:
                for j in range(self.separation.shape[1]):
                    if j not in combine_these.keys() and i != j:
                        if self.separation[i, j] <= 0.1:
                            try:
                                combine_these[i].append(j)
                            except KeyError:
                                combine_these[i] = [j]
                            clocked.add(j)
        if len(combine_these.keys()) >= 1:
            for (base_label, other_labels) in combine_these.items():
                # Change the labels over to the base label.
                for other_label in other_labels:
                    self.labels[self.labels == other_label] = base_label
            # Recurse: merging may have brought further centres together.
            self.combine_bins()

    def cluster_distances(self):
        """Cluster the raw depth matrix (not the embedding) with HDBSCAN.

        Tunes hyperparameters on ``self.depths`` but fits on
        ``self.embeddings`` — NOTE(review): this asymmetry looks
        intentional in the original but is worth confirming.
        """
        tuned = utils.hyperparameter_selection(self.depths, self.threads, metric=self.metric)
        best = utils.best_validity(tuned)
        self.clusterer = hdbscan.HDBSCAN(
            algorithm='best',
            alpha=1.0,
            approx_min_span_tree=True,
            gen_min_span_tree=True,
            leaf_size=40,
            cluster_selection_method='eom',
            metric=self.metric,
            min_cluster_size=int(best['min_cluster_size']),
            min_samples=int(best['min_samples']),
            allow_single_cluster=False,
            core_dist_n_jobs=self.threads,
        )
        logging.info("Running HDBSCAN - %s" % self.clusterer)
        self.clusterer.fit(self.embeddings)

    def plot(self):
        """Save a 2-D scatter of the embedding, coloured and annotated by bin.

        Unclustered points (-1) are drawn grey.  Silently skips plotting
        when the embedding has fewer than two columns (IndexError).
        """
        color_palette = sns.color_palette('Paired', max(self.labels) + 1)
        cluster_colors = [
            color_palette[x] if x >= 0 else (0.5, 0.5, 0.5) for x in self.labels
        ]
        try:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(self.embeddings[:, 0],
                       self.embeddings[:, 1],
                       s=7,
                       linewidth=0,
                       c=cluster_colors,
                       alpha=0.7)
            for label, coords in self.cluster_means.items():
                if label != -1:
                    plt.annotate(
                        label,
                        coords,
                        size = 14,
                        weight = 'bold',
                        color = color_palette[label]
                    )
            plt.gca().set_aspect('equal', 'datalim')
            plt.title('UMAP projection of variants - %d Clusters' % len(self.cluster_means), fontsize=24)
            plt.savefig(self.path + '_UMAP_projection_with_clusters.png')
        except IndexError:
            pass

    def get_cluster_means(self):
        """Return {label: [mean_x, mean_y]} over the first two embedding dims.

        Falls back to 1-D indexing (IndexError branch) when only a single
        variant/embedding row exists.
        """
        result = {}
        cluster_size = {}
        for (i, label) in enumerate(self.labels):
            try:
                label_val = result[label]
                try:
                    label_val[0] += self.embeddings[i, 0]
                    label_val[1] += self.embeddings[i, 1]
                except IndexError:
                    label_val[0] += self.embeddings[0]
                    label_val[1] += self.embeddings[1]
                cluster_size[label] += 1
            except KeyError:
                try:
                    result[label] = list(self.embeddings[i, :2])
                except IndexError:
                    result[label] = list(self.embeddings[:2])  # when only one variant
                cluster_size[label] = 1
        new_result = {}
        for (key, value) in result.items():
            new_values = [val / cluster_size[key] for val in value]
            new_result[key] = new_values
        return new_result

    def plot_distances(self):
        """Save the HDBSCAN condensed-tree plot of the current clustering."""
        self.clusterer.condensed_tree_.plot(
            select_clusters=True,
            selection_palette=sns.color_palette('deep', len(set(self.clusterer.labels_))))
        plt.title('Hierarchical tree of clusters', fontsize=24)
        plt.savefig(self.path + '_UMAP_projection_with_clusters.png')

    def labels_for_printing(self):
        """Return the labels cast to int32 for output.

        Fix: the original wrapped this in a try/except AttributeError whose
        handler was identical to the try body, so the handler could never
        change the outcome; it has been removed.
        """
        return self.labels.astype('int32')

    def break_clusters(self):
        """Break up bins whose validity score is negative.

        Members of invalid bins are re-clustered with
        ``utils.break_overclustered`` and given fresh labels past the
        current maximum; remaining labels are compacted downward.
        """
        redo_bins = {}
        for (idx, label) in enumerate(self.clusterer.labels_):
            if label != -1:
                if self.cluster_validity[label] < 0.0:
                    try:
                        redo_bins[label.item()]["embeddings"].append(self.embeddings[idx, :])
                        redo_bins[label.item()]["indices"].append(idx)
                    except KeyError:
                        redo_bins[label.item()] = {}
                        redo_bins[label.item()]["embeddings"] = [self.embeddings[idx, :]]
                        redo_bins[label.item()]["indices"] = [idx]
        removed_labels = redo_bins.keys()
        # Compact surviving labels downward past each removed label.
        self.clusterer.labels_[:] = [
            label - sum(i < label for i in removed_labels) if label not in removed_labels else label for label in
            self.clusterer.labels_]
        # Break up very large bins. Not sure how to threshold this.
        max_bin_id = max([label for label in set(self.clusterer.labels_) if label not in removed_labels]) + 1
        for (bin, values) in redo_bins.items():
            new_labels = utils.break_overclustered(np.array(values["embeddings"]), self.threads)
            for (idx, label) in zip(values["indices"], new_labels):
                if label != -1:
                    # Update labels.
                    # NOTE(review): self.soft_clusters_capped is never
                    # initialised in this class — this raises AttributeError
                    # unless it is set externally; confirm.
                    self.clusterer.labels_[idx] = label + max_bin_id
                    self.soft_clusters_capped[idx] = label + max_bin_id
                else:
                    self.clusterer.labels_[idx] = label
                    self.soft_clusters_capped[idx] = label
|
<filename>flight/lorikeet/cluster.py
#!/usr/bin/env python
###############################################################################
# cluster.py - A program which handles the UMAP and HDBSCAN python components
# of lorikeet
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = ["<NAME>"]
__license__ = "GPL3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> near hdr.qut.edu.au"
__status__ = "Development"
###############################################################################
# System imports
import argparse
import logging
# Function imports
import numpy as np
import hdbscan
import seaborn as sns
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import skbio.stats.composition
from sklearn.metrics import pairwise_distances
import umap
import scipy.spatial.distance as sp_distance
# import pacmap
# import phate
# self imports
import flight.utils as utils
import flight.metrics as metrics
# Set plotting style
sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})
# Debug
debug = {
1: logging.CRITICAL,
2: logging.ERROR,
3: logging.WARNING,
4: logging.INFO,
5: logging.DEBUG
}
###############################################################################
############################### - Exceptions - ################################
class BadTreeFileException(Exception):
    """Signals a malformed tree file (judging by the name; it is not
    raised anywhere in this module's visible code)."""
    pass
############################################################################### [44/1010]
################################ - Functions - ################################
def phelp():
    """Print the command-line usage message for cluster.py."""
    usage = """
Usage:
cluster.py [SUBCOMMAND] ..
Subcommands:
fit
"""
    print(usage)
def str2bool(v):
    """Coerce *v* to a bool, accepting common textual spellings.

    Booleans pass through unchanged; strings are matched
    case-insensitively against yes/no style tokens.  Anything else
    raises ``argparse.ArgumentTypeError`` so argparse reports a clean
    error to the user.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
###############################################################################
################################ - Classes - ##################################
class CustomHelpFormatter(argparse.HelpFormatter):
    """argparse formatter that preserves explicit newlines in help text
    and appends '(default: ...)' to options that have a meaningful default."""
    def _split_lines(self, text, width):
        # Keep author-supplied line breaks instead of re-wrapping to width.
        return text.splitlines()
    def _get_help_string(self, action):
        h = action.help
        # Only append the default if the author didn't already mention it
        # and the default is something worth showing ('' / [] / None / False
        # are treated as "no default").
        if '%(default)' not in action.help:
            if action.default != '' and \
                    action.default != [] and \
                    action.default != None \
                    and action.default != False:
                if action.default is not argparse.SUPPRESS:
                    defaulting_nargs = [
                        argparse.OPTIONAL, argparse.ZERO_OR_MORE
                    ]
                    if action.option_strings or action.nargs in defaulting_nargs:
                        if '\n' in h:
                            # Multi-line help: annotate only the first line.
                            lines = h.splitlines()
                            lines[0] += ' (default: %(default)s)'
                            h = '\n'.join(lines)
                        else:
                            h += ' (default: %(default)s)'
        return h
    def _fill_text(self, text, width, indent):
        # Indent description text verbatim, without re-wrapping.
        return ''.join([indent + line for line in text.splitlines(True)])
class Cluster:
def __init__(
self,
count_path,
output_prefix,
scaler="clr",
n_neighbors=100,
min_dist=0.1,
n_components=2,
random_state=42,
min_cluster_size=100,
min_samples=50,
prediction_data=True,
cluster_selection_method="eom",
precomputed=False,
metric='hellinger_distance_poisson',
hdbscan_metric="euclidean",
threads=8,
b=0.5,
a=1.48,
random_seed=42069,
):
# set_num_threads(threads)
self.embeddings = []
self.labels = None
self.cluster_means = None
self.separation = None
self.threads = threads
## Set up clusterer and UMAP
self.path = output_prefix
self.depths = np.load(count_path)
if self.depths.shape[1] == 1:
self.single_sample = True
else:
self.single_sample = False
## Scale the data
# self.sample_distance = utils.sample_distance(self.depths)
self.clr_depths = skbio.stats.composition.clr((self.depths[:, 2:] + 1).T).T
if self.single_sample:
# Have to reshape after clr transformation
self.clr_depths = self.clr_depths.reshape((-1, 1))
# self.clr_depths = skbio.stats.composition.clr((self.depths + 1).T).T
# self.depths[:, 2:] = self.clr_depths
try:
self.n_samples = (self.depths.shape[1] - 2) // 2
except IndexError:
self.n_samples = (self.depths.shape[0] - 2) // 2
n_components = min(max(self.n_samples, 2), 10)
# n_components = 2
if n_neighbors > self.depths.shape[0]:
n_neighbors = self.depths.shape[0] - 1
self.rho_reducer = umap.UMAP(
n_neighbors=n_neighbors,
# min_dist=min_dist,
n_components=n_components,
random_state=random_seed,
# spread=1,
metric=metrics.rho_variants,
a=a,
b=b,
init="spectral"
)
self.distance_reducer = umap.UMAP(
n_neighbors=n_neighbors,
# min_dist=min_dist,
n_components=n_components,
random_state=random_seed,
# spread=1,
# metric=metrics.euclidean_variant,
a=a,
b=b,
init="spectral"
)
self.precomputed_reducer_low = umap.UMAP(
metric="precomputed",
densmap=False,
dens_lambda=2.5,
# output_dens=True,
n_neighbors=n_neighbors,
n_components=n_components,
min_dist=min_dist,
set_op_mix_ratio=1,
a=1.48,
b=0.3,
n_jobs=self.threads,
random_state=random_seed
)
self.precomputed_reducer_mid = umap.UMAP(
metric="precomputed",
densmap=False,
dens_lambda=2.5,
# output_dens=True,
n_neighbors=n_neighbors,
n_components=n_components,
min_dist=min_dist,
set_op_mix_ratio=1,
a=1.58,
b=0.4,
n_jobs=self.threads,
random_state=random_seed
)
self.precomputed_reducer_high = umap.UMAP(
metric="precomputed",
n_neighbors=n_neighbors,
n_components=n_components,
min_dist=min_dist,
set_op_mix_ratio=1,
a=1.68,
b=0.5,
n_jobs=self.threads,
random_state=random_seed
)
if precomputed:
self.metric = "precomputed"
else:
self.metric = "euclidean"
def filter(self):
# Not sure to include this
pass
def filter(self):
# Not sure to include this
pass
def fit_transform(self, stat, second_pass=False):
## Calculate the UMAP embeddings
try:
if self.depths.shape[0] >= 5:
# dist_embeddings = self.distance_reducer.fit(self.clr_depths)
# rho_embeddings = self.rho_reducer.fit(self.clr_depths)
# intersect = dist_embeddings * rho_embeddings
self.precomputed_reducer_low.fit(sp_distance.squareform(stat))
self.precomputed_reducer_mid.fit(sp_distance.squareform(stat))
self.precomputed_reducer_high.fit(sp_distance.squareform(stat))
self.embeddings = self.precomputed_reducer_low.embedding_
# self.embeddings = self.distance_reducer.fit_transform(self.clr_depths)
else:
self.precomputed_reducer_low.embedding_ = self.clr_depths
self.precomputed_reducer_mid.embedding_ = self.clr_depths
self.precomputed_reducer_high.embedding_ = self.clr_depths
self.embeddings = self.clr_depths
except TypeError as e:
if not second_pass:
## TypeError occurs here on sparse input. So need to lower the number of components
## That are trying to be embedded to. Choose minimum of 2
self.precomputed_reducer_low.n_components = 2
self.precomputed_reducer_mid.n_components = 2
self.precomputed_reducer_high.n_components = 2
self.fit_transform(stat, True)
else:
raise e
def cluster(self, embeddings):
if embeddings.shape[0] >= 5 and len(embeddings.shape) >= 2:
try:
## Cluster on the UMAP embeddings and return soft clusters
tuned = utils.hyperparameter_selection(embeddings, self.threads, metric=self.metric, starting_size=max(2, round(embeddings.shape[0] * 0.05)), use_multi_processing=False)
best = utils.best_validity(tuned)
self.clusterer = hdbscan.HDBSCAN(
algorithm='best',
alpha=1.0,
approx_min_span_tree=True,
gen_min_span_tree=True,
leaf_size=40,
cluster_selection_method='eom',
metric=self.metric,
min_cluster_size=int(best['min_cluster_size']),
min_samples=int(best['min_samples']),
allow_single_cluster=False,
core_dist_n_jobs=self.threads,
prediction_data=True
)
# logging.info("Running HDBSCAN - %s" % self.clusterer)
self.clusterer.fit(embeddings)
try:
self.validity, self.cluster_validity = hdbscan.validity.validity_index(embeddings.astype(np.float64),
self.clusterer.labels_,
per_cluster_scores=True)
except (ValueError, SystemError):
self.validity = None
self.cluster_validity = [0.5 for i in range(len(set(self.clusterer.labels_)))]
return self.clusterer.labels_
except TypeError:
return np.array([-1 for _ in range(embeddings.shape[0])])
else:
return np.array([-1 for _ in range(embeddings.shape[0])])
"""
Reclusters unclustered elements and updates the labels array with the potential new label making sure to make the label
at least 1 value higher than the previous max label value
"""
def recover_unbinned(self):
unclustered_truth_array = self.labels == -1
unclustered_embeddings = self.embeddings[unclustered_truth_array]
if unclustered_embeddings.shape[0] > 5:
unclustered_labels = self.cluster(unclustered_embeddings)
if unclustered_labels is not None:
previous_max_label = np.max(self.labels)
unclustered_idx = 0
for (idx, label) in enumerate(self.labels):
if label == -1:
new_label = unclustered_labels[unclustered_idx]
if new_label != -1:
new_label += previous_max_label + 1
self.labels[idx] = new_label
unclustered_idx += 1
def recluster(self):
    """Try to split each existing cluster into better sub-clusters.

    A cluster is replaced by its sub-clusters only when re-clustering its
    members yields a validity score >= 0.9 and produces no noise points.
    """
    unique_labels = set(self.labels)
    logging.info("Refining clusters...")
    if len(unique_labels) == 1 and -1 in unique_labels:
        # Everything is noise: promote it all to a single cluster 0.
        self.labels = self.labels + 1
    else:
        for label in unique_labels:
            if label != -1:
                truth_array = self.labels == label
                embeddings_for_label = self.embeddings[truth_array]
                recluster_attempt = self.cluster(embeddings_for_label)
                if recluster_attempt is not None:
                    try:
                        cluster_validity = hdbscan.validity.validity_index(embeddings_for_label.astype(np.float64), np.array(recluster_attempt), per_cluster_scores=False)
                    except (ValueError, SystemError):
                        # Treat an unscorable re-clustering as invalid.
                        cluster_validity = -1
                    if cluster_validity >= 0.9:
                        # print("reclustering %d validity %.3f" % (label, cluster_validity))
                        if not np.any(recluster_attempt == -1):
                            # shift all labels greater than current label down by one since this label is fully
                            # removed
                            # NOTE(review): this in-place shift changes the very
                            # labels that later iterations of the outer loop will
                            # look up via `self.labels == label`, so subsequent
                            # masks may select the wrong cluster — verify intended.
                            self.labels[self.labels >= label] = self.labels[self.labels >= label] - 1
                            previous_max_label = np.max(self.labels)
                            new_labels_idx = 0
                            # NOTE(review): the loop variable below shadows the outer
                            # `label`; `truth_array` was captured before the shift,
                            # so the member indices themselves are still correct.
                            for (idx, label) in enumerate(truth_array):
                                if label:
                                    new_label = recluster_attempt[new_labels_idx]
                                    if new_label != -1:
                                        new_label += previous_max_label + 1
                                        self.labels[idx] = new_label
                                    new_labels_idx += 1
def cluster_separation(self):
    """Return the pairwise distance matrix between cluster mean positions.

    The noise label (-1) is excluded. When fewer than two real clusters
    exist, a 1x1 zero matrix is returned instead.
    """
    real_labels = set(self.labels[self.labels != -1])
    if len(real_labels) <= 1:
        return np.zeros((1, 1))
    centres = [self.cluster_means[lab] for lab in real_labels]
    return pairwise_distances(centres)
def combine_bins(self):
    """Merge clusters whose mean embeddings are nearly coincident.

    Labels are first compacted to 0..k-1, cluster means and the pairwise
    separation matrix are recomputed, then any cluster within 0.1 of
    another is merged into it. Recurses until no merge happens.
    """
    not_neg_labs = self.labels[self.labels != -1]
    # recscale the labels so that they increment by one
    # (safe in place: the k-th smallest label is always >= k, so a new
    # label never collides with a not-yet-processed old one)
    for (i, previous_label) in enumerate(set(not_neg_labs)):
        not_neg_labs[not_neg_labs == previous_label] = i
    self.labels[self.labels != -1] = not_neg_labs
    self.cluster_means = self.get_cluster_means()
    self.separation = self.cluster_separation()
    clocked = set()      # clusters already absorbed into another
    combine_these = {}   # base label -> labels to merge into it
    for i in range(self.separation.shape[0]):
        if i not in clocked:
            for j in range(self.separation.shape[1]):
                if j not in combine_these.keys() and i != j:
                    if self.separation[i, j] <= 0.1:
                        try:
                            combine_these[i].append(j)
                        except KeyError:
                            combine_these[i] = [j]
                        clocked.add(j)
    if len(combine_these.keys()) >= 1:
        for (base_label, other_labels) in combine_these.items():
            # change the labels over to the base label
            for other_label in other_labels:
                self.labels[self.labels == other_label] = base_label
        # Re-run to compact the labels again and pick up chained merges.
        self.combine_bins()
def cluster_distances(self):
    """Tune HDBSCAN hyperparameters on the depth matrix and fit the
    clusterer on `self.embeddings`, storing it on `self.clusterer`.
    """
    ## Cluster on the UMAP embeddings
    # NOTE(review): hyperparameters are tuned on self.depths but the model
    # is fitted on self.embeddings — confirm this asymmetry is intentional
    # (cluster() tunes and fits on the same matrix).
    tuned = utils.hyperparameter_selection(self.depths, self.threads, metric=self.metric)
    best = utils.best_validity(tuned)
    self.clusterer = hdbscan.HDBSCAN(
        algorithm='best',
        alpha=1.0,
        approx_min_span_tree=True,
        gen_min_span_tree=True,
        leaf_size=40,
        cluster_selection_method='eom',
        metric=self.metric,
        min_cluster_size=int(best['min_cluster_size']),
        min_samples=int(best['min_samples']),
        allow_single_cluster=False,
        core_dist_n_jobs=self.threads,
    )
    logging.info("Running HDBSCAN - %s" % self.clusterer)
    self.clusterer.fit(self.embeddings)
def plot(self):
    """Scatter-plot the 2-D embeddings coloured by cluster (noise in grey),
    annotate each cluster with its label at the cluster mean, and save the
    figure to `<path>_UMAP_projection_with_clusters.png`.
    """
    color_palette = sns.color_palette('Paired', max(self.labels) + 1)
    cluster_colors = [
        color_palette[x] if x >= 0 else (0.5, 0.5, 0.5) for x in self.labels
    ]
    # cluster_member_colors = [
    #     sns.desaturate(x, p) for x, p in zip(cluster_colors, self.clusterer.probabilities_)
    # ]
    try:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(self.embeddings[:, 0],
                   self.embeddings[:, 1],
                   s=7,
                   linewidth=0,
                   c=cluster_colors,
                   alpha=0.7)
        # Label each cluster at its mean position, matching the point colours.
        for label, coords in self.cluster_means.items():
            if label != -1:
                plt.annotate(
                    label,
                    coords,
                    size = 14,
                    weight = 'bold',
                    color = color_palette[label]
                )
        # ax.add_artist(legend)
        plt.gca().set_aspect('equal', 'datalim')
        plt.title('UMAP projection of variants - %d Clusters' % len(self.cluster_means), fontsize=24)
        plt.savefig(self.path + '_UMAP_projection_with_clusters.png')
    except IndexError:
        # NOTE(review): presumably a label can index past the palette
        # (e.g. after relabelling); plotting is best-effort, so skip.
        pass
def get_cluster_means(self):
    """Return {label: [mean_x, mean_y]} over the first two embedding
    dimensions for every label (including -1 noise, if present).

    Falls back to treating ``self.embeddings`` as a single 1-D row when
    there is only one variant.
    """
    sums = {}
    counts = {}
    for idx, lab in enumerate(self.labels):
        if lab not in sums:
            try:
                sums[lab] = list(self.embeddings[idx, :2])
            except IndexError:
                sums[lab] = list(self.embeddings[:2])  # when only one variant
            counts[lab] = 1
        else:
            try:
                sums[lab][0] += self.embeddings[idx, 0]
                sums[lab][1] += self.embeddings[idx, 1]
            except IndexError:
                sums[lab][0] += self.embeddings[0]
                sums[lab][1] += self.embeddings[1]
            counts[lab] += 1
    return {lab: [component / counts[lab] for component in vec]
            for lab, vec in sums.items()}
def plot_distances(self):
    """Plot the HDBSCAN condensed tree with the selected clusters
    highlighted, and save the figure to disk.
    """
    self.clusterer.condensed_tree_.plot(
        select_clusters=True,
        selection_palette=sns.color_palette('deep', len(set(self.clusterer.labels_))))
    plt.title('Hierarchical tree of clusters', fontsize=24)
    # NOTE(review): this writes the same file name as plot(); whichever runs
    # last overwrites the other — confirm whether a distinct suffix
    # (e.g. "_condensed_tree.png") was intended.
    plt.savefig(self.path + '_UMAP_projection_with_clusters.png')
def labels_for_printing(self):
    """Return the label array converted to int32 for output/printing.

    The original wrapped the conversion in ``try/except AttributeError``
    with an identical expression in both branches — the handler could only
    re-raise the same error and never changed the outcome, so it has been
    removed.
    """
    return self.labels.astype('int32')
def break_clusters(self):
    """Split clusters whose validity score is negative by re-clustering
    their members, compacting the surviving labels so they stay contiguous.

    Updates `self.clusterer.labels_` and `self.soft_clusters_capped` in place.
    """
    redo_bins = {}
    # Collect members of every cluster with negative validity.
    for (idx, label) in enumerate(self.clusterer.labels_):
        if label != -1:
            if self.cluster_validity[label] < 0.0:
                try:
                    redo_bins[label.item()]["embeddings"].append(self.embeddings[idx, :])
                    redo_bins[label.item()]["indices"].append(idx)
                except KeyError:
                    redo_bins[label.item()] = {}
                    redo_bins[label.item()]["embeddings"] = [self.embeddings[idx, :]]
                    redo_bins[label.item()]["indices"] = [idx]
    removed_labels = redo_bins.keys()
    # Compact surviving labels: shift each down by the number of removed
    # labels below it; removed labels themselves are left untouched here
    # (they get reassigned below).
    self.clusterer.labels_[:] = [
        label - sum(i < label for i in removed_labels) if label not in removed_labels else label for label in
        self.clusterer.labels_]
    # break up very large bins. Not sure how to threshold this
    max_bin_id = max([label for label in set(self.clusterer.labels_) if label not in removed_labels]) + 1
    for (bin, values) in redo_bins.items():
        new_labels = utils.break_overclustered(np.array(values["embeddings"]), self.threads)
        # NOTE(review): every redo bin offsets its new labels by the SAME
        # max_bin_id, so labels from different redo bins can collide —
        # confirm whether max_bin_id should advance between bins.
        for (idx, label) in zip(values["indices"], new_labels):
            if label != -1:
                # Update labels
                self.clusterer.labels_[idx] = label + max_bin_id
                self.soft_clusters_capped[idx] = label + max_bin_id
            else:
                self.clusterer.labels_[idx] = label
                self.soft_clusters_capped[idx] = label
|
en
| 0.438786
|
#!/usr/bin/env python ############################################################################### # cluster.py - A program which handles the UMAP and HDBSCAN python components # of lorikeet ############################################################################### # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # ############################################################################### ############################################################################### # System imports # Function imports # import pacmap # import phate # self imports # Set plotting style # Debug ############################################################################### ############################### - Exceptions - ################################ ############################################################################### [44/1010] ################################ - Functions - ################################ Usage: cluster.py [SUBCOMMAND] .. 
Subcommands: fit ############################################################################### ################################ - Classes - ################################## # set_num_threads(threads) ## Set up clusterer and UMAP ## Scale the data # self.sample_distance = utils.sample_distance(self.depths) # Have to reshape after clr transformation # self.clr_depths = skbio.stats.composition.clr((self.depths + 1).T).T # self.depths[:, 2:] = self.clr_depths # n_components = 2 # min_dist=min_dist, # spread=1, # min_dist=min_dist, # spread=1, # metric=metrics.euclidean_variant, # output_dens=True, # output_dens=True, # Not sure to include this # Not sure to include this ## Calculate the UMAP embeddings # dist_embeddings = self.distance_reducer.fit(self.clr_depths) # rho_embeddings = self.rho_reducer.fit(self.clr_depths) # intersect = dist_embeddings * rho_embeddings # self.embeddings = self.distance_reducer.fit_transform(self.clr_depths) ## TypeError occurs here on sparse input. So need to lower the number of components ## That are trying to be embedded to. 
Choose minimum of 2 ## Cluster on the UMAP embeddings and return soft clusters # logging.info("Running HDBSCAN - %s" % self.clusterer) Reclusters unclustered elements and updates the labels array with the potential new label making sure to make the label at least 1 value higher than the previous max label value # print("reclustering %d validity %.3f" % (label, cluster_validity)) # shift all labels greater than current label down by one since this label is fully # removed # dist_mat = utils.cluster_distances(self.embeddings, self.labels, self.threads) # print(f"Len {len(cluster_centres)} index {index} label {label}") # recscale the labels so that they increment by one # change the labels over to the base label ## Cluster on the UMAP embeddings # cluster_member_colors = [ # sns.desaturate(x, p) for x, p in zip(cluster_colors, self.clusterer.probabilities_) # ] # ax.add_artist(legend) # when only one variant # break up very large bins. Not sure how to threshold this # Update labels
| 1.613081
| 2
|
caffe-sparse-tool.py
|
eric612/caffe-int8-convert-tools
| 9
|
6627350
|
# -*- coding: utf-8 -*-
# SenseNets is pleased to support the open source community by making caffe-sparse-tool available.
#
# Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Analyze module for generating the sparse-connection table
This tool is based on Caffe Framework.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import math, copy
import matplotlib.pyplot as plt
import sys,os
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import time
import datetime
from google.protobuf import text_format
def parse_args():
    """Build the command-line interface and evaluate ``sys.argv``.

    Returns:
        (args, parser): the parsed argument namespace and the parser itself.
    """
    cli = argparse.ArgumentParser(
        description='find the pretrained caffe models sparse value')
    cli.add_argument('--proto', dest='proto',
                     help="path to deploy prototxt.", type=str)
    cli.add_argument('--model', dest='model',
                     help='path to pretrained weights', type=str)
    cli.add_argument('--mean', dest='mean',
                     help='value of mean', type=float, nargs=3)
    cli.add_argument('--norm', dest='norm',
                     help='value of normalize', type=float, nargs=1, default=1.0)
    cli.add_argument('--images', dest='images',
                     help='path to sparse images', type=str)
    cli.add_argument('--output', dest='output',
                     help='path to output sparse file', type=str, default='sparse.table')
    cli.add_argument('--gpu', dest='gpu',
                     help='use gpu to forward', type=int, default=0)
    return cli.parse_args(), cli
# CLI options are parsed once at import time; main() reads `args` below.
# (The original also had a module-scope `global args, parser` statement,
# which is a no-op at module level and has been removed.)
args, parser = parse_args()

# Global registry of per-layer sparse statistics: filled by weight_sparse(),
# consumed by activation_sparse() and save_sparse_file().
sparse_layer_lists = []
class SparseLayer:
    """Per-convolution-layer accumulator of channel sparsity statistics.

    Tracks, per input/output channel, the running absolute maximum of the
    bottom/top activation blobs and 0/1 flags marking channels whose
    weights or activations are entirely (near) zero or constant.
    """

    def __init__(self, name, bottom_blob_name, top_blob_name, num_inch, num_outch):
        self.name = name
        self.bottom_blob_name = bottom_blob_name
        self.top_blob_name = top_blob_name
        self.num_inch = num_inch
        self.num_outch = num_outch
        # Running per-channel absolute maxima across all analyzed images.
        self.top_blob_max = [0] * num_outch
        self.bottom_blob_max = [0] * num_inch
        # 0/1 sparsity flags per channel.
        self.weight_zero = [0] * num_outch
        self.inch_zero = [0] * num_inch
        self.outch_zero = [0] * num_outch

    def sparse_weight(self, weight_data):
        """Flag output channels whose entire weight chunk is (near) zero."""
        # One chunk of the flattened weights per output channel.
        for ch, chunk in enumerate(np.array_split(weight_data, self.num_outch)):
            peak = max(abs(np.max(chunk)), abs(np.min(chunk)))
            if peak < 0.0001:
                self.weight_zero[ch] = 1

    def analyze_bottom_blob(self, blob_data):
        """Update per-input-channel maxima; flag constant channels."""
        for ch, chunk in enumerate(np.array_split(blob_data, self.num_inch)):
            hi = np.max(chunk)
            lo = np.min(chunk)
            self.bottom_blob_max[ch] = max(self.bottom_blob_max[ch],
                                           max(abs(hi), abs(lo)))
            if hi == lo:
                self.inch_zero[ch] = 1

    def analyze_top_blob(self, blob_data):
        """Update per-output-channel maxima; flag constant channels."""
        for ch, chunk in enumerate(np.array_split(blob_data, self.num_outch)):
            hi = np.max(chunk)
            lo = np.min(chunk)
            self.top_blob_max[ch] = max(self.top_blob_max[ch],
                                        max(abs(hi), abs(lo)))
            if hi == lo:
                self.outch_zero[ch] = 1

    def sparse_bottom_blob(self):
        """Flag input channels whose activations never exceeded ~zero."""
        for ch, peak in enumerate(self.bottom_blob_max):
            if peak < 0.0001:
                self.inch_zero[ch] = 1

    def sparse_top_blob(self):
        """Flag output channels whose activations never exceeded ~zero."""
        for ch, peak in enumerate(self.top_blob_max):
            if peak < 0.0001:
                self.outch_zero[ch] = 1

    def display_sparse_info(self):
        """Print how many output channels of this layer are sparse."""
        sparse_count = sum(
            1 for ch in range(self.num_outch)
            if self.outch_zero[ch] != 0 or self.weight_zero[ch] != 0
        )
        print("%-20s outch : %-8d sparse : %-8d ratio : %-6.2f " %
              (self.name, self.num_outch, sparse_count,
               sparse_count / float(self.num_outch) * 100))
def save_calibration(file_path):
    """Placeholder for persisting calibration data; not implemented."""
    pass
def net_forward(net, image_path, transformer):
    """
    Run one forward pass over a single image and print the elapsed time.
    Args:
        net: the instance of Caffe inference
        image_path: path of the image to run inference on
        transformer: caffe.io.Transformer used to preprocess the image
    Returns:
        none
    """
    # load image
    image = caffe.io.load_image(image_path)
    # transformer.preprocess the image
    net.blobs['data'].data[...] = transformer.preprocess('data',image)
    # net forward
    # NOTE(review): time.clock() was deprecated in Python 3.3 and removed
    # in 3.8; time.perf_counter() is the modern equivalent — confirm which
    # interpreters must be supported before changing.
    start = time.clock()
    output = net.forward()
    end = time.clock()
    print("%s forward time : %.3f s" % (image_path, end - start))
def file_name(file_dir):
    """Collect and return the path of every file beneath *file_dir*.

    Each path is echoed to stdout as it is found.
    Args:
        file_dir: The source file directory
    Returns:
        list of "<root>/<name>" paths for every file found recursively
    """
    collected = []
    for root, _dirs, names in os.walk(file_dir):
        for fname in names:
            path = root + "/" + fname
            print(path)
            collected.append(path)
    return collected
def network_prepare(net, mean, norm):
    """
    Build the preprocessing transformer for Caffe network inference.
    Args:
        net: the instance of Caffe inference
        mean: the per-channel mean values (length 3)
        norm: the input scale (normalize) factor
    Returns:
        transformer: the configured caffe.io.Transformer
    """
    print("Network initial")
    img_mean = np.array(mean)
    # initial transformer
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    # reorder dimensions HWC -> CHW (channel first)
    # (the original comment said "RBG to BGR", but set_transpose reorders
    # axes; the channel swap happens below)
    transformer.set_transpose('data', (2,0,1))
    # subtract the per-channel mean
    transformer.set_mean('data', img_mean)
    # rescale image data from [0,1] to [0,255]
    transformer.set_raw_scale('data', 255)
    # convert RGB -> BGR
    transformer.set_channel_swap('data', (2,1,0))
    # normalize: multiply the input by `norm`
    transformer.set_input_scale('data', norm)
    return transformer
def weight_sparse(net, net_file, transformer, images_files):
    """
    Analyze convolution weight blobs for all-zero output channels.

    Builds one SparseLayer per 3x3/1x1 Convolution(Depthwise) layer and
    appends it to the global `sparse_layer_lists`.
    Args:
        net: the instance of Caffe inference
        net_file: deploy caffe prototxt
        transformer: preprocessing transformer for the forward pass
        images_files: list of image paths (only the first one is used here)
    Returns:
        none
    """
    print("\nSparse the kernel weight:")
    # forward only once to find the bottom blob property
    net_forward(net, images_files[0], transformer)
    # parse the net param from deploy prototxt
    params = caffe_pb2.NetParameter()
    with open(net_file) as f:
        text_format.Merge(f.read(), params)
    for i, layer in enumerate(params.layer):
        if i == 0:
            if layer.type != "Input":
                raise ValueError("First layer should be input")
        # find the convolution 3x3 and 1x1 layers to get out the weight_scale
        if(layer.type == "Convolution" or layer.type == "ConvolutionDepthwise"):
            kernel_size = layer.convolution_param.kernel_size[0]
            if(kernel_size == 3 or kernel_size == 1):
                weight_blob = net.params[layer.name][0].data
                # find bottom blob channel num
                num_input = net.blobs[layer.bottom[0]].shape[1]
                # initial the instance of SparseLayer Class lists
                sparse_layer = SparseLayer(layer.name, layer.bottom[0], layer.top[0], num_input, layer.convolution_param.num_output)
                # sparse the weight value
                sparse_layer.sparse_weight(weight_blob)
                # add the sparse_layer into the save list (module-level global)
                sparse_layer_lists.append(sparse_layer)
    return None
def activation_sparse(net, transformer, images_files):
    """
    Analyze bottom/top activation blobs for sparse (zero/constant) channels.
    Args:
        net: the instance of Caffe inference
        transformer: preprocessing transformer for the forward pass
        images_files: sparse dataset (list of image paths)
    Returns:
        none
    """
    print("\nAnalyze the sparse info of the Activation:")
    # run float32 inference on sparse dataset to analyze activations
    for i , image in enumerate(images_files):
        net_forward(net, image, transformer)
        # accumulate per-channel statistics for each tracked layer
        for layer in sparse_layer_lists:
            blob = net.blobs[layer.bottom_blob_name].data[0].flatten()
            layer.analyze_bottom_blob(blob)
            blob = net.blobs[layer.top_blob_name].data[0].flatten()
            layer.analyze_top_blob(blob)
    # after all images: flag channels whose activations never exceeded ~zero
    for layer in sparse_layer_lists:
        layer.sparse_bottom_blob()
        layer.sparse_top_blob()
    return None
def save_sparse_file(sparse_path):
    """Write the sparse-connection table for every analyzed layer.

    For each layer three lines are emitted — "<name>_weight",
    "<name>_bottom" and "<name>_top " — each followed by the indices of
    the channels flagged as sparse.

    Fix: the output file is now opened with a ``with`` block, so the
    handle is closed even if a write fails (the original leaked it on
    error).
    Args:
        sparse_path: path of the table file to write
    """
    lines = []
    for layer in sparse_layer_lists:
        # sparse output channels by weight
        entry = layer.name + "_weight"
        for i in range(layer.num_outch):
            if layer.weight_zero[i] != 0:
                entry = entry + " " + str(i)
        lines.append(entry)
        # sparse bottom/top blob channels by activation
        entry = layer.name + "_bottom"
        for i in range(layer.num_inch):
            if layer.inch_zero[i] != 0:
                entry = entry + " " + str(i)
        lines.append(entry)
        entry = layer.name + "_top "
        for i in range(layer.num_outch):
            if layer.outch_zero[i] != 0:
                entry = entry + " " + str(i)
        lines.append(entry)
    # save into txt file
    with open(sparse_path, 'w') as sparse_file:
        for data in lines:
            sparse_file.write(data + "\n")
def usage_info():
    """Print a short hint when mandatory CLI arguments are missing."""
    for line in ("Input params is illegal...╮(╯3╰)╭",
                 "try it again:\n python caffe-sparse-tool.py -h"):
        print(line)
def main():
    """
    Entry point: validate CLI args, run the weight and activation sparse
    analyses, and write the sparse-connection table.
    """
    # time start
    time_start = datetime.datetime.now()
    print(args)
    # all four of these options are mandatory
    if args.proto == None or args.model == None or args.mean == None or args.images == None:
        usage_info()
        return None
    # deploy caffe prototxt path
    net_file = args.proto
    # trained caffemodel path
    caffe_model = args.model
    # mean value
    mean = args.mean
    # norm value (argparse nargs=1 yields a one-element list when supplied,
    # so a non-default value has to be unwrapped)
    norm = 1.0
    if args.norm != 1.0:
        norm = args.norm[0]
    # calibration dataset
    images_path = args.images
    # the output sparse file
    sparse_path = args.output
    # default: use CPU to forward
    # NOTE(review): the GPU device id is hard-coded to 0 regardless of
    # the --gpu value — confirm whether args.gpu was meant as a device id.
    if args.gpu != 0:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    # initial caffe net and the forward model (GPU or CPU)
    net = caffe.Net(net_file,caffe_model,caffe.TEST)
    # prepare the cnn network
    transformer = network_prepare(net, mean, norm)
    # get the calibration datasets images files path
    images_files = file_name(images_path)
    # analyze kernel weight of the caffemodel to find some channels whose weight value whole zero
    weight_sparse(net, net_file, transformer, images_files)
    # analyze activation value of the caffemodel to find some channels whose value whole zero or the same value (maybe the bias value of the last conv layer)
    activation_sparse(net, transformer, images_files)
    # show sparse info
    for layer in sparse_layer_lists:
        layer.display_sparse_info()
    # save the sparse tables, best wish for your sparse have low accuracy loss :)
    save_sparse_file(sparse_path)
    # time end
    time_end = datetime.datetime.now()
    print("\nCaffe Sparse table create success, it's cost %s, best wish for your Sparse inference has a low accuracy loss...\(^▽^)/...2333..." % (time_end - time_start))


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# SenseNets is pleased to support the open source community by making caffe-sparse-tool available.
#
# Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Analyze module for generating the sparse-connection table
This tool is based on Caffe Framework.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import math, copy
import matplotlib.pyplot as plt
import sys,os
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import time
import datetime
from google.protobuf import text_format
def parse_args():
parser = argparse.ArgumentParser(
description='find the pretrained caffe models sparse value')
parser.add_argument('--proto', dest='proto',
help="path to deploy prototxt.", type=str)
parser.add_argument('--model', dest='model',
help='path to pretrained weights', type=str)
parser.add_argument('--mean', dest='mean',
help='value of mean', type=float, nargs=3)
parser.add_argument('--norm', dest='norm',
help='value of normalize', type=float, nargs=1, default=1.0)
parser.add_argument('--images', dest='images',
help='path to sparse images', type=str)
parser.add_argument('--output', dest='output',
help='path to output sparse file', type=str, default='sparse.table')
parser.add_argument('--gpu', dest='gpu',
help='use gpu to forward', type=int, default=0)
args = parser.parse_args()
return args, parser
global args, parser
args, parser = parse_args()
# ugly global params
sparse_layer_lists = []
class SparseLayer:
def __init__(self, name, bottom_blob_name, top_blob_name, num_inch, num_outch):
self.name = name
self.bottom_blob_name = bottom_blob_name
self.top_blob_name = top_blob_name
self.num_inch = num_inch
self.num_outch = num_outch
self.top_blob_max = [0 for x in range(0, num_outch)]
self.bottom_blob_max = [0 for x in range(0, num_inch)]
self.weight_zero = [0 for x in range(0, num_outch)]
self.inch_zero = [0 for x in range(0, num_inch)]
self.outch_zero = [0 for x in range(0, num_outch)]
def sparse_weight(self, weight_data):
# spilt the weight data by outch num
weight_outch_data = np.array_split(weight_data, self.num_outch)
for i, data in enumerate(weight_outch_data):
max_val = np.max(data)
min_val = np.min(data)
threshold = max(abs(max_val), abs(min_val))
if threshold < 0.0001:
self.weight_zero[i] = 1
#print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, threshold, self.weight_scale[i]))
def analyze_bottom_blob(self, blob_data):
# spilt the blob data by inch num
blob_inch_data = np.array_split(blob_data, self.num_inch)
# interval for per bottom blob channel
for i, data in enumerate(blob_inch_data):
max_val = np.max(data)
min_val = np.min(data)
self.bottom_blob_max[i] = max(self.bottom_blob_max[i], max(abs(max_val), abs(min_val)))
if max_val == min_val:
self.inch_zero[i] = 1
def analyze_top_blob(self, blob_data):
# spilt the blob data by outch num
blob_outch_data = np.array_split(blob_data, self.num_outch)
# interval for per top blob channel
for i, data in enumerate(blob_outch_data):
max_val = np.max(data)
min_val = np.min(data)
self.top_blob_max[i] = max(self.top_blob_max[i], max(abs(max_val), abs(min_val)))
if max_val == min_val:
self.outch_zero[i] = 1
def sparse_bottom_blob(self):
for i in range(0, self.num_inch):
if self.bottom_blob_max[i] < 0.0001:
self.inch_zero[i] = 1
def sparse_top_blob(self):
for i in range(0, self.num_outch):
if self.top_blob_max[i] < 0.0001:
self.outch_zero[i] = 1
#print("%-20s outch : %-5d max_val : %-10.8f " % (self.name, i, self.blob_max[i]))
def display_sparse_info(self):
count = 0
for i in range(self.num_outch):
if self.outch_zero[i] != 0 or self.weight_zero[i] !=0:
count += 1
print("%-20s outch : %-8d sparse : %-8d ratio : %-6.2f " % (self.name, self.num_outch, count, count / float(self.num_outch) * 100))
def save_calibration(file_path):
pass
def net_forward(net, image_path, transformer):
"""
network inference and statistics the cost time
Args:
net: the instance of Caffe inference
image_path: a image need to be inference
transformer:
Returns:
none
"""
# load image
image = caffe.io.load_image(image_path)
# transformer.preprocess the image
net.blobs['data'].data[...] = transformer.preprocess('data',image)
# net forward
start = time.clock()
output = net.forward()
end = time.clock()
print("%s forward time : %.3f s" % (image_path, end - start))
def file_name(file_dir):
"""
Find the all file path with the directory
Args:
file_dir: The source file directory
Returns:
files_path: all the file path into a list
"""
files_path = []
for root, dir, files in os.walk(file_dir):
for name in files:
file_path = root + "/" + name
print(file_path)
files_path.append(file_path)
return files_path
def network_prepare(net, mean, norm):
"""
instance the prepare process param of caffe network inference
Args:
net: the instance of Caffe inference
mean: the value of mean
norm: the value of normalize
Returns:
none
"""
print("Network initial")
img_mean = np.array(mean)
# initial transformer
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# convert shape from RBG to BGR
transformer.set_transpose('data', (2,0,1))
# load meanfile
transformer.set_mean('data', img_mean)
# resize image data from [0,1] to [0,255]
transformer.set_raw_scale('data', 255)
# convert RGB -> BGR
transformer.set_channel_swap('data', (2,1,0))
# normalize
transformer.set_input_scale('data', norm)
return transformer
def weight_sparse(net, net_file, transformer, images_files):
"""
CaffeModel convolution weight blob sparse
Args:
net: the instance of Caffe inference
net_file: deploy caffe prototxt
Returns:
none
"""
print("\nSparse the kernel weight:")
# forward only once to find the bottom blob property
net_forward(net, images_files[0], transformer)
# parse the net param from deploy prototxt
params = caffe_pb2.NetParameter()
with open(net_file) as f:
text_format.Merge(f.read(), params)
for i, layer in enumerate(params.layer):
if i == 0:
if layer.type != "Input":
raise ValueError("First layer should be input")
# find the convolution 3x3 and 1x1 layers to get out the weight_scale
if(layer.type == "Convolution" or layer.type == "ConvolutionDepthwise"):
kernel_size = layer.convolution_param.kernel_size[0]
if(kernel_size == 3 or kernel_size == 1):
weight_blob = net.params[layer.name][0].data
# find bottom blob channel num
num_input = net.blobs[layer.bottom[0]].shape[1]
# initial the instance of SparseLayer Class lists
sparse_layer = SparseLayer(layer.name, layer.bottom[0], layer.top[0], num_input, layer.convolution_param.num_output)
# sparse the weight value
sparse_layer.sparse_weight(weight_blob)
# add the sparse_layer into the save list
sparse_layer_lists.append(sparse_layer)
return None
def activation_sparse(net, transformer, images_files):
"""
Activation bottom/top blob sparse analyze
Args:
net: the instance of Caffe inference
transformer:
images_files: sparse dataset
Returns:
none
"""
print("\nAnalyze the sparse info of the Activation:")
# run float32 inference on sparse dataset to analyze activations
for i , image in enumerate(images_files):
net_forward(net, image, transformer)
# analyze bottom/top blob
for layer in sparse_layer_lists:
blob = net.blobs[layer.bottom_blob_name].data[0].flatten()
layer.analyze_bottom_blob(blob)
blob = net.blobs[layer.top_blob_name].data[0].flatten()
layer.analyze_top_blob(blob)
# calculate top blob and flag the sparse channels in every layers
for layer in sparse_layer_lists:
layer.sparse_bottom_blob()
layer.sparse_top_blob()
return None
def save_sparse_file(sparse_path):
sparse_file = open(sparse_path, 'w')
# save temp
save_temp = []
# save weight scale
for layer in sparse_layer_lists:
save_string = layer.name + "_weight"
for i in range(layer.num_outch):
if layer.weight_zero[i] != 0:
save_string = save_string + " " + str(i)
save_temp.append(save_string)
# save bottom/top blob sparse channel
save_string = layer.name + "_bottom"
for i in range(layer.num_inch):
if layer.inch_zero[i] != 0:
save_string = save_string + " " + str(i)
save_temp.append(save_string)
save_string = layer.name + "_top "
for i in range(layer.num_outch):
if layer.outch_zero[i] != 0:
save_string = save_string + " " + str(i)
save_temp.append(save_string)
# save into txt file
for data in save_temp:
sparse_file.write(data + "\n")
sparse_file.close()
def usage_info():
"""
usage info
"""
print("Input params is illegal...╮(╯3╰)╭")
print("try it again:\n python caffe-sparse-tool.py -h")
def main():
    """
    Entry point: parse CLI options, run weight/activation sparsity analysis
    on the calibration dataset, and save the resulting sparse tables.

    Returns:
        none (returns early after printing usage if a required arg is missing)
    """
    # time start
    time_start = datetime.datetime.now()
    print(args)
    # all four of these CLI arguments are mandatory (PEP 8: use "is None")
    if args.proto is None or args.model is None or args.mean is None or args.images is None:
        usage_info()
        return None
    # deploy caffe prototxt path
    net_file = args.proto
    # trained caffemodel path
    caffe_model = args.model
    # mean value
    mean = args.mean
    # norm value; args.norm is indexable when supplied — TODO confirm the
    # argparse nargs setting that makes args.norm a list
    norm = 1.0
    if args.norm != 1.0:
        norm = args.norm[0]
    # calibration dataset directory
    images_path = args.images
    # the output sparse table file
    sparse_path = args.output
    # default is CPU forward; any non-zero --gpu selects device 0
    if args.gpu != 0:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    # initialize the caffe net in TEST phase
    net = caffe.Net(net_file, caffe_model, caffe.TEST)
    # prepare the preprocessing transformer (mean subtraction, normalization)
    transformer = network_prepare(net, mean, norm)
    # collect the calibration dataset image file paths
    images_files = file_name(images_path)
    # flag kernel channels whose weights are entirely zero
    weight_sparse(net, net_file, transformer, images_files)
    # flag activation channels that stay zero (or constant, e.g. a bias-only
    # output of the previous conv layer) across the whole dataset
    activation_sparse(net, transformer, images_files)
    # show sparse info
    for layer in sparse_layer_lists:
        layer.display_sparse_info()
    # save the sparse tables, best wish for your sparse have low accuracy loss :)
    save_sparse_file(sparse_path)
    # time end
    time_end = datetime.datetime.now()
    print("\nCaffe Sparse table create success, it's cost %s, best wish for your Sparse inference has a low accuracy loss...\(^▽^)/...2333..." % (time_end - time_start))
# Script entry point: only run the analysis when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
en
| 0.692087
|
# -*- coding: utf-8 -*- # SenseNets is pleased to support the open source community by making caffe-sparse-tool available. # # Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. Analyze module for generating the sparse-connection table This tool is based on Caffe Framework. # ugly global params # spilt the weight data by outch num #print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, threshold, self.weight_scale[i])) # spilt the blob data by inch num # interval for per bottom blob channel # spilt the blob data by outch num # interval for per top blob channel #print("%-20s outch : %-5d max_val : %-10.8f " % (self.name, i, self.blob_max[i])) network inference and statistics the cost time Args: net: the instance of Caffe inference image_path: a image need to be inference transformer: Returns: none # load image # transformer.preprocess the image # net forward Find the all file path with the directory Args: file_dir: The source file directory Returns: files_path: all the file path into a list instance the prepare process param of caffe network inference Args: net: the instance of Caffe inference mean: the value of mean norm: the value of normalize Returns: none # initial transformer # convert shape from RBG to BGR # load meanfile # resize image data from [0,1] to [0,255] # convert RGB -> BGR # normalize CaffeModel convolution weight blob sparse Args: net: the instance of Caffe inference net_file: 
deploy caffe prototxt Returns: none # forward only once to find the bottom blob property # parse the net param from deploy prototxt # find the convolution 3x3 and 1x1 layers to get out the weight_scale # find bottom blob channel num # initial the instance of SparseLayer Class lists # sparse the weight value # add the sparse_layer into the save list Activation bottom/top blob sparse analyze Args: net: the instance of Caffe inference transformer: images_files: sparse dataset Returns: none # run float32 inference on sparse dataset to analyze activations # analyze bottom/top blob # calculate top blob and flag the sparse channels in every layers # save temp # save weight scale # save bottom/top blob sparse channel # save into txt file usage info main function # time start # deploy caffe prototxt path # trained caffemodel path # mean value # norm value # calibration dataset # the output sparse file # default use CPU to forwark # initial caffe net and the forword model(GPU or CPU) # prepare the cnn network # get the calibration datasets images files path # analyze kernel weight of the caffemodel to find some channels whose weight value whole zero # analyze activation value of the caffemodel to find some channels whose value whole zero or the same value(maybe the bisa value of latest conv layer) # show sparse info # save the sparse tables,best wish for your sparse have low accuracy loss :) # time end
| 1.603986
| 2
|